diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml index 30e6074..e64bbc6 100644 --- a/.github/workflows/typos.toml +++ b/.github/workflows/typos.toml @@ -1,2 +1,3 @@ [default.extend-words] groth = "groth" +mimc = "mimc" diff --git a/.gitignore b/.gitignore index d38766b..d3067b1 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,5 @@ solidity-verifiers/generated examples/*.sol examples/*.calldata examples/*.inputs +*.serialized +*/*.serialized diff --git a/examples/circom_full_flow.rs b/examples/circom_full_flow.rs index 10a79ba..dccf0d8 100644 --- a/examples/circom_full_flow.rs +++ b/examples/circom_full_flow.rs @@ -89,7 +89,8 @@ fn main() { let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); // prepare the Decider prover & verifier params - let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + let (decider_pp, decider_vp) = + D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); // run n steps of the folding iteration for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { @@ -99,6 +100,14 @@ fn main() { println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } + // verify the last IVC proof + let ivc_proof = nova.ivc_proof(); + N::verify( + nova_params.1, // Nova's verifier params + ivc_proof, + ) + .unwrap(); + let start = Instant::now(); let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); println!("generated Decider proof: {:?}", start.elapsed()); diff --git a/examples/external_inputs.rs b/examples/external_inputs.rs index 2f70809..f4a6af1 100644 --- a/examples/external_inputs.rs +++ b/examples/external_inputs.rs @@ -207,17 +207,11 @@ fn main() { folding_scheme.state() ); - let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances(); - println!("Run the Nova's IVC verifier"); + let ivc_proof = folding_scheme.ivc_proof(); N::verify( - nova_params.1, - initial_state.clone(), - folding_scheme.state(), // latest state - Fr::from(num_steps as u32), - running_instance, - incoming_instance, - cyclefold_instance, + nova_params.1, // Nova's verifier params + ivc_proof, ) .unwrap(); } diff --git a/examples/multi_inputs.rs b/examples/multi_inputs.rs index f781952..a337c89 100644 --- a/examples/multi_inputs.rs +++ b/examples/multi_inputs.rs @@ -154,17 +154,11 @@ fn main() { println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } - let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances(); - println!("Run the Nova's IVC verifier"); + let ivc_proof = folding_scheme.ivc_proof(); N::verify( - nova_params.1, - initial_state.clone(), - folding_scheme.state(), // latest state - Fr::from(num_steps as u32), - running_instance, - incoming_instance, - cyclefold_instance, + nova_params.1, // Nova's verifier params + ivc_proof, ) .unwrap(); } diff --git a/examples/noir_full_flow.rs b/examples/noir_full_flow.rs index ec5107a..6453109 100644 --- a/examples/noir_full_flow.rs +++ b/examples/noir_full_flow.rs @@ -79,7 +79,8 @@ fn main() { let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); // prepare the Decider prover & verifier params - let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + let (decider_pp, decider_vp) = + D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); // run n steps of the folding iteration for i in 0..5 { @@ -87,6 +88,13 @@ fn main() { nova.prove_step(rng, vec![], None).unwrap(); println!("Nova::prove_step {}: {:?}", i, 
start.elapsed()); } + // verify the last IVC proof + let ivc_proof = nova.ivc_proof(); + N::verify( + nova_params.1, // Nova's verifier params + ivc_proof, + ) + .unwrap(); let start = Instant::now(); let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); diff --git a/examples/noname_full_flow.rs b/examples/noname_full_flow.rs index 73a1a67..00dccbf 100644 --- a/examples/noname_full_flow.rs +++ b/examples/noname_full_flow.rs @@ -89,7 +89,8 @@ fn main() { let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap(); // prepare the Decider prover & verifier params - let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + let (decider_pp, decider_vp) = + D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap(); // run n steps of the folding iteration for (i, external_inputs_at_step) in external_inputs.iter().enumerate() { @@ -99,6 +100,14 @@ fn main() { println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } + // verify the last IVC proof + let ivc_proof = nova.ivc_proof(); + N::verify( + nova_params.1, // Nova's verifier params + ivc_proof, + ) + .unwrap(); + let start = Instant::now(); let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); println!("generated Decider proof: {:?}", start.elapsed()); diff --git a/examples/sha256.rs b/examples/sha256.rs index 705dcb8..d974d65 100644 --- a/examples/sha256.rs +++ b/examples/sha256.rs @@ -138,17 +138,11 @@ fn main() { println!("Nova::prove_step {}: {:?}", i, start.elapsed()); } - let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances(); - println!("Run the Nova's IVC verifier"); + let ivc_proof = folding_scheme.ivc_proof(); N::verify( - nova_params.1, - initial_state, - folding_scheme.state(), // latest state - Fr::from(num_steps as u32), - running_instance, - incoming_instance, - cyclefold_instance, + nova_params.1, // Nova's verifier params + ivc_proof, ) .unwrap(); } diff --git a/folding-schemes/Cargo.toml b/folding-schemes/Cargo.toml index 2fbbc3b..d7c4548 100644 --- a/folding-schemes/Cargo.toml +++ b/folding-schemes/Cargo.toml @@ -41,6 +41,10 @@ ark-pallas = {version="0.4.0", features=["r1cs"]} ark-vesta = {version="0.4.0", features=["r1cs"]} ark-bn254 = {version="0.4.0", features=["r1cs"]} ark-grumpkin = {version="0.4.0", features=["r1cs"]} +# Note: do not use the MNTx_298 curves in practice due to security reasons; here +# we only use them in the tests.
+ark-mnt4-298 = {version="0.4.0", features=["r1cs"]} +ark-mnt6-298 = {version="0.4.0", features=["r1cs"]} rand = "0.8.5" tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } tracing-subscriber = { version = "0.2" } diff --git a/folding-schemes/src/arith/ccs.rs b/folding-schemes/src/arith/ccs.rs index 10425f8..e1c9575 100644 --- a/folding-schemes/src/arith/ccs.rs +++ b/folding-schemes/src/arith/ccs.rs @@ -1,9 +1,12 @@ use ark_ff::PrimeField; use ark_std::log2; -use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix}; +use crate::utils::vec::{ + hadamard, is_zero_vec, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix, +}; use crate::Error; +use super::ArithSerializer; use super::{r1cs::R1CS, Arith}; /// CCS represents the Customizable Constraint Systems structure defined in @@ -35,8 +38,9 @@ pub struct CCS { pub c: Vec, } -impl Arith for CCS { - fn eval_relation(&self, z: &[F]) -> Result, Error> { +impl CCS { + /// Evaluates the CCS relation at a given vector of assignments `z` + pub fn eval_at_z(&self, z: &[F]) -> Result, Error> { let mut result = vec![F::zero(); self.m]; for i in 0..self.q { @@ -59,6 +63,25 @@ impl Arith for CCS { Ok(result) } + /// returns a tuple containing (w, x) (witness and public inputs respectively) + pub fn split_z(&self, z: &[F]) -> (Vec, Vec) { + (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) + } +} + +impl, U: AsRef<[F]>> Arith for CCS { + type Evaluation = Vec; + + fn eval_relation(&self, w: &W, u: &U) -> Result { + self.eval_at_z(&[&[F::one()], u.as_ref(), w.as_ref()].concat()) + } + + fn check_evaluation(_w: &W, _u: &U, e: Self::Evaluation) -> Result<(), Error> { + is_zero_vec(&e).then_some(()).ok_or(Error::NotSatisfied) + } +} + +impl ArithSerializer for CCS { fn params_to_le_bytes(&self) -> Vec { [ self.l.to_le_bytes(), @@ -72,14 +95,14 @@ impl Arith for CCS { } } -impl CCS { - pub fn from_r1cs(r1cs: R1CS) -> Self { - let m = r1cs.A.n_rows; - let n = r1cs.A.n_cols; +impl From> for CCS { + fn from(r1cs: R1CS) -> Self { + let m = r1cs.num_constraints(); + let n = r1cs.num_variables(); CCS { m, n, - l: r1cs.l, + l: r1cs.num_public_inputs(), s: log2(m) as usize, s_prime: log2(n) as usize, t: 3, @@ -91,29 +114,19 @@ impl CCS { M: vec![r1cs.A, r1cs.B, r1cs.C], } } - - pub fn to_r1cs(self) -> R1CS { - R1CS:: { - l: self.l, - A: self.M[0].clone(), - B: self.M[1].clone(), - C: self.M[2].clone(), - } - } } #[cfg(test)] pub mod tests { use super::*; use crate::{ - arith::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z}, + arith::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z, get_test_z_split}, utils::vec::is_zero_vec, }; use ark_pallas::Fr; pub fn get_test_ccs() -> CCS { - let r1cs = get_test_r1cs::(); - CCS::::from_r1cs(r1cs) + get_test_r1cs::().into() } pub fn get_test_z(input: usize) -> Vec { r1cs_get_test_z(input) @@ -122,13 +135,13 @@ pub mod tests { #[test] fn test_eval_ccs_relation() { let ccs = get_test_ccs::(); - let mut z = get_test_z(3); + let (_, x, mut w) = get_test_z_split(3); - let f_w = ccs.eval_relation(&z).unwrap(); + let f_w = ccs.eval_relation(&w, &x).unwrap(); assert!(is_zero_vec(&f_w)); - z[1] = Fr::from(111); - let f_w = ccs.eval_relation(&z).unwrap(); + w[1] = Fr::from(111); + let f_w = ccs.eval_relation(&w, &x).unwrap(); assert!(!is_zero_vec(&f_w)); } @@ -136,8 +149,8 @@ pub mod tests { #[test] fn test_check_ccs_relation() { let ccs = get_test_ccs::(); - let z = get_test_z(3); + let (_, x, w) = get_test_z_split(3); - ccs.check_relation(&z).unwrap(); + 
ccs.check_relation(&w, &x).unwrap(); } } diff --git a/folding-schemes/src/arith/mod.rs b/folding-schemes/src/arith/mod.rs index d1fa8a3..b5ebd7b 100644 --- a/folding-schemes/src/arith/mod.rs +++ b/folding-schemes/src/arith/mod.rs @@ -1,28 +1,156 @@ -use ark_ff::PrimeField; +use ark_ec::CurveGroup; +use ark_relations::r1cs::SynthesisError; +use ark_std::rand::RngCore; -use crate::Error; +use crate::{commitment::CommitmentScheme, folding::traits::Dummy, Error}; pub mod ccs; pub mod r1cs; -pub trait Arith { - /// Evaluate the given Arith structure at `z`, a vector of assignments, and - /// return the evaluation. - fn eval_relation(&self, z: &[F]) -> Result, Error>; +/// `Arith` defines the operations that a constraint system (e.g., R1CS, CCS, +/// etc.) should support. +/// +/// Here, `W` is the type of witness, and `U` is the type of statement / public +/// input / public IO / instance. +/// Note that the same constraint system may support different types of `W` and +/// `U`, and the satisfiability check may vary. +/// +/// For example, both plain R1CS and relaxed R1CS are represented by 3 matrices, +/// but the types of `W` and `U` are different: +/// - The plain R1CS has `W` and `U` as vectors of field elements. +/// +/// `W = w` and `U = x` satisfy R1CS if `Az ∘ Bz = Cz`, where `z = [1, x, w]`. +/// +/// - In Nova, Relaxed R1CS has `W` as [`crate::folding::nova::Witness`], +/// and `U` as [`crate::folding::nova::CommittedInstance`]. +/// +/// `W = (w, e, ...)` and `U = (u, x, ...)` satisfy Relaxed R1CS if +/// `Az ∘ Bz = uCz + e`, where `z = [u, x, w]`. +/// (commitments in `U` are not checked here) +/// +/// Also, `W` and `U` have non-native field elements as their components when +/// used as CycleFold witness and instance. +/// +/// - In ProtoGalaxy, Relaxed R1CS has `W` as [`crate::folding::protogalaxy::Witness`], +/// and `U` as [`crate::folding::protogalaxy::CommittedInstance`]. +/// +/// `W = (w, ...)` and `U = (x, e, β, ...)` satisfy Relaxed R1CS if +/// `e = Σ pow_i(β) v_i`, where `v = Az ∘ Bz - Cz`, `z = [1, x, w]`. +/// (commitments in `U` are not checked here) +/// +/// This is also the case of CCS, where `W` and `U` may be vectors of field +/// elements, [`crate::folding::hypernova::Witness`] and [`crate::folding::hypernova::lcccs::LCCCS`], +/// or [`crate::folding::hypernova::Witness`] and [`crate::folding::hypernova::cccs::CCCS`]. +pub trait Arith: Clone { + type Evaluation; - /// Checks that the given Arith structure is satisfied by a z vector, i.e., - /// if the evaluation is a zero vector + /// Returns a dummy witness and instance + fn dummy_witness_instance<'a>(&'a self) -> (W, U) + where + W: Dummy<&'a Self>, + U: Dummy<&'a Self>, + { + (W::dummy(self), U::dummy(self)) + } + + /// Evaluates the constraint system `self` at witness `w` and instance `u`. + /// Returns the evaluation result. + /// + /// The evaluation result is usually a vector of field elements. + /// For instance: + /// - Evaluating the plain R1CS at `W = w` and `U = x` returns + /// `Az ∘ Bz - Cz`, where `z = [1, x, w]`. + /// + /// - Evaluating the relaxed R1CS in Nova at `W = (w, e, ...)` and + /// `U = (u, x, ...)` returns `Az ∘ Bz - uCz`, where `z = [u, x, w]`. + /// + /// - Evaluating the relaxed R1CS in ProtoGalaxy at `W = (w, ...)` and + /// `U = (x, e, β, ...)` returns `Az ∘ Bz - Cz`, where `z = [1, x, w]`. + /// + /// However, we use `Self::Evaluation` to represent the evaluation result + /// for future extensibility. 
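// Editor's illustration (not part of the patch): for a plain R1CS, `W` and `U` are
// plain vectors of field elements and the reworked API is called as in the tests
// further down in this diff (assuming `get_test_r1cs`/`get_test_z_split` and
// `ark_pallas::Fr`):
//
//     let r1cs = get_test_r1cs::<Fr>();
//     let (_, x, w) = get_test_z_split::<Fr>(3);  // returns (u, x, w); u = 1 is implicit here
//     let e = r1cs.eval_relation(&w, &x)?;        // e = Az ∘ Bz - Cz at z = [1, x, w]
//     r1cs.check_relation(&w, &x)?;               // passes iff e is the all-zero vector
//
// `split_z` performs the inverse mapping, recovering (w, x) from z.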
+ fn eval_relation(&self, w: &W, u: &U) -> Result; + + /// Checks if the evaluation result is valid. The witness `w` and instance + /// `u` are also parameters, because the validity check may need information + /// contained in `w` and/or `u`. + /// + /// For instance: + /// - The evaluation `v` of plain R1CS at satisfying `W` and `U` should be + /// an all-zero vector. + /// + /// - The evaluation `v` of relaxed R1CS in Nova at satisfying `W` and `U` + /// should be equal to the error term `e` in the witness. + /// + /// - The evaluation `v` of relaxed R1CS in ProtoGalaxy at satisfying `W` + /// and `U` should satisfy `e = Σ pow_i(β) v_i`, where `e` is the error + /// term in the committed instance. + fn check_evaluation(w: &W, u: &U, v: Self::Evaluation) -> Result<(), Error>; + + /// Checks if witness `w` and instance `u` satisfy the constraint system + /// `self` by first computing the evaluation result and then checking the + /// validity of the evaluation result. /// /// Used only for testing. - fn check_relation(&self, z: &[F]) -> Result<(), Error> { - if self.eval_relation(z)?.iter().all(|f| f.is_zero()) { - Ok(()) - } else { - Err(Error::NotSatisfied) - } + fn check_relation(&self, w: &W, u: &U) -> Result<(), Error> { + let e = self.eval_relation(w, u)?; + Self::check_evaluation(w, u, e) } +} +/// `ArithSerializer` is for serializing constraint systems. +/// +/// Currently we only support converting parameters to bytes, but in the future +/// we may consider implementing methods for serializing the actual data (e.g., +/// R1CS matrices). +pub trait ArithSerializer { /// Returns the bytes that represent the parameters, that is, the matrices sizes, the amount of /// public inputs, etc, without the matrices/polynomials values. fn params_to_le_bytes(&self) -> Vec; } + +/// `ArithSampler` allows sampling random pairs of witness and instance that +/// satisfy the constraint system `self`. +/// +/// This is useful for constructing a zero-knowledge layer for a folding-based +/// IVC. +/// An example of such a layer can be found in Appendix D of the [HyperNova] +/// paper. +/// +/// Note that we use a separate trait for sampling, because this operation may +/// not be supported by all witness-instance pairs. +/// For instance, it is difficult (if not impossible) to do this for `w` and `x` +/// in a plain R1CS. +/// +/// [HyperNova]: https://eprint.iacr.org/2023/573.pdf +pub trait ArithSampler: Arith { + /// Samples a random witness and instance that satisfy the constraint system. + fn sample_witness_instance>( + &self, + params: &CS::ProverParams, + rng: impl RngCore, + ) -> Result<(W, U), Error>; +} + +/// `ArithGadget` defines the in-circuit counterparts of operations specified in +/// `Arith` on constraint systems. +pub trait ArithGadget { + type Evaluation; + + /// Evaluates the constraint system `self` at witness `w` and instance `u`. + /// Returns the evaluation result. + fn eval_relation(&self, w: &WVar, u: &UVar) -> Result; + + /// Generates constraints for enforcing that witness `w` and instance `u` + /// satisfy the constraint system `self` by first computing the evaluation + /// result and then checking the validity of the evaluation result. + fn enforce_relation(&self, w: &WVar, u: &UVar) -> Result<(), SynthesisError> { + let e = self.eval_relation(w, u)?; + Self::enforce_evaluation(w, u, e) + } + + /// Generates constraints for enforcing that the evaluation result is valid. 
+ /// The witness `w` and instance `u` are also parameters, because the + /// validity check may need information contained in `w` and/or `u`. + fn enforce_evaluation(w: &WVar, u: &UVar, e: Self::Evaluation) -> Result<(), SynthesisError>; +} diff --git a/folding-schemes/src/arith/r1cs.rs b/folding-schemes/src/arith/r1cs.rs index 4643cd9..164528c 100644 --- a/folding-schemes/src/arith/r1cs.rs +++ b/folding-schemes/src/arith/r1cs.rs @@ -1,13 +1,13 @@ -use crate::commitment::CommitmentScheme; -use crate::RngCore; -use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_relations::r1cs::ConstraintSystem; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::rand::Rng; -use super::Arith; -use crate::utils::vec::{hadamard, mat_vec_mul, vec_scalar_mul, vec_sub, SparseMatrix}; +use super::ccs::CCS; +use super::{Arith, ArithSerializer}; +use crate::utils::vec::{ + hadamard, is_zero_vec, mat_vec_mul, vec_scalar_mul, vec_sub, SparseMatrix, +}; use crate::Error; #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] @@ -18,8 +18,9 @@ pub struct R1CS { pub C: SparseMatrix, } -impl Arith for R1CS { - fn eval_relation(&self, z: &[F]) -> Result, Error> { +impl R1CS { + /// Evaluates the CCS relation at a given vector of variables `z` + pub fn eval_at_z(&self, z: &[F]) -> Result, Error> { if z.len() != self.A.n_cols { return Err(Error::NotSameLength( "z.len()".to_string(), @@ -33,12 +34,26 @@ impl Arith for R1CS { let Bz = mat_vec_mul(&self.B, z)?; let Cz = mat_vec_mul(&self.C, z)?; // Multiply Cz by z[0] (u) here, allowing this method to be reused for - // both relaxed and unrelaxed R1CS. + // both relaxed and plain R1CS. let uCz = vec_scalar_mul(&Cz, &z[0]); let AzBz = hadamard(&Az, &Bz)?; vec_sub(&AzBz, &uCz) } +} + +impl, U: AsRef<[F]>> Arith for R1CS { + type Evaluation = Vec; + + fn eval_relation(&self, w: &W, u: &U) -> Result { + self.eval_at_z(&[&[F::one()], u.as_ref(), w.as_ref()].concat()) + } + fn check_evaluation(_w: &W, _u: &U, e: Self::Evaluation) -> Result<(), Error> { + is_zero_vec(&e).then_some(()).ok_or(Error::NotSatisfied) + } +} + +impl ArithSerializer for R1CS { fn params_to_le_bytes(&self) -> Vec { [ self.l.to_le_bytes(), @@ -67,66 +82,41 @@ impl R1CS { } } - /// returns a tuple containing (w, x) (witness and public inputs respectively) - pub fn split_z(&self, z: &[F]) -> (Vec, Vec) { - (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) + #[inline] + pub fn num_constraints(&self) -> usize { + self.A.n_rows } -} - -pub trait RelaxedR1CS: Arith { - /// returns a dummy running instance (Witness and CommittedInstance) for the current R1CS structure - fn dummy_running_instance(&self) -> (W, U); - - /// returns a dummy incoming instance (Witness and CommittedInstance) for the current R1CS structure - fn dummy_incoming_instance(&self) -> (W, U); - - /// checks if the given instance is relaxed - fn is_relaxed(w: &W, u: &U) -> bool; - - /// extracts `z`, the vector of variables, from the given Witness and CommittedInstance - fn extract_z(w: &W, u: &U) -> Vec; - - /// checks if the computed error terms correspond to the actual one in `w` - /// or `u` - fn check_error_terms(w: &W, u: &U, e: Vec) -> Result<(), Error>; - /// checks the tight (unrelaxed) R1CS relation - fn check_tight_relation(&self, w: &W, u: &U) -> Result<(), Error> { - if Self::is_relaxed(w, u) { - return Err(Error::R1CSUnrelaxedFail); - } - - let z = Self::extract_z(w, u); - self.check_relation(&z) + #[inline] + pub fn num_public_inputs(&self) -> usize { + self.l } - /// checks 
the relaxed R1CS relation - fn check_relaxed_relation(&self, w: &W, u: &U) -> Result<(), Error> { - let z = Self::extract_z(w, u); - let e = self.eval_relation(&z)?; - Self::check_error_terms(w, u, e) + #[inline] + pub fn num_variables(&self) -> usize { + self.A.n_cols } - // Computes the E term, given A, B, C, z, u - fn compute_E( - A: &SparseMatrix, - B: &SparseMatrix, - C: &SparseMatrix, - z: &[C::ScalarField], - u: &C::ScalarField, - ) -> Result, Error> { - let Az = mat_vec_mul(A, z)?; - let Bz = mat_vec_mul(B, z)?; - let AzBz = hadamard(&Az, &Bz)?; + #[inline] + pub fn num_witnesses(&self) -> usize { + self.num_variables() - self.num_public_inputs() - 1 + } - let Cz = mat_vec_mul(C, z)?; - let uCz = vec_scalar_mul(&Cz, u); - vec_sub(&AzBz, &uCz) + /// returns a tuple containing (w, x) (witness and public inputs respectively) + pub fn split_z(&self, z: &[F]) -> (Vec, Vec) { + (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec()) } +} - fn sample(&self, params: &CS::ProverParams, rng: impl RngCore) -> Result<(W, U), Error> - where - CS: CommitmentScheme; +impl From> for R1CS { + fn from(ccs: CCS) -> Self { + R1CS:: { + l: ccs.l, + A: ccs.M[0].clone(), + B: ccs.M[1].clone(), + C: ccs.M[2].clone(), + } + } } /// extracts arkworks ConstraintSystem matrices into crate::utils::vec::SparseMatrix format as R1CS @@ -173,27 +163,12 @@ pub fn extract_w_x(cs: &ConstraintSystem) -> (Vec, Vec) #[cfg(test)] pub mod tests { use super::*; - use crate::folding::nova::{CommittedInstance, Witness}; - use crate::{ - commitment::pedersen::Pedersen, - utils::vec::{ - is_zero_vec, - tests::{to_F_matrix, to_F_vec}, - }, + use crate::utils::vec::{ + is_zero_vec, + tests::{to_F_matrix, to_F_vec}, }; - use ark_pallas::{Fr, Projective}; - - #[test] - pub fn sample_relaxed_r1cs() { - let rng = rand::rngs::OsRng; - let r1cs = get_test_r1cs::(); - let (prover_params, _) = Pedersen::::setup(rng, r1cs.A.n_rows).unwrap(); - - let sampled: Result<(Witness, CommittedInstance), _> = - r1cs.sample::>(&prover_params, rng); - assert!(sampled.is_ok()); - } + use ark_pallas::Fr; pub fn get_test_r1cs() -> R1CS { // R1CS for: x^3 + x + 5 = y (example from article @@ -252,20 +227,20 @@ pub mod tests { fn test_eval_r1cs_relation() { let mut rng = ark_std::test_rng(); let r1cs = get_test_r1cs::(); - let mut z = get_test_z::(rng.gen::() as usize); + let (_, x, mut w) = get_test_z_split::(rng.gen::() as usize); - let f_w = r1cs.eval_relation(&z).unwrap(); + let f_w = r1cs.eval_relation(&w, &x).unwrap(); assert!(is_zero_vec(&f_w)); - z[1] = Fr::from(111); - let f_w = r1cs.eval_relation(&z).unwrap(); + w[1] = Fr::from(111); + let f_w = r1cs.eval_relation(&w, &x).unwrap(); assert!(!is_zero_vec(&f_w)); } #[test] fn test_check_r1cs_relation() { let r1cs = get_test_r1cs::(); - let z = get_test_z(5); - r1cs.check_relation(&z).unwrap(); + let (_, x, w) = get_test_z_split(5); + r1cs.check_relation(&w, &x).unwrap(); } } diff --git a/folding-schemes/src/folding/circuits/cyclefold.rs b/folding-schemes/src/folding/circuits/cyclefold.rs index 2e293e8..91a8787 100644 --- a/folding-schemes/src/folding/circuits/cyclefold.rs +++ b/folding-schemes/src/folding/circuits/cyclefold.rs @@ -24,7 +24,7 @@ use super::{nonnative::uint::NonNativeUintVar, CF1, CF2}; use crate::arith::r1cs::{extract_w_x, R1CS}; use crate::commitment::CommitmentScheme; use crate::constants::NOVA_N_BITS_RO; -use crate::folding::nova::nifs::NIFS; +use crate::folding::nova::{nifs::NIFS, traits::NIFSTrait}; use crate::transcript::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, 
TranscriptVar}; use crate::Error; @@ -61,10 +61,12 @@ where f().and_then(|val| { let cs = cs.into(); + let u = + NonNativeUintVar::>::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?; + let x: Vec>> = + Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?; let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; - let u = NonNativeUintVar::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?; - let x = Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?; Ok(Self { cmE, u, cmW, x }) }) @@ -568,9 +570,8 @@ where let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits)) .expect("cf_r_bits out of bounds"); - let (cf_W_i1, cf_U_i1) = NIFS::::fold_instances( - cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT, - )?; + let (cf_W_i1, cf_U_i1) = + NIFS::::prove(cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, &cf_cmT)?; Ok((cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, cf_r_Fq)) } @@ -582,10 +583,11 @@ pub mod tests { poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, }; use ark_r1cs_std::R1CSVar; - use ark_std::UniformRand; + use ark_std::{One, UniformRand}; use super::*; - use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs; + use crate::commitment::pedersen::Pedersen; + use crate::folding::nova::CommittedInstance; use crate::transcript::poseidon::poseidon_canonical_config; use crate::utils::get_cm_coordinates; @@ -667,12 +669,30 @@ pub mod tests { #[test] fn test_nifs_full_gadget() { - let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, _) = prepare_simple_fold_inputs(); + let mut rng = ark_std::test_rng(); - let cs = ConstraintSystem::::new_ref(); + // prepare the committed instances to test in-circuit + let ci: Vec> = (0..2) + .into_iter() + .map(|_| CommittedInstance:: { + cmE: Projective::rand(&mut rng), + u: Fr::rand(&mut rng), + cmW: Projective::rand(&mut rng), + x: vec![Fr::rand(&mut rng); 1], + }) + .collect(); + let (ci1, mut ci2) = (ci[0].clone(), ci[1].clone()); + // make the 2nd instance a 'fresh' instance (ie. cmE=0, u=1) + ci2.cmE = Projective::zero(); + ci2.u = Fr::one(); + let r_bits: Vec = + Fr::rand(&mut rng).into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); + let r_Fr = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); + let cmT = Projective::rand(&mut rng); + let ci3 = NIFS::>::verify(r_Fr, &ci1, &ci2, &cmT); + let cs = ConstraintSystem::::new_ref(); let r_bitsVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits)).unwrap(); - let ci1Var = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { Ok(ci1.clone()) diff --git a/folding-schemes/src/folding/circuits/mod.rs b/folding-schemes/src/folding/circuits/mod.rs index 40d6b86..b442b93 100644 --- a/folding-schemes/src/folding/circuits/mod.rs +++ b/folding-schemes/src/folding/circuits/mod.rs @@ -7,10 +7,13 @@ pub mod nonnative; pub mod sum_check; pub mod utils; -/// CF1 represents the ConstraintField used for the main folding circuit which is over E1::Fr, where -/// E1 is the main curve where we do the folding. +/// CF1 uses the ScalarField of the given C. CF1 represents the ConstraintField used for the main +/// folding circuit which is over E1::Fr, where E1 is the main curve where we do the folding. +/// In CF1, the points of C can not be natively represented. 
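// Editor's illustration (not part of the patch), assuming the bn254/grumpkin curve
// cycle used elsewhere in this repo: for C = ark_bn254::G1Projective,
//
//     CF1<C> = Fr  // scalar field: native to the main folding circuit
//     CF2<C> = Fq  // base field:   native to the CycleFold circuit
//
// so a circuit over CF1<C> must represent C's points non-natively, while a circuit
// over CF2<C> can manipulate them with native arithmetic.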
pub type CF1 = <::Affine as AffineRepr>::ScalarField; -/// CF2 represents the ConstraintField used for the CycleFold circuit which is over E2::Fr=E1::Fq, -/// where E2 is the auxiliary curve (from [CycleFold](https://eprint.iacr.org/2023/1192.pdf) -/// approach) where we check the folding of the commitments (elliptic curve points). +/// CF2 uses the BaseField of the given C. CF2 represents the ConstraintField used for the +/// CycleFold circuit which is over E2::Fr=E1::Fq, where E2 is the auxiliary curve (from +/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf) approach) where we check the folding of the +/// commitments (elliptic curve points). +/// In CF2, the points of C can be natively represented. pub type CF2 = <::BaseField as Field>::BasePrimeField; diff --git a/folding-schemes/src/folding/hypernova/cccs.rs b/folding-schemes/src/folding/hypernova/cccs.rs index 5b95df2..9e42bd9 100644 --- a/folding-schemes/src/folding/hypernova/cccs.rs +++ b/folding-schemes/src/folding/hypernova/cccs.rs @@ -3,23 +3,22 @@ use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_serialize::CanonicalDeserialize; use ark_serialize::CanonicalSerialize; -use ark_std::One; -use ark_std::Zero; -use std::sync::Arc; - -use ark_std::rand::Rng; +use ark_std::{rand::Rng, sync::Arc, One, Zero}; +use super::circuits::CCCSVar; use super::Witness; use crate::arith::{ccs::CCS, Arith}; use crate::commitment::CommitmentScheme; +use crate::folding::circuits::CF1; +use crate::folding::traits::{CommittedInstanceOps, Dummy}; use crate::transcript::AbsorbNonNative; use crate::utils::mle::dense_vec_to_dense_mle; -use crate::utils::vec::mat_vec_mul; +use crate::utils::vec::{is_zero_vec, mat_vec_mul}; use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial}; use crate::Error; /// Committed CCS instance -#[derive(Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] +#[derive(Debug, Clone, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] pub struct CCCS { // Commitment to witness pub C: C, @@ -91,34 +90,34 @@ impl CCS { } } -impl CCCS { - pub fn dummy(l: usize) -> CCCS - where - C::ScalarField: PrimeField, - { - CCCS:: { +impl Dummy<&CCS>> for CCCS { + fn dummy(ccs: &CCS>) -> Self { + Self { C: C::zero(), - x: vec![C::ScalarField::zero(); l], + x: vec![CF1::::zero(); ccs.l], } } +} + +impl Arith>, CCCS> for CCS> { + type Evaluation = Vec>; + + fn eval_relation(&self, w: &Witness>, u: &CCCS) -> Result { + // evaluate CCCS relation + self.eval_at_z(&[&[CF1::::one()][..], &u.x, &w.w].concat()) + } /// Perform the check of the CCCS instance described at section 4.1, /// notice that this method does not check the commitment correctness - pub fn check_relation( - &self, - ccs: &CCS, - w: &Witness, + fn check_evaluation( + _w: &Witness>, + _u: &CCCS, + e: Self::Evaluation, ) -> Result<(), Error> { - // check CCCS relation - let z: Vec = - [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat(); - // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in // the hypercube, evaluating over the whole boolean hypercube for a normal-sized instance // would take too much, this checks the CCS relation of the CCCS. 
- ccs.check_relation(&z)?; - - Ok(()) + is_zero_vec(&e).then_some(()).ok_or(Error::NotSatisfied) } } @@ -126,9 +125,8 @@ impl Absorb for CCCS where C::ScalarField: Absorb, { - fn to_sponge_bytes(&self, _dest: &mut Vec) { - // This is never called - unimplemented!() + fn to_sponge_bytes(&self, dest: &mut Vec) { + C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { @@ -142,6 +140,18 @@ where } } +impl CommittedInstanceOps for CCCS { + type Var = CCCSVar; + + fn get_commitments(&self) -> Vec { + vec![self.C] + } + + fn is_incoming(&self) -> bool { + true + } +} + #[cfg(test)] pub mod tests { use ark_pallas::Fr; @@ -180,7 +190,8 @@ pub mod tests { let ccs: CCS = get_test_ccs(); let z = get_test_z(3); - ccs.check_relation(&z).unwrap(); + let (w, x) = ccs.split_z(&z); + ccs.check_relation(&w, &x).unwrap(); let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); @@ -214,7 +225,8 @@ pub mod tests { let ccs: CCS = get_test_ccs(); let z = get_test_z(3); - ccs.check_relation(&z).unwrap(); + let (w, x) = ccs.split_z(&z); + ccs.check_relation(&w, &x).unwrap(); // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube diff --git a/folding-schemes/src/folding/hypernova/circuits.rs b/folding-schemes/src/folding/hypernova/circuits.rs index 1c2d786..e9f42d8 100644 --- a/folding-schemes/src/folding/hypernova/circuits.rs +++ b/folding-schemes/src/folding/hypernova/circuits.rs @@ -1,6 +1,6 @@ /// Implementation of [HyperNova](https://eprint.iacr.org/2023/573.pdf) circuits use ark_crypto_primitives::sponge::{ - constraints::CryptographicSpongeVar, + constraints::{AbsorbGadget, CryptographicSpongeVar}, poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge}, CryptographicSponge, }; @@ -14,6 +14,7 @@ use ark_r1cs_std::{ fields::{fp::FpVar, FieldVar}, groups::GroupOpsBounds, prelude::CurveVar, + uint8::UInt8, R1CSVar, ToConstraintFieldGadget, }; use ark_relations::r1cs::{ @@ -41,6 +42,7 @@ use crate::folding::{ CF1, CF2, }, nova::get_r1cs_from_cs, + traits::{CommittedInstanceVarOps, Dummy}, }; use crate::frontend::FCircuit; use crate::utils::virtual_polynomial::VPAuxInfo; @@ -52,19 +54,16 @@ use crate::{ /// Committed CCS instance #[derive(Debug, Clone)] -pub struct CCCSVar -where - ::BaseField: PrimeField, -{ +pub struct CCCSVar { // Commitment to witness pub C: NonNativeAffineVar, // Public io pub x: Vec>>, } + impl AllocVar, CF1> for CCCSVar where C: CurveGroup, - ::BaseField: PrimeField, { fn new_variable>>( cs: impl Into>>, @@ -83,12 +82,30 @@ where } } +impl CommittedInstanceVarOps for CCCSVar { + type PointVar = NonNativeAffineVar; + + fn get_commitments(&self) -> Vec { + vec![self.C.clone()] + } + + fn get_public_inputs(&self) -> &[FpVar>] { + &self.x + } + + fn enforce_incoming(&self) -> Result<(), SynthesisError> { + // `CCCSVar` is always the incoming instance + Ok(()) + } + + fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> { + self.x.enforce_equal(&other.x) + } +} + /// Linearized Committed CCS instance #[derive(Debug, Clone)] -pub struct LCCCSVar -where - ::BaseField: PrimeField, -{ +pub struct LCCCSVar { // Commitment to witness pub C: NonNativeAffineVar, // Relaxation factor of z for folded LCCCS @@ -100,10 +117,10 @@ where // Vector of v_i pub v: Vec>>, } + impl AllocVar, CF1> for LCCCSVar where C: CurveGroup, - ::BaseField: PrimeField, { fn 
new_variable>>( cs: impl Into>>, @@ -127,41 +144,44 @@ where } } -impl LCCCSVar -where - C: CurveGroup, - ::ScalarField: Absorb, - ::BaseField: ark_ff::PrimeField, -{ - /// [`LCCCSVar`].hash implements the LCCCS instance hash compatible with the native - /// implementation from LCCCS.hash. - /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the LCCCS. - /// Additionally it returns the vector of the field elements from the self parameters, so they - /// can be reused in other gadgets avoiding recalculating (reconstraining) them. - #[allow(clippy::type_complexity)] - pub fn hash( - self, - sponge: &PoseidonSpongeVar>, - pp_hash: FpVar>, - i: FpVar>, - z_0: Vec>>, - z_i: Vec>>, - ) -> Result<(FpVar>, Vec>>), SynthesisError> { - let mut sponge = sponge.clone(); - let U_vec = [ - self.C.to_constraint_field()?, - vec![self.u], - self.x, - self.r_x, - self.v, +impl AbsorbGadget for LCCCSVar { + fn to_sponge_bytes(&self) -> Result>, SynthesisError> { + FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) + } + + fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { + Ok([ + &self.C.to_constraint_field()?, + &[self.u.clone()][..], + &self.x, + &self.r_x, + &self.v, ] - .concat(); - sponge.absorb(&pp_hash)?; - sponge.absorb(&i)?; - sponge.absorb(&z_0)?; - sponge.absorb(&z_i)?; - sponge.absorb(&U_vec)?; - Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) + .concat()) + } +} + +impl CommittedInstanceVarOps for LCCCSVar { + type PointVar = NonNativeAffineVar; + + fn get_commitments(&self) -> Vec { + vec![self.C.clone()] + } + + fn get_public_inputs(&self) -> &[FpVar>] { + &self.x + } + + fn enforce_incoming(&self) -> Result<(), SynthesisError> { + // `LCCCSVar` is always the running instance + Err(SynthesisError::Unsatisfiable) + } + + fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> { + self.u.enforce_equal(&other.u)?; + self.x.enforce_equal(&other.x)?; + self.r_x.enforce_equal(&other.r_x)?; + self.v.enforce_equal(&other.v) } } @@ -582,13 +602,13 @@ where /// feed in as parameter for the AugmentedFCircuit::empty method to avoid computing them there. 
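// Editor's note (illustration, not part of the patch): the `CommittedInstanceVarOps`
// impls above make the role of each instance type explicit. `CCCSVar::enforce_incoming()`
// succeeds unconditionally (a CCCS is always the fresh, incoming instance), while
// `LCCCSVar::enforce_incoming()` returns `SynthesisError::Unsatisfiable` (an LCCCS is
// always the running instance). The native counterparts mirror this, e.g.:
//
//     assert!(CCCS::<Projective>::dummy(&ccs).is_incoming());
//     assert!(!LCCCS::<Projective>::dummy(&ccs).is_incoming());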
pub fn upper_bound_ccs(&self) -> Result, Error> { let r1cs = get_r1cs_from_cs::>(self.clone()).unwrap(); - let mut ccs = CCS::from_r1cs(r1cs.clone()); + let mut ccs = CCS::from(r1cs); let z_0 = vec![C1::ScalarField::zero(); self.F.state_len()]; let mut W_i = Witness::::dummy(&ccs); - let mut U_i = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + let mut U_i = LCCCS::::dummy(&ccs); let mut w_i = W_i.clone(); - let mut u_i = CCCS::::dummy(ccs.l); + let mut u_i = CCCS::::dummy(&ccs); let n_iters = 2; for _ in 0..n_iters { @@ -654,7 +674,7 @@ where r_w: C1::ScalarField::one(), }; W_i = Witness::::dummy(&ccs); - U_i = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + U_i = LCCCS::::dummy(&ccs); } Ok(ccs) @@ -671,7 +691,7 @@ where cs.finalize(); let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let r1cs = extract_r1cs::(&cs); - let ccs = CCS::from_r1cs(r1cs.clone()); + let ccs = CCS::from(r1cs); Ok((cs, ccs)) } @@ -714,8 +734,8 @@ where .unwrap_or(vec![CF1::::zero(); self.F.external_inputs_len()])) })?; - let U_dummy = LCCCS::::dummy(self.ccs.l, self.ccs.t, self.ccs.s); - let u_dummy = CCCS::::dummy(self.ccs.l); + let U_dummy = LCCCS::::dummy(&self.ccs); + let u_dummy = CCCS::::dummy(&self.ccs); let U_i = LCCCSVar::::new_witness(cs.clone(), || Ok(self.U_i.unwrap_or(U_dummy.clone())))?; @@ -728,7 +748,7 @@ where let U_i1_C = NonNativeAffineVar::new_witness(cs.clone(), || { Ok(self.U_i1_C.unwrap_or_else(C1::zero)) })?; - let nimfs_proof_dummy = NIMFSProof::::dummy(&self.ccs, MU, NU); + let nimfs_proof_dummy = NIMFSProof::::dummy((&self.ccs, MU, NU)); let nimfs_proof = ProofVar::::new_witness(cs.clone(), || { Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy)) })?; @@ -742,25 +762,13 @@ where let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); - // get z_{i+1} from the F circuit - let i_usize = self.i_usize.unwrap_or(0); - let z_i1 = - self.F - .generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?; - let is_basecase = i.is_zero()?; let is_not_basecase = is_basecase.not(); // Primary Part // P.1. 
Compute u_i.x // u_i.x[0] = H(i, z_0, z_i, U_i) - let (u_i_x, _) = U_i.clone().hash( - &sponge, - pp_hash.clone(), - i.clone(), - z_0.clone(), - z_i.clone(), - )?; + let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?; // u_i.x[1] = H(cf_U_i) let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; @@ -795,19 +803,26 @@ where U_i1.C = U_i1_C; // P.4.a compute and check the first output of F' + + // get z_{i+1} from the F circuit + let i_usize = self.i_usize.unwrap_or(0); + let z_i1 = self + .F + .generate_step_constraints(cs.clone(), i_usize, z_i, external_inputs)?; + let (u_i1_x, _) = U_i1.clone().hash( &sponge, - pp_hash.clone(), - i + FpVar::>::one(), - z_0.clone(), - z_i1.clone(), + &pp_hash, + &(i + FpVar::>::one()), + &z_0, + &z_i1, )?; let (u_i1_x_base, _) = LCCCSVar::new_constant(cs.clone(), U_dummy)?.hash( &sponge, - pp_hash.clone(), - FpVar::>::one(), - z_0.clone(), - z_i1.clone(), + &pp_hash, + &FpVar::>::one(), + &z_0, + &z_i1, )?; let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; @@ -891,7 +906,8 @@ mod tests { use crate::{ arith::{ ccs::tests::{get_test_ccs, get_test_z}, - r1cs::{extract_w_x, RelaxedR1CS}, + r1cs::extract_w_x, + Arith, }, commitment::{pedersen::Pedersen, CommitmentScheme}, folding::{ @@ -900,6 +916,7 @@ mod tests { utils::{compute_c, compute_sigmas_thetas}, HyperNovaCycleFoldCircuit, }, + traits::CommittedInstanceOps, }, frontend::utils::CubicFCircuit, transcript::poseidon::poseidon_canonical_config, @@ -1084,7 +1101,7 @@ mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); // allocate circuit inputs let cs = ConstraintSystem::::new_ref(); @@ -1113,6 +1130,37 @@ mod tests { assert_eq!(folded_lcccsVar.u.value().unwrap(), folded_lcccs.u); } + /// test that checks the native LCCCS.to_sponge_{bytes,field_elements} vs + /// the R1CS constraints version + #[test] + pub fn test_lcccs_to_sponge_preimage() { + let mut rng = test_rng(); + + let ccs = get_test_ccs(); + let z1 = get_test_z::(3); + + let (pedersen_params, _) = + Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); + + let (lcccs, _) = ccs + .to_lcccs::<_, _, Pedersen, true>(&mut rng, &pedersen_params, &z1) + .unwrap(); + let bytes = lcccs.to_sponge_bytes_as_vec(); + let field_elements = lcccs.to_sponge_field_elements_as_vec(); + + let cs = ConstraintSystem::::new_ref(); + + let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs)).unwrap(); + let bytes_var = lcccsVar.to_sponge_bytes().unwrap(); + let field_elements_var = lcccsVar.to_sponge_field_elements().unwrap(); + + assert!(cs.is_satisfied().unwrap()); + + // check that the natively computed and in-circuit computed hashes match + assert_eq!(bytes_var.value().unwrap(), bytes); + assert_eq!(field_elements_var.value().unwrap(), field_elements); + } + /// test that checks the native LCCCS.hash vs the R1CS constraints version #[test] pub fn test_lcccs_hash() { @@ -1133,9 +1181,7 @@ mod tests { let (lcccs, _) = ccs .to_lcccs::<_, _, Pedersen, true>(&mut rng, &pedersen_params, &z1) .unwrap(); - let h = lcccs - .clone() - .hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone()); + let h = lcccs.clone().hash(&sponge, pp_hash, i, &z_0, &z_i); let cs = ConstraintSystem::::new_ref(); @@ -1147,13 +1193,7 
@@ mod tests { let lcccsVar = LCCCSVar::::new_witness(cs.clone(), || Ok(lcccs)).unwrap(); let (hVar, _) = lcccsVar .clone() - .hash( - &spongeVar, - pp_hashVar, - iVar.clone(), - z_0Var.clone(), - z_iVar.clone(), - ) + .hash(&spongeVar, &pp_hashVar, &iVar, &z_0Var, &z_iVar) .unwrap(); assert!(cs.is_satisfied().unwrap()); @@ -1209,13 +1249,13 @@ mod tests { // prepare the dummy instances let W_dummy = Witness::::dummy(&ccs); - let U_dummy = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + let U_dummy = LCCCS::::dummy(&ccs); let w_dummy = W_dummy.clone(); - let u_dummy = CCCS::::dummy(ccs.l); + let u_dummy = CCCS::::dummy(&ccs); let (cf_W_dummy, cf_U_dummy): ( CycleFoldWitness, CycleFoldCommittedInstance, - ) = cf_r1cs.dummy_running_instance(); + ) = cf_r1cs.dummy_witness_instance(); // set the initial dummy instances let mut W_i = W_dummy.clone(); @@ -1225,7 +1265,7 @@ mod tests { let mut cf_W_i = cf_W_dummy.clone(); let mut cf_U_i = cf_U_dummy.clone(); u_i.x = vec![ - U_i.hash(&sponge, pp_hash, Fr::zero(), z_0.clone(), z_i.clone()), + U_i.hash(&sponge, pp_hash, Fr::zero(), &z_0, &z_i), cf_U_i.hash_cyclefold(&sponge, pp_hash), ]; @@ -1250,9 +1290,9 @@ mod tests { if i == 0 { W_i1 = Witness::::dummy(&ccs); - U_i1 = LCCCS::dummy(ccs.l, ccs.t, ccs.s); + U_i1 = LCCCS::dummy(&ccs); - let u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), z_0.clone(), z_i1.clone()); + let u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), &z_0, &z_i1); // hash the initial (dummy) CycleFold instance, which is used as the 2nd public // input in the AugmentedFCircuit @@ -1307,10 +1347,9 @@ mod tests { .unwrap(); // sanity check: check the folded instance relation - U_i1.check_relation(&ccs, &W_i1).unwrap(); + ccs.check_relation(&W_i1, &U_i1).unwrap(); - let u_i1_x = - U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), z_0.clone(), z_i1.clone()); + let u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1); let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); let rho_Fq = Fq::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap(); @@ -1427,15 +1466,14 @@ mod tests { (u_i, w_i) = ccs .to_cccs::<_, _, Pedersen, false>(&mut rng, &pedersen_params, &r1cs_z) .unwrap(); - u_i.check_relation(&ccs, &w_i).unwrap(); + ccs.check_relation(&w_i, &u_i).unwrap(); // sanity checks assert_eq!(w_i.w, r1cs_w_i1); assert_eq!(u_i.x, r1cs_x_i1); assert_eq!(u_i.x[0], augmented_f_circuit.x.unwrap()); assert_eq!(u_i.x[1], augmented_f_circuit.cf_x.unwrap()); - let expected_u_i1_x = - U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), z_0.clone(), z_i1.clone()); + let expected_u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1); let expected_cf_U_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash); // u_i is already u_i1 at this point, check that has the expected value at x[0] assert_eq!(u_i.x[0], expected_u_i1_x); @@ -1449,12 +1487,12 @@ mod tests { W_i = W_i1.clone(); // check the new LCCCS instance relation - U_i.check_relation(&ccs, &W_i).unwrap(); + ccs.check_relation(&W_i, &U_i).unwrap(); // check the new CCCS instance relation - u_i.check_relation(&ccs, &w_i).unwrap(); + ccs.check_relation(&w_i, &u_i).unwrap(); // check the CycleFold instance relation - cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i).unwrap(); + cf_r1cs.check_relation(&cf_W_i, &cf_U_i).unwrap(); println!("augmented_f_circuit step {}: {:?}", i, start.elapsed()); } diff --git a/folding-schemes/src/folding/hypernova/decider_eth.rs b/folding-schemes/src/folding/hypernova/decider_eth.rs index e45876e..f6b0118 100644 --- 
a/folding-schemes/src/folding/hypernova/decider_eth.rs +++ b/folding-schemes/src/folding/hypernova/decider_eth.rs @@ -234,9 +234,7 @@ pub mod tests { use super::*; use crate::commitment::{kzg::KZG, pedersen::Pedersen}; use crate::folding::hypernova::cccs::CCCS; - use crate::folding::hypernova::{ - PreprocessorParam, ProverParams, VerifierParams as HyperNovaVerifierParams, - }; + use crate::folding::hypernova::PreprocessorParam; use crate::folding::nova::decider_eth::VerifierParam; use crate::frontend::utils::CubicFCircuit; use crate::transcript::poseidon::poseidon_canonical_config; @@ -371,33 +369,19 @@ pub mod tests { .serialize_compressed(&mut hypernova_vp_serialized) .unwrap(); - let hypernova_pp_deserialized = ProverParams::< - Projective, - Projective2, - KZG<'static, Bn254>, - Pedersen, - false, - >::deserialize_prover_params( + let hypernova_pp_deserialized = HN::pp_deserialize_with_mode( hypernova_pp_serialized.as_slice(), Compress::Yes, Validate::No, - &hypernova_params.0.ccs, - &poseidon_config, + (), // FCircuit's Params ) .unwrap(); - let hypernova_vp_deserialized = HyperNovaVerifierParams::< - Projective, - Projective2, - KZG<'static, Bn254>, - Pedersen, - false, - >::deserialize_verifier_params( + let hypernova_vp_deserialized = HN::vp_deserialize_with_mode( hypernova_vp_serialized.as_slice(), Compress::Yes, Validate::No, - &hypernova_params.0.ccs.unwrap(), - &poseidon_config, + (), // FCircuit's Params ) .unwrap(); @@ -416,7 +400,7 @@ pub mod tests { let verified = D::verify( decider_vp.clone(), - hypernova.i.clone(), + hypernova.i, hypernova.z_0.clone(), hypernova.z_i.clone(), &(), @@ -483,7 +467,7 @@ pub mod tests { let verified = D::verify( decider_vp_deserialized, - i_deserialized.clone(), + i_deserialized, z_0_deserialized.clone(), z_i_deserialized.clone(), &(), diff --git a/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs b/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs index b4e2cf8..256b0cb 100644 --- a/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/hypernova/decider_eth_circuit.rs @@ -26,9 +26,6 @@ use super::{ nimfs::{NIMFSProof, NIMFS}, HyperNova, Witness, CCCS, LCCCS, }; -use crate::arith::ccs::CCS; -use crate::arith::r1cs::R1CS; -use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme}; use crate::folding::circuits::{ cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness}, CF1, CF2, @@ -40,6 +37,14 @@ use crate::utils::{ vec::poly_from_vec, }; use crate::Error; +use crate::{ + arith::{ccs::CCS, r1cs::R1CS}, + folding::traits::{CommittedInstanceVarOps, Dummy, WitnessVarOps}, +}; +use crate::{ + commitment::{pedersen::Params as PedersenParams, CommitmentScheme}, + folding::nova::decider_eth_circuit::evaluate_gadget, +}; /// In-circuit representation of the Witness associated to the CommittedInstance. #[derive(Debug, Clone)] @@ -66,6 +71,12 @@ impl AllocVar, F> for WitnessVar { } } +impl WitnessVarOps for WitnessVar { + fn get_openings(&self) -> Vec<(&[FpVar], FpVar)> { + vec![(&self.w, self.r_w.clone())] + } +} + /// CCSMatricesVar contains the matrices 'M' of the CCS without the rest of CCS parameters. 
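// Editor's note (illustration, not part of the patch): the test above now goes through
// the generic `pp_deserialize_with_mode` / `vp_deserialize_with_mode` entry points of
// the `FoldingScheme` trait instead of the removed type-specific deserializers, e.g.:
//
//     let pp = HN::pp_deserialize_with_mode(bytes.as_slice(), Compress::Yes, Validate::No, ())?;
//
// The CCS and CycleFold R1CS are rebuilt from the FCircuit params at deserialization
// time (see the hypernova/mod.rs hunk below), so only the commitment-scheme parameters
// actually need to be serialized.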
#[derive(Debug, Clone)] pub struct CCSMatricesVar { @@ -286,8 +297,8 @@ where Ok(self.z_i.unwrap_or(vec![CF1::::zero()])) })?; - let U_dummy_native = LCCCS::::dummy(self.ccs.l, self.ccs.t, self.ccs.s); - let u_dummy_native = CCCS::::dummy(self.ccs.l); + let U_dummy_native = LCCCS::::dummy(&self.ccs); + let u_dummy_native = CCCS::::dummy(&self.ccs); let w_dummy_native = Witness::::new( vec![C1::ScalarField::zero(); self.ccs.n - 3 /* (3=2+1, since u_i.x.len=2) */], ); @@ -305,7 +316,7 @@ where let W_i1 = WitnessVar::::new_witness(cs.clone(), || { Ok(self.W_i1.unwrap_or(w_dummy_native.clone())) })?; - let nimfs_proof_dummy = NIMFSProof::::dummy(&self.ccs, 1, 1); // mu=1 & nu=1 because the last fold is 2-to-1 + let nimfs_proof_dummy = NIMFSProof::::dummy((&self.ccs, 1, 1)); // mu=1 & nu=1 because the last fold is 2-to-1 let nimfs_proof = NIMFSProofVar::::new_witness(cs.clone(), || { Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy)) })?; @@ -314,7 +325,7 @@ where let kzg_challenge = FpVar::>::new_input(cs.clone(), || { Ok(self.kzg_challenge.unwrap_or_else(CF1::::zero)) })?; - let _eval_W = FpVar::>::new_input(cs.clone(), || { + let eval_W = FpVar::>::new_input(cs.clone(), || { Ok(self.eval_W.unwrap_or_else(CF1::::zero)) })?; @@ -340,13 +351,7 @@ where )?; // 3.a u_i.x[0] == H(i, z_0, z_i, U_i) - let (u_i_x, _) = U_i.clone().hash( - &sponge, - pp_hash.clone(), - i.clone(), - z_0.clone(), - z_i.clone(), - )?; + let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?; (u_i.x[0]).enforce_equal(&u_i_x)?; #[cfg(feature = "light-test")] @@ -373,10 +378,7 @@ where let cf_u_dummy_native = CycleFoldCommittedInstance::::dummy(NovaCycleFoldConfig::::IO_LEN); - let cf_w_dummy_native = CycleFoldWitness::::dummy( - self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l, - self.cf_E_len, - ); + let cf_w_dummy_native = CycleFoldWitness::::dummy(&self.cf_r1cs); let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone())) })?; @@ -424,14 +426,6 @@ where // `rho_bits` computed along the way of computing `computed_U_i1` for the later `rho_powers` // check (6.b). - // Check 7 is temporary disabled due - // https://github.com/privacy-scaling-explorations/sonobe/issues/80 - log::warn!("[WARNING]: issue #80 (https://github.com/privacy-scaling-explorations/sonobe/issues/80) is not resolved yet."); - // - // 7. check eval_W==p_W(c_W) - // let incircuit_eval_W = evaluate_gadget::>(W_i1.W, incircuit_c_W)?; - // incircuit_eval_W.enforce_equal(&eval_W)?; - // 8.a verify the NIMFS.V of the final fold, and check that the obtained rho_powers from the // transcript match the one from the public input (so we avoid the onchain logic of the // verifier computing it). @@ -463,6 +457,10 @@ where computed_U_i1.r_x.enforce_equal(&U_i1.r_x)?; computed_U_i1.v.enforce_equal(&U_i1.v)?; + // 7. check eval_W==p_W(c_W) + let incircuit_eval_W = evaluate_gadget::>(W_i1.w, incircuit_challenge)?; + incircuit_eval_W.enforce_equal(&eval_W)?; + // 8.b check that the in-circuit computed r is equal to the inputted r. 
let rho = Boolean::le_bits_to_fp_var(&rho_bits)?; @@ -511,7 +509,6 @@ pub mod tests { use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; use ark_relations::r1cs::ConstraintSystem; - use ark_std::One; use ark_std::{test_rng, UniformRand}; use super::*; @@ -527,7 +524,7 @@ pub mod tests { let n_rows = 2_u32.pow(5) as usize; let n_cols = 2_u32.pow(5) as usize; let r1cs = R1CS::::rand(&mut rng, n_rows, n_cols); - let ccs = CCS::from_r1cs(r1cs); + let ccs = CCS::from(r1cs); let z: Vec = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); let (pedersen_params, _) = @@ -585,22 +582,10 @@ pub mod tests { // generate a Nova instance and do a step of it let mut hypernova = HN::init(&hn_params, F_circuit, z_0.clone()).unwrap(); - hypernova - .prove_step(&mut rng, vec![], Some((vec![], vec![]))) - .unwrap(); + hypernova.prove_step(&mut rng, vec![], None).unwrap(); - let ivc_v = hypernova.clone(); - let (running_instance, incoming_instance, cyclefold_instance) = ivc_v.instances(); - HN::verify( - hn_params.1, // HN's verifier_params - z_0, - ivc_v.z_i, - Fr::one(), - running_instance, - incoming_instance, - cyclefold_instance, - ) - .unwrap(); + let ivc_proof = hypernova.ivc_proof(); + HN::verify(hn_params.1, ivc_proof).unwrap(); // load the DeciderEthCircuit from the generated Nova instance let decider_circuit = DeciderEthCircuit::< diff --git a/folding-schemes/src/folding/hypernova/lcccs.rs b/folding-schemes/src/folding/hypernova/lcccs.rs index 6aa1652..b3f6f1a 100644 --- a/folding-schemes/src/folding/hypernova/lcccs.rs +++ b/folding-schemes/src/folding/hypernova/lcccs.rs @@ -1,5 +1,5 @@ use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; +use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_poly::DenseMultilinearExtension; use ark_poly::MultilinearExtension; @@ -8,10 +8,14 @@ use ark_serialize::CanonicalSerialize; use ark_std::rand::Rng; use ark_std::Zero; +use super::circuits::LCCCSVar; use super::Witness; use crate::arith::ccs::CCS; +use crate::arith::Arith; use crate::commitment::CommitmentScheme; -use crate::transcript::{AbsorbNonNative, Transcript}; +use crate::folding::circuits::CF1; +use crate::folding::traits::{CommittedInstanceOps, Dummy}; +use crate::transcript::AbsorbNonNative; use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::vec::mat_vec_mul; use crate::Error; @@ -78,42 +82,41 @@ impl CCS { } } -impl LCCCS { - pub fn dummy(l: usize, t: usize, s: usize) -> LCCCS - where - C::ScalarField: PrimeField, - { - LCCCS:: { +impl Dummy<&CCS>> for LCCCS { + fn dummy(ccs: &CCS>) -> Self { + Self { C: C::zero(), - u: C::ScalarField::zero(), - x: vec![C::ScalarField::zero(); l], - r_x: vec![C::ScalarField::zero(); s], - v: vec![C::ScalarField::zero(); t], + u: CF1::::zero(), + x: vec![CF1::::zero(); ccs.l], + r_x: vec![CF1::::zero(); ccs.s], + v: vec![CF1::::zero(); ccs.t], } } +} + +impl Arith>, LCCCS> for CCS> { + type Evaluation = Vec>; /// Perform the check of the LCCCS instance described at section 4.2, /// notice that this method does not check the commitment correctness - pub fn check_relation( - &self, - ccs: &CCS, - w: &Witness, - ) -> Result<(), Error> { - // check CCS relation - let z: Vec = [vec![self.u], self.x.clone(), w.w.to_vec()].concat(); + fn eval_relation(&self, w: &Witness>, u: &LCCCS) -> Result { + let z = [&[u.u][..], &u.x, &w.w].concat(); - let computed_v: Vec = ccs - .M + self.M .iter() .map(|M_j| { - let Mz_mle = dense_vec_to_dense_mle(ccs.s, 
&mat_vec_mul(M_j, &z)?); - Mz_mle.evaluate(&self.r_x).ok_or(Error::EvaluationFail) + let Mz_mle = dense_vec_to_dense_mle(self.s, &mat_vec_mul(M_j, &z)?); + Mz_mle.evaluate(&u.r_x).ok_or(Error::EvaluationFail) }) - .collect::>()?; - if computed_v != self.v { - return Err(Error::NotSatisfied); - } - Ok(()) + .collect() + } + + fn check_evaluation( + _w: &Witness>, + u: &LCCCS, + e: Self::Evaluation, + ) -> Result<(), Error> { + (u.v == e).then_some(()).ok_or(Error::NotSatisfied) } } @@ -121,9 +124,8 @@ impl Absorb for LCCCS where C::ScalarField: Absorb, { - fn to_sponge_bytes(&self, _dest: &mut Vec) { - // This is never called - unimplemented!() + fn to_sponge_bytes(&self, dest: &mut Vec) { + C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { @@ -140,29 +142,15 @@ where } } -impl LCCCS -where - ::ScalarField: Absorb, - ::BaseField: ark_ff::PrimeField, -{ - /// [`LCCCS`].hash implements the committed instance hash compatible with the gadget - /// implemented in nova/circuits.rs::CommittedInstanceVar.hash. - /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the LCCCS. - pub fn hash>( - &self, - sponge: &T, - pp_hash: C::ScalarField, - i: C::ScalarField, - z_0: Vec, - z_i: Vec, - ) -> C::ScalarField { - let mut sponge = sponge.clone(); - sponge.absorb(&pp_hash); - sponge.absorb(&i); - sponge.absorb(&z_0); - sponge.absorb(&z_i); - sponge.absorb(&self); - sponge.squeeze_field_elements(1)[0] +impl CommittedInstanceOps for LCCCS { + type Var = LCCCSVar; + + fn get_commitments(&self) -> Vec { + vec![self.C] + } + + fn is_incoming(&self) -> bool { + false } } @@ -216,7 +204,7 @@ pub mod tests { let n_rows = 2_u32.pow(5) as usize; let n_cols = 2_u32.pow(5) as usize; let r1cs = R1CS::::rand(&mut rng, n_rows, n_cols); - let ccs = CCS::from_r1cs(r1cs); + let ccs = CCS::from(r1cs); let z: Vec = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); let (pedersen_params, _) = @@ -250,12 +238,14 @@ pub mod tests { let ccs = get_test_ccs(); let z = get_test_z(3); - ccs.check_relation(&z.clone()).unwrap(); + let (w, x) = ccs.split_z(&z); + ccs.check_relation(&w, &x).unwrap(); // Mutate z so that the relation does not hold let mut bad_z = z.clone(); bad_z[3] = Fr::zero(); - assert!(ccs.check_relation(&bad_z.clone()).is_err()); + let (bad_w, bad_x) = ccs.split_z(&bad_z); + assert!(ccs.check_relation(&bad_w, &bad_x).is_err()); let (pedersen_params, _) = Pedersen::::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); diff --git a/folding-schemes/src/folding/hypernova/mod.rs b/folding-schemes/src/folding/hypernova/mod.rs index 2917ea4..2034b79 100644 --- a/folding-schemes/src/folding/hypernova/mod.rs +++ b/folding-schemes/src/folding/hypernova/mod.rs @@ -6,7 +6,7 @@ use ark_crypto_primitives::sponge::{ use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, PrimeField}; use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError}; use ark_std::{fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero}; pub mod cccs; @@ -15,31 +15,36 @@ pub mod decider_eth; pub mod decider_eth_circuit; pub mod lcccs; pub mod nimfs; -pub mod serialize; pub mod utils; use cccs::CCCS; use circuits::AugmentedFCircuit; +use decider_eth_circuit::WitnessVar; use lcccs::LCCCS; use nimfs::NIMFS; +use crate::commitment::CommitmentScheme; use 
crate::constants::NOVA_N_BITS_RO; -use crate::folding::circuits::{ - cyclefold::{ - fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, - CycleFoldWitness, +use crate::folding::{ + circuits::{ + cyclefold::{ + fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, + CycleFoldWitness, + }, + CF2, }, - CF2, + nova::{get_r1cs_from_cs, PreprocessorParam}, + traits::{CommittedInstanceOps, Dummy, WitnessOps}, }; -use crate::folding::nova::{get_r1cs_from_cs, PreprocessorParam}; use crate::frontend::FCircuit; +use crate::transcript::poseidon::poseidon_canonical_config; use crate::utils::{get_cm_coordinates, pp_hash}; use crate::Error; -use crate::{arith::r1cs::RelaxedR1CS, commitment::CommitmentScheme}; use crate::{ arith::{ ccs::CCS, r1cs::{extract_w_x, R1CS}, + Arith, }, FoldingScheme, MultiFolding, }; @@ -76,8 +81,19 @@ impl Witness { // always. Self { w, r_w: F::zero() } } - pub fn dummy(ccs: &CCS) -> Self { - Witness::::new(vec![F::zero(); ccs.n - ccs.l - 1]) +} + +impl Dummy<&CCS> for Witness { + fn dummy(ccs: &CCS) -> Self { + Self::new(vec![F::zero(); ccs.n - ccs.l - 1]) + } +} + +impl WitnessOps for Witness { + type Var = WitnessVar; + + fn get_openings(&self) -> Vec<(&[F], F)> { + vec![(&self.w, self.r_w)] } } @@ -101,6 +117,28 @@ where pub ccs: Option>, } +impl< + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + const H: bool, + > CanonicalSerialize for ProverParams +{ + fn serialize_with_mode( + &self, + mut writer: W, + compress: Compress, + ) -> Result<(), SerializationError> { + self.cs_pp.serialize_with_mode(&mut writer, compress)?; + self.cf_cs_pp.serialize_with_mode(&mut writer, compress) + } + + fn serialized_size(&self, compress: Compress) -> usize { + self.cs_pp.serialized_size(compress) + self.cf_cs_pp.serialized_size(compress) + } +} + /// Verification parameters for HyperNova-based IVC #[derive(Debug, Clone)] pub struct VerifierParams< @@ -122,6 +160,27 @@ pub struct VerifierParams< pub cf_cs_vp: CS2::VerifierParams, } +impl CanonicalSerialize for VerifierParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + fn serialize_with_mode( + &self, + mut writer: W, + compress: ark_serialize::Compress, + ) -> Result<(), ark_serialize::SerializationError> { + self.cs_vp.serialize_with_mode(&mut writer, compress)?; + self.cf_cs_vp.serialize_with_mode(&mut writer, compress) + } + + fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { + self.cs_vp.serialized_size(compress) + self.cf_cs_vp.serialized_size(compress) + } +} + impl VerifierParams where C1: CurveGroup, @@ -141,6 +200,23 @@ where } } +#[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] +pub struct IVCProof +where + C1: CurveGroup, + C2: CurveGroup, +{ + pub i: C1::ScalarField, + pub z_0: Vec, + pub z_i: Vec, + pub W_i: Witness, + pub U_i: LCCCS, + pub w_i: Witness, + pub u_i: CCCS, + pub cf_W_i: CycleFoldWitness, + pub cf_U_i: CycleFoldCommittedInstance, +} + /// Implements HyperNova+CycleFold's IVC, described in /// [HyperNova](https://eprint.iacr.org/2023/573.pdf) and /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait @@ -241,7 +317,7 @@ where .to_lcccs::<_, _, CS1, H>(&mut rng, &self.cs_pp, &r1cs_z)?; #[cfg(test)] - U_i.check_relation(&self.ccs, &W_i)?; + self.ccs.check_relation(&W_i, &U_i)?; Ok((U_i, W_i)) } @@ -263,7 +339,7 @@ where .to_cccs::<_, _, CS1, H>(&mut rng, &self.cs_pp, 
&r1cs_z)?; #[cfg(test)] - u_i.check_relation(&self.ccs, &w_i)?; + self.ccs.check_relation(&w_i, &u_i)?; Ok((u_i, w_i)) } @@ -295,10 +371,10 @@ where external_inputs: Vec, ) -> Result, Error> { // prepare the initial dummy instances - let U_i = LCCCS::::dummy(self.ccs.l, self.ccs.t, self.ccs.s); - let mut u_i = CCCS::::dummy(self.ccs.l); + let U_i = LCCCS::::dummy(&self.ccs); + let mut u_i = CCCS::::dummy(&self.ccs); let (_, cf_U_i): (CycleFoldWitness, CycleFoldCommittedInstance) = - self.cf_r1cs.dummy_running_instance(); + self.cf_r1cs.dummy_witness_instance(); let sponge = PoseidonSponge::::new(&self.poseidon_config); @@ -307,8 +383,8 @@ where &sponge, self.pp_hash, C1::ScalarField::zero(), // i - self.z_0.clone(), - state.clone(), + &self.z_0, + &state, ), cf_U_i.hash_cyclefold(&sponge, self.pp_hash), ]; @@ -319,13 +395,13 @@ where .step_native(0, state.clone(), external_inputs.clone())?; // compute u_{i+1}.x - let U_i1 = LCCCS::dummy(self.ccs.l, self.ccs.t, self.ccs.s); + let U_i1 = LCCCS::dummy(&self.ccs); let u_i1_x = U_i1.hash( &sponge, self.pp_hash, C1::ScalarField::one(), // i+1, where i=0 - self.z_0.clone(), - z_i1.clone(), + &self.z_0, + &z_i1, ); let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, self.pp_hash); @@ -403,6 +479,74 @@ where type MultiCommittedInstanceWithWitness = (Vec, Vec); type CFInstance = (CycleFoldCommittedInstance, CycleFoldWitness); + type IVCProof = IVCProof; + + fn pp_deserialize_with_mode( + mut reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + fc_params: FC::Params, + ) -> Result { + let poseidon_config = poseidon_canonical_config::(); + + // generate the r1cs & cf_r1cs needed for the VerifierParams. In this way we avoid needing + // to serialize them, saving significant space in the VerifierParams serialized size. + + // main circuit R1CS: + let f_circuit = FC::new(fc_params)?; + let augmented_F_circuit = AugmentedFCircuit::::empty( + &poseidon_config, + f_circuit.clone(), + None, + )?; + let ccs = augmented_F_circuit.ccs; + + let cs_pp = CS1::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?; + let cf_cs_pp = CS2::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?; + + Ok(ProverParams { + poseidon_config, + cs_pp, + cf_cs_pp, + ccs: Some(ccs), + }) + } + + fn vp_deserialize_with_mode( + mut reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + fc_params: FC::Params, + ) -> Result { + let poseidon_config = poseidon_canonical_config::(); + + // generate the r1cs & cf_r1cs needed for the VerifierParams. In this way we avoid needing + // to serialize them, saving significant space in the VerifierParams serialized size. 
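// (illustrative sketch, not part of the hunk above) in practice this means that only the
// commitment-scheme keys are read from the serialized bytes, while the CCS and the CycleFold
// R1CS are rebuilt locally from the FCircuit params; e.g., assuming `HN` is a concrete
// HyperNova type alias and `vp_bytes` holds verifier params previously written with
// `serialize_compressed`:
//
//   let vp = HN::vp_deserialize_with_mode(
//       &mut vp_bytes.as_slice(),
//       ark_serialize::Compress::Yes,
//       ark_serialize::Validate::Yes,
//       (), // FCircuit's Params
//   )?;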
+ + // main circuit R1CS: + let f_circuit = FC::new(fc_params)?; + let augmented_F_circuit = AugmentedFCircuit::::empty( + &poseidon_config, + f_circuit.clone(), + None, + )?; + let ccs = augmented_F_circuit.ccs; + + // CycleFold circuit R1CS + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; + + let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; + let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; + + Ok(VerifierParams { + poseidon_config, + ccs, + cf_r1cs, + cs_vp, + cf_cs_vp, + }) + } fn preprocess( mut rng: impl RngCore, @@ -488,19 +632,13 @@ where // setup the dummy instances let W_dummy = Witness::::dummy(&ccs); - let U_dummy = LCCCS::::dummy(ccs.l, ccs.t, ccs.s); + let U_dummy = LCCCS::::dummy(&ccs); let w_dummy = W_dummy.clone(); - let mut u_dummy = CCCS::::dummy(ccs.l); + let mut u_dummy = CCCS::::dummy(&ccs); let (cf_W_dummy, cf_U_dummy): (CycleFoldWitness, CycleFoldCommittedInstance) = - cf_r1cs.dummy_running_instance(); + cf_r1cs.dummy_witness_instance(); u_dummy.x = vec![ - U_dummy.hash( - &sponge, - pp_hash, - C1::ScalarField::zero(), - z_0.clone(), - z_0.clone(), - ), + U_dummy.hash(&sponge, pp_hash, C1::ScalarField::zero(), &z_0, &z_0), cf_U_dummy.hash_cyclefold(&sponge, pp_hash), ]; @@ -556,36 +694,42 @@ where // `sponge` is for digest computation. let sponge = PoseidonSponge::::new(&self.poseidon_config); - let other_instances = other_instances.ok_or(Error::MissingOtherInstances)?; - - #[allow(clippy::type_complexity)] - let (lcccs, cccs): ( - Vec<(LCCCS, Witness)>, - Vec<(CCCS, Witness)>, - ) = other_instances; - - // recall, mu & nu is the number of all the LCCCS & CCCS respectively, including the - // running and incoming instances that are not part of the 'other_instances', hence the +1 - // in the couple of following checks. - if lcccs.len() + 1 != MU { - return Err(Error::NotSameLength( - "other_instances.lcccs.len()".to_string(), - lcccs.len(), - "hypernova.mu".to_string(), - MU, - )); - } - if cccs.len() + 1 != NU { - return Err(Error::NotSameLength( - "other_instances.cccs.len()".to_string(), - cccs.len(), - "hypernova.nu".to_string(), - NU, - )); - } + let (Us, Ws, us, ws) = if MU > 1 || NU > 1 { + let other_instances = other_instances.ok_or(Error::MissingOtherInstances(MU, NU))?; + + #[allow(clippy::type_complexity)] + let (lcccs, cccs): ( + Vec<(LCCCS, Witness)>, + Vec<(CCCS, Witness)>, + ) = other_instances; + + // recall, mu & nu is the number of all the LCCCS & CCCS respectively, including the + // running and incoming instances that are not part of the 'other_instances', hence the +1 + // in the couple of following checks. 
+ if lcccs.len() + 1 != MU { + return Err(Error::NotSameLength( + "other_instances.lcccs.len()".to_string(), + lcccs.len(), + "hypernova.mu".to_string(), + MU, + )); + } + if cccs.len() + 1 != NU { + return Err(Error::NotSameLength( + "other_instances.cccs.len()".to_string(), + cccs.len(), + "hypernova.nu".to_string(), + NU, + )); + } - let (Us, Ws): (Vec>, Vec>) = lcccs.into_iter().unzip(); - let (us, ws): (Vec>, Vec>) = cccs.into_iter().unzip(); + let (Us, Ws): (Vec>, Vec>) = + lcccs.into_iter().unzip(); + let (us, ws): (Vec>, Vec>) = cccs.into_iter().unzip(); + (Some(Us), Some(Ws), Some(us), Some(ws)) + } else { + (None, None, None, None) + }; let augmented_f_circuit: AugmentedFCircuit; @@ -637,14 +781,14 @@ where if self.i == C1::ScalarField::zero() { W_i1 = Witness::::dummy(&self.ccs); W_i1.r_w = self.W_i.r_w; - U_i1 = LCCCS::dummy(self.ccs.l, self.ccs.t, self.ccs.s); + U_i1 = LCCCS::dummy(&self.ccs); let u_i1_x = U_i1.hash( &sponge, self.pp_hash, C1::ScalarField::one(), - self.z_0.clone(), - z_i1.clone(), + &self.z_0, + &z_i1, ); // hash the initial (dummy) CycleFold instance, which is used as the 2nd public @@ -663,9 +807,9 @@ where z_i: Some(self.z_i.clone()), external_inputs: Some(external_inputs.clone()), U_i: Some(self.U_i.clone()), - Us: Some(Us.clone()), + Us: Us.clone(), u_i_C: Some(self.u_i.C), - us: Some(us.clone()), + us: us.clone(), U_i1_C: Some(U_i1.C), F: self.F.clone(), x: Some(u_i1_x), @@ -681,26 +825,43 @@ where let mut transcript_p: PoseidonSponge = PoseidonSponge::::new(&self.poseidon_config); transcript_p.absorb(&self.pp_hash); + + let (all_Us, all_us, all_Ws, all_ws) = if MU > 1 || NU > 1 { + ( + [vec![self.U_i.clone()], Us.clone().unwrap()].concat(), + [vec![self.u_i.clone()], us.clone().unwrap()].concat(), + [vec![self.W_i.clone()], Ws.unwrap()].concat(), + [vec![self.w_i.clone()], ws.unwrap()].concat(), + ) + } else { + ( + vec![self.U_i.clone()], + vec![self.u_i.clone()], + vec![self.W_i.clone()], + vec![self.w_i.clone()], + ) + }; + let (rho, nimfs_proof); (nimfs_proof, U_i1, W_i1, rho) = NIMFS::>::prove( &mut transcript_p, &self.ccs, - &[vec![self.U_i.clone()], Us.clone()].concat(), - &[vec![self.u_i.clone()], us.clone()].concat(), - &[vec![self.W_i.clone()], Ws].concat(), - &[vec![self.w_i.clone()], ws].concat(), + &all_Us, + &all_us, + &all_Ws, + &all_ws, )?; // sanity check: check the folded instance relation #[cfg(test)] - U_i1.check_relation(&self.ccs, &W_i1)?; + self.ccs.check_relation(&W_i1, &U_i1)?; let u_i1_x = U_i1.hash( &sponge, self.pp_hash, self.i + C1::ScalarField::one(), - self.z_0.clone(), - z_i1.clone(), + &self.z_0, + &z_i1, ); let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); @@ -715,12 +876,12 @@ where // where each p_i is in fact p_i.to_constraint_field() let cf_u_i_x = [ vec![rho_Fq], - get_cm_coordinates(&self.U_i.C), - Us.iter() + all_Us + .iter() .flat_map(|Us_i| get_cm_coordinates(&Us_i.C)) .collect(), - get_cm_coordinates(&self.u_i.C), - us.iter() + all_us + .iter() .flat_map(|us_i| get_cm_coordinates(&us_i.C)) .collect(), get_cm_coordinates(&U_i1.C), @@ -732,10 +893,8 @@ where r_bits: Some(rho_bits.clone()), points: Some( [ - vec![self.U_i.clone().C], - Us.iter().map(|Us_i| Us_i.C).collect(), - vec![self.u_i.clone().C], - us.iter().map(|us_i| us_i.C).collect(), + all_Us.iter().map(|Us_i| Us_i.C).collect::>(), + all_us.iter().map(|us_i| us_i.C).collect::>(), ] .concat(), ), @@ -776,9 +935,9 @@ where z_i: Some(self.z_i.clone()), external_inputs: Some(external_inputs), U_i: Some(self.U_i.clone()), - Us: 
Some(Us.clone()), + Us: Us.clone(), u_i_C: Some(self.u_i.C), - us: Some(us.clone()), + us: us.clone(), U_i1_C: Some(U_i1.C), F: self.F.clone(), x: Some(u_i1_x), @@ -827,9 +986,9 @@ where #[cfg(test)] { // check the new LCCCS instance relation - self.U_i.check_relation(&self.ccs, &self.W_i)?; + self.ccs.check_relation(&self.W_i, &self.U_i)?; // check the new CCCS instance relation - self.u_i.check_relation(&self.ccs, &self.w_i)?; + self.ccs.check_relation(&self.w_i, &self.u_i)?; } Ok(()) @@ -839,31 +998,87 @@ where self.z_i.clone() } - fn instances( - &self, - ) -> ( - Self::RunningInstance, - Self::IncomingInstance, - Self::CFInstance, - ) { - ( - (self.U_i.clone(), self.W_i.clone()), - (self.u_i.clone(), self.w_i.clone()), - (self.cf_U_i.clone(), self.cf_W_i.clone()), - ) + fn ivc_proof(&self) -> Self::IVCProof { + Self::IVCProof { + i: self.i, + z_0: self.z_0.clone(), + z_i: self.z_i.clone(), + W_i: self.W_i.clone(), + U_i: self.U_i.clone(), + w_i: self.w_i.clone(), + u_i: self.u_i.clone(), + cf_W_i: self.cf_W_i.clone(), + cf_U_i: self.cf_U_i.clone(), + } } - /// Implements IVC.V of HyperNova+CycleFold. Notice that this method does not include the + fn from_ivc_proof( + ivc_proof: Self::IVCProof, + fcircuit_params: FC::Params, + params: (Self::ProverParam, Self::VerifierParam), + ) -> Result { + let IVCProof { + i, + z_0, + z_i, + W_i, + U_i, + w_i, + u_i, + cf_W_i, + cf_U_i, + } = ivc_proof; + let (pp, vp) = params; + + let f_circuit = FC::new(fcircuit_params).unwrap(); + let augmented_f_circuit = AugmentedFCircuit::::empty( + &pp.poseidon_config, + f_circuit.clone(), + None, + )?; + let cf_circuit = HyperNovaCycleFoldCircuit::::empty(); + + let ccs = augmented_f_circuit.ccs.clone(); + let cf_r1cs = get_r1cs_from_cs::(cf_circuit)?; + + Ok(Self { + _gc1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + ccs, + cf_r1cs, + poseidon_config: pp.poseidon_config, + cs_pp: pp.cs_pp, + cf_cs_pp: pp.cf_cs_pp, + F: f_circuit, + pp_hash: vp.pp_hash()?, + i, + z_0, + z_i, + w_i, + u_i, + W_i, + U_i, + cf_W_i, + cf_U_i, + }) + } + + /// Implements IVC.V of HyperNova+CycleFold. Notice that this method does not include the /// commitments verification, which is done in the Decider. - fn verify( - vp: Self::VerifierParam, - z_0: Vec, // initial state - z_i: Vec, // last state - num_steps: C1::ScalarField, - running_instance: Self::RunningInstance, - incoming_instance: Self::IncomingInstance, - cyclefold_instance: Self::CFInstance, - ) -> Result<(), Error> { + fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error> { + let Self::IVCProof { + i: num_steps, + z_0, + z_i, + W_i, + U_i, + w_i, + u_i, + cf_W_i, + cf_U_i, + } = ivc_proof; + if num_steps == C1::ScalarField::zero() { if z_0 != z_i { return Err(Error::IVCVerificationFail); } @@ -873,9 +1088,6 @@ where // `sponge` is for digest computation. 
let sponge = PoseidonSponge::::new(&vp.poseidon_config); - let (U_i, W_i) = running_instance; - let (u_i, w_i) = incoming_instance; - let (cf_U_i, cf_W_i) = cyclefold_instance; if u_i.x.len() != 2 || U_i.x.len() != 2 { return Err(Error::IVCVerificationFail); } @@ -884,7 +1096,7 @@ where // check that u_i's output points to the running instance // u_i.X[0] == H(i, z_0, z_i, U_i) - let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone()); + let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i); if expected_u_i_x != u_i.x[0] { return Err(Error::IVCVerificationFail); } @@ -895,12 +1107,12 @@ where } // check LCCCS satisfiability - U_i.check_relation(&vp.ccs, &W_i)?; + vp.ccs.check_relation(&W_i, &U_i)?; // check CCCS satisfiability - u_i.check_relation(&vp.ccs, &w_i)?; + vp.ccs.check_relation(&w_i, &u_i)?; // check CycleFold's RelaxedR1CS satisfiability - vp.cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i)?; + vp.cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?; Ok(()) } @@ -940,6 +1152,7 @@ mod tests { test_ivc_opt::, Pedersen, false>(poseidon_config, F_circuit); } + #[allow(clippy::type_complexity)] // test_ivc allowing to choose the CommitmentSchemes pub fn test_ivc_opt< CS1: CommitmentScheme, @@ -948,18 +1161,6 @@ mod tests { >( poseidon_config: PoseidonConfig, F_circuit: CubicFCircuit, - ) -> ( - HyperNova, CS1, CS2, 2, 3, H>, - ( - ProverParams, - VerifierParams, - ), - (LCCCS, Witness), - (CCCS, Witness), - ( - CycleFoldCommittedInstance, - CycleFoldWitness, - ), ) { let mut rng = ark_std::test_rng(); @@ -1013,24 +1214,11 @@ mod tests { } assert_eq!(Fr::from(num_steps as u32), hypernova.i); - let (running_instance, incoming_instance, cyclefold_instance) = hypernova.instances(); + let ivc_proof = hypernova.ivc_proof(); HN::verify( hypernova_params.1.clone(), // verifier_params - z_0, - hypernova.z_i.clone(), - hypernova.i.clone(), - running_instance.clone(), - incoming_instance.clone(), - cyclefold_instance.clone(), + ivc_proof, ) .unwrap(); - - ( - hypernova, - hypernova_params, - running_instance, - incoming_instance, - cyclefold_instance, - ) } } diff --git a/folding-schemes/src/folding/hypernova/nimfs.rs b/folding-schemes/src/folding/hypernova/nimfs.rs index 6312162..1fae370 100644 --- a/folding-schemes/src/folding/hypernova/nimfs.rs +++ b/folding-schemes/src/folding/hypernova/nimfs.rs @@ -13,6 +13,8 @@ use super::{ }; use crate::arith::ccs::CCS; use crate::constants::NOVA_N_BITS_RO; +use crate::folding::circuits::CF1; +use crate::folding::traits::Dummy; use crate::transcript::Transcript; use crate::utils::sum_check::structs::{IOPProof as SumCheckProof, IOPProverMessage}; use crate::utils::sum_check::{IOPSumCheck, SumCheck}; @@ -29,8 +31,8 @@ pub struct NIMFSProof { pub sigmas_thetas: SigmasThetas, } -impl NIMFSProof { - pub fn dummy(ccs: &CCS, mu: usize, nu: usize) -> Self { +impl Dummy<(&CCS>, usize, usize)> for NIMFSProof { + fn dummy((ccs, mu, nu): (&CCS>, usize, usize)) -> Self { // use 'C::ScalarField::one()' instead of 'zero()' to enforce the NIMFSProof to have the // same in-circuit representation to match the number of constraints of an actual proof. 
NIMFSProof:: { @@ -410,8 +412,10 @@ pub mod tests { let ccs = get_test_ccs(); let z1 = get_test_z::(3); let z2 = get_test_z::(4); - ccs.check_relation(&z1).unwrap(); - ccs.check_relation(&z2).unwrap(); + let (w1, x1) = ccs.split_z(&z1); + let (w2, x2) = ccs.split_z(&z2); + ccs.check_relation(&w1, &x1).unwrap(); + ccs.check_relation(&w2, &x2).unwrap(); let mut rng = test_rng(); let r_x_prime: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); @@ -429,8 +433,8 @@ pub mod tests { .to_cccs::<_, Projective, Pedersen, false>(&mut rng, &pedersen_params, &z2) .unwrap(); - lcccs.check_relation(&ccs, &w1).unwrap(); - cccs.check_relation(&ccs, &w2).unwrap(); + ccs.check_relation(&w1, &lcccs).unwrap(); + ccs.check_relation(&w2, &cccs).unwrap(); let mut rng = test_rng(); let rho = Fr::rand(&mut rng); @@ -446,7 +450,7 @@ pub mod tests { let w_folded = NIMFS::>::fold_witness(&[w1], &[w2], rho); // check lcccs relation - folded.check_relation(&ccs, &w_folded).unwrap(); + ccs.check_relation(&w_folded, &folded).unwrap(); } /// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper) @@ -506,7 +510,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); } /// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance @@ -566,7 +570,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // check that the folded instance with the folded witness holds the LCCCS relation - folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); running_instance = folded_lcccs; w1 = folded_witness; @@ -652,7 +656,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); } /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step @@ -740,7 +744,7 @@ pub mod tests { assert_eq!(folded_lcccs, folded_lcccs_v); // Check that the folded LCCCS instance is a valid instance with respect to the folded witness - folded_lcccs.check_relation(&ccs, &folded_witness).unwrap(); + ccs.check_relation(&folded_witness, &folded_lcccs).unwrap(); } } } diff --git a/folding-schemes/src/folding/hypernova/serialize.rs b/folding-schemes/src/folding/hypernova/serialize.rs deleted file mode 100644 index a7aa6d0..0000000 --- a/folding-schemes/src/folding/hypernova/serialize.rs +++ /dev/null @@ -1,420 +0,0 @@ -use crate::arith::ccs::CCS; -use crate::arith::r1cs::R1CS; -use crate::folding::hypernova::ProverParams; -use crate::folding::hypernova::VerifierParams; -use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; -use ark_crypto_primitives::sponge::Absorb; -use ark_ec::{CurveGroup, Group}; -use ark_ff::PrimeField; -use ark_r1cs_std::groups::{CurveVar, GroupOpsBounds}; -use ark_r1cs_std::ToConstraintFieldGadget; -use ark_serialize::CanonicalDeserialize; -use ark_serialize::{CanonicalSerialize, Compress, SerializationError, Validate}; -use ark_std::marker::PhantomData; - -use crate::folding::hypernova::cccs::CCCS; -use crate::folding::hypernova::lcccs::LCCCS; -use crate::folding::hypernova::Witness; -use crate::folding::nova::{ - 
CommittedInstance as CycleFoldCommittedInstance, Witness as CycleFoldWitness, -}; -use crate::FoldingScheme; -use crate::{ - commitment::CommitmentScheme, - folding::{circuits::CF2, nova::PreprocessorParam}, - frontend::FCircuit, -}; - -use super::HyperNova; - -impl - CanonicalSerialize for HyperNova -where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar>, - FC: FCircuit, - CS1: CommitmentScheme, - CS2: CommitmentScheme, -{ - fn serialize_compressed( - &self, - writer: W, - ) -> Result<(), ark_serialize::SerializationError> { - self.serialize_with_mode(writer, ark_serialize::Compress::Yes) - } - - fn compressed_size(&self) -> usize { - self.serialized_size(ark_serialize::Compress::Yes) - } - - fn serialize_uncompressed( - &self, - writer: W, - ) -> Result<(), ark_serialize::SerializationError> { - self.serialize_with_mode(writer, ark_serialize::Compress::No) - } - - fn uncompressed_size(&self) -> usize { - self.serialized_size(ark_serialize::Compress::No) - } - - fn serialize_with_mode( - &self, - mut writer: W, - compress: ark_serialize::Compress, - ) -> Result<(), ark_serialize::SerializationError> { - self.pp_hash.serialize_with_mode(&mut writer, compress)?; - self.i.serialize_with_mode(&mut writer, compress)?; - self.z_0.serialize_with_mode(&mut writer, compress)?; - self.z_i.serialize_with_mode(&mut writer, compress)?; - self.W_i.serialize_with_mode(&mut writer, compress)?; - self.U_i.serialize_with_mode(&mut writer, compress)?; - self.w_i.serialize_with_mode(&mut writer, compress)?; - self.u_i.serialize_with_mode(&mut writer, compress)?; - self.cf_W_i.serialize_with_mode(&mut writer, compress)?; - self.cf_U_i.serialize_with_mode(&mut writer, compress) - } - - fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { - self.pp_hash.serialized_size(compress) - + self.i.serialized_size(compress) - + self.z_0.serialized_size(compress) - + self.z_i.serialized_size(compress) - + self.W_i.serialized_size(compress) - + self.U_i.serialized_size(compress) - + self.w_i.serialized_size(compress) - + self.u_i.serialized_size(compress) - + self.cf_W_i.serialized_size(compress) - + self.cf_U_i.serialized_size(compress) - } -} - -impl - HyperNova -where - C1: CurveGroup, - GC1: CurveVar> + ToConstraintFieldGadget>, - C2: CurveGroup, - GC2: CurveVar> + ToConstraintFieldGadget>, - FC: FCircuit, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, - for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, - for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, -{ - #[allow(clippy::too_many_arguments)] - pub fn deserialize_hypernova( - mut reader: R, - compress: Compress, - validate: Validate, - poseidon_config: PoseidonConfig, - cs_pp: CS1::ProverParams, - cs_vp: CS1::VerifierParams, - cf_cs_pp: CS2::ProverParams, - cf_cs_vp: CS2::VerifierParams, - ) -> Result { - let f_circuit = FC::new(()).unwrap(); - let prep_param = PreprocessorParam { - poseidon_config: poseidon_config.clone(), - F: f_circuit.clone(), - cs_pp: Some(cs_pp.clone()), - cs_vp: Some(cs_vp.clone()), - cf_cs_pp: Some(cf_cs_pp.clone()), - cf_cs_vp: Some(cf_cs_vp.clone()), - }; - // `test_rng` won't be used in `preprocess`, since parameters have already been initialized - let (prover_params, verifier_params) = Self::preprocess(ark_std::test_rng(), &prep_param) - .or(Err(SerializationError::InvalidData))?; - let pp_hash = C1::ScalarField::deserialize_with_mode(&mut reader, 
compress, validate)?; - let i = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?; - let z_0 = Vec::::deserialize_with_mode(&mut reader, compress, validate)?; - let z_i = Vec::::deserialize_with_mode(&mut reader, compress, validate)?; - let W_i = - Witness::::deserialize_with_mode(&mut reader, compress, validate)?; - let U_i = LCCCS::::deserialize_with_mode(&mut reader, compress, validate)?; - let w_i = - Witness::::deserialize_with_mode(&mut reader, compress, validate)?; - let u_i = CCCS::::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_W_i = - CycleFoldWitness::::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_U_i = CycleFoldCommittedInstance::::deserialize_with_mode( - &mut reader, - compress, - validate, - )?; - let ccs = prover_params.ccs.ok_or(SerializationError::InvalidData)?; - - Ok(HyperNova { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, - ccs, - cf_r1cs: verifier_params.cf_r1cs, - poseidon_config, - cs_pp, - cf_cs_pp, - F: f_circuit, - pp_hash, - i, - z_0, - z_i, - W_i, - U_i, - w_i, - u_i, - cf_W_i, - cf_U_i, - }) - } -} - -impl< - C1: CurveGroup, - C2: CurveGroup, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - const H: bool, - > CanonicalSerialize for ProverParams -{ - fn serialize_compressed( - &self, - writer: W, - ) -> Result<(), SerializationError> { - self.serialize_with_mode(writer, Compress::Yes) - } - - fn compressed_size(&self) -> usize { - self.serialized_size(Compress::Yes) - } - - fn serialize_uncompressed( - &self, - writer: W, - ) -> Result<(), SerializationError> { - self.serialize_with_mode(writer, Compress::No) - } - - fn uncompressed_size(&self) -> usize { - self.serialized_size(Compress::No) - } - - fn serialize_with_mode( - &self, - mut writer: W, - compress: Compress, - ) -> Result<(), SerializationError> { - self.cs_pp.serialize_with_mode(&mut writer, compress)?; - self.cf_cs_pp.serialize_with_mode(&mut writer, compress) - } - - fn serialized_size(&self, compress: Compress) -> usize { - self.cs_pp.serialized_size(compress) + self.cf_cs_pp.serialized_size(compress) - } -} - -impl< - C1: CurveGroup, - C2: CurveGroup, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - const H: bool, - > ProverParams -{ - pub fn deserialize_prover_params( - mut reader: R, - compress: Compress, - validate: Validate, - ccs: &Option>, - poseidon_config: &PoseidonConfig, - ) -> Result { - let cs_pp = CS1::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_cs_pp = CS2::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?; - - Ok(ProverParams { - cs_pp, - cf_cs_pp, - ccs: ccs.clone(), - poseidon_config: poseidon_config.clone(), - }) - } -} - -impl< - C1: CurveGroup, - C2: CurveGroup, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - const H: bool, - > CanonicalSerialize for VerifierParams -{ - fn serialize_compressed( - &self, - writer: W, - ) -> Result<(), SerializationError> { - self.serialize_with_mode(writer, Compress::Yes) - } - - fn compressed_size(&self) -> usize { - self.serialized_size(Compress::Yes) - } - - fn serialize_uncompressed( - &self, - writer: W, - ) -> Result<(), SerializationError> { - self.serialize_with_mode(writer, Compress::No) - } - - fn uncompressed_size(&self) -> usize { - self.serialized_size(Compress::No) - } - - fn serialize_with_mode( - &self, - mut writer: W, - compress: Compress, - ) -> Result<(), SerializationError> { - self.cf_r1cs.serialize_with_mode(&mut writer, compress)?; - self.cs_vp.serialize_with_mode(&mut 
writer, compress)?; - self.cf_cs_vp.serialize_with_mode(&mut writer, compress) - } - - fn serialized_size(&self, compress: Compress) -> usize { - self.cf_r1cs.serialized_size(compress) - + self.cs_vp.serialized_size(compress) - + self.cf_cs_vp.serialized_size(compress) - } -} - -impl< - C1: CurveGroup, - C2: CurveGroup, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - const H: bool, - > VerifierParams -{ - pub fn deserialize_verifier_params( - mut reader: R, - compress: Compress, - validate: Validate, - ccs: &CCS, - poseidon_config: &PoseidonConfig, - ) -> Result { - let cf_r1cs = R1CS::deserialize_with_mode(&mut reader, compress, validate)?; - let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; - Ok(VerifierParams { - ccs: ccs.clone(), - poseidon_config: poseidon_config.clone(), - cf_r1cs, - cs_vp, - cf_cs_vp, - }) - } -} - -#[cfg(test)] -pub mod tests { - use crate::FoldingScheme; - use crate::MultiFolding; - use ark_serialize::{Compress, Validate, Write}; - use std::fs; - - use crate::{ - commitment::{kzg::KZG, pedersen::Pedersen}, - folding::hypernova::{tests::test_ivc_opt, HyperNova}, - frontend::{utils::CubicFCircuit, FCircuit}, - transcript::poseidon::poseidon_canonical_config, - }; - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; - use ark_serialize::CanonicalSerialize; - - #[test] - fn test_serde_hypernova() { - let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); - let (mut hn, (_, verifier_params), _, _, _) = test_ivc_opt::< - KZG, - Pedersen, - false, - >(poseidon_config.clone(), F_circuit); - - let mut writer = vec![]; - assert!(hn.serialize_compressed(&mut writer).is_ok()); - let mut writer = vec![]; - assert!(hn.serialize_uncompressed(&mut writer).is_ok()); - - let mut file = fs::OpenOptions::new() - .create(true) - .write(true) - .open("./hypernova.serde") - .unwrap(); - - file.write_all(&writer).unwrap(); - - let bytes = fs::read("./hypernova.serde").unwrap(); - - let mut hn_deserialized = HyperNova::< - Projective, - GVar, - Projective2, - GVar2, - CubicFCircuit, - KZG, - Pedersen, - 2, - 3, - false, - >::deserialize_hypernova( - bytes.as_slice(), - Compress::No, - Validate::No, - poseidon_config, - hn.cs_pp.clone(), - verifier_params.cs_vp, - hn.cf_cs_pp.clone(), - verifier_params.cf_cs_vp, - ) - .unwrap(); - - assert_eq!(hn.i, hn_deserialized.i); - - let mut rng = ark_std::test_rng(); - for _ in 0..3 { - // prepare some new instances to fold in the multifolding step - let mut lcccs = vec![]; - for j in 0..1 { - let instance_state = vec![Fr::from(j as u32 + 85_u32)]; - let (U, W) = hn - .new_running_instance(&mut rng, instance_state, vec![]) - .unwrap(); - lcccs.push((U, W)); - } - let mut cccs = vec![]; - for j in 0..2 { - let instance_state = vec![Fr::from(j as u32 + 15_u32)]; - let (u, w) = hn - .new_incoming_instance(&mut rng, instance_state, vec![]) - .unwrap(); - cccs.push((u, w)); - } - - hn.prove_step(&mut rng, vec![], Some((lcccs.clone(), cccs.clone()))) - .unwrap(); - hn_deserialized - .prove_step(&mut rng, vec![], Some((lcccs, cccs))) - .unwrap(); - } - - assert_eq!(hn.z_i, hn_deserialized.z_i); - } -} diff --git a/folding-schemes/src/folding/hypernova/utils.rs b/folding-schemes/src/folding/hypernova/utils.rs index 8e0b084..b2df214 100644 --- 
a/folding-schemes/src/folding/hypernova/utils.rs +++ b/folding-schemes/src/folding/hypernova/utils.rs @@ -230,8 +230,10 @@ pub mod tests { let ccs = get_test_ccs(); let z1 = get_test_z(3); let z2 = get_test_z(4); - ccs.check_relation(&z1).unwrap(); - ccs.check_relation(&z2).unwrap(); + let (w1, x1) = ccs.split_z(&z1); + let (w2, x2) = ccs.split_z(&z2); + ccs.check_relation(&w1, &x1).unwrap(); + ccs.check_relation(&w2, &x2).unwrap(); let mut rng = test_rng(); let gamma: Fr = Fr::rand(&mut rng); @@ -282,8 +284,10 @@ pub mod tests { let ccs: CCS = get_test_ccs(); let z1 = get_test_z(3); let z2 = get_test_z(4); - ccs.check_relation(&z1).unwrap(); - ccs.check_relation(&z2).unwrap(); + let (w1, x1) = ccs.split_z(&z1); + let (w2, x2) = ccs.split_z(&z2); + ccs.check_relation(&w1, &x1).unwrap(); + ccs.check_relation(&w2, &x2).unwrap(); let gamma: Fr = Fr::rand(&mut rng); let beta: Vec = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); diff --git a/folding-schemes/src/folding/mod.rs b/folding-schemes/src/folding/mod.rs index 7964fdc..a56f12a 100644 --- a/folding-schemes/src/folding/mod.rs +++ b/folding-schemes/src/folding/mod.rs @@ -3,3 +3,171 @@ pub mod hypernova; pub mod mova; pub mod nova; pub mod protogalaxy; +pub mod traits; + +#[cfg(test)] +pub mod tests { + use ark_ec::CurveGroup; + use ark_ff::PrimeField; + use ark_pallas::{constraints::GVar as GVar1, Fr, Projective as G1}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + use ark_vesta::{constraints::GVar as GVar2, Projective as G2}; + use std::io::Write; + + use crate::commitment::pedersen::Pedersen; + use crate::folding::{ + hypernova::HyperNova, + nova::{Nova, PreprocessorParam as NovaPreprocessorParam}, + protogalaxy::ProtoGalaxy, + }; + use crate::frontend::utils::CubicFCircuit; + use crate::frontend::FCircuit; + use crate::transcript::poseidon::poseidon_canonical_config; + use crate::Error; + use crate::FoldingScheme; + + /// tests the IVC proofs and its serializers for the 3 implemented IVCs: Nova, HyperNova and + /// ProtoGalaxy. 
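    /// A sketch of the round-trip exercised for each scheme below (assuming the Pedersen-based
    /// setups and the `CubicFCircuit` step function defined in this test):
    ///
    ///   let ivc_proof = fs.ivc_proof();                      // extract a self-contained proof
    ///   FS::verify(fs_params.1.clone(), ivc_proof.clone())?; // verify it natively
    ///   let mut bytes = vec![];
    ///   ivc_proof.serialize_compressed(&mut bytes)?;         // ship it as bytes
    ///   let proof = FS::IVCProof::deserialize_compressed(bytes.as_slice())?;
    ///   let fs2 = FS::from_ivc_proof(proof, (), fs_params.clone())?; // resume folding from it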
+ #[test] + fn test_serialize_ivc_nova_hypernova_protogalaxy() { + let poseidon_config = poseidon_canonical_config::(); + type FC = CubicFCircuit; + let f_circuit = FC::new(()).unwrap(); + + // test Nova + type N = Nova, Pedersen, false>; + let prep_param = NovaPreprocessorParam::new(poseidon_config.clone(), f_circuit); + test_serialize_ivc_opt::("nova".to_string(), prep_param.clone()).unwrap(); + + // test HyperNova + type HN = HyperNova< + G1, + GVar1, + G2, + GVar2, + FC, + Pedersen, + Pedersen, + 1, // mu + 1, // nu + false, + >; + test_serialize_ivc_opt::("hypernova".to_string(), prep_param).unwrap(); + + // test ProtoGalaxy + type P = ProtoGalaxy, Pedersen>; + let prep_param = (poseidon_config, f_circuit); + test_serialize_ivc_opt::("protogalaxy".to_string(), prep_param).unwrap(); + } + + fn test_serialize_ivc_opt< + C1: CurveGroup, + C2: CurveGroup, + FC: FCircuit, + FS: FoldingScheme, + >( + name: String, + prep_param: FS::PreprocessorParam, + ) -> Result<(), Error> + where + C1: CurveGroup, + C2::BaseField: PrimeField, + FC: FCircuit, + { + let mut rng = ark_std::test_rng(); + let F_circuit = FC::new(())?; + + let fs_params = FS::preprocess(&mut rng, &prep_param)?; + + let z_0 = vec![C1::ScalarField::from(3_u32)]; + let mut fs = FS::init(&fs_params, F_circuit, z_0.clone()).unwrap(); + + // perform multiple IVC steps (internally folding) + let num_steps: usize = 3; + for _ in 0..num_steps { + fs.prove_step(&mut rng, vec![], None).unwrap(); + } + + // verify the IVCProof + let ivc_proof: FS::IVCProof = fs.ivc_proof(); + FS::verify(fs_params.1.clone(), ivc_proof.clone()).unwrap(); + + // serialize the IVCProof and store it in a file + let mut writer = vec![]; + assert!(ivc_proof.serialize_compressed(&mut writer).is_ok()); + + let mut file = std::fs::OpenOptions::new() + .create(true) + .write(true) + .open(format!("./ivc_proof-{}.serialized", name)) + .unwrap(); + file.write_all(&writer).unwrap(); + + // read the IVCProof from the file deserializing it + let bytes = std::fs::read(format!("./ivc_proof-{}.serialized", name)).unwrap(); + let deserialized_ivc_proof = + FS::IVCProof::deserialize_compressed(bytes.as_slice()).unwrap(); + // verify deserialized IVCProof + FS::verify(fs_params.1.clone(), deserialized_ivc_proof.clone()).unwrap(); + + // build the FS from the given IVCProof, FC::Params, ProverParams and VerifierParams + let mut new_fs = FS::from_ivc_proof(deserialized_ivc_proof, (), fs_params.clone()).unwrap(); + + // serialize the Nova params + let mut fs_pp_serialized = vec![]; + fs_params + .0 + .serialize_compressed(&mut fs_pp_serialized) + .unwrap(); + let mut fs_vp_serialized = vec![]; + fs_params + .1 + .serialize_compressed(&mut fs_vp_serialized) + .unwrap(); + + // deserialize the Nova params. 
This would be done by the client reading from a file + let _fs_pp_deserialized = FS::pp_deserialize_with_mode( + &mut fs_pp_serialized.as_slice(), + ark_serialize::Compress::Yes, + ark_serialize::Validate::Yes, + (), // FCircuit's Params + ) + .unwrap(); + + // perform several IVC steps on both the original FS instance and the recovered from the + // serialization new FS instance + let num_steps: usize = 3; + for _ in 0..num_steps { + new_fs.prove_step(&mut rng, vec![], None).unwrap(); + fs.prove_step(&mut rng, vec![], None).unwrap(); + } + + // check that the IVCProofs from both FS instances are equal + assert_eq!(new_fs.ivc_proof(), fs.ivc_proof()); + + let fs_vp_deserialized = FS::vp_deserialize_with_mode( + &mut fs_vp_serialized.as_slice(), + ark_serialize::Compress::Yes, + ark_serialize::Validate::Yes, + (), // fcircuit_params + ) + .unwrap(); + + // get the IVCProof + let ivc_proof: FS::IVCProof = new_fs.ivc_proof(); + + // serialize IVCProof + let mut ivc_proof_serialized = vec![]; + assert!(ivc_proof + .serialize_compressed(&mut ivc_proof_serialized) + .is_ok()); + // deserialize IVCProof + let ivc_proof_deserialized = + FS::IVCProof::deserialize_compressed(ivc_proof_serialized.as_slice()).unwrap(); + + // verify the last IVCProof from the recovered from serialization FS + FS::verify(fs_vp_deserialized.clone(), ivc_proof_deserialized).unwrap(); + + Ok(()) + } +} diff --git a/folding-schemes/src/folding/nova/circuits.rs b/folding-schemes/src/folding/nova/circuits.rs index a506c8c..b657b61 100644 --- a/folding-schemes/src/folding/nova/circuits.rs +++ b/folding-schemes/src/folding/nova/circuits.rs @@ -21,7 +21,6 @@ use ark_std::{fmt::Debug, One, Zero}; use core::{borrow::Borrow, marker::PhantomData}; use super::{CommittedInstance, NovaCycleFoldConfig}; -use crate::constants::NOVA_N_BITS_RO; use crate::folding::circuits::{ cyclefold::{ CycleFoldChallengeGadget, CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, @@ -32,15 +31,16 @@ use crate::folding::circuits::{ }; use crate::frontend::FCircuit; use crate::transcript::{AbsorbNonNativeGadget, Transcript, TranscriptVar}; +use crate::{ + constants::NOVA_N_BITS_RO, + folding::traits::{CommittedInstanceVarOps, Dummy}, +}; /// CommittedInstanceVar contains the u, x, cmE and cmW values which are folded on the main Nova /// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are /// represented non-natively over the constraint field. #[derive(Debug, Clone)] -pub struct CommittedInstanceVar -where - ::BaseField: ark_ff::PrimeField, -{ +pub struct CommittedInstanceVar { pub u: FpVar, pub x: Vec>, pub cmE: NonNativeAffineVar, @@ -50,7 +50,6 @@ where impl AllocVar, CF1> for CommittedInstanceVar where C: CurveGroup, - ::BaseField: PrimeField, { fn new_variable>>( cs: impl Into>>, @@ -80,7 +79,7 @@ where ::BaseField: ark_ff::PrimeField, { fn to_sponge_bytes(&self) -> Result>, SynthesisError> { - unimplemented!() + FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) } fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { @@ -94,35 +93,27 @@ where } } -impl CommittedInstanceVar -where - C: CurveGroup, - ::ScalarField: Absorb, - ::BaseField: ark_ff::PrimeField, -{ - /// hash implements the committed instance hash compatible with the native implementation from - /// CommittedInstance.hash. - /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the - /// `CommittedInstance`. 
- /// Additionally it returns the vector of the field elements from the self parameters, so they - /// can be reused in other gadgets avoiding recalculating (reconstraining) them. - #[allow(clippy::type_complexity)] - pub fn hash, S>>( - self, - sponge: &T, - pp_hash: FpVar>, - i: FpVar>, - z_0: Vec>>, - z_i: Vec>>, - ) -> Result<(FpVar>, Vec>>), SynthesisError> { - let mut sponge = sponge.clone(); - let U_vec = self.to_sponge_field_elements()?; - sponge.absorb(&pp_hash)?; - sponge.absorb(&i)?; - sponge.absorb(&z_0)?; - sponge.absorb(&z_i)?; - sponge.absorb(&U_vec)?; - Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) +impl CommittedInstanceVarOps for CommittedInstanceVar { + type PointVar = NonNativeAffineVar; + + fn get_commitments(&self) -> Vec { + vec![self.cmW.clone(), self.cmE.clone()] + } + + fn get_public_inputs(&self) -> &[FpVar>] { + &self.x + } + + fn enforce_incoming(&self) -> Result<(), SynthesisError> { + let zero = NonNativeUintVar::new_constant(ConstraintSystemRef::None, CF2::::zero())?; + self.cmE.x.enforce_equal_unaligned(&zero)?; + self.cmE.y.enforce_equal_unaligned(&zero)?; + self.u.enforce_equal(&FpVar::one()) + } + + fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> { + self.u.enforce_equal(&other.u)?; + self.x.enforce_equal(&other.x) } } @@ -177,10 +168,11 @@ where /// ChallengeGadget computes the RO challenge used for the Nova instances NIFS, it contains a /// rust-native and a in-circuit compatible versions. -pub struct ChallengeGadget { +pub struct ChallengeGadget { _c: PhantomData, + _ci: PhantomData, } -impl ChallengeGadget +impl ChallengeGadget where C: CurveGroup, ::BaseField: PrimeField, @@ -189,14 +181,17 @@ where pub fn get_challenge_native>( transcript: &mut T, pp_hash: C::ScalarField, // public params hash - U_i: CommittedInstance, - u_i: CommittedInstance, - cmT: C, + U_i: &CI, + u_i: &CI, + cmT: Option<&C>, ) -> Vec { transcript.absorb(&pp_hash); transcript.absorb(&U_i); transcript.absorb(&u_i); - transcript.absorb_nonnative(&cmT); + // in the Nova case we absorb the cmT, in Ova case we don't since it is not used. + if let Some(cmT_value) = cmT { + transcript.absorb_nonnative(cmT_value); + } transcript.squeeze_bits(NOVA_N_BITS_RO) } @@ -206,12 +201,15 @@ where pp_hash: FpVar>, // public params hash U_i_vec: Vec>>, // apready processed input, so we don't have to recompute these values u_i: CommittedInstanceVar, - cmT: NonNativeAffineVar, + cmT: Option>, ) -> Result>, SynthesisError> { transcript.absorb(&pp_hash)?; transcript.absorb(&U_i_vec)?; transcript.absorb(&u_i)?; - transcript.absorb_nonnative(&cmT)?; + // in the Nova case we absorb the cmT, in Ova case we don't since it is not used. + if let Some(cmT_value) = cmT { + transcript.absorb_nonnative(&cmT_value)?; + } transcript.squeeze_bits(NOVA_N_BITS_RO) } } @@ -359,24 +357,12 @@ where // `transcript` is for challenge generation. let mut transcript = sponge.clone(); - // get z_{i+1} from the F circuit - let i_usize = self.i_usize.unwrap_or(0); - let z_i1 = - self.F - .generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?; - let is_basecase = i.is_zero()?; // Primary Part // P.1. 
Compute u_i.x // u_i.x[0] = H(i, z_0, z_i, U_i) - let (u_i_x, U_i_vec) = U_i.clone().hash( - &sponge, - pp_hash.clone(), - i.clone(), - z_0.clone(), - z_i.clone(), - )?; + let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?; // u_i.x[1] = H(cf_U_i) let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; @@ -397,12 +383,12 @@ where // P.3. nifs.verify, obtains U_{i+1} by folding u_i & U_i . // compute r = H(u_i, U_i, cmT) - let r_bits = ChallengeGadget::::get_challenge_gadget( + let r_bits = ChallengeGadget::>::get_challenge_gadget( &mut transcript, pp_hash.clone(), U_i_vec, u_i.clone(), - cmT.clone(), + Some(cmT.clone()), )?; let r = Boolean::le_bits_to_fp_var(&r_bits)?; // Also convert r_bits to a `NonNativeFieldVar` @@ -421,21 +407,28 @@ where U_i1.cmW = U_i1_cmW; // P.4.a compute and check the first output of F' + + // get z_{i+1} from the F circuit + let i_usize = self.i_usize.unwrap_or(0); + let z_i1 = self + .F + .generate_step_constraints(cs.clone(), i_usize, z_i, external_inputs)?; + // Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot}) // Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1}) let (u_i1_x, _) = U_i1.clone().hash( &sponge, - pp_hash.clone(), - i + FpVar::>::one(), - z_0.clone(), - z_i1.clone(), + &pp_hash, + &(i + FpVar::>::one()), + &z_0, + &z_i1, )?; let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash( &sponge, - pp_hash.clone(), - FpVar::>::one(), - z_0.clone(), - z_i1.clone(), + &pp_hash, + &FpVar::>::one(), + &z_0, + &z_i1, )?; let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; @@ -536,8 +529,9 @@ pub mod tests { use ark_std::UniformRand; use crate::commitment::pedersen::Pedersen; - use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs; use crate::folding::nova::nifs::NIFS; + use crate::folding::nova::traits::NIFSTrait; + use crate::folding::traits::CommittedInstanceOps; use crate::transcript::poseidon::poseidon_canonical_config; #[test] @@ -563,10 +557,22 @@ pub mod tests { #[test] fn test_nifs_gadget() { - let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, _, r_Fr) = prepare_simple_fold_inputs(); + let mut rng = ark_std::test_rng(); - let ci3_verifier = NIFS::>::verify(r_Fr, &ci1, &ci2, &cmT); - assert_eq!(ci3_verifier, ci3); + // prepare the committed instances to test in-circuit + let ci: Vec> = (0..2) + .into_iter() + .map(|_| CommittedInstance:: { + cmE: Projective::rand(&mut rng), + u: Fr::rand(&mut rng), + cmW: Projective::rand(&mut rng), + x: vec![Fr::rand(&mut rng); 1], + }) + .collect(); + let (ci1, ci2) = (ci[0].clone(), ci[1].clone()); + let r_Fr = Fr::rand(&mut rng); + let cmT = Projective::rand(&mut rng); + let ci3 = NIFS::>::verify(r_Fr, &ci1, &ci2, &cmT); let cs = ConstraintSystem::::new_ref(); @@ -591,6 +597,36 @@ pub mod tests { assert!(cs.is_satisfied().unwrap()); } + /// test that checks the native CommittedInstance.to_sponge_{bytes,field_elements} + /// vs the R1CS constraints version + #[test] + pub fn test_committed_instance_to_sponge_preimage() { + let mut rng = ark_std::test_rng(); + + let ci = CommittedInstance:: { + cmE: Projective::rand(&mut rng), + u: Fr::rand(&mut rng), + cmW: Projective::rand(&mut rng), + x: vec![Fr::rand(&mut rng); 1], + }; + + let bytes = ci.to_sponge_bytes_as_vec(); + let field_elements = ci.to_sponge_field_elements_as_vec(); + + let cs = ConstraintSystem::::new_ref(); + + let ciVar = + 
CommittedInstanceVar::::new_witness(cs.clone(), || Ok(ci.clone())).unwrap(); + let bytes_var = ciVar.to_sponge_bytes().unwrap(); + let field_elements_var = ciVar.to_sponge_field_elements().unwrap(); + + assert!(cs.is_satisfied().unwrap()); + + // check that the natively computed and in-circuit computed hashes match + assert_eq!(bytes_var.value().unwrap(), bytes); + assert_eq!(field_elements_var.value().unwrap(), field_elements); + } + #[test] fn test_committed_instance_hash() { let mut rng = ark_std::test_rng(); @@ -609,7 +645,7 @@ pub mod tests { }; // compute the CommittedInstance hash natively - let h = ci.hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone()); + let h = ci.hash(&sponge, pp_hash, i, &z_0, &z_i); let cs = ConstraintSystem::::new_ref(); @@ -624,7 +660,7 @@ pub mod tests { // compute the CommittedInstance hash in-circuit let (hVar, _) = ciVar - .hash(&sponge, pp_hashVar, iVar, z_0Var, z_iVar) + .hash(&sponge, &pp_hashVar, &iVar, &z_0Var, &z_iVar) .unwrap(); assert!(cs.is_satisfied().unwrap()); @@ -656,13 +692,14 @@ pub mod tests { let pp_hash = Fr::from(42u32); // only for testing // compute the challenge natively - let r_bits = ChallengeGadget::::get_challenge_native( - &mut transcript, - pp_hash, - U_i.clone(), - u_i.clone(), - cmT, - ); + let r_bits = + ChallengeGadget::>::get_challenge_native( + &mut transcript, + pp_hash, + &U_i, + &u_i, + Some(&cmT), + ); let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); let cs = ConstraintSystem::::new_ref(); @@ -684,14 +721,15 @@ pub mod tests { U_iVar.cmW.to_constraint_field().unwrap(), ] .concat(); - let r_bitsVar = ChallengeGadget::::get_challenge_gadget( - &mut transcriptVar, - pp_hashVar, - U_iVar_vec, - u_iVar, - cmTVar, - ) - .unwrap(); + let r_bitsVar = + ChallengeGadget::>::get_challenge_gadget( + &mut transcriptVar, + pp_hashVar, + U_iVar_vec, + u_iVar, + Some(cmTVar), + ) + .unwrap(); assert!(cs.is_satisfied().unwrap()); // check that the natively computed and in-circuit computed hashes match diff --git a/folding-schemes/src/folding/nova/decider.rs b/folding-schemes/src/folding/nova/decider.rs new file mode 100644 index 0000000..901f152 --- /dev/null +++ b/folding-schemes/src/folding/nova/decider.rs @@ -0,0 +1,492 @@ +/// This file implements the offchain decider. For ethereum use cases, use the +/// DeciderEth from decider_eth.rs file. 
+/// More details can be found at the documentation page: +/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::{AffineRepr, CurveGroup, Group}; +use ark_ff::{BigInteger, PrimeField}; +use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_snark::SNARK; +use ark_std::rand::{CryptoRng, RngCore}; +use ark_std::{One, Zero}; +use core::marker::PhantomData; + +use super::decider_circuits::{DeciderCircuit1, DeciderCircuit2}; +use super::{nifs::NIFS, traits::NIFSTrait, CommittedInstance, Nova}; +use crate::commitment::CommitmentScheme; +use crate::folding::circuits::{ + cyclefold::CycleFoldCommittedInstance, + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, + CF2, +}; +use crate::frontend::FCircuit; +use crate::Error; +use crate::{Decider as DeciderTrait, FoldingScheme}; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Proof +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + S1: SNARK, + S2: SNARK, +{ + c1_snark_proof: S1::Proof, + c2_snark_proof: S2::Proof, + cs1_proofs: [CS1::Proof; 2], + cs2_proofs: [CS2::Proof; 2], + // cmT and r are values for the last fold, U_{i+1}=NIFS.V(r, U_i, u_i, cmT), and they are + // checked in-circuit + cmT: C1, + r: C1::ScalarField, + // cyclefold committed instance + cf_U_i: CycleFoldCommittedInstance, + // the CS challenges are provided by the prover, but in-circuit they are checked to match the + // in-circuit computed computed ones. + cs1_challenges: [C1::ScalarField; 2], + cs2_challenges: [C2::ScalarField; 2], +} + +#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct ProverParam +where + CS1_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize, + S1_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize, + CS2_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize, + S2_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize, +{ + pub c1_snark_pp: S1_ProvingKey, + pub c1_cs_pp: CS1_ProvingKey, + pub c2_snark_pp: S2_ProvingKey, + pub c2_cs_pp: CS2_ProvingKey, +} + +#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct VerifierParam +where + C1: CurveGroup, + CS1_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, + S1_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, + CS2_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, + S2_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize, +{ + pub pp_hash: C1::ScalarField, + pub c1_snark_vp: S1_VerifyingKey, + pub c1_cs_vp: CS1_VerifyingKey, + pub c2_snark_vp: S2_VerifyingKey, + pub c2_cs_vp: CS2_VerifyingKey, +} + +/// Onchain Decider, for ethereum use cases +#[derive(Clone, Debug)] +pub struct Decider { + _c1: PhantomData, + _gc1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + _fc: PhantomData, + _cs1: PhantomData, + _cs2: PhantomData, + _s1: PhantomData, + _s2: PhantomData, + _fs: PhantomData, +} + +impl DeciderTrait + for Decider +where + C1: CurveGroup, + C2: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + GC2: CurveVar> + ToConstraintFieldGadget>, + FC: FCircuit, + CS1: CommitmentScheme< + C1, + ProverChallenge = C1::ScalarField, + Challenge = C1::ScalarField, + Proof = crate::commitment::kzg::Proof, + >, + CS2: CommitmentScheme< + C2, + ProverChallenge = 
C2::ScalarField, + Challenge = C2::ScalarField, + Proof = crate::commitment::kzg::Proof, + >, + S1: SNARK, + S2: SNARK, + FS: FoldingScheme, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + for<'b> &'b GC1: GroupOpsBounds<'b, C1, GC1>, + for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>, + // constrain FS into Nova, since this is a Decider specifically for Nova + Nova: From, + crate::folding::nova::ProverParams: + From<>::ProverParam>, + crate::folding::nova::VerifierParams: + From<>::VerifierParam>, +{ + type PreprocessorParam = (FS::ProverParam, FS::VerifierParam); + type ProverParam = + ProverParam; + type Proof = Proof; + type VerifierParam = VerifierParam< + C1, + CS1::VerifierParams, + S1::VerifyingKey, + CS2::VerifierParams, + S2::VerifyingKey, + >; + type PublicInput = Vec; + type CommittedInstance = CommittedInstance; + + fn preprocess( + mut rng: impl RngCore + CryptoRng, + prep_param: Self::PreprocessorParam, + fs: FS, + ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { + let circuit1 = DeciderCircuit1::::from_nova::( + fs.clone().into(), + )?; + let circuit2 = + DeciderCircuit2::::from_nova::(fs.into())?; + + // get the Groth16 specific setup for the circuits + let (c1_g16_pk, c1_g16_vk) = S1::circuit_specific_setup(circuit1, &mut rng).unwrap(); + let (c2_g16_pk, c2_g16_vk) = S2::circuit_specific_setup(circuit2, &mut rng).unwrap(); + + // get the FoldingScheme prover & verifier params from Nova + #[allow(clippy::type_complexity)] + let nova_pp: as FoldingScheme< + C1, + C2, + FC, + >>::ProverParam = prep_param.0.clone().into(); + #[allow(clippy::type_complexity)] + let nova_vp: as FoldingScheme< + C1, + C2, + FC, + >>::VerifierParam = prep_param.1.clone().into(); + + let pp_hash = nova_vp.pp_hash()?; + let pp = Self::ProverParam { + c1_snark_pp: c1_g16_pk, + c1_cs_pp: nova_pp.cs_pp, + c2_snark_pp: c2_g16_pk, + c2_cs_pp: nova_pp.cf_cs_pp, + }; + let vp = Self::VerifierParam { + pp_hash, + c1_snark_vp: c1_g16_vk, + c1_cs_vp: nova_vp.cs_vp, + c2_snark_vp: c2_g16_vk, + c2_cs_vp: nova_vp.cf_cs_vp, + }; + Ok((pp, vp)) + } + + fn prove( + mut rng: impl RngCore + CryptoRng, + pp: Self::ProverParam, + fs: FS, + ) -> Result { + let circuit1 = DeciderCircuit1::::from_nova::( + fs.clone().into(), + )?; + let circuit2 = + DeciderCircuit2::::from_nova::(fs.into())?; + + let c1_snark_proof = S1::prove(&pp.c1_snark_pp, circuit1.clone(), &mut rng) + .map_err(|e| Error::Other(e.to_string()))?; + let c2_snark_proof = S2::prove(&pp.c2_snark_pp, circuit2.clone(), &mut rng) + .map_err(|e| Error::Other(e.to_string()))?; + + let cmT = circuit1.cmT.unwrap(); + let r_Fr = circuit1.r.unwrap(); + let W_i1 = circuit1.W_i1.unwrap(); + let cf_W_i = circuit2.cf_W_i.unwrap(); + + // get the challenges that have been already computed when preparing the circuits inputs in + // the above `from_nova` calls + let challenge_W = circuit1 + .cs_c_W + .ok_or(Error::MissingValue("cs_c_W".to_string()))?; + let challenge_E = circuit1 + .cs_c_E + .ok_or(Error::MissingValue("cs_c_E".to_string()))?; + let c2_challenge_W = circuit2 + .cs_c_W + .ok_or(Error::MissingValue("c2's cs_c_W".to_string()))?; + let c2_challenge_E = circuit2 + .cs_c_E + .ok_or(Error::MissingValue("c2's cs_c_E".to_string()))?; + + // generate CommitmentScheme proofs for the main instance + let U_cmW_proof = CS1::prove_with_challenge( + &pp.c1_cs_pp, + challenge_W, + &W_i1.W, + &C1::ScalarField::zero(), + None, + )?; + let U_cmE_proof = CS1::prove_with_challenge( + 
&pp.c1_cs_pp, + challenge_E, + &W_i1.E, + &C1::ScalarField::zero(), + None, + )?; + // CS proofs for the CycleFold instance + let cf_cmW_proof = CS2::prove_with_challenge( + &pp.c2_cs_pp, + c2_challenge_W, + &cf_W_i.W, + &C2::ScalarField::zero(), + None, + )?; + let cf_cmE_proof = CS2::prove_with_challenge( + &pp.c2_cs_pp, + c2_challenge_E, + &cf_W_i.E, + &C2::ScalarField::zero(), + None, + )?; + + Ok(Self::Proof { + c1_snark_proof, + c2_snark_proof, + cs1_proofs: [U_cmW_proof, U_cmE_proof], + cs2_proofs: [cf_cmW_proof, cf_cmE_proof], + cmT, + r: r_Fr, + cf_U_i: circuit1.cf_U_i.unwrap(), + cs1_challenges: [challenge_W, challenge_E], + cs2_challenges: [c2_challenge_W, c2_challenge_E], + }) + } + + fn verify( + vp: Self::VerifierParam, + i: C1::ScalarField, + z_0: Vec, + z_i: Vec, + running_instance: &Self::CommittedInstance, + incoming_instance: &Self::CommittedInstance, + proof: &Self::Proof, + ) -> Result { + if i <= C1::ScalarField::one() { + return Err(Error::NotEnoughSteps); + } + + // compute U = U_{d+1}= NIFS.V(U_d, u_d, cmT) + let U = NIFS::::verify(proof.r, running_instance, incoming_instance, &proof.cmT); + + let (cmE_x, cmE_y) = NonNativeAffineVar::inputize(U.cmE)?; + let (cmW_x, cmW_y) = NonNativeAffineVar::inputize(U.cmW)?; + let (cmT_x, cmT_y) = NonNativeAffineVar::inputize(proof.cmT)?; + + let zero = (&C2::BaseField::zero(), &C2::BaseField::zero()); + let cmE_affine = proof.cf_U_i.cmE.into_affine(); + let cmW_affine = proof.cf_U_i.cmW.into_affine(); + let (cf_cmE_x, cf_cmE_y) = cmE_affine.xy().unwrap_or(zero); + let cf_cmE_z = C1::ScalarField::one(); + let (cf_cmW_x, cf_cmW_y) = cmW_affine.xy().unwrap_or(zero); + let cf_cmW_z = C1::ScalarField::one(); + + // snark proof 1 + let c1_public_input: Vec = [ + vec![vp.pp_hash, i], + z_0, + z_i, + // U_{i+1} values: + vec![U.u], + U.x.clone(), + cmE_x, + cmE_y, + cmW_x, + cmW_y, + // CS1 values: + proof.cs1_challenges.to_vec(), // c_W, c_E + vec![ + proof.cs1_proofs[0].eval, // eval_W + proof.cs1_proofs[1].eval, // eval_E + ], + // cf_U_i values + NonNativeUintVar::>::inputize(proof.cf_U_i.u), + proof + .cf_U_i + .x + .iter() + .flat_map(|&x_i| NonNativeUintVar::>::inputize(x_i)) + .collect::>(), + vec![ + *cf_cmE_x, *cf_cmE_y, cf_cmE_z, *cf_cmW_x, *cf_cmW_y, cf_cmW_z, + ], + // NIFS values: + cmT_x, + cmT_y, + vec![proof.r], + ] + .concat(); + + let c1_snark_v = S1::verify(&vp.c1_snark_vp, &c1_public_input, &proof.c1_snark_proof) + .map_err(|e| Error::Other(e.to_string()))?; + if !c1_snark_v { + return Err(Error::SNARKVerificationFail); + } + + let (cf2_cmE_x, cf2_cmE_y) = NonNativeAffineVar::inputize(proof.cf_U_i.cmE)?; + let (cf2_cmW_x, cf2_cmW_y) = NonNativeAffineVar::inputize(proof.cf_U_i.cmW)?; + + // snark proof 2 + // migrate pp_hash from C1::Fr to C1::Fq + let pp_hash_Fq = + C2::ScalarField::from_le_bytes_mod_order(&vp.pp_hash.into_bigint().to_bytes_le()); + let c2_public_input: Vec = [ + vec![pp_hash_Fq], + vec![proof.cf_U_i.u], + proof.cf_U_i.x.clone(), + cf2_cmE_x, + cf2_cmE_y, + cf2_cmW_x, + cf2_cmW_y, + proof.cs2_challenges.to_vec(), + vec![ + proof.cs2_proofs[0].eval, // eval_W + proof.cs2_proofs[1].eval, // eval_E + ], + ] + .concat(); + + let c2_snark_v = S2::verify(&vp.c2_snark_vp, &c2_public_input, &proof.c2_snark_proof) + .map_err(|e| Error::Other(e.to_string()))?; + if !c2_snark_v { + return Err(Error::SNARKVerificationFail); + } + + // check C1 commitments (main instance commitments) + CS1::verify_with_challenge( + &vp.c1_cs_vp, + proof.cs1_challenges[0], + &U.cmW, + &proof.cs1_proofs[0], + )?; + 
CS1::verify_with_challenge( + &vp.c1_cs_vp, + proof.cs1_challenges[1], + &U.cmE, + &proof.cs1_proofs[1], + )?; + + // check C2 commitments (CycleFold instance commitments) + CS2::verify_with_challenge( + &vp.c2_cs_vp, + proof.cs2_challenges[0], + &proof.cf_U_i.cmW, + &proof.cs2_proofs[0], + )?; + CS2::verify_with_challenge( + &vp.c2_cs_vp, + proof.cs2_challenges[1], + &proof.cf_U_i.cmE, + &proof.cs2_proofs[1], + )?; + + Ok(true) + } +} + +#[cfg(test)] +pub mod tests { + use ark_groth16::Groth16; + + // Note: do not use the MNTx_298 curves in practice, these are just for tests. Use the MNTx_753 + // curves instead. + use ark_mnt4_298::{ + constraints::G1Var as GVar, Fr, G1Projective as Projective, MNT4_298 as MNT4, + }; + use ark_mnt6_298::{ + constraints::G1Var as GVar2, G1Projective as Projective2, MNT6_298 as MNT6, + }; + use std::time::Instant; + + use super::*; + use crate::commitment::kzg::KZG; + use crate::folding::nova::PreprocessorParam; + use crate::frontend::utils::CubicFCircuit; + use crate::transcript::poseidon::poseidon_canonical_config; + + #[test] + fn test_decider() { + // use Nova as FoldingScheme + type N = Nova< + Projective, + GVar, + Projective2, + GVar2, + CubicFCircuit, + KZG<'static, MNT4>, + KZG<'static, MNT6>, + false, + >; + type D = Decider< + Projective, + GVar, + Projective2, + GVar2, + CubicFCircuit, + KZG<'static, MNT4>, + KZG<'static, MNT6>, + Groth16, + Groth16, + N, // here we define the FoldingScheme to use + >; + + let mut rng = ark_std::test_rng(); + let poseidon_config = poseidon_canonical_config::(); + + let F_circuit = CubicFCircuit::::new(()).unwrap(); + let z_0 = vec![Fr::from(3_u32)]; + + let start = Instant::now(); + let prep_param = PreprocessorParam::new(poseidon_config, F_circuit); + let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); + println!("Nova preprocess, {:?}", start.elapsed()); + + let start = Instant::now(); + let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); + println!("Nova initialized, {:?}", start.elapsed()); + let start = Instant::now(); + nova.prove_step(&mut rng, vec![], None).unwrap(); + println!("prove_step, {:?}", start.elapsed()); + nova.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step + + let mut rng = rand::rngs::OsRng; + + // prepare the Decider prover & verifier params + let start = Instant::now(); + let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap(); + println!("Decider preprocess, {:?}", start.elapsed()); + + // decider proof generation + let start = Instant::now(); + let proof = D::prove(rng, decider_pp, nova.clone()).unwrap(); + println!("Decider prove, {:?}", start.elapsed()); + + // decider proof verification + let start = Instant::now(); + let verified = D::verify( + decider_vp, nova.i, nova.z_0, nova.z_i, &nova.U_i, &nova.u_i, &proof, + ) + .unwrap(); + assert!(verified); + println!("Decider verify, {:?}", start.elapsed()); + } +} diff --git a/folding-schemes/src/folding/nova/decider_circuits.rs b/folding-schemes/src/folding/nova/decider_circuits.rs new file mode 100644 index 0000000..cfd56c1 --- /dev/null +++ b/folding-schemes/src/folding/nova/decider_circuits.rs @@ -0,0 +1,553 @@ +/// This file implements the offchain decider circuit. For ethereum use cases, use the +/// DeciderEthCircuit. 
+/// More details can be found at the documentation page: +/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html +use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, + Absorb, CryptographicSponge, +}; +use ark_ec::{CurveGroup, Group}; +use ark_ff::{BigInteger, PrimeField}; +use ark_poly::Polynomial; +use ark_r1cs_std::{ + alloc::AllocVar, + boolean::Boolean, + eq::EqGadget, + fields::{fp::FpVar, FieldVar}, + groups::GroupOpsBounds, + prelude::CurveVar, + ToConstraintFieldGadget, +}; +use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; +use ark_std::Zero; +use core::marker::PhantomData; + +use super::{ + circuits::{ChallengeGadget, CommittedInstanceVar}, + decider_eth_circuit::{ + evaluate_gadget, KZGChallengesGadget, R1CSVar, RelaxedR1CSGadget, WitnessVar, + }, + nifs::NIFS, + traits::NIFSTrait, + CommittedInstance, Nova, Witness, +}; +use crate::arith::r1cs::R1CS; +use crate::commitment::CommitmentScheme; +use crate::folding::circuits::{ + cyclefold::{ + CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, CycleFoldConfig, + CycleFoldWitness, + }, + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, + CF1, CF2, +}; +use crate::folding::nova::NovaCycleFoldConfig; +use crate::folding::traits::{CommittedInstanceVarOps, Dummy}; +use crate::frontend::FCircuit; +use crate::utils::vec::poly_from_vec; +use crate::Error; + +/// Circuit that implements part of the in-circuit checks needed for the offchain verification over +/// the Curve2's BaseField (=Curve1's ScalarField). +#[derive(Clone, Debug)] +pub struct DeciderCircuit1 +where + C1: CurveGroup, + C2: CurveGroup, + GC2: CurveVar>, +{ + _c1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + + /// E vector's length of the Nova instance witness + pub E_len: usize, + /// E vector's length of the CycleFold instance witness + pub cf_E_len: usize, + /// R1CS of the Augmented Function circuit + pub r1cs: R1CS, + pub poseidon_config: PoseidonConfig>, + /// public params hash + pub pp_hash: Option, + pub i: Option>, + /// initial state + pub z_0: Option>, + /// current i-th state + pub z_i: Option>, + /// Nova instances + pub u_i: Option>, + pub w_i: Option>, + pub U_i: Option>, + pub W_i: Option>, + pub U_i1: Option>, + pub W_i1: Option>, + pub cmT: Option, + pub r: Option, + /// CycleFold running instance + pub cf_U_i: Option>, + + /// Commitment Scheme challenges + pub cs_c_W: Option, + pub cs_c_E: Option, + /// Evaluations of the committed polynomials at the challenge + pub eval_W: Option, + pub eval_E: Option, +} +impl DeciderCircuit1 +where + C1: CurveGroup, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + C2: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>, +{ + pub fn from_nova>( + nova: Nova, + ) -> Result + where + C2: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + GC2: CurveVar> + ToConstraintFieldGadget>, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + { + let mut transcript = PoseidonSponge::::new(&nova.poseidon_config); + // pp_hash is absorbed to transcript at the ChallengeGadget::get_challenge_native call + + // compute the U_{i+1}, W_{i+1} + let (T, cmT) = NIFS::::compute_cmT( + &nova.cs_pp, + &nova.r1cs.clone(), + &nova.w_i.clone(), + &nova.u_i.clone(), + &nova.W_i.clone(), + &nova.U_i.clone(), + )?; + let r_bits = NIFS::::get_challenge( + &mut transcript, 
+ nova.pp_hash, + &nova.U_i, + &nova.u_i, + &cmT, + ); + let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) + .ok_or(Error::OutOfBounds)?; + let (W_i1, U_i1) = + NIFS::::prove(r_Fr, &nova.W_i, &nova.U_i, &nova.w_i, &nova.u_i, &T, &cmT)?; + + // compute the commitment scheme challenges used as inputs in the circuit + let (cs_challenge_W, cs_challenge_E) = + KZGChallengesGadget::::get_challenges_native(&mut transcript, U_i1.clone()); + + // get evals of the committed polys at the challenges + let mut W = W_i1.W.clone(); + W.extend( + std::iter::repeat(C1::ScalarField::zero()) + .take(W_i1.W.len().next_power_of_two() - W_i1.W.len()), + ); + let mut E = W_i1.E.clone(); + E.extend( + std::iter::repeat(C1::ScalarField::zero()) + .take(W_i1.E.len().next_power_of_two() - W_i1.E.len()), + ); + let p_W = poly_from_vec(W.to_vec())?; + let eval_W = p_W.evaluate(&cs_challenge_W); + let p_E = poly_from_vec(E.to_vec())?; + let eval_E = p_E.evaluate(&cs_challenge_E); + + Ok(Self { + _c1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + + E_len: nova.W_i.E.len(), + cf_E_len: nova.cf_W_i.E.len(), + r1cs: nova.r1cs, + poseidon_config: nova.poseidon_config, + pp_hash: Some(nova.pp_hash), + i: Some(nova.i), + z_0: Some(nova.z_0), + z_i: Some(nova.z_i), + u_i: Some(nova.u_i), + w_i: Some(nova.w_i), + U_i: Some(nova.U_i), + W_i: Some(nova.W_i), + U_i1: Some(U_i1), + W_i1: Some(W_i1), + cmT: Some(cmT), + r: Some(r_Fr), + cf_U_i: Some(nova.cf_U_i), + cs_c_W: Some(cs_challenge_W), + cs_c_E: Some(cs_challenge_E), + eval_W: Some(eval_W), + eval_E: Some(eval_E), + }) + } +} + +impl ConstraintSynthesizer> for DeciderCircuit1 +where + C1: CurveGroup, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + C2: CurveGroup, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + C1: CurveGroup, + GC2: CurveVar> + ToConstraintFieldGadget>, + for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + let r1cs = + R1CSVar::, FpVar>>::new_witness(cs.clone(), || { + Ok(self.r1cs.clone()) + })?; + + let pp_hash = FpVar::>::new_input(cs.clone(), || { + Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) + })?; + let i = + FpVar::>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::::zero)))?; + let z_0 = Vec::>>::new_input(cs.clone(), || { + Ok(self.z_0.unwrap_or(vec![CF1::::zero()])) + })?; + let z_i = Vec::>>::new_input(cs.clone(), || { + Ok(self.z_i.unwrap_or(vec![CF1::::zero()])) + })?; + + let u_dummy_native = CommittedInstance::::dummy(&self.r1cs); + let w_dummy_native = Witness::::dummy(&self.r1cs); + + let u_i = CommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(self.u_i.unwrap_or(u_dummy_native.clone())) + })?; + let U_i = CommittedInstanceVar::::new_witness(cs.clone(), || { + Ok(self.U_i.unwrap_or(u_dummy_native.clone())) + })?; + // here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i)) + let U_i1 = CommittedInstanceVar::::new_input(cs.clone(), || { + Ok(self.U_i1.unwrap_or(u_dummy_native.clone())) + })?; + let W_i1 = WitnessVar::::new_witness(cs.clone(), || { + Ok(self.W_i1.unwrap_or(w_dummy_native.clone())) + })?; + + // allocate the inputs for the check 6 + let cs_c_W = FpVar::>::new_input(cs.clone(), || { + Ok(self.cs_c_W.unwrap_or_else(CF1::::zero)) + })?; + let cs_c_E = FpVar::>::new_input(cs.clone(), || { + Ok(self.cs_c_E.unwrap_or_else(CF1::::zero)) + })?; + let eval_W = FpVar::>::new_input(cs.clone(), || { + Ok(self.eval_W.unwrap_or_else(CF1::::zero)) + })?; + let eval_E = 
FpVar::>::new_input(cs.clone(), || { + Ok(self.eval_E.unwrap_or_else(CF1::::zero)) + })?; + + // `sponge` is for digest computation. + let sponge = PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); + // `transcript` is for challenge generation. + let mut transcript = sponge.clone(); + // notice that the `pp_hash` is absorbed inside the ChallengeGadget::get_challenge_gadget call + + // 2. u_i.cmE==cm(0), u_i.u==1 + // Here zero is the x & y coordinates of the zero point affine representation. + let zero = NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::zero())?; + u_i.cmE.x.enforce_equal_unaligned(&zero)?; + u_i.cmE.y.enforce_equal_unaligned(&zero)?; + (u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?; + + // 3.a u_i.x[0] == H(i, z_0, z_i, U_i) + let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?; + (u_i.x[0]).enforce_equal(&u_i_x)?; + + // 3.b u_i.x[1] == H(cf_U_i) + let cf_u_dummy_native = + CycleFoldCommittedInstance::::dummy(NovaCycleFoldConfig::::IO_LEN); + let cf_U_i = CycleFoldCommittedInstanceVar::::new_input(cs.clone(), || { + Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone())) + })?; + let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; + (u_i.x[1]).enforce_equal(&cf_u_i_x)?; + + // 4. check RelaxedR1CS of U_{i+1} + let z_U1: Vec>> = + [vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat(); + RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?; + + // 1.1.a, 5.1 compute NIFS.V and Commitment Scheme challenges. + // We need to ensure the order of challenge generation is the same as + // the native counterpart, so we first compute the challenges here and + // do the actual checks later. + let cmT = + NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; + let r_bits = ChallengeGadget::>::get_challenge_gadget( + &mut transcript, + pp_hash, + U_i_vec, + u_i.clone(), + Some(cmT.clone()), + )?; + // 5.1. + let (incircuit_c_W, incircuit_c_E) = + KZGChallengesGadget::::get_challenges_gadget(&mut transcript, U_i1.clone())?; + incircuit_c_W.enforce_equal(&cs_c_W)?; + incircuit_c_E.enforce_equal(&cs_c_E)?; + + // 5.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E) + let incircuit_eval_W = evaluate_gadget::>(W_i1.W, incircuit_c_W)?; + let incircuit_eval_E = evaluate_gadget::>(W_i1.E, incircuit_c_E)?; + incircuit_eval_W.enforce_equal(&eval_W)?; + incircuit_eval_E.enforce_equal(&eval_E)?; + + // 1.1.b check that the NIFS.V challenge matches the one from the public input (so we avoid + // the verifier computing it) + let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?; + // check that the in-circuit computed r is equal to the inputted r + let r = + FpVar::>::new_input(cs.clone(), || Ok(self.r.unwrap_or_else(CF1::::zero)))?; + r_Fr.enforce_equal(&r)?; + + Ok(()) + } +} + +/// Circuit that implements part of the in-circuit checks needed for the offchain verification over +/// the Curve1's BaseField (=Curve2's ScalarField). +#[derive(Clone, Debug)] +pub struct DeciderCircuit2 +where + C1: CurveGroup, + C2: CurveGroup, +{ + _c1: PhantomData, + _gc1: PhantomData, + _c2: PhantomData, + + /// E vector's length of the CycleFold instance witness + pub cf_E_len: usize, + /// R1CS of the CycleFold circuit + pub cf_r1cs: R1CS, + pub poseidon_config: PoseidonConfig>, + /// public params hash + pub pp_hash: Option, + + /// CycleFold running instance. 
Notice that here we use Nova's CommittedInstance (instead of + /// CycleFoldCommittedInstance), since we are over C2::Fr, so that the CycleFold instances can + /// be computed natively + pub cf_U_i: Option>, + pub cf_W_i: Option>, + /// Commitment Scheme challenges + pub cs_c_W: Option, + pub cs_c_E: Option, + /// Evaluations of the committed polynomials at the challenge + pub eval_W: Option, + pub eval_E: Option, +} +impl DeciderCircuit2 +where + C1: CurveGroup, + C2: CurveGroup, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + GC1: CurveVar> + ToConstraintFieldGadget>, +{ + pub fn from_nova>( + nova: Nova, + ) -> Result + where + GC2: CurveVar> + ToConstraintFieldGadget>, + CS1: CommitmentScheme, + CS2: CommitmentScheme, + { + // compute the Commitment Scheme challenges of the CycleFold instance commitments, used as + // inputs in the circuit + let poseidon_config = + crate::transcript::poseidon::poseidon_canonical_config::(); + let mut transcript = PoseidonSponge::::new(&poseidon_config); + let pp_hash_Fq = + C2::ScalarField::from_le_bytes_mod_order(&nova.pp_hash.into_bigint().to_bytes_le()); + transcript.absorb(&pp_hash_Fq); + + let (cs_challenge_W, cs_challenge_E) = + KZGChallengesGadget::::get_challenges_native(&mut transcript, nova.cf_U_i.clone()); + + // get evals of the committed polynomials at the challenge + let mut W = nova.cf_W_i.W.clone(); + W.extend( + std::iter::repeat(C2::ScalarField::zero()) + .take(nova.cf_W_i.W.len().next_power_of_two() - nova.cf_W_i.W.len()), + ); + let mut E = nova.cf_W_i.E.clone(); + E.extend( + std::iter::repeat(C2::ScalarField::zero()) + .take(nova.cf_W_i.E.len().next_power_of_two() - nova.cf_W_i.E.len()), + ); + let p_W = poly_from_vec(W.to_vec())?; + let eval_W = p_W.evaluate(&cs_challenge_W); + let p_E = poly_from_vec(E.to_vec())?; + let eval_E = p_E.evaluate(&cs_challenge_E); + + Ok(Self { + _c1: PhantomData, + _gc1: PhantomData, + _c2: PhantomData, + + cf_E_len: nova.cf_W_i.E.len(), + cf_r1cs: nova.cf_r1cs, + poseidon_config, + pp_hash: Some(pp_hash_Fq), + + cf_U_i: Some(nova.cf_U_i), + cf_W_i: Some(nova.cf_W_i), + + // CycleFold instance commitments challenges + cs_c_W: Some(cs_challenge_W), + cs_c_E: Some(cs_challenge_E), + eval_W: Some(eval_W), + eval_E: Some(eval_E), + }) + } +} + +impl ConstraintSynthesizer> for DeciderCircuit2 +where + C1: CurveGroup, + C2: CurveGroup, + ::BaseField: PrimeField, + ::BaseField: PrimeField, + ::ScalarField: Absorb, + ::ScalarField: Absorb, + C1: CurveGroup, + GC1: CurveVar> + ToConstraintFieldGadget>, + for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, +{ + fn generate_constraints(self, cs: ConstraintSystemRef>) -> Result<(), SynthesisError> { + let pp_hash = FpVar::>::new_input(cs.clone(), || { + Ok(self.pp_hash.unwrap_or_else(CF1::::zero)) + })?; + + let cf_u_dummy_native = CommittedInstance::::dummy(&self.cf_r1cs); + let w_dummy_native = Witness::::dummy(&self.cf_r1cs); + let cf_U_i = CommittedInstanceVar::::new_input(cs.clone(), || { + Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone())) + })?; + let cf_W_i = WitnessVar::::new_witness(cs.clone(), || { + Ok(self.cf_W_i.unwrap_or(w_dummy_native.clone())) + })?; + + let cf_r1cs = + R1CSVar::, FpVar>>::new_witness(cs.clone(), || { + Ok(self.cf_r1cs.clone()) + })?; + + // 6. 
check RelaxedR1CS of cf_U_i + let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat(); + RelaxedR1CSGadget::check_native(cf_r1cs, cf_W_i.E.clone(), cf_U_i.u.clone(), cf_z_U)?; + + // `transcript` is for challenge generation. + let mut transcript = + PoseidonSpongeVar::::new(cs.clone(), &self.poseidon_config); + transcript.absorb(&pp_hash)?; + + // allocate the inputs for the check 7.1 + let cs_c_W = FpVar::>::new_input(cs.clone(), || { + Ok(self.cs_c_W.unwrap_or_else(CF1::::zero)) + })?; + let cs_c_E = FpVar::>::new_input(cs.clone(), || { + Ok(self.cs_c_E.unwrap_or_else(CF1::::zero)) + })?; + // allocate the inputs for the check 7.2 + let eval_W = FpVar::>::new_input(cs.clone(), || { + Ok(self.eval_W.unwrap_or_else(CF1::::zero)) + })?; + let eval_E = FpVar::>::new_input(cs.clone(), || { + Ok(self.eval_E.unwrap_or_else(CF1::::zero)) + })?; + + // 7.1. check the commitment scheme challenges correct computation + let (incircuit_c_W, incircuit_c_E) = + KZGChallengesGadget::::get_challenges_gadget(&mut transcript, cf_U_i.clone())?; + incircuit_c_W.enforce_equal(&cs_c_W)?; + incircuit_c_E.enforce_equal(&cs_c_E)?; + + // 7.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E) + let incircuit_eval_W = evaluate_gadget::>(cf_W_i.W, incircuit_c_W)?; + let incircuit_eval_E = evaluate_gadget::>(cf_W_i.E, incircuit_c_E)?; + incircuit_eval_W.enforce_equal(&eval_W)?; + incircuit_eval_E.enforce_equal(&eval_E)?; + + Ok(()) + } +} + +#[cfg(test)] +pub mod tests { + use ark_pallas::{constraints::GVar, Fq, Fr, Projective}; + use ark_relations::r1cs::ConstraintSystem; + use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; + + use super::*; + use crate::commitment::pedersen::Pedersen; + use crate::folding::nova::PreprocessorParam; + use crate::frontend::utils::CubicFCircuit; + use crate::transcript::poseidon::poseidon_canonical_config; + use crate::FoldingScheme; + + #[test] + fn test_decider_circuits() { + let mut rng = ark_std::test_rng(); + let poseidon_config = poseidon_canonical_config::(); + + let F_circuit = CubicFCircuit::::new(()).unwrap(); + let z_0 = vec![Fr::from(3_u32)]; + + type N = Nova< + Projective, + GVar, + Projective2, + GVar2, + CubicFCircuit, + Pedersen, + Pedersen, + false, + >; + + let prep_param = PreprocessorParam::< + Projective, + Projective2, + CubicFCircuit, + Pedersen, + Pedersen, + false, + >::new(poseidon_config, F_circuit); + let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); + + // generate a Nova instance and do a step of it + let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); + nova.prove_step(&mut rng, vec![], None).unwrap(); + // verify the IVC + let ivc_proof = nova.ivc_proof(); + N::verify(nova_params.1, ivc_proof).unwrap(); + + // load the DeciderCircuit 1 & 2 from the Nova instance + let decider_circuit1 = + DeciderCircuit1::::from_nova(nova.clone()).unwrap(); + let decider_circuit2 = + DeciderCircuit2::::from_nova(nova).unwrap(); + + // generate the constraints of both circuits and check that are satisfied by the inputs + let cs1 = ConstraintSystem::::new_ref(); + decider_circuit1.generate_constraints(cs1.clone()).unwrap(); + assert!(cs1.is_satisfied().unwrap()); + let cs2 = ConstraintSystem::::new_ref(); + decider_circuit2.generate_constraints(cs2.clone()).unwrap(); + assert!(cs2.is_satisfied().unwrap()); + } +} diff --git a/folding-schemes/src/folding/nova/decider_eth.rs b/folding-schemes/src/folding/nova/decider_eth.rs index 570c351..24d27ed 100644 --- 
a/folding-schemes/src/folding/nova/decider_eth.rs +++ b/folding-schemes/src/folding/nova/decider_eth.rs @@ -1,4 +1,7 @@ -/// This file implements the Nova's onchain (Ethereum's EVM) decider. +/// This file implements the Nova's onchain (Ethereum's EVM) decider. For non-ethereum use cases, +/// the Decider from decider.rs file will be more efficient. +/// More details can be found at the documentation page: +/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html use ark_bn254::Bn254; use ark_crypto_primitives::sponge::Absorb; use ark_ec::{AffineRepr, CurveGroup, Group}; @@ -12,6 +15,7 @@ use ark_std::{One, Zero}; use core::marker::PhantomData; pub use super::decider_eth_circuit::DeciderEthCircuit; +use super::traits::NIFSTrait; use super::{nifs::NIFS, CommittedInstance, Nova}; use crate::commitment::{ kzg::{Proof as KZGProof, KZG}, @@ -71,8 +75,8 @@ impl DeciderTrait for Decider where C1: CurveGroup, - C2: CurveGroup, GC1: CurveVar> + ToConstraintFieldGadget>, + C2: CurveGroup, GC2: CurveVar> + ToConstraintFieldGadget>, FC: FCircuit, // CS1 is a KZG commitment, where challenge is C1::Fr elem @@ -339,9 +343,7 @@ pub mod tests { use super::*; use crate::commitment::pedersen::Pedersen; - use crate::folding::nova::{ - PreprocessorParam, ProverParams as NovaProverParams, VerifierParams as NovaVerifierParams, - }; + use crate::folding::nova::{PreprocessorParam, ProverParams as NovaProverParams}; use crate::frontend::utils::CubicFCircuit; use crate::transcript::poseidon::poseidon_canonical_config; @@ -400,7 +402,7 @@ pub mod tests { let start = Instant::now(); let verified = D::verify( decider_vp.clone(), - nova.i.clone(), + nova.i, nova.z_0.clone(), nova.z_i.clone(), &nova.U_i, @@ -486,13 +488,15 @@ pub mod tests { &mut nova_pp_serialized.as_slice() ) .unwrap(); - let nova_vp_deserialized = NovaVerifierParams::< + let nova_vp_deserialized = , - Pedersen, - >::deserialize_compressed( - &mut nova_vp_serialized.as_slice() + CubicFCircuit, + >>::vp_deserialize_with_mode( + &mut nova_vp_serialized.as_slice(), + ark_serialize::Compress::Yes, + ark_serialize::Validate::Yes, + (), // fcircuit_params ) .unwrap(); @@ -514,7 +518,7 @@ pub mod tests { let start = Instant::now(); let verified = D::verify( decider_vp.clone(), - nova.i.clone(), + nova.i, nova.z_0.clone(), nova.z_i.clone(), &nova.U_i, diff --git a/folding-schemes/src/folding/nova/decider_eth_circuit.rs b/folding-schemes/src/folding/nova/decider_eth_circuit.rs index 25564a1..a1dd809 100644 --- a/folding-schemes/src/folding/nova/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/nova/decider_eth_circuit.rs @@ -1,5 +1,7 @@ /// This file implements the onchain (Ethereum's EVM) decider circuit. For non-ethereum use cases, /// other more efficient approaches can be used. 
+/// More details can be found at the documentation page: +/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html use ark_crypto_primitives::sponge::{ constraints::CryptographicSpongeVar, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, @@ -25,9 +27,9 @@ use core::{borrow::Borrow, marker::PhantomData}; use super::{ circuits::{ChallengeGadget, CommittedInstanceVar}, nifs::NIFS, + traits::NIFSTrait, CommittedInstance, Nova, Witness, }; -use crate::arith::r1cs::R1CS; use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme}; use crate::folding::circuits::{ cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness}, @@ -41,6 +43,10 @@ use crate::utils::{ vec::poly_from_vec, }; use crate::Error; +use crate::{ + arith::r1cs::R1CS, + folding::traits::{CommittedInstanceVarOps, Dummy, WitnessVarOps}, +}; #[derive(Debug, Clone)] pub struct RelaxedR1CSGadget {} @@ -135,7 +141,6 @@ pub struct WitnessVar { impl AllocVar, CF1> for WitnessVar where C: CurveGroup, - ::BaseField: PrimeField, { fn new_variable>>( cs: impl Into>>, @@ -160,6 +165,12 @@ where } } +impl WitnessVarOps for WitnessVar { + fn get_openings(&self) -> Vec<(&[FpVar], FpVar)> { + vec![(&self.E, self.rE.clone()), (&self.W, self.rW.clone())] + } +} + /// Circuit that implements the in-circuit checks needed for the onchain (Ethereum's EVM) /// verification. #[derive(Clone, Debug)] @@ -235,7 +246,7 @@ where let mut transcript = PoseidonSponge::::new(&nova.poseidon_config); // compute the U_{i+1}, W_{i+1} - let (T, cmT) = NIFS::::compute_cmT( + let (aux_p, aux_v) = NIFS::::compute_aux( &nova.cs_pp, &nova.r1cs.clone(), &nova.w_i.clone(), @@ -243,17 +254,18 @@ where &nova.W_i.clone(), &nova.U_i.clone(), )?; - let r_bits = ChallengeGadget::::get_challenge_native( + let cmT = aux_v; + let r_bits = ChallengeGadget::>::get_challenge_native( &mut transcript, nova.pp_hash, - nova.U_i.clone(), - nova.u_i.clone(), - cmT, + &nova.U_i, + &nova.u_i, + Some(&cmT), ); let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) .ok_or(Error::OutOfBounds)?; - let (W_i1, U_i1) = NIFS::::fold_instances( - r_Fr, &nova.W_i, &nova.U_i, &nova.w_i, &nova.u_i, &T, cmT, + let (W_i1, U_i1) = NIFS::::prove( + r_Fr, &nova.W_i, &nova.U_i, &nova.w_i, &nova.u_i, &aux_p, &aux_v, )?; // compute the KZG challenges used as inputs in the circuit @@ -346,11 +358,8 @@ where Ok(self.z_i.unwrap_or(vec![CF1::::zero()])) })?; - let u_dummy_native = CommittedInstance::::dummy(2); - let w_dummy_native = Witness::::dummy( - self.r1cs.A.n_cols - 3, /* (3=2+1, since u_i.x.len=2) */ - self.E_len, - ); + let u_dummy_native = CommittedInstance::::dummy(&self.r1cs); + let w_dummy_native = Witness::::dummy(&self.r1cs); let u_i = CommittedInstanceVar::::new_witness(cs.clone(), || { Ok(self.u_i.unwrap_or(u_dummy_native.clone())) @@ -366,17 +375,18 @@ where Ok(self.W_i1.unwrap_or(w_dummy_native.clone())) })?; - // allocate the inputs for the check 6 + // allocate the inputs for the check 5.1 let kzg_c_W = FpVar::>::new_input(cs.clone(), || { Ok(self.kzg_c_W.unwrap_or_else(CF1::::zero)) })?; let kzg_c_E = FpVar::>::new_input(cs.clone(), || { Ok(self.kzg_c_E.unwrap_or_else(CF1::::zero)) })?; - let _eval_W = FpVar::>::new_input(cs.clone(), || { + // allocate the inputs for the check 5.2 + let eval_W = FpVar::>::new_input(cs.clone(), || { Ok(self.eval_W.unwrap_or_else(CF1::::zero)) })?; - let _eval_E = FpVar::>::new_input(cs.clone(), || { + let eval_E = FpVar::>::new_input(cs.clone(), || { 
Ok(self.eval_E.unwrap_or_else(CF1::::zero)) })?; @@ -385,10 +395,8 @@ where // `transcript` is for challenge generation. let mut transcript = sponge.clone(); - // 1. check RelaxedR1CS of U_{i+1} - let z_U1: Vec>> = - [vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat(); - RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?; + // The following enumeration of the steps matches the one used at the documentation page + // https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html // 2. u_i.cmE==cm(0), u_i.u==1 // Here zero is the x & y coordinates of the zero point affine representation. @@ -398,15 +406,14 @@ where (u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?; // 3.a u_i.x[0] == H(i, z_0, z_i, U_i) - let (u_i_x, U_i_vec) = U_i.clone().hash( - &sponge, - pp_hash.clone(), - i.clone(), - z_0.clone(), - z_i.clone(), - )?; + let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?; (u_i.x[0]).enforce_equal(&u_i_x)?; + // 4. check RelaxedR1CS of U_{i+1} + let z_U1: Vec>> = + [vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat(); + RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?; + #[cfg(feature = "light-test")] log::warn!("[WARNING]: Running with the 'light-test' feature, skipping the big part of the DeciderEthCircuit.\n Only for testing purposes."); @@ -429,10 +436,7 @@ where let cf_u_dummy_native = CycleFoldCommittedInstance::::dummy(NovaCycleFoldConfig::::IO_LEN); - let w_dummy_native = CycleFoldWitness::::dummy( - self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l, - self.cf_E_len, - ); + let w_dummy_native = CycleFoldWitness::::dummy(&self.cf_r1cs); let cf_U_i = CycleFoldCommittedInstanceVar::::new_witness(cs.clone(), || { Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone())) })?; @@ -444,7 +448,7 @@ where let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; (u_i.x[1]).enforce_equal(&cf_u_i_x)?; - // 4. check Pedersen commitments of cf_U_i.{cmE, cmW} + // 7. check Pedersen commitments of cf_U_i.{cmE, cmW} let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?; let G = Vec::::new_constant(cs.clone(), self.cf_pedersen_params.generators)?; let cf_W_i_E_bits: Result>>>, SynthesisError> = @@ -469,43 +473,39 @@ where || Ok(self.cf_r1cs.clone()), )?; - // 5. check RelaxedR1CS of cf_U_i + // 6. check RelaxedR1CS of cf_U_i (CycleFold instance) let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat(); RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?; } - // 8.a, 6.a compute NIFS.V and KZG challenges. + // 1.1.a, 5.1. compute NIFS.V and KZG challenges. // We need to ensure the order of challenge generation is the same as // the native counterpart, so we first compute the challenges here and // do the actual checks later. let cmT = NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; - let r_bits = ChallengeGadget::::get_challenge_gadget( + // 1.1.a + let r_bits = ChallengeGadget::>::get_challenge_gadget( &mut transcript, pp_hash, U_i_vec, u_i.clone(), - cmT.clone(), + Some(cmT), )?; + // 5.1. 
let (incircuit_c_W, incircuit_c_E) = KZGChallengesGadget::::get_challenges_gadget(&mut transcript, U_i1.clone())?; - - // 6.b check KZG challenges incircuit_c_W.enforce_equal(&kzg_c_W)?; incircuit_c_E.enforce_equal(&kzg_c_E)?; - // Check 7 is temporary disabled due - // https://github.com/privacy-scaling-explorations/sonobe/issues/80 - log::warn!("[WARNING]: issue #80 (https://github.com/privacy-scaling-explorations/sonobe/issues/80) is not resolved yet."); - // - // 7. check eval_W==p_W(c_W) and eval_E==p_E(c_E) - // let incircuit_eval_W = evaluate_gadget::>(W_i1.W, incircuit_c_W)?; - // let incircuit_eval_E = evaluate_gadget::>(W_i1.E, incircuit_c_E)?; - // incircuit_eval_W.enforce_equal(&eval_W)?; - // incircuit_eval_E.enforce_equal(&eval_E)?; - - // 8.b check the NIFS.V challenge matches the one from the public input (so we - // avoid the verifier computing it) + // 5.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E) + let incircuit_eval_W = evaluate_gadget::>(W_i1.W, incircuit_c_W)?; + let incircuit_eval_E = evaluate_gadget::>(W_i1.E, incircuit_c_E)?; + incircuit_eval_W.enforce_equal(&eval_W)?; + incircuit_eval_E.enforce_equal(&eval_E)?; + + // 1.1.b check that the NIFS.V challenge matches the one from the public input (so we avoid + // the verifier computing it) let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?; // check that the in-circuit computed r is equal to the inputted r let r = @@ -519,13 +519,11 @@ where /// Interpolates the polynomial from the given vector, and then returns it's evaluation at the /// given point. #[allow(unused)] // unused while check 7 is disabled -fn evaluate_gadget( - v: Vec>, +pub fn evaluate_gadget( + mut v: Vec>, point: FpVar, ) -> Result, SynthesisError> { - if !v.len().is_power_of_two() { - return Err(SynthesisError::Unsatisfiable); - } + v.resize(v.len().next_power_of_two(), FpVar::zero()); let n = v.len() as u64; let gen = F::get_root_of_unity(n).unwrap(); let domain = Radix2DomainVar::new(gen, log2(v.len()) as u64, FpVar::one()).unwrap(); @@ -591,7 +589,7 @@ pub mod tests { use ark_relations::r1cs::ConstraintSystem; use ark_std::{ rand::{thread_rng, Rng}, - One, UniformRand, + UniformRand, }; use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; @@ -600,7 +598,6 @@ pub mod tests { r1cs::{ extract_r1cs, extract_w_x, tests::{get_test_r1cs, get_test_z}, - RelaxedR1CS, }, Arith, }; @@ -610,20 +607,18 @@ pub mod tests { use crate::transcript::poseidon::poseidon_canonical_config; use crate::FoldingScheme; - fn prepare_instances, R: Rng>( + // Convert `z` to a witness-instance pair for the relaxed R1CS + fn prepare_relaxed_witness_instance, R: Rng>( mut rng: R, r1cs: &R1CS, z: &[C::ScalarField], - ) -> (Witness, CommittedInstance) - where - C::ScalarField: Absorb, - { + ) -> (Witness, CommittedInstance) { let (w, x) = r1cs.split_z(z); let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows)).unwrap(); let mut w = Witness::new::(w, r1cs.A.n_rows, &mut rng); - w.E = r1cs.eval_relation(z).unwrap(); + w.E = r1cs.eval_at_z(z).unwrap(); let mut u = w.commit::(&cs_pp, x).unwrap(); u.u = z[0]; @@ -635,9 +630,10 @@ pub mod tests { let rng = &mut thread_rng(); let r1cs: R1CS = get_test_r1cs(); + let mut z = get_test_z(3); - z[0] = Fr::rand(rng); - let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z); + z[0] = Fr::rand(rng); // Randomize `z[0]` (i.e. 
`u.u`) to test the relaxed R1CS + let (w, u) = prepare_relaxed_witness_instance::<_, Pedersen, _>(rng, &r1cs, &z); let cs = ConstraintSystem::::new_ref(); @@ -665,12 +661,11 @@ pub mod tests { let r1cs = extract_r1cs::(&cs); let (w, x) = extract_w_x::(&cs); - let mut z = [vec![Fr::one()], x, w].concat(); - r1cs.check_relation(&z).unwrap(); + r1cs.check_relation(&w, &x).unwrap(); - z[0] = Fr::rand(rng); - let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z); - r1cs.check_relaxed_relation(&w, &u).unwrap(); + let z = [vec![Fr::rand(rng)], x, w].concat(); + let (w, u) = prepare_relaxed_witness_instance::<_, Pedersen, _>(rng, &r1cs, &z); + r1cs.check_relation(&w, &u).unwrap(); // set new CS for the circuit that checks the RelaxedR1CS of our original circuit let cs = ConstraintSystem::::new_ref(); @@ -759,9 +754,10 @@ pub mod tests { let cs = cs.into_inner().unwrap(); let r1cs = extract_r1cs::(&cs); let (w, x) = extract_w_x::(&cs); - let z = [vec![Fq::rand(rng)], x, w].concat(); - let (w, u) = prepare_instances::<_, Pedersen, _>(rng, &r1cs, &z); + let z = [vec![Fq::rand(rng)], x, w].concat(); + let (w, u) = + prepare_relaxed_witness_instance::<_, Pedersen, _>(rng, &r1cs, &z); // natively let cs = ConstraintSystem::::new_ref(); @@ -783,7 +779,7 @@ pub mod tests { } #[test] - fn test_decider_circuit() { + fn test_decider_eth_circuit() { let mut rng = ark_std::test_rng(); let poseidon_config = poseidon_canonical_config::(); @@ -814,21 +810,11 @@ pub mod tests { // generate a Nova instance and do a step of it let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); nova.prove_step(&mut rng, vec![], None).unwrap(); - let ivc_v = nova.clone(); - let (running_instance, incoming_instance, cyclefold_instance) = ivc_v.instances(); - N::verify( - nova_params.1, // verifier_params - z_0, - ivc_v.z_i, - Fr::one(), - running_instance, - incoming_instance, - cyclefold_instance, - ) - .unwrap(); + let ivc_proof = nova.ivc_proof(); + N::verify(nova_params.1, ivc_proof).unwrap(); - // load the DeciderEthCircuit from the generated Nova instance - let decider_circuit = DeciderEthCircuit::< + // load the DeciderEthCircuit from the Nova instance + let decider_eth_circuit = DeciderEthCircuit::< Projective, GVar, Projective2, @@ -841,7 +827,9 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); // generate the constraints and check that are satisfied by the inputs - decider_circuit.generate_constraints(cs.clone()).unwrap(); + decider_eth_circuit + .generate_constraints(cs.clone()) + .unwrap(); assert!(cs.is_satisfied().unwrap()); } @@ -880,10 +868,6 @@ pub mod tests { assert_eq!(challenge_E_Var.value().unwrap(), challenge_E); } - // The test test_polynomial_interpolation is temporary disabled due - // https://github.com/privacy-scaling-explorations/sonobe/issues/80 - // for n<=11 it will work, but for n>11 it will fail with stack overflow. 
- #[ignore] #[test] fn test_polynomial_interpolation() { let mut rng = ark_std::test_rng(); diff --git a/folding-schemes/src/folding/nova/mod.rs b/folding-schemes/src/folding/nova/mod.rs index 02e6d3b..d67c33b 100644 --- a/folding-schemes/src/folding/nova/mod.rs +++ b/folding-schemes/src/folding/nova/mod.rs @@ -13,33 +13,46 @@ use ark_std::fmt::Debug; use ark_std::rand::RngCore; use ark_std::{One, UniformRand, Zero}; use core::marker::PhantomData; +use decider_eth_circuit::WitnessVar; use crate::folding::circuits::cyclefold::{ fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, CycleFoldWitness, }; -use crate::folding::circuits::CF2; +use crate::folding::{ + circuits::{CF1, CF2}, + traits::Dummy, +}; use crate::frontend::FCircuit; use crate::transcript::{poseidon::poseidon_canonical_config, AbsorbNonNative, Transcript}; use crate::utils::vec::is_zero_vec; use crate::Error; use crate::FoldingScheme; -use crate::{arith::r1cs::RelaxedR1CS, commitment::CommitmentScheme}; use crate::{ arith::r1cs::{extract_r1cs, extract_w_x, R1CS}, constants::NOVA_N_BITS_RO, utils::{get_cm_coordinates, pp_hash}, }; +use crate::{arith::Arith, commitment::CommitmentScheme}; pub mod circuits; -pub mod decider_eth; -pub mod decider_eth_circuit; pub mod nifs; -pub mod serialize; +pub mod ova; pub mod traits; pub mod zk; -use circuits::{AugmentedFCircuit, ChallengeGadget}; + +use circuits::{AugmentedFCircuit, ChallengeGadget, CommittedInstanceVar}; use nifs::NIFS; +use traits::NIFSTrait; + +// offchain decider +pub mod decider; +pub mod decider_circuits; +// onchain decider +pub mod decider_eth; +pub mod decider_eth_circuit; + +use super::traits::{CommittedInstanceOps, WitnessOps}; /// Configuration for Nova's CycleFold circuit pub struct NovaCycleFoldConfig { @@ -68,24 +81,29 @@ pub struct CommittedInstance { pub x: Vec, } -impl CommittedInstance { - pub fn dummy(io_len: usize) -> Self { +impl Dummy for CommittedInstance { + fn dummy(io_len: usize) -> Self { Self { cmE: C::zero(), - u: C::ScalarField::zero(), + u: CF1::::zero(), cmW: C::zero(), - x: vec![C::ScalarField::zero(); io_len], + x: vec![CF1::::zero(); io_len], } } } +impl Dummy<&R1CS>> for CommittedInstance { + fn dummy(r1cs: &R1CS>) -> Self { + Self::dummy(r1cs.l) + } +} + impl Absorb for CommittedInstance where C::ScalarField: Absorb, { - fn to_sponge_bytes(&self, _dest: &mut Vec) { - // This is never called - unimplemented!() + fn to_sponge_bytes(&self, dest: &mut Vec) { + C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { @@ -103,30 +121,15 @@ where } } -impl CommittedInstance -where - ::ScalarField: Absorb, - ::BaseField: ark_ff::PrimeField, -{ - /// hash implements the committed instance hash compatible with the gadget implemented in - /// nova/circuits.rs::CommittedInstanceVar.hash. - /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the - /// `CommittedInstance`. 
- pub fn hash>( - &self, - sponge: &T, - pp_hash: C::ScalarField, // public params hash - i: C::ScalarField, - z_0: Vec, - z_i: Vec, - ) -> C::ScalarField { - let mut sponge = sponge.clone(); - sponge.absorb(&pp_hash); - sponge.absorb(&i); - sponge.absorb(&z_0); - sponge.absorb(&z_i); - sponge.absorb(&self); - sponge.squeeze_field_elements(1)[0] +impl CommittedInstanceOps for CommittedInstance { + type Var = CommittedInstanceVar; + + fn get_commitments(&self) -> Vec { + vec![self.cmW, self.cmE] + } + + fn is_incoming(&self) -> bool { + self.cmE == C::zero() && self.u == One::one() } } @@ -157,18 +160,6 @@ impl Witness { } } - pub fn dummy(w_len: usize, e_len: usize) -> Self { - let (rW, rE) = (C::ScalarField::zero(), C::ScalarField::zero()); - let w = vec![C::ScalarField::zero(); w_len]; - - Self { - E: vec![C::ScalarField::zero(); e_len], - rE, - W: w, - rW, - } - } - pub fn commit, const HC: bool>( &self, params: &CS::ProverParams, @@ -188,6 +179,25 @@ impl Witness { } } +impl Dummy<&R1CS>> for Witness { + fn dummy(r1cs: &R1CS>) -> Self { + Self { + E: vec![C::ScalarField::zero(); r1cs.A.n_rows], + rE: C::ScalarField::zero(), + W: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l], + rW: C::ScalarField::zero(), + } + } +} + +impl WitnessOps for Witness { + type Var = WitnessVar; + + fn get_openings(&self) -> Vec<(&[C::ScalarField], C::ScalarField)> { + vec![(&self.W, self.rW), (&self.E, self.rE)] + } +} + #[derive(Debug, Clone)] pub struct PreprocessorParam where @@ -334,15 +344,6 @@ where CS2: CommitmentScheme, { fn check(&self) -> Result<(), ark_serialize::SerializationError> { - self.poseidon_config.full_rounds.check()?; - self.poseidon_config.partial_rounds.check()?; - self.poseidon_config.alpha.check()?; - self.poseidon_config.ark.check()?; - self.poseidon_config.mds.check()?; - self.poseidon_config.rate.check()?; - self.poseidon_config.capacity.check()?; - self.r1cs.check()?; - self.cf_r1cs.check()?; self.cs_vp.check()?; self.cf_cs_vp.check()?; Ok(()) @@ -360,42 +361,12 @@ where mut writer: W, compress: ark_serialize::Compress, ) -> Result<(), ark_serialize::SerializationError> { - self.r1cs.serialize_with_mode(&mut writer, compress)?; - self.cf_r1cs.serialize_with_mode(&mut writer, compress)?; self.cs_vp.serialize_with_mode(&mut writer, compress)?; self.cf_cs_vp.serialize_with_mode(&mut writer, compress) } fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { - self.r1cs.serialized_size(compress) - + self.cf_r1cs.serialized_size(compress) - + self.cs_vp.serialized_size(compress) - + self.cf_cs_vp.serialized_size(compress) - } -} -impl CanonicalDeserialize for VerifierParams -where - C1: CurveGroup, - C2: CurveGroup, - CS1: CommitmentScheme, - CS2: CommitmentScheme, -{ - fn deserialize_with_mode( - mut reader: R, - compress: ark_serialize::Compress, - validate: ark_serialize::Validate, - ) -> Result { - let r1cs = R1CS::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_r1cs = R1CS::deserialize_with_mode(&mut reader, compress, validate)?; - let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; - Ok(VerifierParams { - poseidon_config: poseidon_canonical_config::(), - r1cs, - cf_r1cs, - cs_vp, - cf_cs_vp, - }) + self.cs_vp.serialized_size(compress) + self.cf_cs_vp.serialized_size(compress) } } @@ -418,6 +389,29 @@ where } } +#[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] +pub struct 
IVCProof +where + C1: CurveGroup, + C2: CurveGroup, +{ + // current step of the IVC + pub i: C1::ScalarField, + // initial state + pub z_0: Vec, + // current state + pub z_i: Vec, + // running instance + pub W_i: Witness, + pub U_i: CommittedInstance, + // incoming instance + pub w_i: Witness, + pub u_i: CommittedInstance, + // CycleFold instances + pub cf_W_i: CycleFoldWitness, + pub cf_U_i: CycleFoldCommittedInstance, +} + /// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait /// The `H` const generic specifies whether the homorphic commitment scheme is blinding @@ -489,6 +483,58 @@ where type IncomingInstance = (CommittedInstance, Witness); type MultiCommittedInstanceWithWitness = (); type CFInstance = (CycleFoldCommittedInstance, CycleFoldWitness); + type IVCProof = IVCProof; + + fn pp_deserialize_with_mode( + reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + _fc_params: FC::Params, // FCircuit params + ) -> Result { + Ok(Self::ProverParam::deserialize_with_mode( + reader, compress, validate, + )?) + } + fn vp_deserialize_with_mode( + mut reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + fc_params: FC::Params, + ) -> Result { + let poseidon_config = poseidon_canonical_config::(); + + // generate the r1cs & cf_r1cs needed for the VerifierParams. In this way we avoid needing + // to serialize them, saving significant space in the VerifierParams serialized size. + + // main circuit R1CS: + let f_circuit = FC::new(fc_params)?; + let cs = ConstraintSystem::::new_ref(); + let augmented_F_circuit = + AugmentedFCircuit::::empty(&poseidon_config, f_circuit.clone()); + augmented_F_circuit.generate_constraints(cs.clone())?; + cs.finalize(); + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let r1cs = extract_r1cs::(&cs); + + // CycleFold circuit R1CS + let cs2 = ConstraintSystem::::new_ref(); + let cf_circuit = NovaCycleFoldCircuit::::empty(); + cf_circuit.generate_constraints(cs2.clone())?; + cs2.finalize(); + let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let cf_r1cs = extract_r1cs::(&cs2); + + let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; + let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; + + Ok(Self::VerifierParam { + poseidon_config, + r1cs, + cf_r1cs, + cs_vp, + cf_cs_vp, + }) + } fn preprocess( mut rng: impl RngCore, @@ -562,9 +608,9 @@ where let pp_hash = vp.pp_hash()?; // setup the dummy instances - let (W_dummy, U_dummy) = r1cs.dummy_running_instance(); - let (w_dummy, u_dummy) = r1cs.dummy_incoming_instance(); - let (cf_W_dummy, cf_U_dummy) = cf_r1cs.dummy_running_instance(); + let (W_dummy, U_dummy) = r1cs.dummy_witness_instance(); + let (w_dummy, u_dummy) = r1cs.dummy_witness_instance(); + let (cf_W_dummy, cf_U_dummy) = cf_r1cs.dummy_witness_instance(); // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // R1CS that we're working with. 
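The `IVCProof` defined above makes the prover's output self-contained: the step counter, the initial and current states, and the running, incoming and CycleFold instance pairs travel together as one serializable object, and `verify` takes just the verifier params plus this proof. A minimal usage sketch under stated assumptions (a Nova type alias `N` over the pallas/vesta curves with a `CubicFCircuit`, as in the tests further below, and a hypothetical `nova_vp_bytes` buffer holding previously serialized verifier params):

    // prover side: extract the IVCProof from the running Nova instance and serialize it
    let ivc_proof = nova.ivc_proof();
    let mut proof_bytes = vec![];
    ivc_proof.serialize_compressed(&mut proof_bytes).unwrap();

    // verifier side: rebuild the VerifierParams (the main and CycleFold R1CS are re-generated
    // from the circuit description; only the commitment-scheme verifier keys are read from the
    // serialized bytes), then deserialize the proof and run IVC.V
    let nova_vp = N::vp_deserialize_with_mode(
        &mut nova_vp_bytes.as_slice(),
        ark_serialize::Compress::Yes,
        ark_serialize::Validate::Yes,
        (), // FCircuit params (CubicFCircuit takes none)
    )
    .unwrap();
    let ivc_proof =
        IVCProof::<Projective, Projective2>::deserialize_compressed(proof_bytes.as_slice())
            .unwrap();
    N::verify(nova_vp, ivc_proof).unwrap();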
@@ -669,15 +715,16 @@ where .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; // compute T and cmT for AugmentedFCircuit - let (T, cmT) = self.compute_cmT()?; + let (aux_p, aux_v) = self.compute_cmT()?; + let cmT = aux_v; // r_bits is the r used to the RLC of the F' instances - let r_bits = ChallengeGadget::::get_challenge_native( + let r_bits = ChallengeGadget::>::get_challenge_native( &mut transcript, self.pp_hash, - self.U_i.clone(), - self.u_i.clone(), - cmT, + &self.U_i, + &self.u_i, + Some(&cmT), ); let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) .ok_or(Error::OutOfBounds)?; @@ -685,10 +732,9 @@ where .ok_or(Error::OutOfBounds)?; // fold Nova instances - let (W_i1, U_i1): (Witness, CommittedInstance) = - NIFS::::fold_instances( - r_Fr, &self.W_i, &self.U_i, &self.w_i, &self.u_i, &T, cmT, - )?; + let (W_i1, U_i1): (Witness, CommittedInstance) = NIFS::::prove( + r_Fr, &self.W_i, &self.U_i, &self.w_i, &self.u_i, &aux_p, &aux_v, + )?; // folded instance output (public input, x) // u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1}) @@ -696,8 +742,8 @@ where &sponge, self.pp_hash, self.i + C1::ScalarField::one(), - self.z_0.clone(), - z_i1.clone(), + &self.z_0, + &z_i1, ); // u_{i+1}.x[1] = H(cf_U_{i+1}) let cf_u_i1_x: C1::ScalarField; @@ -815,10 +861,11 @@ where #[cfg(test)] { - self.cf_r1cs.check_tight_relation(&_cfW_w_i, &cfW_u_i)?; - self.cf_r1cs.check_tight_relation(&_cfE_w_i, &cfE_u_i)?; - self.cf_r1cs - .check_relaxed_relation(&self.cf_W_i, &self.cf_U_i)?; + cfW_u_i.check_incoming()?; + cfE_u_i.check_incoming()?; + self.cf_r1cs.check_relation(&_cfW_w_i, &cfW_u_i)?; + self.cf_r1cs.check_relation(&_cfE_w_i, &cfE_u_i)?; + self.cf_r1cs.check_relation(&self.cf_W_i, &self.cf_U_i)?; } } @@ -850,8 +897,9 @@ where #[cfg(test)] { - self.r1cs.check_tight_relation(&self.w_i, &self.u_i)?; - self.r1cs.check_relaxed_relation(&self.W_i, &self.U_i)?; + self.u_i.check_incoming()?; + self.r1cs.check_relation(&self.w_i, &self.u_i)?; + self.r1cs.check_relation(&self.W_i, &self.U_i)?; } Ok(()) @@ -861,31 +909,93 @@ where self.z_i.clone() } - fn instances( - &self, - ) -> ( - Self::RunningInstance, - Self::IncomingInstance, - Self::CFInstance, - ) { - ( - (self.U_i.clone(), self.W_i.clone()), - (self.u_i.clone(), self.w_i.clone()), - (self.cf_U_i.clone(), self.cf_W_i.clone()), - ) + fn ivc_proof(&self) -> Self::IVCProof { + Self::IVCProof { + i: self.i, + z_0: self.z_0.clone(), + z_i: self.z_i.clone(), + W_i: self.W_i.clone(), + U_i: self.U_i.clone(), + w_i: self.w_i.clone(), + u_i: self.u_i.clone(), + cf_W_i: self.cf_W_i.clone(), + cf_U_i: self.cf_U_i.clone(), + } + } + + fn from_ivc_proof( + ivc_proof: IVCProof, + fcircuit_params: FC::Params, + params: (Self::ProverParam, Self::VerifierParam), + ) -> Result { + let IVCProof { + i, + z_0, + z_i, + W_i, + U_i, + w_i, + u_i, + cf_W_i, + cf_U_i, + } = ivc_proof; + let (pp, vp) = params; + + let f_circuit = FC::new(fcircuit_params).unwrap(); + let cs = ConstraintSystem::::new_ref(); + let cs2 = ConstraintSystem::::new_ref(); + let augmented_F_circuit = + AugmentedFCircuit::::empty(&pp.poseidon_config, f_circuit.clone()); + let cf_circuit = NovaCycleFoldCircuit::::empty(); + + augmented_F_circuit.generate_constraints(cs.clone())?; + cs.finalize(); + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let r1cs = extract_r1cs::(&cs); + + cf_circuit.generate_constraints(cs2.clone())?; + cs2.finalize(); + let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let cf_r1cs = extract_r1cs::(&cs2); + 
+ Ok(Self { + _gc1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + r1cs, + cf_r1cs, + poseidon_config: pp.poseidon_config, + cs_pp: pp.cs_pp, + cf_cs_pp: pp.cf_cs_pp, + F: f_circuit, + pp_hash: vp.pp_hash()?, + i, + z_0, + z_i, + w_i, + u_i, + W_i, + U_i, + cf_W_i, + cf_U_i, + }) } - /// Implements IVC.V of Nova+CycleFold. Notice that this method does not include the + /// Implements IVC.V of Nova+CycleFold. Notice that this method does not include the /// commitments verification, which is done in the Decider. - fn verify( - vp: Self::VerifierParam, - z_0: Vec, // initial state - z_i: Vec, // last state - num_steps: C1::ScalarField, - running_instance: Self::RunningInstance, - incoming_instance: Self::IncomingInstance, - cyclefold_instance: Self::CFInstance, - ) -> Result<(), Error> { + fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error> { + let Self::IVCProof { + i: num_steps, + z_0, + z_i, + W_i, + U_i, + w_i, + u_i, + cf_W_i, + cf_U_i, + } = ivc_proof; + let sponge = PoseidonSponge::::new(&vp.poseidon_config); if num_steps == C1::ScalarField::zero() { @@ -895,10 +1005,6 @@ where return Ok(()); } - let (U_i, W_i) = running_instance; - let (u_i, w_i) = incoming_instance; - let (cf_U_i, cf_W_i) = cyclefold_instance; - if u_i.x.len() != 2 || U_i.x.len() != 2 { return Err(Error::IVCVerificationFail); } @@ -907,7 +1013,7 @@ where // check that u_i's output points to the running instance // u_i.X[0] == H(i, z_0, z_i, U_i) - let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone()); + let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i); if expected_u_i_x != u_i.x[0] { return Err(Error::IVCVerificationFail); } @@ -917,13 +1023,15 @@ where return Err(Error::IVCVerificationFail); } - // check R1CS satisfiability, which also enforces u_i.cmE==0, u_i.u==1 - vp.r1cs.check_tight_relation(&w_i, &u_i)?; + // check R1CS satisfiability, which is equivalent to checking if `u_i` + // is an incoming instance and if `w_i` and `u_i` satisfy RelaxedR1CS + u_i.check_incoming()?; + vp.r1cs.check_relation(&w_i, &u_i)?; // check RelaxedR1CS satisfiability - vp.r1cs.check_relaxed_relation(&W_i, &U_i)?; + vp.r1cs.check_relation(&W_i, &U_i)?; // check CycleFold RelaxedR1CS satisfiability - vp.cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i)?; + vp.cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?; Ok(()) } @@ -945,7 +1053,7 @@ where { // computes T and cmT for the AugmentedFCircuit fn compute_cmT(&self) -> Result<(Vec, C1), Error> { - NIFS::::compute_cmT( + NIFS::::compute_aux( &self.cs_pp, &self.r1cs, &self.w_i, @@ -1160,15 +1268,67 @@ pub mod tests { } assert_eq!(Fr::from(num_steps as u32), nova.i); - let (running_instance, incoming_instance, cyclefold_instance) = nova.instances(); + // serialize the Nova Prover & Verifier params.
These params are the trusted setup of the commitment schemes used + let mut nova_pp_serialized = vec![]; + nova_params + .0 + .serialize_compressed(&mut nova_pp_serialized) + .unwrap(); + let mut nova_vp_serialized = vec![]; + nova_params + .1 + .serialize_compressed(&mut nova_vp_serialized) + .unwrap(); + + // deserialize the Nova params + let _nova_pp_deserialized = + ProverParams::::deserialize_compressed( + &mut nova_pp_serialized.as_slice(), + ) + .unwrap(); + let nova_vp_deserialized = Nova::< + Projective, + GVar, + Projective2, + GVar2, + CubicFCircuit, + CS1, + CS2, + H, + >::vp_deserialize_with_mode( + &mut nova_vp_serialized.as_slice(), + ark_serialize::Compress::Yes, + ark_serialize::Validate::Yes, + (), // fcircuit_params + ) + .unwrap(); + + let ivc_proof = nova.ivc_proof(); + + // serialize IVCProof + let mut ivc_proof_serialized = vec![]; + assert!(ivc_proof + .serialize_compressed(&mut ivc_proof_serialized) + .is_ok()); + // deserialize IVCProof + let ivc_proof_deserialized = , + CS1, + CS2, + H, + > as FoldingScheme>>::IVCProof::deserialize_compressed( + ivc_proof_serialized.as_slice() + ) + .unwrap(); + + // verify the deserialized IVCProof with the deserialized VerifierParams Nova::, CS1, CS2, H>::verify( - nova_params.1, // Nova's verifier params - z_0.clone(), - nova.z_i.clone(), - nova.i, - running_instance, - incoming_instance, - cyclefold_instance, + nova_vp_deserialized, // Nova's verifier params + ivc_proof_deserialized, ) .unwrap(); diff --git a/folding-schemes/src/folding/nova/nifs.rs b/folding-schemes/src/folding/nova/nifs.rs index 03a0ef3..0a2d807 100644 --- a/folding-schemes/src/folding/nova/nifs.rs +++ b/folding-schemes/src/folding/nova/nifs.rs @@ -1,8 +1,12 @@ use ark_crypto_primitives::sponge::Absorb; use ark_ec::{CurveGroup, Group}; +use ark_ff::PrimeField; +use ark_std::rand::RngCore; use ark_std::Zero; use std::marker::PhantomData; +use super::circuits::ChallengeGadget; +use super::traits::NIFSTrait; use super::{CommittedInstance, Witness}; use crate::arith::r1cs::R1CS; use crate::commitment::CommitmentScheme; @@ -19,11 +23,119 @@ pub struct NIFS, const H: bool = false _cp: PhantomData, } +impl, const H: bool> NIFSTrait + for NIFS +where + ::ScalarField: Absorb, + ::BaseField: PrimeField, +{ + type CommittedInstance = CommittedInstance; + type Witness = Witness; + type ProverAux = Vec; + type VerifierAux = C; + + fn new_witness(w: Vec, e_len: usize, rng: impl RngCore) -> Self::Witness { + Witness::new::(w, e_len, rng) + } + + fn new_instance( + W: &Self::Witness, + params: &CS::ProverParams, + x: Vec, + _aux: Vec, + ) -> Result { + W.commit::(params, x) + } + + fn fold_witness( + r: C::ScalarField, + W_i: &Self::Witness, + w_i: &Self::Witness, + aux: &Self::ProverAux, + ) -> Result { + let r2 = r * r; + let E: Vec = vec_add( + &vec_add(&W_i.E, &vec_scalar_mul(aux, &r))?, // aux is Nova's T + &vec_scalar_mul(&w_i.E, &r2), + )?; + // use r_T=0 since we don't need hiding property for cm(T) + let rT = C::ScalarField::zero(); + let rE = W_i.rE + r * rT + r2 * w_i.rE; + let W: Vec = W_i + .W + .iter() + .zip(&w_i.W) + .map(|(a, b)| *a + (r * b)) + .collect(); + + let rW = W_i.rW + r * w_i.rW; + Ok(Self::Witness { E, rE, W, rW }) + } + + fn compute_aux( + cs_prover_params: &CS::ProverParams, + r1cs: &R1CS, + W_i: &Self::Witness, + U_i: &Self::CommittedInstance, + w_i: &Self::Witness, + u_i: &Self::CommittedInstance, + ) -> Result<(Self::ProverAux, Self::VerifierAux), Error> { + let z1: Vec = [vec![U_i.u], U_i.x.to_vec(), W_i.W.to_vec()].concat(); + let z2: 
Vec = [vec![u_i.u], u_i.x.to_vec(), w_i.W.to_vec()].concat(); + + // compute cross terms + let T = Self::compute_T(r1cs, U_i.u, u_i.u, &z1, &z2)?; + // use r_T=0 since we don't need hiding property for cm(T) + let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; + Ok((T, cmT)) + } + + fn get_challenge>( + transcript: &mut T, + pp_hash: C::ScalarField, // public params hash + U_i: &Self::CommittedInstance, + u_i: &Self::CommittedInstance, + aux: &Self::VerifierAux, // cmT + ) -> Vec { + ChallengeGadget::::get_challenge_native( + transcript, + pp_hash, + U_i, + u_i, + Some(aux), + ) + } + + // Notice: `prove` method is implemented at the trait level. + + fn verify( + // r comes from the transcript, and is a n-bit (N_BITS_CHALLENGE) element + r: C::ScalarField, + U_i: &Self::CommittedInstance, + u_i: &Self::CommittedInstance, + cmT: &C, // VerifierAux + ) -> Self::CommittedInstance { + let r2 = r * r; + let cmE = U_i.cmE + cmT.mul(r) + u_i.cmE.mul(r2); + let u = U_i.u + r * u_i.u; + let cmW = U_i.cmW + u_i.cmW.mul(r); + let x = U_i + .x + .iter() + .zip(&u_i.x) + .map(|(a, b)| *a + (r * b)) + .collect::>(); + + Self::CommittedInstance { cmE, u, cmW, x } + } +} + impl, const H: bool> NIFS where ::ScalarField: Absorb, + ::BaseField: PrimeField, { - // compute_T: compute cross-terms T + /// compute_T: compute cross-terms T pub fn compute_T( r1cs: &R1CS, u1: C::ScalarField, @@ -49,48 +161,8 @@ where vec_sub(&vec_sub(&vec_add(&Az1_Bz2, &Az2_Bz1)?, &u1Cz2)?, &u2Cz1) } - pub fn fold_witness( - r: C::ScalarField, - w1: &Witness, - w2: &Witness, - T: &[C::ScalarField], - rT: C::ScalarField, - ) -> Result, Error> { - let r2 = r * r; - let E: Vec = vec_add( - &vec_add(&w1.E, &vec_scalar_mul(T, &r))?, - &vec_scalar_mul(&w2.E, &r2), - )?; - let rE = w1.rE + r * rT + r2 * w2.rE; - let W: Vec = w1.W.iter().zip(&w2.W).map(|(a, b)| *a + (r * b)).collect(); - - let rW = w1.rW + r * w2.rW; - Ok(Witness:: { E, rE, W, rW }) - } - - pub fn fold_committed_instance( - r: C::ScalarField, - ci1: &CommittedInstance, // U_i - ci2: &CommittedInstance, // u_i - cmT: &C, - ) -> CommittedInstance { - let r2 = r * r; - let cmE = ci1.cmE + cmT.mul(r) + ci2.cmE.mul(r2); - let u = ci1.u + r * ci2.u; - let cmW = ci1.cmW + ci2.cmW.mul(r); - let x = ci1 - .x - .iter() - .zip(&ci2.x) - .map(|(a, b)| *a + (r * b)) - .collect::>(); - - CommittedInstance:: { cmE, u, cmW, x } - } - - /// NIFS.P is the consecutive combination of compute_cmT with fold_instances - - /// compute_cmT is part of the NIFS.P logic + /// In Nova, NIFS.P is the consecutive combination of compute_cmT with fold_instances, + /// ie. compute_cmT is part of the NIFS.P logic. pub fn compute_cmT( cs_prover_params: &CS::ProverParams, r1cs: &R1CS, @@ -108,6 +180,7 @@ where let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; Ok((T, cmT)) } + pub fn compute_cyclefold_cmT( cs_prover_params: &CS::ProverParams, r1cs: &R1CS, // R1CS over C2.Fr=C1.Fq (here C=C2) @@ -129,40 +202,6 @@ where Ok((T, cmT)) } - /// fold_instances is part of the NIFS.P logic described in - /// [Nova](https://eprint.iacr.org/2021/370.pdf)'s section 4. It returns the folded Committed - /// Instances and the Witness. 
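The hunks above move Nova's NIFS into the new NIFSTrait shape without changing the folding algebra itself. A minimal standalone sketch of that algebra, not the crate's API; it only assumes the ark_pallas / ark_ff dependencies already used by the crate's tests:

use ark_ff::{One, Zero};
use ark_pallas::Fr;

// E' = E1 + r*T + r^2*E2, component-wise; `t` plays the role of Nova's cross-term vector T
fn fold_error_terms(e1: &[Fr], t: &[Fr], e2: &[Fr], r: Fr) -> Vec<Fr> {
    let r2 = r * r;
    e1.iter()
        .zip(t)
        .zip(e2)
        .map(|((e1_i, t_i), e2_i)| *e1_i + r * t_i + r2 * e2_i)
        .collect()
}

// u' = u1 + r*u2; an incoming (non-relaxed) instance always has u2 = 1
fn fold_u(u1: Fr, u2: Fr, r: Fr) -> Fr {
    u1 + r * u2
}

fn main() {
    let r = Fr::from(7u64);
    let t = vec![Fr::from(5u64); 3];
    // both E vectors are zero on the first fold (running starts dummy, incoming is plain R1CS)
    let e1 = vec![Fr::zero(); 3];
    let e2 = vec![Fr::zero(); 3];
    assert_eq!(fold_error_terms(&e1, &t, &e2, r), vec![r * Fr::from(5u64); 3]);
    assert_eq!(fold_u(Fr::one(), Fr::one(), r), Fr::one() + r);
}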
- pub fn fold_instances( - r: C::ScalarField, - w1: &Witness, - ci1: &CommittedInstance, - w2: &Witness, - ci2: &CommittedInstance, - T: &[C::ScalarField], - cmT: C, - ) -> Result<(Witness, CommittedInstance), Error> { - // fold witness - // use r_T=0 since we don't need hiding property for cm(T) - let w3 = NIFS::::fold_witness(r, w1, w2, T, C::ScalarField::zero())?; - - // fold committed instances - let ci3 = NIFS::::fold_committed_instance(r, ci1, ci2, &cmT); - - Ok((w3, ci3)) - } - - /// verify implements NIFS.V logic described in [Nova](https://eprint.iacr.org/2021/370.pdf)'s - /// section 4. It returns the folded Committed Instance - pub fn verify( - // r comes from the transcript, and is a n-bit (N_BITS_CHALLENGE) element - r: C::ScalarField, - ci1: &CommittedInstance, - ci2: &CommittedInstance, - cmT: &C, - ) -> CommittedInstance { - NIFS::::fold_committed_instance(r, ci1, ci2, cmT) - } - /// Verify committed folded instance (ci) relations. Notice that this method does not open the /// commitments, but just checks that the given committed instances (ci1, ci2) when folded /// result in the folded committed instance (ci3) values. @@ -173,7 +212,7 @@ where ci3: &CommittedInstance, cmT: &C, ) -> Result<(), Error> { - let expected = Self::fold_committed_instance(r, ci1, ci2, cmT); + let expected = Self::verify(r, ci1, ci2, cmT); if ci3.cmE != expected.cmE || ci3.u != expected.u || ci3.cmW != expected.cmW @@ -202,217 +241,32 @@ where #[cfg(test)] pub mod tests { use super::*; - use ark_crypto_primitives::sponge::{ - poseidon::{PoseidonConfig, PoseidonSponge}, - CryptographicSponge, - }; + use crate::transcript::poseidon::poseidon_canonical_config; + use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge}; use ark_ff::{BigInteger, PrimeField}; use ark_pallas::{Fr, Projective}; - use ark_std::{ops::Mul, test_rng, UniformRand}; + use ark_std::{test_rng, UniformRand}; - use crate::arith::r1cs::{ - tests::{get_test_r1cs, get_test_z}, - RelaxedR1CS, + use crate::arith::{ + r1cs::tests::{get_test_r1cs, get_test_z}, + Arith, }; - use crate::commitment::pedersen::{Params as PedersenParams, Pedersen}; - use crate::folding::nova::circuits::ChallengeGadget; - use crate::transcript::poseidon::poseidon_canonical_config; + use crate::commitment::pedersen::Pedersen; + use crate::folding::nova::traits::NIFSTrait; - #[allow(clippy::type_complexity)] - pub(crate) fn prepare_simple_fold_inputs() -> ( - PedersenParams, - PoseidonConfig, - R1CS, - Witness, // w1 - CommittedInstance, // ci1 - Witness, // w2 - CommittedInstance, // ci2 - Witness, // w3 - CommittedInstance, // ci3 - Vec, // T - C, // cmT - Vec, // r_bits - C::ScalarField, // r_Fr - ) - where - C: CurveGroup, - ::BaseField: PrimeField, - C::ScalarField: Absorb, - { - let r1cs = get_test_r1cs(); - let z1 = get_test_z(3); - let z2 = get_test_z(4); - let (w1, x1) = r1cs.split_z(&z1); - let (w2, x2) = r1cs.split_z(&z2); - - let w1 = Witness::::new::(w1.clone(), r1cs.A.n_rows, test_rng()); - let w2 = Witness::::new::(w2.clone(), r1cs.A.n_rows, test_rng()); - - let mut rng = ark_std::test_rng(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - - // compute committed instances - let ci1 = w1 - .commit::, false>(&pedersen_params, x1.clone()) - .unwrap(); - let ci2 = w2 - .commit::, false>(&pedersen_params, x2.clone()) - .unwrap(); - - // NIFS.P - let (T, cmT) = - NIFS::>::compute_cmT(&pedersen_params, &r1cs, &w1, &ci1, &w2, &ci2) - .unwrap(); - - let poseidon_config = 
poseidon_canonical_config::(); - let mut transcript = PoseidonSponge::::new(&poseidon_config); - - let pp_hash = C::ScalarField::from(42u32); // only for test - let r_bits = ChallengeGadget::::get_challenge_native( - &mut transcript, - pp_hash, - ci1.clone(), - ci2.clone(), - cmT, - ); - let r_Fr = C::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); - - let (w3, ci3) = - NIFS::>::fold_instances(r_Fr, &w1, &ci1, &w2, &ci2, &T, cmT).unwrap(); - - ( - pedersen_params, - poseidon_config, - r1cs, - w1, - ci1, - w2, - ci2, - w3, - ci3, - T, - cmT, - r_bits, - r_Fr, - ) - } - - // fold 2 dummy instances and check that the folded instance holds the relaxed R1CS relation #[test] - fn test_nifs_fold_dummy() { - let r1cs = get_test_r1cs::(); - let z1 = get_test_z(3); - let (w1, x1) = r1cs.split_z(&z1); + fn test_nifs_nova() { + let (W, U) = test_nifs_opt::>>(); - let mut rng = ark_std::test_rng(); - let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - - // dummy instance, witness and public inputs zeroes - let w_dummy = Witness::::dummy(w1.len(), r1cs.A.n_rows); - let mut u_dummy = w_dummy - .commit::, false>(&pedersen_params, vec![Fr::zero(); x1.len()]) - .unwrap(); - u_dummy.u = Fr::zero(); - - let w_i = w_dummy.clone(); - let u_i = u_dummy.clone(); - let W_i = w_dummy.clone(); - let U_i = u_dummy.clone(); - r1cs.check_relaxed_relation(&w_i, &u_i).unwrap(); - r1cs.check_relaxed_relation(&W_i, &U_i).unwrap(); - - let r_Fr = Fr::from(3_u32); - - let (T, cmT) = NIFS::>::compute_cmT( - &pedersen_params, - &r1cs, - &w_i, - &u_i, - &W_i, - &U_i, - ) - .unwrap(); - let (W_i1, U_i1) = NIFS::>::fold_instances( - r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT, - ) - .unwrap(); - r1cs.check_relaxed_relation(&W_i1, &U_i1).unwrap(); - } - - // fold 2 instances into one - #[test] - fn test_nifs_one_fold() { - let (pedersen_params, poseidon_config, r1cs, w1, ci1, w2, ci2, w3, ci3, T, cmT, _, r) = - prepare_simple_fold_inputs(); - - // NIFS.V - let ci3_v = NIFS::>::verify(r, &ci1, &ci2, &cmT); - assert_eq!(ci3_v, ci3); - - // check that relations hold for the 2 inputted instances and the folded one - r1cs.check_relaxed_relation(&w1, &ci1).unwrap(); - r1cs.check_relaxed_relation(&w2, &ci2).unwrap(); - r1cs.check_relaxed_relation(&w3, &ci3).unwrap(); - - // check that folded commitments from folded instance (ci) are equal to folding the - // use folded rE, rW to commit w3 - let ci3_expected = w3 - .commit::, false>(&pedersen_params, ci3.x.clone()) - .unwrap(); - assert_eq!(ci3_expected.cmE, ci3.cmE); - assert_eq!(ci3_expected.cmW, ci3.cmW); - - // next equalities should hold since we started from two cmE of zero-vector E's - assert_eq!(ci3.cmE, cmT.mul(r)); - assert_eq!(w3.E, vec_scalar_mul(&T, &r)); - - // NIFS.Verify_Folded_Instance: - NIFS::>::verify_folded_instance(r, &ci1, &ci2, &ci3, &cmT) - .unwrap(); - - // init Prover's transcript - let mut transcript_p = PoseidonSponge::::new(&poseidon_config); - // init Verifier's transcript - let mut transcript_v = PoseidonSponge::::new(&poseidon_config); - - // prove the ci3.cmE, ci3.cmW, cmT commitments - let cm_proofs = NIFS::>::prove_commitments( - &mut transcript_p, - &pedersen_params, - &w3, - &ci3, - T, - &cmT, - ) - .unwrap(); - - // verify the ci3.cmE, ci3.cmW, cmT commitments - assert_eq!(cm_proofs.len(), 3); - Pedersen::::verify( - &pedersen_params, - &mut transcript_v, - &ci3.cmE, - &cm_proofs[0].clone(), - ) - .unwrap(); - Pedersen::::verify( - &pedersen_params, - &mut transcript_v, - &ci3.cmW, - 
&cm_proofs[1].clone(), - ) - .unwrap(); - Pedersen::::verify( - &pedersen_params, - &mut transcript_v, - &cmT, - &cm_proofs[2].clone(), - ) - .unwrap(); + // check the last folded instance relation + let r1cs = get_test_r1cs(); + r1cs.check_relation(&W, &U).unwrap(); } - #[test] - fn test_nifs_fold_loop() { + /// runs a loop using the NIFS trait, and returns the last Witness and CommittedInstance so + /// that their relation can be checked. + pub(crate) fn test_nifs_opt>>( + ) -> (N::Witness, N::CommittedInstance) { let r1cs = get_test_r1cs(); let z = get_test_z(3); let (w, x) = r1cs.split_z(&z); @@ -420,66 +274,68 @@ pub mod tests { let mut rng = ark_std::test_rng(); let (pedersen_params, _) = Pedersen::::setup(&mut rng, r1cs.A.n_cols).unwrap(); - // prepare the running instance - let mut running_instance_w = - Witness::::new::(w.clone(), r1cs.A.n_rows, test_rng()); - let mut running_committed_instance = running_instance_w - .commit::, false>(&pedersen_params, x) - .unwrap(); + let poseidon_config = poseidon_canonical_config::(); + let mut transcript = PoseidonSponge::::new(&poseidon_config); + let pp_hash = Fr::rand(&mut rng); - r1cs.check_relaxed_relation(&running_instance_w, &running_committed_instance) - .unwrap(); + // prepare the running instance + let mut running_witness = N::new_witness(w.clone(), r1cs.A.n_rows, test_rng()); + let mut running_committed_instance = + N::new_instance(&running_witness, &pedersen_params, x, vec![]).unwrap(); let num_iters = 10; for i in 0..num_iters { // prepare the incoming instance let incoming_instance_z = get_test_z(i + 4); let (w, x) = r1cs.split_z(&incoming_instance_z); - let incoming_instance_w = - Witness::::new::(w.clone(), r1cs.A.n_rows, test_rng()); - let incoming_committed_instance = incoming_instance_w - .commit::, false>(&pedersen_params, x) - .unwrap(); - r1cs.check_relaxed_relation(&incoming_instance_w, &incoming_committed_instance) - .unwrap(); + let incoming_witness = N::new_witness(w.clone(), r1cs.A.n_rows, test_rng()); + let incoming_committed_instance = + N::new_instance(&incoming_witness, &pedersen_params, x, vec![]).unwrap(); - let r = Fr::rand(&mut rng); // folding challenge would come from the RO - - // NIFS.P - let (T, cmT) = NIFS::>::compute_cmT( + let (aux_p, aux_v) = N::compute_aux( &pedersen_params, &r1cs, - &running_instance_w, + &running_witness, &running_committed_instance, - &incoming_instance_w, + &incoming_witness, &incoming_committed_instance, ) .unwrap(); - let (folded_w, _) = NIFS::>::fold_instances( + + let r_bits = N::get_challenge( + &mut transcript, + pp_hash, + &running_committed_instance, + &incoming_committed_instance, + &aux_v, + ); + let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); + + // NIFS.P + let (folded_witness, _) = N::prove( r, - &running_instance_w, + &running_witness, &running_committed_instance, - &incoming_instance_w, + &incoming_witness, &incoming_committed_instance, - &T, - cmT, + &aux_p, + &aux_v, ) .unwrap(); // NIFS.V - let folded_committed_instance = NIFS::>::verify( + let folded_committed_instance = N::verify( r, &running_committed_instance, &incoming_committed_instance, - &cmT, + &aux_v, ); - r1cs.check_relaxed_relation(&folded_w, &folded_committed_instance) - .unwrap(); - // set running_instance for next loop iteration - running_instance_w = folded_w; + running_witness = folded_witness; running_committed_instance = folded_committed_instance; } + + (running_witness, running_committed_instance) } } diff --git a/folding-schemes/src/folding/nova/ova.rs 
b/folding-schemes/src/folding/nova/ova.rs new file mode 100644 index 0000000..95bd91a --- /dev/null +++ b/folding-schemes/src/folding/nova/ova.rs @@ -0,0 +1,269 @@ +/// This module contains the implementation the NIFSTrait for the +/// [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) NIFS (Non-Interactive Folding Scheme) as +/// outlined in the protocol description doc: +/// authored by Benedikt Bünz. +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::{CurveGroup, Group}; +use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::fmt::Debug; +use ark_std::rand::RngCore; +use ark_std::{One, UniformRand, Zero}; +use std::marker::PhantomData; + +use super::{circuits::ChallengeGadget, traits::NIFSTrait}; +use crate::arith::r1cs::R1CS; +use crate::commitment::CommitmentScheme; +use crate::folding::{circuits::CF1, traits::Dummy}; +use crate::transcript::{AbsorbNonNative, Transcript}; +use crate::utils::vec::{hadamard, mat_vec_mul, vec_scalar_mul, vec_sub}; +use crate::Error; + +/// A CommittedInstance in [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) is represented by `W` or +/// `W'`. It is the result of the commitment to a vector that contains the witness `w` concatenated +/// with `t` or `e` + the public inputs `x` and a relaxation factor `u`. (Notice that in the Ova +/// document `u` is denoted as `mu`, in this implementation we use `u` so it follows the original +/// Nova notation, so code is easier to follow). +#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct CommittedInstance { + pub u: C::ScalarField, // in the Ova document is denoted as `mu` + pub x: Vec, + pub cmWE: C, +} + +impl Absorb for CommittedInstance +where + C::ScalarField: Absorb, +{ + fn to_sponge_bytes(&self, dest: &mut Vec) { + C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); + } + + fn to_sponge_field_elements(&self, dest: &mut Vec) { + self.u.to_sponge_field_elements(dest); + self.x.to_sponge_field_elements(dest); + // We cannot call `to_native_sponge_field_elements(dest)` directly, as + // `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`, + // but here `F` is a generic `PrimeField`. + self.cmWE + .to_native_sponge_field_elements_as_vec() + .to_sponge_field_elements(dest); + } +} + +// #[allow(dead_code)] // Clippy flag needed for now. +/// A Witness in Ova is represented by `w`. It also contains a blinder which can or not be used +/// when committing to the witness itself. +#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct Witness { + pub w: Vec, + pub rW: C::ScalarField, +} + +impl Witness { + /// Generates a new `Witness` instance from a given witness vector. + /// If `H = true`, then we assume we want to blind it at commitment time, + /// hence sampling `rW` from the randomness passed. + pub fn new(w: Vec, mut rng: impl RngCore) -> Self { + Self { + w, + rW: if H { + C::ScalarField::rand(&mut rng) + } else { + C::ScalarField::zero() + }, + } + } + + /// Given `x` (public inputs) and `t` or `e` (which we always concatenate in Ova) and the + /// public inputs `x`, generates a [`CommittedInstance`] as a result which will or not be + /// blinded depending on how the const generic `HC` is set up. 
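Before the `commit` method that follows, a toy sketch of the commitment layout Ova uses. Assumptions: the Pedersen-style commitment is modeled as a bare inner product over ark_pallas::Fr rather than group elements, purely to show that `w` and `t_or_e` are committed as a single concatenated vector; none of these helper names are the crate's.

use ark_ff::Zero;
use ark_pallas::Fr;

// <generators, v> + blind; a real Pedersen commitment would use curve points instead
fn toy_commit(generators: &[Fr], v: &[Fr], blind: Fr) -> Fr {
    generators.iter().zip(v).fold(blind, |acc, (g, x)| acc + *g * x)
}

// cmWE = Commit(w || t_or_e; rW): one commitment covers both the witness and the error vector
fn ova_commit_we(generators: &[Fr], w: &[Fr], t_or_e: &[Fr], r_w: Fr) -> Fr {
    let concat: Vec<Fr> = [w, t_or_e].concat();
    toy_commit(generators, &concat, r_w)
}

fn main() {
    let gens = vec![Fr::from(2u64), Fr::from(3u64), Fr::from(5u64), Fr::from(7u64)];
    let w = vec![Fr::from(1u64), Fr::from(4u64)];
    let t = vec![Fr::from(6u64), Fr::zero()];
    // rW = 0 mirrors the H = false (no hiding) setting
    let cm = ova_commit_we(&gens, &w, &t, Fr::zero());
    assert_eq!(cm, Fr::from(2u64 + 12 + 30));
}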
+ pub fn commit, const HC: bool>( + &self, + params: &CS::ProverParams, + x: Vec, + t_or_e: Vec, + ) -> Result, Error> { + let cmWE = CS::commit(params, &[self.w.clone(), t_or_e].concat(), &self.rW)?; + Ok(CommittedInstance { + u: C::ScalarField::one(), + cmWE, + x, + }) + } +} + +impl Dummy<&R1CS>> for Witness { + fn dummy(r1cs: &R1CS>) -> Self { + Self { + w: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l], + rW: C::ScalarField::zero(), + } + } +} + +/// Implements the NIFS (Non-Interactive Folding Scheme) trait for Ova. +pub struct NIFS, const H: bool = false> { + _c: PhantomData, + _cp: PhantomData, +} + +impl, const H: bool> NIFSTrait + for NIFS +where + ::ScalarField: Absorb, + ::BaseField: PrimeField, +{ + type CommittedInstance = CommittedInstance; + type Witness = Witness; + type ProverAux = (); + type VerifierAux = (); + + fn new_witness(w: Vec, _e_len: usize, rng: impl RngCore) -> Self::Witness { + Witness::new::(w, rng) + } + + fn new_instance( + W: &Self::Witness, + params: &CS::ProverParams, + x: Vec, + aux: Vec, // t_or_e + ) -> Result { + W.commit::(params, x, aux) + } + + fn fold_witness( + r: C::ScalarField, // in Ova's hackmd denoted as `alpha` + W_i: &Self::Witness, + w_i: &Self::Witness, + _aux: &Self::ProverAux, + ) -> Result { + let w: Vec = W_i + .w + .iter() + .zip(&w_i.w) + .map(|(a, b)| *a + (r * b)) + .collect(); + + let rW = W_i.rW + r * w_i.rW; + Ok(Self::Witness { w, rW }) + } + + fn compute_aux( + _cs_prover_params: &CS::ProverParams, + _r1cs: &R1CS, + _W_i: &Self::Witness, + _U_i: &Self::CommittedInstance, + _w_i: &Self::Witness, + _u_i: &Self::CommittedInstance, + ) -> Result<(Self::ProverAux, Self::VerifierAux), Error> { + Ok(((), ())) + } + + fn get_challenge>( + transcript: &mut T, + pp_hash: C::ScalarField, // public params hash + U_i: &Self::CommittedInstance, + u_i: &Self::CommittedInstance, + _aux: &Self::VerifierAux, + ) -> Vec { + // reuse Nova's get_challenge method + ChallengeGadget::::get_challenge_native( + transcript, pp_hash, U_i, u_i, None, // empty in Ova's case + ) + } + + // Notice: `prove` method is implemented at the trait level. + + fn verify( + // r comes from the transcript, and is a n-bit (N_BITS_CHALLENGE) element + r: C::ScalarField, + U_i: &Self::CommittedInstance, + u_i: &Self::CommittedInstance, + _aux: &Self::VerifierAux, + ) -> Self::CommittedInstance { + // recall that r <==> alpha, and u <==> mu between Nova and Ova respectively + let u = U_i.u + r; // u_i.u is always 1 IN ova as we just can do sequential IVC. + let cmWE = U_i.cmWE + u_i.cmWE.mul(r); + let x = U_i + .x + .iter() + .zip(&u_i.x) + .map(|(a, b)| *a + (r * b)) + .collect::>(); + + Self::CommittedInstance { cmWE, u, x } + } +} + +/// Computes the E parameter (error terms) for the given R1CS and the instance's z and u. This +/// method is used by the verifier to obtain E in order to check the RelaxedR1CS relation. 
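A standalone toy version of the check the helper below enables: the Ova verifier recomputes E(z, u) = (Az) ∘ (Bz) − u·(Cz) and compares it against the claimed error vector. Dense Vec<Vec<Fr>> matrices stand in for the crate's sparse types, and error handling is omitted; this is a sketch, not the crate's implementation.

use ark_ff::Zero;
use ark_pallas::Fr;

fn mat_vec(m: &[Vec<Fr>], z: &[Fr]) -> Vec<Fr> {
    m.iter()
        .map(|row| row.iter().zip(z).fold(Fr::zero(), |acc, (a, b)| acc + *a * b))
        .collect()
}

// E(z, u) = (Az) ∘ (Bz) - u*(Cz), the relaxed R1CS error vector
fn toy_compute_e(a: &[Vec<Fr>], b: &[Vec<Fr>], c: &[Vec<Fr>], z: &[Fr], u: Fr) -> Vec<Fr> {
    let (az, bz, cz) = (mat_vec(a, z), mat_vec(b, z), mat_vec(c, z));
    az.iter()
        .zip(&bz)
        .zip(&cz)
        .map(|((az_i, bz_i), cz_i)| *az_i * bz_i - u * cz_i)
        .collect()
}

fn main() {
    // single toy constraint z0 * z0 = z1, i.e. A = [1, 0], B = [1, 0], C = [0, 1]
    let a = vec![vec![Fr::from(1u64), Fr::from(0u64)]];
    let b = a.clone();
    let c = vec![vec![Fr::from(0u64), Fr::from(1u64)]];
    let z = vec![Fr::from(3u64), Fr::from(9u64)];
    // for a plain satisfying assignment (u = 1) the error vector is all zeros
    assert!(toy_compute_e(&a, &b, &c, &z, Fr::from(1u64)).iter().all(|e| e.is_zero()));
}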
+pub fn compute_E( + r1cs: &R1CS, + z: &[C::ScalarField], + u: C::ScalarField, +) -> Result, Error> { + let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone()); + + // this is parallelizable (for the future) + let Az = mat_vec_mul(&A, z)?; + let Bz = mat_vec_mul(&B, z)?; + let Cz = mat_vec_mul(&C, z)?; + + let Az_Bz = hadamard(&Az, &Bz)?; + let uCz = vec_scalar_mul(&Cz, &u); + + vec_sub(&Az_Bz, &uCz) +} + +#[cfg(test)] +pub mod tests { + use super::*; + use ark_pallas::{Fr, Projective}; + + use crate::arith::{r1cs::tests::get_test_r1cs, Arith}; + use crate::commitment::pedersen::Pedersen; + use crate::folding::nova::nifs::tests::test_nifs_opt; + + // Simple auxiliary structure mainly used to help pass a witness for which we can check + // easily an R1CS relation. + // Notice that checking it requires us to have `E` as per [`Arith`] trait definition. + // But since we don't hold `E` nor `e` within the NIFS, we create this structure to pass + // `e` such that the check can be done. + #[derive(Debug, Clone)] + pub(crate) struct TestingWitness { + pub(crate) w: Vec, + pub(crate) e: Vec, + } + impl Arith, CommittedInstance> for R1CS> { + type Evaluation = Vec>; + + fn eval_relation( + &self, + w: &TestingWitness, + u: &CommittedInstance, + ) -> Result { + self.eval_at_z(&[&[u.u], u.x.as_slice(), &w.w].concat()) + } + + fn check_evaluation( + w: &TestingWitness, + _u: &CommittedInstance, + e: Self::Evaluation, + ) -> Result<(), Error> { + (w.e == e).then_some(()).ok_or(Error::NotSatisfied) + } + } + + #[test] + fn test_nifs_ova() { + let (W, U) = test_nifs_opt::>>(); + + // check the last folded instance relation + let r1cs = get_test_r1cs(); + let z: Vec = [&[U.u][..], &U.x, &W.w].concat(); + let e = compute_E::(&r1cs, &z, U.u).unwrap(); + r1cs.check_relation(&TestingWitness:: { e, w: W.w.clone() }, &U) + .unwrap(); + } +} diff --git a/folding-schemes/src/folding/nova/serialize.rs b/folding-schemes/src/folding/nova/serialize.rs deleted file mode 100644 index e5e5382..0000000 --- a/folding-schemes/src/folding/nova/serialize.rs +++ /dev/null @@ -1,268 +0,0 @@ -use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; -use ark_ec::{CurveGroup, Group}; -use ark_ff::PrimeField; -use ark_r1cs_std::{ - groups::{CurveVar, GroupOpsBounds}, - ToConstraintFieldGadget, -}; -use ark_relations::r1cs::ConstraintSynthesizer; -use ark_relations::r1cs::ConstraintSystem; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError, Write}; -use std::marker::PhantomData; - -use super::{ - circuits::AugmentedFCircuit, CommittedInstance, Nova, NovaCycleFoldCircuit, ProverParams, - Witness, -}; -use crate::{ - arith::r1cs::extract_r1cs, - commitment::CommitmentScheme, - folding::circuits::{CF1, CF2}, - frontend::FCircuit, -}; - -impl CanonicalSerialize - for Nova -where - C1: CurveGroup, - C2: CurveGroup, - FC: FCircuit, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, - for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, - for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, - GC1: CurveVar::ScalarField>, - GC1: ToConstraintFieldGadget<::ScalarField>, - GC2: CurveVar::BaseField>, -{ - fn serialize_with_mode( - &self, - mut writer: W, - compress: ark_serialize::Compress, - ) -> Result<(), ark_serialize::SerializationError> { - self.pp_hash.serialize_with_mode(&mut writer, compress)?; - self.i.serialize_with_mode(&mut writer, compress)?; - 
self.z_0.serialize_with_mode(&mut writer, compress)?; - self.z_i.serialize_with_mode(&mut writer, compress)?; - self.w_i.serialize_with_mode(&mut writer, compress)?; - self.u_i.serialize_with_mode(&mut writer, compress)?; - self.W_i.serialize_with_mode(&mut writer, compress)?; - self.U_i.serialize_with_mode(&mut writer, compress)?; - self.cf_W_i.serialize_with_mode(&mut writer, compress)?; - self.cf_U_i.serialize_with_mode(&mut writer, compress) - } - - fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { - self.pp_hash.serialized_size(compress) - + self.i.serialized_size(compress) - + self.z_0.serialized_size(compress) - + self.z_i.serialized_size(compress) - + self.w_i.serialized_size(compress) - + self.u_i.serialized_size(compress) - + self.W_i.serialized_size(compress) - + self.U_i.serialized_size(compress) - + self.cf_W_i.serialized_size(compress) - + self.cf_U_i.serialized_size(compress) - } - - fn serialize_compressed( - &self, - writer: W, - ) -> Result<(), ark_serialize::SerializationError> { - self.serialize_with_mode(writer, ark_serialize::Compress::Yes) - } - - fn compressed_size(&self) -> usize { - self.serialized_size(ark_serialize::Compress::Yes) - } - - fn serialize_uncompressed( - &self, - writer: W, - ) -> Result<(), ark_serialize::SerializationError> { - self.serialize_with_mode(writer, ark_serialize::Compress::No) - } - - fn uncompressed_size(&self) -> usize { - self.serialized_size(ark_serialize::Compress::No) - } -} - -// Note that we can't derive or implement `CanonicalDeserialize` directly. -// This is because `CurveVar` notably does not implement the `Sync` trait. -impl Nova -where - C1: CurveGroup, - C2: CurveGroup, - FC: FCircuit, Params = ()>, - CS1: CommitmentScheme, - CS2: CommitmentScheme, - ::BaseField: PrimeField, - ::BaseField: PrimeField, - ::ScalarField: Absorb, - ::ScalarField: Absorb, - C1: CurveGroup, - for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>, - for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>, - GC1: CurveVar::ScalarField>, - GC1: ToConstraintFieldGadget<::ScalarField>, - GC2: CurveVar>, - GC2: ToConstraintFieldGadget<::BaseField>, -{ - pub fn deserialize_nova( - mut reader: R, - compress: ark_serialize::Compress, - validate: ark_serialize::Validate, - prover_params: ProverParams, - poseidon_config: PoseidonConfig, - ) -> Result { - let pp_hash = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?; - let i = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?; - let z_0 = Vec::::deserialize_with_mode(&mut reader, compress, validate)?; - let z_i = Vec::::deserialize_with_mode(&mut reader, compress, validate)?; - let w_i = Witness::::deserialize_with_mode(&mut reader, compress, validate)?; - let u_i = CommittedInstance::::deserialize_with_mode(&mut reader, compress, validate)?; - let W_i = Witness::::deserialize_with_mode(&mut reader, compress, validate)?; - let U_i = CommittedInstance::::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_W_i = Witness::::deserialize_with_mode(&mut reader, compress, validate)?; - let cf_U_i = - CommittedInstance::::deserialize_with_mode(&mut reader, compress, validate)?; - - let f_circuit = FC::new(()).unwrap(); - let cs = ConstraintSystem::::new_ref(); - let cs2 = ConstraintSystem::::new_ref(); - let augmented_F_circuit = - AugmentedFCircuit::::empty(&poseidon_config, f_circuit.clone()); - let cf_circuit = NovaCycleFoldCircuit::::empty(); - - augmented_F_circuit - .generate_constraints(cs.clone()) - .map_err(|_| 
SerializationError::InvalidData)?; - cs.finalize(); - let cs = cs.into_inner().ok_or(SerializationError::InvalidData)?; - let r1cs = extract_r1cs::(&cs); - - cf_circuit - .generate_constraints(cs2.clone()) - .map_err(|_| SerializationError::InvalidData)?; - cs2.finalize(); - let cs2 = cs2.into_inner().ok_or(SerializationError::InvalidData)?; - let cf_r1cs = extract_r1cs::(&cs2); - Ok(Nova { - _gc1: PhantomData, - _c2: PhantomData, - _gc2: PhantomData, - r1cs, - cf_r1cs, - poseidon_config, - cs_pp: prover_params.cs_pp, - cf_cs_pp: prover_params.cf_cs_pp, - F: f_circuit, - pp_hash, - i, - z_0, - z_i, - w_i, - u_i, - W_i, - U_i, - cf_W_i, - cf_U_i, - }) - } -} - -#[cfg(test)] -pub mod tests { - use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; - use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; - use ark_serialize::{CanonicalSerialize, Compress, Validate}; - use std::{fs, io::Write}; - - use crate::{ - commitment::{kzg::KZG, pedersen::Pedersen}, - folding::nova::{Nova, PreprocessorParam}, - frontend::{utils::CubicFCircuit, FCircuit}, - transcript::poseidon::poseidon_canonical_config, - FoldingScheme, - }; - - #[test] - fn test_serde_nova() { - let mut rng = ark_std::test_rng(); - let poseidon_config = poseidon_canonical_config::(); - let F_circuit = CubicFCircuit::::new(()).unwrap(); - - // Initialize nova and make multiple `prove_step()` - type N = Nova< - Projective, - GVar, - Projective2, - GVar2, - CubicFCircuit, - KZG<'static, Bn254>, - Pedersen, - false, - >; - let prep_param = PreprocessorParam::new(poseidon_config.clone(), F_circuit); - let nova_params = N::preprocess(&mut rng, &prep_param).unwrap(); - - let z_0 = vec![Fr::from(3_u32)]; - let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); - - let num_steps: usize = 3; - for _ in 0..num_steps { - nova.prove_step(&mut rng, vec![], None).unwrap(); - } - - let mut writer = vec![]; - assert!(nova - .serialize_with_mode(&mut writer, ark_serialize::Compress::No) - .is_ok()); - - let mut file = fs::OpenOptions::new() - .create(true) - .write(true) - .open("./nova.serde") - .unwrap(); - - file.write_all(&writer).unwrap(); - - let bytes = fs::read("./nova.serde").unwrap(); - - let mut deserialized_nova = Nova::< - Projective, - GVar, - Projective2, - GVar2, - CubicFCircuit, - KZG, - Pedersen, - false, - >::deserialize_nova( - bytes.as_slice(), - Compress::No, - Validate::No, - nova_params.0, // Nova's prover params - poseidon_config, - ) - .unwrap(); - - assert_eq!(nova.i, deserialized_nova.i); - - let num_steps: usize = 3; - for _ in 0..num_steps { - deserialized_nova - .prove_step(&mut rng, vec![], None) - .unwrap(); - nova.prove_step(&mut rng, vec![], None).unwrap(); - } - - assert_eq!(deserialized_nova.w_i, nova.w_i); - } -} diff --git a/folding-schemes/src/folding/nova/traits.rs b/folding-schemes/src/folding/nova/traits.rs index 62870d9..ce325f1 100644 --- a/folding-schemes/src/folding/nova/traits.rs +++ b/folding-schemes/src/folding/nova/traits.rs @@ -1,50 +1,139 @@ +use ark_crypto_primitives::sponge::Absorb; use ark_ec::CurveGroup; -use ark_std::{rand::RngCore, One, UniformRand}; +use ark_std::fmt::Debug; +use ark_std::{rand::RngCore, UniformRand}; use super::{CommittedInstance, Witness}; -use crate::arith::r1cs::{RelaxedR1CS, R1CS}; +use crate::arith::ArithSampler; +use crate::arith::{r1cs::R1CS, Arith}; +use crate::commitment::CommitmentScheme; +use crate::folding::circuits::CF1; +use crate::transcript::Transcript; use crate::Error; -impl RelaxedR1CS, 
CommittedInstance> for R1CS { - fn dummy_running_instance(&self) -> (Witness, CommittedInstance) { - let w_len = self.A.n_cols - 1 - self.l; - let w_dummy = Witness::::dummy(w_len, self.A.n_rows); - let u_dummy = CommittedInstance::::dummy(self.l); - (w_dummy, u_dummy) - } +/// Defines the NIFS (Non-Interactive Folding Scheme) trait, initially defined in +/// [Nova](https://eprint.iacr.org/2021/370.pdf), and its variants +/// [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) and +/// [Mova](https://eprint.iacr.org/2024/1220.pdf). +/// `H` specifies whether the NIFS will use a blinding factor. +pub trait NIFSTrait, const H: bool = false> { + type CommittedInstance: Debug + Clone + Absorb; + type Witness: Debug + Clone; + type ProverAux: Debug + Clone; // Prover's aux params + type VerifierAux: Debug + Clone; // Verifier's aux params - fn dummy_incoming_instance(&self) -> (Witness, CommittedInstance) { - self.dummy_running_instance() - } + fn new_witness(w: Vec, e_len: usize, rng: impl RngCore) -> Self::Witness; + fn new_instance( + w: &Self::Witness, + params: &CS::ProverParams, + x: Vec, + aux: Vec, // t_or_e in Ova, empty for Nova + ) -> Result; + + fn fold_witness( + r: C::ScalarField, + W: &Self::Witness, // running witness + w: &Self::Witness, // incoming witness + aux: &Self::ProverAux, + ) -> Result; - fn is_relaxed(_w: &Witness, u: &CommittedInstance) -> bool { - u.cmE != C::zero() || u.u != C::ScalarField::one() + /// computes the auxiliary parameters, eg. in Nova: (T, cmT), in Ova: T + fn compute_aux( + cs_prover_params: &CS::ProverParams, + r1cs: &R1CS, + W_i: &Self::Witness, + U_i: &Self::CommittedInstance, + w_i: &Self::Witness, + u_i: &Self::CommittedInstance, + ) -> Result<(Self::ProverAux, Self::VerifierAux), Error>; + + fn get_challenge>( + transcript: &mut T, + pp_hash: C::ScalarField, // public params hash + U_i: &Self::CommittedInstance, + u_i: &Self::CommittedInstance, + aux: &Self::VerifierAux, // ie. in Nova would be cmT, in Ova it's empty + ) -> Vec; + + /// NIFS.P. Notice that this method is implemented at the trait level, and depends on the other + /// two methods `fold_witness` and `verify`. + fn prove( + r: C::ScalarField, + W_i: &Self::Witness, // running witness + U_i: &Self::CommittedInstance, // running committed instance + w_i: &Self::Witness, // incoming witness + u_i: &Self::CommittedInstance, // incoming committed instance + aux_p: &Self::ProverAux, + aux_v: &Self::VerifierAux, + ) -> Result<(Self::Witness, Self::CommittedInstance), Error> { + let w = Self::fold_witness(r, W_i, w_i, aux_p)?; + let ci = Self::verify(r, U_i, u_i, aux_v); + Ok((w, ci)) } - fn extract_z(w: &Witness, u: &CommittedInstance) -> Vec { - [&[u.u][..], &u.x, &w.W].concat() + /// NIFS.V + fn verify( + // r comes from the transcript, and is a n-bit (N_BITS_CHALLENGE) element + r: C::ScalarField, + U_i: &Self::CommittedInstance, + u_i: &Self::CommittedInstance, + aux: &Self::VerifierAux, + ) -> Self::CommittedInstance; +} + +/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and +/// the committed instance is of type [`CommittedInstance`]. +/// +/// Due to the error terms `Witness.E` and `CommittedInstance.u`, R1CS here is +/// considered as a relaxed R1CS. +/// +/// One may wonder why we do not provide distinct structs for R1CS and relaxed +/// R1CS. +/// This is because both plain R1CS and relaxed R1CS have the same structure: +/// they are both represented by three matrices.
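The NIFSTrait above implements `prove` once, at the trait level, in terms of `fold_witness` and `verify`. A toy mirror of that pattern, with illustrative names only and plain u64 instead of field and curve types:

trait ToyNifs {
    type Witness;
    type Instance;

    fn fold_witness(r: u64, w1: &Self::Witness, w2: &Self::Witness) -> Self::Witness;
    fn verify(r: u64, u1: &Self::Instance, u2: &Self::Instance) -> Self::Instance;

    // provided method: the prover folds the witness and reuses the verifier's instance fold,
    // so both sides derive the same folded instance
    fn prove(
        r: u64,
        w1: &Self::Witness,
        u1: &Self::Instance,
        w2: &Self::Witness,
        u2: &Self::Instance,
    ) -> (Self::Witness, Self::Instance) {
        (Self::fold_witness(r, w1, w2), Self::verify(r, u1, u2))
    }
}

struct Toy;
impl ToyNifs for Toy {
    type Witness = u64;
    type Instance = u64;
    fn fold_witness(r: u64, w1: &u64, w2: &u64) -> u64 {
        w1 + r * w2
    }
    fn verify(r: u64, u1: &u64, u2: &u64) -> u64 {
        u1 + r * u2
    }
}

fn main() {
    // w' = 1 + 3*2 = 7, U' = 10 + 3*20 = 70
    assert_eq!(Toy::prove(3, &1, &10, &2, &20), (7, 70));
}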
+/// What makes them different is the error terms, which are not part of the R1CS +/// struct, but are part of the witness and committed instance. +/// +/// As a follow-up, one may further ask why not providing a trait for relaxed +/// R1CS and implement it for the `R1CS` struct, where the relaxed R1CS trait +/// has methods for relaxed satisfiability check, while the `Arith` trait that +/// `R1CS` implements has methods for plain satisfiability check. +/// However, it would be more ideal if we have a single method that can smartly +/// choose the type of satisfiability check, which would make the code more +/// generic and easier to maintain. +/// +/// This is achieved thanks to the new design of the [`Arith`] trait, where we +/// can implement the trait for the same constraint system with different types +/// of witnesses and committed instances. +/// For R1CS, whether it is relaxed or not is now determined by the types of `W` +/// and `U`: the satisfiability check is relaxed if `W` and `U` are defined by +/// folding schemes, and plain if they are vectors of field elements. +impl Arith, CommittedInstance> for R1CS> { + type Evaluation = Vec>; + + fn eval_relation( + &self, + w: &Witness, + u: &CommittedInstance, + ) -> Result { + self.eval_at_z(&[&[u.u][..], &u.x, &w.W].concat()) } - fn check_error_terms( + fn check_evaluation( w: &Witness, _u: &CommittedInstance, - e: Vec, + e: Self::Evaluation, ) -> Result<(), Error> { - if w.E == e { - Ok(()) - } else { - Err(Error::NotSatisfied) - } + (w.E == e).then_some(()).ok_or(Error::NotSatisfied) } +} - fn sample( +impl ArithSampler, CommittedInstance> for R1CS> { + fn sample_witness_instance>( &self, params: &CS::ProverParams, mut rng: impl RngCore, - ) -> Result<(Witness, CommittedInstance), Error> - where - CS: crate::commitment::CommitmentScheme, - { + ) -> Result<(Witness, CommittedInstance), Error> { // Implements sampling a (committed) RelaxedR1CS // See construction 5 in https://eprint.iacr.org/2023/573.pdf let u = C::ScalarField::rand(&mut rng); @@ -61,16 +150,7 @@ impl RelaxedR1CS, CommittedInstance> for R1CS, CommittedInstance>>::compute_E( - &self.A, &self.B, &self.C, &z, &u, - )?; - - debug_assert!( - z.len() == self.A.n_cols, - "Length of z is {}, while A has {} columns.", - z.len(), - self.A.n_cols - ); + let E = self.eval_at_z(&z)?; let witness = Witness { E, rE, W, rW }; let mut cm_witness = witness.commit::(params, x)?; @@ -79,7 +159,7 @@ impl RelaxedR1CS, CommittedInstance> for R1CS, cmT: C1, ) -> Result { - let r_bits = ChallengeGadget::::get_challenge_native(sponge, pp_hash, U_i, u_i, cmT); + let r_bits = ChallengeGadget::>::get_challenge_native( + sponge, + pp_hash, + &U_i, + &u_i, + Some(&cmT), + ); C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).ok_or(Error::OutOfBounds) } @@ -133,15 +142,16 @@ where )?; // c. Compute fold - let (W_f, U_f) = NIFS::::fold_instances( - r, &nova.w_i, &nova.u_i, &nova.W_i, &nova.U_i, &T, cmT, - )?; + let (W_f, U_f) = + NIFS::::prove(r, &nova.w_i, &nova.u_i, &nova.W_i, &nova.U_i, &T, &cmT)?; // d. Store folding proof let pi = FoldingProof { cmT }; // 2. Sample a satisfying relaxed R1CS instance-witness pair (W_r, U_r) - let (W_r, U_r) = nova.r1cs.sample::(&nova.cs_pp, &mut rng)?; + let (W_r, U_r) = nova + .r1cs + .sample_witness_instance::(&nova.cs_pp, &mut rng)?; // 3. Fold the instance-witness pair (U_f, W_f) with (U_r, W_r) // a. Compute T @@ -158,15 +168,8 @@ where )?; // c. 
Compute fold - let (W_i_prime, _) = NIFS::::fold_instances( - r_2, - &W_f, - &U_f, - &W_r, - &U_r, - &T_i_prime, - cmT_i_prime, - )?; + let (W_i_prime, _) = + NIFS::::prove(r_2, &W_f, &U_f, &W_r, &U_r, &T_i_prime, &cmT_i_prime)?; // d. Store folding proof let pi_prime = FoldingProof { cmT: cmT_i_prime }; @@ -226,7 +229,7 @@ where // b. Check computed hashes are correct let mut sponge = PoseidonSponge::::new(poseidon_config); - let expected_u_i_x = proof.U_i.hash(&sponge, pp_hash, i, z_0, z_i); + let expected_u_i_x = proof.U_i.hash(&sponge, pp_hash, i, &z_0, &z_i); if expected_u_i_x != proof.u_i.x[0] { return Err(Error::zkIVCVerificationFail); } @@ -252,12 +255,7 @@ where )?; // b. Get the U_f instance - let U_f = NIFS::::fold_committed_instance( - r, - &proof.u_i, - &proof.U_i, - &proof.pi.cmT, - ); + let U_f = NIFS::::verify(r, &proof.u_i, &proof.U_i, &proof.pi.cmT); // 4. Obtain the U^{\prime}_i folded instance // a. Compute folding challenge @@ -270,18 +268,13 @@ where )?; // b. Compute fold - let U_i_prime = NIFS::::fold_committed_instance( - r_2, - &U_f, - &proof.U_r, - &proof.pi_prime.cmT, - ); + let U_i_prime = NIFS::::verify(r_2, &U_f, &proof.U_r, &proof.pi_prime.cmT); // 5. Check that W^{\prime}_i is a satisfying witness - r1cs.check_relaxed_relation(&proof.W_i_prime, &U_i_prime)?; + r1cs.check_relation(&proof.W_i_prime, &U_i_prime)?; // 6. Check that the cyclefold instance-witness pair satisfies the cyclefold relaxed r1cs - cf_r1cs.check_relaxed_relation(&proof.cf_W_i, &proof.cf_U_i)?; + cf_r1cs.check_relation(&proof.cf_W_i, &proof.cf_U_i)?; Ok(()) } @@ -369,7 +362,7 @@ pub mod tests { ); let (_, sampled_committed_instance) = nova .r1cs - .sample::>(&nova.cs_pp, rng) + .sample_witness_instance::>(&nova.cs_pp, rng) .unwrap(); // proof verification fails with incorrect running instance @@ -406,7 +399,7 @@ pub mod tests { ); let (sampled_committed_witness, _) = nova .r1cs - .sample::>(&nova.cs_pp, rng) + .sample_witness_instance::>(&nova.cs_pp, rng) .unwrap(); // proof generation fails with incorrect running witness diff --git a/folding-schemes/src/folding/protogalaxy/circuits.rs b/folding-schemes/src/folding/protogalaxy/circuits.rs index 74a7332..4c30886 100644 --- a/folding-schemes/src/folding/protogalaxy/circuits.rs +++ b/folding-schemes/src/folding/protogalaxy/circuits.rs @@ -24,13 +24,16 @@ use super::{ CommittedInstance, CommittedInstanceVar, ProtoGalaxyCycleFoldConfig, }; use crate::{ - folding::circuits::{ - cyclefold::{ - CycleFoldChallengeGadget, CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, - CycleFoldConfig, NIFSFullGadget, + folding::{ + circuits::{ + cyclefold::{ + CycleFoldChallengeGadget, CycleFoldCommittedInstance, + CycleFoldCommittedInstanceVar, CycleFoldConfig, NIFSFullGadget, + }, + nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, + CF1, CF2, }, - nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar}, - CF1, CF2, + traits::{CommittedInstanceVarOps, Dummy}, }, frontend::FCircuit, transcript::{AbsorbNonNativeGadget, TranscriptVar}, @@ -44,13 +47,13 @@ impl FoldingGadget { pub fn fold_committed_instance( transcript: &mut impl TranscriptVar, // running instance - instance: &CommittedInstanceVar, + instance: &CommittedInstanceVar, // incoming instances - vec_instances: &[CommittedInstanceVar], + vec_instances: &[CommittedInstanceVar], // polys from P F_coeffs: Vec>, K_coeffs: Vec>, - ) -> Result<(CommittedInstanceVar, Vec>), SynthesisError> { + ) -> Result<(CommittedInstanceVar, Vec>), SynthesisError> { let t = 
instance.betas.len(); // absorb the committed instances @@ -132,13 +135,13 @@ impl AugmentationGadget { #[allow(clippy::type_complexity)] pub fn prepare_and_fold_primary( transcript: &mut impl TranscriptVar, S>, - U: CommittedInstanceVar, + U: CommittedInstanceVar, u_phis: Vec>, u_xs: Vec>>>, new_U_phi: NonNativeAffineVar, F_coeffs: Vec>>, K_coeffs: Vec>>, - ) -> Result<(CommittedInstanceVar, Vec>>), SynthesisError> { + ) -> Result<(CommittedInstanceVar, Vec>>), SynthesisError> { assert_eq!(u_phis.len(), u_xs.len()); // Prepare the incoming instances. @@ -247,7 +250,7 @@ pub struct AugmentedFCircuit< pub(super) external_inputs: Vec>, pub(super) F: FC, // F circuit pub(super) u_i_phi: C1, - pub(super) U_i: CommittedInstance, + pub(super) U_i: CommittedInstance, pub(super) U_i1_phi: C1, pub(super) F_coeffs: Vec>, pub(super) K_coeffs: Vec>, @@ -275,7 +278,7 @@ where d: usize, k: usize, ) -> Self { - let u_dummy = CommittedInstance::dummy_running(2, t); + let u_dummy = CommittedInstance::dummy((2, t)); let cf_u_dummy = CycleFoldCommittedInstance::dummy(ProtoGalaxyCycleFoldConfig::::IO_LEN); @@ -324,8 +327,8 @@ where let external_inputs = Vec::>>::new_witness(cs.clone(), || Ok(self.external_inputs))?; - let u_dummy = CommittedInstance::::dummy_running(2, self.U_i.betas.len()); - let U_i = CommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.U_i))?; + let u_dummy = CommittedInstance::::dummy((2, self.U_i.betas.len())); + let U_i = CommittedInstanceVar::::new_witness(cs.clone(), || Ok(self.U_i))?; let u_i_phi = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.u_i_phi))?; let U_i1_phi = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.U_i1_phi))?; let phi_stars = @@ -346,24 +349,12 @@ where // `transcript` is for challenge generation. let mut transcript = sponge.clone(); - // get z_{i+1} from the F circuit - let i_usize = self.i_usize; - let z_i1 = - self.F - .generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?; - let is_basecase = i.is_zero()?; // Primary Part // P.1. 
Compute u_i.x // u_i.x[0] = H(i, z_0, z_i, U_i) - let (u_i_x, _) = U_i.clone().hash( - &sponge, - pp_hash.clone(), - i.clone(), - z_0.clone(), - z_i.clone(), - )?; + let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?; // u_i.x[1] = H(cf_U_i) let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; @@ -380,21 +371,27 @@ where )?; // P.4.a compute and check the first output of F' + + // get z_{i+1} from the F circuit + let z_i1 = + self.F + .generate_step_constraints(cs.clone(), self.i_usize, z_i, external_inputs)?; + // Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot}) // Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1}) let (u_i1_x, _) = U_i1.clone().hash( &sponge, - pp_hash.clone(), - i + FpVar::>::one(), - z_0.clone(), - z_i1.clone(), + &pp_hash, + &(i + FpVar::>::one()), + &z_0, + &z_i1, )?; let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash( &sponge, - pp_hash.clone(), - FpVar::>::one(), - z_0.clone(), - z_i1.clone(), + &pp_hash, + &FpVar::>::one(), + &z_0, + &z_i1, )?; let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; diff --git a/folding-schemes/src/folding/protogalaxy/constants.rs b/folding-schemes/src/folding/protogalaxy/constants.rs new file mode 100644 index 0000000..cadbf10 --- /dev/null +++ b/folding-schemes/src/folding/protogalaxy/constants.rs @@ -0,0 +1,4 @@ +/// `RUNNING` indicates that the committed instance is a running instance. +pub const RUNNING: bool = true; +/// `INCOMING` indicates that the committed instance is an incoming instance. +pub const INCOMING: bool = false; diff --git a/folding-schemes/src/folding/protogalaxy/folding.rs b/folding-schemes/src/folding/protogalaxy/folding.rs index 71add9a..3aefee4 100644 --- a/folding-schemes/src/folding/protogalaxy/folding.rs +++ b/folding-schemes/src/folding/protogalaxy/folding.rs @@ -14,9 +14,7 @@ use super::utils::{all_powers, betas_star, exponential_powers, pow_i}; use super::ProtoGalaxyError; use super::{CommittedInstance, Witness}; -#[cfg(test)] -use crate::arith::r1cs::RelaxedR1CS; -use crate::arith::{r1cs::R1CS, Arith}; +use crate::arith::r1cs::R1CS; use crate::transcript::Transcript; use crate::utils::vec::*; use crate::Error; @@ -38,14 +36,14 @@ where transcript: &mut impl Transcript, r1cs: &R1CS, // running instance - instance: &CommittedInstance, + instance: &CommittedInstance, w: &Witness, // incoming instances - vec_instances: &[CommittedInstance], + vec_instances: &[CommittedInstance], vec_w: &[Witness], ) -> Result< ( - CommittedInstance, + CommittedInstance, Witness, Vec, // F_X coeffs Vec, // K_X coeffs @@ -97,7 +95,7 @@ where let delta = transcript.get_challenge(); let deltas = exponential_powers(delta, t); - let mut f_z = r1cs.eval_relation(&z)?; + let mut f_z = r1cs.eval_at_z(&z)?; if f_z.len() != m { return Err(Error::NotSameLength( "number of constraints in R1CS".to_string(), @@ -127,15 +125,18 @@ where // sanity check: check that the new randomized instance (the original instance but with // 'refreshed' randomness) satisfies the relation. 
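The ProtoGalaxy prover above builds its deltas/betas via `exponential_powers` and weights constraint evaluations with `pow_i`. A standalone sketch of the assumed semantics of those two helpers (these are not the crate's implementations, only an illustration over ark_pallas::Fr):

use ark_ff::One;
use ark_pallas::Fr;

// [β, β^2, β^4, ...]: β^(2^j) at position j
fn exponential_powers(beta: Fr, t: usize) -> Vec<Fr> {
    let mut out = Vec::with_capacity(t);
    let mut cur = beta;
    for _ in 0..t {
        out.push(cur);
        cur = cur * cur;
    }
    out
}

// pow_i(i, βs) = Π_{j : bit j of i is set} βs[j]
fn pow_i(i: usize, betas: &[Fr]) -> Fr {
    let mut acc = Fr::one();
    for (j, beta_j) in betas.iter().enumerate() {
        if (i >> j) & 1 == 1 {
            acc = acc * beta_j;
        }
    }
    acc
}

fn main() {
    let betas = exponential_powers(Fr::from(2u64), 3); // [2, 4, 16]
    // i = 5 = 0b101 selects betas[0] and betas[2]: 2 * 16 = 32
    assert_eq!(pow_i(5, &betas), Fr::from(32u64));
}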
#[cfg(test)] - r1cs.check_relaxed_relation( - w, - &CommittedInstance { - phi: instance.phi, - betas: betas_star.clone(), - e: F_alpha, - x: instance.x.clone(), - }, - )?; + { + use crate::arith::Arith; + r1cs.check_relation( + w, + &CommittedInstance::<_, true> { + phi: instance.phi, + betas: betas_star.clone(), + e: F_alpha, + x: instance.x.clone(), + }, + )?; + } let zs: Vec> = std::iter::once(z.clone()) .chain( @@ -178,7 +179,7 @@ where inner[j] += Lh * zj; } } - let f_ev = r1cs.eval_relation(&inner)?; + let f_ev = r1cs.eval_at_z(&inner)?; G_evals[hi] = cfg_into_iter!(f_ev) .enumerate() @@ -253,13 +254,13 @@ where pub fn verify( transcript: &mut impl Transcript, // running instance - instance: &CommittedInstance, + instance: &CommittedInstance, // incoming instances - vec_instances: &[CommittedInstance], + vec_instances: &[CommittedInstance], // polys from P F_coeffs: Vec, K_coeffs: Vec, - ) -> Result, Error> { + ) -> Result, Error> { let t = instance.betas.len(); // absorb the committed instances @@ -395,6 +396,7 @@ pub mod tests { use ark_std::{rand::Rng, UniformRand}; use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z_split}; + use crate::arith::Arith; use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; use crate::transcript::poseidon::poseidon_canonical_config; @@ -419,9 +421,9 @@ pub mod tests { k: usize, ) -> ( Witness, - CommittedInstance, + CommittedInstance, Vec>, - Vec>, + Vec>, ) { let mut rng = ark_std::test_rng(); @@ -439,7 +441,7 @@ pub mod tests { r_w: C::ScalarField::zero(), }; let phi = Pedersen::::commit(&pedersen_params, &witness.w, &witness.r_w).unwrap(); - let instance = CommittedInstance:: { + let instance = CommittedInstance:: { phi, betas: betas.clone(), e: C::ScalarField::zero(), @@ -447,7 +449,7 @@ pub mod tests { }; // same for the other instances let mut witnesses: Vec> = Vec::new(); - let mut instances: Vec> = Vec::new(); + let mut instances: Vec> = Vec::new(); #[allow(clippy::needless_range_loop)] for _ in 0..k { let (_, x_i, w_i) = get_test_z_split::(rng.gen::() as usize); @@ -457,7 +459,7 @@ pub mod tests { }; let phi_i = Pedersen::::commit(&pedersen_params, &witness_i.w, &witness_i.r_w).unwrap(); - let instance_i = CommittedInstance:: { + let instance_i = CommittedInstance:: { phi: phi_i, betas: vec![], e: C::ScalarField::zero(), @@ -509,7 +511,7 @@ pub mod tests { assert!(!folded_instance.e.is_zero()); // check that the folded instance satisfies the relation - r1cs.check_relaxed_relation(&folded_witness, &folded_instance) + r1cs.check_relation(&folded_witness, &folded_instance) .unwrap(); } @@ -558,7 +560,7 @@ pub mod tests { assert!(!folded_instance.e.is_zero()); // check that the folded instance satisfies the relation - r1cs.check_relaxed_relation(&folded_witness, &folded_instance) + r1cs.check_relation(&folded_witness, &folded_instance) .unwrap(); running_witness = folded_witness; diff --git a/folding-schemes/src/folding/protogalaxy/mod.rs b/folding-schemes/src/folding/protogalaxy/mod.rs index 9ee44de..d17655d 100644 --- a/folding-schemes/src/folding/protogalaxy/mod.rs +++ b/folding-schemes/src/folding/protogalaxy/mod.rs @@ -1,27 +1,32 @@ /// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) use ark_crypto_primitives::sponge::{ - constraints::{AbsorbGadget, CryptographicSpongeVar}, - poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, + poseidon::{PoseidonConfig, PoseidonSponge}, Absorb, CryptographicSponge, }; use ark_ec::{CurveGroup, Group}; use ark_ff::{BigInteger, 
PrimeField}; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, - fields::fp::FpVar, + eq::EqGadget, + fields::{fp::FpVar, FieldVar}, groups::{CurveVar, GroupOpsBounds}, R1CSVar, ToConstraintFieldGadget, }; use ark_relations::r1cs::{ ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, }; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Valid}; use ark_std::{ borrow::Borrow, cmp::max, fmt::Debug, log2, marker::PhantomData, rand::RngCore, One, Zero, }; +use constants::{INCOMING, RUNNING}; use num_bigint::BigUint; use crate::{ - arith::r1cs::{extract_r1cs, extract_w_x, RelaxedR1CS, R1CS}, + arith::{ + r1cs::{extract_r1cs, extract_w_x, R1CS}, + Arith, + }, commitment::CommitmentScheme, folding::circuits::{ cyclefold::{ @@ -32,11 +37,13 @@ use crate::{ CF1, CF2, }, frontend::{utils::DummyCircuit, FCircuit}, + transcript::poseidon::poseidon_canonical_config, utils::{get_cm_coordinates, pp_hash}, Error, FoldingScheme, }; pub mod circuits; +pub mod constants; pub mod folding; pub mod traits; pub(crate) mod utils; @@ -44,6 +51,10 @@ pub(crate) mod utils; use circuits::AugmentedFCircuit; use folding::Folding; +use super::traits::{ + CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps, WitnessVarOps, +}; + /// Configuration for ProtoGalaxy's CycleFold circuit pub struct ProtoGalaxyCycleFoldConfig { _c: PhantomData, @@ -60,66 +71,68 @@ impl CycleFoldConfig for ProtoGalaxyCycleFoldConfig { /// in ProtoGalaxy instances. pub type ProtoGalaxyCycleFoldCircuit = CycleFoldCircuit, GC>; -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct CommittedInstance { +/// The committed instance of ProtoGalaxy. +/// +/// We use `TYPE` to distinguish between incoming and running instances, as +/// they have slightly different structures (e.g., length of `betas`) and +/// behaviors (e.g., in satisfiability checks). +#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +pub struct CommittedInstance { phi: C, betas: Vec, e: C::ScalarField, x: Vec, } -impl CommittedInstance { - pub fn dummy_running(io_len: usize, t: usize) -> Self { +impl Dummy<(usize, usize)> for CommittedInstance { + fn dummy((io_len, t): (usize, usize)) -> Self { + if TYPE == INCOMING { + assert_eq!(t, 0); + } Self { phi: C::zero(), - betas: vec![C::ScalarField::zero(); t], - e: C::ScalarField::zero(), - x: vec![C::ScalarField::zero(); io_len], + betas: vec![Zero::zero(); t], + e: Zero::zero(), + x: vec![Zero::zero(); io_len], } } +} - pub fn dummy_incoming(io_len: usize) -> Self { - Self::dummy_running(io_len, 0) +impl Dummy<&R1CS>> for CommittedInstance { + fn dummy(r1cs: &R1CS>) -> Self { + let t = if TYPE == RUNNING { + log2(r1cs.num_constraints()) as usize + } else { + 0 + }; + Self::dummy((r1cs.num_public_inputs(), t)) } } -impl CommittedInstance -where - C::ScalarField: Absorb, - C::BaseField: PrimeField, -{ - /// hash implements the committed instance hash compatible with the gadget implemented in - /// CommittedInstanceVar.hash. - /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the - /// `CommittedInstance`. 
- pub fn hash( - &self, - sponge: &PoseidonSponge, - pp_hash: C::ScalarField, - i: C::ScalarField, - z_0: Vec, - z_i: Vec, - ) -> C::ScalarField { - let mut sponge = sponge.clone(); - sponge.absorb(&pp_hash); - sponge.absorb(&i); - sponge.absorb(&z_0); - sponge.absorb(&z_i); - sponge.absorb(&self); - sponge.squeeze_field_elements(1)[0] +impl CommittedInstanceOps for CommittedInstance { + type Var = CommittedInstanceVar; + + fn get_commitments(&self) -> Vec { + vec![self.phi] + } + + fn is_incoming(&self) -> bool { + TYPE == INCOMING } } #[derive(Clone, Debug)] -pub struct CommittedInstanceVar { +pub struct CommittedInstanceVar { phi: NonNativeAffineVar, betas: Vec>, e: FpVar, x: Vec>, } -impl AllocVar, C::ScalarField> for CommittedInstanceVar { - fn new_variable>>( +impl AllocVar, C::ScalarField> + for CommittedInstanceVar +{ + fn new_variable>>( cs: impl Into>, f: impl FnOnce() -> Result, mode: AllocationMode, @@ -132,15 +145,19 @@ impl AllocVar, C::ScalarField> for Committed Ok(Self { phi: NonNativeAffineVar::new_variable(cs.clone(), || Ok(u.phi), mode)?, betas: Vec::new_variable(cs.clone(), || Ok(u.betas.clone()), mode)?, - e: FpVar::new_variable(cs.clone(), || Ok(u.e), mode)?, + e: if TYPE == RUNNING { + FpVar::new_variable(cs.clone(), || Ok(u.e), mode)? + } else { + FpVar::zero() + }, x: Vec::new_variable(cs.clone(), || Ok(u.x.clone()), mode)?, }) }) } } -impl R1CSVar for CommittedInstanceVar { - type Value = CommittedInstance; +impl R1CSVar for CommittedInstanceVar { + type Value = CommittedInstance; fn cs(&self) -> ConstraintSystemRef { self.phi @@ -164,38 +181,35 @@ impl R1CSVar for CommittedInstanceVar { } } -impl CommittedInstanceVar -where - C::ScalarField: Absorb, - C::BaseField: PrimeField, -{ - /// hash implements the committed instance hash compatible with the native implementation from - /// CommittedInstance.hash. - /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the - /// `CommittedInstance`. - /// Additionally it returns the vector of the field elements from the self parameters, so they - /// can be reused in other gadgets avoiding recalculating (reconstraining) them. - #[allow(clippy::type_complexity)] - pub fn hash( - self, - sponge: &PoseidonSpongeVar>, - pp_hash: FpVar>, - i: FpVar>, - z_0: Vec>>, - z_i: Vec>>, - ) -> Result<(FpVar>, Vec>>), SynthesisError> { - let mut sponge = sponge.clone(); - let U_vec = self.to_sponge_field_elements()?; - sponge.absorb(&pp_hash)?; - sponge.absorb(&i)?; - sponge.absorb(&z_0)?; - sponge.absorb(&z_i)?; - sponge.absorb(&U_vec)?; - Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) +impl CommittedInstanceVarOps for CommittedInstanceVar { + type PointVar = NonNativeAffineVar; + + fn get_commitments(&self) -> Vec { + vec![self.phi.clone()] + } + + fn get_public_inputs(&self) -> &[FpVar>] { + &self.x + } + + fn enforce_incoming(&self) -> Result<(), SynthesisError> { + // We don't need to check if `self` is an incoming instance in-circuit, + // because incoming instances and running instances already have + // different types of `e` (constant vs witness) when we allocate them + // in-circuit. 
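The `TYPE` const generic introduced in this hunk lets one struct model both running and incoming ProtoGalaxy instances. A toy sketch of the pattern, with u64 fields instead of field and curve elements; RUNNING/INCOMING mirror the new constants module, and the struct name is illustrative:

pub const RUNNING: bool = true;
pub const INCOMING: bool = false;

#[derive(Debug, Clone, PartialEq)]
struct ToyInstance<const TYPE: bool> {
    betas: Vec<u64>,
    e: u64,
    x: Vec<u64>,
}

impl<const TYPE: bool> ToyInstance<TYPE> {
    // incoming instances carry no betas and a zero error term; running ones carry t betas
    fn dummy(io_len: usize, t: usize) -> Self {
        let t = if TYPE == RUNNING { t } else { 0 };
        Self { betas: vec![0; t], e: 0, x: vec![0; io_len] }
    }

    fn is_incoming(&self) -> bool {
        TYPE == INCOMING
    }
}

fn main() {
    let running = ToyInstance::<RUNNING>::dummy(2, 4);
    let incoming = ToyInstance::<INCOMING>::dummy(2, 4);
    assert_eq!(running.betas.len(), 4);
    assert_eq!(running.e, 0);
    assert!(incoming.betas.is_empty() && incoming.is_incoming());
    assert_eq!(incoming.x.len(), 2);
}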
+ (TYPE == INCOMING) + .then_some(()) + .ok_or(SynthesisError::Unsatisfiable) + } + + fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> { + self.betas.enforce_equal(&other.betas)?; + self.e.enforce_equal(&other.e)?; + self.x.enforce_equal(&other.x) } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] pub struct Witness { w: Vec, r_w: F, @@ -213,9 +227,9 @@ impl Witness { &self, params: &CS::ProverParams, x: Vec, - ) -> Result, crate::Error> { + ) -> Result, crate::Error> { let phi = CS::commit(params, &self.w, &self.r_w)?; - Ok(CommittedInstance { + Ok(CommittedInstance:: { phi, x, e: F::zero(), @@ -224,6 +238,53 @@ impl Witness { } } +impl Dummy<&R1CS> for Witness { + fn dummy(r1cs: &R1CS) -> Self { + Self { + w: vec![F::zero(); r1cs.num_witnesses()], + r_w: F::zero(), + } + } +} + +impl WitnessOps for Witness { + type Var = WitnessVar; + + fn get_openings(&self) -> Vec<(&[F], F)> { + vec![(&self.w, self.r_w)] + } +} + +/// In-circuit representation of the Witness associated to the CommittedInstance. +#[derive(Debug, Clone)] +pub struct WitnessVar { + pub W: Vec>, + pub rW: FpVar, +} + +impl AllocVar, F> for WitnessVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|val| { + let cs = cs.into(); + + let W = Vec::new_variable(cs.clone(), || Ok(val.borrow().w.to_vec()), mode)?; + let rW = FpVar::new_variable(cs.clone(), || Ok(val.borrow().r_w), mode)?; + + Ok(Self { W, rW }) + }) + } +} + +impl WitnessVarOps for WitnessVar { + fn get_openings(&self) -> Vec<(&[FpVar], FpVar)> { + vec![(&self.W, self.rW.clone())] + } +} + #[derive(Debug, thiserror::Error, PartialEq)] pub enum ProtoGalaxyError { #[error("The remainder from G(X)-F(α)*L_0(X)) / Z(X) should be zero")] @@ -254,6 +315,68 @@ where /// Proving parameters of the underlying commitment scheme over C2 pub cf_cs_params: CS2::ProverParams, } +impl CanonicalSerialize for ProverParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + fn serialize_with_mode( + &self, + mut writer: W, + compress: ark_serialize::Compress, + ) -> Result<(), ark_serialize::SerializationError> { + self.cs_params.serialize_with_mode(&mut writer, compress)?; + self.cf_cs_params.serialize_with_mode(&mut writer, compress) + } + + fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { + self.cs_params.serialized_size(compress) + self.cf_cs_params.serialized_size(compress) + } +} +impl Valid for ProverParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + fn check(&self) -> Result<(), ark_serialize::SerializationError> { + self.poseidon_config.full_rounds.check()?; + self.poseidon_config.partial_rounds.check()?; + self.poseidon_config.alpha.check()?; + self.poseidon_config.ark.check()?; + self.poseidon_config.mds.check()?; + self.poseidon_config.rate.check()?; + self.poseidon_config.capacity.check()?; + self.cs_params.check()?; + self.cf_cs_params.check()?; + Ok(()) + } +} +impl CanonicalDeserialize for ProverParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + fn deserialize_with_mode( + mut reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + ) -> Result { + let cs_params = CS1::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?; + let cf_cs_params = + 
CS2::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?; + Ok(ProverParams { + poseidon_config: poseidon_canonical_config::(), + cs_params, + cf_cs_params, + }) + } +} /// Verification parameters for ProtoGalaxy-based IVC #[derive(Debug, Clone)] @@ -276,6 +399,40 @@ where pub cf_cs_vp: CS2::VerifierParams, } +impl Valid for VerifierParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + fn check(&self) -> Result<(), ark_serialize::SerializationError> { + self.cs_vp.check()?; + self.cf_cs_vp.check()?; + Ok(()) + } +} +impl CanonicalSerialize for VerifierParams +where + C1: CurveGroup, + C2: CurveGroup, + CS1: CommitmentScheme, + CS2: CommitmentScheme, +{ + fn serialize_with_mode( + &self, + mut writer: W, + compress: ark_serialize::Compress, + ) -> Result<(), ark_serialize::SerializationError> { + self.cs_vp.serialize_with_mode(&mut writer, compress)?; + self.cf_cs_vp.serialize_with_mode(&mut writer, compress) + } + + fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { + self.cs_vp.serialized_size(compress) + self.cf_cs_vp.serialized_size(compress) + } +} + impl VerifierParams where C1: CurveGroup, @@ -298,6 +455,23 @@ where } } +#[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)] +pub struct IVCProof +where + C1: CurveGroup, + C2: CurveGroup, +{ + pub i: C1::ScalarField, + pub z_0: Vec, + pub z_i: Vec, + pub W_i: Witness, + pub U_i: CommittedInstance, + pub w_i: Witness, + pub u_i: CommittedInstance, + pub cf_W_i: CycleFoldWitness, + pub cf_U_i: CycleFoldCommittedInstance, +} + /// Implements ProtoGalaxy+CycleFold's IVC, described in [ProtoGalaxy] and /// [CycleFold], following the FoldingScheme trait /// @@ -337,9 +511,9 @@ where pub z_i: Vec, /// ProtoGalaxy instances pub w_i: Witness, - pub u_i: CommittedInstance, + pub u_i: CommittedInstance, pub W_i: Witness, - pub U_i: CommittedInstance, + pub U_i: CommittedInstance, /// CycleFold running instance pub cf_W_i: CycleFoldWitness, @@ -472,10 +646,73 @@ where type PreprocessorParam = (PoseidonConfig>, FC); type ProverParam = ProverParams; type VerifierParam = VerifierParams; - type RunningInstance = (CommittedInstance, Witness); - type IncomingInstance = (CommittedInstance, Witness); - type MultiCommittedInstanceWithWitness = (CommittedInstance, Witness); + type RunningInstance = (CommittedInstance, Witness); + type IncomingInstance = (CommittedInstance, Witness); + type MultiCommittedInstanceWithWitness = + (CommittedInstance, Witness); type CFInstance = (CycleFoldCommittedInstance, CycleFoldWitness); + type IVCProof = IVCProof; + + fn pp_deserialize_with_mode( + reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + _fc_params: FC::Params, // FCircuit params + ) -> Result { + Ok(Self::ProverParam::deserialize_with_mode( + reader, compress, validate, + )?) + } + + fn vp_deserialize_with_mode( + mut reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + fc_params: FC::Params, + ) -> Result { + let poseidon_config = poseidon_canonical_config::(); + + // generate the r1cs & cf_r1cs needed for the VerifierParams. In this way we avoid needing + // to serialize them, saving significant space in the VerifierParams serialized size. 
+ + let f_circuit = FC::new(fc_params)?; + let k = 1; + let d = 2; + let t = Self::compute_t(&poseidon_config, &f_circuit, d, k)?; + + // main circuit R1CS: + let cs = ConstraintSystem::::new_ref(); + let augmented_F_circuit = AugmentedFCircuit::::empty( + &poseidon_config, + f_circuit.clone(), + t, + d, + k, + ); + augmented_F_circuit.generate_constraints(cs.clone())?; + cs.finalize(); + let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let r1cs = extract_r1cs::(&cs); + + // CycleFold circuit R1CS + let cs2 = ConstraintSystem::::new_ref(); + let cf_circuit = ProtoGalaxyCycleFoldCircuit::::empty(); + cf_circuit.generate_constraints(cs2.clone())?; + cs2.finalize(); + let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; + let cf_r1cs = extract_r1cs::(&cs2); + + let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; + let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; + + Ok(Self::VerifierParam { + poseidon_config, + r1cs, + cf_r1cs, + cs_vp, + cf_cs_vp, + }) + } fn preprocess( mut rng: impl RngCore, @@ -540,9 +777,9 @@ where let pp_hash = vp.pp_hash()?; // setup the dummy instances - let (W_dummy, U_dummy) = vp.r1cs.dummy_running_instance(); - let (w_dummy, u_dummy) = vp.r1cs.dummy_incoming_instance(); - let (cf_W_dummy, cf_U_dummy) = vp.cf_r1cs.dummy_running_instance(); + let (w_dummy, u_dummy) = vp.r1cs.dummy_witness_instance(); + let (W_dummy, U_dummy) = vp.r1cs.dummy_witness_instance(); + let (cf_W_dummy, cf_U_dummy) = vp.cf_r1cs.dummy_witness_instance(); // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // R1CS that we're working with. @@ -636,8 +873,8 @@ where &sponge, self.pp_hash, self.i + C1::ScalarField::one(), - self.z_0.clone(), - z_i1.clone(), + &self.z_0, + &z_i1, ); // `cf_U_{i+1}` (i.e., `cf_U_1`) is fixed to `cf_U_dummy`, so we // just use `self.cf_U_i = cf_U_0 = cf_U_dummy`. 
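Note (illustrative sketch, not part of the patch): the `vp_deserialize_with_mode` flow above is meant to let the verifier ship only the commitment-scheme verifier params and rebuild `r1cs`/`cf_r1cs` from the circuit params on load; `PG`, `pg_params`, `fc_params` and `ivc_proof` below are placeholders for the concrete ProtoGalaxy type alias, its `(pp, vp)` params, the FCircuit params and a previously produced IVC proof.

    use ark_serialize::{CanonicalSerialize, Compress, Validate};

    // serialize the verifier params (only cs_vp & cf_cs_vp get written)
    let mut vp_bytes = vec![];
    pg_params.1.serialize_compressed(&mut vp_bytes).unwrap();

    // later / on another machine: recover them, regenerating r1cs & cf_r1cs
    // from the FCircuit params instead of deserializing them
    let vp = PG::vp_deserialize_with_mode(
        vp_bytes.as_slice(),
        Compress::Yes,
        Validate::Yes,
        fc_params, // FCircuit params
    )
    .unwrap();
    PG::verify(vp, ivc_proof).unwrap();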
@@ -744,8 +981,8 @@ where &sponge, self.pp_hash, self.i + C1::ScalarField::one(), - self.z_0.clone(), - z_i1.clone(), + &self.z_0, + &z_i1, ); cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); @@ -788,10 +1025,11 @@ where )?, U_i1 ); - self.cf_r1cs.check_tight_relation(&_cf1_w_i, &cf1_u_i)?; - self.cf_r1cs.check_tight_relation(&_cf2_w_i, &cf2_u_i)?; - self.cf_r1cs - .check_relaxed_relation(&self.cf_W_i, &self.cf_U_i)?; + cf1_u_i.check_incoming()?; + cf2_u_i.check_incoming()?; + self.cf_r1cs.check_relation(&_cf1_w_i, &cf1_u_i)?; + self.cf_r1cs.check_relation(&_cf2_w_i, &cf2_u_i)?; + self.cf_r1cs.check_relation(&self.cf_W_i, &self.cf_U_i)?; } self.W_i = W_i1; @@ -826,8 +1064,9 @@ where #[cfg(test)] { - self.r1cs.check_tight_relation(&self.w_i, &self.u_i)?; - self.r1cs.check_relaxed_relation(&self.W_i, &self.U_i)?; + self.u_i.check_incoming()?; + self.r1cs.check_relation(&self.w_i, &self.u_i)?; + self.r1cs.check_relation(&self.W_i, &self.U_i)?; } Ok(()) @@ -836,35 +1075,79 @@ where fn state(&self) -> Vec { self.z_i.clone() } - fn instances( - &self, - ) -> ( - Self::RunningInstance, - Self::IncomingInstance, - Self::CFInstance, - ) { - ( - (self.U_i.clone(), self.W_i.clone()), - (self.u_i.clone(), self.w_i.clone()), - (self.cf_U_i.clone(), self.cf_W_i.clone()), - ) + + fn ivc_proof(&self) -> Self::IVCProof { + Self::IVCProof { + i: self.i, + z_0: self.z_0.clone(), + z_i: self.z_i.clone(), + W_i: self.W_i.clone(), + U_i: self.U_i.clone(), + w_i: self.w_i.clone(), + u_i: self.u_i.clone(), + cf_W_i: self.cf_W_i.clone(), + cf_U_i: self.cf_U_i.clone(), + } + } + + fn from_ivc_proof( + ivc_proof: Self::IVCProof, + fcircuit_params: FC::Params, + params: (Self::ProverParam, Self::VerifierParam), + ) -> Result { + let IVCProof { + i, + z_0, + z_i, + W_i, + U_i, + w_i, + u_i, + cf_W_i, + cf_U_i, + } = ivc_proof; + let (pp, vp) = params; + + let f_circuit = FC::new(fcircuit_params).unwrap(); + + Ok(Self { + _gc1: PhantomData, + _c2: PhantomData, + _gc2: PhantomData, + r1cs: vp.r1cs.clone(), + cf_r1cs: vp.cf_r1cs.clone(), + poseidon_config: pp.poseidon_config, + cs_params: pp.cs_params, + cf_cs_params: pp.cf_cs_params, + F: f_circuit, + pp_hash: vp.pp_hash()?, + i, + z_0, + z_i, + w_i, + u_i, + W_i, + U_i, + cf_W_i, + cf_U_i, + }) } /// Implements IVC.V of ProtoGalaxy+CycleFold - fn verify( - vp: Self::VerifierParam, - z_0: Vec, // initial state - z_i: Vec, // last state - num_steps: C1::ScalarField, - running_instance: Self::RunningInstance, - incoming_instance: Self::IncomingInstance, - cyclefold_instance: Self::CFInstance, - ) -> Result<(), Error> { - let sponge = PoseidonSponge::::new(&vp.poseidon_config); + fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error> { + let Self::IVCProof { + i: num_steps, + z_0, + z_i, + W_i, + U_i, + w_i, + u_i, + cf_W_i, + cf_U_i, + } = ivc_proof; - let (U_i, W_i) = running_instance; - let (u_i, w_i) = incoming_instance; - let (cf_U_i, cf_W_i) = cyclefold_instance; + let sponge = PoseidonSponge::::new(&vp.poseidon_config); if u_i.x.len() != 2 || U_i.x.len() != 2 { return Err(Error::IVCVerificationFail); @@ -874,7 +1157,7 @@ where // check that u_i's output points to the running instance // u_i.X[0] == H(i, z_0, z_i, U_i) - let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone()); + let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i); if expected_u_i_x != u_i.x[0] { return Err(Error::IVCVerificationFail); } @@ -884,13 +1167,15 @@ where return Err(Error::IVCVerificationFail); } - // check R1CS 
satisfiability - vp.r1cs.check_tight_relation(&w_i, &u_i)?; + // check R1CS satisfiability, which is equivalent to checking if `u_i` + // is an incoming instance and if `w_i` and `u_i` satisfy RelaxedR1CS + u_i.check_incoming()?; + vp.r1cs.check_relation(&w_i, &u_i)?; // check RelaxedR1CS satisfiability - vp.r1cs.check_relaxed_relation(&W_i, &U_i)?; + vp.r1cs.check_relation(&W_i, &U_i)?; // check CycleFold RelaxedR1CS satisfiability - vp.cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i)?; + vp.cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?; Ok(()) } @@ -1003,17 +1288,8 @@ mod tests { } assert_eq!(Fr::from(num_steps as u32), protogalaxy.i); - let (running_instance, incoming_instance, cyclefold_instance) = protogalaxy.instances(); - PG::::verify( - params.1, - z_0, - protogalaxy.z_i, - protogalaxy.i, - running_instance, - incoming_instance, - cyclefold_instance, - ) - .unwrap(); + let ivc_proof = protogalaxy.ivc_proof(); + PG::::verify(params.1, ivc_proof).unwrap(); } #[ignore] diff --git a/folding-schemes/src/folding/protogalaxy/traits.rs b/folding-schemes/src/folding/protogalaxy/traits.rs index d735f6e..51b2aa1 100644 --- a/folding-schemes/src/folding/protogalaxy/traits.rs +++ b/folding-schemes/src/folding/protogalaxy/traits.rs @@ -3,23 +3,25 @@ use ark_ec::CurveGroup; use ark_ff::PrimeField; use ark_r1cs_std::{fields::fp::FpVar, uint8::UInt8, ToConstraintFieldGadget}; use ark_relations::r1cs::SynthesisError; -use ark_std::{cfg_iter, log2, rand::RngCore, One, Zero}; +use ark_std::{cfg_into_iter, log2, One}; use rayon::prelude::*; -use super::{utils::pow_i, CommittedInstance, CommittedInstanceVar, Witness}; +use super::{constants::RUNNING, utils::pow_i, CommittedInstance, CommittedInstanceVar, Witness}; use crate::{ - arith::r1cs::{RelaxedR1CS, R1CS}, + arith::{r1cs::R1CS, Arith}, + folding::circuits::CF1, transcript::AbsorbNonNative, + utils::vec::is_zero_vec, Error, }; // Implements the trait for absorbing ProtoGalaxy's CommittedInstance. -impl Absorb for CommittedInstance +impl Absorb for CommittedInstance where C::ScalarField: Absorb, { - fn to_sponge_bytes(&self, _dest: &mut Vec) { - unimplemented!() + fn to_sponge_bytes(&self, dest: &mut Vec) { + C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest); } fn to_sponge_field_elements(&self, dest: &mut Vec) { @@ -33,9 +35,11 @@ where } // Implements the trait for absorbing ProtoGalaxy's CommittedInstanceVar in-circuit. -impl AbsorbGadget for CommittedInstanceVar { +impl AbsorbGadget + for CommittedInstanceVar +{ fn to_sponge_bytes(&self) -> Result>, SynthesisError> { - unimplemented!() + FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) } fn to_sponge_field_elements(&self) -> Result>, SynthesisError> { @@ -49,68 +53,92 @@ impl AbsorbGadget for CommittedInstanceVar { } } -impl RelaxedR1CS, CommittedInstance> - for R1CS +/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and +/// the committed instance is of type [`CommittedInstance`]. +/// +/// Due to the error term `CommittedInstance.e`, R1CS here is considered as a +/// relaxed R1CS. +/// +/// See `nova/traits.rs` for the rationale behind the design. 
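+///
+/// Concretely, `check_evaluation` accepts a `RUNNING` instance when the error
+/// term `e` equals the evaluation vector combined with `pow_i(betas)`, and an
+/// `INCOMING` instance when the evaluation vector is all zeroes.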
+impl Arith>, CommittedInstance> + for R1CS> { - fn dummy_running_instance(&self) -> (Witness, CommittedInstance) { - let w_len = self.A.n_cols - 1 - self.l; - let w_dummy = Witness::new(vec![C::ScalarField::zero(); w_len]); - let u_dummy = CommittedInstance::::dummy_running(self.l, log2(self.A.n_rows) as usize); - (w_dummy, u_dummy) - } + type Evaluation = Vec>; - fn dummy_incoming_instance(&self) -> (Witness, CommittedInstance) { - let w_len = self.A.n_cols - 1 - self.l; - let w_dummy = Witness::new(vec![C::ScalarField::zero(); w_len]); - let u_dummy = CommittedInstance::::dummy_incoming(self.l); - (w_dummy, u_dummy) - } - - fn is_relaxed(_w: &Witness, u: &CommittedInstance) -> bool { - u.e != C::ScalarField::zero() || !u.betas.is_empty() - } - - fn extract_z(w: &Witness, u: &CommittedInstance) -> Vec { - [&[C::ScalarField::one()][..], &u.x, &w.w].concat() + fn eval_relation( + &self, + w: &Witness>, + u: &CommittedInstance, + ) -> Result { + self.eval_at_z(&[&[C::ScalarField::one()][..], &u.x, &w.w].concat()) } - fn check_error_terms( + fn check_evaluation( _w: &Witness, - u: &CommittedInstance, + u: &CommittedInstance, e: Vec, ) -> Result<(), Error> { - if u.betas.len() != log2(e.len()) as usize { - return Err(Error::NotSameLength( - "instance.betas.len()".to_string(), - u.betas.len(), - "log2(e.len())".to_string(), - log2(e.len()) as usize, - )); - } - - let r = cfg_iter!(e) - .enumerate() - .map(|(i, e_i)| pow_i(i, &u.betas) * e_i) - .sum(); - if u.e == r { - Ok(()) + let ok = if TYPE == RUNNING { + if u.betas.len() != log2(e.len()) as usize { + return Err(Error::NotSameLength( + "instance.betas.len()".to_string(), + u.betas.len(), + "log2(e.len())".to_string(), + log2(e.len()) as usize, + )); + } + + u.e == cfg_into_iter!(e) + .enumerate() + .map(|(i, e_i)| pow_i(i, &u.betas) * e_i) + .sum::>() } else { - Err(Error::NotSatisfied) - } + is_zero_vec(&e) + }; + ok.then_some(()).ok_or(Error::NotSatisfied) } +} - fn sample( - &self, - _params: &CS::ProverParams, - _rng: impl RngCore, - ) -> Result<(Witness, CommittedInstance), Error> - where - CS: crate::commitment::CommitmentScheme, - { - // Sampling a random pair of witness and committed instance is required - // for the zero-knowledge layer for ProtoGalaxy, which is not supported - // yet. 
- // Tracking issue: https://github.com/privacy-scaling-explorations/sonobe/issues/82 - unimplemented!() +#[cfg(test)] +pub mod tests { + use super::*; + use ark_bn254::{Fr, G1Projective as Projective}; + use ark_r1cs_std::{alloc::AllocVar, R1CSVar}; + use ark_relations::r1cs::ConstraintSystem; + use ark_std::UniformRand; + use rand::Rng; + + /// test that checks the native CommittedInstance.to_sponge_{bytes,field_elements} + /// vs the R1CS constraints version + #[test] + pub fn test_committed_instance_to_sponge_preimage() { + let mut rng = ark_std::test_rng(); + + let t = rng.gen::() as usize; + let io_len = rng.gen::() as usize; + + let ci = CommittedInstance:: { + phi: Projective::rand(&mut rng), + betas: (0..t).map(|_| Fr::rand(&mut rng)).collect(), + e: Fr::rand(&mut rng), + x: (0..io_len).map(|_| Fr::rand(&mut rng)).collect(), + }; + + let bytes = ci.to_sponge_bytes_as_vec(); + let field_elements = ci.to_sponge_field_elements_as_vec(); + + let cs = ConstraintSystem::::new_ref(); + + let ciVar = + CommittedInstanceVar::::new_witness(cs.clone(), || Ok(ci.clone())) + .unwrap(); + let bytes_var = ciVar.to_sponge_bytes().unwrap(); + let field_elements_var = ciVar.to_sponge_field_elements().unwrap(); + + assert!(cs.is_satisfied().unwrap()); + + // check that the natively computed and in-circuit computed hashes match + assert_eq!(bytes_var.value().unwrap(), bytes); + assert_eq!(field_elements_var.value().unwrap(), field_elements); } } diff --git a/folding-schemes/src/folding/traits.rs b/folding-schemes/src/folding/traits.rs new file mode 100644 index 0000000..3890554 --- /dev/null +++ b/folding-schemes/src/folding/traits.rs @@ -0,0 +1,131 @@ +use ark_crypto_primitives::sponge::{ + constraints::{AbsorbGadget, CryptographicSpongeVar}, + poseidon::constraints::PoseidonSpongeVar, + Absorb, +}; +use ark_ec::CurveGroup; +use ark_ff::PrimeField; +use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, ToConstraintFieldGadget}; +use ark_relations::r1cs::SynthesisError; + +use crate::{transcript::Transcript, Error}; + +use super::circuits::CF1; + +pub trait CommittedInstanceOps { + /// The in-circuit representation of the committed instance. + type Var: AllocVar> + CommittedInstanceVarOps; + /// `hash` implements the committed instance hash compatible with the + /// in-circuit implementation from `CommittedInstanceVarOps::hash`. + /// + /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and + /// `U_i` is the committed instance `self`. + fn hash>>( + &self, + sponge: &T, + pp_hash: CF1, // public params hash + i: CF1, + z_0: &[CF1], + z_i: &[CF1], + ) -> CF1 + where + CF1: Absorb, + Self: Sized + Absorb, + { + let mut sponge = sponge.clone(); + sponge.absorb(&pp_hash); + sponge.absorb(&i); + sponge.absorb(&z_0); + sponge.absorb(&z_i); + sponge.absorb(&self); + sponge.squeeze_field_elements(1)[0] + } + + /// Returns the commitments contained in the committed instance. + fn get_commitments(&self) -> Vec; + + /// Returns `true` if the committed instance is an incoming instance, and + /// `false` if it is a running instance. + fn is_incoming(&self) -> bool; + + /// Checks if the committed instance is an incoming instance. 
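+    /// Returns `Error::NotIncomingCommittedInstance` otherwise.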
+ fn check_incoming(&self) -> Result<(), Error> { + self.is_incoming() + .then_some(()) + .ok_or(Error::NotIncomingCommittedInstance) + } +} + +pub trait CommittedInstanceVarOps { + type PointVar: ToConstraintFieldGadget>; + /// `hash` implements the in-circuit committed instance hash compatible with + /// the native implementation from `CommittedInstanceOps::hash`. + /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and + /// `U_i` is the committed instance `self`. + /// + /// Additionally it returns the in-circuit representation of the committed + /// instance `self` as a vector of field elements, so they can be reused in + /// other gadgets avoiding recalculating (reconstraining) them. + #[allow(clippy::type_complexity)] + fn hash( + &self, + sponge: &PoseidonSpongeVar>, + pp_hash: &FpVar>, + i: &FpVar>, + z_0: &[FpVar>], + z_i: &[FpVar>], + ) -> Result<(FpVar>, Vec>>), SynthesisError> + where + Self: AbsorbGadget>, + { + let mut sponge = sponge.clone(); + let U_vec = self.to_sponge_field_elements()?; + sponge.absorb(&pp_hash)?; + sponge.absorb(&i)?; + sponge.absorb(&z_0)?; + sponge.absorb(&z_i)?; + sponge.absorb(&U_vec)?; + Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec)) + } + + /// Returns the commitments contained in the committed instance. + fn get_commitments(&self) -> Vec; + + /// Returns the public inputs contained in the committed instance. + fn get_public_inputs(&self) -> &[FpVar>]; + + /// Generates constraints to enforce that the committed instance is an + /// incoming instance. + fn enforce_incoming(&self) -> Result<(), SynthesisError>; + + /// Generates constraints to enforce that the committed instance `self` is + /// partially equal to another committed instance `other`. + /// Here, only field elements are compared, while commitments (points) are + /// not. + fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError>; +} + +pub trait WitnessOps { + /// The in-circuit representation of the witness. + type Var: AllocVar + WitnessVarOps; + + /// Returns the openings (i.e., the values being committed to and the + /// randomness) contained in the witness. + fn get_openings(&self) -> Vec<(&[F], F)>; +} + +pub trait WitnessVarOps { + /// Returns the openings (i.e., the values being committed to and the + /// randomness) contained in the witness. 
+ fn get_openings(&self) -> Vec<(&[FpVar], FpVar)>; +} + +pub trait Dummy { + fn dummy(cfg: Cfg) -> Self; +} + +impl Dummy for Vec { + fn dummy(cfg: usize) -> Self { + vec![Default::default(); cfg] + } +} diff --git a/folding-schemes/src/lib.rs b/folding-schemes/src/lib.rs index e4b22ef..419d1ba 100644 --- a/folding-schemes/src/lib.rs +++ b/folding-schemes/src/lib.rs @@ -4,6 +4,7 @@ use ark_ec::{pairing::Pairing, CurveGroup}; use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::rand::CryptoRng; use ark_std::{fmt::Debug, rand::RngCore}; use thiserror::Error; @@ -43,6 +44,8 @@ pub enum Error { IVCVerificationFail, #[error("zkIVC verification failed")] zkIVCVerificationFail, + #[error("Committed instance is expected to be an incoming (fresh) instance")] + NotIncomingCommittedInstance, #[error("R1CS instance is expected to not be relaxed")] R1CSUnrelaxedFail, #[error("Could not find the inner ConstraintSystem")] @@ -105,8 +108,8 @@ pub enum Error { JSONSerdeError(String), #[error("Multi instances folding not supported in this scheme")] NoMultiInstances, - #[error("Missing 'other' instances, since this is a multi-instances folding scheme")] - MissingOtherInstances, + #[error("Missing 'other' instances, since this is a multi-instances folding scheme. Expected number of instances, mu:{0}, nu:{1}")] + MissingOtherInstances(usize, usize), } /// FoldingScheme defines trait that is implemented by the diverse folding schemes. It is defined @@ -122,12 +125,37 @@ where FC: FCircuit, { type PreprocessorParam: Debug + Clone; - type ProverParam: Debug + Clone; - type VerifierParam: Debug + Clone; + type ProverParam: Debug + Clone + CanonicalSerialize; + type VerifierParam: Debug + Clone + CanonicalSerialize; type RunningInstance: Debug; // contains the CommittedInstance + Witness type IncomingInstance: Debug; // contains the CommittedInstance + Witness type MultiCommittedInstanceWithWitness: Debug; // type used for the extra instances in the multi-instance folding setting type CFInstance: Debug; // CycleFold CommittedInstance & Witness + type IVCProof: PartialEq + Eq + Clone + Debug + CanonicalSerialize + CanonicalDeserialize; + + /// deserialize Self::ProverParam and recover the not serialized data that is recomputed on the + /// fly to save serialized bytes. + /// Internally it generates the r1cs/ccs & cf_r1cs needed for the VerifierParams. In this way + /// we avoid needing to serialize them, saving significant space in the VerifierParams + /// serialized size. + fn pp_deserialize_with_mode( + reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + fc_params: FC::Params, // FCircuit params + ) -> Result; + + /// deserialize Self::VerifierParam and recover the not serialized data that is recomputed on + /// the fly to save serialized bytes. + /// Internally it generates the r1cs/ccs & cf_r1cs needed for the VerifierParams. In this way + /// we avoid needing to serialize them, saving significant space in the VerifierParams + /// serialized size. 
+ fn vp_deserialize_with_mode( + reader: R, + compress: ark_serialize::Compress, + validate: ark_serialize::Validate, + fc_params: FC::Params, // FCircuit params + ) -> Result; fn preprocess( rng: impl RngCore, @@ -147,29 +175,23 @@ where other_instances: Option, ) -> Result<(), Error>; - // returns the state at the current step + /// returns the state at the current step fn state(&self) -> Vec; - // returns the instances at the current step, in the following order: - // (running_instance, incoming_instance, cyclefold_instance) - fn instances( - &self, - ) -> ( - Self::RunningInstance, - Self::IncomingInstance, - Self::CFInstance, - ); + /// returns the last IVC state proof, which can be verified in the `verify` method + fn ivc_proof(&self) -> Self::IVCProof; - fn verify( - vp: Self::VerifierParam, - z_0: Vec, // initial state - z_i: Vec, // last state - // number of steps between the initial state and the last state - num_steps: C1::ScalarField, - running_instance: Self::RunningInstance, - incoming_instance: Self::IncomingInstance, - cyclefold_instance: Self::CFInstance, - ) -> Result<(), Error>; + /// constructs the FoldingScheme instance from the given IVCProof, ProverParams, VerifierParams + /// and PoseidonConfig. + /// This method is useful for when the IVCProof is sent between different parties, so that they + /// can continue iterating the IVC from the received IVCProof. + fn from_ivc_proof( + ivc_proof: Self::IVCProof, + fcircuit_params: FC::Params, + params: (Self::ProverParam, Self::VerifierParam), + ) -> Result; + + fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error>; } /// Trait with auxiliary methods for multi-folding schemes (ie. HyperNova, ProtoGalaxy, etc), diff --git a/folding-schemes/src/utils/mod.rs b/folding-schemes/src/utils/mod.rs index b1bad97..c63938f 100644 --- a/folding-schemes/src/utils/mod.rs +++ b/folding-schemes/src/utils/mod.rs @@ -8,7 +8,7 @@ use ark_serialize::CanonicalSerialize; use ark_std::Zero; use sha3::{Digest, Sha3_256}; -use crate::arith::Arith; +use crate::arith::ArithSerializer; use crate::commitment::CommitmentScheme; use crate::Error; @@ -45,8 +45,8 @@ pub fn get_cm_coordinates(cm: &C) -> Vec { /// returns the hash of the given public parameters of the Folding Scheme pub fn pp_hash( - arith: &impl Arith, - cf_arith: &impl Arith, + arith: &impl ArithSerializer, + cf_arith: &impl ArithSerializer, cs_vp: &CS1::VerifierParams, cf_cs_vp: &CS2::VerifierParams, poseidon_config: &PoseidonConfig,