
Merge remote-tracking branch 'upstream/main' into introducing_mova

update-nifs-interface
Nick Dimitriou 4 weeks ago
parent commit 9f9361b925
43 changed files with 4005 additions and 2277 deletions
  1. .github/workflows/typos.toml (+1 -0)
  2. .gitignore (+2 -0)
  3. examples/circom_full_flow.rs (+10 -1)
  4. examples/external_inputs.rs (+3 -9)
  5. examples/multi_inputs.rs (+3 -9)
  6. examples/noir_full_flow.rs (+9 -1)
  7. examples/noname_full_flow.rs (+10 -1)
  8. examples/sha256.rs (+3 -9)
  9. folding-schemes/Cargo.toml (+4 -0)
  10. folding-schemes/src/arith/ccs.rs (+39 -26)
  11. folding-schemes/src/arith/mod.rs (+142 -14)
  12. folding-schemes/src/arith/r1cs.rs (+59 -84)
  13. folding-schemes/src/folding/circuits/cyclefold.rs (+31 -11)
  14. folding-schemes/src/folding/circuits/mod.rs (+8 -5)
  15. folding-schemes/src/folding/hypernova/cccs.rs (+42 -30)
  16. folding-schemes/src/folding/hypernova/circuits.rs (+139 -101)
  17. folding-schemes/src/folding/hypernova/decider_eth.rs (+7 -23)
  18. folding-schemes/src/folding/hypernova/decider_eth_circuit.rs (+28 -43)
  19. folding-schemes/src/folding/hypernova/lcccs.rs (+47 -57)
  20. folding-schemes/src/folding/hypernova/mod.rs (+328 -140)
  21. folding-schemes/src/folding/hypernova/nimfs.rs (+15 -11)
  22. folding-schemes/src/folding/hypernova/serialize.rs (+0 -420)
  23. folding-schemes/src/folding/hypernova/utils.rs (+8 -4)
  24. folding-schemes/src/folding/mod.rs (+168 -0)
  25. folding-schemes/src/folding/nova/circuits.rs (+126 -88)
  26. folding-schemes/src/folding/nova/decider.rs (+492 -0)
  27. folding-schemes/src/folding/nova/decider_circuits.rs (+553 -0)
  28. folding-schemes/src/folding/nova/decider_eth.rs (+16 -12)
  29. folding-schemes/src/folding/nova/decider_eth_circuit.rs (+77 -93)
  30. folding-schemes/src/folding/nova/mod.rs (+309 -149)
  31. folding-schemes/src/folding/nova/nifs.rs (+168 -312)
  32. folding-schemes/src/folding/nova/ova.rs (+269 -0)
  33. folding-schemes/src/folding/nova/serialize.rs (+0 -268)
  34. folding-schemes/src/folding/nova/traits.rs (+119 -39)
  35. folding-schemes/src/folding/nova/zk.rs (+26 -33)
  36. folding-schemes/src/folding/protogalaxy/circuits.rs (+33 -36)
  37. folding-schemes/src/folding/protogalaxy/constants.rs (+4 -0)
  38. folding-schemes/src/folding/protogalaxy/folding.rs (+29 -27)
  39. folding-schemes/src/folding/protogalaxy/mod.rs (+409 -133)
  40. folding-schemes/src/folding/protogalaxy/traits.rs (+89 -61)
  41. folding-schemes/src/folding/traits.rs (+131 -0)
  42. folding-schemes/src/lib.rs (+46 -24)
  43. folding-schemes/src/utils/mod.rs (+3 -3)

+1 -0  .github/workflows/typos.toml

@@ -1,2 +1,3 @@
 [default.extend-words]
 groth = "groth"
+mimc = "mimc"

+2 -0  .gitignore

@@ -14,3 +14,5 @@ solidity-verifiers/generated
 examples/*.sol
 examples/*.calldata
 examples/*.inputs
+*.serialized
+*/*.serialized

+10 -1  examples/circom_full_flow.rs

@@ -89,7 +89,8 @@ fn main() {
     let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap();

     // prepare the Decider prover & verifier params
-    let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap();
+    let (decider_pp, decider_vp) =
+        D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap();

     // run n steps of the folding iteration
     for (i, external_inputs_at_step) in external_inputs.iter().enumerate() {
@@ -99,6 +100,14 @@ fn main() {
         println!("Nova::prove_step {}: {:?}", i, start.elapsed());
     }

+    // verify the last IVC proof
+    let ivc_proof = nova.ivc_proof();
+    N::verify(
+        nova_params.1, // Nova's verifier params
+        ivc_proof,
+    )
+    .unwrap();
+
     let start = Instant::now();
     let proof = D::prove(rng, decider_pp, nova.clone()).unwrap();
     println!("generated Decider proof: {:?}", start.elapsed());

+3 -9  examples/external_inputs.rs

@@ -207,17 +207,11 @@ fn main() {
         folding_scheme.state()
     );

-    let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances();
-
     println!("Run the Nova's IVC verifier");
+    let ivc_proof = folding_scheme.ivc_proof();
    N::verify(
-        nova_params.1,
-        initial_state.clone(),
-        folding_scheme.state(), // latest state
-        Fr::from(num_steps as u32),
-        running_instance,
-        incoming_instance,
-        cyclefold_instance,
+        nova_params.1, // Nova's verifier params
+        ivc_proof,
    )
    .unwrap();
 }

+3 -9  examples/multi_inputs.rs

@@ -154,17 +154,11 @@ fn main() {
         println!("Nova::prove_step {}: {:?}", i, start.elapsed());
     }

-    let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances();
-
     println!("Run the Nova's IVC verifier");
+    let ivc_proof = folding_scheme.ivc_proof();
    N::verify(
-        nova_params.1,
-        initial_state.clone(),
-        folding_scheme.state(), // latest state
-        Fr::from(num_steps as u32),
-        running_instance,
-        incoming_instance,
-        cyclefold_instance,
+        nova_params.1, // Nova's verifier params
+        ivc_proof,
    )
    .unwrap();
 }

+9 -1  examples/noir_full_flow.rs

@@ -79,7 +79,8 @@ fn main() {
     let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap();

     // prepare the Decider prover & verifier params
-    let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap();
+    let (decider_pp, decider_vp) =
+        D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap();

     // run n steps of the folding iteration
     for i in 0..5 {
@@ -87,6 +88,13 @@ fn main() {
         nova.prove_step(rng, vec![], None).unwrap();
         println!("Nova::prove_step {}: {:?}", i, start.elapsed());
     }
+    // verify the last IVC proof
+    let ivc_proof = nova.ivc_proof();
+    N::verify(
+        nova_params.1, // Nova's verifier params
+        ivc_proof,
+    )
+    .unwrap();

     let start = Instant::now();
     let proof = D::prove(rng, decider_pp, nova.clone()).unwrap();

+10 -1  examples/noname_full_flow.rs

@@ -89,7 +89,8 @@ fn main() {
     let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap();

     // prepare the Decider prover & verifier params
-    let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap();
+    let (decider_pp, decider_vp) =
+        D::preprocess(&mut rng, nova_params.clone(), nova.clone()).unwrap();

     // run n steps of the folding iteration
     for (i, external_inputs_at_step) in external_inputs.iter().enumerate() {
@@ -99,6 +100,14 @@ fn main() {
         println!("Nova::prove_step {}: {:?}", i, start.elapsed());
     }

+    // verify the last IVC proof
+    let ivc_proof = nova.ivc_proof();
+    N::verify(
+        nova_params.1, // Nova's verifier params
+        ivc_proof,
+    )
+    .unwrap();
+
     let start = Instant::now();
     let proof = D::prove(rng, decider_pp, nova.clone()).unwrap();
     println!("generated Decider proof: {:?}", start.elapsed());

+3 -9  examples/sha256.rs

@@ -138,17 +138,11 @@ fn main() {
         println!("Nova::prove_step {}: {:?}", i, start.elapsed());
     }

-    let (running_instance, incoming_instance, cyclefold_instance) = folding_scheme.instances();
-
     println!("Run the Nova's IVC verifier");
+    let ivc_proof = folding_scheme.ivc_proof();
    N::verify(
-        nova_params.1,
-        initial_state,
-        folding_scheme.state(), // latest state
-        Fr::from(num_steps as u32),
-        running_instance,
-        incoming_instance,
-        cyclefold_instance,
+        nova_params.1, // Nova's verifier params
+        ivc_proof,
    )
    .unwrap();
 }

+4 -0  folding-schemes/Cargo.toml

@@ -41,6 +41,10 @@ ark-pallas = {version="0.4.0", features=["r1cs"]}
 ark-vesta = {version="0.4.0", features=["r1cs"]}
 ark-bn254 = {version="0.4.0", features=["r1cs"]}
 ark-grumpkin = {version="0.4.0", features=["r1cs"]}
+# Note: do not use the MNTx_298 curves in practice due security reasons, here
+# we only use them in the tests.
+ark-mnt4-298 = {version="0.4.0", features=["r1cs"]}
+ark-mnt6-298 = {version="0.4.0", features=["r1cs"]}
 rand = "0.8.5"
 tracing = { version = "0.1", default-features = false, features = [ "attributes" ] }
 tracing-subscriber = { version = "0.2" }

+39 -26  folding-schemes/src/arith/ccs.rs

@@ -1,9 +1,12 @@
 use ark_ff::PrimeField;
 use ark_std::log2;

-use crate::utils::vec::{hadamard, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix};
+use crate::utils::vec::{
+    hadamard, is_zero_vec, mat_vec_mul, vec_add, vec_scalar_mul, SparseMatrix,
+};
 use crate::Error;

+use super::ArithSerializer;
 use super::{r1cs::R1CS, Arith};

 /// CCS represents the Customizable Constraint Systems structure defined in
@@ -35,8 +38,9 @@ pub struct CCS<F: PrimeField> {
     pub c: Vec<F>,
 }

-impl<F: PrimeField> Arith<F> for CCS<F> {
-    fn eval_relation(&self, z: &[F]) -> Result<Vec<F>, Error> {
+impl<F: PrimeField> CCS<F> {
+    /// Evaluates the CCS relation at a given vector of assignments `z`
+    pub fn eval_at_z(&self, z: &[F]) -> Result<Vec<F>, Error> {
         let mut result = vec![F::zero(); self.m];

         for i in 0..self.q {
@@ -59,6 +63,25 @@ impl<F: PrimeField> Arith<F> for CCS<F> {
         Ok(result)
     }

+    /// returns a tuple containing (w, x) (witness and public inputs respectively)
+    pub fn split_z(&self, z: &[F]) -> (Vec<F>, Vec<F>) {
+        (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec())
+    }
+}
+
+impl<F: PrimeField, W: AsRef<[F]>, U: AsRef<[F]>> Arith<W, U> for CCS<F> {
+    type Evaluation = Vec<F>;
+
+    fn eval_relation(&self, w: &W, u: &U) -> Result<Self::Evaluation, Error> {
+        self.eval_at_z(&[&[F::one()], u.as_ref(), w.as_ref()].concat())
+    }
+
+    fn check_evaluation(_w: &W, _u: &U, e: Self::Evaluation) -> Result<(), Error> {
+        is_zero_vec(&e).then_some(()).ok_or(Error::NotSatisfied)
+    }
+}
+
+impl<F: PrimeField> ArithSerializer for CCS<F> {
     fn params_to_le_bytes(&self) -> Vec<u8> {
         [
             self.l.to_le_bytes(),
@@ -72,14 +95,14 @@ impl<F: PrimeField> Arith<F> for CCS<F> {
     }
 }

-impl<F: PrimeField> CCS<F> {
-    pub fn from_r1cs(r1cs: R1CS<F>) -> Self {
-        let m = r1cs.A.n_rows;
-        let n = r1cs.A.n_cols;
+impl<F: PrimeField> From<R1CS<F>> for CCS<F> {
+    fn from(r1cs: R1CS<F>) -> Self {
+        let m = r1cs.num_constraints();
+        let n = r1cs.num_variables();
         CCS {
             m,
             n,
-            l: r1cs.l,
+            l: r1cs.num_public_inputs(),
             s: log2(m) as usize,
             s_prime: log2(n) as usize,
             t: 3,
@@ -91,29 +114,19 @@ impl<F: PrimeField> CCS<F> {
             M: vec![r1cs.A, r1cs.B, r1cs.C],
         }
     }
-
-    pub fn to_r1cs(self) -> R1CS<F> {
-        R1CS::<F> {
-            l: self.l,
-            A: self.M[0].clone(),
-            B: self.M[1].clone(),
-            C: self.M[2].clone(),
-        }
-    }
 }

 #[cfg(test)]
 pub mod tests {
     use super::*;
     use crate::{
-        arith::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z},
+        arith::r1cs::tests::{get_test_r1cs, get_test_z as r1cs_get_test_z, get_test_z_split},
         utils::vec::is_zero_vec,
     };
     use ark_pallas::Fr;

     pub fn get_test_ccs<F: PrimeField>() -> CCS<F> {
-        let r1cs = get_test_r1cs::<F>();
-        CCS::<F>::from_r1cs(r1cs)
+        get_test_r1cs::<F>().into()
     }

     pub fn get_test_z<F: PrimeField>(input: usize) -> Vec<F> {
         r1cs_get_test_z(input)
@@ -122,13 +135,13 @@ pub mod tests {
     #[test]
     fn test_eval_ccs_relation() {
         let ccs = get_test_ccs::<Fr>();
-        let mut z = get_test_z(3);
+        let (_, x, mut w) = get_test_z_split(3);

-        let f_w = ccs.eval_relation(&z).unwrap();
+        let f_w = ccs.eval_relation(&w, &x).unwrap();
         assert!(is_zero_vec(&f_w));

-        z[1] = Fr::from(111);
-        let f_w = ccs.eval_relation(&z).unwrap();
+        w[1] = Fr::from(111);
+        let f_w = ccs.eval_relation(&w, &x).unwrap();
         assert!(!is_zero_vec(&f_w));
     }

@@ -136,8 +149,8 @@ pub mod tests {
     #[test]
     fn test_check_ccs_relation() {
         let ccs = get_test_ccs::<Fr>();
-        let z = get_test_z(3);
+        let (_, x, w) = get_test_z_split(3);

-        ccs.check_relation(&z).unwrap();
+        ccs.check_relation(&w, &x).unwrap();
     }
 }
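The net effect of this file: evaluation over a full assignment `z` is split out into `eval_at_z`, the generic `Arith<W, U>` interface takes witness and instance separately, and `from_r1cs`/`to_r1cs` become `From` conversions. A hedged usage sketch mirroring the updated tests (assumes the crate's test helpers `get_test_r1cs` and `get_test_z_split` are in scope, and that `get_test_z_split` returns the `(1, x, w)` split of the test assignment):

```rust
// hypothetical fragment following the test code above
let ccs: CCS<Fr> = get_test_r1cs::<Fr>().into(); // From<R1CS<F>> replaces CCS::from_r1cs
let (_, x, w) = get_test_z_split::<Fr>(3);       // public inputs x and witness w
ccs.check_relation(&w, &x).unwrap();             // Arith<W, U>: w and x passed separately,
                                                 // instead of one concatenated z vector
```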

+142 -14  folding-schemes/src/arith/mod.rs

@@ -1,28 +1,156 @@
-use ark_ff::PrimeField;
+use ark_ec::CurveGroup;
+use ark_relations::r1cs::SynthesisError;
+use ark_std::rand::RngCore;

-use crate::Error;
+use crate::{commitment::CommitmentScheme, folding::traits::Dummy, Error};

 pub mod ccs;
 pub mod r1cs;

-pub trait Arith<F: PrimeField> {
-    /// Evaluate the given Arith structure at `z`, a vector of assignments, and
-    /// return the evaluation.
-    fn eval_relation(&self, z: &[F]) -> Result<Vec<F>, Error>;
+/// `Arith` defines the operations that a constraint system (e.g., R1CS, CCS,
+/// etc.) should support.
+///
+/// Here, `W` is the type of witness, and `U` is the type of statement / public
+/// input / public IO / instance.
+/// Note that the same constraint system may support different types of `W` and
+/// `U`, and the satisfiability check may vary.
+///
+/// For example, both plain R1CS and relaxed R1CS are represented by 3 matrices,
+/// but the types of `W` and `U` are different:
+/// - The plain R1CS has `W` and `U` as vectors of field elements.
+///
+///   `W = w` and `U = x` satisfy R1CS if `Az ∘ Bz = Cz`, where `z = [1, x, w]`.
+///
+/// - In Nova, Relaxed R1CS has `W` as [`crate::folding::nova::Witness`],
+///   and `U` as [`crate::folding::nova::CommittedInstance`].
+///
+///   `W = (w, e, ...)` and `U = (u, x, ...)` satisfy Relaxed R1CS if
+///   `Az ∘ Bz = uCz + e`, where `z = [u, x, w]`.
+///   (commitments in `U` are not checked here)
+///
+///   Also, `W` and `U` have non-native field elements as their components when
+///   used as CycleFold witness and instance.
+///
+/// - In ProtoGalaxy, Relaxed R1CS has `W` as [`crate::folding::protogalaxy::Witness`],
+///   and `U` as [`crate::folding::protogalaxy::CommittedInstance`].
+///
+///   `W = (w, ...)` and `U = (x, e, β, ...)` satisfy Relaxed R1CS if
+///   `e = Σ pow_i(β) v_i`, where `v = Az ∘ Bz - Cz`, `z = [1, x, w]`.
+///   (commitments in `U` are not checked here)
+///
+/// This is also the case of CCS, where `W` and `U` may be vectors of field
+/// elements, [`crate::folding::hypernova::Witness`] and [`crate::folding::hypernova::lcccs::LCCCS`],
+/// or [`crate::folding::hypernova::Witness`] and [`crate::folding::hypernova::cccs::CCCS`].
+pub trait Arith<W, U>: Clone {
+    type Evaluation;

-    /// Checks that the given Arith structure is satisfied by a z vector, i.e.,
-    /// if the evaluation is a zero vector
+    /// Returns a dummy witness and instance
+    fn dummy_witness_instance<'a>(&'a self) -> (W, U)
+    where
+        W: Dummy<&'a Self>,
+        U: Dummy<&'a Self>,
+    {
+        (W::dummy(self), U::dummy(self))
+    }
+
+    /// Evaluates the constraint system `self` at witness `w` and instance `u`.
+    /// Returns the evaluation result.
+    ///
+    /// The evaluation result is usually a vector of field elements.
+    /// For instance:
+    /// - Evaluating the plain R1CS at `W = w` and `U = x` returns
+    ///   `Az ∘ Bz - Cz`, where `z = [1, x, w]`.
+    ///
+    /// - Evaluating the relaxed R1CS in Nova at `W = (w, e, ...)` and
+    ///   `U = (u, x, ...)` returns `Az ∘ Bz - uCz`, where `z = [u, x, w]`.
+    ///
+    /// - Evaluating the relaxed R1CS in ProtoGalaxy at `W = (w, ...)` and
+    ///   `U = (x, e, β, ...)` returns `Az ∘ Bz - Cz`, where `z = [1, x, w]`.
+    ///
+    /// However, we use `Self::Evaluation` to represent the evaluation result
+    /// for future extensibility.
+    fn eval_relation(&self, w: &W, u: &U) -> Result<Self::Evaluation, Error>;
+
+    /// Checks if the evaluation result is valid. The witness `w` and instance
+    /// `u` are also parameters, because the validity check may need information
+    /// contained in `w` and/or `u`.
+    ///
+    /// For instance:
+    /// - The evaluation `v` of plain R1CS at satisfying `W` and `U` should be
+    ///   an all-zero vector.
+    ///
+    /// - The evaluation `v` of relaxed R1CS in Nova at satisfying `W` and `U`
+    ///   should be equal to the error term `e` in the witness.
+    ///
+    /// - The evaluation `v` of relaxed R1CS in ProtoGalaxy at satisfying `W`
+    ///   and `U` should satisfy `e = Σ pow_i(β) v_i`, where `e` is the error
+    ///   term in the committed instance.
+    fn check_evaluation(w: &W, u: &U, v: Self::Evaluation) -> Result<(), Error>;
+
+    /// Checks if witness `w` and instance `u` satisfy the constraint system
+    /// `self` by first computing the evaluation result and then checking the
+    /// validity of the evaluation result.
     ///
     /// Used only for testing.
-    fn check_relation(&self, z: &[F]) -> Result<(), Error> {
-        if self.eval_relation(z)?.iter().all(|f| f.is_zero()) {
-            Ok(())
-        } else {
-            Err(Error::NotSatisfied)
-        }
+    fn check_relation(&self, w: &W, u: &U) -> Result<(), Error> {
+        let e = self.eval_relation(w, u)?;
+        Self::check_evaluation(w, u, e)
     }
+}

+/// `ArithSerializer` is for serializing constraint systems.
+///
+/// Currently we only support converting parameters to bytes, but in the future
+/// we may consider implementing methods for serializing the actual data (e.g.,
+/// R1CS matrices).
+pub trait ArithSerializer {
     /// Returns the bytes that represent the parameters, that is, the matrices sizes, the amount of
     /// public inputs, etc, without the matrices/polynomials values.
     fn params_to_le_bytes(&self) -> Vec<u8>;
 }
+
+/// `ArithSampler` allows sampling random pairs of witness and instance that
+/// satisfy the constraint system `self`.
+///
+/// This is useful for constructing a zero-knowledge layer for a folding-based
+/// IVC.
+/// An example of such a layer can be found in Appendix D of the [HyperNova]
+/// paper.
+///
+/// Note that we use a separate trait for sampling, because this operation may
+/// not be supported by all witness-instance pairs.
+/// For instance, it is difficult (if not impossible) to do this for `w` and `x`
+/// in a plain R1CS.
+///
+/// [HyperNova]: https://eprint.iacr.org/2023/573.pdf
pub trait ArithSampler<C: CurveGroup, W, U>: Arith<W, U> {
+    /// Samples a random witness and instance that satisfy the constraint system.
+    fn sample_witness_instance<CS: CommitmentScheme<C, true>>(
+        &self,
+        params: &CS::ProverParams,
+        rng: impl RngCore,
+    ) -> Result<(W, U), Error>;
+}
+
+/// `ArithGadget` defines the in-circuit counterparts of operations specified in
+/// `Arith` on constraint systems.
+pub trait ArithGadget<WVar, UVar> {
+    type Evaluation;
+
+    /// Evaluates the constraint system `self` at witness `w` and instance `u`.
+    /// Returns the evaluation result.
+    fn eval_relation(&self, w: &WVar, u: &UVar) -> Result<Self::Evaluation, SynthesisError>;
+
+    /// Generates constraints for enforcing that witness `w` and instance `u`
+    /// satisfy the constraint system `self` by first computing the evaluation
+    /// result and then checking the validity of the evaluation result.
+    fn enforce_relation(&self, w: &WVar, u: &UVar) -> Result<(), SynthesisError> {
+        let e = self.eval_relation(w, u)?;
+        Self::enforce_evaluation(w, u, e)
+    }
+
+    /// Generates constraints for enforcing that the evaluation result is valid.
+    /// The witness `w` and instance `u` are also parameters, because the
+    /// validity check may need information contained in `w` and/or `u`.
+    fn enforce_evaluation(w: &WVar, u: &UVar, e: Self::Evaluation) -> Result<(), SynthesisError>;
+}
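The new doc comments above pin down the convention `z = [1, x, w]` and the residual `Az ∘ Bz - Cz` for the plain R1CS case. A standalone toy illustration of that evaluation (plain integers instead of field elements, independent of the crate's `SparseMatrix` type), for readers who want the convention made concrete:

```rust
// toy sketch of the `Az ∘ Bz - Cz` residual described in the trait docs above
fn mat_vec(m: &[Vec<i64>], z: &[i64]) -> Vec<i64> {
    m.iter()
        .map(|row| row.iter().zip(z).map(|(a, b)| a * b).sum())
        .collect()
}

fn r1cs_residual(a: &[Vec<i64>], b: &[Vec<i64>], c: &[Vec<i64>], z: &[i64]) -> Vec<i64> {
    let (az, bz, cz) = (mat_vec(a, z), mat_vec(b, z), mat_vec(c, z));
    az.iter()
        .zip(&bz)
        .zip(&cz)
        .map(|((a, b), c)| a * b - c) // Az ∘ Bz - Cz, entry by entry
        .collect()
}

fn main() {
    // single constraint x * x = y, with z = [1, x, y] and x = 3, y = 9
    let a = vec![vec![0, 1, 0]]; // selects x
    let b = vec![vec![0, 1, 0]]; // selects x
    let c = vec![vec![0, 0, 1]]; // selects y
    let z = vec![1, 3, 9];
    assert_eq!(r1cs_residual(&a, &b, &c, &z), vec![0]); // satisfied: all-zero residual
}
```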

+59 -84  folding-schemes/src/arith/r1cs.rs

@@ -1,13 +1,13 @@
-use crate::commitment::CommitmentScheme;
-use crate::RngCore;
-use ark_ec::CurveGroup;
 use ark_ff::PrimeField;
 use ark_relations::r1cs::ConstraintSystem;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use ark_std::rand::Rng;

-use super::Arith;
-use crate::utils::vec::{hadamard, mat_vec_mul, vec_scalar_mul, vec_sub, SparseMatrix};
+use super::ccs::CCS;
+use super::{Arith, ArithSerializer};
+use crate::utils::vec::{
+    hadamard, is_zero_vec, mat_vec_mul, vec_scalar_mul, vec_sub, SparseMatrix,
+};
 use crate::Error;

 #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
@@ -18,8 +18,9 @@ pub struct R1CS<F: PrimeField> {
     pub C: SparseMatrix<F>,
 }

-impl<F: PrimeField> Arith<F> for R1CS<F> {
-    fn eval_relation(&self, z: &[F]) -> Result<Vec<F>, Error> {
+impl<F: PrimeField> R1CS<F> {
+    /// Evaluates the CCS relation at a given vector of variables `z`
+    pub fn eval_at_z(&self, z: &[F]) -> Result<Vec<F>, Error> {
         if z.len() != self.A.n_cols {
             return Err(Error::NotSameLength(
                 "z.len()".to_string(),
@@ -33,12 +34,26 @@ impl<F: PrimeField> Arith<F> for R1CS<F> {
         let Bz = mat_vec_mul(&self.B, z)?;
         let Cz = mat_vec_mul(&self.C, z)?;
         // Multiply Cz by z[0] (u) here, allowing this method to be reused for
-        // both relaxed and unrelaxed R1CS.
+        // both relaxed and plain R1CS.
         let uCz = vec_scalar_mul(&Cz, &z[0]);
         let AzBz = hadamard(&Az, &Bz)?;
         vec_sub(&AzBz, &uCz)
     }
+}
+
+impl<F: PrimeField, W: AsRef<[F]>, U: AsRef<[F]>> Arith<W, U> for R1CS<F> {
+    type Evaluation = Vec<F>;
+
+    fn eval_relation(&self, w: &W, u: &U) -> Result<Self::Evaluation, Error> {
+        self.eval_at_z(&[&[F::one()], u.as_ref(), w.as_ref()].concat())
+    }
+
+    fn check_evaluation(_w: &W, _u: &U, e: Self::Evaluation) -> Result<(), Error> {
+        is_zero_vec(&e).then_some(()).ok_or(Error::NotSatisfied)
+    }
+}
+
+impl<F: PrimeField> ArithSerializer for R1CS<F> {
     fn params_to_le_bytes(&self) -> Vec<u8> {
         [
             self.l.to_le_bytes(),
@@ -67,66 +82,41 @@ impl<F: PrimeField> R1CS<F> {
     }
     }

-    /// returns a tuple containing (w, x) (witness and public inputs respectively)
-    pub fn split_z(&self, z: &[F]) -> (Vec<F>, Vec<F>) {
-        (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec())
+    #[inline]
+    pub fn num_constraints(&self) -> usize {
+        self.A.n_rows
     }
-}

-pub trait RelaxedR1CS<C: CurveGroup, W, U>: Arith<C::ScalarField> {
-    /// returns a dummy running instance (Witness and CommittedInstance) for the current R1CS structure
-    fn dummy_running_instance(&self) -> (W, U);
-    /// returns a dummy incoming instance (Witness and CommittedInstance) for the current R1CS structure
-    fn dummy_incoming_instance(&self) -> (W, U);
-    /// checks if the given instance is relaxed
-    fn is_relaxed(w: &W, u: &U) -> bool;
-    /// extracts `z`, the vector of variables, from the given Witness and CommittedInstance
-    fn extract_z(w: &W, u: &U) -> Vec<C::ScalarField>;
-    /// checks if the computed error terms correspond to the actual one in `w`
-    /// or `u`
-    fn check_error_terms(w: &W, u: &U, e: Vec<C::ScalarField>) -> Result<(), Error>;
-    /// checks the tight (unrelaxed) R1CS relation
-    fn check_tight_relation(&self, w: &W, u: &U) -> Result<(), Error> {
-        if Self::is_relaxed(w, u) {
-            return Err(Error::R1CSUnrelaxedFail);
-        }
-        let z = Self::extract_z(w, u);
-        self.check_relation(&z)
+    #[inline]
+    pub fn num_public_inputs(&self) -> usize {
+        self.l
     }

-    /// checks the relaxed R1CS relation
-    fn check_relaxed_relation(&self, w: &W, u: &U) -> Result<(), Error> {
-        let z = Self::extract_z(w, u);
-        let e = self.eval_relation(&z)?;
-        Self::check_error_terms(w, u, e)
+    #[inline]
+    pub fn num_variables(&self) -> usize {
+        self.A.n_cols
     }

-    // Computes the E term, given A, B, C, z, u
-    fn compute_E(
-        A: &SparseMatrix<C::ScalarField>,
-        B: &SparseMatrix<C::ScalarField>,
-        C: &SparseMatrix<C::ScalarField>,
-        z: &[C::ScalarField],
-        u: &C::ScalarField,
-    ) -> Result<Vec<C::ScalarField>, Error> {
-        let Az = mat_vec_mul(A, z)?;
-        let Bz = mat_vec_mul(B, z)?;
-        let AzBz = hadamard(&Az, &Bz)?;
+    #[inline]
+    pub fn num_witnesses(&self) -> usize {
+        self.num_variables() - self.num_public_inputs() - 1
+    }

-        let Cz = mat_vec_mul(C, z)?;
-        let uCz = vec_scalar_mul(&Cz, u);
-        vec_sub(&AzBz, &uCz)
+    /// returns a tuple containing (w, x) (witness and public inputs respectively)
+    pub fn split_z(&self, z: &[F]) -> (Vec<F>, Vec<F>) {
+        (z[self.l + 1..].to_vec(), z[1..self.l + 1].to_vec())
     }
+}

-    fn sample<CS>(&self, params: &CS::ProverParams, rng: impl RngCore) -> Result<(W, U), Error>
-    where
-        CS: CommitmentScheme<C, true>;
+impl<F: PrimeField> From<CCS<F>> for R1CS<F> {
+    fn from(ccs: CCS<F>) -> Self {
+        R1CS::<F> {
+            l: ccs.l,
+            A: ccs.M[0].clone(),
+            B: ccs.M[1].clone(),
+            C: ccs.M[2].clone(),
+        }
+    }
 }

@@ -173,27 +163,12 @@ pub fn extract_w_x<F: PrimeField>(cs: &ConstraintSystem<F>) -> (Vec<F>, Vec<F>)
 #[cfg(test)]
 pub mod tests {
     use super::*;
-    use crate::folding::nova::{CommittedInstance, Witness};
-    use crate::{
-        commitment::pedersen::Pedersen,
-        utils::vec::{
-            is_zero_vec,
-            tests::{to_F_matrix, to_F_vec},
-        },
+    use crate::utils::vec::{
+        is_zero_vec,
+        tests::{to_F_matrix, to_F_vec},
     };
-    use ark_pallas::{Fr, Projective};
-
-    #[test]
-    pub fn sample_relaxed_r1cs() {
-        let rng = rand::rngs::OsRng;
-        let r1cs = get_test_r1cs::<Fr>();
-        let (prover_params, _) = Pedersen::<Projective>::setup(rng, r1cs.A.n_rows).unwrap();
-        let sampled: Result<(Witness<Projective>, CommittedInstance<Projective>), _> =
-            r1cs.sample::<Pedersen<Projective, true>>(&prover_params, rng);
-        assert!(sampled.is_ok());
-    }
+    use ark_pallas::Fr;

     pub fn get_test_r1cs<F: PrimeField>() -> R1CS<F> {
         // R1CS for: x^3 + x + 5 = y (example from article
@@ -252,20 +227,20 @@ pub mod tests {
     fn test_eval_r1cs_relation() {
         let mut rng = ark_std::test_rng();
         let r1cs = get_test_r1cs::<Fr>();
-        let mut z = get_test_z::<Fr>(rng.gen::<u16>() as usize);
+        let (_, x, mut w) = get_test_z_split::<Fr>(rng.gen::<u16>() as usize);

-        let f_w = r1cs.eval_relation(&z).unwrap();
+        let f_w = r1cs.eval_relation(&w, &x).unwrap();
         assert!(is_zero_vec(&f_w));

-        z[1] = Fr::from(111);
-        let f_w = r1cs.eval_relation(&z).unwrap();
+        w[1] = Fr::from(111);
+        let f_w = r1cs.eval_relation(&w, &x).unwrap();
         assert!(!is_zero_vec(&f_w));
     }

     #[test]
     fn test_check_r1cs_relation() {
         let r1cs = get_test_r1cs::<Fr>();
-        let z = get_test_z(5);
-        r1cs.check_relation(&z).unwrap();
+        let (_, x, w) = get_test_z_split(5);
+        r1cs.check_relation(&w, &x).unwrap();
     }
 }
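Besides moving the old `RelaxedR1CS` trait out of this file, the diff adds dimension helpers and a `From<CCS<F>>` conversion. A hedged sketch of the accounting those helpers encode (assumes an `r1cs: R1CS<Fr>` such as the one returned by the crate's `get_test_r1cs`, and a full assignment `z = [1, x, w]` in scope):

```rust
// hypothetical fragment based on the helper bodies shown above
let _n_rows = r1cs.num_constraints();    // = A.n_rows
let n_cols = r1cs.num_variables();       // = A.n_cols, i.e. the length of z = [1, x, w]
let l = r1cs.num_public_inputs();        // = l
assert_eq!(r1cs.num_witnesses(), n_cols - l - 1); // z minus the constant 1 and the public x
let (w, x) = r1cs.split_z(&z);           // witness and public inputs, in that order
```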

+31 -11  folding-schemes/src/folding/circuits/cyclefold.rs

@@ -24,7 +24,7 @@ use super::{nonnative::uint::NonNativeUintVar, CF1, CF2};
 use crate::arith::r1cs::{extract_w_x, R1CS};
 use crate::commitment::CommitmentScheme;
 use crate::constants::NOVA_N_BITS_RO;
-use crate::folding::nova::nifs::NIFS;
+use crate::folding::nova::{nifs::NIFS, traits::NIFSTrait};
 use crate::transcript::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, TranscriptVar};
 use crate::Error;

@@ -61,10 +61,12 @@ where
         f().and_then(|val| {
             let cs = cs.into();
+            let u =
+                NonNativeUintVar::<CF2<C>>::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?;
+            let x: Vec<NonNativeUintVar<CF2<C>>> =
+                Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?;
             let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?;
             let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?;
-            let u = NonNativeUintVar::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?;
-            let x = Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?;
             Ok(Self { cmE, u, cmW, x })
@@ -568,9 +570,8 @@ where
     let cf_r_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&cf_r_bits))
         .expect("cf_r_bits out of bounds");
-    let (cf_W_i1, cf_U_i1) = NIFS::<C2, CS2, H>::fold_instances(
-        cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, cf_cmT,
-    )?;
+    let (cf_W_i1, cf_U_i1) =
+        NIFS::<C2, CS2, H>::prove(cf_r_Fq, &cf_W_i, &cf_U_i, &cf_w_i, &cf_u_i, &cf_T, &cf_cmT)?;
     Ok((cf_w_i, cf_u_i, cf_W_i1, cf_U_i1, cf_cmT, cf_r_Fq))
 }
@@ -582,10 +583,11 @@ pub mod tests {
         poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
     };
     use ark_r1cs_std::R1CSVar;
-    use ark_std::UniformRand;
+    use ark_std::{One, UniformRand};

     use super::*;
-    use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs;
+    use crate::commitment::pedersen::Pedersen;
+    use crate::folding::nova::CommittedInstance;
     use crate::transcript::poseidon::poseidon_canonical_config;
     use crate::utils::get_cm_coordinates;
@@ -667,12 +669,30 @@
     #[test]
     fn test_nifs_full_gadget() {
-        let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, r_bits, _) = prepare_simple_fold_inputs();
-        let cs = ConstraintSystem::<Fq>::new_ref();
+        let mut rng = ark_std::test_rng();
+
+        // prepare the committed instances to test in-circuit
+        let ci: Vec<CommittedInstance<Projective>> = (0..2)
+            .into_iter()
+            .map(|_| CommittedInstance::<Projective> {
+                cmE: Projective::rand(&mut rng),
+                u: Fr::rand(&mut rng),
+                cmW: Projective::rand(&mut rng),
+                x: vec![Fr::rand(&mut rng); 1],
+            })
+            .collect();
+        let (ci1, mut ci2) = (ci[0].clone(), ci[1].clone());
+        // make the 2nd instance a 'fresh' instance (ie. cmE=0, u=1)
+        ci2.cmE = Projective::zero();
+        ci2.u = Fr::one();
+        let r_bits: Vec<bool> =
+            Fr::rand(&mut rng).into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec();
+        let r_Fr = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
+        let cmT = Projective::rand(&mut rng);
+        let ci3 = NIFS::<Projective, Pedersen<Projective>>::verify(r_Fr, &ci1, &ci2, &cmT);
+
+        let cs = ConstraintSystem::<Fq>::new_ref();
         let r_bitsVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
         let ci1Var =
             CycleFoldCommittedInstanceVar::<Projective, GVar>::new_witness(cs.clone(), || {
                 Ok(ci1.clone())
+8 -5  folding-schemes/src/folding/circuits/mod.rs

@@ -7,10 +7,13 @@ pub mod nonnative;
 pub mod sum_check;
 pub mod utils;

-/// CF1 represents the ConstraintField used for the main folding circuit which is over E1::Fr, where
-/// E1 is the main curve where we do the folding.
+/// CF1 uses the ScalarField of the given C. CF1 represents the ConstraintField used for the main
+/// folding circuit which is over E1::Fr, where E1 is the main curve where we do the folding.
+/// In CF1, the points of C can not be natively represented.
 pub type CF1<C> = <<C as CurveGroup>::Affine as AffineRepr>::ScalarField;
-/// CF2 represents the ConstraintField used for the CycleFold circuit which is over E2::Fr=E1::Fq,
-/// where E2 is the auxiliary curve (from [CycleFold](https://eprint.iacr.org/2023/1192.pdf)
-/// approach) where we check the folding of the commitments (elliptic curve points).
+/// CF2 uses the BaseField of the given C. CF2 represents the ConstraintField used for the
+/// CycleFold circuit which is over E2::Fr=E1::Fq, where E2 is the auxiliary curve (from
+/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf) approach) where we check the folding of the
+/// commitments (elliptic curve points).
+/// In CF2, the points of C can be natively represented.
 pub type CF2<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;
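A concrete instantiation of the clarified aliases, using the Pallas curve already exercised by the crate's tests (a sketch, assuming `CF1`/`CF2` are importable from this module's public path):

```rust
use ark_pallas::Projective;

type MainField = CF1<Projective>; // == ark_pallas::Fr, the scalar field: native in the main folding circuit
type CfField = CF2<Projective>;   // == ark_pallas::Fq, the base field: native in the CycleFold circuit
```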

+42 -30  folding-schemes/src/folding/hypernova/cccs.rs

@@ -3,23 +3,22 @@ use ark_ec::CurveGroup;
 use ark_ff::PrimeField;
 use ark_serialize::CanonicalDeserialize;
 use ark_serialize::CanonicalSerialize;
-use ark_std::One;
-use ark_std::Zero;
-use std::sync::Arc;
-use ark_std::rand::Rng;
+use ark_std::{rand::Rng, sync::Arc, One, Zero};

+use super::circuits::CCCSVar;
 use super::Witness;
 use crate::arith::{ccs::CCS, Arith};
 use crate::commitment::CommitmentScheme;
+use crate::folding::circuits::CF1;
+use crate::folding::traits::{CommittedInstanceOps, Dummy};
 use crate::transcript::AbsorbNonNative;
 use crate::utils::mle::dense_vec_to_dense_mle;
-use crate::utils::vec::mat_vec_mul;
+use crate::utils::vec::{is_zero_vec, mat_vec_mul};
 use crate::utils::virtual_polynomial::{build_eq_x_r_vec, VirtualPolynomial};
 use crate::Error;

 /// Committed CCS instance
-#[derive(Debug, Clone, CanonicalSerialize, CanonicalDeserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)]
 pub struct CCCS<C: CurveGroup> {
     // Commitment to witness
     pub C: C,
@@ -91,34 +90,34 @@ impl<F: PrimeField> CCS<F> {
     }
 }

-impl<C: CurveGroup> CCCS<C> {
-    pub fn dummy(l: usize) -> CCCS<C>
-    where
-        C::ScalarField: PrimeField,
-    {
-        CCCS::<C> {
+impl<C: CurveGroup> Dummy<&CCS<CF1<C>>> for CCCS<C> {
+    fn dummy(ccs: &CCS<CF1<C>>) -> Self {
+        Self {
             C: C::zero(),
-            x: vec![C::ScalarField::zero(); l],
+            x: vec![CF1::<C>::zero(); ccs.l],
         }
     }
+}
+
+impl<C: CurveGroup> Arith<Witness<CF1<C>>, CCCS<C>> for CCS<CF1<C>> {
+    type Evaluation = Vec<CF1<C>>;
+
+    fn eval_relation(&self, w: &Witness<CF1<C>>, u: &CCCS<C>) -> Result<Self::Evaluation, Error> {
+        // evaluate CCCS relation
+        self.eval_at_z(&[&[CF1::<C>::one()][..], &u.x, &w.w].concat())
+    }

     /// Perform the check of the CCCS instance described at section 4.1,
     /// notice that this method does not check the commitment correctness
-    pub fn check_relation(
-        &self,
-        ccs: &CCS<C::ScalarField>,
-        w: &Witness<C::ScalarField>,
+    fn check_evaluation(
+        _w: &Witness<CF1<C>>,
+        _u: &CCCS<C>,
+        e: Self::Evaluation,
     ) -> Result<(), Error> {
-        // check CCCS relation
-        let z: Vec<C::ScalarField> =
-            [vec![C::ScalarField::one()], self.x.clone(), w.w.to_vec()].concat();
         // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in
         // the hypercube, evaluating over the whole boolean hypercube for a normal-sized instance
         // would take too much, this checks the CCS relation of the CCCS.
-        ccs.check_relation(&z)?;
-        Ok(())
+        is_zero_vec(&e).then_some(()).ok_or(Error::NotSatisfied)
     }
 }
@@ -126,9 +125,8 @@ impl<C: CurveGroup> Absorb for CCCS<C>
 where
     C::ScalarField: Absorb,
 {
-    fn to_sponge_bytes(&self, _dest: &mut Vec<u8>) {
-        // This is never called
-        unimplemented!()
+    fn to_sponge_bytes(&self, dest: &mut Vec<u8>) {
+        C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest);
     }

     fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) {
@@ -142,6 +140,18 @@ where
     }
 }

+impl<C: CurveGroup> CommittedInstanceOps<C> for CCCS<C> {
+    type Var = CCCSVar<C>;
+
+    fn get_commitments(&self) -> Vec<C> {
+        vec![self.C]
+    }
+
+    fn is_incoming(&self) -> bool {
+        true
+    }
+}
+
 #[cfg(test)]
 pub mod tests {
     use ark_pallas::Fr;
@@ -180,7 +190,8 @@
         let ccs: CCS<Fr> = get_test_ccs();
         let z = get_test_z(3);
-        ccs.check_relation(&z).unwrap();
+        let (w, x) = ccs.split_z(&z);
+        ccs.check_relation(&w, &x).unwrap();

         let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
@@ -214,7 +225,8 @@
         let ccs: CCS<Fr> = get_test_ccs();
         let z = get_test_z(3);
-        ccs.check_relation(&z).unwrap();
+        let (w, x) = ccs.split_z(&z);
+        ccs.check_relation(&w, &x).unwrap();

         // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which
         // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube
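The CCCS changes above route dummy construction through the new `Dummy` trait and expose commitments via `CommittedInstanceOps`. A hedged sketch of the resulting call pattern (assumes a `ccs: CCS<Fr>` and that the `Dummy`/`CommittedInstanceOps` traits from `folding::traits` are in scope):

```rust
// hypothetical fragment mirroring the impls shown above
let u_dummy = CCCS::<Projective>::dummy(&ccs);   // sized from the CCS itself, not from a bare `ccs.l`
assert_eq!(u_dummy.get_commitments(), vec![Projective::zero()]); // single commitment C
assert!(u_dummy.is_incoming());                  // a CCCS is always the incoming instance
```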

+139 -101  folding-schemes/src/folding/hypernova/circuits.rs

@@ -1,6 +1,6 @@
 /// Implementation of [HyperNova](https://eprint.iacr.org/2023/573.pdf) circuits
 use ark_crypto_primitives::sponge::{
-    constraints::CryptographicSpongeVar,
+    constraints::{AbsorbGadget, CryptographicSpongeVar},
     poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
     CryptographicSponge,
 };
@@ -14,6 +14,7 @@ use ark_r1cs_std::{
     fields::{fp::FpVar, FieldVar},
     groups::GroupOpsBounds,
     prelude::CurveVar,
+    uint8::UInt8,
     R1CSVar, ToConstraintFieldGadget,
 };
 use ark_relations::r1cs::{
@@ -41,6 +42,7 @@ use crate::folding::{
         CF1, CF2,
     },
     nova::get_r1cs_from_cs,
+    traits::{CommittedInstanceVarOps, Dummy},
 };
 use crate::frontend::FCircuit;
 use crate::utils::virtual_polynomial::VPAuxInfo;
@@ -52,19 +54,16 @@
 /// Committed CCS instance
 #[derive(Debug, Clone)]
-pub struct CCCSVar<C: CurveGroup>
-where
-    <C as CurveGroup>::BaseField: PrimeField,
-{
+pub struct CCCSVar<C: CurveGroup> {
     // Commitment to witness
     pub C: NonNativeAffineVar<C>,
     // Public io
     pub x: Vec<FpVar<CF1<C>>>,
 }
 impl<C> AllocVar<CCCS<C>, CF1<C>> for CCCSVar<C>
 where
     C: CurveGroup,
-    <C as ark_ec::CurveGroup>::BaseField: PrimeField,
 {
     fn new_variable<T: Borrow<CCCS<C>>>(
         cs: impl Into<Namespace<CF1<C>>>,
@@ -83,12 +82,30 @@
     }
 }

+impl<C: CurveGroup> CommittedInstanceVarOps<C> for CCCSVar<C> {
+    type PointVar = NonNativeAffineVar<C>;
+
+    fn get_commitments(&self) -> Vec<Self::PointVar> {
+        vec![self.C.clone()]
+    }
+
+    fn get_public_inputs(&self) -> &[FpVar<CF1<C>>] {
+        &self.x
+    }
+
+    fn enforce_incoming(&self) -> Result<(), SynthesisError> {
+        // `CCCSVar` is always the incoming instance
+        Ok(())
+    }
+
+    fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> {
+        self.x.enforce_equal(&other.x)
+    }
+}
+
 /// Linearized Committed CCS instance
 #[derive(Debug, Clone)]
-pub struct LCCCSVar<C: CurveGroup>
-where
-    <C as CurveGroup>::BaseField: PrimeField,
-{
+pub struct LCCCSVar<C: CurveGroup> {
     // Commitment to witness
     pub C: NonNativeAffineVar<C>,
     // Relaxation factor of z for folded LCCCS
@@ -100,10 +117,10 @@
     // Vector of v_i
     pub v: Vec<FpVar<CF1<C>>>,
 }
 impl<C> AllocVar<LCCCS<C>, CF1<C>> for LCCCSVar<C>
 where
     C: CurveGroup,
-    <C as ark_ec::CurveGroup>::BaseField: PrimeField,
 {
     fn new_variable<T: Borrow<LCCCS<C>>>(
         cs: impl Into<Namespace<CF1<C>>>,
@@ -127,41 +144,44 @@
     }
 }

-impl<C> LCCCSVar<C>
-where
-    C: CurveGroup,
-    <C as Group>::ScalarField: Absorb,
-    <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
-{
-    /// [`LCCCSVar`].hash implements the LCCCS instance hash compatible with the native
-    /// implementation from LCCCS.hash.
-    /// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the LCCCS.
-    /// Additionally it returns the vector of the field elements from the self parameters, so they
-    /// can be reused in other gadgets avoiding recalculating (reconstraining) them.
-    #[allow(clippy::type_complexity)]
-    pub fn hash(
-        self,
-        sponge: &PoseidonSpongeVar<CF1<C>>,
-        pp_hash: FpVar<CF1<C>>,
-        i: FpVar<CF1<C>>,
-        z_0: Vec<FpVar<CF1<C>>>,
-        z_i: Vec<FpVar<CF1<C>>>,
-    ) -> Result<(FpVar<CF1<C>>, Vec<FpVar<CF1<C>>>), SynthesisError> {
-        let mut sponge = sponge.clone();
-        let U_vec = [
-            self.C.to_constraint_field()?,
-            vec![self.u],
-            self.x,
-            self.r_x,
-            self.v,
+impl<C: CurveGroup> AbsorbGadget<C::ScalarField> for LCCCSVar<C> {
+    fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> {
+        FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?)
+    }
+
+    fn to_sponge_field_elements(&self) -> Result<Vec<FpVar<C::ScalarField>>, SynthesisError> {
+        Ok([
+            &self.C.to_constraint_field()?,
+            &[self.u.clone()][..],
+            &self.x,
+            &self.r_x,
+            &self.v,
         ]
-        .concat();
-        sponge.absorb(&pp_hash)?;
-        sponge.absorb(&i)?;
-        sponge.absorb(&z_0)?;
-        sponge.absorb(&z_i)?;
-        sponge.absorb(&U_vec)?;
-        Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec))
+        .concat())
+    }
+}
+
+impl<C: CurveGroup> CommittedInstanceVarOps<C> for LCCCSVar<C> {
+    type PointVar = NonNativeAffineVar<C>;
+
+    fn get_commitments(&self) -> Vec<Self::PointVar> {
+        vec![self.C.clone()]
+    }
+
+    fn get_public_inputs(&self) -> &[FpVar<CF1<C>>] {
+        &self.x
+    }
+
+    fn enforce_incoming(&self) -> Result<(), SynthesisError> {
+        // `LCCCSVar` is always the running instance
+        Err(SynthesisError::Unsatisfiable)
+    }
+
+    fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> {
+        self.u.enforce_equal(&other.u)?;
+        self.x.enforce_equal(&other.x)?;
+        self.r_x.enforce_equal(&other.r_x)?;
+        self.v.enforce_equal(&other.v)
     }
 }
@@ -582,13 +602,13 @@
     /// feed in as parameter for the AugmentedFCircuit::empty method to avoid computing them there.
     pub fn upper_bound_ccs(&self) -> Result<CCS<C1::ScalarField>, Error> {
         let r1cs = get_r1cs_from_cs::<CF1<C1>>(self.clone()).unwrap();
-        let mut ccs = CCS::from_r1cs(r1cs.clone());
+        let mut ccs = CCS::from(r1cs);

         let z_0 = vec![C1::ScalarField::zero(); self.F.state_len()];
         let mut W_i = Witness::<C1::ScalarField>::dummy(&ccs);
-        let mut U_i = LCCCS::<C1>::dummy(ccs.l, ccs.t, ccs.s);
+        let mut U_i = LCCCS::<C1>::dummy(&ccs);
         let mut w_i = W_i.clone();
-        let mut u_i = CCCS::<C1>::dummy(ccs.l);
+        let mut u_i = CCCS::<C1>::dummy(&ccs);

         let n_iters = 2;
         for _ in 0..n_iters {
@@ -654,7 +674,7 @@
                 r_w: C1::ScalarField::one(),
             };
             W_i = Witness::<C1::ScalarField>::dummy(&ccs);
-            U_i = LCCCS::<C1>::dummy(ccs.l, ccs.t, ccs.s);
+            U_i = LCCCS::<C1>::dummy(&ccs);
         }

         Ok(ccs)
@@ -671,7 +691,7 @@
         cs.finalize();
         let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
         let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
-        let ccs = CCS::from_r1cs(r1cs.clone());
+        let ccs = CCS::from(r1cs);
         Ok((cs, ccs))
     }
@@ -714,8 +734,8 @@
                 .unwrap_or(vec![CF1::<C1>::zero(); self.F.external_inputs_len()]))
         })?;

-        let U_dummy = LCCCS::<C1>::dummy(self.ccs.l, self.ccs.t, self.ccs.s);
-        let u_dummy = CCCS::<C1>::dummy(self.ccs.l);
+        let U_dummy = LCCCS::<C1>::dummy(&self.ccs);
+        let u_dummy = CCCS::<C1>::dummy(&self.ccs);
         let U_i =
             LCCCSVar::<C1>::new_witness(cs.clone(), || Ok(self.U_i.unwrap_or(U_dummy.clone())))?;
@@ -728,7 +748,7 @@
         let U_i1_C = NonNativeAffineVar::new_witness(cs.clone(), || {
             Ok(self.U_i1_C.unwrap_or_else(C1::zero))
         })?;
-        let nimfs_proof_dummy = NIMFSProof::<C1>::dummy(&self.ccs, MU, NU);
+        let nimfs_proof_dummy = NIMFSProof::<C1>::dummy((&self.ccs, MU, NU));
         let nimfs_proof = ProofVar::<C1>::new_witness(cs.clone(), || {
             Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy))
         })?;
@@ -742,25 +762,13 @@
         let sponge = PoseidonSpongeVar::<C1::ScalarField>::new(cs.clone(), &self.poseidon_config);

-        // get z_{i+1} from the F circuit
-        let i_usize = self.i_usize.unwrap_or(0);
-        let z_i1 =
-            self.F
-                .generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?;
-
         let is_basecase = i.is_zero()?;
         let is_not_basecase = is_basecase.not();

         // Primary Part
         // P.1. Compute u_i.x
         // u_i.x[0] = H(i, z_0, z_i, U_i)
-        let (u_i_x, _) = U_i.clone().hash(
-            &sponge,
-            pp_hash.clone(),
-            i.clone(),
-            z_0.clone(),
-            z_i.clone(),
-        )?;
+        let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
         // u_i.x[1] = H(cf_U_i)
         let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
@@ -795,19 +803,26 @@
         U_i1.C = U_i1_C;

         // P.4.a compute and check the first output of F'
+        // get z_{i+1} from the F circuit
+        let i_usize = self.i_usize.unwrap_or(0);
+        let z_i1 = self
+            .F
+            .generate_step_constraints(cs.clone(), i_usize, z_i, external_inputs)?;
+
         let (u_i1_x, _) = U_i1.clone().hash(
             &sponge,
-            pp_hash.clone(),
-            i + FpVar::<CF1<C1>>::one(),
-            z_0.clone(),
-            z_i1.clone(),
+            &pp_hash,
+            &(i + FpVar::<CF1<C1>>::one()),
+            &z_0,
+            &z_i1,
         )?;
         let (u_i1_x_base, _) = LCCCSVar::new_constant(cs.clone(), U_dummy)?.hash(
             &sponge,
-            pp_hash.clone(),
-            FpVar::<CF1<C1>>::one(),
-            z_0.clone(),
-            z_i1.clone(),
+            &pp_hash,
+            &FpVar::<CF1<C1>>::one(),
+            &z_0,
+            &z_i1,
         )?;
         let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?;
         x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?;
@@ -891,7 +906,8 @@ mod tests {
     use crate::{
         arith::{
             ccs::tests::{get_test_ccs, get_test_z},
-            r1cs::{extract_w_x, RelaxedR1CS},
+            r1cs::extract_w_x,
+            Arith,
         },
         commitment::{pedersen::Pedersen, CommitmentScheme},
         folding::{
@@ -900,6 +916,7 @@ mod tests {
             utils::{compute_c, compute_sigmas_thetas},
             HyperNovaCycleFoldCircuit,
         },
+        traits::CommittedInstanceOps,
     },
     frontend::utils::CubicFCircuit,
     transcript::poseidon::poseidon_canonical_config,
@@ -1084,7 +1101,7 @@
         assert_eq!(folded_lcccs, folded_lcccs_v);

         // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
-        folded_lcccs.check_relation(&ccs, &folded_witness).unwrap();
+        ccs.check_relation(&folded_witness, &folded_lcccs).unwrap();

         // allocate circuit inputs
         let cs = ConstraintSystem::<Fr>::new_ref();
@@ -1113,6 +1130,37 @@
         assert_eq!(folded_lcccsVar.u.value().unwrap(), folded_lcccs.u);
     }

+    /// test that checks the native LCCCS.to_sponge_{bytes,field_elements} vs
+    /// the R1CS constraints version
+    #[test]
+    pub fn test_lcccs_to_sponge_preimage() {
+        let mut rng = test_rng();
+
+        let ccs = get_test_ccs();
+        let z1 = get_test_z::<Fr>(3);
+
+        let (pedersen_params, _) =
+            Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
+
+        let (lcccs, _) = ccs
+            .to_lcccs::<_, _, Pedersen<Projective, true>, true>(&mut rng, &pedersen_params, &z1)
+            .unwrap();
+        let bytes = lcccs.to_sponge_bytes_as_vec();
+        let field_elements = lcccs.to_sponge_field_elements_as_vec();
+
+        let cs = ConstraintSystem::<Fr>::new_ref();
+        let lcccsVar = LCCCSVar::<Projective>::new_witness(cs.clone(), || Ok(lcccs)).unwrap();
+        let bytes_var = lcccsVar.to_sponge_bytes().unwrap();
+        let field_elements_var = lcccsVar.to_sponge_field_elements().unwrap();
+
+        assert!(cs.is_satisfied().unwrap());
+
+        // check that the natively computed and in-circuit computed hashes match
+        assert_eq!(bytes_var.value().unwrap(), bytes);
+        assert_eq!(field_elements_var.value().unwrap(), field_elements);
+    }
+
     /// test that checks the native LCCCS.hash vs the R1CS constraints version
     #[test]
     pub fn test_lcccs_hash() {
@@ -1133,9 +1181,7 @@
         let (lcccs, _) = ccs
             .to_lcccs::<_, _, Pedersen<Projective, true>, true>(&mut rng, &pedersen_params, &z1)
             .unwrap();
-        let h = lcccs
-            .clone()
-            .hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone());
+        let h = lcccs.clone().hash(&sponge, pp_hash, i, &z_0, &z_i);

         let cs = ConstraintSystem::<Fr>::new_ref();
@@ -1147,13 +1193,7 @@
         let lcccsVar = LCCCSVar::<Projective>::new_witness(cs.clone(), || Ok(lcccs)).unwrap();
         let (hVar, _) = lcccsVar
             .clone()
-            .hash(
-                &spongeVar,
-                pp_hashVar,
-                iVar.clone(),
-                z_0Var.clone(),
-                z_iVar.clone(),
-            )
+            .hash(&spongeVar, &pp_hashVar, &iVar, &z_0Var, &z_iVar)
             .unwrap();

         assert!(cs.is_satisfied().unwrap());
@@ -1209,13 +1249,13 @@
         // prepare the dummy instances
         let W_dummy = Witness::<Fr>::dummy(&ccs);
-        let U_dummy = LCCCS::<Projective>::dummy(ccs.l, ccs.t, ccs.s);
+        let U_dummy = LCCCS::<Projective>::dummy(&ccs);
         let w_dummy = W_dummy.clone();
-        let u_dummy = CCCS::<Projective>::dummy(ccs.l);
+        let u_dummy = CCCS::<Projective>::dummy(&ccs);
         let (cf_W_dummy, cf_U_dummy): (
             CycleFoldWitness<Projective2>,
             CycleFoldCommittedInstance<Projective2>,
-        ) = cf_r1cs.dummy_running_instance();
+        ) = cf_r1cs.dummy_witness_instance();

         // set the initial dummy instances
         let mut W_i = W_dummy.clone();
@@ -1225,7 +1265,7 @@
         let mut cf_W_i = cf_W_dummy.clone();
         let mut cf_U_i = cf_U_dummy.clone();
         u_i.x = vec![
-            U_i.hash(&sponge, pp_hash, Fr::zero(), z_0.clone(), z_i.clone()),
+            U_i.hash(&sponge, pp_hash, Fr::zero(), &z_0, &z_i),
             cf_U_i.hash_cyclefold(&sponge, pp_hash),
         ];
@@ -1250,9 +1290,9 @@
             if i == 0 {
                 W_i1 = Witness::<Fr>::dummy(&ccs);
-                U_i1 = LCCCS::dummy(ccs.l, ccs.t, ccs.s);
+                U_i1 = LCCCS::dummy(&ccs);

-                let u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), z_0.clone(), z_i1.clone());
+                let u_i1_x = U_i1.hash(&sponge, pp_hash, Fr::one(), &z_0, &z_i1);

                 // hash the initial (dummy) CycleFold instance, which is used as the 2nd public
                 // input in the AugmentedFCircuit
@@ -1307,10 +1347,9 @@
                     .unwrap();

                 // sanity check: check the folded instance relation
-                U_i1.check_relation(&ccs, &W_i1).unwrap();
+                ccs.check_relation(&W_i1, &U_i1).unwrap();

-                let u_i1_x =
-                    U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), z_0.clone(), z_i1.clone());
+                let u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1);

                 let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec();
                 let rho_Fq = Fq::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap();
@@ -1427,15 +1466,14 @@
             (u_i, w_i) = ccs
                 .to_cccs::<_, _, Pedersen<Projective>, false>(&mut rng, &pedersen_params, &r1cs_z)
                 .unwrap();
-            u_i.check_relation(&ccs, &w_i).unwrap();
+            ccs.check_relation(&w_i, &u_i).unwrap();

             // sanity checks
             assert_eq!(w_i.w, r1cs_w_i1);
             assert_eq!(u_i.x, r1cs_x_i1);
             assert_eq!(u_i.x[0], augmented_f_circuit.x.unwrap());
             assert_eq!(u_i.x[1], augmented_f_circuit.cf_x.unwrap());
-            let expected_u_i1_x =
-                U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), z_0.clone(), z_i1.clone());
+            let expected_u_i1_x = U_i1.hash(&sponge, pp_hash, iFr + Fr::one(), &z_0, &z_i1);
             let expected_cf_U_i1_x = cf_U_i.hash_cyclefold(&sponge, pp_hash);
             // u_i is already u_i1 at this point, check that has the expected value at x[0]
             assert_eq!(u_i.x[0], expected_u_i1_x);
@@ -1449,12 +1487,12 @@
             W_i = W_i1.clone();

             // check the new LCCCS instance relation
-            U_i.check_relation(&ccs, &W_i).unwrap();
+            ccs.check_relation(&W_i, &U_i).unwrap();
             // check the new CCCS instance relation
-            u_i.check_relation(&ccs, &w_i).unwrap();
+            ccs.check_relation(&w_i, &u_i).unwrap();
             // check the CycleFold instance relation
-            cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i).unwrap();
+            cf_r1cs.check_relation(&cf_W_i, &cf_U_i).unwrap();

             println!("augmented_f_circuit step {}: {:?}", i, start.elapsed());
         }

+ 7
- 23
folding-schemes/src/folding/hypernova/decider_eth.rs

@ -234,9 +234,7 @@ pub mod tests {
use super::*; use super::*;
use crate::commitment::{kzg::KZG, pedersen::Pedersen}; use crate::commitment::{kzg::KZG, pedersen::Pedersen};
use crate::folding::hypernova::cccs::CCCS; use crate::folding::hypernova::cccs::CCCS;
use crate::folding::hypernova::{
PreprocessorParam, ProverParams, VerifierParams as HyperNovaVerifierParams,
};
use crate::folding::hypernova::PreprocessorParam;
use crate::folding::nova::decider_eth::VerifierParam; use crate::folding::nova::decider_eth::VerifierParam;
use crate::frontend::utils::CubicFCircuit; use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config; use crate::transcript::poseidon::poseidon_canonical_config;
@ -371,33 +369,19 @@ pub mod tests {
.serialize_compressed(&mut hypernova_vp_serialized) .serialize_compressed(&mut hypernova_vp_serialized)
.unwrap(); .unwrap();
let hypernova_pp_deserialized = ProverParams::<
Projective,
Projective2,
KZG<'static, Bn254>,
Pedersen<Projective2>,
false,
>::deserialize_prover_params(
let hypernova_pp_deserialized = HN::pp_deserialize_with_mode(
hypernova_pp_serialized.as_slice(), hypernova_pp_serialized.as_slice(),
Compress::Yes, Compress::Yes,
Validate::No, Validate::No,
&hypernova_params.0.ccs,
&poseidon_config,
(), // FCircuit's Params
) )
.unwrap(); .unwrap();
let hypernova_vp_deserialized = HyperNovaVerifierParams::<
Projective,
Projective2,
KZG<'static, Bn254>,
Pedersen<Projective2>,
false,
>::deserialize_verifier_params(
let hypernova_vp_deserialized = HN::vp_deserialize_with_mode(
hypernova_vp_serialized.as_slice(), hypernova_vp_serialized.as_slice(),
Compress::Yes, Compress::Yes,
Validate::No, Validate::No,
&hypernova_params.0.ccs.unwrap(),
&poseidon_config,
(), // FCircuit's Params
) )
.unwrap(); .unwrap();
@ -416,7 +400,7 @@ pub mod tests {
let verified = D::verify( let verified = D::verify(
decider_vp.clone(), decider_vp.clone(),
hypernova.i.clone(),
hypernova.i,
hypernova.z_0.clone(), hypernova.z_0.clone(),
hypernova.z_i.clone(), hypernova.z_i.clone(),
&(), &(),
@ -483,7 +467,7 @@ pub mod tests {
let verified = D::verify( let verified = D::verify(
decider_vp_deserialized, decider_vp_deserialized,
i_deserialized.clone(),
i_deserialized,
z_0_deserialized.clone(), z_0_deserialized.clone(),
z_i_deserialized.clone(), z_i_deserialized.clone(),
&(), &(),
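
The hunks above replace the fully type-annotated `deserialize_prover_params` / `deserialize_verifier_params` calls with the new `pp_deserialize_with_mode` / `vp_deserialize_with_mode` entry points on the folding scheme, which take the FCircuit params and rebuild the CCS (and CycleFold R1CS) internally instead of receiving them. A minimal sketch of the new round-trip, assuming the `HN` type alias from this test and an FCircuit whose params are `()`:

// serialize the params produced by HN::preprocess (only the commitment-scheme keys get written)
let mut pp_bytes = vec![];
hypernova_params.0.serialize_compressed(&mut pp_bytes).unwrap();
let mut vp_bytes = vec![];
hypernova_params.1.serialize_compressed(&mut vp_bytes).unwrap();

// deserialize them back; the CCS and cf_R1CS are regenerated from the (empty) circuits
let pp = HN::pp_deserialize_with_mode(pp_bytes.as_slice(), Compress::Yes, Validate::No, ()).unwrap();
let vp = HN::vp_deserialize_with_mode(vp_bytes.as_slice(), Compress::Yes, Validate::No, ()).unwrap();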

+ 28
- 43
folding-schemes/src/folding/hypernova/decider_eth_circuit.rs

@ -26,9 +26,6 @@ use super::{
nimfs::{NIMFSProof, NIMFS}, nimfs::{NIMFSProof, NIMFS},
HyperNova, Witness, CCCS, LCCCS, HyperNova, Witness, CCCS, LCCCS,
}; };
use crate::arith::ccs::CCS;
use crate::arith::r1cs::R1CS;
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme};
use crate::folding::circuits::{ use crate::folding::circuits::{
cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness}, cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness},
CF1, CF2, CF1, CF2,
@ -40,6 +37,14 @@ use crate::utils::{
vec::poly_from_vec, vec::poly_from_vec,
}; };
use crate::Error; use crate::Error;
use crate::{
arith::{ccs::CCS, r1cs::R1CS},
folding::traits::{CommittedInstanceVarOps, Dummy, WitnessVarOps},
};
use crate::{
commitment::{pedersen::Params as PedersenParams, CommitmentScheme},
folding::nova::decider_eth_circuit::evaluate_gadget,
};
/// In-circuit representation of the Witness associated to the CommittedInstance. /// In-circuit representation of the Witness associated to the CommittedInstance.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -66,6 +71,12 @@ impl AllocVar, F> for WitnessVar {
} }
} }
impl<F: PrimeField> WitnessVarOps<F> for WitnessVar<F> {
fn get_openings(&self) -> Vec<(&[FpVar<F>], FpVar<F>)> {
vec![(&self.w, self.r_w.clone())]
}
}
/// CCSMatricesVar contains the matrices 'M' of the CCS without the rest of CCS parameters. /// CCSMatricesVar contains the matrices 'M' of the CCS without the rest of CCS parameters.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct CCSMatricesVar<F: PrimeField> { pub struct CCSMatricesVar<F: PrimeField> {
@ -286,8 +297,8 @@ where
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()])) Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?; })?;
let U_dummy_native = LCCCS::<C1>::dummy(self.ccs.l, self.ccs.t, self.ccs.s);
let u_dummy_native = CCCS::<C1>::dummy(self.ccs.l);
let U_dummy_native = LCCCS::<C1>::dummy(&self.ccs);
let u_dummy_native = CCCS::<C1>::dummy(&self.ccs);
let w_dummy_native = Witness::<C1::ScalarField>::new( let w_dummy_native = Witness::<C1::ScalarField>::new(
vec![C1::ScalarField::zero(); self.ccs.n - 3 /* (3=2+1, since u_i.x.len=2) */], vec![C1::ScalarField::zero(); self.ccs.n - 3 /* (3=2+1, since u_i.x.len=2) */],
); );
@ -305,7 +316,7 @@ where
let W_i1 = WitnessVar::<C1::ScalarField>::new_witness(cs.clone(), || { let W_i1 = WitnessVar::<C1::ScalarField>::new_witness(cs.clone(), || {
Ok(self.W_i1.unwrap_or(w_dummy_native.clone())) Ok(self.W_i1.unwrap_or(w_dummy_native.clone()))
})?; })?;
let nimfs_proof_dummy = NIMFSProof::<C1>::dummy(&self.ccs, 1, 1); // mu=1 & nu=1 because the last fold is 2-to-1
let nimfs_proof_dummy = NIMFSProof::<C1>::dummy((&self.ccs, 1, 1)); // mu=1 & nu=1 because the last fold is 2-to-1
let nimfs_proof = NIMFSProofVar::<C1>::new_witness(cs.clone(), || { let nimfs_proof = NIMFSProofVar::<C1>::new_witness(cs.clone(), || {
Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy)) Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy))
})?; })?;
@ -314,7 +325,7 @@ where
let kzg_challenge = FpVar::<CF1<C1>>::new_input(cs.clone(), || { let kzg_challenge = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.kzg_challenge.unwrap_or_else(CF1::<C1>::zero)) Ok(self.kzg_challenge.unwrap_or_else(CF1::<C1>::zero))
})?; })?;
let _eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
let eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero)) Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero))
})?; })?;
@ -340,13 +351,7 @@ where
)?; )?;
// 3.a u_i.x[0] == H(i, z_0, z_i, U_i) // 3.a u_i.x[0] == H(i, z_0, z_i, U_i)
let (u_i_x, _) = U_i.clone().hash(
&sponge,
pp_hash.clone(),
i.clone(),
z_0.clone(),
z_i.clone(),
)?;
let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
(u_i.x[0]).enforce_equal(&u_i_x)?; (u_i.x[0]).enforce_equal(&u_i_x)?;
#[cfg(feature = "light-test")] #[cfg(feature = "light-test")]
@ -373,10 +378,7 @@ where
let cf_u_dummy_native = let cf_u_dummy_native =
CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN); CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN);
let cf_w_dummy_native = CycleFoldWitness::<C2>::dummy(
self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l,
self.cf_E_len,
);
let cf_w_dummy_native = CycleFoldWitness::<C2>::dummy(&self.cf_r1cs);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || { let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone())) Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?; })?;
@ -424,14 +426,6 @@ where
// `rho_bits` computed along the way of computing `computed_U_i1` for the later `rho_powers` // `rho_bits` computed along the way of computing `computed_U_i1` for the later `rho_powers`
// check (6.b). // check (6.b).
// Check 7 is temporary disabled due
// https://github.com/privacy-scaling-explorations/sonobe/issues/80
log::warn!("[WARNING]: issue #80 (https://github.com/privacy-scaling-explorations/sonobe/issues/80) is not resolved yet.");
//
// 7. check eval_W==p_W(c_W)
// let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.W, incircuit_c_W)?;
// incircuit_eval_W.enforce_equal(&eval_W)?;
// 8.a verify the NIMFS.V of the final fold, and check that the obtained rho_powers from the // 8.a verify the NIMFS.V of the final fold, and check that the obtained rho_powers from the
// transcript match the one from the public input (so we avoid the onchain logic of the // transcript match the one from the public input (so we avoid the onchain logic of the
// verifier computing it). // verifier computing it).
@ -463,6 +457,10 @@ where
computed_U_i1.r_x.enforce_equal(&U_i1.r_x)?; computed_U_i1.r_x.enforce_equal(&U_i1.r_x)?;
computed_U_i1.v.enforce_equal(&U_i1.v)?; computed_U_i1.v.enforce_equal(&U_i1.v)?;
// 7. check eval_W==p_W(c_W)
let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.w, incircuit_challenge)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
// 8.b check that the in-circuit computed r is equal to the inputted r. // 8.b check that the in-circuit computed r is equal to the inputted r.
let rho = Boolean::le_bits_to_fp_var(&rho_bits)?; let rho = Boolean::le_bits_to_fp_var(&rho_bits)?;
@ -511,7 +509,6 @@ pub mod tests {
use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective}; use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective};
use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2};
use ark_relations::r1cs::ConstraintSystem; use ark_relations::r1cs::ConstraintSystem;
use ark_std::One;
use ark_std::{test_rng, UniformRand}; use ark_std::{test_rng, UniformRand};
use super::*; use super::*;
@ -527,7 +524,7 @@ pub mod tests {
let n_rows = 2_u32.pow(5) as usize; let n_rows = 2_u32.pow(5) as usize;
let n_cols = 2_u32.pow(5) as usize; let n_cols = 2_u32.pow(5) as usize;
let r1cs = R1CS::<Fr>::rand(&mut rng, n_rows, n_cols); let r1cs = R1CS::<Fr>::rand(&mut rng, n_rows, n_cols);
let ccs = CCS::from_r1cs(r1cs);
let ccs = CCS::from(r1cs);
let z: Vec<Fr> = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); let z: Vec<Fr> = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect();
let (pedersen_params, _) = let (pedersen_params, _) =
@ -585,22 +582,10 @@ pub mod tests {
// generate a Nova instance and do a step of it // generate a Nova instance and do a step of it
let mut hypernova = HN::init(&hn_params, F_circuit, z_0.clone()).unwrap(); let mut hypernova = HN::init(&hn_params, F_circuit, z_0.clone()).unwrap();
hypernova
.prove_step(&mut rng, vec![], Some((vec![], vec![])))
.unwrap();
hypernova.prove_step(&mut rng, vec![], None).unwrap();
let ivc_v = hypernova.clone();
let (running_instance, incoming_instance, cyclefold_instance) = ivc_v.instances();
HN::verify(
hn_params.1, // HN's verifier_params
z_0,
ivc_v.z_i,
Fr::one(),
running_instance,
incoming_instance,
cyclefold_instance,
)
.unwrap();
let ivc_proof = hypernova.ivc_proof();
HN::verify(hn_params.1, ivc_proof).unwrap();
// load the DeciderEthCircuit from the generated Nova instance // load the DeciderEthCircuit from the generated Nova instance
let decider_circuit = DeciderEthCircuit::< let decider_circuit = DeciderEthCircuit::<
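
The previously disabled check 7 (see the removed issue-#80 warning above) is re-enabled: `evaluate_gadget` evaluates, in-circuit, the univariate polynomial built from the witness vector `W_i1.w` at the squeezed KZG challenge, and the result is constrained to equal the public input `eval_W`. For intuition, the native counterpart of that constraint is roughly the following sketch (placeholder names `w_vec`, `challenge`, `claimed_eval`; `poly_from_vec` is the helper already imported from `utils::vec`):

use ark_poly::Polynomial;

// build a univariate polynomial from the witness vector and evaluate it at the challenge
let p_w = poly_from_vec(w_vec.clone())?;   // w_vec: Vec<Fr>, the decider witness W_i1.w
let eval = p_w.evaluate(&challenge);       // challenge: Fr, the KZG challenge c_W
assert_eq!(eval, claimed_eval);            // must equal the public input eval_W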

+ 47
- 57
folding-schemes/src/folding/hypernova/lcccs.rs

@ -1,5 +1,5 @@
use ark_crypto_primitives::sponge::Absorb; use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ec::CurveGroup;
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_poly::DenseMultilinearExtension; use ark_poly::DenseMultilinearExtension;
use ark_poly::MultilinearExtension; use ark_poly::MultilinearExtension;
@ -8,10 +8,14 @@ use ark_serialize::CanonicalSerialize;
use ark_std::rand::Rng; use ark_std::rand::Rng;
use ark_std::Zero; use ark_std::Zero;
use super::circuits::LCCCSVar;
use super::Witness; use super::Witness;
use crate::arith::ccs::CCS; use crate::arith::ccs::CCS;
use crate::arith::Arith;
use crate::commitment::CommitmentScheme; use crate::commitment::CommitmentScheme;
use crate::transcript::{AbsorbNonNative, Transcript};
use crate::folding::circuits::CF1;
use crate::folding::traits::{CommittedInstanceOps, Dummy};
use crate::transcript::AbsorbNonNative;
use crate::utils::mle::dense_vec_to_dense_mle; use crate::utils::mle::dense_vec_to_dense_mle;
use crate::utils::vec::mat_vec_mul; use crate::utils::vec::mat_vec_mul;
use crate::Error; use crate::Error;
@ -78,42 +82,41 @@ impl CCS {
} }
} }
impl<C: CurveGroup> LCCCS<C> {
pub fn dummy(l: usize, t: usize, s: usize) -> LCCCS<C>
where
C::ScalarField: PrimeField,
{
LCCCS::<C> {
impl<C: CurveGroup> Dummy<&CCS<CF1<C>>> for LCCCS<C> {
fn dummy(ccs: &CCS<CF1<C>>) -> Self {
Self {
C: C::zero(), C: C::zero(),
u: C::ScalarField::zero(),
x: vec![C::ScalarField::zero(); l],
r_x: vec![C::ScalarField::zero(); s],
v: vec![C::ScalarField::zero(); t],
u: CF1::<C>::zero(),
x: vec![CF1::<C>::zero(); ccs.l],
r_x: vec![CF1::<C>::zero(); ccs.s],
v: vec![CF1::<C>::zero(); ccs.t],
} }
} }
}
impl<C: CurveGroup> Arith<Witness<CF1<C>>, LCCCS<C>> for CCS<CF1<C>> {
type Evaluation = Vec<CF1<C>>;
/// Perform the check of the LCCCS instance described at section 4.2, /// Perform the check of the LCCCS instance described at section 4.2,
/// notice that this method does not check the commitment correctness /// notice that this method does not check the commitment correctness
pub fn check_relation(
&self,
ccs: &CCS<C::ScalarField>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
// check CCS relation
let z: Vec<C::ScalarField> = [vec![self.u], self.x.clone(), w.w.to_vec()].concat();
fn eval_relation(&self, w: &Witness<CF1<C>>, u: &LCCCS<C>) -> Result<Self::Evaluation, Error> {
let z = [&[u.u][..], &u.x, &w.w].concat();
let computed_v: Vec<C::ScalarField> = ccs
.M
self.M
.iter() .iter()
.map(|M_j| { .map(|M_j| {
let Mz_mle = dense_vec_to_dense_mle(ccs.s, &mat_vec_mul(M_j, &z)?);
Mz_mle.evaluate(&self.r_x).ok_or(Error::EvaluationFail)
let Mz_mle = dense_vec_to_dense_mle(self.s, &mat_vec_mul(M_j, &z)?);
Mz_mle.evaluate(&u.r_x).ok_or(Error::EvaluationFail)
}) })
.collect::<Result<_, Error>>()?;
if computed_v != self.v {
return Err(Error::NotSatisfied);
}
Ok(())
.collect()
}
fn check_evaluation(
_w: &Witness<CF1<C>>,
u: &LCCCS<C>,
e: Self::Evaluation,
) -> Result<(), Error> {
(u.v == e).then_some(()).ok_or(Error::NotSatisfied)
} }
} }
@ -121,9 +124,8 @@ impl Absorb for LCCCS
where where
C::ScalarField: Absorb, C::ScalarField: Absorb,
{ {
fn to_sponge_bytes(&self, _dest: &mut Vec<u8>) {
// This is never called
unimplemented!()
fn to_sponge_bytes(&self, dest: &mut Vec<u8>) {
C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest);
} }
fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) { fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) {
@ -140,29 +142,15 @@ where
} }
} }
impl<C: CurveGroup> LCCCS<C>
where
<C as Group>::ScalarField: Absorb,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
/// [`LCCCS`].hash implements the committed instance hash compatible with the gadget
/// implemented in nova/circuits.rs::CommittedInstanceVar.hash.
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the LCCCS.
pub fn hash<T: Transcript<C::ScalarField>>(
&self,
sponge: &T,
pp_hash: C::ScalarField,
i: C::ScalarField,
z_0: Vec<C::ScalarField>,
z_i: Vec<C::ScalarField>,
) -> C::ScalarField {
let mut sponge = sponge.clone();
sponge.absorb(&pp_hash);
sponge.absorb(&i);
sponge.absorb(&z_0);
sponge.absorb(&z_i);
sponge.absorb(&self);
sponge.squeeze_field_elements(1)[0]
impl<C: CurveGroup> CommittedInstanceOps<C> for LCCCS<C> {
type Var = LCCCSVar<C>;
fn get_commitments(&self) -> Vec<C> {
vec![self.C]
}
fn is_incoming(&self) -> bool {
false
} }
} }
@ -216,7 +204,7 @@ pub mod tests {
let n_rows = 2_u32.pow(5) as usize; let n_rows = 2_u32.pow(5) as usize;
let n_cols = 2_u32.pow(5) as usize; let n_cols = 2_u32.pow(5) as usize;
let r1cs = R1CS::<Fr>::rand(&mut rng, n_rows, n_cols); let r1cs = R1CS::<Fr>::rand(&mut rng, n_rows, n_cols);
let ccs = CCS::from_r1cs(r1cs);
let ccs = CCS::from(r1cs);
let z: Vec<Fr> = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect(); let z: Vec<Fr> = (0..n_cols).map(|_| Fr::rand(&mut rng)).collect();
let (pedersen_params, _) = let (pedersen_params, _) =
@ -250,12 +238,14 @@ pub mod tests {
let ccs = get_test_ccs(); let ccs = get_test_ccs();
let z = get_test_z(3); let z = get_test_z(3);
ccs.check_relation(&z.clone()).unwrap();
let (w, x) = ccs.split_z(&z);
ccs.check_relation(&w, &x).unwrap();
// Mutate z so that the relation does not hold // Mutate z so that the relation does not hold
let mut bad_z = z.clone(); let mut bad_z = z.clone();
bad_z[3] = Fr::zero(); bad_z[3] = Fr::zero();
assert!(ccs.check_relation(&bad_z.clone()).is_err());
let (bad_w, bad_x) = ccs.split_z(&bad_z);
assert!(ccs.check_relation(&bad_w, &bad_x).is_err());
let (pedersen_params, _) = let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap(); Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
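
With the new `Arith` trait the satisfiability check is driven by the constraint system rather than the instance: `eval_relation` computes, for each matrix M_j, the MLE of M_j·z evaluated at `r_x`, and `check_evaluation` compares the result against the claimed `v` values of the LCCCS. The resulting call pattern, reusing the `get_test_ccs` / `get_test_z` helpers shown above (a sketch, not additional test code):

let ccs = get_test_ccs();
let z = get_test_z(3);

// plain CCS satisfiability: split z into (w, x) and let the CCS check the pair
let (w, x) = ccs.split_z(&z);
ccs.check_relation(&w, &x).unwrap();

// the same entry point now also covers LCCCS / CCCS instances, e.g.
// ccs.check_relation(&witness, &lcccs).unwrap();
// ccs.check_relation(&witness, &cccs).unwrap();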

+ 328
- 140
folding-schemes/src/folding/hypernova/mod.rs

@ -6,7 +6,7 @@ use ark_crypto_primitives::sponge::{
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError};
use ark_std::{fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero}; use ark_std::{fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero};
pub mod cccs; pub mod cccs;
@ -15,31 +15,36 @@ pub mod decider_eth;
pub mod decider_eth_circuit; pub mod decider_eth_circuit;
pub mod lcccs; pub mod lcccs;
pub mod nimfs; pub mod nimfs;
pub mod serialize;
pub mod utils; pub mod utils;
use cccs::CCCS; use cccs::CCCS;
use circuits::AugmentedFCircuit; use circuits::AugmentedFCircuit;
use decider_eth_circuit::WitnessVar;
use lcccs::LCCCS; use lcccs::LCCCS;
use nimfs::NIMFS; use nimfs::NIMFS;
use crate::commitment::CommitmentScheme;
use crate::constants::NOVA_N_BITS_RO; use crate::constants::NOVA_N_BITS_RO;
use crate::folding::circuits::{
cyclefold::{
fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig,
CycleFoldWitness,
use crate::folding::{
circuits::{
cyclefold::{
fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig,
CycleFoldWitness,
},
CF2,
}, },
CF2,
nova::{get_r1cs_from_cs, PreprocessorParam},
traits::{CommittedInstanceOps, Dummy, WitnessOps},
}; };
use crate::folding::nova::{get_r1cs_from_cs, PreprocessorParam};
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::utils::{get_cm_coordinates, pp_hash}; use crate::utils::{get_cm_coordinates, pp_hash};
use crate::Error; use crate::Error;
use crate::{arith::r1cs::RelaxedR1CS, commitment::CommitmentScheme};
use crate::{ use crate::{
arith::{ arith::{
ccs::CCS, ccs::CCS,
r1cs::{extract_w_x, R1CS}, r1cs::{extract_w_x, R1CS},
Arith,
}, },
FoldingScheme, MultiFolding, FoldingScheme, MultiFolding,
}; };
@ -76,8 +81,19 @@ impl Witness {
// always. // always.
Self { w, r_w: F::zero() } Self { w, r_w: F::zero() }
} }
pub fn dummy(ccs: &CCS<F>) -> Self {
Witness::<F>::new(vec![F::zero(); ccs.n - ccs.l - 1])
}
impl<F: PrimeField> Dummy<&CCS<F>> for Witness<F> {
fn dummy(ccs: &CCS<F>) -> Self {
Self::new(vec![F::zero(); ccs.n - ccs.l - 1])
}
}
impl<F: PrimeField> WitnessOps<F> for Witness<F> {
type Var = WitnessVar<F>;
fn get_openings(&self) -> Vec<(&[F], F)> {
vec![(&self.w, self.r_w)]
} }
} }
@ -101,6 +117,28 @@ where
pub ccs: Option<CCS<C1::ScalarField>>, pub ccs: Option<CCS<C1::ScalarField>>,
} }
impl<
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
const H: bool,
> CanonicalSerialize for ProverParams<C1, C2, CS1, CS2, H>
{
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: Compress,
) -> Result<(), SerializationError> {
self.cs_pp.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_pp.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: Compress) -> usize {
self.cs_pp.serialized_size(compress) + self.cf_cs_pp.serialized_size(compress)
}
}
/// Verification parameters for HyperNova-based IVC /// Verification parameters for HyperNova-based IVC
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct VerifierParams< pub struct VerifierParams<
@ -122,6 +160,27 @@ pub struct VerifierParams<
pub cf_cs_vp: CS2::VerifierParams, pub cf_cs_vp: CS2::VerifierParams,
} }
impl<C1, C2, CS1, CS2, const H: bool> CanonicalSerialize for VerifierParams<C1, C2, CS1, CS2, H>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
{
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: ark_serialize::Compress,
) -> Result<(), ark_serialize::SerializationError> {
self.cs_vp.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_vp.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
self.cs_vp.serialized_size(compress) + self.cf_cs_vp.serialized_size(compress)
}
}
impl<C1, C2, CS1, CS2, const H: bool> VerifierParams<C1, C2, CS1, CS2, H> impl<C1, C2, CS1, CS2, const H: bool> VerifierParams<C1, C2, CS1, CS2, H>
where where
C1: CurveGroup, C1: CurveGroup,
@ -141,6 +200,23 @@ where
} }
} }
#[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)]
pub struct IVCProof<C1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
{
pub i: C1::ScalarField,
pub z_0: Vec<C1::ScalarField>,
pub z_i: Vec<C1::ScalarField>,
pub W_i: Witness<C1::ScalarField>,
pub U_i: LCCCS<C1>,
pub w_i: Witness<C1::ScalarField>,
pub u_i: CCCS<C1>,
pub cf_W_i: CycleFoldWitness<C2>,
pub cf_U_i: CycleFoldCommittedInstance<C2>,
}
/// Implements HyperNova+CycleFold's IVC, described in /// Implements HyperNova+CycleFold's IVC, described in
/// [HyperNova](https://eprint.iacr.org/2023/573.pdf) and /// [HyperNova](https://eprint.iacr.org/2023/573.pdf) and
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait
@ -241,7 +317,7 @@ where
.to_lcccs::<_, _, CS1, H>(&mut rng, &self.cs_pp, &r1cs_z)?; .to_lcccs::<_, _, CS1, H>(&mut rng, &self.cs_pp, &r1cs_z)?;
#[cfg(test)] #[cfg(test)]
U_i.check_relation(&self.ccs, &W_i)?;
self.ccs.check_relation(&W_i, &U_i)?;
Ok((U_i, W_i)) Ok((U_i, W_i))
} }
@ -263,7 +339,7 @@ where
.to_cccs::<_, _, CS1, H>(&mut rng, &self.cs_pp, &r1cs_z)?; .to_cccs::<_, _, CS1, H>(&mut rng, &self.cs_pp, &r1cs_z)?;
#[cfg(test)] #[cfg(test)]
u_i.check_relation(&self.ccs, &w_i)?;
self.ccs.check_relation(&w_i, &u_i)?;
Ok((u_i, w_i)) Ok((u_i, w_i))
} }
@ -295,10 +371,10 @@ where
external_inputs: Vec<C1::ScalarField>, external_inputs: Vec<C1::ScalarField>,
) -> Result<Vec<C1::ScalarField>, Error> { ) -> Result<Vec<C1::ScalarField>, Error> {
// prepare the initial dummy instances // prepare the initial dummy instances
let U_i = LCCCS::<C1>::dummy(self.ccs.l, self.ccs.t, self.ccs.s);
let mut u_i = CCCS::<C1>::dummy(self.ccs.l);
let U_i = LCCCS::<C1>::dummy(&self.ccs);
let mut u_i = CCCS::<C1>::dummy(&self.ccs);
let (_, cf_U_i): (CycleFoldWitness<C2>, CycleFoldCommittedInstance<C2>) = let (_, cf_U_i): (CycleFoldWitness<C2>, CycleFoldCommittedInstance<C2>) =
self.cf_r1cs.dummy_running_instance();
self.cf_r1cs.dummy_witness_instance();
let sponge = PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config); let sponge = PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config);
@ -307,8 +383,8 @@ where
&sponge, &sponge,
self.pp_hash, self.pp_hash,
C1::ScalarField::zero(), // i C1::ScalarField::zero(), // i
self.z_0.clone(),
state.clone(),
&self.z_0,
&state,
), ),
cf_U_i.hash_cyclefold(&sponge, self.pp_hash), cf_U_i.hash_cyclefold(&sponge, self.pp_hash),
]; ];
@ -319,13 +395,13 @@ where
.step_native(0, state.clone(), external_inputs.clone())?; .step_native(0, state.clone(), external_inputs.clone())?;
// compute u_{i+1}.x // compute u_{i+1}.x
let U_i1 = LCCCS::dummy(self.ccs.l, self.ccs.t, self.ccs.s);
let U_i1 = LCCCS::dummy(&self.ccs);
let u_i1_x = U_i1.hash( let u_i1_x = U_i1.hash(
&sponge, &sponge,
self.pp_hash, self.pp_hash,
C1::ScalarField::one(), // i+1, where i=0 C1::ScalarField::one(), // i+1, where i=0
self.z_0.clone(),
z_i1.clone(),
&self.z_0,
&z_i1,
); );
let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, self.pp_hash); let cf_u_i1_x = cf_U_i.hash_cyclefold(&sponge, self.pp_hash);
@ -403,6 +479,74 @@ where
type MultiCommittedInstanceWithWitness = type MultiCommittedInstanceWithWitness =
(Vec<Self::RunningInstance>, Vec<Self::IncomingInstance>); (Vec<Self::RunningInstance>, Vec<Self::IncomingInstance>);
type CFInstance = (CycleFoldCommittedInstance<C2>, CycleFoldWitness<C2>); type CFInstance = (CycleFoldCommittedInstance<C2>, CycleFoldWitness<C2>);
type IVCProof = IVCProof<C1, C2>;
fn pp_deserialize_with_mode<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
fc_params: FC::Params,
) -> Result<Self::ProverParam, Error> {
let poseidon_config = poseidon_canonical_config::<C1::ScalarField>();
// generate the CCS needed for the ProverParams. In this way we avoid needing
// to serialize it, saving significant space in the ProverParams serialized size.
// main circuit R1CS:
let f_circuit = FC::new(fc_params)?;
let augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC, MU, NU>::empty(
&poseidon_config,
f_circuit.clone(),
None,
)?;
let ccs = augmented_F_circuit.ccs;
let cs_pp = CS1::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_pp = CS2::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(ProverParams {
poseidon_config,
cs_pp,
cf_cs_pp,
ccs: Some(ccs),
})
}
fn vp_deserialize_with_mode<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
fc_params: FC::Params,
) -> Result<Self::VerifierParam, Error> {
let poseidon_config = poseidon_canonical_config::<C1::ScalarField>();
// generate the CCS & cf_r1cs needed for the VerifierParams. In this way we avoid needing
// to serialize them, saving significant space in the VerifierParams serialized size.
// main circuit R1CS:
let f_circuit = FC::new(fc_params)?;
let augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC, MU, NU>::empty(
&poseidon_config,
f_circuit.clone(),
None,
)?;
let ccs = augmented_F_circuit.ccs;
// CycleFold circuit R1CS
let cf_circuit = HyperNovaCycleFoldCircuit::<C1, GC1, MU, NU>::empty();
let cf_r1cs = get_r1cs_from_cs::<C2::ScalarField>(cf_circuit)?;
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(VerifierParams {
poseidon_config,
ccs,
cf_r1cs,
cs_vp,
cf_cs_vp,
})
}
fn preprocess( fn preprocess(
mut rng: impl RngCore, mut rng: impl RngCore,
@ -488,19 +632,13 @@ where
// setup the dummy instances // setup the dummy instances
let W_dummy = Witness::<C1::ScalarField>::dummy(&ccs); let W_dummy = Witness::<C1::ScalarField>::dummy(&ccs);
let U_dummy = LCCCS::<C1>::dummy(ccs.l, ccs.t, ccs.s);
let U_dummy = LCCCS::<C1>::dummy(&ccs);
let w_dummy = W_dummy.clone(); let w_dummy = W_dummy.clone();
let mut u_dummy = CCCS::<C1>::dummy(ccs.l);
let mut u_dummy = CCCS::<C1>::dummy(&ccs);
let (cf_W_dummy, cf_U_dummy): (CycleFoldWitness<C2>, CycleFoldCommittedInstance<C2>) = let (cf_W_dummy, cf_U_dummy): (CycleFoldWitness<C2>, CycleFoldCommittedInstance<C2>) =
cf_r1cs.dummy_running_instance();
cf_r1cs.dummy_witness_instance();
u_dummy.x = vec![ u_dummy.x = vec![
U_dummy.hash(
&sponge,
pp_hash,
C1::ScalarField::zero(),
z_0.clone(),
z_0.clone(),
),
U_dummy.hash(&sponge, pp_hash, C1::ScalarField::zero(), &z_0, &z_0),
cf_U_dummy.hash_cyclefold(&sponge, pp_hash), cf_U_dummy.hash_cyclefold(&sponge, pp_hash),
]; ];
@ -556,36 +694,42 @@ where
// `sponge` is for digest computation. // `sponge` is for digest computation.
let sponge = PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config); let sponge = PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config);
let other_instances = other_instances.ok_or(Error::MissingOtherInstances)?;
#[allow(clippy::type_complexity)]
let (lcccs, cccs): (
Vec<(LCCCS<C1>, Witness<C1::ScalarField>)>,
Vec<(CCCS<C1>, Witness<C1::ScalarField>)>,
) = other_instances;
// recall, mu & nu is the number of all the LCCCS & CCCS respectively, including the
// running and incoming instances that are not part of the 'other_instances', hence the +1
// in the couple of following checks.
if lcccs.len() + 1 != MU {
return Err(Error::NotSameLength(
"other_instances.lcccs.len()".to_string(),
lcccs.len(),
"hypernova.mu".to_string(),
MU,
));
}
if cccs.len() + 1 != NU {
return Err(Error::NotSameLength(
"other_instances.cccs.len()".to_string(),
cccs.len(),
"hypernova.nu".to_string(),
NU,
));
}
let (Us, Ws, us, ws) = if MU > 1 || NU > 1 {
let other_instances = other_instances.ok_or(Error::MissingOtherInstances(MU, NU))?;
#[allow(clippy::type_complexity)]
let (lcccs, cccs): (
Vec<(LCCCS<C1>, Witness<C1::ScalarField>)>,
Vec<(CCCS<C1>, Witness<C1::ScalarField>)>,
) = other_instances;
// recall, mu & nu are the total number of LCCCS & CCCS respectively, including the
// running and incoming instances that are not part of the 'other_instances', hence the +1
// in the two checks below.
if lcccs.len() + 1 != MU {
return Err(Error::NotSameLength(
"other_instances.lcccs.len()".to_string(),
lcccs.len(),
"hypernova.mu".to_string(),
MU,
));
}
if cccs.len() + 1 != NU {
return Err(Error::NotSameLength(
"other_instances.cccs.len()".to_string(),
cccs.len(),
"hypernova.nu".to_string(),
NU,
));
}
let (Us, Ws): (Vec<LCCCS<C1>>, Vec<Witness<C1::ScalarField>>) = lcccs.into_iter().unzip();
let (us, ws): (Vec<CCCS<C1>>, Vec<Witness<C1::ScalarField>>) = cccs.into_iter().unzip();
let (Us, Ws): (Vec<LCCCS<C1>>, Vec<Witness<C1::ScalarField>>) =
lcccs.into_iter().unzip();
let (us, ws): (Vec<CCCS<C1>>, Vec<Witness<C1::ScalarField>>) = cccs.into_iter().unzip();
(Some(Us), Some(Ws), Some(us), Some(ws))
} else {
(None, None, None, None)
};
let augmented_f_circuit: AugmentedFCircuit<C1, C2, GC2, FC, MU, NU>; let augmented_f_circuit: AugmentedFCircuit<C1, C2, GC2, FC, MU, NU>;
@ -637,14 +781,14 @@ where
if self.i == C1::ScalarField::zero() { if self.i == C1::ScalarField::zero() {
W_i1 = Witness::<C1::ScalarField>::dummy(&self.ccs); W_i1 = Witness::<C1::ScalarField>::dummy(&self.ccs);
W_i1.r_w = self.W_i.r_w; W_i1.r_w = self.W_i.r_w;
U_i1 = LCCCS::dummy(self.ccs.l, self.ccs.t, self.ccs.s);
U_i1 = LCCCS::dummy(&self.ccs);
let u_i1_x = U_i1.hash( let u_i1_x = U_i1.hash(
&sponge, &sponge,
self.pp_hash, self.pp_hash,
C1::ScalarField::one(), C1::ScalarField::one(),
self.z_0.clone(),
z_i1.clone(),
&self.z_0,
&z_i1,
); );
// hash the initial (dummy) CycleFold instance, which is used as the 2nd public // hash the initial (dummy) CycleFold instance, which is used as the 2nd public
@ -663,9 +807,9 @@ where
z_i: Some(self.z_i.clone()), z_i: Some(self.z_i.clone()),
external_inputs: Some(external_inputs.clone()), external_inputs: Some(external_inputs.clone()),
U_i: Some(self.U_i.clone()), U_i: Some(self.U_i.clone()),
Us: Some(Us.clone()),
Us: Us.clone(),
u_i_C: Some(self.u_i.C), u_i_C: Some(self.u_i.C),
us: Some(us.clone()),
us: us.clone(),
U_i1_C: Some(U_i1.C), U_i1_C: Some(U_i1.C),
F: self.F.clone(), F: self.F.clone(),
x: Some(u_i1_x), x: Some(u_i1_x),
@ -681,26 +825,43 @@ where
let mut transcript_p: PoseidonSponge<C1::ScalarField> = let mut transcript_p: PoseidonSponge<C1::ScalarField> =
PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config); PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config);
transcript_p.absorb(&self.pp_hash); transcript_p.absorb(&self.pp_hash);
let (all_Us, all_us, all_Ws, all_ws) = if MU > 1 || NU > 1 {
(
[vec![self.U_i.clone()], Us.clone().unwrap()].concat(),
[vec![self.u_i.clone()], us.clone().unwrap()].concat(),
[vec![self.W_i.clone()], Ws.unwrap()].concat(),
[vec![self.w_i.clone()], ws.unwrap()].concat(),
)
} else {
(
vec![self.U_i.clone()],
vec![self.u_i.clone()],
vec![self.W_i.clone()],
vec![self.w_i.clone()],
)
};
let (rho, nimfs_proof); let (rho, nimfs_proof);
(nimfs_proof, U_i1, W_i1, rho) = NIMFS::<C1, PoseidonSponge<C1::ScalarField>>::prove( (nimfs_proof, U_i1, W_i1, rho) = NIMFS::<C1, PoseidonSponge<C1::ScalarField>>::prove(
&mut transcript_p, &mut transcript_p,
&self.ccs, &self.ccs,
&[vec![self.U_i.clone()], Us.clone()].concat(),
&[vec![self.u_i.clone()], us.clone()].concat(),
&[vec![self.W_i.clone()], Ws].concat(),
&[vec![self.w_i.clone()], ws].concat(),
&all_Us,
&all_us,
&all_Ws,
&all_ws,
)?; )?;
// sanity check: check the folded instance relation // sanity check: check the folded instance relation
#[cfg(test)] #[cfg(test)]
U_i1.check_relation(&self.ccs, &W_i1)?;
self.ccs.check_relation(&W_i1, &U_i1)?;
let u_i1_x = U_i1.hash( let u_i1_x = U_i1.hash(
&sponge, &sponge,
self.pp_hash, self.pp_hash,
self.i + C1::ScalarField::one(), self.i + C1::ScalarField::one(),
self.z_0.clone(),
z_i1.clone(),
&self.z_0,
&z_i1,
); );
let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec();
@ -715,12 +876,12 @@ where
// where each p_i is in fact p_i.to_constraint_field() // where each p_i is in fact p_i.to_constraint_field()
let cf_u_i_x = [ let cf_u_i_x = [
vec![rho_Fq], vec![rho_Fq],
get_cm_coordinates(&self.U_i.C),
Us.iter()
all_Us
.iter()
.flat_map(|Us_i| get_cm_coordinates(&Us_i.C)) .flat_map(|Us_i| get_cm_coordinates(&Us_i.C))
.collect(), .collect(),
get_cm_coordinates(&self.u_i.C),
us.iter()
all_us
.iter()
.flat_map(|us_i| get_cm_coordinates(&us_i.C)) .flat_map(|us_i| get_cm_coordinates(&us_i.C))
.collect(), .collect(),
get_cm_coordinates(&U_i1.C), get_cm_coordinates(&U_i1.C),
@ -732,10 +893,8 @@ where
r_bits: Some(rho_bits.clone()), r_bits: Some(rho_bits.clone()),
points: Some( points: Some(
[ [
vec![self.U_i.clone().C],
Us.iter().map(|Us_i| Us_i.C).collect(),
vec![self.u_i.clone().C],
us.iter().map(|us_i| us_i.C).collect(),
all_Us.iter().map(|Us_i| Us_i.C).collect::<Vec<_>>(),
all_us.iter().map(|us_i| us_i.C).collect::<Vec<_>>(),
] ]
.concat(), .concat(),
), ),
@ -776,9 +935,9 @@ where
z_i: Some(self.z_i.clone()), z_i: Some(self.z_i.clone()),
external_inputs: Some(external_inputs), external_inputs: Some(external_inputs),
U_i: Some(self.U_i.clone()), U_i: Some(self.U_i.clone()),
Us: Some(Us.clone()),
Us: Us.clone(),
u_i_C: Some(self.u_i.C), u_i_C: Some(self.u_i.C),
us: Some(us.clone()),
us: us.clone(),
U_i1_C: Some(U_i1.C), U_i1_C: Some(U_i1.C),
F: self.F.clone(), F: self.F.clone(),
x: Some(u_i1_x), x: Some(u_i1_x),
@ -827,9 +986,9 @@ where
#[cfg(test)] #[cfg(test)]
{ {
// check the new LCCCS instance relation // check the new LCCCS instance relation
self.U_i.check_relation(&self.ccs, &self.W_i)?;
self.ccs.check_relation(&self.W_i, &self.U_i)?;
// check the new CCCS instance relation // check the new CCCS instance relation
self.u_i.check_relation(&self.ccs, &self.w_i)?;
self.ccs.check_relation(&self.w_i, &self.u_i)?;
} }
Ok(()) Ok(())
@ -839,31 +998,87 @@ where
self.z_i.clone() self.z_i.clone()
} }
fn instances(
&self,
) -> (
Self::RunningInstance,
Self::IncomingInstance,
Self::CFInstance,
) {
(
(self.U_i.clone(), self.W_i.clone()),
(self.u_i.clone(), self.w_i.clone()),
(self.cf_U_i.clone(), self.cf_W_i.clone()),
)
fn ivc_proof(&self) -> Self::IVCProof {
Self::IVCProof {
i: self.i,
z_0: self.z_0.clone(),
z_i: self.z_i.clone(),
W_i: self.W_i.clone(),
U_i: self.U_i.clone(),
w_i: self.w_i.clone(),
u_i: self.u_i.clone(),
cf_W_i: self.cf_W_i.clone(),
cf_U_i: self.cf_U_i.clone(),
}
} }
/// Implements IVC.V of HyperNova+CycleFold. Notice that this method does not include the
fn from_ivc_proof(
ivc_proof: Self::IVCProof,
fcircuit_params: FC::Params,
params: (Self::ProverParam, Self::VerifierParam),
) -> Result<Self, Error> {
let IVCProof {
i,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
} = ivc_proof;
let (pp, vp) = params;
let f_circuit = FC::new(fcircuit_params).unwrap();
let augmented_f_circuit = AugmentedFCircuit::<C1, C2, GC2, FC, MU, NU>::empty(
&pp.poseidon_config,
f_circuit.clone(),
None,
)?;
let cf_circuit = HyperNovaCycleFoldCircuit::<C1, GC1, MU, NU>::empty();
let ccs = augmented_f_circuit.ccs.clone();
let cf_r1cs = get_r1cs_from_cs::<C2::ScalarField>(cf_circuit)?;
Ok(Self {
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
ccs,
cf_r1cs,
poseidon_config: pp.poseidon_config,
cs_pp: pp.cs_pp,
cf_cs_pp: pp.cf_cs_pp,
F: f_circuit,
pp_hash: vp.pp_hash()?,
i,
z_0,
z_i,
w_i,
u_i,
W_i,
U_i,
cf_W_i,
cf_U_i,
})
}
/// Implements IVC.V of HyperNova+CycleFold. Notice that this method does not include the
/// commitments verification, which is done in the Decider. /// commitments verification, which is done in the Decider.
fn verify(
vp: Self::VerifierParam,
z_0: Vec<C1::ScalarField>, // initial state
z_i: Vec<C1::ScalarField>, // last state
num_steps: C1::ScalarField,
running_instance: Self::RunningInstance,
incoming_instance: Self::IncomingInstance,
cyclefold_instance: Self::CFInstance,
) -> Result<(), Error> {
fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error> {
let Self::IVCProof {
i: num_steps,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
} = ivc_proof;
if num_steps == C1::ScalarField::zero() { if num_steps == C1::ScalarField::zero() {
if z_0 != z_i { if z_0 != z_i {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
@ -873,9 +1088,6 @@ where
// `sponge` is for digest computation. // `sponge` is for digest computation.
let sponge = PoseidonSponge::<C1::ScalarField>::new(&vp.poseidon_config); let sponge = PoseidonSponge::<C1::ScalarField>::new(&vp.poseidon_config);
let (U_i, W_i) = running_instance;
let (u_i, w_i) = incoming_instance;
let (cf_U_i, cf_W_i) = cyclefold_instance;
if u_i.x.len() != 2 || U_i.x.len() != 2 { if u_i.x.len() != 2 || U_i.x.len() != 2 {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
@ -884,7 +1096,7 @@ where
// check that u_i's output points to the running instance // check that u_i's output points to the running instance
// u_i.X[0] == H(i, z_0, z_i, U_i) // u_i.X[0] == H(i, z_0, z_i, U_i)
let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone());
let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i);
if expected_u_i_x != u_i.x[0] { if expected_u_i_x != u_i.x[0] {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
@ -895,12 +1107,12 @@ where
} }
// check LCCCS satisfiability // check LCCCS satisfiability
U_i.check_relation(&vp.ccs, &W_i)?;
vp.ccs.check_relation(&W_i, &U_i)?;
// check CCCS satisfiability // check CCCS satisfiability
u_i.check_relation(&vp.ccs, &w_i)?;
vp.ccs.check_relation(&w_i, &u_i)?;
// check CycleFold's RelaxedR1CS satisfiability // check CycleFold's RelaxedR1CS satisfiability
vp.cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i)?;
vp.cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?;
Ok(()) Ok(())
} }
@ -940,6 +1152,7 @@ mod tests {
test_ivc_opt::<KZG<Bn254>, Pedersen<Projective2>, false>(poseidon_config, F_circuit); test_ivc_opt::<KZG<Bn254>, Pedersen<Projective2>, false>(poseidon_config, F_circuit);
} }
#[allow(clippy::type_complexity)]
// test_ivc allowing to choose the CommitmentSchemes // test_ivc allowing to choose the CommitmentSchemes
pub fn test_ivc_opt< pub fn test_ivc_opt<
CS1: CommitmentScheme<Projective, H>, CS1: CommitmentScheme<Projective, H>,
@ -948,18 +1161,6 @@ mod tests {
>( >(
poseidon_config: PoseidonConfig<Fr>, poseidon_config: PoseidonConfig<Fr>,
F_circuit: CubicFCircuit<Fr>, F_circuit: CubicFCircuit<Fr>,
) -> (
HyperNova<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>, CS1, CS2, 2, 3, H>,
(
ProverParams<Projective, Projective2, CS1, CS2, H>,
VerifierParams<Projective, Projective2, CS1, CS2, H>,
),
(LCCCS<Projective>, Witness<Fr>),
(CCCS<Projective>, Witness<Fr>),
(
CycleFoldCommittedInstance<Projective2>,
CycleFoldWitness<Projective2>,
),
) { ) {
let mut rng = ark_std::test_rng(); let mut rng = ark_std::test_rng();
@ -1013,24 +1214,11 @@ mod tests {
} }
assert_eq!(Fr::from(num_steps as u32), hypernova.i); assert_eq!(Fr::from(num_steps as u32), hypernova.i);
let (running_instance, incoming_instance, cyclefold_instance) = hypernova.instances();
let ivc_proof = hypernova.ivc_proof();
HN::verify( HN::verify(
hypernova_params.1.clone(), // verifier_params hypernova_params.1.clone(), // verifier_params
z_0,
hypernova.z_i.clone(),
hypernova.i.clone(),
running_instance.clone(),
incoming_instance.clone(),
cyclefold_instance.clone(),
ivc_proof,
) )
.unwrap(); .unwrap();
(
hypernova,
hypernova_params,
running_instance,
incoming_instance,
cyclefold_instance,
)
} }
} }
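
The `instances()`-based API is replaced by a single `IVCProof` carrying `i`, `z_0`, `z_i` and all folded instances: `ivc_proof()` extracts it, `verify` now takes only the verifier params and the proof, and `from_ivc_proof` rebuilds a HyperNova instance from it. A short usage sketch, assuming the `HN` alias and `hypernova_params` from the test above and an FCircuit with `()` params:

// after some prove_step calls, extract the IVC proof and verify it
let ivc_proof = hypernova.ivc_proof();
HN::verify(hypernova_params.1.clone(), ivc_proof.clone()).unwrap();

// the prover state can also be re-instantiated from the proof
let hypernova_restored = HN::from_ivc_proof(
    ivc_proof,
    (),                       // FCircuit params
    hypernova_params.clone(), // (ProverParams, VerifierParams)
)
.unwrap();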

+ 15
- 11
folding-schemes/src/folding/hypernova/nimfs.rs

@ -13,6 +13,8 @@ use super::{
}; };
use crate::arith::ccs::CCS; use crate::arith::ccs::CCS;
use crate::constants::NOVA_N_BITS_RO; use crate::constants::NOVA_N_BITS_RO;
use crate::folding::circuits::CF1;
use crate::folding::traits::Dummy;
use crate::transcript::Transcript; use crate::transcript::Transcript;
use crate::utils::sum_check::structs::{IOPProof as SumCheckProof, IOPProverMessage}; use crate::utils::sum_check::structs::{IOPProof as SumCheckProof, IOPProverMessage};
use crate::utils::sum_check::{IOPSumCheck, SumCheck}; use crate::utils::sum_check::{IOPSumCheck, SumCheck};
@ -29,8 +31,8 @@ pub struct NIMFSProof {
pub sigmas_thetas: SigmasThetas<C::ScalarField>, pub sigmas_thetas: SigmasThetas<C::ScalarField>,
} }
impl<C: CurveGroup> NIMFSProof<C> {
pub fn dummy(ccs: &CCS<C::ScalarField>, mu: usize, nu: usize) -> Self {
impl<C: CurveGroup> Dummy<(&CCS<CF1<C>>, usize, usize)> for NIMFSProof<C> {
fn dummy((ccs, mu, nu): (&CCS<CF1<C>>, usize, usize)) -> Self {
// use 'C::ScalarField::one()' instead of 'zero()' to enforce the NIMFSProof to have the // use 'C::ScalarField::one()' instead of 'zero()' to enforce the NIMFSProof to have the
// same in-circuit representation to match the number of constraints of an actual proof. // same in-circuit representation to match the number of constraints of an actual proof.
NIMFSProof::<C> { NIMFSProof::<C> {
@ -410,8 +412,10 @@ pub mod tests {
let ccs = get_test_ccs(); let ccs = get_test_ccs();
let z1 = get_test_z::<Fr>(3); let z1 = get_test_z::<Fr>(3);
let z2 = get_test_z::<Fr>(4); let z2 = get_test_z::<Fr>(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let (w1, x1) = ccs.split_z(&z1);
let (w2, x2) = ccs.split_z(&z2);
ccs.check_relation(&w1, &x1).unwrap();
ccs.check_relation(&w2, &x2).unwrap();
let mut rng = test_rng(); let mut rng = test_rng();
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect(); let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
@ -429,8 +433,8 @@ pub mod tests {
.to_cccs::<_, Projective, Pedersen<Projective>, false>(&mut rng, &pedersen_params, &z2) .to_cccs::<_, Projective, Pedersen<Projective>, false>(&mut rng, &pedersen_params, &z2)
.unwrap(); .unwrap();
lcccs.check_relation(&ccs, &w1).unwrap();
cccs.check_relation(&ccs, &w2).unwrap();
ccs.check_relation(&w1, &lcccs).unwrap();
ccs.check_relation(&w2, &cccs).unwrap();
let mut rng = test_rng(); let mut rng = test_rng();
let rho = Fr::rand(&mut rng); let rho = Fr::rand(&mut rng);
@ -446,7 +450,7 @@ pub mod tests {
let w_folded = NIMFS::<Projective, PoseidonSponge<Fr>>::fold_witness(&[w1], &[w2], rho); let w_folded = NIMFS::<Projective, PoseidonSponge<Fr>>::fold_witness(&[w1], &[w2], rho);
// check lcccs relation // check lcccs relation
folded.check_relation(&ccs, &w_folded).unwrap();
ccs.check_relation(&w_folded, &folded).unwrap();
} }
/// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper) /// Perform multifolding of an LCCCS instance with a CCCS instance (as described in the paper)
@ -506,7 +510,7 @@ pub mod tests {
assert_eq!(folded_lcccs, folded_lcccs_v); assert_eq!(folded_lcccs, folded_lcccs_v);
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
folded_lcccs.check_relation(&ccs, &folded_witness).unwrap();
ccs.check_relation(&folded_witness, &folded_lcccs).unwrap();
} }
/// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance /// Perform multiple steps of multifolding of an LCCCS instance with a CCCS instance
@ -566,7 +570,7 @@ pub mod tests {
assert_eq!(folded_lcccs, folded_lcccs_v); assert_eq!(folded_lcccs, folded_lcccs_v);
// check that the folded instance with the folded witness holds the LCCCS relation // check that the folded instance with the folded witness holds the LCCCS relation
folded_lcccs.check_relation(&ccs, &folded_witness).unwrap();
ccs.check_relation(&folded_witness, &folded_lcccs).unwrap();
running_instance = folded_lcccs; running_instance = folded_lcccs;
w1 = folded_witness; w1 = folded_witness;
@ -652,7 +656,7 @@ pub mod tests {
assert_eq!(folded_lcccs, folded_lcccs_v); assert_eq!(folded_lcccs, folded_lcccs_v);
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
folded_lcccs.check_relation(&ccs, &folded_witness).unwrap();
ccs.check_relation(&folded_witness, &folded_lcccs).unwrap();
} }
/// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step /// Test that generates mu>1 and nu>1 instances, and folds them in a single multifolding step
@ -740,7 +744,7 @@ pub mod tests {
assert_eq!(folded_lcccs, folded_lcccs_v); assert_eq!(folded_lcccs, folded_lcccs_v);
// Check that the folded LCCCS instance is a valid instance with respect to the folded witness // Check that the folded LCCCS instance is a valid instance with respect to the folded witness
folded_lcccs.check_relation(&ccs, &folded_witness).unwrap();
ccs.check_relation(&folded_witness, &folded_lcccs).unwrap();
} }
} }
} }

+ 0
- 420
folding-schemes/src/folding/hypernova/serialize.rs

@ -1,420 +0,0 @@
use crate::arith::ccs::CCS;
use crate::arith::r1cs::R1CS;
use crate::folding::hypernova::ProverParams;
use crate::folding::hypernova::VerifierParams;
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_r1cs_std::groups::{CurveVar, GroupOpsBounds};
use ark_r1cs_std::ToConstraintFieldGadget;
use ark_serialize::CanonicalDeserialize;
use ark_serialize::{CanonicalSerialize, Compress, SerializationError, Validate};
use ark_std::marker::PhantomData;
use crate::folding::hypernova::cccs::CCCS;
use crate::folding::hypernova::lcccs::LCCCS;
use crate::folding::hypernova::Witness;
use crate::folding::nova::{
CommittedInstance as CycleFoldCommittedInstance, Witness as CycleFoldWitness,
};
use crate::FoldingScheme;
use crate::{
commitment::CommitmentScheme,
folding::{circuits::CF2, nova::PreprocessorParam},
frontend::FCircuit,
};
use super::HyperNova;
impl<C1, GC1, C2, GC2, FC, CS1, CS2, const MU: usize, const NU: usize, const H: bool>
CanonicalSerialize for HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, H>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
{
fn serialize_compressed<W: std::io::prelude::Write>(
&self,
writer: W,
) -> Result<(), ark_serialize::SerializationError> {
self.serialize_with_mode(writer, ark_serialize::Compress::Yes)
}
fn compressed_size(&self) -> usize {
self.serialized_size(ark_serialize::Compress::Yes)
}
fn serialize_uncompressed<W: std::io::prelude::Write>(
&self,
writer: W,
) -> Result<(), ark_serialize::SerializationError> {
self.serialize_with_mode(writer, ark_serialize::Compress::No)
}
fn uncompressed_size(&self) -> usize {
self.serialized_size(ark_serialize::Compress::No)
}
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: ark_serialize::Compress,
) -> Result<(), ark_serialize::SerializationError> {
self.pp_hash.serialize_with_mode(&mut writer, compress)?;
self.i.serialize_with_mode(&mut writer, compress)?;
self.z_0.serialize_with_mode(&mut writer, compress)?;
self.z_i.serialize_with_mode(&mut writer, compress)?;
self.W_i.serialize_with_mode(&mut writer, compress)?;
self.U_i.serialize_with_mode(&mut writer, compress)?;
self.w_i.serialize_with_mode(&mut writer, compress)?;
self.u_i.serialize_with_mode(&mut writer, compress)?;
self.cf_W_i.serialize_with_mode(&mut writer, compress)?;
self.cf_U_i.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
self.pp_hash.serialized_size(compress)
+ self.i.serialized_size(compress)
+ self.z_0.serialized_size(compress)
+ self.z_i.serialized_size(compress)
+ self.W_i.serialized_size(compress)
+ self.U_i.serialized_size(compress)
+ self.w_i.serialized_size(compress)
+ self.u_i.serialized_size(compress)
+ self.cf_W_i.serialized_size(compress)
+ self.cf_U_i.serialized_size(compress)
}
}
impl<C1, GC1, C2, GC2, FC, CS1, CS2, const MU: usize, const NU: usize, const H: bool>
HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, H>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField, Params = ()>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
#[allow(clippy::too_many_arguments)]
pub fn deserialize_hypernova<R: std::io::prelude::Read>(
mut reader: R,
compress: Compress,
validate: Validate,
poseidon_config: PoseidonConfig<C1::ScalarField>,
cs_pp: CS1::ProverParams,
cs_vp: CS1::VerifierParams,
cf_cs_pp: CS2::ProverParams,
cf_cs_vp: CS2::VerifierParams,
) -> Result<Self, SerializationError> {
let f_circuit = FC::new(()).unwrap();
let prep_param = PreprocessorParam {
poseidon_config: poseidon_config.clone(),
F: f_circuit.clone(),
cs_pp: Some(cs_pp.clone()),
cs_vp: Some(cs_vp.clone()),
cf_cs_pp: Some(cf_cs_pp.clone()),
cf_cs_vp: Some(cf_cs_vp.clone()),
};
// `test_rng` won't be used in `preprocess`, since parameters have already been initialized
let (prover_params, verifier_params) = Self::preprocess(ark_std::test_rng(), &prep_param)
.or(Err(SerializationError::InvalidData))?;
let pp_hash = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?;
let i = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?;
let z_0 = Vec::<C1::ScalarField>::deserialize_with_mode(&mut reader, compress, validate)?;
let z_i = Vec::<C1::ScalarField>::deserialize_with_mode(&mut reader, compress, validate)?;
let W_i =
Witness::<C1::ScalarField>::deserialize_with_mode(&mut reader, compress, validate)?;
let U_i = LCCCS::<C1>::deserialize_with_mode(&mut reader, compress, validate)?;
let w_i =
Witness::<C1::ScalarField>::deserialize_with_mode(&mut reader, compress, validate)?;
let u_i = CCCS::<C1>::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_W_i =
CycleFoldWitness::<C2>::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_U_i = CycleFoldCommittedInstance::<C2>::deserialize_with_mode(
&mut reader,
compress,
validate,
)?;
let ccs = prover_params.ccs.ok_or(SerializationError::InvalidData)?;
Ok(HyperNova {
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
ccs,
cf_r1cs: verifier_params.cf_r1cs,
poseidon_config,
cs_pp,
cf_cs_pp,
F: f_circuit,
pp_hash,
i,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
})
}
}
impl<
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
const H: bool,
> CanonicalSerialize for ProverParams<C1, C2, CS1, CS2, H>
{
fn serialize_compressed<W: std::io::prelude::Write>(
&self,
writer: W,
) -> Result<(), SerializationError> {
self.serialize_with_mode(writer, Compress::Yes)
}
fn compressed_size(&self) -> usize {
self.serialized_size(Compress::Yes)
}
fn serialize_uncompressed<W: std::io::prelude::Write>(
&self,
writer: W,
) -> Result<(), SerializationError> {
self.serialize_with_mode(writer, Compress::No)
}
fn uncompressed_size(&self) -> usize {
self.serialized_size(Compress::No)
}
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: Compress,
) -> Result<(), SerializationError> {
self.cs_pp.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_pp.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: Compress) -> usize {
self.cs_pp.serialized_size(compress) + self.cf_cs_pp.serialized_size(compress)
}
}
impl<
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
const H: bool,
> ProverParams<C1, C2, CS1, CS2, H>
{
pub fn deserialize_prover_params<R: std::io::prelude::Read>(
mut reader: R,
compress: Compress,
validate: Validate,
ccs: &Option<CCS<C1::ScalarField>>,
poseidon_config: &PoseidonConfig<C1::ScalarField>,
) -> Result<Self, SerializationError> {
let cs_pp = CS1::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_pp = CS2::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(ProverParams {
cs_pp,
cf_cs_pp,
ccs: ccs.clone(),
poseidon_config: poseidon_config.clone(),
})
}
}
impl<
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
const H: bool,
> CanonicalSerialize for VerifierParams<C1, C2, CS1, CS2, H>
{
fn serialize_compressed<W: std::io::prelude::Write>(
&self,
writer: W,
) -> Result<(), SerializationError> {
self.serialize_with_mode(writer, Compress::Yes)
}
fn compressed_size(&self) -> usize {
self.serialized_size(Compress::Yes)
}
fn serialize_uncompressed<W: std::io::prelude::Write>(
&self,
writer: W,
) -> Result<(), SerializationError> {
self.serialize_with_mode(writer, Compress::No)
}
fn uncompressed_size(&self) -> usize {
self.serialized_size(Compress::No)
}
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: Compress,
) -> Result<(), SerializationError> {
self.cf_r1cs.serialize_with_mode(&mut writer, compress)?;
self.cs_vp.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_vp.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: Compress) -> usize {
self.cf_r1cs.serialized_size(compress)
+ self.cs_vp.serialized_size(compress)
+ self.cf_cs_vp.serialized_size(compress)
}
}
impl<
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
const H: bool,
> VerifierParams<C1, C2, CS1, CS2, H>
{
pub fn deserialize_verifier_params<R: std::io::Read>(
mut reader: R,
compress: Compress,
validate: Validate,
ccs: &CCS<C1::ScalarField>,
poseidon_config: &PoseidonConfig<C1::ScalarField>,
) -> Result<Self, SerializationError> {
let cf_r1cs = R1CS::deserialize_with_mode(&mut reader, compress, validate)?;
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(VerifierParams {
ccs: ccs.clone(),
poseidon_config: poseidon_config.clone(),
cf_r1cs,
cs_vp,
cf_cs_vp,
})
}
}
#[cfg(test)]
pub mod tests {
use crate::FoldingScheme;
use crate::MultiFolding;
use ark_serialize::{Compress, Validate, Write};
use std::fs;
use crate::{
commitment::{kzg::KZG, pedersen::Pedersen},
folding::hypernova::{tests::test_ivc_opt, HyperNova},
frontend::{utils::CubicFCircuit, FCircuit},
transcript::poseidon::poseidon_canonical_config,
};
use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective};
use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2};
use ark_serialize::CanonicalSerialize;
#[test]
fn test_serde_hypernova() {
let poseidon_config = poseidon_canonical_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
let (mut hn, (_, verifier_params), _, _, _) = test_ivc_opt::<
KZG<Bn254>,
Pedersen<Projective2>,
false,
>(poseidon_config.clone(), F_circuit);
let mut writer = vec![];
assert!(hn.serialize_compressed(&mut writer).is_ok());
let mut writer = vec![];
assert!(hn.serialize_uncompressed(&mut writer).is_ok());
let mut file = fs::OpenOptions::new()
.create(true)
.write(true)
.open("./hypernova.serde")
.unwrap();
file.write_all(&writer).unwrap();
let bytes = fs::read("./hypernova.serde").unwrap();
let mut hn_deserialized = HyperNova::<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
KZG<Bn254>,
Pedersen<Projective2>,
2,
3,
false,
>::deserialize_hypernova(
bytes.as_slice(),
Compress::No,
Validate::No,
poseidon_config,
hn.cs_pp.clone(),
verifier_params.cs_vp,
hn.cf_cs_pp.clone(),
verifier_params.cf_cs_vp,
)
.unwrap();
assert_eq!(hn.i, hn_deserialized.i);
let mut rng = ark_std::test_rng();
for _ in 0..3 {
// prepare some new instances to fold in the multifolding step
let mut lcccs = vec![];
for j in 0..1 {
let instance_state = vec![Fr::from(j as u32 + 85_u32)];
let (U, W) = hn
.new_running_instance(&mut rng, instance_state, vec![])
.unwrap();
lcccs.push((U, W));
}
let mut cccs = vec![];
for j in 0..2 {
let instance_state = vec![Fr::from(j as u32 + 15_u32)];
let (u, w) = hn
.new_incoming_instance(&mut rng, instance_state, vec![])
.unwrap();
cccs.push((u, w));
}
hn.prove_step(&mut rng, vec![], Some((lcccs.clone(), cccs.clone())))
.unwrap();
hn_deserialized
.prove_step(&mut rng, vec![], Some((lcccs, cccs)))
.unwrap();
}
assert_eq!(hn.z_i, hn_deserialized.z_i);
}
}
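Usage note: the sketch below shows how a client could round-trip the `ProverParams` defined above through bytes. It assumes the bn254/grumpkin instantiation used in the test and that `ProverParams` is reachable at `crate::folding::hypernova::ProverParams`; since `ccs` and `poseidon_config` are not part of the byte stream, they are passed back in on deserialization, which is exactly what `deserialize_prover_params` expects.
use ark_bn254::{Bn254, G1Projective as Projective};
use ark_grumpkin::Projective as Projective2;
use ark_serialize::{CanonicalSerialize, Compress, SerializationError, Validate};
use crate::commitment::{kzg::KZG, pedersen::Pedersen};
use crate::folding::hypernova::ProverParams;
type PP = ProverParams<Projective, Projective2, KZG<'static, Bn254>, Pedersen<Projective2>, false>;
/// Round-trips HyperNova's ProverParams through an in-memory buffer (sketch).
fn roundtrip_prover_params(pp: &PP) -> Result<PP, SerializationError> {
    let mut bytes = vec![];
    pp.serialize_compressed(&mut bytes)?;
    // ccs and poseidon_config are not serialized, so the caller provides them again.
    PP::deserialize_prover_params(
        bytes.as_slice(),
        Compress::Yes,
        Validate::Yes,
        &pp.ccs,
        &pp.poseidon_config,
    )
}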

+ 8
- 4
folding-schemes/src/folding/hypernova/utils.rs

@ -230,8 +230,10 @@ pub mod tests {
let ccs = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let (w1, x1) = ccs.split_z(&z1);
let (w2, x2) = ccs.split_z(&z2);
ccs.check_relation(&w1, &x1).unwrap();
ccs.check_relation(&w2, &x2).unwrap();
let mut rng = test_rng();
let gamma: Fr = Fr::rand(&mut rng);
@ -282,8 +284,10 @@ pub mod tests {
let ccs: CCS<Fr> = get_test_ccs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
ccs.check_relation(&z1).unwrap();
ccs.check_relation(&z2).unwrap();
let (w1, x1) = ccs.split_z(&z1);
let (w2, x2) = ccs.split_z(&z2);
ccs.check_relation(&w1, &x1).unwrap();
ccs.check_relation(&w2, &x2).unwrap();
let gamma: Fr = Fr::rand(&mut rng);
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();

+ 168
- 0
folding-schemes/src/folding/mod.rs

@ -3,3 +3,171 @@ pub mod hypernova;
pub mod mova;
pub mod nova;
pub mod protogalaxy;
pub mod traits;
#[cfg(test)]
pub mod tests {
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_pallas::{constraints::GVar as GVar1, Fr, Projective as G1};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_vesta::{constraints::GVar as GVar2, Projective as G2};
use std::io::Write;
use crate::commitment::pedersen::Pedersen;
use crate::folding::{
hypernova::HyperNova,
nova::{Nova, PreprocessorParam as NovaPreprocessorParam},
protogalaxy::ProtoGalaxy,
};
use crate::frontend::utils::CubicFCircuit;
use crate::frontend::FCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::Error;
use crate::FoldingScheme;
/// tests the IVC proofs and their serializers for the 3 implemented IVCs: Nova, HyperNova and
/// ProtoGalaxy.
#[test]
fn test_serialize_ivc_nova_hypernova_protogalaxy() {
let poseidon_config = poseidon_canonical_config::<Fr>();
type FC = CubicFCircuit<Fr>;
let f_circuit = FC::new(()).unwrap();
// test Nova
type N = Nova<G1, GVar1, G2, GVar2, FC, Pedersen<G1>, Pedersen<G2>, false>;
let prep_param = NovaPreprocessorParam::new(poseidon_config.clone(), f_circuit);
test_serialize_ivc_opt::<G1, G2, FC, N>("nova".to_string(), prep_param.clone()).unwrap();
// test HyperNova
type HN = HyperNova<
G1,
GVar1,
G2,
GVar2,
FC,
Pedersen<G1>,
Pedersen<G2>,
1, // mu
1, // nu
false,
>;
test_serialize_ivc_opt::<G1, G2, FC, HN>("hypernova".to_string(), prep_param).unwrap();
// test ProtoGalaxy
type P = ProtoGalaxy<G1, GVar1, G2, GVar2, FC, Pedersen<G1>, Pedersen<G2>>;
let prep_param = (poseidon_config, f_circuit);
test_serialize_ivc_opt::<G1, G2, FC, P>("protogalaxy".to_string(), prep_param).unwrap();
}
fn test_serialize_ivc_opt<
C1: CurveGroup,
C2: CurveGroup,
FC: FCircuit<C1::ScalarField, Params = ()>,
FS: FoldingScheme<C1, C2, FC>,
>(
name: String,
prep_param: FS::PreprocessorParam,
) -> Result<(), Error>
where
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
C2::BaseField: PrimeField,
FC: FCircuit<C1::ScalarField>,
{
let mut rng = ark_std::test_rng();
let F_circuit = FC::new(())?;
let fs_params = FS::preprocess(&mut rng, &prep_param)?;
let z_0 = vec![C1::ScalarField::from(3_u32)];
let mut fs = FS::init(&fs_params, F_circuit, z_0.clone()).unwrap();
// perform multiple IVC steps (internally folding)
let num_steps: usize = 3;
for _ in 0..num_steps {
fs.prove_step(&mut rng, vec![], None).unwrap();
}
// verify the IVCProof
let ivc_proof: FS::IVCProof = fs.ivc_proof();
FS::verify(fs_params.1.clone(), ivc_proof.clone()).unwrap();
// serialize the IVCProof and store it in a file
let mut writer = vec![];
assert!(ivc_proof.serialize_compressed(&mut writer).is_ok());
let mut file = std::fs::OpenOptions::new()
.create(true)
.write(true)
.open(format!("./ivc_proof-{}.serialized", name))
.unwrap();
file.write_all(&writer).unwrap();
// read the IVCProof from the file, deserializing it
let bytes = std::fs::read(format!("./ivc_proof-{}.serialized", name)).unwrap();
let deserialized_ivc_proof =
FS::IVCProof::deserialize_compressed(bytes.as_slice()).unwrap();
// verify deserialized IVCProof
FS::verify(fs_params.1.clone(), deserialized_ivc_proof.clone()).unwrap();
// build the FS from the given IVCProof, FC::Params, ProverParams and VerifierParams
let mut new_fs = FS::from_ivc_proof(deserialized_ivc_proof, (), fs_params.clone()).unwrap();
// serialize the FS params
let mut fs_pp_serialized = vec![];
fs_params
.0
.serialize_compressed(&mut fs_pp_serialized)
.unwrap();
let mut fs_vp_serialized = vec![];
fs_params
.1
.serialize_compressed(&mut fs_vp_serialized)
.unwrap();
// deserialize the FS params. This would be done by the client reading from a file
let _fs_pp_deserialized = FS::pp_deserialize_with_mode(
&mut fs_pp_serialized.as_slice(),
ark_serialize::Compress::Yes,
ark_serialize::Validate::Yes,
(), // FCircuit's Params
)
.unwrap();
// perform several IVC steps on both the original FS instance and the new FS instance
// recovered from the serialization
let num_steps: usize = 3;
for _ in 0..num_steps {
new_fs.prove_step(&mut rng, vec![], None).unwrap();
fs.prove_step(&mut rng, vec![], None).unwrap();
}
// check that the IVCProofs from both FS instances are equal
assert_eq!(new_fs.ivc_proof(), fs.ivc_proof());
let fs_vp_deserialized = FS::vp_deserialize_with_mode(
&mut fs_vp_serialized.as_slice(),
ark_serialize::Compress::Yes,
ark_serialize::Validate::Yes,
(), // fcircuit_params
)
.unwrap();
// get the IVCProof
let ivc_proof: FS::IVCProof = new_fs.ivc_proof();
// serialize IVCProof
let mut ivc_proof_serialized = vec![];
assert!(ivc_proof
.serialize_compressed(&mut ivc_proof_serialized)
.is_ok());
// deserialize IVCProof
let ivc_proof_deserialized =
FS::IVCProof::deserialize_compressed(ivc_proof_serialized.as_slice()).unwrap();
// verify the last IVCProof from the FS recovered from serialization
FS::verify(fs_vp_deserialized.clone(), ivc_proof_deserialized).unwrap();
Ok(())
}
}
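For readers who only want the client-facing shape of the API the test above exercises, here is a condensed sketch. It assumes the same Nova-over-Pallas/Vesta instantiation with Pedersen commitments as the test; the function names, the number of steps and the initial state are illustrative.
use ark_pallas::{constraints::GVar as GVar1, Fr, Projective as G1};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};
use ark_vesta::{constraints::GVar as GVar2, Projective as G2};
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::{Nova, PreprocessorParam};
use crate::frontend::{utils::CubicFCircuit, FCircuit};
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::{Error, FoldingScheme};
type FC = CubicFCircuit<Fr>;
type N = Nova<G1, GVar1, G2, GVar2, FC, Pedersen<G1>, Pedersen<G2>, false>;
// Prover side: fold a few steps and export the IVC proof as bytes.
fn export_ivc_proof() -> Result<Vec<u8>, Error> {
    let mut rng = ark_std::test_rng();
    let f_circuit = FC::new(())?;
    let prep_param = PreprocessorParam::new(poseidon_canonical_config::<Fr>(), f_circuit.clone());
    let params = N::preprocess(&mut rng, &prep_param)?;
    let mut nova = N::init(&params, f_circuit, vec![Fr::from(3_u32)])?;
    for _ in 0..3 {
        nova.prove_step(&mut rng, vec![], None)?;
    }
    let mut bytes = vec![];
    nova.ivc_proof().serialize_compressed(&mut bytes).unwrap(); // unwrap for brevity
    Ok(bytes)
}
// Verifier side: recover the VerifierParam and the IVCProof from bytes and verify.
fn verify_ivc_proof(vp_bytes: &[u8], proof_bytes: &[u8]) -> Result<(), Error> {
    let vp = N::vp_deserialize_with_mode(vp_bytes, Compress::Yes, Validate::Yes, ())?;
    let proof =
        <N as FoldingScheme<G1, G2, FC>>::IVCProof::deserialize_compressed(proof_bytes).unwrap();
    N::verify(vp, proof)
}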

+ 126
- 88
folding-schemes/src/folding/nova/circuits.rs

@ -21,7 +21,6 @@ use ark_std::{fmt::Debug, One, Zero};
use core::{borrow::Borrow, marker::PhantomData}; use core::{borrow::Borrow, marker::PhantomData};
use super::{CommittedInstance, NovaCycleFoldConfig}; use super::{CommittedInstance, NovaCycleFoldConfig};
use crate::constants::NOVA_N_BITS_RO;
use crate::folding::circuits::{ use crate::folding::circuits::{
cyclefold::{ cyclefold::{
CycleFoldChallengeGadget, CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, CycleFoldChallengeGadget, CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar,
@ -32,15 +31,16 @@ use crate::folding::circuits::{
}; };
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::transcript::{AbsorbNonNativeGadget, Transcript, TranscriptVar}; use crate::transcript::{AbsorbNonNativeGadget, Transcript, TranscriptVar};
use crate::{
constants::NOVA_N_BITS_RO,
folding::traits::{CommittedInstanceVarOps, Dummy},
};
/// CommittedInstanceVar contains the u, x, cmE and cmW values which are folded on the main Nova /// CommittedInstanceVar contains the u, x, cmE and cmW values which are folded on the main Nova
/// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are /// constraints field (E1::Fr, where E1 is the main curve). The peculiarity is that cmE and cmW are
/// represented non-natively over the constraint field. /// represented non-natively over the constraint field.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct CommittedInstanceVar<C: CurveGroup>
where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
pub struct CommittedInstanceVar<C: CurveGroup> {
pub u: FpVar<C::ScalarField>, pub u: FpVar<C::ScalarField>,
pub x: Vec<FpVar<C::ScalarField>>, pub x: Vec<FpVar<C::ScalarField>>,
pub cmE: NonNativeAffineVar<C>, pub cmE: NonNativeAffineVar<C>,
@ -50,7 +50,6 @@ where
impl<C> AllocVar<CommittedInstance<C>, CF1<C>> for CommittedInstanceVar<C> impl<C> AllocVar<CommittedInstance<C>, CF1<C>> for CommittedInstanceVar<C>
where where
C: CurveGroup, C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
{ {
fn new_variable<T: Borrow<CommittedInstance<C>>>( fn new_variable<T: Borrow<CommittedInstance<C>>>(
cs: impl Into<Namespace<CF1<C>>>, cs: impl Into<Namespace<CF1<C>>>,
@ -80,7 +79,7 @@ where
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField, <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{ {
fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> { fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> {
unimplemented!()
FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?)
} }
fn to_sponge_field_elements(&self) -> Result<Vec<FpVar<C::ScalarField>>, SynthesisError> { fn to_sponge_field_elements(&self) -> Result<Vec<FpVar<C::ScalarField>>, SynthesisError> {
@ -94,35 +93,27 @@ where
} }
} }
impl<C> CommittedInstanceVar<C>
where
C: CurveGroup,
<C as Group>::ScalarField: Absorb,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
/// hash implements the committed instance hash compatible with the native implementation from
/// CommittedInstance.hash.
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the
/// `CommittedInstance`.
/// Additionally it returns the vector of the field elements from the self parameters, so they
/// can be reused in other gadgets avoiding recalculating (reconstraining) them.
#[allow(clippy::type_complexity)]
pub fn hash<S: CryptographicSponge, T: TranscriptVar<CF1<C>, S>>(
self,
sponge: &T,
pp_hash: FpVar<CF1<C>>,
i: FpVar<CF1<C>>,
z_0: Vec<FpVar<CF1<C>>>,
z_i: Vec<FpVar<CF1<C>>>,
) -> Result<(FpVar<CF1<C>>, Vec<FpVar<CF1<C>>>), SynthesisError> {
let mut sponge = sponge.clone();
let U_vec = self.to_sponge_field_elements()?;
sponge.absorb(&pp_hash)?;
sponge.absorb(&i)?;
sponge.absorb(&z_0)?;
sponge.absorb(&z_i)?;
sponge.absorb(&U_vec)?;
Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec))
impl<C: CurveGroup> CommittedInstanceVarOps<C> for CommittedInstanceVar<C> {
type PointVar = NonNativeAffineVar<C>;
fn get_commitments(&self) -> Vec<Self::PointVar> {
vec![self.cmW.clone(), self.cmE.clone()]
}
fn get_public_inputs(&self) -> &[FpVar<CF1<C>>] {
&self.x
}
fn enforce_incoming(&self) -> Result<(), SynthesisError> {
let zero = NonNativeUintVar::new_constant(ConstraintSystemRef::None, CF2::<C>::zero())?;
self.cmE.x.enforce_equal_unaligned(&zero)?;
self.cmE.y.enforce_equal_unaligned(&zero)?;
self.u.enforce_equal(&FpVar::one())
}
fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> {
self.u.enforce_equal(&other.u)?;
self.x.enforce_equal(&other.x)
} }
} }
@ -177,10 +168,11 @@ where
/// ChallengeGadget computes the RO challenge used for the Nova instances NIFS, it contains a /// ChallengeGadget computes the RO challenge used for the Nova instances NIFS, it contains a
/// rust-native and a in-circuit compatible versions. /// rust-native and a in-circuit compatible versions.
pub struct ChallengeGadget<C: CurveGroup> {
pub struct ChallengeGadget<C: CurveGroup, CI: Absorb> {
_c: PhantomData<C>, _c: PhantomData<C>,
_ci: PhantomData<CI>,
} }
impl<C: CurveGroup> ChallengeGadget<C>
impl<C: CurveGroup, CI: Absorb> ChallengeGadget<C, CI>
where where
C: CurveGroup, C: CurveGroup,
<C as CurveGroup>::BaseField: PrimeField, <C as CurveGroup>::BaseField: PrimeField,
@ -189,14 +181,17 @@ where
pub fn get_challenge_native<T: Transcript<C::ScalarField>>( pub fn get_challenge_native<T: Transcript<C::ScalarField>>(
transcript: &mut T, transcript: &mut T,
pp_hash: C::ScalarField, // public params hash pp_hash: C::ScalarField, // public params hash
U_i: CommittedInstance<C>,
u_i: CommittedInstance<C>,
cmT: C,
U_i: &CI,
u_i: &CI,
cmT: Option<&C>,
) -> Vec<bool> { ) -> Vec<bool> {
transcript.absorb(&pp_hash); transcript.absorb(&pp_hash);
transcript.absorb(&U_i); transcript.absorb(&U_i);
transcript.absorb(&u_i); transcript.absorb(&u_i);
transcript.absorb_nonnative(&cmT);
// in the Nova case we absorb the cmT, in Ova case we don't since it is not used.
if let Some(cmT_value) = cmT {
transcript.absorb_nonnative(cmT_value);
}
transcript.squeeze_bits(NOVA_N_BITS_RO) transcript.squeeze_bits(NOVA_N_BITS_RO)
} }
@ -206,12 +201,15 @@ where
pp_hash: FpVar<CF1<C>>, // public params hash pp_hash: FpVar<CF1<C>>, // public params hash
U_i_vec: Vec<FpVar<CF1<C>>>, // already processed input, so we don't have to recompute these values
u_i: CommittedInstanceVar<C>, u_i: CommittedInstanceVar<C>,
cmT: NonNativeAffineVar<C>,
cmT: Option<NonNativeAffineVar<C>>,
) -> Result<Vec<Boolean<C::ScalarField>>, SynthesisError> { ) -> Result<Vec<Boolean<C::ScalarField>>, SynthesisError> {
transcript.absorb(&pp_hash)?; transcript.absorb(&pp_hash)?;
transcript.absorb(&U_i_vec)?; transcript.absorb(&U_i_vec)?;
transcript.absorb(&u_i)?; transcript.absorb(&u_i)?;
transcript.absorb_nonnative(&cmT)?;
// in the Nova case we absorb the cmT, in Ova case we don't since it is not used.
if let Some(cmT_value) = cmT {
transcript.absorb_nonnative(&cmT_value)?;
}
transcript.squeeze_bits(NOVA_N_BITS_RO) transcript.squeeze_bits(NOVA_N_BITS_RO)
} }
} }
@ -359,24 +357,12 @@ where
// `transcript` is for challenge generation. // `transcript` is for challenge generation.
let mut transcript = sponge.clone(); let mut transcript = sponge.clone();
// get z_{i+1} from the F circuit
let i_usize = self.i_usize.unwrap_or(0);
let z_i1 =
self.F
.generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?;
let is_basecase = i.is_zero()?; let is_basecase = i.is_zero()?;
// Primary Part // Primary Part
// P.1. Compute u_i.x // P.1. Compute u_i.x
// u_i.x[0] = H(i, z_0, z_i, U_i) // u_i.x[0] = H(i, z_0, z_i, U_i)
let (u_i_x, U_i_vec) = U_i.clone().hash(
&sponge,
pp_hash.clone(),
i.clone(),
z_0.clone(),
z_i.clone(),
)?;
let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
// u_i.x[1] = H(cf_U_i) // u_i.x[1] = H(cf_U_i)
let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i_x, cf_U_i_vec) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
@ -397,12 +383,12 @@ where
// P.3. nifs.verify, obtains U_{i+1} by folding u_i & U_i . // P.3. nifs.verify, obtains U_{i+1} by folding u_i & U_i .
// compute r = H(u_i, U_i, cmT) // compute r = H(u_i, U_i, cmT)
let r_bits = ChallengeGadget::<C1>::get_challenge_gadget(
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_gadget(
&mut transcript, &mut transcript,
pp_hash.clone(), pp_hash.clone(),
U_i_vec, U_i_vec,
u_i.clone(), u_i.clone(),
cmT.clone(),
Some(cmT.clone()),
)?; )?;
let r = Boolean::le_bits_to_fp_var(&r_bits)?; let r = Boolean::le_bits_to_fp_var(&r_bits)?;
// Also convert r_bits to a `NonNativeFieldVar` // Also convert r_bits to a `NonNativeFieldVar`
@ -421,21 +407,28 @@ where
U_i1.cmW = U_i1_cmW; U_i1.cmW = U_i1_cmW;
// P.4.a compute and check the first output of F' // P.4.a compute and check the first output of F'
// get z_{i+1} from the F circuit
let i_usize = self.i_usize.unwrap_or(0);
let z_i1 = self
.F
.generate_step_constraints(cs.clone(), i_usize, z_i, external_inputs)?;
// Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot}) // Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot})
// Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1}) // Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1})
let (u_i1_x, _) = U_i1.clone().hash( let (u_i1_x, _) = U_i1.clone().hash(
&sponge, &sponge,
pp_hash.clone(),
i + FpVar::<CF1<C1>>::one(),
z_0.clone(),
z_i1.clone(),
&pp_hash,
&(i + FpVar::<CF1<C1>>::one()),
&z_0,
&z_i1,
)?; )?;
let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash( let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash(
&sponge, &sponge,
pp_hash.clone(),
FpVar::<CF1<C1>>::one(),
z_0.clone(),
z_i1.clone(),
&pp_hash,
&FpVar::<CF1<C1>>::one(),
&z_0,
&z_i1,
)?; )?;
let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?;
x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?;
@ -536,8 +529,9 @@ pub mod tests {
use ark_std::UniformRand; use ark_std::UniformRand;
use crate::commitment::pedersen::Pedersen; use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::nifs::tests::prepare_simple_fold_inputs;
use crate::folding::nova::nifs::NIFS; use crate::folding::nova::nifs::NIFS;
use crate::folding::nova::traits::NIFSTrait;
use crate::folding::traits::CommittedInstanceOps;
use crate::transcript::poseidon::poseidon_canonical_config; use crate::transcript::poseidon::poseidon_canonical_config;
#[test] #[test]
@ -563,10 +557,22 @@ pub mod tests {
#[test] #[test]
fn test_nifs_gadget() { fn test_nifs_gadget() {
let (_, _, _, _, ci1, _, ci2, _, ci3, _, cmT, _, r_Fr) = prepare_simple_fold_inputs();
let mut rng = ark_std::test_rng();
let ci3_verifier = NIFS::<Projective, Pedersen<Projective>>::verify(r_Fr, &ci1, &ci2, &cmT);
assert_eq!(ci3_verifier, ci3);
// prepare the committed instances to test in-circuit
let ci: Vec<CommittedInstance<Projective>> = (0..2)
.into_iter()
.map(|_| CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
})
.collect();
let (ci1, ci2) = (ci[0].clone(), ci[1].clone());
let r_Fr = Fr::rand(&mut rng);
let cmT = Projective::rand(&mut rng);
let ci3 = NIFS::<Projective, Pedersen<Projective>>::verify(r_Fr, &ci1, &ci2, &cmT);
let cs = ConstraintSystem::<Fr>::new_ref(); let cs = ConstraintSystem::<Fr>::new_ref();
@ -591,6 +597,36 @@ pub mod tests {
assert!(cs.is_satisfied().unwrap()); assert!(cs.is_satisfied().unwrap());
} }
/// test that checks the native CommittedInstance.to_sponge_{bytes,field_elements}
/// vs the R1CS constraints version
#[test]
pub fn test_committed_instance_to_sponge_preimage() {
let mut rng = ark_std::test_rng();
let ci = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
let bytes = ci.to_sponge_bytes_as_vec();
let field_elements = ci.to_sponge_field_elements_as_vec();
let cs = ConstraintSystem::<Fr>::new_ref();
let ciVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(ci.clone())).unwrap();
let bytes_var = ciVar.to_sponge_bytes().unwrap();
let field_elements_var = ciVar.to_sponge_field_elements().unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed sponge preimages match
assert_eq!(bytes_var.value().unwrap(), bytes);
assert_eq!(field_elements_var.value().unwrap(), field_elements);
}
#[test] #[test]
fn test_committed_instance_hash() { fn test_committed_instance_hash() {
let mut rng = ark_std::test_rng(); let mut rng = ark_std::test_rng();
@ -609,7 +645,7 @@ pub mod tests {
}; };
// compute the CommittedInstance hash natively // compute the CommittedInstance hash natively
let h = ci.hash(&sponge, pp_hash, i, z_0.clone(), z_i.clone());
let h = ci.hash(&sponge, pp_hash, i, &z_0, &z_i);
let cs = ConstraintSystem::<Fr>::new_ref(); let cs = ConstraintSystem::<Fr>::new_ref();
@ -624,7 +660,7 @@ pub mod tests {
// compute the CommittedInstance hash in-circuit // compute the CommittedInstance hash in-circuit
let (hVar, _) = ciVar let (hVar, _) = ciVar
.hash(&sponge, pp_hashVar, iVar, z_0Var, z_iVar)
.hash(&sponge, &pp_hashVar, &iVar, &z_0Var, &z_iVar)
.unwrap(); .unwrap();
assert!(cs.is_satisfied().unwrap()); assert!(cs.is_satisfied().unwrap());
@ -656,13 +692,14 @@ pub mod tests {
let pp_hash = Fr::from(42u32); // only for testing let pp_hash = Fr::from(42u32); // only for testing
// compute the challenge natively // compute the challenge natively
let r_bits = ChallengeGadget::<Projective>::get_challenge_native(
&mut transcript,
pp_hash,
U_i.clone(),
u_i.clone(),
cmT,
);
let r_bits =
ChallengeGadget::<Projective, CommittedInstance<Projective>>::get_challenge_native(
&mut transcript,
pp_hash,
&U_i,
&u_i,
Some(&cmT),
);
let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap(); let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
let cs = ConstraintSystem::<Fr>::new_ref(); let cs = ConstraintSystem::<Fr>::new_ref();
@ -684,14 +721,15 @@ pub mod tests {
U_iVar.cmW.to_constraint_field().unwrap(), U_iVar.cmW.to_constraint_field().unwrap(),
] ]
.concat(); .concat();
let r_bitsVar = ChallengeGadget::<Projective>::get_challenge_gadget(
&mut transcriptVar,
pp_hashVar,
U_iVar_vec,
u_iVar,
cmTVar,
)
.unwrap();
let r_bitsVar =
ChallengeGadget::<Projective, CommittedInstance<Projective>>::get_challenge_gadget(
&mut transcriptVar,
pp_hashVar,
U_iVar_vec,
u_iVar,
Some(cmTVar),
)
.unwrap();
assert!(cs.is_satisfied().unwrap()); assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed challenges match
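A small native-side sketch of the reworked `ChallengeGadget` above, showing the two call shapes the `Option<&C>` parameter enables. The Pallas curve, the `PoseidonSponge` transcript and the module paths are assumptions for illustration, and Nova's `CommittedInstance` is reused for both calls purely to show the parameter (Ova has its own instance type).
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge};
use ark_pallas::{Fr, Projective};
use crate::folding::nova::circuits::ChallengeGadget;
use crate::folding::nova::CommittedInstance;
use crate::transcript::poseidon::poseidon_canonical_config;
#[allow(non_snake_case)]
fn sample_challenges(
    pp_hash: Fr,
    U_i: &CommittedInstance<Projective>,
    u_i: &CommittedInstance<Projective>,
    cmT: Projective,
) -> (Vec<bool>, Vec<bool>) {
    let cfg = poseidon_canonical_config::<Fr>();
    // Nova-style: the cross-term commitment cmT is absorbed into the transcript.
    let mut t_nova = PoseidonSponge::<Fr>::new(&cfg);
    let r_nova = ChallengeGadget::<Projective, CommittedInstance<Projective>>::get_challenge_native(
        &mut t_nova, pp_hash, U_i, u_i, Some(&cmT),
    );
    // Ova-style: there is no cmT, so None is passed and nothing extra is absorbed.
    let mut t_ova = PoseidonSponge::<Fr>::new(&cfg);
    let r_ova = ChallengeGadget::<Projective, CommittedInstance<Projective>>::get_challenge_native(
        &mut t_ova, pp_hash, U_i, u_i, None,
    );
    (r_nova, r_ova)
}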

+ 492
- 0
folding-schemes/src/folding/nova/decider.rs

@ -0,0 +1,492 @@
/// This file implements the offchain decider. For ethereum use cases, use the
/// DeciderEth from decider_eth.rs file.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_snark::SNARK;
use ark_std::rand::{CryptoRng, RngCore};
use ark_std::{One, Zero};
use core::marker::PhantomData;
use super::decider_circuits::{DeciderCircuit1, DeciderCircuit2};
use super::{nifs::NIFS, traits::NIFSTrait, CommittedInstance, Nova};
use crate::commitment::CommitmentScheme;
use crate::folding::circuits::{
cyclefold::CycleFoldCommittedInstance,
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
CF2,
};
use crate::frontend::FCircuit;
use crate::Error;
use crate::{Decider as DeciderTrait, FoldingScheme};
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Proof<C1, C2, CS1, CS2, S1, S2>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1>,
CS2: CommitmentScheme<C2>,
S1: SNARK<C1::ScalarField>,
S2: SNARK<C2::ScalarField>,
{
c1_snark_proof: S1::Proof,
c2_snark_proof: S2::Proof,
cs1_proofs: [CS1::Proof; 2],
cs2_proofs: [CS2::Proof; 2],
// cmT and r are values for the last fold, U_{i+1}=NIFS.V(r, U_i, u_i, cmT), and they are
// checked in-circuit
cmT: C1,
r: C1::ScalarField,
// cyclefold committed instance
cf_U_i: CycleFoldCommittedInstance<C2>,
// the CS challenges are provided by the prover, but in-circuit they are checked to match the
// in-circuit computed ones.
cs1_challenges: [C1::ScalarField; 2],
cs2_challenges: [C2::ScalarField; 2],
}
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProverParam<CS1_ProvingKey, S1_ProvingKey, CS2_ProvingKey, S2_ProvingKey>
where
CS1_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
S1_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
CS2_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
S2_ProvingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
{
pub c1_snark_pp: S1_ProvingKey,
pub c1_cs_pp: CS1_ProvingKey,
pub c2_snark_pp: S2_ProvingKey,
pub c2_cs_pp: CS2_ProvingKey,
}
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct VerifierParam<C1, CS1_VerifyingKey, S1_VerifyingKey, CS2_VerifyingKey, S2_VerifyingKey>
where
C1: CurveGroup,
CS1_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
S1_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
CS2_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
S2_VerifyingKey: Clone + CanonicalSerialize + CanonicalDeserialize,
{
pub pp_hash: C1::ScalarField,
pub c1_snark_vp: S1_VerifyingKey,
pub c1_cs_vp: CS1_VerifyingKey,
pub c2_snark_vp: S2_VerifyingKey,
pub c2_cs_vp: CS2_VerifyingKey,
}
/// Offchain Decider, for non-ethereum use cases
#[derive(Clone, Debug)]
pub struct Decider<C1, GC1, C2, GC2, FC, CS1, CS2, S1, S2, FS> {
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
_fc: PhantomData<FC>,
_cs1: PhantomData<CS1>,
_cs2: PhantomData<CS2>,
_s1: PhantomData<S1>,
_s2: PhantomData<S2>,
_fs: PhantomData<FS>,
}
impl<C1, GC1, C2, GC2, FC, CS1, CS2, S1, S2, FS> DeciderTrait<C1, C2, FC, FS>
for Decider<C1, GC1, C2, GC2, FC, CS1, CS2, S1, S2, FS>
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<
C1,
ProverChallenge = C1::ScalarField,
Challenge = C1::ScalarField,
Proof = crate::commitment::kzg::Proof<C1>,
>,
CS2: CommitmentScheme<
C2,
ProverChallenge = C2::ScalarField,
Challenge = C2::ScalarField,
Proof = crate::commitment::kzg::Proof<C2>,
>,
S1: SNARK<C1::ScalarField>,
S2: SNARK<C2::ScalarField>,
FS: FoldingScheme<C1, C2, FC>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC1: GroupOpsBounds<'b, C1, GC1>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
// constrain FS into Nova, since this is a Decider specifically for Nova
Nova<C1, GC1, C2, GC2, FC, CS1, CS2, false>: From<FS>,
crate::folding::nova::ProverParams<C1, C2, CS1, CS2, false>:
From<<FS as FoldingScheme<C1, C2, FC>>::ProverParam>,
crate::folding::nova::VerifierParams<C1, C2, CS1, CS2, false>:
From<<FS as FoldingScheme<C1, C2, FC>>::VerifierParam>,
{
type PreprocessorParam = (FS::ProverParam, FS::VerifierParam);
type ProverParam =
ProverParam<CS1::ProverParams, S1::ProvingKey, CS2::ProverParams, S2::ProvingKey>;
type Proof = Proof<C1, C2, CS1, CS2, S1, S2>;
type VerifierParam = VerifierParam<
C1,
CS1::VerifierParams,
S1::VerifyingKey,
CS2::VerifierParams,
S2::VerifyingKey,
>;
type PublicInput = Vec<C1::ScalarField>;
type CommittedInstance = CommittedInstance<C1>;
fn preprocess(
mut rng: impl RngCore + CryptoRng,
prep_param: Self::PreprocessorParam,
fs: FS,
) -> Result<(Self::ProverParam, Self::VerifierParam), Error> {
let circuit1 = DeciderCircuit1::<C1, C2, GC2>::from_nova::<GC1, CS1, CS2, false, FC>(
fs.clone().into(),
)?;
let circuit2 =
DeciderCircuit2::<C1, GC1, C2>::from_nova::<GC2, CS1, CS2, false, FC>(fs.into())?;
// get the Groth16 specific setup for the circuits
let (c1_g16_pk, c1_g16_vk) = S1::circuit_specific_setup(circuit1, &mut rng).unwrap();
let (c2_g16_pk, c2_g16_vk) = S2::circuit_specific_setup(circuit2, &mut rng).unwrap();
// get the FoldingScheme prover & verifier params from Nova
#[allow(clippy::type_complexity)]
let nova_pp: <Nova<C1, GC1, C2, GC2, FC, CS1, CS2, false> as FoldingScheme<
C1,
C2,
FC,
>>::ProverParam = prep_param.0.clone().into();
#[allow(clippy::type_complexity)]
let nova_vp: <Nova<C1, GC1, C2, GC2, FC, CS1, CS2, false> as FoldingScheme<
C1,
C2,
FC,
>>::VerifierParam = prep_param.1.clone().into();
let pp_hash = nova_vp.pp_hash()?;
let pp = Self::ProverParam {
c1_snark_pp: c1_g16_pk,
c1_cs_pp: nova_pp.cs_pp,
c2_snark_pp: c2_g16_pk,
c2_cs_pp: nova_pp.cf_cs_pp,
};
let vp = Self::VerifierParam {
pp_hash,
c1_snark_vp: c1_g16_vk,
c1_cs_vp: nova_vp.cs_vp,
c2_snark_vp: c2_g16_vk,
c2_cs_vp: nova_vp.cf_cs_vp,
};
Ok((pp, vp))
}
fn prove(
mut rng: impl RngCore + CryptoRng,
pp: Self::ProverParam,
fs: FS,
) -> Result<Self::Proof, Error> {
let circuit1 = DeciderCircuit1::<C1, C2, GC2>::from_nova::<GC1, CS1, CS2, false, FC>(
fs.clone().into(),
)?;
let circuit2 =
DeciderCircuit2::<C1, GC1, C2>::from_nova::<GC2, CS1, CS2, false, FC>(fs.into())?;
let c1_snark_proof = S1::prove(&pp.c1_snark_pp, circuit1.clone(), &mut rng)
.map_err(|e| Error::Other(e.to_string()))?;
let c2_snark_proof = S2::prove(&pp.c2_snark_pp, circuit2.clone(), &mut rng)
.map_err(|e| Error::Other(e.to_string()))?;
let cmT = circuit1.cmT.unwrap();
let r_Fr = circuit1.r.unwrap();
let W_i1 = circuit1.W_i1.unwrap();
let cf_W_i = circuit2.cf_W_i.unwrap();
// get the challenges that have already been computed when preparing the circuits' inputs in
// the above `from_nova` calls
let challenge_W = circuit1
.cs_c_W
.ok_or(Error::MissingValue("cs_c_W".to_string()))?;
let challenge_E = circuit1
.cs_c_E
.ok_or(Error::MissingValue("cs_c_E".to_string()))?;
let c2_challenge_W = circuit2
.cs_c_W
.ok_or(Error::MissingValue("c2's cs_c_W".to_string()))?;
let c2_challenge_E = circuit2
.cs_c_E
.ok_or(Error::MissingValue("c2's cs_c_E".to_string()))?;
// generate CommitmentScheme proofs for the main instance
let U_cmW_proof = CS1::prove_with_challenge(
&pp.c1_cs_pp,
challenge_W,
&W_i1.W,
&C1::ScalarField::zero(),
None,
)?;
let U_cmE_proof = CS1::prove_with_challenge(
&pp.c1_cs_pp,
challenge_E,
&W_i1.E,
&C1::ScalarField::zero(),
None,
)?;
// CS proofs for the CycleFold instance
let cf_cmW_proof = CS2::prove_with_challenge(
&pp.c2_cs_pp,
c2_challenge_W,
&cf_W_i.W,
&C2::ScalarField::zero(),
None,
)?;
let cf_cmE_proof = CS2::prove_with_challenge(
&pp.c2_cs_pp,
c2_challenge_E,
&cf_W_i.E,
&C2::ScalarField::zero(),
None,
)?;
Ok(Self::Proof {
c1_snark_proof,
c2_snark_proof,
cs1_proofs: [U_cmW_proof, U_cmE_proof],
cs2_proofs: [cf_cmW_proof, cf_cmE_proof],
cmT,
r: r_Fr,
cf_U_i: circuit1.cf_U_i.unwrap(),
cs1_challenges: [challenge_W, challenge_E],
cs2_challenges: [c2_challenge_W, c2_challenge_E],
})
}
fn verify(
vp: Self::VerifierParam,
i: C1::ScalarField,
z_0: Vec<C1::ScalarField>,
z_i: Vec<C1::ScalarField>,
running_instance: &Self::CommittedInstance,
incoming_instance: &Self::CommittedInstance,
proof: &Self::Proof,
) -> Result<bool, Error> {
if i <= C1::ScalarField::one() {
return Err(Error::NotEnoughSteps);
}
// compute U = U_{d+1} = NIFS.V(r, U_d, u_d, cmT)
let U = NIFS::<C1, CS1>::verify(proof.r, running_instance, incoming_instance, &proof.cmT);
let (cmE_x, cmE_y) = NonNativeAffineVar::inputize(U.cmE)?;
let (cmW_x, cmW_y) = NonNativeAffineVar::inputize(U.cmW)?;
let (cmT_x, cmT_y) = NonNativeAffineVar::inputize(proof.cmT)?;
let zero = (&C2::BaseField::zero(), &C2::BaseField::zero());
let cmE_affine = proof.cf_U_i.cmE.into_affine();
let cmW_affine = proof.cf_U_i.cmW.into_affine();
let (cf_cmE_x, cf_cmE_y) = cmE_affine.xy().unwrap_or(zero);
let cf_cmE_z = C1::ScalarField::one();
let (cf_cmW_x, cf_cmW_y) = cmW_affine.xy().unwrap_or(zero);
let cf_cmW_z = C1::ScalarField::one();
// snark proof 1
let c1_public_input: Vec<C1::ScalarField> = [
vec![vp.pp_hash, i],
z_0,
z_i,
// U_{i+1} values:
vec![U.u],
U.x.clone(),
cmE_x,
cmE_y,
cmW_x,
cmW_y,
// CS1 values:
proof.cs1_challenges.to_vec(), // c_W, c_E
vec![
proof.cs1_proofs[0].eval, // eval_W
proof.cs1_proofs[1].eval, // eval_E
],
// cf_U_i values
NonNativeUintVar::<CF2<C2>>::inputize(proof.cf_U_i.u),
proof
.cf_U_i
.x
.iter()
.flat_map(|&x_i| NonNativeUintVar::<CF2<C2>>::inputize(x_i))
.collect::<Vec<C1::ScalarField>>(),
vec![
*cf_cmE_x, *cf_cmE_y, cf_cmE_z, *cf_cmW_x, *cf_cmW_y, cf_cmW_z,
],
// NIFS values:
cmT_x,
cmT_y,
vec![proof.r],
]
.concat();
let c1_snark_v = S1::verify(&vp.c1_snark_vp, &c1_public_input, &proof.c1_snark_proof)
.map_err(|e| Error::Other(e.to_string()))?;
if !c1_snark_v {
return Err(Error::SNARKVerificationFail);
}
let (cf2_cmE_x, cf2_cmE_y) = NonNativeAffineVar::inputize(proof.cf_U_i.cmE)?;
let (cf2_cmW_x, cf2_cmW_y) = NonNativeAffineVar::inputize(proof.cf_U_i.cmW)?;
// snark proof 2
// migrate pp_hash from C1::Fr to C1::Fq
let pp_hash_Fq =
C2::ScalarField::from_le_bytes_mod_order(&vp.pp_hash.into_bigint().to_bytes_le());
let c2_public_input: Vec<C2::ScalarField> = [
vec![pp_hash_Fq],
vec![proof.cf_U_i.u],
proof.cf_U_i.x.clone(),
cf2_cmE_x,
cf2_cmE_y,
cf2_cmW_x,
cf2_cmW_y,
proof.cs2_challenges.to_vec(),
vec![
proof.cs2_proofs[0].eval, // eval_W
proof.cs2_proofs[1].eval, // eval_E
],
]
.concat();
let c2_snark_v = S2::verify(&vp.c2_snark_vp, &c2_public_input, &proof.c2_snark_proof)
.map_err(|e| Error::Other(e.to_string()))?;
if !c2_snark_v {
return Err(Error::SNARKVerificationFail);
}
// check C1 commitments (main instance commitments)
CS1::verify_with_challenge(
&vp.c1_cs_vp,
proof.cs1_challenges[0],
&U.cmW,
&proof.cs1_proofs[0],
)?;
CS1::verify_with_challenge(
&vp.c1_cs_vp,
proof.cs1_challenges[1],
&U.cmE,
&proof.cs1_proofs[1],
)?;
// check C2 commitments (CycleFold instance commitments)
CS2::verify_with_challenge(
&vp.c2_cs_vp,
proof.cs2_challenges[0],
&proof.cf_U_i.cmW,
&proof.cs2_proofs[0],
)?;
CS2::verify_with_challenge(
&vp.c2_cs_vp,
proof.cs2_challenges[1],
&proof.cf_U_i.cmE,
&proof.cs2_proofs[1],
)?;
Ok(true)
}
}
#[cfg(test)]
pub mod tests {
use ark_groth16::Groth16;
// Note: do not use the MNTx_298 curves in practice, these are just for tests. Use the MNTx_753
// curves instead.
use ark_mnt4_298::{
constraints::G1Var as GVar, Fr, G1Projective as Projective, MNT4_298 as MNT4,
};
use ark_mnt6_298::{
constraints::G1Var as GVar2, G1Projective as Projective2, MNT6_298 as MNT6,
};
use std::time::Instant;
use super::*;
use crate::commitment::kzg::KZG;
use crate::folding::nova::PreprocessorParam;
use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
#[test]
fn test_decider() {
// use Nova as FoldingScheme
type N = Nova<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
KZG<'static, MNT4>,
KZG<'static, MNT6>,
false,
>;
type D = Decider<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
KZG<'static, MNT4>,
KZG<'static, MNT6>,
Groth16<MNT4>,
Groth16<MNT6>,
N, // here we define the FoldingScheme to use
>;
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
let z_0 = vec![Fr::from(3_u32)];
let start = Instant::now();
let prep_param = PreprocessorParam::new(poseidon_config, F_circuit);
let nova_params = N::preprocess(&mut rng, &prep_param).unwrap();
println!("Nova preprocess, {:?}", start.elapsed());
let start = Instant::now();
let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap();
println!("Nova initialized, {:?}", start.elapsed());
let start = Instant::now();
nova.prove_step(&mut rng, vec![], None).unwrap();
println!("prove_step, {:?}", start.elapsed());
nova.prove_step(&mut rng, vec![], None).unwrap(); // do a 2nd step
let mut rng = rand::rngs::OsRng;
// prepare the Decider prover & verifier params
let start = Instant::now();
let (decider_pp, decider_vp) = D::preprocess(&mut rng, nova_params, nova.clone()).unwrap();
println!("Decider preprocess, {:?}", start.elapsed());
// decider proof generation
let start = Instant::now();
let proof = D::prove(rng, decider_pp, nova.clone()).unwrap();
println!("Decider prove, {:?}", start.elapsed());
// decider proof verification
let start = Instant::now();
let verified = D::verify(
decider_vp, nova.i, nova.z_0, nova.z_i, &nova.U_i, &nova.u_i, &proof,
)
.unwrap();
assert!(verified);
println!("Decider verify, {:?}", start.elapsed());
}
}
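For reference, the fold that `verify` above recomputes via `NIFS::<C1, CS1>::verify(proof.r, U_i, u_i, &proof.cmT)` is the usual Nova linear combination. Below is a standalone sketch of that combination over Pallas; it is illustrative only, and the crate's `NIFS::verify` remains the authoritative implementation.
use ark_pallas::{Fr, Projective};
use crate::folding::nova::CommittedInstance;
// Folded instance U' from a running instance U, an incoming instance u, the
// cross-term commitment cmT and the challenge r:
//   u'   = U.u   + r * u.u
//   x'   = U.x   + r * u.x              (component-wise)
//   cmW' = U.cmW + r * u.cmW
//   cmE' = U.cmE + r * cmT + r^2 * u.cmE
#[allow(non_snake_case)]
fn fold_instances(
    r: Fr,
    U: &CommittedInstance<Projective>,
    u: &CommittedInstance<Projective>,
    cmT: &Projective,
) -> CommittedInstance<Projective> {
    CommittedInstance {
        u: U.u + r * u.u,
        x: U.x.iter().zip(&u.x).map(|(a, b)| *a + r * b).collect(),
        cmW: U.cmW + u.cmW * r,
        cmE: U.cmE + *cmT * r + u.cmE * (r * r),
    }
}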

+ 553
- 0
folding-schemes/src/folding/nova/decider_circuits.rs

@ -0,0 +1,553 @@
/// This file implements the offchain decider circuit. For ethereum use cases, use the
/// DeciderEthCircuit.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::{CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField};
use ark_poly::Polynomial;
use ark_r1cs_std::{
alloc::AllocVar,
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError};
use ark_std::Zero;
use core::marker::PhantomData;
use super::{
circuits::{ChallengeGadget, CommittedInstanceVar},
decider_eth_circuit::{
evaluate_gadget, KZGChallengesGadget, R1CSVar, RelaxedR1CSGadget, WitnessVar,
},
nifs::NIFS,
traits::NIFSTrait,
CommittedInstance, Nova, Witness,
};
use crate::arith::r1cs::R1CS;
use crate::commitment::CommitmentScheme;
use crate::folding::circuits::{
cyclefold::{
CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, CycleFoldConfig,
CycleFoldWitness,
},
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
CF1, CF2,
};
use crate::folding::nova::NovaCycleFoldConfig;
use crate::folding::traits::{CommittedInstanceVarOps, Dummy};
use crate::frontend::FCircuit;
use crate::utils::vec::poly_from_vec;
use crate::Error;
/// Circuit that implements part of the in-circuit checks needed for the offchain verification over
/// Curve2's BaseField (= Curve1's ScalarField).
#[derive(Clone, Debug)]
pub struct DeciderCircuit1<C1, C2, GC2>
where
C1: CurveGroup,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
{
_c1: PhantomData<C1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
/// E vector's length of the Nova instance witness
pub E_len: usize,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// R1CS of the Augmented Function circuit
pub r1cs: R1CS<C1::ScalarField>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
/// public params hash
pub pp_hash: Option<C1::ScalarField>,
pub i: Option<CF1<C1>>,
/// initial state
pub z_0: Option<Vec<C1::ScalarField>>,
/// current i-th state
pub z_i: Option<Vec<C1::ScalarField>>,
/// Nova instances
pub u_i: Option<CommittedInstance<C1>>,
pub w_i: Option<Witness<C1>>,
pub U_i: Option<CommittedInstance<C1>>,
pub W_i: Option<Witness<C1>>,
pub U_i1: Option<CommittedInstance<C1>>,
pub W_i1: Option<Witness<C1>>,
pub cmT: Option<C1>,
pub r: Option<C1::ScalarField>,
/// CycleFold running instance
pub cf_U_i: Option<CycleFoldCommittedInstance<C2>>,
/// Commitment Scheme challenges
pub cs_c_W: Option<C1::ScalarField>,
pub cs_c_E: Option<C1::ScalarField>,
/// Evaluations of the committed polynomials at the challenge
pub eval_W: Option<C1::ScalarField>,
pub eval_E: Option<C1::ScalarField>,
}
impl<C1, C2, GC2> DeciderCircuit1<C1, C2, GC2>
where
C1: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
{
pub fn from_nova<GC1, CS1, CS2, const H: bool, FC: FCircuit<C1::ScalarField>>(
nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>,
) -> Result<Self, Error>
where
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
{
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&nova.poseidon_config);
// pp_hash is absorbed into the transcript in the ChallengeGadget::get_challenge_native call
// compute the U_{i+1}, W_{i+1}
let (T, cmT) = NIFS::<C1, CS1, H>::compute_cmT(
&nova.cs_pp,
&nova.r1cs.clone(),
&nova.w_i.clone(),
&nova.u_i.clone(),
&nova.W_i.clone(),
&nova.U_i.clone(),
)?;
let r_bits = NIFS::<C1, CS1, H>::get_challenge(
&mut transcript,
nova.pp_hash,
&nova.U_i,
&nova.u_i,
&cmT,
);
let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?;
let (W_i1, U_i1) =
NIFS::<C1, CS1, H>::prove(r_Fr, &nova.W_i, &nova.U_i, &nova.w_i, &nova.u_i, &T, &cmT)?;
// compute the commitment scheme challenges used as inputs in the circuit
let (cs_challenge_W, cs_challenge_E) =
KZGChallengesGadget::<C1>::get_challenges_native(&mut transcript, U_i1.clone());
// get evals of the committed polys at the challenges
let mut W = W_i1.W.clone();
W.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.W.len().next_power_of_two() - W_i1.W.len()),
);
let mut E = W_i1.E.clone();
E.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.E.len().next_power_of_two() - W_i1.E.len()),
);
let p_W = poly_from_vec(W.to_vec())?;
let eval_W = p_W.evaluate(&cs_challenge_W);
let p_E = poly_from_vec(E.to_vec())?;
let eval_E = p_E.evaluate(&cs_challenge_E);
Ok(Self {
_c1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
E_len: nova.W_i.E.len(),
cf_E_len: nova.cf_W_i.E.len(),
r1cs: nova.r1cs,
poseidon_config: nova.poseidon_config,
pp_hash: Some(nova.pp_hash),
i: Some(nova.i),
z_0: Some(nova.z_0),
z_i: Some(nova.z_i),
u_i: Some(nova.u_i),
w_i: Some(nova.w_i),
U_i: Some(nova.U_i),
W_i: Some(nova.W_i),
U_i1: Some(U_i1),
W_i1: Some(W_i1),
cmT: Some(cmT),
r: Some(r_Fr),
cf_U_i: Some(nova.cf_U_i),
cs_c_W: Some(cs_challenge_W),
cs_c_E: Some(cs_challenge_E),
eval_W: Some(eval_W),
eval_E: Some(eval_E),
})
}
}
impl<C1, C2, GC2> ConstraintSynthesizer<CF1<C1>> for DeciderCircuit1<C1, C2, GC2>
where
C1: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
C2: CurveGroup,
<C2 as CurveGroup>::BaseField: PrimeField,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let r1cs =
R1CSVar::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
Ok(self.r1cs.clone())
})?;
let pp_hash = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.pp_hash.unwrap_or_else(CF1::<C1>::zero))
})?;
let i =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::<C1>::zero)))?;
let z_0 = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let z_i = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let u_dummy_native = CommittedInstance::<C1>::dummy(&self.r1cs);
let w_dummy_native = Witness::<C1>::dummy(&self.r1cs);
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
})?;
// here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i))
let U_i1 = CommittedInstanceVar::<C1>::new_input(cs.clone(), || {
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
})?;
let W_i1 = WitnessVar::<C1>::new_witness(cs.clone(), || {
Ok(self.W_i1.unwrap_or(w_dummy_native.clone()))
})?;
// allocate the inputs for the check 6
let cs_c_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.cs_c_W.unwrap_or_else(CF1::<C1>::zero))
})?;
let cs_c_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.cs_c_E.unwrap_or_else(CF1::<C1>::zero))
})?;
let eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero))
})?;
let eval_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_E.unwrap_or_else(CF1::<C1>::zero))
})?;
// `sponge` is for digest computation.
let sponge = PoseidonSpongeVar::<C1::ScalarField>::new(cs.clone(), &self.poseidon_config);
// `transcript` is for challenge generation.
let mut transcript = sponge.clone();
// notice that the `pp_hash` is absorbed inside the ChallengeGadget::get_challenge_gadget call
// 2. u_i.cmE==cm(0), u_i.u==1
// Here zero is the x & y coordinates of the zero point affine representation.
let zero = NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::zero())?;
u_i.cmE.x.enforce_equal_unaligned(&zero)?;
u_i.cmE.y.enforce_equal_unaligned(&zero)?;
(u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
// 3.a u_i.x[0] == H(i, z_0, z_i, U_i)
let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
(u_i.x[0]).enforce_equal(&u_i_x)?;
// 3.b u_i.x[1] == H(cf_U_i)
let cf_u_dummy_native =
CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_input(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
(u_i.x[1]).enforce_equal(&cf_u_i_x)?;
// 4. check RelaxedR1CS of U_{i+1}
let z_U1: Vec<FpVar<CF1<C1>>> =
[vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?;
// 1.1.a, 5.1 compute NIFS.V and Commitment Scheme challenges.
// We need to ensure the order of challenge generation is the same as
// the native counterpart, so we first compute the challenges here and
// do the actual checks later.
let cmT =
NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?;
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_gadget(
&mut transcript,
pp_hash,
U_i_vec,
u_i.clone(),
Some(cmT.clone()),
)?;
// 5.1.
let (incircuit_c_W, incircuit_c_E) =
KZGChallengesGadget::<C1>::get_challenges_gadget(&mut transcript, U_i1.clone())?;
incircuit_c_W.enforce_equal(&cs_c_W)?;
incircuit_c_E.enforce_equal(&cs_c_E)?;
// 5.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.W, incircuit_c_W)?;
let incircuit_eval_E = evaluate_gadget::<CF1<C1>>(W_i1.E, incircuit_c_E)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
incircuit_eval_E.enforce_equal(&eval_E)?;
// 1.1.b check that the NIFS.V challenge matches the one from the public input (so we avoid
// the verifier computing it)
let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?;
// check that the in-circuit computed r is equal to the inputted r
let r =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.r.unwrap_or_else(CF1::<C1>::zero)))?;
r_Fr.enforce_equal(&r)?;
Ok(())
}
}
/// Circuit that implements part of the in-circuit checks needed for the offchain verification over
/// Curve1's BaseField (= Curve2's ScalarField).
#[derive(Clone, Debug)]
pub struct DeciderCircuit2<C1, GC1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
{
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// R1CS of the CycleFold circuit
pub cf_r1cs: R1CS<C2::ScalarField>,
pub poseidon_config: PoseidonConfig<CF1<C2>>,
/// public params hash
pub pp_hash: Option<C2::ScalarField>,
/// CycleFold running instance. Notice that here we use Nova's CommittedInstance (instead of
/// CycleFoldCommittedInstance), since we are over C2::Fr, so that the CycleFold instances can
/// be computed natively
pub cf_U_i: Option<CommittedInstance<C2>>,
pub cf_W_i: Option<CycleFoldWitness<C2>>,
/// Commitment Scheme challenges
pub cs_c_W: Option<C2::ScalarField>,
pub cs_c_E: Option<C2::ScalarField>,
/// Evaluations of the committed polynomials at the challenge
pub eval_W: Option<C2::ScalarField>,
pub eval_E: Option<C2::ScalarField>,
}
impl<C1, GC1, C2> DeciderCircuit2<C1, GC1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as CurveGroup>::BaseField: PrimeField,
<C2 as Group>::ScalarField: Absorb,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
{
pub fn from_nova<GC2, CS1, CS2, const H: bool, FC: FCircuit<C1::ScalarField>>(
nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>,
) -> Result<Self, Error>
where
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
{
// compute the Commitment Scheme challenges of the CycleFold instance commitments, used as
// inputs in the circuit
let poseidon_config =
crate::transcript::poseidon::poseidon_canonical_config::<C2::ScalarField>();
let mut transcript = PoseidonSponge::<C2::ScalarField>::new(&poseidon_config);
let pp_hash_Fq =
C2::ScalarField::from_le_bytes_mod_order(&nova.pp_hash.into_bigint().to_bytes_le());
transcript.absorb(&pp_hash_Fq);
let (cs_challenge_W, cs_challenge_E) =
KZGChallengesGadget::<C2>::get_challenges_native(&mut transcript, nova.cf_U_i.clone());
// get evals of the committed polynomials at the challenge
let mut W = nova.cf_W_i.W.clone();
W.extend(
std::iter::repeat(C2::ScalarField::zero())
.take(nova.cf_W_i.W.len().next_power_of_two() - nova.cf_W_i.W.len()),
);
let mut E = nova.cf_W_i.E.clone();
E.extend(
std::iter::repeat(C2::ScalarField::zero())
.take(nova.cf_W_i.E.len().next_power_of_two() - nova.cf_W_i.E.len()),
);
let p_W = poly_from_vec(W.to_vec())?;
let eval_W = p_W.evaluate(&cs_challenge_W);
let p_E = poly_from_vec(E.to_vec())?;
let eval_E = p_E.evaluate(&cs_challenge_E);
Ok(Self {
_c1: PhantomData,
_gc1: PhantomData,
_c2: PhantomData,
cf_E_len: nova.cf_W_i.E.len(),
cf_r1cs: nova.cf_r1cs,
poseidon_config,
pp_hash: Some(pp_hash_Fq),
cf_U_i: Some(nova.cf_U_i),
cf_W_i: Some(nova.cf_W_i),
// CycleFold instance commitments challenges
cs_c_W: Some(cs_challenge_W),
cs_c_E: Some(cs_challenge_E),
eval_W: Some(eval_W),
eval_E: Some(eval_E),
})
}
}
impl<C1, GC1, C2> ConstraintSynthesizer<CF1<C2>> for DeciderCircuit2<C1, GC1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C2>>) -> Result<(), SynthesisError> {
let pp_hash = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.pp_hash.unwrap_or_else(CF1::<C2>::zero))
})?;
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(&self.cf_r1cs);
let w_dummy_native = Witness::<C2>::dummy(&self.cf_r1cs);
let cf_U_i = CommittedInstanceVar::<C2>::new_input(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_W_i = WitnessVar::<C2>::new_witness(cs.clone(), || {
Ok(self.cf_W_i.unwrap_or(w_dummy_native.clone()))
})?;
let cf_r1cs =
R1CSVar::<C2::ScalarField, CF1<C2>, FpVar<CF1<C2>>>::new_witness(cs.clone(), || {
Ok(self.cf_r1cs.clone())
})?;
// 6. check RelaxedR1CS of cf_U_i
let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(cf_r1cs, cf_W_i.E.clone(), cf_U_i.u.clone(), cf_z_U)?;
// `transcript` is for challenge generation.
let mut transcript =
PoseidonSpongeVar::<C2::ScalarField>::new(cs.clone(), &self.poseidon_config);
transcript.absorb(&pp_hash)?;
// allocate the inputs for the check 7.1
let cs_c_W = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.cs_c_W.unwrap_or_else(CF1::<C2>::zero))
})?;
let cs_c_E = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.cs_c_E.unwrap_or_else(CF1::<C2>::zero))
})?;
// allocate the inputs for the check 7.2
let eval_W = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C2>::zero))
})?;
let eval_E = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.eval_E.unwrap_or_else(CF1::<C2>::zero))
})?;
// 7.1. check the commitment scheme challenges correct computation
let (incircuit_c_W, incircuit_c_E) =
KZGChallengesGadget::<C2>::get_challenges_gadget(&mut transcript, cf_U_i.clone())?;
incircuit_c_W.enforce_equal(&cs_c_W)?;
incircuit_c_E.enforce_equal(&cs_c_E)?;
// 7.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
let incircuit_eval_W = evaluate_gadget::<CF1<C2>>(cf_W_i.W, incircuit_c_W)?;
let incircuit_eval_E = evaluate_gadget::<CF1<C2>>(cf_W_i.E, incircuit_c_E)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
incircuit_eval_E.enforce_equal(&eval_E)?;
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_relations::r1cs::ConstraintSystem;
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use super::*;
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::PreprocessorParam;
use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::FoldingScheme;
#[test]
fn test_decider_circuits() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
let z_0 = vec![Fr::from(3_u32)];
type N = Nova<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
false,
>;
let prep_param = PreprocessorParam::<
Projective,
Projective2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
false,
>::new(poseidon_config, F_circuit);
let nova_params = N::preprocess(&mut rng, &prep_param).unwrap();
// generate a Nova instance and do a step of it
let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap();
nova.prove_step(&mut rng, vec![], None).unwrap();
// verify the IVC
let ivc_proof = nova.ivc_proof();
N::verify(nova_params.1, ivc_proof).unwrap();
// load the DeciderCircuit 1 & 2 from the Nova instance
let decider_circuit1 =
DeciderCircuit1::<Projective, Projective2, GVar2>::from_nova(nova.clone()).unwrap();
let decider_circuit2 =
DeciderCircuit2::<Projective, GVar, Projective2>::from_nova(nova).unwrap();
// generate the constraints of both circuits and check that are satisfied by the inputs
let cs1 = ConstraintSystem::<Fr>::new_ref();
decider_circuit1.generate_constraints(cs1.clone()).unwrap();
assert!(cs1.is_satisfied().unwrap());
let cs2 = ConstraintSystem::<Fq>::new_ref();
decider_circuit2.generate_constraints(cs2.clone()).unwrap();
assert!(cs2.is_satisfied().unwrap());
}
}

+ 16
- 12
folding-schemes/src/folding/nova/decider_eth.rs

@ -1,4 +1,7 @@
/// This file implements the Nova's onchain (Ethereum's EVM) decider.
/// This file implements Nova's onchain (Ethereum's EVM) decider. For non-Ethereum use cases,
/// the Decider from the decider.rs file will be more efficient.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
use ark_bn254::Bn254; use ark_bn254::Bn254;
use ark_crypto_primitives::sponge::Absorb; use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{AffineRepr, CurveGroup, Group}; use ark_ec::{AffineRepr, CurveGroup, Group};
@ -12,6 +15,7 @@ use ark_std::{One, Zero};
use core::marker::PhantomData; use core::marker::PhantomData;
pub use super::decider_eth_circuit::DeciderEthCircuit; pub use super::decider_eth_circuit::DeciderEthCircuit;
use super::traits::NIFSTrait;
use super::{nifs::NIFS, CommittedInstance, Nova}; use super::{nifs::NIFS, CommittedInstance, Nova};
use crate::commitment::{ use crate::commitment::{
kzg::{Proof as KZGProof, KZG}, kzg::{Proof as KZGProof, KZG},
@ -71,8 +75,8 @@ impl DeciderTrait
for Decider<C1, GC1, C2, GC2, FC, CS1, CS2, S, FS> for Decider<C1, GC1, C2, GC2, FC, CS1, CS2, S, FS>
where where
C1: CurveGroup, C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>, GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>, GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>, FC: FCircuit<C1::ScalarField>,
// CS1 is a KZG commitment, where challenge is C1::Fr elem // CS1 is a KZG commitment, where challenge is C1::Fr elem
@ -339,9 +343,7 @@ pub mod tests {
use super::*; use super::*;
use crate::commitment::pedersen::Pedersen; use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::{
PreprocessorParam, ProverParams as NovaProverParams, VerifierParams as NovaVerifierParams,
};
use crate::folding::nova::{PreprocessorParam, ProverParams as NovaProverParams};
use crate::frontend::utils::CubicFCircuit; use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config; use crate::transcript::poseidon::poseidon_canonical_config;
@ -400,7 +402,7 @@ pub mod tests {
let start = Instant::now(); let start = Instant::now();
let verified = D::verify( let verified = D::verify(
decider_vp.clone(), decider_vp.clone(),
nova.i.clone(),
nova.i,
nova.z_0.clone(), nova.z_0.clone(),
nova.z_i.clone(), nova.z_i.clone(),
&nova.U_i, &nova.U_i,
@ -486,13 +488,15 @@ pub mod tests {
&mut nova_pp_serialized.as_slice() &mut nova_pp_serialized.as_slice()
) )
.unwrap(); .unwrap();
let nova_vp_deserialized = NovaVerifierParams::<
let nova_vp_deserialized = <N as FoldingScheme<
Projective, Projective,
Projective2, Projective2,
KZG<'static, Bn254>,
Pedersen<Projective2>,
>::deserialize_compressed(
&mut nova_vp_serialized.as_slice()
CubicFCircuit<Fr>,
>>::vp_deserialize_with_mode(
&mut nova_vp_serialized.as_slice(),
ark_serialize::Compress::Yes,
ark_serialize::Validate::Yes,
(), // fcircuit_params
) )
.unwrap(); .unwrap();
@ -514,7 +518,7 @@ pub mod tests {
let start = Instant::now(); let start = Instant::now();
let verified = D::verify( let verified = D::verify(
decider_vp.clone(), decider_vp.clone(),
nova.i.clone(),
nova.i,
nova.z_0.clone(), nova.z_0.clone(),
nova.z_i.clone(), nova.z_i.clone(),
&nova.U_i, &nova.U_i,

+ 77
- 93
folding-schemes/src/folding/nova/decider_eth_circuit.rs

@ -1,5 +1,7 @@
/// This file implements the onchain (Ethereum's EVM) decider circuit. For non-ethereum use cases, /// This file implements the onchain (Ethereum's EVM) decider circuit. For non-ethereum use cases,
/// other more efficient approaches can be used. /// other more efficient approaches can be used.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
use ark_crypto_primitives::sponge::{ use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar, constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge}, poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
@ -25,9 +27,9 @@ use core::{borrow::Borrow, marker::PhantomData};
use super::{ use super::{
circuits::{ChallengeGadget, CommittedInstanceVar}, circuits::{ChallengeGadget, CommittedInstanceVar},
nifs::NIFS, nifs::NIFS,
traits::NIFSTrait,
CommittedInstance, Nova, Witness, CommittedInstance, Nova, Witness,
}; };
use crate::arith::r1cs::R1CS;
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme}; use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme};
use crate::folding::circuits::{ use crate::folding::circuits::{
cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness}, cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness},
@ -41,6 +43,10 @@ use crate::utils::{
vec::poly_from_vec, vec::poly_from_vec,
}; };
use crate::Error; use crate::Error;
use crate::{
arith::r1cs::R1CS,
folding::traits::{CommittedInstanceVarOps, Dummy, WitnessVarOps},
};
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RelaxedR1CSGadget {} pub struct RelaxedR1CSGadget {}
@ -135,7 +141,6 @@ pub struct WitnessVar {
impl<C> AllocVar<Witness<C>, CF1<C>> for WitnessVar<C> impl<C> AllocVar<Witness<C>, CF1<C>> for WitnessVar<C>
where where
C: CurveGroup, C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
{ {
fn new_variable<T: Borrow<Witness<C>>>( fn new_variable<T: Borrow<Witness<C>>>(
cs: impl Into<Namespace<CF1<C>>>, cs: impl Into<Namespace<CF1<C>>>,
@ -160,6 +165,12 @@ where
} }
} }
impl<C: CurveGroup> WitnessVarOps<C::ScalarField> for WitnessVar<C> {
fn get_openings(&self) -> Vec<(&[FpVar<C::ScalarField>], FpVar<C::ScalarField>)> {
vec![(&self.E, self.rE.clone()), (&self.W, self.rW.clone())]
}
}
/// Circuit that implements the in-circuit checks needed for the onchain (Ethereum's EVM) /// Circuit that implements the in-circuit checks needed for the onchain (Ethereum's EVM)
/// verification. /// verification.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -235,7 +246,7 @@ where
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&nova.poseidon_config); let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&nova.poseidon_config);
// compute the U_{i+1}, W_{i+1} // compute the U_{i+1}, W_{i+1}
let (T, cmT) = NIFS::<C1, CS1, H>::compute_cmT(
let (aux_p, aux_v) = NIFS::<C1, CS1, H>::compute_aux(
&nova.cs_pp, &nova.cs_pp,
&nova.r1cs.clone(), &nova.r1cs.clone(),
&nova.w_i.clone(), &nova.w_i.clone(),
@ -243,17 +254,18 @@ where
&nova.W_i.clone(), &nova.W_i.clone(),
&nova.U_i.clone(), &nova.U_i.clone(),
)?; )?;
let r_bits = ChallengeGadget::<C1>::get_challenge_native(
let cmT = aux_v;
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_native(
&mut transcript, &mut transcript,
nova.pp_hash, nova.pp_hash,
nova.U_i.clone(),
nova.u_i.clone(),
cmT,
&nova.U_i,
&nova.u_i,
Some(&cmT),
); );
let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?; .ok_or(Error::OutOfBounds)?;
let (W_i1, U_i1) = NIFS::<C1, CS1, H>::fold_instances(
r_Fr, &nova.W_i, &nova.U_i, &nova.w_i, &nova.u_i, &T, cmT,
let (W_i1, U_i1) = NIFS::<C1, CS1, H>::prove(
r_Fr, &nova.W_i, &nova.U_i, &nova.w_i, &nova.u_i, &aux_p, &aux_v,
)?; )?;
// compute the KZG challenges used as inputs in the circuit // compute the KZG challenges used as inputs in the circuit
@ -346,11 +358,8 @@ where
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()])) Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?; })?;
let u_dummy_native = CommittedInstance::<C1>::dummy(2);
let w_dummy_native = Witness::<C1>::dummy(
self.r1cs.A.n_cols - 3, /* (3=2+1, since u_i.x.len=2) */
self.E_len,
);
let u_dummy_native = CommittedInstance::<C1>::dummy(&self.r1cs);
let w_dummy_native = Witness::<C1>::dummy(&self.r1cs);
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || { let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone())) Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
@ -366,17 +375,18 @@ where
Ok(self.W_i1.unwrap_or(w_dummy_native.clone())) Ok(self.W_i1.unwrap_or(w_dummy_native.clone()))
})?; })?;
// allocate the inputs for the check 6
// allocate the inputs for the check 5.1
let kzg_c_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || { let kzg_c_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.kzg_c_W.unwrap_or_else(CF1::<C1>::zero)) Ok(self.kzg_c_W.unwrap_or_else(CF1::<C1>::zero))
})?; })?;
let kzg_c_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || { let kzg_c_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.kzg_c_E.unwrap_or_else(CF1::<C1>::zero)) Ok(self.kzg_c_E.unwrap_or_else(CF1::<C1>::zero))
})?; })?;
let _eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
// allocate the inputs for the check 5.2
let eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero)) Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero))
})?; })?;
let _eval_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
let eval_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_E.unwrap_or_else(CF1::<C1>::zero)) Ok(self.eval_E.unwrap_or_else(CF1::<C1>::zero))
})?; })?;
@ -385,10 +395,8 @@ where
// `transcript` is for challenge generation. // `transcript` is for challenge generation.
let mut transcript = sponge.clone(); let mut transcript = sponge.clone();
// 1. check RelaxedR1CS of U_{i+1}
let z_U1: Vec<FpVar<CF1<C1>>> =
[vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?;
// The following enumeration of the steps matches the one used at the documentation page
// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
// 2. u_i.cmE==cm(0), u_i.u==1 // 2. u_i.cmE==cm(0), u_i.u==1
// Here zero is the x & y coordinates of the zero point affine representation. // Here zero is the x & y coordinates of the zero point affine representation.
@ -398,15 +406,14 @@ where
(u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?; (u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
// 3.a u_i.x[0] == H(i, z_0, z_i, U_i) // 3.a u_i.x[0] == H(i, z_0, z_i, U_i)
let (u_i_x, U_i_vec) = U_i.clone().hash(
&sponge,
pp_hash.clone(),
i.clone(),
z_0.clone(),
z_i.clone(),
)?;
let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
(u_i.x[0]).enforce_equal(&u_i_x)?; (u_i.x[0]).enforce_equal(&u_i_x)?;
// 4. check RelaxedR1CS of U_{i+1}
let z_U1: Vec<FpVar<CF1<C1>>> =
[vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?;
#[cfg(feature = "light-test")] #[cfg(feature = "light-test")]
log::warn!("[WARNING]: Running with the 'light-test' feature, skipping the big part of the DeciderEthCircuit.\n Only for testing purposes."); log::warn!("[WARNING]: Running with the 'light-test' feature, skipping the big part of the DeciderEthCircuit.\n Only for testing purposes.");
@ -429,10 +436,7 @@ where
let cf_u_dummy_native = let cf_u_dummy_native =
CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN); CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN);
let w_dummy_native = CycleFoldWitness::<C2>::dummy(
self.cf_r1cs.A.n_cols - 1 - self.cf_r1cs.l,
self.cf_E_len,
);
let w_dummy_native = CycleFoldWitness::<C2>::dummy(&self.cf_r1cs);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || { let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone())) Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?; })?;
@ -444,7 +448,7 @@ where
let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
(u_i.x[1]).enforce_equal(&cf_u_i_x)?; (u_i.x[1]).enforce_equal(&cf_u_i_x)?;
// 4. check Pedersen commitments of cf_U_i.{cmE, cmW}
// 7. check Pedersen commitments of cf_U_i.{cmE, cmW}
let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?; let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?;
let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?; let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?;
let cf_W_i_E_bits: Result<Vec<Vec<Boolean<CF1<C1>>>>, SynthesisError> = let cf_W_i_E_bits: Result<Vec<Vec<Boolean<CF1<C1>>>>, SynthesisError> =
@ -469,43 +473,39 @@ where
|| Ok(self.cf_r1cs.clone()), || Ok(self.cf_r1cs.clone()),
)?; )?;
// 5. check RelaxedR1CS of cf_U_i
// 6. check RelaxedR1CS of cf_U_i (CycleFold instance)
let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat(); let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?; RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?;
} }
// 8.a, 6.a compute NIFS.V and KZG challenges.
// 1.1.a, 5.1. compute NIFS.V and KZG challenges.
// We need to ensure the order of challenge generation is the same as // We need to ensure the order of challenge generation is the same as
// the native counterpart, so we first compute the challenges here and // the native counterpart, so we first compute the challenges here and
// do the actual checks later. // do the actual checks later.
let cmT = let cmT =
NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?; NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?;
let r_bits = ChallengeGadget::<C1>::get_challenge_gadget(
// 1.1.a
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_gadget(
&mut transcript, &mut transcript,
pp_hash, pp_hash,
U_i_vec, U_i_vec,
u_i.clone(), u_i.clone(),
cmT.clone(),
Some(cmT),
)?; )?;
// 5.1.
let (incircuit_c_W, incircuit_c_E) = let (incircuit_c_W, incircuit_c_E) =
KZGChallengesGadget::<C1>::get_challenges_gadget(&mut transcript, U_i1.clone())?; KZGChallengesGadget::<C1>::get_challenges_gadget(&mut transcript, U_i1.clone())?;
// 6.b check KZG challenges
incircuit_c_W.enforce_equal(&kzg_c_W)?; incircuit_c_W.enforce_equal(&kzg_c_W)?;
incircuit_c_E.enforce_equal(&kzg_c_E)?; incircuit_c_E.enforce_equal(&kzg_c_E)?;
// Check 7 is temporary disabled due
// https://github.com/privacy-scaling-explorations/sonobe/issues/80
log::warn!("[WARNING]: issue #80 (https://github.com/privacy-scaling-explorations/sonobe/issues/80) is not resolved yet.");
//
// 7. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
// let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.W, incircuit_c_W)?;
// let incircuit_eval_E = evaluate_gadget::<CF1<C1>>(W_i1.E, incircuit_c_E)?;
// incircuit_eval_W.enforce_equal(&eval_W)?;
// incircuit_eval_E.enforce_equal(&eval_E)?;
// 8.b check the NIFS.V challenge matches the one from the public input (so we
// avoid the verifier computing it)
// 5.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.W, incircuit_c_W)?;
let incircuit_eval_E = evaluate_gadget::<CF1<C1>>(W_i1.E, incircuit_c_E)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
incircuit_eval_E.enforce_equal(&eval_E)?;
// 1.1.b check that the NIFS.V challenge matches the one from the public input (so we avoid
// the verifier computing it)
let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?; let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?;
// check that the in-circuit computed r is equal to the inputted r // check that the in-circuit computed r is equal to the inputted r
let r = let r =
@ -519,13 +519,11 @@ where
/// Interpolates the polynomial from the given vector, and then returns its evaluation at the /// Interpolates the polynomial from the given vector, and then returns its evaluation at the
/// given point. /// given point.
#[allow(unused)] // unused while check 7 is disabled #[allow(unused)] // unused while check 7 is disabled
fn evaluate_gadget<F: PrimeField>(
v: Vec<FpVar<F>>,
pub fn evaluate_gadget<F: PrimeField>(
mut v: Vec<FpVar<F>>,
point: FpVar<F>, point: FpVar<F>,
) -> Result<FpVar<F>, SynthesisError> { ) -> Result<FpVar<F>, SynthesisError> {
if !v.len().is_power_of_two() {
return Err(SynthesisError::Unsatisfiable);
}
v.resize(v.len().next_power_of_two(), FpVar::zero());
let n = v.len() as u64; let n = v.len() as u64;
let gen = F::get_root_of_unity(n).unwrap(); let gen = F::get_root_of_unity(n).unwrap();
let domain = Radix2DomainVar::new(gen, log2(v.len()) as u64, FpVar::one()).unwrap(); let domain = Radix2DomainVar::new(gen, log2(v.len()) as u64, FpVar::one()).unwrap();
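For intuition: `evaluate_gadget` treats `v` as the evaluations of a polynomial over a power-of-two domain, interpolates that polynomial, and returns its evaluation at `point` (now padding `v` with zeroes instead of rejecting non-power-of-two lengths). A rough native analogue over a toy prime field and a generic domain, not the `FpVar`/`Radix2DomainVar` machinery used in-circuit:

// Toy prime-field arithmetic mod a small prime; not the arkworks field types.
const P: u64 = 97;
fn addp(a: u64, b: u64) -> u64 { (a + b) % P }
fn subp(a: u64, b: u64) -> u64 { (a + P - b % P) % P }
fn mulp(a: u64, b: u64) -> u64 { (a * b) % P }
fn powp(mut a: u64, mut e: u64) -> u64 {
    let mut r = 1;
    while e > 0 {
        if e & 1 == 1 { r = mulp(r, a); }
        a = mulp(a, a);
        e >>= 1;
    }
    r
}
fn invp(a: u64) -> u64 { powp(a, P - 2) } // Fermat's little theorem (P prime, a != 0)

/// Lagrange-interpolate the polynomial with p(domain[i]) = evals[i], return p(point).
fn interpolate_and_evaluate(domain: &[u64], evals: &[u64], point: u64) -> u64 {
    let mut acc = 0;
    for (i, (&xi, &yi)) in domain.iter().zip(evals).enumerate() {
        // L_i(point) = prod_{j != i} (point - x_j) / (x_i - x_j)
        let mut li = 1;
        for (j, &xj) in domain.iter().enumerate() {
            if i != j {
                li = mulp(li, mulp(subp(point, xj), invp(subp(xi, xj))));
            }
        }
        acc = addp(acc, mulp(yi, li));
    }
    acc
}

fn main() {
    // evaluations of p(X) = 3X + 5 over the domain {1, 2, 3, 4}
    let domain = [1, 2, 3, 4];
    let evals = [8, 11, 14, 17];
    assert_eq!(interpolate_and_evaluate(&domain, &evals, 10), 35);
}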
@ -591,7 +589,7 @@ pub mod tests {
use ark_relations::r1cs::ConstraintSystem; use ark_relations::r1cs::ConstraintSystem;
use ark_std::{ use ark_std::{
rand::{thread_rng, Rng}, rand::{thread_rng, Rng},
One, UniformRand,
UniformRand,
}; };
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2}; use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
@ -600,7 +598,6 @@ pub mod tests {
r1cs::{ r1cs::{
extract_r1cs, extract_w_x, extract_r1cs, extract_w_x,
tests::{get_test_r1cs, get_test_z}, tests::{get_test_r1cs, get_test_z},
RelaxedR1CS,
}, },
Arith, Arith,
}; };
@ -610,20 +607,18 @@ pub mod tests {
use crate::transcript::poseidon::poseidon_canonical_config; use crate::transcript::poseidon::poseidon_canonical_config;
use crate::FoldingScheme; use crate::FoldingScheme;
fn prepare_instances<C: CurveGroup, CS: CommitmentScheme<C>, R: Rng>(
// Convert `z` to a witness-instance pair for the relaxed R1CS
fn prepare_relaxed_witness_instance<C: CurveGroup, CS: CommitmentScheme<C>, R: Rng>(
mut rng: R, mut rng: R,
r1cs: &R1CS<C::ScalarField>, r1cs: &R1CS<C::ScalarField>,
z: &[C::ScalarField], z: &[C::ScalarField],
) -> (Witness<C>, CommittedInstance<C>)
where
C::ScalarField: Absorb,
{
) -> (Witness<C>, CommittedInstance<C>) {
let (w, x) = r1cs.split_z(z); let (w, x) = r1cs.split_z(z);
let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows)).unwrap(); let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows)).unwrap();
let mut w = Witness::new::<false>(w, r1cs.A.n_rows, &mut rng); let mut w = Witness::new::<false>(w, r1cs.A.n_rows, &mut rng);
w.E = r1cs.eval_relation(z).unwrap();
w.E = r1cs.eval_at_z(z).unwrap();
let mut u = w.commit::<CS, false>(&cs_pp, x).unwrap(); let mut u = w.commit::<CS, false>(&cs_pp, x).unwrap();
u.u = z[0]; u.u = z[0];
@ -635,9 +630,10 @@ pub mod tests {
let rng = &mut thread_rng(); let rng = &mut thread_rng();
let r1cs: R1CS<Fr> = get_test_r1cs(); let r1cs: R1CS<Fr> = get_test_r1cs();
let mut z = get_test_z(3); let mut z = get_test_z(3);
z[0] = Fr::rand(rng);
let (w, u) = prepare_instances::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
z[0] = Fr::rand(rng); // Randomize `z[0]` (i.e. `u.u`) to test the relaxed R1CS
let (w, u) = prepare_relaxed_witness_instance::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
let cs = ConstraintSystem::<Fr>::new_ref(); let cs = ConstraintSystem::<Fr>::new_ref();
@ -665,12 +661,11 @@ pub mod tests {
let r1cs = extract_r1cs::<Fr>(&cs); let r1cs = extract_r1cs::<Fr>(&cs);
let (w, x) = extract_w_x::<Fr>(&cs); let (w, x) = extract_w_x::<Fr>(&cs);
let mut z = [vec![Fr::one()], x, w].concat();
r1cs.check_relation(&z).unwrap();
r1cs.check_relation(&w, &x).unwrap();
z[0] = Fr::rand(rng);
let (w, u) = prepare_instances::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
r1cs.check_relaxed_relation(&w, &u).unwrap();
let z = [vec![Fr::rand(rng)], x, w].concat();
let (w, u) = prepare_relaxed_witness_instance::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
r1cs.check_relation(&w, &u).unwrap();
// set new CS for the circuit that checks the RelaxedR1CS of our original circuit // set new CS for the circuit that checks the RelaxedR1CS of our original circuit
let cs = ConstraintSystem::<Fr>::new_ref(); let cs = ConstraintSystem::<Fr>::new_ref();
@ -759,9 +754,10 @@ pub mod tests {
let cs = cs.into_inner().unwrap(); let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fq>(&cs); let r1cs = extract_r1cs::<Fq>(&cs);
let (w, x) = extract_w_x::<Fq>(&cs); let (w, x) = extract_w_x::<Fq>(&cs);
let z = [vec![Fq::rand(rng)], x, w].concat();
let (w, u) = prepare_instances::<_, Pedersen<Projective2>, _>(rng, &r1cs, &z);
let z = [vec![Fq::rand(rng)], x, w].concat();
let (w, u) =
prepare_relaxed_witness_instance::<_, Pedersen<Projective2>, _>(rng, &r1cs, &z);
// natively // natively
let cs = ConstraintSystem::<Fq>::new_ref(); let cs = ConstraintSystem::<Fq>::new_ref();
@ -783,7 +779,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_decider_circuit() {
fn test_decider_eth_circuit() {
let mut rng = ark_std::test_rng(); let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>(); let poseidon_config = poseidon_canonical_config::<Fr>();
@ -814,21 +810,11 @@ pub mod tests {
// generate a Nova instance and do a step of it // generate a Nova instance and do a step of it
let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap(); let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap();
nova.prove_step(&mut rng, vec![], None).unwrap(); nova.prove_step(&mut rng, vec![], None).unwrap();
let ivc_v = nova.clone();
let (running_instance, incoming_instance, cyclefold_instance) = ivc_v.instances();
N::verify(
nova_params.1, // verifier_params
z_0,
ivc_v.z_i,
Fr::one(),
running_instance,
incoming_instance,
cyclefold_instance,
)
.unwrap();
let ivc_proof = nova.ivc_proof();
N::verify(nova_params.1, ivc_proof).unwrap();
// load the DeciderEthCircuit from the generated Nova instance
let decider_circuit = DeciderEthCircuit::<
// load the DeciderEthCircuit from the Nova instance
let decider_eth_circuit = DeciderEthCircuit::<
Projective, Projective,
GVar, GVar,
Projective2, Projective2,
@ -841,7 +827,9 @@ pub mod tests {
let cs = ConstraintSystem::<Fr>::new_ref(); let cs = ConstraintSystem::<Fr>::new_ref();
// generate the constraints and check that are satisfied by the inputs // generate the constraints and check that are satisfied by the inputs
decider_circuit.generate_constraints(cs.clone()).unwrap();
decider_eth_circuit
.generate_constraints(cs.clone())
.unwrap();
assert!(cs.is_satisfied().unwrap()); assert!(cs.is_satisfied().unwrap());
} }
@ -880,10 +868,6 @@ pub mod tests {
assert_eq!(challenge_E_Var.value().unwrap(), challenge_E); assert_eq!(challenge_E_Var.value().unwrap(), challenge_E);
} }
// The test test_polynomial_interpolation is temporary disabled due
// https://github.com/privacy-scaling-explorations/sonobe/issues/80
// for n<=11 it will work, but for n>11 it will fail with stack overflow.
#[ignore]
#[test] #[test]
fn test_polynomial_interpolation() { fn test_polynomial_interpolation() {
let mut rng = ark_std::test_rng(); let mut rng = ark_std::test_rng();

+ 309
- 149
folding-schemes/src/folding/nova/mod.rs

@ -13,33 +13,46 @@ use ark_std::fmt::Debug;
use ark_std::rand::RngCore; use ark_std::rand::RngCore;
use ark_std::{One, UniformRand, Zero}; use ark_std::{One, UniformRand, Zero};
use core::marker::PhantomData; use core::marker::PhantomData;
use decider_eth_circuit::WitnessVar;
use crate::folding::circuits::cyclefold::{ use crate::folding::circuits::cyclefold::{
fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig, fold_cyclefold_circuit, CycleFoldCircuit, CycleFoldCommittedInstance, CycleFoldConfig,
CycleFoldWitness, CycleFoldWitness,
}; };
use crate::folding::circuits::CF2;
use crate::folding::{
circuits::{CF1, CF2},
traits::Dummy,
};
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::transcript::{poseidon::poseidon_canonical_config, AbsorbNonNative, Transcript}; use crate::transcript::{poseidon::poseidon_canonical_config, AbsorbNonNative, Transcript};
use crate::utils::vec::is_zero_vec; use crate::utils::vec::is_zero_vec;
use crate::Error; use crate::Error;
use crate::FoldingScheme; use crate::FoldingScheme;
use crate::{arith::r1cs::RelaxedR1CS, commitment::CommitmentScheme};
use crate::{ use crate::{
arith::r1cs::{extract_r1cs, extract_w_x, R1CS}, arith::r1cs::{extract_r1cs, extract_w_x, R1CS},
constants::NOVA_N_BITS_RO, constants::NOVA_N_BITS_RO,
utils::{get_cm_coordinates, pp_hash}, utils::{get_cm_coordinates, pp_hash},
}; };
use crate::{arith::Arith, commitment::CommitmentScheme};
pub mod circuits; pub mod circuits;
pub mod decider_eth;
pub mod decider_eth_circuit;
pub mod nifs; pub mod nifs;
pub mod serialize;
pub mod ova;
pub mod traits; pub mod traits;
pub mod zk; pub mod zk;
use circuits::{AugmentedFCircuit, ChallengeGadget};
use circuits::{AugmentedFCircuit, ChallengeGadget, CommittedInstanceVar};
use nifs::NIFS; use nifs::NIFS;
use traits::NIFSTrait;
// offchain decider
pub mod decider;
pub mod decider_circuits;
// onchain decider
pub mod decider_eth;
pub mod decider_eth_circuit;
use super::traits::{CommittedInstanceOps, WitnessOps};
/// Configuration for Nova's CycleFold circuit /// Configuration for Nova's CycleFold circuit
pub struct NovaCycleFoldConfig<C: CurveGroup> { pub struct NovaCycleFoldConfig<C: CurveGroup> {
@ -68,24 +81,29 @@ pub struct CommittedInstance {
pub x: Vec<C::ScalarField>, pub x: Vec<C::ScalarField>,
} }
impl<C: CurveGroup> CommittedInstance<C> {
pub fn dummy(io_len: usize) -> Self {
impl<C: CurveGroup> Dummy<usize> for CommittedInstance<C> {
fn dummy(io_len: usize) -> Self {
Self { Self {
cmE: C::zero(), cmE: C::zero(),
u: C::ScalarField::zero(),
u: CF1::<C>::zero(),
cmW: C::zero(), cmW: C::zero(),
x: vec![C::ScalarField::zero(); io_len],
x: vec![CF1::<C>::zero(); io_len],
} }
} }
} }
impl<C: CurveGroup> Dummy<&R1CS<CF1<C>>> for CommittedInstance<C> {
fn dummy(r1cs: &R1CS<CF1<C>>) -> Self {
Self::dummy(r1cs.l)
}
}
impl<C: CurveGroup> Absorb for CommittedInstance<C> impl<C: CurveGroup> Absorb for CommittedInstance<C>
where where
C::ScalarField: Absorb, C::ScalarField: Absorb,
{ {
fn to_sponge_bytes(&self, _dest: &mut Vec<u8>) {
// This is never called
unimplemented!()
fn to_sponge_bytes(&self, dest: &mut Vec<u8>) {
C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest);
} }
fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) { fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) {
@ -103,30 +121,15 @@ where
} }
} }
impl<C: CurveGroup> CommittedInstance<C>
where
<C as Group>::ScalarField: Absorb,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
/// hash implements the committed instance hash compatible with the gadget implemented in
/// nova/circuits.rs::CommittedInstanceVar.hash.
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the
/// `CommittedInstance`.
pub fn hash<T: Transcript<C::ScalarField>>(
&self,
sponge: &T,
pp_hash: C::ScalarField, // public params hash
i: C::ScalarField,
z_0: Vec<C::ScalarField>,
z_i: Vec<C::ScalarField>,
) -> C::ScalarField {
let mut sponge = sponge.clone();
sponge.absorb(&pp_hash);
sponge.absorb(&i);
sponge.absorb(&z_0);
sponge.absorb(&z_i);
sponge.absorb(&self);
sponge.squeeze_field_elements(1)[0]
impl<C: CurveGroup> CommittedInstanceOps<C> for CommittedInstance<C> {
type Var = CommittedInstanceVar<C>;
fn get_commitments(&self) -> Vec<C> {
vec![self.cmW, self.cmE]
}
fn is_incoming(&self) -> bool {
self.cmE == C::zero() && self.u == One::one()
} }
} }
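The `u_i.x[0]` value checked in step 3.a of the decider circuits is the IVC state hash H(pp_hash, i, z_0, z_i, U_i). A native sketch that follows the pattern of the inherent `hash` method removed just above; the trait-level implementation that replaces it (in `folding/traits.rs`) may differ in its exact absorption details:

// Sketch (not this crate's canonical implementation) of the IVC state hash
// H(pp_hash, i, z_0, z_i, U_i), mirroring the removed inherent method above.
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge};
use ark_pallas::{Fr, Projective};

use crate::folding::nova::CommittedInstance;
use crate::transcript::poseidon::poseidon_canonical_config;

fn ivc_state_hash(
    pp_hash: Fr,
    i: Fr,
    z_0: Vec<Fr>,
    z_i: Vec<Fr>,
    u: &CommittedInstance<Projective>,
) -> Fr {
    let poseidon_config = poseidon_canonical_config::<Fr>();
    let mut sponge = PoseidonSponge::<Fr>::new(&poseidon_config);
    sponge.absorb(&pp_hash);
    sponge.absorb(&i);
    sponge.absorb(&z_0);
    sponge.absorb(&z_i);
    sponge.absorb(&u);
    sponge.squeeze_field_elements(1)[0]
}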
@ -157,18 +160,6 @@ impl Witness {
} }
} }
pub fn dummy(w_len: usize, e_len: usize) -> Self {
let (rW, rE) = (C::ScalarField::zero(), C::ScalarField::zero());
let w = vec![C::ScalarField::zero(); w_len];
Self {
E: vec![C::ScalarField::zero(); e_len],
rE,
W: w,
rW,
}
}
pub fn commit<CS: CommitmentScheme<C, HC>, const HC: bool>( pub fn commit<CS: CommitmentScheme<C, HC>, const HC: bool>(
&self, &self,
params: &CS::ProverParams, params: &CS::ProverParams,
@ -188,6 +179,25 @@ impl Witness {
} }
} }
impl<C: CurveGroup> Dummy<&R1CS<CF1<C>>> for Witness<C> {
fn dummy(r1cs: &R1CS<CF1<C>>) -> Self {
Self {
E: vec![C::ScalarField::zero(); r1cs.A.n_rows],
rE: C::ScalarField::zero(),
W: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l],
rW: C::ScalarField::zero(),
}
}
}
impl<C: CurveGroup> WitnessOps<C::ScalarField> for Witness<C> {
type Var = WitnessVar<C>;
fn get_openings(&self) -> Vec<(&[C::ScalarField], C::ScalarField)> {
vec![(&self.W, self.rW), (&self.E, self.rE)]
}
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct PreprocessorParam<C1, C2, FC, CS1, CS2, const H: bool = false> pub struct PreprocessorParam<C1, C2, FC, CS1, CS2, const H: bool = false>
where where
@ -334,15 +344,6 @@ where
CS2: CommitmentScheme<C2, H>, CS2: CommitmentScheme<C2, H>,
{ {
fn check(&self) -> Result<(), ark_serialize::SerializationError> { fn check(&self) -> Result<(), ark_serialize::SerializationError> {
self.poseidon_config.full_rounds.check()?;
self.poseidon_config.partial_rounds.check()?;
self.poseidon_config.alpha.check()?;
self.poseidon_config.ark.check()?;
self.poseidon_config.mds.check()?;
self.poseidon_config.rate.check()?;
self.poseidon_config.capacity.check()?;
self.r1cs.check()?;
self.cf_r1cs.check()?;
self.cs_vp.check()?; self.cs_vp.check()?;
self.cf_cs_vp.check()?; self.cf_cs_vp.check()?;
Ok(()) Ok(())
@ -360,42 +361,12 @@ where
mut writer: W, mut writer: W,
compress: ark_serialize::Compress, compress: ark_serialize::Compress,
) -> Result<(), ark_serialize::SerializationError> { ) -> Result<(), ark_serialize::SerializationError> {
self.r1cs.serialize_with_mode(&mut writer, compress)?;
self.cf_r1cs.serialize_with_mode(&mut writer, compress)?;
self.cs_vp.serialize_with_mode(&mut writer, compress)?; self.cs_vp.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_vp.serialize_with_mode(&mut writer, compress) self.cf_cs_vp.serialize_with_mode(&mut writer, compress)
} }
fn serialized_size(&self, compress: ark_serialize::Compress) -> usize { fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
self.r1cs.serialized_size(compress)
+ self.cf_r1cs.serialized_size(compress)
+ self.cs_vp.serialized_size(compress)
+ self.cf_cs_vp.serialized_size(compress)
}
}
impl<C1, C2, CS1, CS2, const H: bool> CanonicalDeserialize for VerifierParams<C1, C2, CS1, CS2, H>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
{
fn deserialize_with_mode<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
) -> Result<Self, ark_serialize::SerializationError> {
let r1cs = R1CS::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_r1cs = R1CS::deserialize_with_mode(&mut reader, compress, validate)?;
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(VerifierParams {
poseidon_config: poseidon_canonical_config::<C1::ScalarField>(),
r1cs,
cf_r1cs,
cs_vp,
cf_cs_vp,
})
self.cs_vp.serialized_size(compress) + self.cf_cs_vp.serialized_size(compress)
} }
} }
@ -418,6 +389,29 @@ where
} }
} }
#[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)]
pub struct IVCProof<C1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
{
// current step of the IVC
pub i: C1::ScalarField,
// initial state
pub z_0: Vec<C1::ScalarField>,
// current state
pub z_i: Vec<C1::ScalarField>,
// running instance
pub W_i: Witness<C1>,
pub U_i: CommittedInstance<C1>,
// incoming instance
pub w_i: Witness<C1>,
pub u_i: CommittedInstance<C1>,
// CycleFold instances
pub cf_W_i: CycleFoldWitness<C2>,
pub cf_U_i: CycleFoldCommittedInstance<C2>,
}
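Typical (abridged) usage of the new `IVCProof`: it bundles the step counter, states, and the running/incoming/CycleFold instances and witnesses that `verify` consumes. The fragment below is not self-contained; it assumes the `nova`, `nova_params` and `N = Nova<...>` setup used in the serialization test further down in this file:

// Fragment: `nova`, `nova_params`, `N`, `Projective`, `Projective2` and
// `CubicFCircuit` are assumed from the test setup shown below.
let ivc_proof = nova.ivc_proof();

let mut bytes = vec![];
ivc_proof.serialize_compressed(&mut bytes).unwrap();

let ivc_proof_deserialized =
    <N as FoldingScheme<Projective, Projective2, CubicFCircuit<Fr>>>::IVCProof::deserialize_compressed(
        bytes.as_slice(),
    )
    .unwrap();

N::verify(nova_params.1, ivc_proof_deserialized).unwrap();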
/// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and /// Implements Nova+CycleFold's IVC, described in [Nova](https://eprint.iacr.org/2021/370.pdf) and
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait /// [CycleFold](https://eprint.iacr.org/2023/1192.pdf), following the FoldingScheme trait
/// The `H` const generic specifies whether the homomorphic commitment scheme is blinding /// The `H` const generic specifies whether the homomorphic commitment scheme is blinding
@ -489,6 +483,58 @@ where
type IncomingInstance = (CommittedInstance<C1>, Witness<C1>); type IncomingInstance = (CommittedInstance<C1>, Witness<C1>);
type MultiCommittedInstanceWithWitness = (); type MultiCommittedInstanceWithWitness = ();
type CFInstance = (CycleFoldCommittedInstance<C2>, CycleFoldWitness<C2>); type CFInstance = (CycleFoldCommittedInstance<C2>, CycleFoldWitness<C2>);
type IVCProof = IVCProof<C1, C2>;
fn pp_deserialize_with_mode<R: std::io::prelude::Read>(
reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
_fc_params: FC::Params, // FCircuit params
) -> Result<Self::ProverParam, Error> {
Ok(Self::ProverParam::deserialize_with_mode(
reader, compress, validate,
)?)
}
fn vp_deserialize_with_mode<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
fc_params: FC::Params,
) -> Result<Self::VerifierParam, Error> {
let poseidon_config = poseidon_canonical_config::<C1::ScalarField>();
// generate the r1cs & cf_r1cs needed for the VerifierParams. In this way we avoid needing
// to serialize them, saving significant space in the VerifierParams serialized size.
// main circuit R1CS:
let f_circuit = FC::new(fc_params)?;
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
let augmented_F_circuit =
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&poseidon_config, f_circuit.clone());
augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
// CycleFold circuit R1CS
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
let cf_circuit = NovaCycleFoldCircuit::<C1, GC1>::empty();
cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(Self::VerifierParam {
poseidon_config,
r1cs,
cf_r1cs,
cs_vp,
cf_cs_vp,
})
}
fn preprocess( fn preprocess(
mut rng: impl RngCore, mut rng: impl RngCore,
@ -562,9 +608,9 @@ where
let pp_hash = vp.pp_hash()?; let pp_hash = vp.pp_hash()?;
// setup the dummy instances // setup the dummy instances
let (W_dummy, U_dummy) = r1cs.dummy_running_instance();
let (w_dummy, u_dummy) = r1cs.dummy_incoming_instance();
let (cf_W_dummy, cf_U_dummy) = cf_r1cs.dummy_running_instance();
let (W_dummy, U_dummy) = r1cs.dummy_witness_instance();
let (w_dummy, u_dummy) = r1cs.dummy_witness_instance();
let (cf_W_dummy, cf_U_dummy) = cf_r1cs.dummy_witness_instance();
// W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the
// R1CS that we're working with. // R1CS that we're working with.
@ -669,15 +715,16 @@ where
.step_native(i_usize, self.z_i.clone(), external_inputs.clone())?; .step_native(i_usize, self.z_i.clone(), external_inputs.clone())?;
// compute T and cmT for AugmentedFCircuit // compute T and cmT for AugmentedFCircuit
let (T, cmT) = self.compute_cmT()?;
let (aux_p, aux_v) = self.compute_cmT()?;
let cmT = aux_v;
// r_bits is the r used to the RLC of the F' instances // r_bits is the r used to the RLC of the F' instances
let r_bits = ChallengeGadget::<C1>::get_challenge_native(
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_native(
&mut transcript, &mut transcript,
self.pp_hash, self.pp_hash,
self.U_i.clone(),
self.u_i.clone(),
cmT,
&self.U_i,
&self.u_i,
Some(&cmT),
); );
let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?; .ok_or(Error::OutOfBounds)?;
@ -685,10 +732,9 @@ where
.ok_or(Error::OutOfBounds)?; .ok_or(Error::OutOfBounds)?;
// fold Nova instances // fold Nova instances
let (W_i1, U_i1): (Witness<C1>, CommittedInstance<C1>) =
NIFS::<C1, CS1, H>::fold_instances(
r_Fr, &self.W_i, &self.U_i, &self.w_i, &self.u_i, &T, cmT,
)?;
let (W_i1, U_i1): (Witness<C1>, CommittedInstance<C1>) = NIFS::<C1, CS1, H>::prove(
r_Fr, &self.W_i, &self.U_i, &self.w_i, &self.u_i, &aux_p, &aux_v,
)?;
// folded instance output (public input, x) // folded instance output (public input, x)
// u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1}) // u_{i+1}.x[0] = H(i+1, z_0, z_{i+1}, U_{i+1})
@ -696,8 +742,8 @@ where
&sponge, &sponge,
self.pp_hash, self.pp_hash,
self.i + C1::ScalarField::one(), self.i + C1::ScalarField::one(),
self.z_0.clone(),
z_i1.clone(),
&self.z_0,
&z_i1,
); );
// u_{i+1}.x[1] = H(cf_U_{i+1}) // u_{i+1}.x[1] = H(cf_U_{i+1})
let cf_u_i1_x: C1::ScalarField; let cf_u_i1_x: C1::ScalarField;
@ -815,10 +861,11 @@ where
#[cfg(test)] #[cfg(test)]
{ {
self.cf_r1cs.check_tight_relation(&_cfW_w_i, &cfW_u_i)?;
self.cf_r1cs.check_tight_relation(&_cfE_w_i, &cfE_u_i)?;
self.cf_r1cs
.check_relaxed_relation(&self.cf_W_i, &self.cf_U_i)?;
cfW_u_i.check_incoming()?;
cfE_u_i.check_incoming()?;
self.cf_r1cs.check_relation(&_cfW_w_i, &cfW_u_i)?;
self.cf_r1cs.check_relation(&_cfE_w_i, &cfE_u_i)?;
self.cf_r1cs.check_relation(&self.cf_W_i, &self.cf_U_i)?;
} }
} }
@ -850,8 +897,9 @@ where
#[cfg(test)] #[cfg(test)]
{ {
self.r1cs.check_tight_relation(&self.w_i, &self.u_i)?;
self.r1cs.check_relaxed_relation(&self.W_i, &self.U_i)?;
self.u_i.check_incoming()?;
self.r1cs.check_relation(&self.w_i, &self.u_i)?;
self.r1cs.check_relation(&self.W_i, &self.U_i)?;
} }
Ok(()) Ok(())
@ -861,31 +909,93 @@ where
self.z_i.clone() self.z_i.clone()
} }
fn instances(
&self,
) -> (
Self::RunningInstance,
Self::IncomingInstance,
Self::CFInstance,
) {
(
(self.U_i.clone(), self.W_i.clone()),
(self.u_i.clone(), self.w_i.clone()),
(self.cf_U_i.clone(), self.cf_W_i.clone()),
)
fn ivc_proof(&self) -> Self::IVCProof {
Self::IVCProof {
i: self.i,
z_0: self.z_0.clone(),
z_i: self.z_i.clone(),
W_i: self.W_i.clone(),
U_i: self.U_i.clone(),
w_i: self.w_i.clone(),
u_i: self.u_i.clone(),
cf_W_i: self.cf_W_i.clone(),
cf_U_i: self.cf_U_i.clone(),
}
}
fn from_ivc_proof(
ivc_proof: IVCProof<C1, C2>,
fcircuit_params: FC::Params,
params: (Self::ProverParam, Self::VerifierParam),
) -> Result<Self, Error> {
let IVCProof {
i,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
} = ivc_proof;
let (pp, vp) = params;
let f_circuit = FC::new(fcircuit_params).unwrap();
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
let augmented_F_circuit =
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&pp.poseidon_config, f_circuit.clone());
let cf_circuit = NovaCycleFoldCircuit::<C1, GC1>::empty();
augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
Ok(Self {
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
r1cs,
cf_r1cs,
poseidon_config: pp.poseidon_config,
cs_pp: pp.cs_pp,
cf_cs_pp: pp.cf_cs_pp,
F: f_circuit,
pp_hash: vp.pp_hash()?,
i,
z_0,
z_i,
w_i,
u_i,
W_i,
U_i,
cf_W_i,
cf_U_i,
})
} }
/// Implements IVC.V of Nova+CycleFold. Notice that this method does not include the
/// Implements IVC.V of Nova+CycleFold. Notice that this method does not include the
/// commitments verification, which is done in the Decider. /// commitments verification, which is done in the Decider.
fn verify(
vp: Self::VerifierParam,
z_0: Vec<C1::ScalarField>, // initial state
z_i: Vec<C1::ScalarField>, // last state
num_steps: C1::ScalarField,
running_instance: Self::RunningInstance,
incoming_instance: Self::IncomingInstance,
cyclefold_instance: Self::CFInstance,
) -> Result<(), Error> {
fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error> {
let Self::IVCProof {
i: num_steps,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
} = ivc_proof;
let sponge = PoseidonSponge::<C1::ScalarField>::new(&vp.poseidon_config); let sponge = PoseidonSponge::<C1::ScalarField>::new(&vp.poseidon_config);
if num_steps == C1::ScalarField::zero() { if num_steps == C1::ScalarField::zero() {
@ -895,10 +1005,6 @@ where
return Ok(()); return Ok(());
} }
let (U_i, W_i) = running_instance;
let (u_i, w_i) = incoming_instance;
let (cf_U_i, cf_W_i) = cyclefold_instance;
if u_i.x.len() != 2 || U_i.x.len() != 2 { if u_i.x.len() != 2 || U_i.x.len() != 2 {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
@ -907,7 +1013,7 @@ where
// check that u_i's output points to the running instance // check that u_i's output points to the running instance
// u_i.X[0] == H(i, z_0, z_i, U_i) // u_i.X[0] == H(i, z_0, z_i, U_i)
let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone());
let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i);
if expected_u_i_x != u_i.x[0] { if expected_u_i_x != u_i.x[0] {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
@ -917,13 +1023,15 @@ where
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
// check R1CS satisfiability, which also enforces u_i.cmE==0, u_i.u==1
vp.r1cs.check_tight_relation(&w_i, &u_i)?;
// check R1CS satisfiability, which is equivalent to checking if `u_i`
// is an incoming instance and if `w_i` and `u_i` satisfy RelaxedR1CS
u_i.check_incoming()?;
vp.r1cs.check_relation(&w_i, &u_i)?;
// check RelaxedR1CS satisfiability // check RelaxedR1CS satisfiability
vp.r1cs.check_relaxed_relation(&W_i, &U_i)?;
vp.r1cs.check_relation(&W_i, &U_i)?;
// check CycleFold RelaxedR1CS satisfiability // check CycleFold RelaxedR1CS satisfiability
vp.cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i)?;
vp.cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?;
Ok(()) Ok(())
} }
@ -945,7 +1053,7 @@ where
{ {
// computes T and cmT for the AugmentedFCircuit // computes T and cmT for the AugmentedFCircuit
fn compute_cmT(&self) -> Result<(Vec<C1::ScalarField>, C1), Error> { fn compute_cmT(&self) -> Result<(Vec<C1::ScalarField>, C1), Error> {
NIFS::<C1, CS1, H>::compute_cmT(
NIFS::<C1, CS1, H>::compute_aux(
&self.cs_pp, &self.cs_pp,
&self.r1cs, &self.r1cs,
&self.w_i, &self.w_i,
@ -1160,15 +1268,67 @@ pub mod tests {
} }
assert_eq!(Fr::from(num_steps as u32), nova.i); assert_eq!(Fr::from(num_steps as u32), nova.i);
let (running_instance, incoming_instance, cyclefold_instance) = nova.instances();
// serialize the Nova Prover & Verifier params. These params are the trusted setup of the commitment schemes used
let mut nova_pp_serialized = vec![];
nova_params
.0
.serialize_compressed(&mut nova_pp_serialized)
.unwrap();
let mut nova_vp_serialized = vec![];
nova_params
.1
.serialize_compressed(&mut nova_vp_serialized)
.unwrap();
// deserialize the Nova params
let _nova_pp_deserialized =
ProverParams::<Projective, Projective2, CS1, CS2, H>::deserialize_compressed(
&mut nova_pp_serialized.as_slice(),
)
.unwrap();
let nova_vp_deserialized = Nova::<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
CS1,
CS2,
H,
>::vp_deserialize_with_mode(
&mut nova_vp_serialized.as_slice(),
ark_serialize::Compress::Yes,
ark_serialize::Validate::Yes,
(), // fcircuit_params
)
.unwrap();
let ivc_proof = nova.ivc_proof();
// serialize IVCProof
let mut ivc_proof_serialized = vec![];
assert!(ivc_proof
.serialize_compressed(&mut ivc_proof_serialized)
.is_ok());
// deserialize IVCProof
let ivc_proof_deserialized = <Nova::<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
CS1,
CS2,
H,
> as FoldingScheme<Projective,Projective2, CubicFCircuit<Fr>>>::IVCProof::deserialize_compressed(
ivc_proof_serialized.as_slice()
)
.unwrap();
// verify the deserialized IVCProof with the deserialized VerifierParams
Nova::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>, CS1, CS2, H>::verify( Nova::<Projective, GVar, Projective2, GVar2, CubicFCircuit<Fr>, CS1, CS2, H>::verify(
nova_params.1, // Nova's verifier params
z_0.clone(),
nova.z_i.clone(),
nova.i,
running_instance,
incoming_instance,
cyclefold_instance,
nova_vp_deserialized, // Nova's verifier params
ivc_proof_deserialized,
) )
.unwrap(); .unwrap();

+ 168
- 312
folding-schemes/src/folding/nova/nifs.rs

@ -1,8 +1,12 @@
use ark_crypto_primitives::sponge::Absorb; use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_std::rand::RngCore;
use ark_std::Zero; use ark_std::Zero;
use std::marker::PhantomData; use std::marker::PhantomData;
use super::circuits::ChallengeGadget;
use super::traits::NIFSTrait;
use super::{CommittedInstance, Witness}; use super::{CommittedInstance, Witness};
use crate::arith::r1cs::R1CS; use crate::arith::r1cs::R1CS;
use crate::commitment::CommitmentScheme; use crate::commitment::CommitmentScheme;
@ -19,11 +23,119 @@ pub struct NIFS, const H: bool = false
_cp: PhantomData<CS>, _cp: PhantomData<CS>,
} }
impl<C: CurveGroup, CS: CommitmentScheme<C, H>, const H: bool> NIFSTrait<C, CS, H>
for NIFS<C, CS, H>
where
<C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: PrimeField,
{
type CommittedInstance = CommittedInstance<C>;
type Witness = Witness<C>;
type ProverAux = Vec<C::ScalarField>;
type VerifierAux = C;
fn new_witness(w: Vec<C::ScalarField>, e_len: usize, rng: impl RngCore) -> Self::Witness {
Witness::new::<H>(w, e_len, rng)
}
fn new_instance(
W: &Self::Witness,
params: &CS::ProverParams,
x: Vec<C::ScalarField>,
_aux: Vec<C::ScalarField>,
) -> Result<Self::CommittedInstance, Error> {
W.commit::<CS, H>(params, x)
}
fn fold_witness(
r: C::ScalarField,
W_i: &Self::Witness,
w_i: &Self::Witness,
aux: &Self::ProverAux,
) -> Result<Self::Witness, Error> {
let r2 = r * r;
let E: Vec<C::ScalarField> = vec_add(
&vec_add(&W_i.E, &vec_scalar_mul(aux, &r))?, // aux is Nova's T
&vec_scalar_mul(&w_i.E, &r2),
)?;
// use r_T=0 since we don't need hiding property for cm(T)
let rT = C::ScalarField::zero();
let rE = W_i.rE + r * rT + r2 * w_i.rE;
let W: Vec<C::ScalarField> = W_i
.W
.iter()
.zip(&w_i.W)
.map(|(a, b)| *a + (r * b))
.collect();
let rW = W_i.rW + r * w_i.rW;
Ok(Self::Witness { E, rE, W, rW })
}
fn compute_aux(
cs_prover_params: &CS::ProverParams,
r1cs: &R1CS<C::ScalarField>,
W_i: &Self::Witness,
U_i: &Self::CommittedInstance,
w_i: &Self::Witness,
u_i: &Self::CommittedInstance,
) -> Result<(Self::ProverAux, Self::VerifierAux), Error> {
let z1: Vec<C::ScalarField> = [vec![U_i.u], U_i.x.to_vec(), W_i.W.to_vec()].concat();
let z2: Vec<C::ScalarField> = [vec![u_i.u], u_i.x.to_vec(), w_i.W.to_vec()].concat();
// compute cross terms
let T = Self::compute_T(r1cs, U_i.u, u_i.u, &z1, &z2)?;
// use r_T=0 since we don't need hiding property for cm(T)
let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?;
Ok((T, cmT))
}
fn get_challenge<T: Transcript<C::ScalarField>>(
transcript: &mut T,
pp_hash: C::ScalarField, // public params hash
U_i: &Self::CommittedInstance,
u_i: &Self::CommittedInstance,
aux: &Self::VerifierAux, // cmT
) -> Vec<bool> {
ChallengeGadget::<C, Self::CommittedInstance>::get_challenge_native(
transcript,
pp_hash,
U_i,
u_i,
Some(aux),
)
}
// Notice: `prove` method is implemented at the trait level.
fn verify(
// r comes from the transcript, and is a n-bit (N_BITS_CHALLENGE) element
r: C::ScalarField,
U_i: &Self::CommittedInstance,
u_i: &Self::CommittedInstance,
cmT: &C, // VerifierAux
) -> Self::CommittedInstance {
let r2 = r * r;
let cmE = U_i.cmE + cmT.mul(r) + u_i.cmE.mul(r2);
let u = U_i.u + r * u_i.u;
let cmW = U_i.cmW + u_i.cmW.mul(r);
let x = U_i
.x
.iter()
.zip(&u_i.x)
.map(|(a, b)| *a + (r * b))
.collect::<Vec<C::ScalarField>>();
Self::CommittedInstance { cmE, u, cmW, x }
}
}
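`verify` above folds commitments instead of the underlying vectors, which works because the commitment scheme is homomorphic: cm(W_1) + r·cm(W_2) = cm(W_1 + r·W_2). A toy, insecure sketch of that algebra with an integer "commitment" (not this crate's `CommitmentScheme` types):

// Toy, insecure stand-in for a homomorphic commitment: cm(w) = sum_i g[i] * w[i].
// Real Nova uses Pedersen/KZG over elliptic-curve groups; this only shows the algebra.
fn commit(g: &[i64], w: &[i64]) -> i64 {
    g.iter().zip(w).map(|(gi, wi)| gi * wi).sum()
}

fn main() {
    let g = [7, 11, 13]; // "generators"
    let w1 = [1, 2, 3]; // witness of the running instance
    let w2 = [4, 5, 6]; // witness of the incoming instance
    let r = 3; // folding challenge

    // the prover folds the witness: W = W1 + r*W2
    let w_folded: Vec<i64> = w1.iter().zip(&w2).map(|(a, b)| a + r * b).collect();

    // the verifier folds the commitments only: cm(W1) + r*cm(W2) ...
    let cm_folded = commit(&g, &w1) + r * commit(&g, &w2);

    // ... which equals the commitment to the folded witness
    assert_eq!(cm_folded, commit(&g, &w_folded));
}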
impl<C: CurveGroup, CS: CommitmentScheme<C, H>, const H: bool> NIFS<C, CS, H> impl<C: CurveGroup, CS: CommitmentScheme<C, H>, const H: bool> NIFS<C, CS, H>
where where
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: PrimeField,
{ {
// compute_T: compute cross-terms T
/// compute_T: compute cross-terms T
pub fn compute_T( pub fn compute_T(
r1cs: &R1CS<C::ScalarField>, r1cs: &R1CS<C::ScalarField>,
u1: C::ScalarField, u1: C::ScalarField,
@ -49,48 +161,8 @@ where
vec_sub(&vec_sub(&vec_add(&Az1_Bz2, &Az2_Bz1)?, &u1Cz2)?, &u2Cz1) vec_sub(&vec_sub(&vec_add(&Az1_Bz2, &Az2_Bz1)?, &u1Cz2)?, &u2Cz1)
} }
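Putting `compute_T`, `fold_witness` and `verify` together: with the cross term T = Az_1 ∘ Bz_2 + Az_2 ∘ Bz_1 − u_1·Cz_2 − u_2·Cz_1, the folded z = z_1 + r·z_2, u = u_1 + r·u_2, E = E_1 + r·T + r²·E_2 again satisfies Az ∘ Bz = u·Cz + E. A self-contained toy check over plain integers (the identity holds over any commutative ring, so no field arithmetic is needed for the illustration):

// Toy check that Nova's cross term T makes folding preserve the relaxed R1CS
// relation Az ∘ Bz == u*Cz + E. Plain i64 vectors, not this crate's types.
fn mat_vec(m: &[Vec<i64>], z: &[i64]) -> Vec<i64> {
    m.iter()
        .map(|row| row.iter().zip(z).map(|(a, b)| a * b).sum())
        .collect()
}
fn hadamard(a: &[i64], b: &[i64]) -> Vec<i64> {
    a.iter().zip(b).map(|(x, y)| x * y).collect()
}
fn scale(v: &[i64], s: i64) -> Vec<i64> {
    v.iter().map(|x| s * x).collect()
}
fn vadd(a: &[i64], b: &[i64]) -> Vec<i64> {
    a.iter().zip(b).map(|(x, y)| x + y).collect()
}
fn vsub(a: &[i64], b: &[i64]) -> Vec<i64> {
    a.iter().zip(b).map(|(x, y)| x - y).collect()
}

fn main() {
    // one constraint z_w * z_w = z_x over z = [u, x, w]
    let (a, b, c) = (
        vec![vec![0, 0, 1]],
        vec![vec![0, 0, 1]],
        vec![vec![0, 1, 0]],
    );
    // two satisfying plain instances (u = 1, E = 0): x = w^2
    let (z1, u1, e1) = (vec![1, 9, 3], 1, vec![0]);
    let (z2, u2, e2) = (vec![1, 16, 4], 1, vec![0]);

    // cross term T = Az1∘Bz2 + Az2∘Bz1 - u1*Cz2 - u2*Cz1 (as in compute_T above)
    let (az1, bz1, cz1) = (mat_vec(&a, &z1), mat_vec(&b, &z1), mat_vec(&c, &z1));
    let (az2, bz2, cz2) = (mat_vec(&a, &z2), mat_vec(&b, &z2), mat_vec(&c, &z2));
    let t = vsub(
        &vsub(&vadd(&hadamard(&az1, &bz2), &hadamard(&az2, &bz1)), &scale(&cz2, u1)),
        &scale(&cz1, u2),
    );

    // fold with challenge r: z = z1 + r*z2, u = u1 + r*u2, E = E1 + r*T + r^2*E2
    let r = 5;
    let z = vadd(&z1, &scale(&z2, r));
    let u = u1 + r * u2;
    let e = vadd(&vadd(&e1, &scale(&t, r)), &scale(&e2, r * r));

    // the folded relaxed instance still satisfies Az ∘ Bz == u*Cz + E
    let lhs = hadamard(&mat_vec(&a, &z), &mat_vec(&b, &z));
    let rhs = vadd(&scale(&mat_vec(&c, &z), u), &e);
    assert_eq!(lhs, rhs);
}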
pub fn fold_witness(
r: C::ScalarField,
w1: &Witness<C>,
w2: &Witness<C>,
T: &[C::ScalarField],
rT: C::ScalarField,
) -> Result<Witness<C>, Error> {
let r2 = r * r;
let E: Vec<C::ScalarField> = vec_add(
&vec_add(&w1.E, &vec_scalar_mul(T, &r))?,
&vec_scalar_mul(&w2.E, &r2),
)?;
let rE = w1.rE + r * rT + r2 * w2.rE;
let W: Vec<C::ScalarField> = w1.W.iter().zip(&w2.W).map(|(a, b)| *a + (r * b)).collect();
let rW = w1.rW + r * w2.rW;
Ok(Witness::<C> { E, rE, W, rW })
}
pub fn fold_committed_instance(
r: C::ScalarField,
ci1: &CommittedInstance<C>, // U_i
ci2: &CommittedInstance<C>, // u_i
cmT: &C,
) -> CommittedInstance<C> {
let r2 = r * r;
let cmE = ci1.cmE + cmT.mul(r) + ci2.cmE.mul(r2);
let u = ci1.u + r * ci2.u;
let cmW = ci1.cmW + ci2.cmW.mul(r);
let x = ci1
.x
.iter()
.zip(&ci2.x)
.map(|(a, b)| *a + (r * b))
.collect::<Vec<C::ScalarField>>();
CommittedInstance::<C> { cmE, u, cmW, x }
}
/// NIFS.P is the consecutive combination of compute_cmT with fold_instances
/// compute_cmT is part of the NIFS.P logic
/// In Nova, NIFS.P is the consecutive combination of compute_cmT with fold_instances,
/// i.e. compute_cmT is part of the NIFS.P logic.
pub fn compute_cmT( pub fn compute_cmT(
cs_prover_params: &CS::ProverParams, cs_prover_params: &CS::ProverParams,
r1cs: &R1CS<C::ScalarField>, r1cs: &R1CS<C::ScalarField>,
@ -108,6 +180,7 @@ where
let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?; let cmT = CS::commit(cs_prover_params, &T, &C::ScalarField::zero())?;
Ok((T, cmT)) Ok((T, cmT))
} }
pub fn compute_cyclefold_cmT( pub fn compute_cyclefold_cmT(
cs_prover_params: &CS::ProverParams, cs_prover_params: &CS::ProverParams,
r1cs: &R1CS<C::ScalarField>, // R1CS over C2.Fr=C1.Fq (here C=C2) r1cs: &R1CS<C::ScalarField>, // R1CS over C2.Fr=C1.Fq (here C=C2)
@ -129,40 +202,6 @@ where
Ok((T, cmT)) Ok((T, cmT))
} }
/// fold_instances is part of the NIFS.P logic described in
/// [Nova](https://eprint.iacr.org/2021/370.pdf)'s section 4. It returns the folded Committed
/// Instances and the Witness.
pub fn fold_instances(
r: C::ScalarField,
w1: &Witness<C>,
ci1: &CommittedInstance<C>,
w2: &Witness<C>,
ci2: &CommittedInstance<C>,
T: &[C::ScalarField],
cmT: C,
) -> Result<(Witness<C>, CommittedInstance<C>), Error> {
// fold witness
// use r_T=0 since we don't need hiding property for cm(T)
let w3 = NIFS::<C, CS, H>::fold_witness(r, w1, w2, T, C::ScalarField::zero())?;
// fold committed instances
let ci3 = NIFS::<C, CS, H>::fold_committed_instance(r, ci1, ci2, &cmT);
Ok((w3, ci3))
}
/// verify implements NIFS.V logic described in [Nova](https://eprint.iacr.org/2021/370.pdf)'s
/// section 4. It returns the folded Committed Instance
pub fn verify(
// r comes from the transcript, and is a n-bit (N_BITS_CHALLENGE) element
r: C::ScalarField,
ci1: &CommittedInstance<C>,
ci2: &CommittedInstance<C>,
cmT: &C,
) -> CommittedInstance<C> {
NIFS::<C, CS, H>::fold_committed_instance(r, ci1, ci2, cmT)
}
/// Verify committed folded instance (ci) relations. Notice that this method does not open the /// Verify committed folded instance (ci) relations. Notice that this method does not open the
/// commitments, but just checks that the given committed instances (ci1, ci2) when folded /// commitments, but just checks that the given committed instances (ci1, ci2) when folded
/// result in the folded committed instance (ci3) values. /// result in the folded committed instance (ci3) values.
@ -173,7 +212,7 @@ where
ci3: &CommittedInstance<C>, ci3: &CommittedInstance<C>,
cmT: &C, cmT: &C,
) -> Result<(), Error> { ) -> Result<(), Error> {
let expected = Self::fold_committed_instance(r, ci1, ci2, cmT);
let expected = Self::verify(r, ci1, ci2, cmT);
if ci3.cmE != expected.cmE if ci3.cmE != expected.cmE
|| ci3.u != expected.u || ci3.u != expected.u
|| ci3.cmW != expected.cmW || ci3.cmW != expected.cmW
@ -202,217 +241,32 @@ where
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::*; use super::*;
use ark_crypto_primitives::sponge::{
poseidon::{PoseidonConfig, PoseidonSponge},
CryptographicSponge,
};
use crate::transcript::poseidon::poseidon_canonical_config;
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, CryptographicSponge};
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_pallas::{Fr, Projective}; use ark_pallas::{Fr, Projective};
use ark_std::{ops::Mul, test_rng, UniformRand};
use ark_std::{test_rng, UniformRand};
use crate::arith::r1cs::{
tests::{get_test_r1cs, get_test_z},
RelaxedR1CS,
use crate::arith::{
r1cs::tests::{get_test_r1cs, get_test_z},
Arith,
}; };
use crate::commitment::pedersen::{Params as PedersenParams, Pedersen};
use crate::folding::nova::circuits::ChallengeGadget;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::traits::NIFSTrait;
#[allow(clippy::type_complexity)]
pub(crate) fn prepare_simple_fold_inputs<C>() -> (
PedersenParams<C>,
PoseidonConfig<C::ScalarField>,
R1CS<C::ScalarField>,
Witness<C>, // w1
CommittedInstance<C>, // ci1
Witness<C>, // w2
CommittedInstance<C>, // ci2
Witness<C>, // w3
CommittedInstance<C>, // ci3
Vec<C::ScalarField>, // T
C, // cmT
Vec<bool>, // r_bits
C::ScalarField, // r_Fr
)
where
C: CurveGroup,
<C as CurveGroup>::BaseField: PrimeField,
C::ScalarField: Absorb,
{
let r1cs = get_test_r1cs();
let z1 = get_test_z(3);
let z2 = get_test_z(4);
let (w1, x1) = r1cs.split_z(&z1);
let (w2, x2) = r1cs.split_z(&z2);
let w1 = Witness::<C>::new::<false>(w1.clone(), r1cs.A.n_rows, test_rng());
let w2 = Witness::<C>::new::<false>(w2.clone(), r1cs.A.n_rows, test_rng());
let mut rng = ark_std::test_rng();
let (pedersen_params, _) = Pedersen::<C>::setup(&mut rng, r1cs.A.n_cols).unwrap();
// compute committed instances
let ci1 = w1
.commit::<Pedersen<C>, false>(&pedersen_params, x1.clone())
.unwrap();
let ci2 = w2
.commit::<Pedersen<C>, false>(&pedersen_params, x2.clone())
.unwrap();
// NIFS.P
let (T, cmT) =
NIFS::<C, Pedersen<C>>::compute_cmT(&pedersen_params, &r1cs, &w1, &ci1, &w2, &ci2)
.unwrap();
let poseidon_config = poseidon_canonical_config::<C::ScalarField>();
let mut transcript = PoseidonSponge::<C::ScalarField>::new(&poseidon_config);
let pp_hash = C::ScalarField::from(42u32); // only for test
let r_bits = ChallengeGadget::<C>::get_challenge_native(
&mut transcript,
pp_hash,
ci1.clone(),
ci2.clone(),
cmT,
);
let r_Fr = C::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
let (w3, ci3) =
NIFS::<C, Pedersen<C>>::fold_instances(r_Fr, &w1, &ci1, &w2, &ci2, &T, cmT).unwrap();
(
pedersen_params,
poseidon_config,
r1cs,
w1,
ci1,
w2,
ci2,
w3,
ci3,
T,
cmT,
r_bits,
r_Fr,
)
}
// fold 2 dummy instances and check that the folded instance holds the relaxed R1CS relation
#[test] #[test]
fn test_nifs_fold_dummy() {
let r1cs = get_test_r1cs::<Fr>();
let z1 = get_test_z(3);
let (w1, x1) = r1cs.split_z(&z1);
fn test_nifs_nova() {
let (W, U) = test_nifs_opt::<NIFS<Projective, Pedersen<Projective>>>();
let mut rng = ark_std::test_rng();
let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, r1cs.A.n_cols).unwrap();
// dummy instance, witness and public inputs zeroes
let w_dummy = Witness::<Projective>::dummy(w1.len(), r1cs.A.n_rows);
let mut u_dummy = w_dummy
.commit::<Pedersen<Projective>, false>(&pedersen_params, vec![Fr::zero(); x1.len()])
.unwrap();
u_dummy.u = Fr::zero();
let w_i = w_dummy.clone();
let u_i = u_dummy.clone();
let W_i = w_dummy.clone();
let U_i = u_dummy.clone();
r1cs.check_relaxed_relation(&w_i, &u_i).unwrap();
r1cs.check_relaxed_relation(&W_i, &U_i).unwrap();
let r_Fr = Fr::from(3_u32);
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
&pedersen_params,
&r1cs,
&w_i,
&u_i,
&W_i,
&U_i,
)
.unwrap();
let (W_i1, U_i1) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
r_Fr, &w_i, &u_i, &W_i, &U_i, &T, cmT,
)
.unwrap();
r1cs.check_relaxed_relation(&W_i1, &U_i1).unwrap();
}
// fold 2 instances into one
#[test]
fn test_nifs_one_fold() {
let (pedersen_params, poseidon_config, r1cs, w1, ci1, w2, ci2, w3, ci3, T, cmT, _, r) =
prepare_simple_fold_inputs();
// NIFS.V
let ci3_v = NIFS::<Projective, Pedersen<Projective>>::verify(r, &ci1, &ci2, &cmT);
assert_eq!(ci3_v, ci3);
// check that relations hold for the 2 inputted instances and the folded one
r1cs.check_relaxed_relation(&w1, &ci1).unwrap();
r1cs.check_relaxed_relation(&w2, &ci2).unwrap();
r1cs.check_relaxed_relation(&w3, &ci3).unwrap();
// check that folded commitments from folded instance (ci) are equal to folding the
// use folded rE, rW to commit w3
let ci3_expected = w3
.commit::<Pedersen<Projective>, false>(&pedersen_params, ci3.x.clone())
.unwrap();
assert_eq!(ci3_expected.cmE, ci3.cmE);
assert_eq!(ci3_expected.cmW, ci3.cmW);
// next equalities should hold since we started from two cmE of zero-vector E's
assert_eq!(ci3.cmE, cmT.mul(r));
assert_eq!(w3.E, vec_scalar_mul(&T, &r));
// NIFS.Verify_Folded_Instance:
NIFS::<Projective, Pedersen<Projective>>::verify_folded_instance(r, &ci1, &ci2, &ci3, &cmT)
.unwrap();
// init Prover's transcript
let mut transcript_p = PoseidonSponge::<Fr>::new(&poseidon_config);
// init Verifier's transcript
let mut transcript_v = PoseidonSponge::<Fr>::new(&poseidon_config);
// prove the ci3.cmE, ci3.cmW, cmT commitments
let cm_proofs = NIFS::<Projective, Pedersen<Projective>>::prove_commitments(
&mut transcript_p,
&pedersen_params,
&w3,
&ci3,
T,
&cmT,
)
.unwrap();
// verify the ci3.cmE, ci3.cmW, cmT commitments
assert_eq!(cm_proofs.len(), 3);
Pedersen::<Projective>::verify(
&pedersen_params,
&mut transcript_v,
&ci3.cmE,
&cm_proofs[0].clone(),
)
.unwrap();
Pedersen::<Projective>::verify(
&pedersen_params,
&mut transcript_v,
&ci3.cmW,
&cm_proofs[1].clone(),
)
.unwrap();
Pedersen::<Projective>::verify(
&pedersen_params,
&mut transcript_v,
&cmT,
&cm_proofs[2].clone(),
)
.unwrap();
// check the last folded instance relation
let r1cs = get_test_r1cs();
r1cs.check_relation(&W, &U).unwrap();
} }
#[test]
fn test_nifs_fold_loop() {
/// Runs a loop using the NIFS trait, and returns the last Witness and CommittedInstance so
/// that their relation can be checked.
pub(crate) fn test_nifs_opt<N: NIFSTrait<Projective, Pedersen<Projective>>>(
) -> (N::Witness, N::CommittedInstance) {
let r1cs = get_test_r1cs(); let r1cs = get_test_r1cs();
let z = get_test_z(3); let z = get_test_z(3);
let (w, x) = r1cs.split_z(&z); let (w, x) = r1cs.split_z(&z);
@ -420,66 +274,68 @@ pub mod tests {
let mut rng = ark_std::test_rng(); let mut rng = ark_std::test_rng();
let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, r1cs.A.n_cols).unwrap(); let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, r1cs.A.n_cols).unwrap();
// prepare the running instance
let mut running_instance_w =
Witness::<Projective>::new::<false>(w.clone(), r1cs.A.n_rows, test_rng());
let mut running_committed_instance = running_instance_w
.commit::<Pedersen<Projective>, false>(&pedersen_params, x)
.unwrap();
let poseidon_config = poseidon_canonical_config::<Fr>();
let mut transcript = PoseidonSponge::<Fr>::new(&poseidon_config);
let pp_hash = Fr::rand(&mut rng);
r1cs.check_relaxed_relation(&running_instance_w, &running_committed_instance)
.unwrap();
// prepare the running instance
let mut running_witness = N::new_witness(w.clone(), r1cs.A.n_rows, test_rng());
let mut running_committed_instance =
N::new_instance(&running_witness, &pedersen_params, x, vec![]).unwrap();
let num_iters = 10; let num_iters = 10;
for i in 0..num_iters { for i in 0..num_iters {
// prepare the incoming instance // prepare the incoming instance
let incoming_instance_z = get_test_z(i + 4); let incoming_instance_z = get_test_z(i + 4);
let (w, x) = r1cs.split_z(&incoming_instance_z); let (w, x) = r1cs.split_z(&incoming_instance_z);
let incoming_instance_w =
Witness::<Projective>::new::<false>(w.clone(), r1cs.A.n_rows, test_rng());
let incoming_committed_instance = incoming_instance_w
.commit::<Pedersen<Projective>, false>(&pedersen_params, x)
.unwrap();
r1cs.check_relaxed_relation(&incoming_instance_w, &incoming_committed_instance)
.unwrap();
let incoming_witness = N::new_witness(w.clone(), r1cs.A.n_rows, test_rng());
let incoming_committed_instance =
N::new_instance(&incoming_witness, &pedersen_params, x, vec![]).unwrap();
let r = Fr::rand(&mut rng); // folding challenge would come from the RO
// NIFS.P
let (T, cmT) = NIFS::<Projective, Pedersen<Projective>>::compute_cmT(
let (aux_p, aux_v) = N::compute_aux(
&pedersen_params, &pedersen_params,
&r1cs, &r1cs,
&running_instance_w,
&running_witness,
&running_committed_instance, &running_committed_instance,
&incoming_instance_w,
&incoming_witness,
&incoming_committed_instance, &incoming_committed_instance,
) )
.unwrap(); .unwrap();
let (folded_w, _) = NIFS::<Projective, Pedersen<Projective>>::fold_instances(
let r_bits = N::get_challenge(
&mut transcript,
pp_hash,
&running_committed_instance,
&incoming_committed_instance,
&aux_v,
);
let r = Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
// NIFS.P
let (folded_witness, _) = N::prove(
r, r,
&running_instance_w,
&running_witness,
&running_committed_instance, &running_committed_instance,
&incoming_instance_w,
&incoming_witness,
&incoming_committed_instance, &incoming_committed_instance,
&T,
cmT,
&aux_p,
&aux_v,
) )
.unwrap(); .unwrap();
// NIFS.V // NIFS.V
let folded_committed_instance = NIFS::<Projective, Pedersen<Projective>>::verify(
let folded_committed_instance = N::verify(
r, r,
&running_committed_instance, &running_committed_instance,
&incoming_committed_instance, &incoming_committed_instance,
&cmT,
&aux_v,
); );
r1cs.check_relaxed_relation(&folded_w, &folded_committed_instance)
.unwrap();
// set running_instance for next loop iteration // set running_instance for next loop iteration
running_instance_w = folded_w;
running_witness = folded_witness;
running_committed_instance = folded_committed_instance; running_committed_instance = folded_committed_instance;
} }
(running_witness, running_committed_instance)
} }
} }

+ 269
- 0
folding-schemes/src/folding/nova/ova.rs

@ -0,0 +1,269 @@
/// This module contains the implementation of the NIFSTrait for the
/// [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) NIFS (Non-Interactive Folding Scheme) as
/// outlined in the protocol description doc:
/// <https://hackmd.io/V4838nnlRKal9ZiTHiGYzw#Construction> authored by Benedikt Bünz.
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::fmt::Debug;
use ark_std::rand::RngCore;
use ark_std::{One, UniformRand, Zero};
use std::marker::PhantomData;
use super::{circuits::ChallengeGadget, traits::NIFSTrait};
use crate::arith::r1cs::R1CS;
use crate::commitment::CommitmentScheme;
use crate::folding::{circuits::CF1, traits::Dummy};
use crate::transcript::{AbsorbNonNative, Transcript};
use crate::utils::vec::{hadamard, mat_vec_mul, vec_scalar_mul, vec_sub};
use crate::Error;
/// A CommittedInstance in [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) is represented by `W` or
/// `W'`. It is the result of committing to a vector that contains the witness `w` concatenated
/// with `t` or `e`, plus the public inputs `x` and a relaxation factor `u`. (Note that in the Ova
/// document `u` is denoted as `mu`; this implementation uses `u` to follow the original Nova
/// notation, so the code is easier to follow.)
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct CommittedInstance<C: CurveGroup> {
pub u: C::ScalarField, // in the Ova document is denoted as `mu`
pub x: Vec<C::ScalarField>,
pub cmWE: C,
}
impl<C: CurveGroup> Absorb for CommittedInstance<C>
where
C::ScalarField: Absorb,
{
fn to_sponge_bytes(&self, dest: &mut Vec<u8>) {
C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest);
}
fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) {
self.u.to_sponge_field_elements(dest);
self.x.to_sponge_field_elements(dest);
// We cannot call `to_native_sponge_field_elements(dest)` directly, as
// `to_native_sponge_field_elements` needs `F` to be `C::ScalarField`,
// but here `F` is a generic `PrimeField`.
self.cmWE
.to_native_sponge_field_elements_as_vec()
.to_sponge_field_elements(dest);
}
}
// #[allow(dead_code)] // Clippy flag needed for now.
/// A Witness in Ova is represented by `w`. It also contains a blinder which may or may not be
/// used when committing to the witness itself.
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct Witness<C: CurveGroup> {
pub w: Vec<C::ScalarField>,
pub rW: C::ScalarField,
}
impl<C: CurveGroup> Witness<C> {
/// Generates a new `Witness` instance from a given witness vector.
/// If `H = true`, then we assume we want to blind it at commitment time,
/// hence `rW` is sampled from the randomness passed in.
pub fn new<const H: bool>(w: Vec<C::ScalarField>, mut rng: impl RngCore) -> Self {
Self {
w,
rW: if H {
C::ScalarField::rand(&mut rng)
} else {
C::ScalarField::zero()
},
}
}
/// Given the public inputs `x` and the vector `t` or `e` (which in Ova is always concatenated
/// to the witness), generates a [`CommittedInstance`] as a result, which will or will not be
/// blinded depending on how the const generic `HC` is set up.
pub fn commit<CS: CommitmentScheme<C, HC>, const HC: bool>(
&self,
params: &CS::ProverParams,
x: Vec<C::ScalarField>,
t_or_e: Vec<C::ScalarField>,
) -> Result<CommittedInstance<C>, Error> {
let cmWE = CS::commit(params, &[self.w.clone(), t_or_e].concat(), &self.rW)?;
Ok(CommittedInstance {
u: C::ScalarField::one(),
cmWE,
x,
})
}
}
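// Illustrative sketch (not part of this changeset) of how the two constructors above are meant
// to be used together. It assumes the test helpers from `crate::arith::r1cs::tests`, the Pedersen
// scheme from `crate::commitment::pedersen`, and a generator count large enough to cover
// `w.len() + t_or_e.len()`:
//
// let mut rng = ark_std::test_rng();
// let r1cs = get_test_r1cs::<Fr>();
// let (w, x) = r1cs.split_z(&get_test_z(3));
// let (pedersen_params, _) =
//     Pedersen::<Projective>::setup(&mut rng, r1cs.A.n_cols + r1cs.A.n_rows).unwrap();
// let witness = Witness::<Projective>::new::<false>(w, &mut rng);
// // `t_or_e` is the cross/error-term vector that Ova commits together with the witness;
// // all-zeros here, purely for illustration.
// let t_or_e = vec![Fr::zero(); r1cs.A.n_rows];
// let instance = witness
//     .commit::<Pedersen<Projective>, false>(&pedersen_params, x, t_or_e)
//     .unwrap();
// assert_eq!(instance.u, Fr::one()); // fresh instances start with `u` (the doc's `mu`) = 1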
impl<C: CurveGroup> Dummy<&R1CS<CF1<C>>> for Witness<C> {
fn dummy(r1cs: &R1CS<CF1<C>>) -> Self {
Self {
w: vec![C::ScalarField::zero(); r1cs.A.n_cols - 1 - r1cs.l],
rW: C::ScalarField::zero(),
}
}
}
/// Implements the NIFS (Non-Interactive Folding Scheme) trait for Ova.
pub struct NIFS<C: CurveGroup, CS: CommitmentScheme<C, H>, const H: bool = false> {
_c: PhantomData<C>,
_cp: PhantomData<CS>,
}
impl<C: CurveGroup, CS: CommitmentScheme<C, H>, const H: bool> NIFSTrait<C, CS, H>
for NIFS<C, CS, H>
where
<C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: PrimeField,
{
type CommittedInstance = CommittedInstance<C>;
type Witness = Witness<C>;
type ProverAux = ();
type VerifierAux = ();
fn new_witness(w: Vec<C::ScalarField>, _e_len: usize, rng: impl RngCore) -> Self::Witness {
Witness::new::<H>(w, rng)
}
fn new_instance(
W: &Self::Witness,
params: &CS::ProverParams,
x: Vec<C::ScalarField>,
aux: Vec<C::ScalarField>, // t_or_e
) -> Result<Self::CommittedInstance, Error> {
W.commit::<CS, H>(params, x, aux)
}
fn fold_witness(
r: C::ScalarField, // in Ova's hackmd denoted as `alpha`
W_i: &Self::Witness,
w_i: &Self::Witness,
_aux: &Self::ProverAux,
) -> Result<Self::Witness, Error> {
let w: Vec<C::ScalarField> = W_i
.w
.iter()
.zip(&w_i.w)
.map(|(a, b)| *a + (r * b))
.collect();
let rW = W_i.rW + r * w_i.rW;
Ok(Self::Witness { w, rW })
}
fn compute_aux(
_cs_prover_params: &CS::ProverParams,
_r1cs: &R1CS<C::ScalarField>,
_W_i: &Self::Witness,
_U_i: &Self::CommittedInstance,
_w_i: &Self::Witness,
_u_i: &Self::CommittedInstance,
) -> Result<(Self::ProverAux, Self::VerifierAux), Error> {
Ok(((), ()))
}
fn get_challenge<T: Transcript<C::ScalarField>>(
transcript: &mut T,
pp_hash: C::ScalarField, // public params hash
U_i: &Self::CommittedInstance,
u_i: &Self::CommittedInstance,
_aux: &Self::VerifierAux,
) -> Vec<bool> {
// reuse Nova's get_challenge method
ChallengeGadget::<C, Self::CommittedInstance>::get_challenge_native(
transcript, pp_hash, U_i, u_i, None, // empty in Ova's case
)
}
// Notice: `prove` method is implemented at the trait level.
fn verify(
// r comes from the transcript, and is an n-bit (N_BITS_CHALLENGE) element
r: C::ScalarField,
U_i: &Self::CommittedInstance,
u_i: &Self::CommittedInstance,
_aux: &Self::VerifierAux,
) -> Self::CommittedInstance {
// recall that r <==> alpha, and u <==> mu between Nova and Ova respectively
let u = U_i.u + r; // u_i.u is always 1 in Ova, since we can only do sequential IVC.
let cmWE = U_i.cmWE + u_i.cmWE.mul(r);
let x = U_i
.x
.iter()
.zip(&u_i.x)
.map(|(a, b)| *a + (r * b))
.collect::<Vec<C::ScalarField>>();
Self::CommittedInstance { cmWE, u, x }
}
}
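// Summing up the algebra of `fold_witness`/`verify` above (a sketch, not part of this changeset):
// for a folding challenge `alpha` (named `r` in the code), the folded instance is
//   mu'   = mu_U   + alpha * mu_u        (with mu_u fixed to 1 for incoming instances)
//   cmWE' = cmWE_U + alpha * cmWE_u
//   x'    = x_U    + alpha * x_u
// i.e. the same linear combination as Nova's verifier, but with no separate `cmE`/`cmT` term,
// since Ova commits the error/cross term together with the witness inside `cmWE`.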
/// Computes the E parameter (error terms) for the given R1CS and the instance's z and u. This
/// method is used by the verifier to obtain E in order to check the RelaxedR1CS relation.
pub fn compute_E<C: CurveGroup>(
r1cs: &R1CS<C::ScalarField>,
z: &[C::ScalarField],
u: C::ScalarField,
) -> Result<Vec<C::ScalarField>, Error> {
let (A, B, C) = (r1cs.A.clone(), r1cs.B.clone(), r1cs.C.clone());
// this is parallelizable (for the future)
let Az = mat_vec_mul(&A, z)?;
let Bz = mat_vec_mul(&B, z)?;
let Cz = mat_vec_mul(&C, z)?;
let Az_Bz = hadamard(&Az, &Bz)?;
let uCz = vec_scalar_mul(&Cz, &u);
vec_sub(&Az_Bz, &uCz)
}
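// In other words (sketch, not part of this changeset): E = Az ∘ Bz - u * Cz for z = [u, x, w],
// so an Ova instance is satisfying when the `e` committed inside `cmWE` equals this vector.
// This is exactly how the `TestingWitness` in the tests below obtains its `e`.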
#[cfg(test)]
pub mod tests {
use super::*;
use ark_pallas::{Fr, Projective};
use crate::arith::{r1cs::tests::get_test_r1cs, Arith};
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::nifs::tests::test_nifs_opt;
// Simple auxiliary structure mainly used to help pass a witness for which we can easily
// check an R1CS relation.
// Notice that checking it requires us to have `E` as per the [`Arith`] trait definition.
// But since we don't hold `E` nor `e` within the NIFS, we create this structure to pass
// `e` so that the check can be done.
#[derive(Debug, Clone)]
pub(crate) struct TestingWitness<C: CurveGroup> {
pub(crate) w: Vec<C::ScalarField>,
pub(crate) e: Vec<C::ScalarField>,
}
impl<C: CurveGroup> Arith<TestingWitness<C>, CommittedInstance<C>> for R1CS<CF1<C>> {
type Evaluation = Vec<CF1<C>>;
fn eval_relation(
&self,
w: &TestingWitness<C>,
u: &CommittedInstance<C>,
) -> Result<Self::Evaluation, Error> {
self.eval_at_z(&[&[u.u], u.x.as_slice(), &w.w].concat())
}
fn check_evaluation(
w: &TestingWitness<C>,
_u: &CommittedInstance<C>,
e: Self::Evaluation,
) -> Result<(), Error> {
(w.e == e).then_some(()).ok_or(Error::NotSatisfied)
}
}
#[test]
fn test_nifs_ova() {
let (W, U) = test_nifs_opt::<NIFS<Projective, Pedersen<Projective>>>();
// check the last folded instance relation
let r1cs = get_test_r1cs();
let z: Vec<Fr> = [&[U.u][..], &U.x, &W.w].concat();
let e = compute_E::<Projective>(&r1cs, &z, U.u).unwrap();
r1cs.check_relation(&TestingWitness::<Projective> { e, w: W.w.clone() }, &U)
.unwrap();
}
}

+ 0
- 268
folding-schemes/src/folding/nova/serialize.rs

@ -1,268 +0,0 @@
use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb};
use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField;
use ark_r1cs_std::{
groups::{CurveVar, GroupOpsBounds},
ToConstraintFieldGadget,
};
use ark_relations::r1cs::ConstraintSynthesizer;
use ark_relations::r1cs::ConstraintSystem;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError, Write};
use std::marker::PhantomData;
use super::{
circuits::AugmentedFCircuit, CommittedInstance, Nova, NovaCycleFoldCircuit, ProverParams,
Witness,
};
use crate::{
arith::r1cs::extract_r1cs,
commitment::CommitmentScheme,
folding::circuits::{CF1, CF2},
frontend::FCircuit,
};
impl<C1, GC1, C2, GC2, FC, CS1, CS2, const H: bool> CanonicalSerialize
for Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>
where
C1: CurveGroup,
C2: CurveGroup,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
GC1: CurveVar<C1, <C2 as Group>::ScalarField>,
GC1: ToConstraintFieldGadget<<C2 as Group>::ScalarField>,
GC2: CurveVar<C2, <C2 as CurveGroup>::BaseField>,
{
fn serialize_with_mode<W: Write>(
&self,
mut writer: W,
compress: ark_serialize::Compress,
) -> Result<(), ark_serialize::SerializationError> {
self.pp_hash.serialize_with_mode(&mut writer, compress)?;
self.i.serialize_with_mode(&mut writer, compress)?;
self.z_0.serialize_with_mode(&mut writer, compress)?;
self.z_i.serialize_with_mode(&mut writer, compress)?;
self.w_i.serialize_with_mode(&mut writer, compress)?;
self.u_i.serialize_with_mode(&mut writer, compress)?;
self.W_i.serialize_with_mode(&mut writer, compress)?;
self.U_i.serialize_with_mode(&mut writer, compress)?;
self.cf_W_i.serialize_with_mode(&mut writer, compress)?;
self.cf_U_i.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
self.pp_hash.serialized_size(compress)
+ self.i.serialized_size(compress)
+ self.z_0.serialized_size(compress)
+ self.z_i.serialized_size(compress)
+ self.w_i.serialized_size(compress)
+ self.u_i.serialized_size(compress)
+ self.W_i.serialized_size(compress)
+ self.U_i.serialized_size(compress)
+ self.cf_W_i.serialized_size(compress)
+ self.cf_U_i.serialized_size(compress)
}
fn serialize_compressed<W: Write>(
&self,
writer: W,
) -> Result<(), ark_serialize::SerializationError> {
self.serialize_with_mode(writer, ark_serialize::Compress::Yes)
}
fn compressed_size(&self) -> usize {
self.serialized_size(ark_serialize::Compress::Yes)
}
fn serialize_uncompressed<W: Write>(
&self,
writer: W,
) -> Result<(), ark_serialize::SerializationError> {
self.serialize_with_mode(writer, ark_serialize::Compress::No)
}
fn uncompressed_size(&self) -> usize {
self.serialized_size(ark_serialize::Compress::No)
}
}
// Note that we can't derive or implement `CanonicalDeserialize` directly.
// This is because `CurveVar` notably does not implement the `Sync` trait.
impl<C1, GC1, C2, GC2, FC, CS1, CS2, const H: bool> Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>
where
C1: CurveGroup,
C2: CurveGroup,
FC: FCircuit<CF1<C1>, Params = ()>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
GC1: CurveVar<C1, <C2 as Group>::ScalarField>,
GC1: ToConstraintFieldGadget<<C2 as Group>::ScalarField>,
GC2: CurveVar<C2, CF2<C2>>,
GC2: ToConstraintFieldGadget<<C2 as CurveGroup>::BaseField>,
{
pub fn deserialize_nova<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
prover_params: ProverParams<C1, C2, CS1, CS2, H>,
poseidon_config: PoseidonConfig<C1::ScalarField>,
) -> Result<Self, ark_serialize::SerializationError> {
let pp_hash = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?;
let i = C1::ScalarField::deserialize_with_mode(&mut reader, compress, validate)?;
let z_0 = Vec::<C1::ScalarField>::deserialize_with_mode(&mut reader, compress, validate)?;
let z_i = Vec::<C1::ScalarField>::deserialize_with_mode(&mut reader, compress, validate)?;
let w_i = Witness::<C1>::deserialize_with_mode(&mut reader, compress, validate)?;
let u_i = CommittedInstance::<C1>::deserialize_with_mode(&mut reader, compress, validate)?;
let W_i = Witness::<C1>::deserialize_with_mode(&mut reader, compress, validate)?;
let U_i = CommittedInstance::<C1>::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_W_i = Witness::<C2>::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_U_i =
CommittedInstance::<C2>::deserialize_with_mode(&mut reader, compress, validate)?;
let f_circuit = FC::new(()).unwrap();
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
let augmented_F_circuit =
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(&poseidon_config, f_circuit.clone());
let cf_circuit = NovaCycleFoldCircuit::<C1, GC1>::empty();
augmented_F_circuit
.generate_constraints(cs.clone())
.map_err(|_| SerializationError::InvalidData)?;
cs.finalize();
let cs = cs.into_inner().ok_or(SerializationError::InvalidData)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
cf_circuit
.generate_constraints(cs2.clone())
.map_err(|_| SerializationError::InvalidData)?;
cs2.finalize();
let cs2 = cs2.into_inner().ok_or(SerializationError::InvalidData)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
Ok(Nova {
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
r1cs,
cf_r1cs,
poseidon_config,
cs_pp: prover_params.cs_pp,
cf_cs_pp: prover_params.cf_cs_pp,
F: f_circuit,
pp_hash,
i,
z_0,
z_i,
w_i,
u_i,
W_i,
U_i,
cf_W_i,
cf_U_i,
})
}
}
#[cfg(test)]
pub mod tests {
use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective};
use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2};
use ark_serialize::{CanonicalSerialize, Compress, Validate};
use std::{fs, io::Write};
use crate::{
commitment::{kzg::KZG, pedersen::Pedersen},
folding::nova::{Nova, PreprocessorParam},
frontend::{utils::CubicFCircuit, FCircuit},
transcript::poseidon::poseidon_canonical_config,
FoldingScheme,
};
#[test]
fn test_serde_nova() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
// Initialize nova and make multiple `prove_step()`
type N = Nova<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
KZG<'static, Bn254>,
Pedersen<Projective2>,
false,
>;
let prep_param = PreprocessorParam::new(poseidon_config.clone(), F_circuit);
let nova_params = N::preprocess(&mut rng, &prep_param).unwrap();
let z_0 = vec![Fr::from(3_u32)];
let mut nova = N::init(&nova_params, F_circuit, z_0.clone()).unwrap();
let num_steps: usize = 3;
for _ in 0..num_steps {
nova.prove_step(&mut rng, vec![], None).unwrap();
}
let mut writer = vec![];
assert!(nova
.serialize_with_mode(&mut writer, ark_serialize::Compress::No)
.is_ok());
let mut file = fs::OpenOptions::new()
.create(true)
.write(true)
.open("./nova.serde")
.unwrap();
file.write_all(&writer).unwrap();
let bytes = fs::read("./nova.serde").unwrap();
let mut deserialized_nova = Nova::<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
KZG<Bn254>,
Pedersen<Projective2>,
false,
>::deserialize_nova(
bytes.as_slice(),
Compress::No,
Validate::No,
nova_params.0, // Nova's prover params
poseidon_config,
)
.unwrap();
assert_eq!(nova.i, deserialized_nova.i);
let num_steps: usize = 3;
for _ in 0..num_steps {
deserialized_nova
.prove_step(&mut rng, vec![], None)
.unwrap();
nova.prove_step(&mut rng, vec![], None).unwrap();
}
assert_eq!(deserialized_nova.w_i, nova.w_i);
}
}

+ 119
- 39
folding-schemes/src/folding/nova/traits.rs

@ -1,50 +1,139 @@
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::CurveGroup; use ark_ec::CurveGroup;
use ark_std::{rand::RngCore, One, UniformRand};
use ark_std::fmt::Debug;
use ark_std::{rand::RngCore, UniformRand};
use super::{CommittedInstance, Witness}; use super::{CommittedInstance, Witness};
use crate::arith::r1cs::{RelaxedR1CS, R1CS};
use crate::arith::ArithSampler;
use crate::arith::{r1cs::R1CS, Arith};
use crate::commitment::CommitmentScheme;
use crate::folding::circuits::CF1;
use crate::transcript::Transcript;
use crate::Error; use crate::Error;
impl<C: CurveGroup> RelaxedR1CS<C, Witness<C>, CommittedInstance<C>> for R1CS<C::ScalarField> {
fn dummy_running_instance(&self) -> (Witness<C>, CommittedInstance<C>) {
let w_len = self.A.n_cols - 1 - self.l;
let w_dummy = Witness::<C>::dummy(w_len, self.A.n_rows);
let u_dummy = CommittedInstance::<C>::dummy(self.l);
(w_dummy, u_dummy)
}
/// Defines the NIFS (Non-Interactive Folding Scheme) trait, initially defined in
/// [Nova](https://eprint.iacr.org/2021/370.pdf), and its variants
/// [Ova](https://hackmd.io/V4838nnlRKal9ZiTHiGYzw) and
/// [Mova](https://eprint.iacr.org/2024/1220.pdf).
/// `H` specifies whether the NIFS will use a blinding factor.
pub trait NIFSTrait<C: CurveGroup, CS: CommitmentScheme<C, H>, const H: bool = false> {
type CommittedInstance: Debug + Clone + Absorb;
type Witness: Debug + Clone;
type ProverAux: Debug + Clone; // Prover's aux params
type VerifierAux: Debug + Clone; // Verifier's aux params
fn dummy_incoming_instance(&self) -> (Witness<C>, CommittedInstance<C>) {
self.dummy_running_instance()
}
fn new_witness(w: Vec<C::ScalarField>, e_len: usize, rng: impl RngCore) -> Self::Witness;
fn new_instance(
w: &Self::Witness,
params: &CS::ProverParams,
x: Vec<C::ScalarField>,
aux: Vec<C::ScalarField>, // t_or_e in Ova, empty for Nova
) -> Result<Self::CommittedInstance, Error>;
fn fold_witness(
r: C::ScalarField,
W: &Self::Witness, // running witness
w: &Self::Witness, // incoming witness
aux: &Self::ProverAux,
) -> Result<Self::Witness, Error>;
fn is_relaxed(_w: &Witness<C>, u: &CommittedInstance<C>) -> bool {
u.cmE != C::zero() || u.u != C::ScalarField::one()
/// computes the auxiliary parameters, eg. in Nova: (T, cmT), in Ova: T
fn compute_aux(
cs_prover_params: &CS::ProverParams,
r1cs: &R1CS<C::ScalarField>,
W_i: &Self::Witness,
U_i: &Self::CommittedInstance,
w_i: &Self::Witness,
u_i: &Self::CommittedInstance,
) -> Result<(Self::ProverAux, Self::VerifierAux), Error>;
fn get_challenge<T: Transcript<C::ScalarField>>(
transcript: &mut T,
pp_hash: C::ScalarField, // public params hash
U_i: &Self::CommittedInstance,
u_i: &Self::CommittedInstance,
aux: &Self::VerifierAux, // i.e. in Nova it would be cmT, in Ova it's empty
) -> Vec<bool>;
/// NIFS.P. Notice that this method is implemented at the trait level, and depends on the other
/// two methods `fold_witness` and `verify`.
fn prove(
r: C::ScalarField,
W_i: &Self::Witness, // running witness
U_i: &Self::CommittedInstance, // running committed instance
w_i: &Self::Witness, // incoming witness
u_i: &Self::CommittedInstance, // incoming committed instance
aux_p: &Self::ProverAux,
aux_v: &Self::VerifierAux,
) -> Result<(Self::Witness, Self::CommittedInstance), Error> {
let w = Self::fold_witness(r, W_i, w_i, aux_p)?;
let ci = Self::verify(r, U_i, u_i, aux_v);
Ok((w, ci))
} }
fn extract_z(w: &Witness<C>, u: &CommittedInstance<C>) -> Vec<C::ScalarField> {
[&[u.u][..], &u.x, &w.W].concat()
/// NIFS.V
fn verify(
// r comes from the transcript, and is an n-bit (N_BITS_CHALLENGE) element
r: C::ScalarField,
U_i: &Self::CommittedInstance,
u_i: &Self::CommittedInstance,
aux: &Self::VerifierAux,
) -> Self::CommittedInstance;
}
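// Typical flow with any `N: NIFSTrait<C, CS>` (a sketch, not part of this changeset; it mirrors
// the generic `test_nifs_opt` helper in `nova/nifs.rs`, and assumes `ark_ff::{BigInteger,
// PrimeField}` in scope):
//
// let (aux_p, aux_v) = N::compute_aux(&cs_params, &r1cs, &W_i, &U_i, &w_i, &u_i)?;
// let r_bits = N::get_challenge(&mut transcript, pp_hash, &U_i, &u_i, &aux_v);
// let r = C::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).unwrap();
// let (W_folded, U_folded) = N::prove(r, &W_i, &U_i, &w_i, &u_i, &aux_p, &aux_v)?;
// // the verifier only recomputes the folded committed instance:
// let U_folded_v = N::verify(r, &U_i, &u_i, &aux_v);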
/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and
/// the committed instance is of type [`CommittedInstance`].
///
/// Due to the error terms `Witness.E` and `CommittedInstance.u`, R1CS here is
/// considered as a relaxed R1CS.
///
/// One may wonder why we do not provide distinct structs for R1CS and relaxed
/// R1CS.
/// This is because both plain R1CS and relaxed R1CS have the same structure:
/// they are both represented by three matrices.
/// What makes them different is the error terms, which are not part of the R1CS
/// struct, but are part of the witness and committed instance.
///
/// As a follow-up, one may further ask why we do not provide a trait for
/// relaxed R1CS and implement it for the `R1CS` struct, where the relaxed R1CS
/// trait has methods for the relaxed satisfiability check, while the `Arith`
/// trait that `R1CS` implements has methods for the plain satisfiability check.
/// The answer is that a single method which can smartly choose the type of
/// satisfiability check makes the code more generic and easier to maintain.
///
/// This is achieved thanks to the new design of the [`Arith`] trait, where we
/// can implement the trait for the same constraint system with different types
/// of witnesses and committed instances.
/// For R1CS, whether it is relaxed or not is now determined by the types of `W`
/// and `U`: the satisfiability check is relaxed if `W` and `U` are defined by
/// folding schemes, and plain if they are vectors of field elements.
impl<C: CurveGroup> Arith<Witness<C>, CommittedInstance<C>> for R1CS<CF1<C>> {
type Evaluation = Vec<CF1<C>>;
fn eval_relation(
&self,
w: &Witness<C>,
u: &CommittedInstance<C>,
) -> Result<Self::Evaluation, Error> {
self.eval_at_z(&[&[u.u][..], &u.x, &w.W].concat())
} }
fn check_error_terms(
fn check_evaluation(
w: &Witness<C>, w: &Witness<C>,
_u: &CommittedInstance<C>, _u: &CommittedInstance<C>,
e: Vec<C::ScalarField>,
e: Self::Evaluation,
) -> Result<(), Error> { ) -> Result<(), Error> {
if w.E == e {
Ok(())
} else {
Err(Error::NotSatisfied)
}
(w.E == e).then_some(()).ok_or(Error::NotSatisfied)
} }
}
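// Concretely (not part of this changeset): `eval_relation` evaluates the R1CS at z = [u, x, W]
// and `check_evaluation` compares the resulting error vector against `Witness.E`. With `u = 1`
// and `E = 0` this is exactly the plain R1CS check, which is how a single `Arith` impl covers
// both the relaxed and the plain satisfiability notion:
//
// r1cs.check_relation(&witness, &committed_instance)?; // relaxed check, driven by the (W, U) types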
fn sample<CS>(
impl<C: CurveGroup> ArithSampler<C, Witness<C>, CommittedInstance<C>> for R1CS<CF1<C>> {
fn sample_witness_instance<CS: CommitmentScheme<C, true>>(
&self, &self,
params: &CS::ProverParams, params: &CS::ProverParams,
mut rng: impl RngCore, mut rng: impl RngCore,
) -> Result<(Witness<C>, CommittedInstance<C>), Error>
where
CS: crate::commitment::CommitmentScheme<C, true>,
{
) -> Result<(Witness<C>, CommittedInstance<C>), Error> {
// Implements sampling a (committed) RelaxedR1CS // Implements sampling a (committed) RelaxedR1CS
// See construction 5 in https://eprint.iacr.org/2023/573.pdf // See construction 5 in https://eprint.iacr.org/2023/573.pdf
let u = C::ScalarField::rand(&mut rng); let u = C::ScalarField::rand(&mut rng);
@ -61,16 +150,7 @@ impl RelaxedR1CS, CommittedInstance> for R1CS
z.extend(&x); z.extend(&x);
z.extend(&W); z.extend(&W);
let E = <Self as RelaxedR1CS<C, Witness<C>, CommittedInstance<C>>>::compute_E(
&self.A, &self.B, &self.C, &z, &u,
)?;
debug_assert!(
z.len() == self.A.n_cols,
"Length of z is {}, while A has {} columns.",
z.len(),
self.A.n_cols
);
let E = self.eval_at_z(&z)?;
let witness = Witness { E, rE, W, rW }; let witness = Witness { E, rE, W, rW };
let mut cm_witness = witness.commit::<CS, true>(params, x)?; let mut cm_witness = witness.commit::<CS, true>(params, x)?;
@ -79,7 +159,7 @@ impl RelaxedR1CS, CommittedInstance> for R1CS
cm_witness.u = u; cm_witness.u = u;
debug_assert!( debug_assert!(
self.check_relaxed_relation(&witness, &cm_witness).is_ok(),
self.check_relation(&witness, &cm_witness).is_ok(),
"Sampled a non satisfiable relaxed R1CS, sampled u: {}, computed E: {:?}", "Sampled a non satisfiable relaxed R1CS, sampled u: {}, computed E: {:?}",
u, u,
witness.E witness.E

+ 26
- 33
folding-schemes/src/folding/nova/zk.rs

@ -35,7 +35,8 @@ use ark_ff::{BigInteger, PrimeField};
use ark_std::{One, Zero}; use ark_std::{One, Zero};
use crate::{ use crate::{
arith::r1cs::{RelaxedR1CS, R1CS},
arith::{r1cs::R1CS, Arith, ArithSampler},
folding::traits::CommittedInstanceOps,
RngCore, RngCore,
}; };
use ark_crypto_primitives::sponge::{ use ark_crypto_primitives::sponge::{
@ -50,7 +51,9 @@ use ark_r1cs_std::{
use crate::{commitment::CommitmentScheme, folding::circuits::CF2, frontend::FCircuit, Error}; use crate::{commitment::CommitmentScheme, folding::circuits::CF2, frontend::FCircuit, Error};
use super::{circuits::ChallengeGadget, nifs::NIFS, CommittedInstance, Nova, Witness};
use super::{
circuits::ChallengeGadget, nifs::NIFS, traits::NIFSTrait, CommittedInstance, Nova, Witness,
};
// We use the same definition of a folding proof as in https://eprint.iacr.org/2023/969.pdf // We use the same definition of a folding proof as in https://eprint.iacr.org/2023/969.pdf
// It consists in the commitment to the T term // It consists in the commitment to the T term
@ -82,7 +85,13 @@ where
u_i: CommittedInstance<C1>, u_i: CommittedInstance<C1>,
cmT: C1, cmT: C1,
) -> Result<C1::ScalarField, Error> { ) -> Result<C1::ScalarField, Error> {
let r_bits = ChallengeGadget::<C1>::get_challenge_native(sponge, pp_hash, U_i, u_i, cmT);
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_native(
sponge,
pp_hash,
&U_i,
&u_i,
Some(&cmT),
);
C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).ok_or(Error::OutOfBounds) C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)).ok_or(Error::OutOfBounds)
} }
@ -133,15 +142,16 @@ where
)?; )?;
// c. Compute fold // c. Compute fold
let (W_f, U_f) = NIFS::<C1, CS1, true>::fold_instances(
r, &nova.w_i, &nova.u_i, &nova.W_i, &nova.U_i, &T, cmT,
)?;
let (W_f, U_f) =
NIFS::<C1, CS1, true>::prove(r, &nova.w_i, &nova.u_i, &nova.W_i, &nova.U_i, &T, &cmT)?;
// d. Store folding proof // d. Store folding proof
let pi = FoldingProof { cmT }; let pi = FoldingProof { cmT };
// 2. Sample a satisfying relaxed R1CS instance-witness pair (W_r, U_r) // 2. Sample a satisfying relaxed R1CS instance-witness pair (W_r, U_r)
let (W_r, U_r) = nova.r1cs.sample::<CS1>(&nova.cs_pp, &mut rng)?;
let (W_r, U_r) = nova
.r1cs
.sample_witness_instance::<CS1>(&nova.cs_pp, &mut rng)?;
// 3. Fold the instance-witness pair (U_f, W_f) with (U_r, W_r) // 3. Fold the instance-witness pair (U_f, W_f) with (U_r, W_r)
// a. Compute T // a. Compute T
@ -158,15 +168,8 @@ where
)?; )?;
// c. Compute fold // c. Compute fold
let (W_i_prime, _) = NIFS::<C1, CS1, true>::fold_instances(
r_2,
&W_f,
&U_f,
&W_r,
&U_r,
&T_i_prime,
cmT_i_prime,
)?;
let (W_i_prime, _) =
NIFS::<C1, CS1, true>::prove(r_2, &W_f, &U_f, &W_r, &U_r, &T_i_prime, &cmT_i_prime)?;
// d. Store folding proof // d. Store folding proof
let pi_prime = FoldingProof { cmT: cmT_i_prime }; let pi_prime = FoldingProof { cmT: cmT_i_prime };
@ -226,7 +229,7 @@ where
// b. Check computed hashes are correct // b. Check computed hashes are correct
let mut sponge = PoseidonSponge::<C1::ScalarField>::new(poseidon_config); let mut sponge = PoseidonSponge::<C1::ScalarField>::new(poseidon_config);
let expected_u_i_x = proof.U_i.hash(&sponge, pp_hash, i, z_0, z_i);
let expected_u_i_x = proof.U_i.hash(&sponge, pp_hash, i, &z_0, &z_i);
if expected_u_i_x != proof.u_i.x[0] { if expected_u_i_x != proof.u_i.x[0] {
return Err(Error::zkIVCVerificationFail); return Err(Error::zkIVCVerificationFail);
} }
@ -252,12 +255,7 @@ where
)?; )?;
// b. Get the U_f instance // b. Get the U_f instance
let U_f = NIFS::<C1, CS1, true>::fold_committed_instance(
r,
&proof.u_i,
&proof.U_i,
&proof.pi.cmT,
);
let U_f = NIFS::<C1, CS1, true>::verify(r, &proof.u_i, &proof.U_i, &proof.pi.cmT);
// 4. Obtain the U^{\prime}_i folded instance // 4. Obtain the U^{\prime}_i folded instance
// a. Compute folding challenge // a. Compute folding challenge
@ -270,18 +268,13 @@ where
)?; )?;
// b. Compute fold // b. Compute fold
let U_i_prime = NIFS::<C1, CS1, true>::fold_committed_instance(
r_2,
&U_f,
&proof.U_r,
&proof.pi_prime.cmT,
);
let U_i_prime = NIFS::<C1, CS1, true>::verify(r_2, &U_f, &proof.U_r, &proof.pi_prime.cmT);
// 5. Check that W^{\prime}_i is a satisfying witness // 5. Check that W^{\prime}_i is a satisfying witness
r1cs.check_relaxed_relation(&proof.W_i_prime, &U_i_prime)?;
r1cs.check_relation(&proof.W_i_prime, &U_i_prime)?;
// 6. Check that the cyclefold instance-witness pair satisfies the cyclefold relaxed r1cs // 6. Check that the cyclefold instance-witness pair satisfies the cyclefold relaxed r1cs
cf_r1cs.check_relaxed_relation(&proof.cf_W_i, &proof.cf_U_i)?;
cf_r1cs.check_relation(&proof.cf_W_i, &proof.cf_U_i)?;
Ok(()) Ok(())
} }
@ -369,7 +362,7 @@ pub mod tests {
); );
let (_, sampled_committed_instance) = nova let (_, sampled_committed_instance) = nova
.r1cs .r1cs
.sample::<Pedersen<Projective, true>>(&nova.cs_pp, rng)
.sample_witness_instance::<Pedersen<Projective, true>>(&nova.cs_pp, rng)
.unwrap(); .unwrap();
// proof verification fails with incorrect running instance // proof verification fails with incorrect running instance
@ -406,7 +399,7 @@ pub mod tests {
); );
let (sampled_committed_witness, _) = nova let (sampled_committed_witness, _) = nova
.r1cs .r1cs
.sample::<Pedersen<Projective, true>>(&nova.cs_pp, rng)
.sample_witness_instance::<Pedersen<Projective, true>>(&nova.cs_pp, rng)
.unwrap(); .unwrap();
// proof generation fails with incorrect running witness // proof generation fails with incorrect running witness

+ 33
- 36
folding-schemes/src/folding/protogalaxy/circuits.rs

@ -24,13 +24,16 @@ use super::{
CommittedInstance, CommittedInstanceVar, ProtoGalaxyCycleFoldConfig, CommittedInstance, CommittedInstanceVar, ProtoGalaxyCycleFoldConfig,
}; };
use crate::{ use crate::{
folding::circuits::{
cyclefold::{
CycleFoldChallengeGadget, CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar,
CycleFoldConfig, NIFSFullGadget,
folding::{
circuits::{
cyclefold::{
CycleFoldChallengeGadget, CycleFoldCommittedInstance,
CycleFoldCommittedInstanceVar, CycleFoldConfig, NIFSFullGadget,
},
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
CF1, CF2,
}, },
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
CF1, CF2,
traits::{CommittedInstanceVarOps, Dummy},
}, },
frontend::FCircuit, frontend::FCircuit,
transcript::{AbsorbNonNativeGadget, TranscriptVar}, transcript::{AbsorbNonNativeGadget, TranscriptVar},
@ -44,13 +47,13 @@ impl FoldingGadget {
pub fn fold_committed_instance<C: CurveGroup, S: CryptographicSponge>( pub fn fold_committed_instance<C: CurveGroup, S: CryptographicSponge>(
transcript: &mut impl TranscriptVar<C::ScalarField, S>, transcript: &mut impl TranscriptVar<C::ScalarField, S>,
// running instance // running instance
instance: &CommittedInstanceVar<C>,
instance: &CommittedInstanceVar<C, true>,
// incoming instances // incoming instances
vec_instances: &[CommittedInstanceVar<C>],
vec_instances: &[CommittedInstanceVar<C, false>],
// polys from P // polys from P
F_coeffs: Vec<FpVar<C::ScalarField>>, F_coeffs: Vec<FpVar<C::ScalarField>>,
K_coeffs: Vec<FpVar<C::ScalarField>>, K_coeffs: Vec<FpVar<C::ScalarField>>,
) -> Result<(CommittedInstanceVar<C>, Vec<FpVar<C::ScalarField>>), SynthesisError> {
) -> Result<(CommittedInstanceVar<C, true>, Vec<FpVar<C::ScalarField>>), SynthesisError> {
let t = instance.betas.len(); let t = instance.betas.len();
// absorb the committed instances // absorb the committed instances
@ -132,13 +135,13 @@ impl AugmentationGadget {
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub fn prepare_and_fold_primary<C: CurveGroup, S: CryptographicSponge>( pub fn prepare_and_fold_primary<C: CurveGroup, S: CryptographicSponge>(
transcript: &mut impl TranscriptVar<CF1<C>, S>, transcript: &mut impl TranscriptVar<CF1<C>, S>,
U: CommittedInstanceVar<C>,
U: CommittedInstanceVar<C, true>,
u_phis: Vec<NonNativeAffineVar<C>>, u_phis: Vec<NonNativeAffineVar<C>>,
u_xs: Vec<Vec<FpVar<CF1<C>>>>, u_xs: Vec<Vec<FpVar<CF1<C>>>>,
new_U_phi: NonNativeAffineVar<C>, new_U_phi: NonNativeAffineVar<C>,
F_coeffs: Vec<FpVar<CF1<C>>>, F_coeffs: Vec<FpVar<CF1<C>>>,
K_coeffs: Vec<FpVar<CF1<C>>>, K_coeffs: Vec<FpVar<CF1<C>>>,
) -> Result<(CommittedInstanceVar<C>, Vec<FpVar<CF1<C>>>), SynthesisError> {
) -> Result<(CommittedInstanceVar<C, true>, Vec<FpVar<CF1<C>>>), SynthesisError> {
assert_eq!(u_phis.len(), u_xs.len()); assert_eq!(u_phis.len(), u_xs.len());
// Prepare the incoming instances. // Prepare the incoming instances.
@ -247,7 +250,7 @@ pub struct AugmentedFCircuit<
pub(super) external_inputs: Vec<CF1<C1>>, pub(super) external_inputs: Vec<CF1<C1>>,
pub(super) F: FC, // F circuit pub(super) F: FC, // F circuit
pub(super) u_i_phi: C1, pub(super) u_i_phi: C1,
pub(super) U_i: CommittedInstance<C1>,
pub(super) U_i: CommittedInstance<C1, true>,
pub(super) U_i1_phi: C1, pub(super) U_i1_phi: C1,
pub(super) F_coeffs: Vec<CF1<C1>>, pub(super) F_coeffs: Vec<CF1<C1>>,
pub(super) K_coeffs: Vec<CF1<C1>>, pub(super) K_coeffs: Vec<CF1<C1>>,
@ -275,7 +278,7 @@ where
d: usize, d: usize,
k: usize, k: usize,
) -> Self { ) -> Self {
let u_dummy = CommittedInstance::dummy_running(2, t);
let u_dummy = CommittedInstance::dummy((2, t));
let cf_u_dummy = let cf_u_dummy =
CycleFoldCommittedInstance::dummy(ProtoGalaxyCycleFoldConfig::<C1>::IO_LEN); CycleFoldCommittedInstance::dummy(ProtoGalaxyCycleFoldConfig::<C1>::IO_LEN);
@ -324,8 +327,8 @@ where
let external_inputs = let external_inputs =
Vec::<FpVar<CF1<C1>>>::new_witness(cs.clone(), || Ok(self.external_inputs))?; Vec::<FpVar<CF1<C1>>>::new_witness(cs.clone(), || Ok(self.external_inputs))?;
let u_dummy = CommittedInstance::<C1>::dummy_running(2, self.U_i.betas.len());
let U_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || Ok(self.U_i))?;
let u_dummy = CommittedInstance::<C1, true>::dummy((2, self.U_i.betas.len()));
let U_i = CommittedInstanceVar::<C1, true>::new_witness(cs.clone(), || Ok(self.U_i))?;
let u_i_phi = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.u_i_phi))?; let u_i_phi = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.u_i_phi))?;
let U_i1_phi = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.U_i1_phi))?; let U_i1_phi = NonNativeAffineVar::new_witness(cs.clone(), || Ok(self.U_i1_phi))?;
let phi_stars = let phi_stars =
@ -346,24 +349,12 @@ where
// `transcript` is for challenge generation. // `transcript` is for challenge generation.
let mut transcript = sponge.clone(); let mut transcript = sponge.clone();
// get z_{i+1} from the F circuit
let i_usize = self.i_usize;
let z_i1 =
self.F
.generate_step_constraints(cs.clone(), i_usize, z_i.clone(), external_inputs)?;
let is_basecase = i.is_zero()?; let is_basecase = i.is_zero()?;
// Primary Part // Primary Part
// P.1. Compute u_i.x // P.1. Compute u_i.x
// u_i.x[0] = H(i, z_0, z_i, U_i) // u_i.x[0] = H(i, z_0, z_i, U_i)
let (u_i_x, _) = U_i.clone().hash(
&sponge,
pp_hash.clone(),
i.clone(),
z_0.clone(),
z_i.clone(),
)?;
let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
// u_i.x[1] = H(cf_U_i) // u_i.x[1] = H(cf_U_i)
let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
@ -380,21 +371,27 @@ where
)?; )?;
// P.4.a compute and check the first output of F' // P.4.a compute and check the first output of F'
// get z_{i+1} from the F circuit
let z_i1 =
self.F
.generate_step_constraints(cs.clone(), self.i_usize, z_i, external_inputs)?;
// Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot}) // Base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{\bot})
// Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1}) // Non-base case: u_{i+1}.x[0] == H((i+1, z_0, z_{i+1}, U_{i+1})
let (u_i1_x, _) = U_i1.clone().hash( let (u_i1_x, _) = U_i1.clone().hash(
&sponge, &sponge,
pp_hash.clone(),
i + FpVar::<CF1<C1>>::one(),
z_0.clone(),
z_i1.clone(),
&pp_hash,
&(i + FpVar::<CF1<C1>>::one()),
&z_0,
&z_i1,
)?; )?;
let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash( let (u_i1_x_base, _) = CommittedInstanceVar::new_constant(cs.clone(), u_dummy)?.hash(
&sponge, &sponge,
pp_hash.clone(),
FpVar::<CF1<C1>>::one(),
z_0.clone(),
z_i1.clone(),
&pp_hash,
&FpVar::<CF1<C1>>::one(),
&z_0,
&z_i1,
)?; )?;
let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?; let x = FpVar::new_input(cs.clone(), || Ok(self.x.unwrap_or(u_i1_x_base.value()?)))?;
x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?; x.enforce_equal(&is_basecase.select(&u_i1_x_base, &u_i1_x)?)?;

+ 4
- 0
folding-schemes/src/folding/protogalaxy/constants.rs

@ -0,0 +1,4 @@
/// `RUNNING` indicates that the committed instance is a running instance.
pub const RUNNING: bool = true;
/// `INCOMING` indicates that the committed instance is an incoming instance.
pub const INCOMING: bool = false;

+ 29
- 27
folding-schemes/src/folding/protogalaxy/folding.rs

@ -14,9 +14,7 @@ use super::utils::{all_powers, betas_star, exponential_powers, pow_i};
use super::ProtoGalaxyError; use super::ProtoGalaxyError;
use super::{CommittedInstance, Witness}; use super::{CommittedInstance, Witness};
#[cfg(test)]
use crate::arith::r1cs::RelaxedR1CS;
use crate::arith::{r1cs::R1CS, Arith};
use crate::arith::r1cs::R1CS;
use crate::transcript::Transcript; use crate::transcript::Transcript;
use crate::utils::vec::*; use crate::utils::vec::*;
use crate::Error; use crate::Error;
@ -38,14 +36,14 @@ where
transcript: &mut impl Transcript<C::ScalarField>, transcript: &mut impl Transcript<C::ScalarField>,
r1cs: &R1CS<C::ScalarField>, r1cs: &R1CS<C::ScalarField>,
// running instance // running instance
instance: &CommittedInstance<C>,
instance: &CommittedInstance<C, true>,
w: &Witness<C::ScalarField>, w: &Witness<C::ScalarField>,
// incoming instances // incoming instances
vec_instances: &[CommittedInstance<C>],
vec_instances: &[CommittedInstance<C, false>],
vec_w: &[Witness<C::ScalarField>], vec_w: &[Witness<C::ScalarField>],
) -> Result< ) -> Result<
( (
CommittedInstance<C>,
CommittedInstance<C, true>,
Witness<C::ScalarField>, Witness<C::ScalarField>,
Vec<C::ScalarField>, // F_X coeffs Vec<C::ScalarField>, // F_X coeffs
Vec<C::ScalarField>, // K_X coeffs Vec<C::ScalarField>, // K_X coeffs
@ -97,7 +95,7 @@ where
let delta = transcript.get_challenge(); let delta = transcript.get_challenge();
let deltas = exponential_powers(delta, t); let deltas = exponential_powers(delta, t);
let mut f_z = r1cs.eval_relation(&z)?;
let mut f_z = r1cs.eval_at_z(&z)?;
if f_z.len() != m { if f_z.len() != m {
return Err(Error::NotSameLength( return Err(Error::NotSameLength(
"number of constraints in R1CS".to_string(), "number of constraints in R1CS".to_string(),
@ -127,15 +125,18 @@ where
// sanity check: check that the new randomized instance (the original instance but with // sanity check: check that the new randomized instance (the original instance but with
// 'refreshed' randomness) satisfies the relation. // 'refreshed' randomness) satisfies the relation.
#[cfg(test)] #[cfg(test)]
r1cs.check_relaxed_relation(
w,
&CommittedInstance {
phi: instance.phi,
betas: betas_star.clone(),
e: F_alpha,
x: instance.x.clone(),
},
)?;
{
use crate::arith::Arith;
r1cs.check_relation(
w,
&CommittedInstance::<_, true> {
phi: instance.phi,
betas: betas_star.clone(),
e: F_alpha,
x: instance.x.clone(),
},
)?;
}
let zs: Vec<Vec<C::ScalarField>> = std::iter::once(z.clone()) let zs: Vec<Vec<C::ScalarField>> = std::iter::once(z.clone())
.chain( .chain(
@ -178,7 +179,7 @@ where
inner[j] += Lh * zj; inner[j] += Lh * zj;
} }
} }
let f_ev = r1cs.eval_relation(&inner)?;
let f_ev = r1cs.eval_at_z(&inner)?;
G_evals[hi] = cfg_into_iter!(f_ev) G_evals[hi] = cfg_into_iter!(f_ev)
.enumerate() .enumerate()
@ -253,13 +254,13 @@ where
pub fn verify( pub fn verify(
transcript: &mut impl Transcript<C::ScalarField>, transcript: &mut impl Transcript<C::ScalarField>,
// running instance // running instance
instance: &CommittedInstance<C>,
instance: &CommittedInstance<C, true>,
// incoming instances // incoming instances
vec_instances: &[CommittedInstance<C>],
vec_instances: &[CommittedInstance<C, false>],
// polys from P // polys from P
F_coeffs: Vec<C::ScalarField>, F_coeffs: Vec<C::ScalarField>,
K_coeffs: Vec<C::ScalarField>, K_coeffs: Vec<C::ScalarField>,
) -> Result<CommittedInstance<C>, Error> {
) -> Result<CommittedInstance<C, true>, Error> {
let t = instance.betas.len(); let t = instance.betas.len();
// absorb the committed instances // absorb the committed instances
@ -395,6 +396,7 @@ pub mod tests {
use ark_std::{rand::Rng, UniformRand}; use ark_std::{rand::Rng, UniformRand};
use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z_split}; use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z_split};
use crate::arith::Arith;
use crate::commitment::{pedersen::Pedersen, CommitmentScheme}; use crate::commitment::{pedersen::Pedersen, CommitmentScheme};
use crate::transcript::poseidon::poseidon_canonical_config; use crate::transcript::poseidon::poseidon_canonical_config;
@ -419,9 +421,9 @@ pub mod tests {
k: usize, k: usize,
) -> ( ) -> (
Witness<C::ScalarField>, Witness<C::ScalarField>,
CommittedInstance<C>,
CommittedInstance<C, true>,
Vec<Witness<C::ScalarField>>, Vec<Witness<C::ScalarField>>,
Vec<CommittedInstance<C>>,
Vec<CommittedInstance<C, false>>,
) { ) {
let mut rng = ark_std::test_rng(); let mut rng = ark_std::test_rng();
@ -439,7 +441,7 @@ pub mod tests {
r_w: C::ScalarField::zero(), r_w: C::ScalarField::zero(),
}; };
let phi = Pedersen::<C>::commit(&pedersen_params, &witness.w, &witness.r_w).unwrap(); let phi = Pedersen::<C>::commit(&pedersen_params, &witness.w, &witness.r_w).unwrap();
let instance = CommittedInstance::<C> {
let instance = CommittedInstance::<C, true> {
phi, phi,
betas: betas.clone(), betas: betas.clone(),
e: C::ScalarField::zero(), e: C::ScalarField::zero(),
@ -447,7 +449,7 @@ pub mod tests {
}; };
// same for the other instances // same for the other instances
let mut witnesses: Vec<Witness<C::ScalarField>> = Vec::new(); let mut witnesses: Vec<Witness<C::ScalarField>> = Vec::new();
let mut instances: Vec<CommittedInstance<C>> = Vec::new();
let mut instances: Vec<CommittedInstance<C, false>> = Vec::new();
#[allow(clippy::needless_range_loop)] #[allow(clippy::needless_range_loop)]
for _ in 0..k { for _ in 0..k {
let (_, x_i, w_i) = get_test_z_split::<C::ScalarField>(rng.gen::<u16>() as usize); let (_, x_i, w_i) = get_test_z_split::<C::ScalarField>(rng.gen::<u16>() as usize);
@ -457,7 +459,7 @@ pub mod tests {
}; };
let phi_i = let phi_i =
Pedersen::<C>::commit(&pedersen_params, &witness_i.w, &witness_i.r_w).unwrap(); Pedersen::<C>::commit(&pedersen_params, &witness_i.w, &witness_i.r_w).unwrap();
let instance_i = CommittedInstance::<C> {
let instance_i = CommittedInstance::<C, false> {
phi: phi_i, phi: phi_i,
betas: vec![], betas: vec![],
e: C::ScalarField::zero(), e: C::ScalarField::zero(),
@ -509,7 +511,7 @@ pub mod tests {
assert!(!folded_instance.e.is_zero()); assert!(!folded_instance.e.is_zero());
// check that the folded instance satisfies the relation // check that the folded instance satisfies the relation
r1cs.check_relaxed_relation(&folded_witness, &folded_instance)
r1cs.check_relation(&folded_witness, &folded_instance)
.unwrap(); .unwrap();
} }
@ -558,7 +560,7 @@ pub mod tests {
assert!(!folded_instance.e.is_zero()); assert!(!folded_instance.e.is_zero());
// check that the folded instance satisfies the relation // check that the folded instance satisfies the relation
r1cs.check_relaxed_relation(&folded_witness, &folded_instance)
r1cs.check_relation(&folded_witness, &folded_instance)
.unwrap(); .unwrap();
running_witness = folded_witness; running_witness = folded_witness;

+ 409
- 133
folding-schemes/src/folding/protogalaxy/mod.rs

@ -1,27 +1,32 @@
/// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) /// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
use ark_crypto_primitives::sponge::{ use ark_crypto_primitives::sponge::{
constraints::{AbsorbGadget, CryptographicSpongeVar},
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
poseidon::{PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge, Absorb, CryptographicSponge,
}; };
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{ use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode}, alloc::{AllocVar, AllocationMode},
fields::fp::FpVar,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
groups::{CurveVar, GroupOpsBounds}, groups::{CurveVar, GroupOpsBounds},
R1CSVar, ToConstraintFieldGadget, R1CSVar, ToConstraintFieldGadget,
}; };
use ark_relations::r1cs::{ use ark_relations::r1cs::{
ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError, ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, SynthesisError,
}; };
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Valid};
use ark_std::{ use ark_std::{
borrow::Borrow, cmp::max, fmt::Debug, log2, marker::PhantomData, rand::RngCore, One, Zero, borrow::Borrow, cmp::max, fmt::Debug, log2, marker::PhantomData, rand::RngCore, One, Zero,
}; };
use constants::{INCOMING, RUNNING};
use num_bigint::BigUint; use num_bigint::BigUint;
use crate::{ use crate::{
arith::r1cs::{extract_r1cs, extract_w_x, RelaxedR1CS, R1CS},
arith::{
r1cs::{extract_r1cs, extract_w_x, R1CS},
Arith,
},
commitment::CommitmentScheme, commitment::CommitmentScheme,
folding::circuits::{ folding::circuits::{
cyclefold::{ cyclefold::{
@ -32,11 +37,13 @@ use crate::{
CF1, CF2, CF1, CF2,
}, },
frontend::{utils::DummyCircuit, FCircuit}, frontend::{utils::DummyCircuit, FCircuit},
transcript::poseidon::poseidon_canonical_config,
utils::{get_cm_coordinates, pp_hash}, utils::{get_cm_coordinates, pp_hash},
Error, FoldingScheme, Error, FoldingScheme,
}; };
pub mod circuits; pub mod circuits;
pub mod constants;
pub mod folding; pub mod folding;
pub mod traits; pub mod traits;
pub(crate) mod utils; pub(crate) mod utils;
@ -44,6 +51,10 @@ pub(crate) mod utils;
use circuits::AugmentedFCircuit; use circuits::AugmentedFCircuit;
use folding::Folding; use folding::Folding;
use super::traits::{
CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps, WitnessVarOps,
};
/// Configuration for ProtoGalaxy's CycleFold circuit /// Configuration for ProtoGalaxy's CycleFold circuit
pub struct ProtoGalaxyCycleFoldConfig<C: CurveGroup> { pub struct ProtoGalaxyCycleFoldConfig<C: CurveGroup> {
_c: PhantomData<C>, _c: PhantomData<C>,
@ -60,66 +71,68 @@ impl CycleFoldConfig for ProtoGalaxyCycleFoldConfig {
/// in ProtoGalaxy instances. /// in ProtoGalaxy instances.
pub type ProtoGalaxyCycleFoldCircuit<C, GC> = CycleFoldCircuit<ProtoGalaxyCycleFoldConfig<C>, GC>; pub type ProtoGalaxyCycleFoldCircuit<C, GC> = CycleFoldCircuit<ProtoGalaxyCycleFoldConfig<C>, GC>;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CommittedInstance<C: CurveGroup> {
/// The committed instance of ProtoGalaxy.
///
/// We use `TYPE` to distinguish between incoming and running instances, as
/// they have slightly different structures (e.g., length of `betas`) and
/// behaviors (e.g., in satisfiability checks).
#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)]
pub struct CommittedInstance<C: CurveGroup, const TYPE: bool> {
phi: C, phi: C,
betas: Vec<C::ScalarField>, betas: Vec<C::ScalarField>,
e: C::ScalarField, e: C::ScalarField,
x: Vec<C::ScalarField>, x: Vec<C::ScalarField>,
} }
impl<C: CurveGroup> CommittedInstance<C> {
pub fn dummy_running(io_len: usize, t: usize) -> Self {
impl<C: CurveGroup, const TYPE: bool> Dummy<(usize, usize)> for CommittedInstance<C, TYPE> {
fn dummy((io_len, t): (usize, usize)) -> Self {
if TYPE == INCOMING {
assert_eq!(t, 0);
}
Self { Self {
phi: C::zero(), phi: C::zero(),
betas: vec![C::ScalarField::zero(); t],
e: C::ScalarField::zero(),
x: vec![C::ScalarField::zero(); io_len],
betas: vec![Zero::zero(); t],
e: Zero::zero(),
x: vec![Zero::zero(); io_len],
} }
} }
}
pub fn dummy_incoming(io_len: usize) -> Self {
Self::dummy_running(io_len, 0)
impl<C: CurveGroup, const TYPE: bool> Dummy<&R1CS<CF1<C>>> for CommittedInstance<C, TYPE> {
fn dummy(r1cs: &R1CS<CF1<C>>) -> Self {
let t = if TYPE == RUNNING {
log2(r1cs.num_constraints()) as usize
} else {
0
};
Self::dummy((r1cs.num_public_inputs(), t))
} }
} }
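// Illustrative sketch (not part of this changeset; `C1`, `io_len` and `t` are placeholders):
// with the `RUNNING`/`INCOMING` markers from `constants.rs`, a running instance carries `t`
// betas (t = log2(#constraints) when derived from an R1CS), while an incoming one carries none:
//
// let running = CommittedInstance::<C1, RUNNING>::dummy((io_len, t));
// let incoming = CommittedInstance::<C1, INCOMING>::dummy((io_len, 0)); // `t` must be 0 here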
impl<C: CurveGroup> CommittedInstance<C>
where
C::ScalarField: Absorb,
C::BaseField: PrimeField,
{
/// hash implements the committed instance hash compatible with the gadget implemented in
/// CommittedInstanceVar.hash.
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U_i` is the
/// `CommittedInstance`.
pub fn hash(
&self,
sponge: &PoseidonSponge<C::ScalarField>,
pp_hash: C::ScalarField,
i: C::ScalarField,
z_0: Vec<C::ScalarField>,
z_i: Vec<C::ScalarField>,
) -> C::ScalarField {
let mut sponge = sponge.clone();
sponge.absorb(&pp_hash);
sponge.absorb(&i);
sponge.absorb(&z_0);
sponge.absorb(&z_i);
sponge.absorb(&self);
sponge.squeeze_field_elements(1)[0]
impl<C: CurveGroup, const TYPE: bool> CommittedInstanceOps<C> for CommittedInstance<C, TYPE> {
type Var = CommittedInstanceVar<C, TYPE>;
fn get_commitments(&self) -> Vec<C> {
vec![self.phi]
}
fn is_incoming(&self) -> bool {
TYPE == INCOMING
} }
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct CommittedInstanceVar<C: CurveGroup> {
pub struct CommittedInstanceVar<C: CurveGroup, const TYPE: bool> {
phi: NonNativeAffineVar<C>, phi: NonNativeAffineVar<C>,
betas: Vec<FpVar<C::ScalarField>>, betas: Vec<FpVar<C::ScalarField>>,
e: FpVar<C::ScalarField>, e: FpVar<C::ScalarField>,
x: Vec<FpVar<C::ScalarField>>, x: Vec<FpVar<C::ScalarField>>,
} }
impl<C: CurveGroup> AllocVar<CommittedInstance<C>, C::ScalarField> for CommittedInstanceVar<C> {
fn new_variable<T: Borrow<CommittedInstance<C>>>(
impl<C: CurveGroup, const TYPE: bool> AllocVar<CommittedInstance<C, TYPE>, C::ScalarField>
for CommittedInstanceVar<C, TYPE>
{
fn new_variable<T: Borrow<CommittedInstance<C, TYPE>>>(
cs: impl Into<Namespace<C::ScalarField>>, cs: impl Into<Namespace<C::ScalarField>>,
f: impl FnOnce() -> Result<T, SynthesisError>, f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode, mode: AllocationMode,
@ -132,15 +145,19 @@ impl AllocVar, C::ScalarField> for Committed
Ok(Self { Ok(Self {
phi: NonNativeAffineVar::new_variable(cs.clone(), || Ok(u.phi), mode)?, phi: NonNativeAffineVar::new_variable(cs.clone(), || Ok(u.phi), mode)?,
betas: Vec::new_variable(cs.clone(), || Ok(u.betas.clone()), mode)?, betas: Vec::new_variable(cs.clone(), || Ok(u.betas.clone()), mode)?,
e: FpVar::new_variable(cs.clone(), || Ok(u.e), mode)?,
e: if TYPE == RUNNING {
FpVar::new_variable(cs.clone(), || Ok(u.e), mode)?
} else {
FpVar::zero()
},
x: Vec::new_variable(cs.clone(), || Ok(u.x.clone()), mode)?, x: Vec::new_variable(cs.clone(), || Ok(u.x.clone()), mode)?,
}) })
}) })
} }
} }
impl<C: CurveGroup> R1CSVar<C::ScalarField> for CommittedInstanceVar<C> {
type Value = CommittedInstance<C>;
impl<C: CurveGroup, const TYPE: bool> R1CSVar<C::ScalarField> for CommittedInstanceVar<C, TYPE> {
type Value = CommittedInstance<C, TYPE>;
fn cs(&self) -> ConstraintSystemRef<C::ScalarField> { fn cs(&self) -> ConstraintSystemRef<C::ScalarField> {
self.phi self.phi
@ -164,38 +181,35 @@ impl R1CSVar for CommittedInstanceVar {
} }
} }
impl<C: CurveGroup> CommittedInstanceVar<C>
where
C::ScalarField: Absorb,
C::BaseField: PrimeField,
{
/// hash implements the committed instance hash compatible with the native implementation from
/// CommittedInstance.hash.
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and `U` is the
/// `CommittedInstance`.
/// Additionally it returns the vector of the field elements from the self parameters, so they
/// can be reused in other gadgets avoiding recalculating (reconstraining) them.
#[allow(clippy::type_complexity)]
pub fn hash(
self,
sponge: &PoseidonSpongeVar<CF1<C>>,
pp_hash: FpVar<CF1<C>>,
i: FpVar<CF1<C>>,
z_0: Vec<FpVar<CF1<C>>>,
z_i: Vec<FpVar<CF1<C>>>,
) -> Result<(FpVar<CF1<C>>, Vec<FpVar<CF1<C>>>), SynthesisError> {
let mut sponge = sponge.clone();
let U_vec = self.to_sponge_field_elements()?;
sponge.absorb(&pp_hash)?;
sponge.absorb(&i)?;
sponge.absorb(&z_0)?;
sponge.absorb(&z_i)?;
sponge.absorb(&U_vec)?;
Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec))
impl<C: CurveGroup, const TYPE: bool> CommittedInstanceVarOps<C> for CommittedInstanceVar<C, TYPE> {
type PointVar = NonNativeAffineVar<C>;
fn get_commitments(&self) -> Vec<Self::PointVar> {
vec![self.phi.clone()]
}
fn get_public_inputs(&self) -> &[FpVar<CF1<C>>] {
&self.x
}
fn enforce_incoming(&self) -> Result<(), SynthesisError> {
// We don't need to check if `self` is an incoming instance in-circuit,
// because incoming instances and running instances already have
// different types of `e` (constant vs witness) when we allocate them
// in-circuit.
(TYPE == INCOMING)
.then_some(())
.ok_or(SynthesisError::Unsatisfiable)
}
fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError> {
self.betas.enforce_equal(&other.betas)?;
self.e.enforce_equal(&other.e)?;
self.x.enforce_equal(&other.x)
} }
} }
#[derive(Clone, Debug)]
#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)]
pub struct Witness<F: PrimeField> { pub struct Witness<F: PrimeField> {
w: Vec<F>, w: Vec<F>,
r_w: F, r_w: F,
@ -213,9 +227,9 @@ impl Witness {
&self, &self,
params: &CS::ProverParams, params: &CS::ProverParams,
x: Vec<F>, x: Vec<F>,
) -> Result<CommittedInstance<C>, crate::Error> {
) -> Result<CommittedInstance<C, false>, crate::Error> {
let phi = CS::commit(params, &self.w, &self.r_w)?; let phi = CS::commit(params, &self.w, &self.r_w)?;
Ok(CommittedInstance {
Ok(CommittedInstance::<C, false> {
phi, phi,
x, x,
e: F::zero(), e: F::zero(),
@ -224,6 +238,53 @@ impl Witness {
} }
} }
impl<F: PrimeField> Dummy<&R1CS<F>> for Witness<F> {
fn dummy(r1cs: &R1CS<F>) -> Self {
Self {
w: vec![F::zero(); r1cs.num_witnesses()],
r_w: F::zero(),
}
}
}
impl<F: PrimeField> WitnessOps<F> for Witness<F> {
type Var = WitnessVar<F>;
fn get_openings(&self) -> Vec<(&[F], F)> {
vec![(&self.w, self.r_w)]
}
}
/// In-circuit representation of the Witness associated to the CommittedInstance.
#[derive(Debug, Clone)]
pub struct WitnessVar<F: PrimeField> {
pub W: Vec<FpVar<F>>,
pub rW: FpVar<F>,
}
impl<F: PrimeField> AllocVar<Witness<F>, F> for WitnessVar<F> {
fn new_variable<T: Borrow<Witness<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let W = Vec::new_variable(cs.clone(), || Ok(val.borrow().w.to_vec()), mode)?;
let rW = FpVar::new_variable(cs.clone(), || Ok(val.borrow().r_w), mode)?;
Ok(Self { W, rW })
})
}
}
impl<F: PrimeField> WitnessVarOps<F> for WitnessVar<F> {
fn get_openings(&self) -> Vec<(&[FpVar<F>], FpVar<F>)> {
vec![(&self.W, self.rW.clone())]
}
}
#[derive(Debug, thiserror::Error, PartialEq)] #[derive(Debug, thiserror::Error, PartialEq)]
pub enum ProtoGalaxyError { pub enum ProtoGalaxyError {
#[error("The remainder from G(X)-F(α)*L_0(X)) / Z(X) should be zero")] #[error("The remainder from G(X)-F(α)*L_0(X)) / Z(X) should be zero")]
@ -254,6 +315,68 @@ where
/// Proving parameters of the underlying commitment scheme over C2 /// Proving parameters of the underlying commitment scheme over C2
pub cf_cs_params: CS2::ProverParams, pub cf_cs_params: CS2::ProverParams,
} }
impl<C1, C2, CS1, CS2> CanonicalSerialize for ProverParams<C1, C2, CS1, CS2>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, false>,
CS2: CommitmentScheme<C2, false>,
{
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: ark_serialize::Compress,
) -> Result<(), ark_serialize::SerializationError> {
self.cs_params.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_params.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
self.cs_params.serialized_size(compress) + self.cf_cs_params.serialized_size(compress)
}
}
impl<C1, C2, CS1, CS2> Valid for ProverParams<C1, C2, CS1, CS2>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1>,
CS2: CommitmentScheme<C2>,
{
fn check(&self) -> Result<(), ark_serialize::SerializationError> {
self.poseidon_config.full_rounds.check()?;
self.poseidon_config.partial_rounds.check()?;
self.poseidon_config.alpha.check()?;
self.poseidon_config.ark.check()?;
self.poseidon_config.mds.check()?;
self.poseidon_config.rate.check()?;
self.poseidon_config.capacity.check()?;
self.cs_params.check()?;
self.cf_cs_params.check()?;
Ok(())
}
}
impl<C1, C2, CS1, CS2> CanonicalDeserialize for ProverParams<C1, C2, CS1, CS2>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1, false>,
CS2: CommitmentScheme<C2, false>,
{
fn deserialize_with_mode<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
) -> Result<Self, ark_serialize::SerializationError> {
let cs_params = CS1::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_params =
CS2::ProverParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(ProverParams {
poseidon_config: poseidon_canonical_config::<C1::ScalarField>(),
cs_params,
cf_cs_params,
})
}
}
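Note that the impls above intentionally skip `poseidon_config` when writing: deserialization rebuilds it with `poseidon_canonical_config`, so only `cs_params` and `cf_cs_params` hit the wire. A minimal, generic round-trip helper using nothing beyond the standard `ark_serialize` API (a sketch, not part of the crate):

use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};

// Round-trips any params type through bytes; with the impls above, the bytes
// contain cs_params and cf_cs_params only, and the Poseidon config is
// reconstructed deterministically on the way back.
fn roundtrip<P: CanonicalSerialize + CanonicalDeserialize>(params: &P) -> P {
    let mut bytes = Vec::new();
    params
        .serialize_with_mode(&mut bytes, Compress::Yes)
        .expect("serialization should not fail");
    P::deserialize_with_mode(&bytes[..], Compress::Yes, Validate::Yes)
        .expect("deserialization should not fail")
}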
/// Verification parameters for ProtoGalaxy-based IVC /// Verification parameters for ProtoGalaxy-based IVC
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -276,6 +399,40 @@ where
pub cf_cs_vp: CS2::VerifierParams, pub cf_cs_vp: CS2::VerifierParams,
} }
impl<C1, C2, CS1, CS2> Valid for VerifierParams<C1, C2, CS1, CS2>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1>,
CS2: CommitmentScheme<C2>,
{
fn check(&self) -> Result<(), ark_serialize::SerializationError> {
self.cs_vp.check()?;
self.cf_cs_vp.check()?;
Ok(())
}
}
impl<C1, C2, CS1, CS2> CanonicalSerialize for VerifierParams<C1, C2, CS1, CS2>
where
C1: CurveGroup,
C2: CurveGroup,
CS1: CommitmentScheme<C1>,
CS2: CommitmentScheme<C2>,
{
fn serialize_with_mode<W: std::io::prelude::Write>(
&self,
mut writer: W,
compress: ark_serialize::Compress,
) -> Result<(), ark_serialize::SerializationError> {
self.cs_vp.serialize_with_mode(&mut writer, compress)?;
self.cf_cs_vp.serialize_with_mode(&mut writer, compress)
}
fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
self.cs_vp.serialized_size(compress) + self.cf_cs_vp.serialized_size(compress)
}
}
impl<C1, C2, CS1, CS2> VerifierParams<C1, C2, CS1, CS2> impl<C1, C2, CS1, CS2> VerifierParams<C1, C2, CS1, CS2>
where where
C1: CurveGroup, C1: CurveGroup,
@ -298,6 +455,23 @@ where
} }
} }
#[derive(PartialEq, Eq, Debug, Clone, CanonicalSerialize, CanonicalDeserialize)]
pub struct IVCProof<C1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
{
pub i: C1::ScalarField,
pub z_0: Vec<C1::ScalarField>,
pub z_i: Vec<C1::ScalarField>,
pub W_i: Witness<C1::ScalarField>,
pub U_i: CommittedInstance<C1, true>,
pub w_i: Witness<C1::ScalarField>,
pub u_i: CommittedInstance<C1, false>,
pub cf_W_i: CycleFoldWitness<C2>,
pub cf_U_i: CycleFoldCommittedInstance<C2>,
}
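Because `IVCProof` derives `CanonicalSerialize`/`CanonicalDeserialize`, the whole IVC state can be shipped between parties as bytes. A hedged sketch of that flow; `PG`, `CS1`, `CS2`, `Projective`, `Projective2` and the unit `FCircuit` params are borrowed from the test module further down and are assumptions here:

// prover side: freeze the current IVC state into a proof and serialize it
let ivc_proof = protogalaxy.ivc_proof();
let mut proof_bytes = Vec::new();
ivc_proof.serialize_compressed(&mut proof_bytes).unwrap();

// receiving side: deserialize, verify, and (optionally) keep folding from it
let ivc_proof =
    IVCProof::<Projective, Projective2>::deserialize_compressed(&proof_bytes[..]).unwrap();
PG::<CS1, CS2>::verify(verifier_params.clone(), ivc_proof.clone()).unwrap();
let mut resumed =
    PG::<CS1, CS2>::from_ivc_proof(ivc_proof, (), (prover_params, verifier_params)).unwrap();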
/// Implements ProtoGalaxy+CycleFold's IVC, described in [ProtoGalaxy] and /// Implements ProtoGalaxy+CycleFold's IVC, described in [ProtoGalaxy] and
/// [CycleFold], following the FoldingScheme trait /// [CycleFold], following the FoldingScheme trait
/// ///
@ -337,9 +511,9 @@ where
pub z_i: Vec<C1::ScalarField>, pub z_i: Vec<C1::ScalarField>,
/// ProtoGalaxy instances /// ProtoGalaxy instances
pub w_i: Witness<C1::ScalarField>, pub w_i: Witness<C1::ScalarField>,
pub u_i: CommittedInstance<C1>,
pub u_i: CommittedInstance<C1, false>,
pub W_i: Witness<C1::ScalarField>, pub W_i: Witness<C1::ScalarField>,
pub U_i: CommittedInstance<C1>,
pub U_i: CommittedInstance<C1, true>,
/// CycleFold running instance /// CycleFold running instance
pub cf_W_i: CycleFoldWitness<C2>, pub cf_W_i: CycleFoldWitness<C2>,
@ -472,10 +646,73 @@ where
type PreprocessorParam = (PoseidonConfig<CF1<C1>>, FC); type PreprocessorParam = (PoseidonConfig<CF1<C1>>, FC);
type ProverParam = ProverParams<C1, C2, CS1, CS2>; type ProverParam = ProverParams<C1, C2, CS1, CS2>;
type VerifierParam = VerifierParams<C1, C2, CS1, CS2>; type VerifierParam = VerifierParams<C1, C2, CS1, CS2>;
type RunningInstance = (CommittedInstance<C1>, Witness<C1::ScalarField>);
type IncomingInstance = (CommittedInstance<C1>, Witness<C1::ScalarField>);
type MultiCommittedInstanceWithWitness = (CommittedInstance<C1>, Witness<C1::ScalarField>);
type RunningInstance = (CommittedInstance<C1, true>, Witness<C1::ScalarField>);
type IncomingInstance = (CommittedInstance<C1, false>, Witness<C1::ScalarField>);
type MultiCommittedInstanceWithWitness =
(CommittedInstance<C1, false>, Witness<C1::ScalarField>);
type CFInstance = (CycleFoldCommittedInstance<C2>, CycleFoldWitness<C2>); type CFInstance = (CycleFoldCommittedInstance<C2>, CycleFoldWitness<C2>);
type IVCProof = IVCProof<C1, C2>;
fn pp_deserialize_with_mode<R: std::io::prelude::Read>(
reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
_fc_params: FC::Params, // FCircuit params
) -> Result<Self::ProverParam, Error> {
Ok(Self::ProverParam::deserialize_with_mode(
reader, compress, validate,
)?)
}
fn vp_deserialize_with_mode<R: std::io::prelude::Read>(
mut reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
fc_params: FC::Params,
) -> Result<Self::VerifierParam, Error> {
let poseidon_config = poseidon_canonical_config::<C1::ScalarField>();
// generate the r1cs & cf_r1cs needed for the VerifierParams. In this way we avoid needing
// to serialize them, saving significant space in the VerifierParams serialized size.
let f_circuit = FC::new(fc_params)?;
let k = 1;
let d = 2;
let t = Self::compute_t(&poseidon_config, &f_circuit, d, k)?;
// main circuit R1CS:
let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
let augmented_F_circuit = AugmentedFCircuit::<C1, C2, GC2, FC>::empty(
&poseidon_config,
f_circuit.clone(),
t,
d,
k,
);
augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
// CycleFold circuit R1CS
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
let cf_circuit = ProtoGalaxyCycleFoldCircuit::<C1, GC1>::empty();
cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
Ok(Self::VerifierParam {
poseidon_config,
r1cs,
cf_r1cs,
cs_vp,
cf_cs_vp,
})
}
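As with the prover params, only `cs_vp` and `cf_cs_vp` are ever written out; the Poseidon config and the `r1cs`/`cf_r1cs` matrices are rebuilt from the `FCircuit` params at load time. A hedged usage sketch, again borrowing `PG`, `CS1`, `CS2` and the unit circuit params from the test module further down:

use ark_serialize::{CanonicalSerialize, Compress, Validate};

// serialize the verifier params (only the commitment-scheme verifier params are written)
let mut vp_bytes = Vec::new();
verifier_params.serialize_compressed(&mut vp_bytes).unwrap();

// rebuild them later from the bytes plus the FCircuit params (here `()` for the test circuit)
let vp = PG::<CS1, CS2>::vp_deserialize_with_mode(&vp_bytes[..], Compress::Yes, Validate::Yes, ())
    .unwrap();
assert_eq!(vp.pp_hash().unwrap(), verifier_params.pp_hash().unwrap());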
fn preprocess( fn preprocess(
mut rng: impl RngCore, mut rng: impl RngCore,
@ -540,9 +777,9 @@ where
let pp_hash = vp.pp_hash()?; let pp_hash = vp.pp_hash()?;
// setup the dummy instances // setup the dummy instances
let (W_dummy, U_dummy) = vp.r1cs.dummy_running_instance();
let (w_dummy, u_dummy) = vp.r1cs.dummy_incoming_instance();
let (cf_W_dummy, cf_U_dummy) = vp.cf_r1cs.dummy_running_instance();
let (w_dummy, u_dummy) = vp.r1cs.dummy_witness_instance();
let (W_dummy, U_dummy) = vp.r1cs.dummy_witness_instance();
let (cf_W_dummy, cf_U_dummy) = vp.cf_r1cs.dummy_witness_instance();
// W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the // W_dummy=W_0 is a 'dummy witness', all zeroes, but with the size corresponding to the
// R1CS that we're working with. // R1CS that we're working with.
@ -636,8 +873,8 @@ where
&sponge, &sponge,
self.pp_hash, self.pp_hash,
self.i + C1::ScalarField::one(), self.i + C1::ScalarField::one(),
self.z_0.clone(),
z_i1.clone(),
&self.z_0,
&z_i1,
); );
// `cf_U_{i+1}` (i.e., `cf_U_1`) is fixed to `cf_U_dummy`, so we // `cf_U_{i+1}` (i.e., `cf_U_1`) is fixed to `cf_U_dummy`, so we
// just use `self.cf_U_i = cf_U_0 = cf_U_dummy`. // just use `self.cf_U_i = cf_U_0 = cf_U_dummy`.
@ -744,8 +981,8 @@ where
&sponge, &sponge,
self.pp_hash, self.pp_hash,
self.i + C1::ScalarField::one(), self.i + C1::ScalarField::one(),
self.z_0.clone(),
z_i1.clone(),
&self.z_0,
&z_i1,
); );
cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash); cf_u_i1_x = cf_U_i1.hash_cyclefold(&sponge, self.pp_hash);
@ -788,10 +1025,11 @@ where
)?, )?,
U_i1 U_i1
); );
self.cf_r1cs.check_tight_relation(&_cf1_w_i, &cf1_u_i)?;
self.cf_r1cs.check_tight_relation(&_cf2_w_i, &cf2_u_i)?;
self.cf_r1cs
.check_relaxed_relation(&self.cf_W_i, &self.cf_U_i)?;
cf1_u_i.check_incoming()?;
cf2_u_i.check_incoming()?;
self.cf_r1cs.check_relation(&_cf1_w_i, &cf1_u_i)?;
self.cf_r1cs.check_relation(&_cf2_w_i, &cf2_u_i)?;
self.cf_r1cs.check_relation(&self.cf_W_i, &self.cf_U_i)?;
} }
self.W_i = W_i1; self.W_i = W_i1;
@ -826,8 +1064,9 @@ where
#[cfg(test)] #[cfg(test)]
{ {
self.r1cs.check_tight_relation(&self.w_i, &self.u_i)?;
self.r1cs.check_relaxed_relation(&self.W_i, &self.U_i)?;
self.u_i.check_incoming()?;
self.r1cs.check_relation(&self.w_i, &self.u_i)?;
self.r1cs.check_relation(&self.W_i, &self.U_i)?;
} }
Ok(()) Ok(())
@ -836,35 +1075,79 @@ where
fn state(&self) -> Vec<C1::ScalarField> { fn state(&self) -> Vec<C1::ScalarField> {
self.z_i.clone() self.z_i.clone()
} }
fn instances(
&self,
) -> (
Self::RunningInstance,
Self::IncomingInstance,
Self::CFInstance,
) {
(
(self.U_i.clone(), self.W_i.clone()),
(self.u_i.clone(), self.w_i.clone()),
(self.cf_U_i.clone(), self.cf_W_i.clone()),
)
fn ivc_proof(&self) -> Self::IVCProof {
Self::IVCProof {
i: self.i,
z_0: self.z_0.clone(),
z_i: self.z_i.clone(),
W_i: self.W_i.clone(),
U_i: self.U_i.clone(),
w_i: self.w_i.clone(),
u_i: self.u_i.clone(),
cf_W_i: self.cf_W_i.clone(),
cf_U_i: self.cf_U_i.clone(),
}
}
fn from_ivc_proof(
ivc_proof: Self::IVCProof,
fcircuit_params: FC::Params,
params: (Self::ProverParam, Self::VerifierParam),
) -> Result<Self, Error> {
let IVCProof {
i,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
} = ivc_proof;
let (pp, vp) = params;
let f_circuit = FC::new(fcircuit_params).unwrap();
Ok(Self {
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
r1cs: vp.r1cs.clone(),
cf_r1cs: vp.cf_r1cs.clone(),
poseidon_config: pp.poseidon_config,
cs_params: pp.cs_params,
cf_cs_params: pp.cf_cs_params,
F: f_circuit,
pp_hash: vp.pp_hash()?,
i,
z_0,
z_i,
w_i,
u_i,
W_i,
U_i,
cf_W_i,
cf_U_i,
})
} }
/// Implements IVC.V of ProtoGalaxy+CycleFold /// Implements IVC.V of ProtoGalaxy+CycleFold
fn verify(
vp: Self::VerifierParam,
z_0: Vec<C1::ScalarField>, // initial state
z_i: Vec<C1::ScalarField>, // last state
num_steps: C1::ScalarField,
running_instance: Self::RunningInstance,
incoming_instance: Self::IncomingInstance,
cyclefold_instance: Self::CFInstance,
) -> Result<(), Error> {
let sponge = PoseidonSponge::<C1::ScalarField>::new(&vp.poseidon_config);
fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error> {
let Self::IVCProof {
i: num_steps,
z_0,
z_i,
W_i,
U_i,
w_i,
u_i,
cf_W_i,
cf_U_i,
} = ivc_proof;
let (U_i, W_i) = running_instance;
let (u_i, w_i) = incoming_instance;
let (cf_U_i, cf_W_i) = cyclefold_instance;
let sponge = PoseidonSponge::<C1::ScalarField>::new(&vp.poseidon_config);
if u_i.x.len() != 2 || U_i.x.len() != 2 { if u_i.x.len() != 2 || U_i.x.len() != 2 {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
@ -874,7 +1157,7 @@ where
// check that u_i's output points to the running instance // check that u_i's output points to the running instance
// u_i.X[0] == H(i, z_0, z_i, U_i) // u_i.X[0] == H(i, z_0, z_i, U_i)
let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, z_0, z_i.clone());
let expected_u_i_x = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i);
if expected_u_i_x != u_i.x[0] { if expected_u_i_x != u_i.x[0] {
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
@ -884,13 +1167,15 @@ where
return Err(Error::IVCVerificationFail); return Err(Error::IVCVerificationFail);
} }
// check R1CS satisfiability
vp.r1cs.check_tight_relation(&w_i, &u_i)?;
// check R1CS satisfiability, which is equivalent to checking that `u_i` is an
// incoming instance and that `(w_i, u_i)` satisfies the relaxed R1CS relation
u_i.check_incoming()?;
vp.r1cs.check_relation(&w_i, &u_i)?;
// check RelaxedR1CS satisfiability // check RelaxedR1CS satisfiability
vp.r1cs.check_relaxed_relation(&W_i, &U_i)?;
vp.r1cs.check_relation(&W_i, &U_i)?;
// check CycleFold RelaxedR1CS satisfiability // check CycleFold RelaxedR1CS satisfiability
vp.cf_r1cs.check_relaxed_relation(&cf_W_i, &cf_U_i)?;
vp.cf_r1cs.check_relation(&cf_W_i, &cf_U_i)?;
Ok(()) Ok(())
} }
@ -1003,17 +1288,8 @@ mod tests {
} }
assert_eq!(Fr::from(num_steps as u32), protogalaxy.i); assert_eq!(Fr::from(num_steps as u32), protogalaxy.i);
let (running_instance, incoming_instance, cyclefold_instance) = protogalaxy.instances();
PG::<CS1, CS2>::verify(
params.1,
z_0,
protogalaxy.z_i,
protogalaxy.i,
running_instance,
incoming_instance,
cyclefold_instance,
)
.unwrap();
let ivc_proof = protogalaxy.ivc_proof();
PG::<CS1, CS2>::verify(params.1, ivc_proof).unwrap();
} }
#[ignore] #[ignore]

+ 89
- 61
folding-schemes/src/folding/protogalaxy/traits.rs

@ -3,23 +3,25 @@ use ark_ec::CurveGroup;
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_r1cs_std::{fields::fp::FpVar, uint8::UInt8, ToConstraintFieldGadget}; use ark_r1cs_std::{fields::fp::FpVar, uint8::UInt8, ToConstraintFieldGadget};
use ark_relations::r1cs::SynthesisError; use ark_relations::r1cs::SynthesisError;
use ark_std::{cfg_iter, log2, rand::RngCore, One, Zero};
use ark_std::{cfg_into_iter, log2, One};
use rayon::prelude::*; use rayon::prelude::*;
use super::{utils::pow_i, CommittedInstance, CommittedInstanceVar, Witness};
use super::{constants::RUNNING, utils::pow_i, CommittedInstance, CommittedInstanceVar, Witness};
use crate::{ use crate::{
arith::r1cs::{RelaxedR1CS, R1CS},
arith::{r1cs::R1CS, Arith},
folding::circuits::CF1,
transcript::AbsorbNonNative, transcript::AbsorbNonNative,
utils::vec::is_zero_vec,
Error, Error,
}; };
// Implements the trait for absorbing ProtoGalaxy's CommittedInstance. // Implements the trait for absorbing ProtoGalaxy's CommittedInstance.
impl<C: CurveGroup> Absorb for CommittedInstance<C>
impl<C: CurveGroup, const TYPE: bool> Absorb for CommittedInstance<C, TYPE>
where where
C::ScalarField: Absorb, C::ScalarField: Absorb,
{ {
fn to_sponge_bytes(&self, _dest: &mut Vec<u8>) {
unimplemented!()
fn to_sponge_bytes(&self, dest: &mut Vec<u8>) {
C::ScalarField::batch_to_sponge_bytes(&self.to_sponge_field_elements_as_vec(), dest);
} }
fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) { fn to_sponge_field_elements<F: PrimeField>(&self, dest: &mut Vec<F>) {
@ -33,9 +35,11 @@ where
} }
// Implements the trait for absorbing ProtoGalaxy's CommittedInstanceVar in-circuit. // Implements the trait for absorbing ProtoGalaxy's CommittedInstanceVar in-circuit.
impl<C: CurveGroup> AbsorbGadget<C::ScalarField> for CommittedInstanceVar<C> {
impl<C: CurveGroup, const TYPE: bool> AbsorbGadget<C::ScalarField>
for CommittedInstanceVar<C, TYPE>
{
fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> { fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> {
unimplemented!()
FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?)
} }
fn to_sponge_field_elements(&self) -> Result<Vec<FpVar<C::ScalarField>>, SynthesisError> { fn to_sponge_field_elements(&self) -> Result<Vec<FpVar<C::ScalarField>>, SynthesisError> {
@ -49,68 +53,92 @@ impl AbsorbGadget for CommittedInstanceVar {
} }
} }
impl<C: CurveGroup> RelaxedR1CS<C, Witness<C::ScalarField>, CommittedInstance<C>>
for R1CS<C::ScalarField>
/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and
/// the committed instance is of type [`CommittedInstance`].
///
/// Due to the error term `CommittedInstance.e`, the R1CS here is treated as a
/// relaxed R1CS.
///
/// See `nova/traits.rs` for the rationale behind the design.
impl<C: CurveGroup, const TYPE: bool> Arith<Witness<CF1<C>>, CommittedInstance<C, TYPE>>
for R1CS<CF1<C>>
{ {
fn dummy_running_instance(&self) -> (Witness<C::ScalarField>, CommittedInstance<C>) {
let w_len = self.A.n_cols - 1 - self.l;
let w_dummy = Witness::new(vec![C::ScalarField::zero(); w_len]);
let u_dummy = CommittedInstance::<C>::dummy_running(self.l, log2(self.A.n_rows) as usize);
(w_dummy, u_dummy)
}
type Evaluation = Vec<CF1<C>>;
fn dummy_incoming_instance(&self) -> (Witness<C::ScalarField>, CommittedInstance<C>) {
let w_len = self.A.n_cols - 1 - self.l;
let w_dummy = Witness::new(vec![C::ScalarField::zero(); w_len]);
let u_dummy = CommittedInstance::<C>::dummy_incoming(self.l);
(w_dummy, u_dummy)
}
fn is_relaxed(_w: &Witness<C::ScalarField>, u: &CommittedInstance<C>) -> bool {
u.e != C::ScalarField::zero() || !u.betas.is_empty()
}
fn extract_z(w: &Witness<C::ScalarField>, u: &CommittedInstance<C>) -> Vec<C::ScalarField> {
[&[C::ScalarField::one()][..], &u.x, &w.w].concat()
fn eval_relation(
&self,
w: &Witness<CF1<C>>,
u: &CommittedInstance<C, TYPE>,
) -> Result<Self::Evaluation, Error> {
self.eval_at_z(&[&[C::ScalarField::one()][..], &u.x, &w.w].concat())
} }
fn check_error_terms(
fn check_evaluation(
_w: &Witness<C::ScalarField>, _w: &Witness<C::ScalarField>,
u: &CommittedInstance<C>,
u: &CommittedInstance<C, TYPE>,
e: Vec<C::ScalarField>, e: Vec<C::ScalarField>,
) -> Result<(), Error> { ) -> Result<(), Error> {
if u.betas.len() != log2(e.len()) as usize {
return Err(Error::NotSameLength(
"instance.betas.len()".to_string(),
u.betas.len(),
"log2(e.len())".to_string(),
log2(e.len()) as usize,
));
}
let r = cfg_iter!(e)
.enumerate()
.map(|(i, e_i)| pow_i(i, &u.betas) * e_i)
.sum();
if u.e == r {
Ok(())
let ok = if TYPE == RUNNING {
if u.betas.len() != log2(e.len()) as usize {
return Err(Error::NotSameLength(
"instance.betas.len()".to_string(),
u.betas.len(),
"log2(e.len())".to_string(),
log2(e.len()) as usize,
));
}
u.e == cfg_into_iter!(e)
.enumerate()
.map(|(i, e_i)| pow_i(i, &u.betas) * e_i)
.sum::<CF1<C>>()
} else { } else {
Err(Error::NotSatisfied)
}
is_zero_vec(&e)
};
ok.then_some(()).ok_or(Error::NotSatisfied)
} }
}
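Spelled out, the check performed by `check_evaluation` above for a running instance is the ProtoGalaxy error-term equation (a restatement of the code, with n the number of constraints, e_i the i-th entry of the evaluation vector returned by `eval_relation`, and b_j(i) the j-th bit of i):

e \;=\; \sum_{i=0}^{n-1} \mathrm{pow}_i(\boldsymbol{\beta})\, e_i,
\qquad
\mathrm{pow}_i(\boldsymbol{\beta}) \;=\; \prod_{j=0}^{\lceil \log_2 n \rceil - 1} \beta_j^{\, b_j(i)} .

For an incoming instance (`TYPE == INCOMING`) the betas are empty and the check degenerates to e_i = 0 for all i, i.e. plain R1CS satisfiability.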
fn sample<CS>(
&self,
_params: &CS::ProverParams,
_rng: impl RngCore,
) -> Result<(Witness<C::ScalarField>, CommittedInstance<C>), Error>
where
CS: crate::commitment::CommitmentScheme<C, true>,
{
// Sampling a random pair of witness and committed instance is required
// for the zero-knowledge layer for ProtoGalaxy, which is not supported
// yet.
// Tracking issue: https://github.com/privacy-scaling-explorations/sonobe/issues/82
unimplemented!()
#[cfg(test)]
pub mod tests {
use super::*;
use ark_bn254::{Fr, G1Projective as Projective};
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::UniformRand;
use rand::Rng;
/// test that checks the native CommittedInstance.to_sponge_{bytes,field_elements}
/// vs the R1CS constraints version
#[test]
pub fn test_committed_instance_to_sponge_preimage() {
let mut rng = ark_std::test_rng();
let t = rng.gen::<u8>() as usize;
let io_len = rng.gen::<u8>() as usize;
let ci = CommittedInstance::<Projective, true> {
phi: Projective::rand(&mut rng),
betas: (0..t).map(|_| Fr::rand(&mut rng)).collect(),
e: Fr::rand(&mut rng),
x: (0..io_len).map(|_| Fr::rand(&mut rng)).collect(),
};
let bytes = ci.to_sponge_bytes_as_vec();
let field_elements = ci.to_sponge_field_elements_as_vec();
let cs = ConstraintSystem::<Fr>::new_ref();
let ciVar =
CommittedInstanceVar::<Projective, true>::new_witness(cs.clone(), || Ok(ci.clone()))
.unwrap();
let bytes_var = ciVar.to_sponge_bytes().unwrap();
let field_elements_var = ciVar.to_sponge_field_elements().unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and the in-circuit computed sponge preimages match
assert_eq!(bytes_var.value().unwrap(), bytes);
assert_eq!(field_elements_var.value().unwrap(), field_elements);
} }
} }

+ 131
- 0
folding-schemes/src/folding/traits.rs

@ -0,0 +1,131 @@
use ark_crypto_primitives::sponge::{
constraints::{AbsorbGadget, CryptographicSpongeVar},
poseidon::constraints::PoseidonSpongeVar,
Absorb,
};
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar, ToConstraintFieldGadget};
use ark_relations::r1cs::SynthesisError;
use crate::{transcript::Transcript, Error};
use super::circuits::CF1;
pub trait CommittedInstanceOps<C: CurveGroup> {
/// The in-circuit representation of the committed instance.
type Var: AllocVar<Self, CF1<C>> + CommittedInstanceVarOps<C>;
/// `hash` implements the committed instance hash compatible with the
/// in-circuit implementation from `CommittedInstanceVarOps::hash`.
///
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and
/// `U_i` is the committed instance `self`.
fn hash<T: Transcript<CF1<C>>>(
&self,
sponge: &T,
pp_hash: CF1<C>, // public params hash
i: CF1<C>,
z_0: &[CF1<C>],
z_i: &[CF1<C>],
) -> CF1<C>
where
CF1<C>: Absorb,
Self: Sized + Absorb,
{
let mut sponge = sponge.clone();
sponge.absorb(&pp_hash);
sponge.absorb(&i);
sponge.absorb(&z_0);
sponge.absorb(&z_i);
sponge.absorb(&self);
sponge.squeeze_field_elements(1)[0]
}
/// Returns the commitments contained in the committed instance.
fn get_commitments(&self) -> Vec<C>;
/// Returns `true` if the committed instance is an incoming instance, and
/// `false` if it is a running instance.
fn is_incoming(&self) -> bool;
/// Checks if the committed instance is an incoming instance.
fn check_incoming(&self) -> Result<(), Error> {
self.is_incoming()
.then_some(())
.ok_or(Error::NotIncomingCommittedInstance)
}
}
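The default `hash` above is what binds a running instance to the IVC state: IVC.V recomputes it natively and compares it against the first public input of the incoming instance. A minimal hedged sketch of that check, with variable names following the `verify` functions elsewhere in this PR:

// u_i.x[0] is expected to equal H(i, z_0, z_i, U_i)
let expected_x0 = U_i.hash(&sponge, pp_hash, num_steps, &z_0, &z_i);
if expected_x0 != u_i.x[0] {
    return Err(Error::IVCVerificationFail);
}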
pub trait CommittedInstanceVarOps<C: CurveGroup> {
type PointVar: ToConstraintFieldGadget<CF1<C>>;
/// `hash` implements the in-circuit committed instance hash compatible with
/// the native implementation from `CommittedInstanceOps::hash`.
/// Returns `H(i, z_0, z_i, U_i)`, where `i` can be `i` but also `i+1`, and
/// `U_i` is the committed instance `self`.
///
/// Additionally it returns the in-circuit representation of the committed
/// instance `self` as a vector of field elements, so they can be reused in
/// other gadgets avoiding recalculating (reconstraining) them.
#[allow(clippy::type_complexity)]
fn hash(
&self,
sponge: &PoseidonSpongeVar<CF1<C>>,
pp_hash: &FpVar<CF1<C>>,
i: &FpVar<CF1<C>>,
z_0: &[FpVar<CF1<C>>],
z_i: &[FpVar<CF1<C>>],
) -> Result<(FpVar<CF1<C>>, Vec<FpVar<CF1<C>>>), SynthesisError>
where
Self: AbsorbGadget<CF1<C>>,
{
let mut sponge = sponge.clone();
let U_vec = self.to_sponge_field_elements()?;
sponge.absorb(&pp_hash)?;
sponge.absorb(&i)?;
sponge.absorb(&z_0)?;
sponge.absorb(&z_i)?;
sponge.absorb(&U_vec)?;
Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec))
}
/// Returns the commitments contained in the committed instance.
fn get_commitments(&self) -> Vec<Self::PointVar>;
/// Returns the public inputs contained in the committed instance.
fn get_public_inputs(&self) -> &[FpVar<CF1<C>>];
/// Generates constraints to enforce that the committed instance is an
/// incoming instance.
fn enforce_incoming(&self) -> Result<(), SynthesisError>;
/// Generates constraints to enforce that the committed instance `self` is
/// partially equal to another committed instance `other`.
/// Here, only field elements are compared, while commitments (points) are
/// not.
fn enforce_partial_equal(&self, other: &Self) -> Result<(), SynthesisError>;
}
pub trait WitnessOps<F: PrimeField> {
/// The in-circuit representation of the witness.
type Var: AllocVar<Self, F> + WitnessVarOps<F>;
/// Returns the openings (i.e., the values being committed to and the
/// randomness) contained in the witness.
fn get_openings(&self) -> Vec<(&[F], F)>;
}
pub trait WitnessVarOps<F: PrimeField> {
/// Returns the openings (i.e., the values being committed to and the
/// randomness) contained in the witness.
fn get_openings(&self) -> Vec<(&[FpVar<F>], FpVar<F>)>;
}
pub trait Dummy<Cfg> {
fn dummy(cfg: Cfg) -> Self;
}
impl<T: Default + Clone> Dummy<usize> for Vec<T> {
fn dummy(cfg: usize) -> Self {
vec![Default::default(); cfg]
}
}

+ 46
- 24
folding-schemes/src/lib.rs

@ -4,6 +4,7 @@
use ark_ec::{pairing::Pairing, CurveGroup}; use ark_ec::{pairing::Pairing, CurveGroup};
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::rand::CryptoRng; use ark_std::rand::CryptoRng;
use ark_std::{fmt::Debug, rand::RngCore}; use ark_std::{fmt::Debug, rand::RngCore};
use thiserror::Error; use thiserror::Error;
@ -43,6 +44,8 @@ pub enum Error {
IVCVerificationFail, IVCVerificationFail,
#[error("zkIVC verification failed")] #[error("zkIVC verification failed")]
zkIVCVerificationFail, zkIVCVerificationFail,
#[error("Committed instance is expected to be an incoming (fresh) instance")]
NotIncomingCommittedInstance,
#[error("R1CS instance is expected to not be relaxed")] #[error("R1CS instance is expected to not be relaxed")]
R1CSUnrelaxedFail, R1CSUnrelaxedFail,
#[error("Could not find the inner ConstraintSystem")] #[error("Could not find the inner ConstraintSystem")]
@ -105,8 +108,8 @@ pub enum Error {
JSONSerdeError(String), JSONSerdeError(String),
#[error("Multi instances folding not supported in this scheme")] #[error("Multi instances folding not supported in this scheme")]
NoMultiInstances, NoMultiInstances,
#[error("Missing 'other' instances, since this is a multi-instances folding scheme")]
MissingOtherInstances,
#[error("Missing 'other' instances, since this is a multi-instances folding scheme. Expected number of instances, mu:{0}, nu:{1}")]
MissingOtherInstances(usize, usize),
} }
/// FoldingScheme defines trait that is implemented by the diverse folding schemes. It is defined /// FoldingScheme defines trait that is implemented by the diverse folding schemes. It is defined
@ -122,12 +125,37 @@ where
FC: FCircuit<C1::ScalarField>, FC: FCircuit<C1::ScalarField>,
{ {
type PreprocessorParam: Debug + Clone; type PreprocessorParam: Debug + Clone;
type ProverParam: Debug + Clone;
type VerifierParam: Debug + Clone;
type ProverParam: Debug + Clone + CanonicalSerialize;
type VerifierParam: Debug + Clone + CanonicalSerialize;
type RunningInstance: Debug; // contains the CommittedInstance + Witness type RunningInstance: Debug; // contains the CommittedInstance + Witness
type IncomingInstance: Debug; // contains the CommittedInstance + Witness type IncomingInstance: Debug; // contains the CommittedInstance + Witness
type MultiCommittedInstanceWithWitness: Debug; // type used for the extra instances in the multi-instance folding setting type MultiCommittedInstanceWithWitness: Debug; // type used for the extra instances in the multi-instance folding setting
type CFInstance: Debug; // CycleFold CommittedInstance & Witness type CFInstance: Debug; // CycleFold CommittedInstance & Witness
type IVCProof: PartialEq + Eq + Clone + Debug + CanonicalSerialize + CanonicalDeserialize;
/// deserialize Self::ProverParam, recomputing on the fly the data that is intentionally not
/// serialized (e.g. the Poseidon config), in order to keep the serialized bytes small.
fn pp_deserialize_with_mode<R: std::io::prelude::Read>(
reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
fc_params: FC::Params, // FCircuit params
) -> Result<Self::ProverParam, Error>;
/// deserialize Self::VerifierParam, recomputing on the fly the data that is intentionally not
/// serialized, in order to keep the serialized bytes small.
/// Internally it regenerates the r1cs/ccs & cf_r1cs needed by the VerifierParam from the
/// FCircuit params, so they do not need to be serialized, saving significant space.
fn vp_deserialize_with_mode<R: std::io::prelude::Read>(
reader: R,
compress: ark_serialize::Compress,
validate: ark_serialize::Validate,
fc_params: FC::Params, // FCircuit params
) -> Result<Self::VerifierParam, Error>;
fn preprocess( fn preprocess(
rng: impl RngCore, rng: impl RngCore,
@ -147,29 +175,23 @@ where
other_instances: Option<Self::MultiCommittedInstanceWithWitness>, other_instances: Option<Self::MultiCommittedInstanceWithWitness>,
) -> Result<(), Error>; ) -> Result<(), Error>;
// returns the state at the current step
/// returns the state at the current step
fn state(&self) -> Vec<C1::ScalarField>; fn state(&self) -> Vec<C1::ScalarField>;
// returns the instances at the current step, in the following order:
// (running_instance, incoming_instance, cyclefold_instance)
fn instances(
&self,
) -> (
Self::RunningInstance,
Self::IncomingInstance,
Self::CFInstance,
);
/// returns the last IVC state proof, which can be verified in the `verify` method
fn ivc_proof(&self) -> Self::IVCProof;
fn verify(
vp: Self::VerifierParam,
z_0: Vec<C1::ScalarField>, // initial state
z_i: Vec<C1::ScalarField>, // last state
// number of steps between the initial state and the last state
num_steps: C1::ScalarField,
running_instance: Self::RunningInstance,
incoming_instance: Self::IncomingInstance,
cyclefold_instance: Self::CFInstance,
) -> Result<(), Error>;
/// constructs the FoldingScheme instance from the given IVCProof, the FCircuit params, and the
/// ProverParam & VerifierParam pair.
/// This method is useful when the IVCProof is sent between different parties, so that the
/// receiver can continue iterating the IVC from the received IVCProof.
fn from_ivc_proof(
ivc_proof: Self::IVCProof,
fcircuit_params: FC::Params,
params: (Self::ProverParam, Self::VerifierParam),
) -> Result<Self, Error>;
fn verify(vp: Self::VerifierParam, ivc_proof: Self::IVCProof) -> Result<(), Error>;
} }
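Putting the new trait items together, the intended workflow is that the prover extracts an `IVCProof`, hands it over, and the receiver either verifies it or rebuilds a full folding-scheme instance from it and keeps folding. A scheme-agnostic, hedged sketch (`fs`, `pp`, `vp`, `fc_params`, `rng` and `next_external_inputs` are placeholders for values produced by `preprocess`/`init` as in the examples):

// prover side: extract the proof of the current state and hand it over
let ivc_proof = fs.ivc_proof();

// receiver side: verify it, or rebuild a full folding-scheme instance from it
// and continue iterating the IVC where the sender left off
FS::verify(vp.clone(), ivc_proof.clone()).unwrap();
let mut fs = FS::from_ivc_proof(ivc_proof, fc_params, (pp, vp)).unwrap();
fs.prove_step(&mut rng, next_external_inputs, None).unwrap();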
/// Trait with auxiliary methods for multi-folding schemes (ie. HyperNova, ProtoGalaxy, etc), /// Trait with auxiliary methods for multi-folding schemes (ie. HyperNova, ProtoGalaxy, etc),

+ 3
- 3
folding-schemes/src/utils/mod.rs

@ -8,7 +8,7 @@ use ark_serialize::CanonicalSerialize;
use ark_std::Zero; use ark_std::Zero;
use sha3::{Digest, Sha3_256}; use sha3::{Digest, Sha3_256};
use crate::arith::Arith;
use crate::arith::ArithSerializer;
use crate::commitment::CommitmentScheme; use crate::commitment::CommitmentScheme;
use crate::Error; use crate::Error;
@ -45,8 +45,8 @@ pub fn get_cm_coordinates(cm: &C) -> Vec {
/// returns the hash of the given public parameters of the Folding Scheme /// returns the hash of the given public parameters of the Folding Scheme
pub fn pp_hash<C1, C2, CS1, CS2, const H: bool>( pub fn pp_hash<C1, C2, CS1, CS2, const H: bool>(
arith: &impl Arith<C1::ScalarField>,
cf_arith: &impl Arith<C2::ScalarField>,
arith: &impl ArithSerializer,
cf_arith: &impl ArithSerializer,
cs_vp: &CS1::VerifierParams, cs_vp: &CS1::VerifierParams,
cf_cs_vp: &CS2::VerifierParams, cf_cs_vp: &CS2::VerifierParams,
poseidon_config: &PoseidonConfig<C1::ScalarField>, poseidon_config: &PoseidonConfig<C1::ScalarField>,
