
Onchain decider circuit for Protogalaxy (#145)

* Move r1cs and ccs to standalone folders

* Simplify type bounds of SparseMatrixVar

* Implement `EquivalenceGadget` trait for `FpVar` and `NonNativeUintVar`.

Together with the existing `MatrixGadget` and `VectorGadget`, we can now use the same logic (via `R1CSMatricesVar`) for checking R1CS satisfiability in-circuit both natively and non-natively (see the sketch after this list).

* Simplify trait bounds

* Implement `ArithGadget` for `R1CSMatricesVar` and `CCSMatricesVar`

* `PedersenGadget::commit` now takes slices as input

* Structs for proofs and auxiliary values in protogalaxy

* `u` in LCCCS should be `z[0]`

* `Inputize` trait

* Generic decider circuits

* Verifier should check the commitments in committed instances

* Update the comments according to the new docs

* Fix examples

* Add `DeciderEnabledNIFS::fold_group_elements_native` to wrap code for folding commitments

* Fix incorrect endianness

* Format

* Get rid of `unwrap` when possible
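
For context on the `EquivalenceGadget`/`ArithGadget` point above, here is a minimal sketch condensed from the new `test_relaxed_r1cs_nonnative_circuit` test added in `folding-schemes/src/arith/r1cs/circuits.rs` (shown further down in this diff): the same `enforce_relation` call checks R1CS satisfiability with native `FpVar`s and, via `NonNativeUintVar`, non-natively. The setup of `r1cs`, `w` and `u` is elided, error propagation with `?` replaces the test's `unwrap`s, and the crate-internal paths follow that test module.

```rust
// Sketch only: condensed from the new test in
// folding-schemes/src/arith/r1cs/circuits.rs. `r1cs`, `w` and `u` are a
// (relaxed) R1CS with a satisfying witness/committed-instance pair,
// prepared as in `prepare_instances` of that test module.
use ark_pallas::{Fq, Fr};
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_vesta::constraints::GVar as GVar2;

use crate::arith::{r1cs::circuits::R1CSMatricesVar, ArithGadget};
use crate::folding::circuits::{
    cyclefold::{CycleFoldCommittedInstanceVar, CycleFoldWitnessVar},
    nonnative::uint::NonNativeUintVar,
};
use crate::folding::nova::{circuits::CommittedInstanceVar, decider_eth_circuit::WitnessVar};

// Natively: the constraint field is the same field (Fq) the R1CS is defined over.
let cs = ConstraintSystem::<Fq>::new_ref();
let w_var = WitnessVar::new_witness(cs.clone(), || Ok(&w))?;
let u_var = CommittedInstanceVar::new_witness(cs.clone(), || Ok(&u))?;
let r1cs_var = R1CSMatricesVar::<Fq, FpVar<Fq>>::new_witness(cs.clone(), || Ok(&r1cs))?;
r1cs_var.enforce_relation(&w_var, &u_var)?;

// Non-natively: the same Fq relation is enforced over Fr via NonNativeUintVar.
let cs = ConstraintSystem::<Fr>::new_ref();
let w_var = CycleFoldWitnessVar::new_witness(cs.clone(), || Ok(w))?;
let u_var = CycleFoldCommittedInstanceVar::<_, GVar2>::new_witness(cs.clone(), || Ok(u))?;
let r1cs_var =
    R1CSMatricesVar::<Fq, NonNativeUintVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs))?;
r1cs_var.enforce_relation(&w_var, &u_var)?;
```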
winderica committed to main 1 month ago (committed by GitHub) · commit b812dd66df
46 changed files with 2738 additions and 2411 deletions
  1. +8 -7 examples/circom_full_flow.rs
  2. +8 -7 examples/full_flow.rs
  3. +9 -8 examples/noir_full_flow.rs
  4. +8 -7 examples/noname_full_flow.rs
  5. +35 -0 folding-schemes/src/arith/ccs/circuits.rs
  6. +2 -0 folding-schemes/src/arith/ccs/mod.rs
  7. +301 -0 folding-schemes/src/arith/r1cs/circuits.rs
  8. +12 -4 folding-schemes/src/arith/r1cs/mod.rs
  9. +1 -5 folding-schemes/src/commitment/ipa.rs
  10. +6 -9 folding-schemes/src/commitment/pedersen.rs
  11. +77 -34 folding-schemes/src/folding/circuits/cyclefold.rs
  12. +211 -0 folding-schemes/src/folding/circuits/decider/mod.rs
  13. +316 -0 folding-schemes/src/folding/circuits/decider/off_chain.rs
  14. +325 -0 folding-schemes/src/folding/circuits/decider/on_chain.rs
  15. +3 -2 folding-schemes/src/folding/circuits/mod.rs
  16. +78 -14 folding-schemes/src/folding/circuits/nonnative/affine.rs
  17. +47 -30 folding-schemes/src/folding/circuits/nonnative/uint.rs
  18. +7 -0 folding-schemes/src/folding/hypernova/cccs.rs
  19. +9 -20 folding-schemes/src/folding/hypernova/circuits.rs
  20. +66 -68 folding-schemes/src/folding/hypernova/decider_eth.rs
  21. +145 -422 folding-schemes/src/folding/hypernova/decider_eth_circuit.rs
  22. +15 -1 folding-schemes/src/folding/hypernova/lcccs.rs
  23. +25 -47 folding-schemes/src/folding/hypernova/mod.rs
  24. +19 -12 folding-schemes/src/folding/hypernova/nimfs.rs
  25. +4 -20 folding-schemes/src/folding/nova/circuits.rs
  26. +110 -179 folding-schemes/src/folding/nova/decider.rs
  27. +100 -416 folding-schemes/src/folding/nova/decider_circuits.rs
  28. +102 -156 folding-schemes/src/folding/nova/decider_eth.rs
  29. +123 -724 folding-schemes/src/folding/nova/decider_eth_circuit.rs
  30. +30 -35 folding-schemes/src/folding/nova/mod.rs
  31. +0 -3 folding-schemes/src/folding/nova/nifs/nova.rs
  32. +31 -2 folding-schemes/src/folding/nova/traits.rs
  33. +1 -6 folding-schemes/src/folding/nova/zk.rs
  34. +8 -16 folding-schemes/src/folding/protogalaxy/circuits.rs
  35. +252 -0 folding-schemes/src/folding/protogalaxy/decider_eth_circuit.rs
  36. +53 -43 folding-schemes/src/folding/protogalaxy/folding.rs
  37. +34 -34 folding-schemes/src/folding/protogalaxy/mod.rs
  38. +46 -3 folding-schemes/src/folding/protogalaxy/traits.rs
  39. +19 -2 folding-schemes/src/folding/traits.rs
  40. +4 -2 folding-schemes/src/lib.rs
  41. +26 -10 folding-schemes/src/utils/gadgets.rs
  42. +12 -4 frontends/src/circom/utils.rs
  43. +9 -6 frontends/src/noir/mod.rs
  44. +1 -1 solidity-verifiers/src/utils/mod.rs
  45. +8 -7 solidity-verifiers/src/verifiers/nova_cyclefold.rs
  46. +32 -45 solidity-verifiers/templates/nova_cyclefold_decider.askama.sol

+8 -7 examples/circom_full_flow.rs

@@ -19,9 +19,12 @@ use std::time::Instant;
use folding_schemes::{
commitment::{kzg::KZG, pedersen::Pedersen},
- folding::nova::{
- decider_eth::{prepare_calldata, Decider as DeciderEth},
- Nova, PreprocessorParam,
+ folding::{
+ nova::{
+ decider_eth::{prepare_calldata, Decider as DeciderEth},
+ Nova, PreprocessorParam,
+ },
+ traits::CommittedInstanceOps,
},
frontend::FCircuit,
transcript::poseidon::poseidon_canonical_config,
@@ -83,7 +86,6 @@ fn main() {
// prepare the Nova prover & verifier params
let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone());
let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap();
- let pp_hash = nova_params.1.pp_hash().unwrap();
// initialize the folding scheme engine, in our case we use Nova
let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap();
@@ -117,8 +119,8 @@ fn main() {
nova.i,
nova.z_0.clone(),
nova.z_i.clone(),
- &nova.U_i,
- &nova.u_i,
+ &nova.U_i.get_commitments(),
+ &nova.u_i.get_commitments(),
&proof,
)
.unwrap();
@@ -131,7 +133,6 @@ fn main() {
let calldata: Vec<u8> = prepare_calldata(
function_selector,
- pp_hash,
nova.i,
nova.z_0,
nova.z_i,

+8 -7 examples/full_flow.rs

@@ -21,9 +21,12 @@ use std::time::Instant;
use folding_schemes::{
commitment::{kzg::KZG, pedersen::Pedersen},
- folding::nova::{
- decider_eth::{prepare_calldata, Decider as DeciderEth},
- Nova, PreprocessorParam,
+ folding::{
+ nova::{
+ decider_eth::{prepare_calldata, Decider as DeciderEth},
+ Nova, PreprocessorParam,
+ },
+ traits::CommittedInstanceOps,
},
frontend::FCircuit,
transcript::poseidon::poseidon_canonical_config,
@@ -101,7 +104,6 @@ fn main() {
// prepare the Nova prover & verifier params
let nova_preprocess_params = PreprocessorParam::new(poseidon_config.clone(), f_circuit);
let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap();
- let pp_hash = nova_params.1.pp_hash().unwrap();
// initialize the folding scheme engine, in our case we use Nova
let mut nova = N::init(&nova_params, f_circuit, z_0).unwrap();
@@ -125,8 +127,8 @@ fn main() {
nova.i,
nova.z_0.clone(),
nova.z_i.clone(),
- &nova.U_i,
- &nova.u_i,
+ &nova.U_i.get_commitments(),
+ &nova.u_i.get_commitments(),
&proof,
)
.unwrap();
@@ -139,7 +141,6 @@ fn main() {
let calldata: Vec<u8> = prepare_calldata(
function_selector,
- pp_hash,
nova.i,
nova.z_0,
nova.z_i,

+9 -8 examples/noir_full_flow.rs

@@ -16,9 +16,12 @@ use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2};
use folding_schemes::{
commitment::{kzg::KZG, pedersen::Pedersen},
- folding::nova::{
- decider_eth::{prepare_calldata, Decider as DeciderEth},
- Nova, PreprocessorParam,
+ folding::{
+ nova::{
+ decider_eth::{prepare_calldata, Decider as DeciderEth},
+ Nova, PreprocessorParam,
+ },
+ traits::CommittedInstanceOps,
},
frontend::FCircuit,
transcript::poseidon::poseidon_canonical_config,
@@ -46,7 +49,7 @@ fn main() {
cur_path.to_str().unwrap()
);
- let circuit = load_noir_circuit(circuit_path);
+ let circuit = load_noir_circuit(circuit_path).unwrap();
let f_circuit = NoirFCircuit {
circuit,
state_len: 1,
@@ -72,7 +75,6 @@ fn main() {
// prepare the Nova prover & verifier params
let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone());
let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap();
- let pp_hash = nova_params.1.pp_hash().unwrap();
// initialize the folding scheme engine, in our case we use Nova
let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap();
@@ -104,8 +106,8 @@ fn main() {
nova.i,
nova.z_0.clone(),
nova.z_i.clone(),
- &nova.U_i,
- &nova.u_i,
+ &nova.U_i.get_commitments(),
+ &nova.u_i.get_commitments(),
&proof,
)
.unwrap();
@@ -118,7 +120,6 @@ fn main() {
let calldata: Vec<u8> = prepare_calldata(
function_selector,
- pp_hash,
nova.i,
nova.z_0,
nova.z_i,

+8 -7 examples/noname_full_flow.rs

@@ -17,9 +17,12 @@ use ark_grumpkin::{constraints::GVar as GVar2, Projective as G2};
use folding_schemes::{
commitment::{kzg::KZG, pedersen::Pedersen},
- folding::nova::{
- decider_eth::{prepare_calldata, Decider as DeciderEth},
- Nova, PreprocessorParam,
+ folding::{
+ nova::{
+ decider_eth::{prepare_calldata, Decider as DeciderEth},
+ Nova, PreprocessorParam,
+ },
+ traits::CommittedInstanceOps,
},
frontend::FCircuit,
transcript::poseidon::poseidon_canonical_config,
@@ -85,7 +88,6 @@ fn main() {
// prepare the Nova prover & verifier params
let nova_preprocess_params = PreprocessorParam::new(poseidon_config, f_circuit.clone());
let nova_params = N::preprocess(&mut rng, &nova_preprocess_params).unwrap();
- let pp_hash = nova_params.1.pp_hash().unwrap();
// initialize the folding scheme engine, in our case we use Nova
let mut nova = N::init(&nova_params, f_circuit.clone(), z_0).unwrap();
@@ -119,8 +121,8 @@ fn main() {
nova.i,
nova.z_0.clone(),
nova.z_i.clone(),
- &nova.U_i,
- &nova.u_i,
+ &nova.U_i.get_commitments(),
+ &nova.u_i.get_commitments(),
&proof,
)
.unwrap();
@@ -133,7 +135,6 @@ fn main() {
let calldata: Vec<u8> = prepare_calldata(
function_selector,
- pp_hash,
nova.i,
nova.z_0,
nova.z_i,

+35 -0 folding-schemes/src/arith/ccs/circuits.rs

@@ -0,0 +1,35 @@
use super::CCS;
use crate::utils::gadgets::SparseMatrixVar;
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
fields::fp::FpVar,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::borrow::Borrow;
/// CCSMatricesVar contains the matrices 'M' of the CCS without the rest of CCS parameters.
///
#[derive(Debug, Clone)]
pub struct CCSMatricesVar<F: PrimeField> {
// we only need native representation, so the constraint field==F
pub M: Vec<SparseMatrixVar<FpVar<F>>>,
}
impl<F: PrimeField> AllocVar<CCS<F>, F> for CCSMatricesVar<F> {
fn new_variable<T: Borrow<CCS<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let M: Vec<SparseMatrixVar<FpVar<F>>> = val
.borrow()
.M
.iter()
.map(|M| SparseMatrixVar::<FpVar<F>>::new_constant(cs.clone(), M.clone()))
.collect::<Result<_, SynthesisError>>()?;
Ok(Self { M })
})
}
}

folding-schemes/src/arith/ccs.rs → folding-schemes/src/arith/ccs/mod.rs

@@ -9,6 +9,8 @@ use crate::Error;
use super::ArithSerializer;
use super::{r1cs::R1CS, Arith};
+ pub mod circuits;
/// CCS represents the Customizable Constraint Systems structure defined in
/// the [CCS paper](https://eprint.iacr.org/2023/552)
#[derive(Debug, Clone, Eq, PartialEq)]

+301 -0 folding-schemes/src/arith/r1cs/circuits.rs

@@ -0,0 +1,301 @@
use crate::{
arith::ArithGadget,
utils::gadgets::{EquivalenceGadget, MatrixGadget, SparseMatrixVar, VectorGadget},
};
use ark_ff::PrimeField;
use ark_r1cs_std::alloc::{AllocVar, AllocationMode};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{borrow::Borrow, marker::PhantomData, One};
use super::R1CS;
/// An in-circuit representation of the `R1CS` struct.
///
/// `M` is for the modulo operation involved in the satisfiability check when
/// the underlying `FVar` is `NonNativeUintVar`.
#[derive(Debug, Clone)]
pub struct R1CSMatricesVar<M, FVar> {
_m: PhantomData<M>,
pub A: SparseMatrixVar<FVar>,
pub B: SparseMatrixVar<FVar>,
pub C: SparseMatrixVar<FVar>,
}
impl<F: PrimeField, ConstraintF: PrimeField, FVar: AllocVar<F, ConstraintF>>
AllocVar<R1CS<F>, ConstraintF> for R1CSMatricesVar<F, FVar>
{
fn new_variable<T: Borrow<R1CS<F>>>(
cs: impl Into<Namespace<ConstraintF>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
Ok(Self {
_m: PhantomData,
A: SparseMatrixVar::<FVar>::new_constant(cs.clone(), &val.borrow().A)?,
B: SparseMatrixVar::<FVar>::new_constant(cs.clone(), &val.borrow().B)?,
C: SparseMatrixVar::<FVar>::new_constant(cs.clone(), &val.borrow().C)?,
})
})
}
}
impl<M, FVar> R1CSMatricesVar<M, FVar>
where
SparseMatrixVar<FVar>: MatrixGadget<FVar>,
[FVar]: VectorGadget<FVar>,
{
pub fn eval_at_z(&self, z: &[FVar]) -> Result<(Vec<FVar>, Vec<FVar>), SynthesisError> {
// Multiply Cz by z[0] (u) here, allowing this method to be reused for
// both relaxed and unrelaxed R1CS.
let Az = self.A.mul_vector(z)?;
let Bz = self.B.mul_vector(z)?;
let Cz = self.C.mul_vector(z)?;
let uCz = Cz.mul_scalar(&z[0])?;
let AzBz = Az.hadamard(&Bz)?;
Ok((AzBz, uCz))
}
}
impl<M, FVar, WVar: AsRef<[FVar]>, UVar: AsRef<[FVar]>> ArithGadget<WVar, UVar>
for R1CSMatricesVar<M, FVar>
where
SparseMatrixVar<FVar>: MatrixGadget<FVar>,
[FVar]: VectorGadget<FVar> + EquivalenceGadget<M>,
FVar: Clone + One,
{
/// Evaluation is a tuple of two vectors (`AzBz` and `uCz`) instead of a
/// single vector `AzBz - uCz`, because subtraction is not supported for
/// `FVar = NonNativeUintVar`.
type Evaluation = (Vec<FVar>, Vec<FVar>);
fn eval_relation(&self, w: &WVar, u: &UVar) -> Result<Self::Evaluation, SynthesisError> {
self.eval_at_z(&[&[FVar::one()], u.as_ref(), w.as_ref()].concat())
}
fn enforce_evaluation(
_w: &WVar,
_u: &UVar,
(lhs, rhs): Self::Evaluation,
) -> Result<(), SynthesisError> {
lhs.enforce_equivalent(&rhs)
}
}
#[cfg(test)]
pub mod tests {
use std::cmp::max;
use ark_crypto_primitives::crh::{
sha256::{
constraints::{Sha256Gadget, UnitVar},
Sha256,
},
CRHScheme, CRHSchemeGadget,
};
use ark_ec::CurveGroup;
use ark_ff::BigInteger;
use ark_pallas::{Fq, Fr, Projective};
use ark_r1cs_std::{bits::uint8::UInt8, eq::EqGadget, fields::fp::FpVar};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef};
use ark_std::{
rand::{thread_rng, Rng},
One, UniformRand,
};
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use super::*;
use crate::arith::{
r1cs::{
extract_r1cs, extract_w_x,
tests::{get_test_r1cs, get_test_z},
},
Arith,
};
use crate::commitment::{pedersen::Pedersen, CommitmentScheme};
use crate::folding::{
circuits::{
cyclefold::{CycleFoldCommittedInstanceVar, CycleFoldWitnessVar},
nonnative::uint::NonNativeUintVar,
},
nova::{
circuits::CommittedInstanceVar, decider_eth_circuit::WitnessVar, CommittedInstance,
Witness,
},
};
use crate::frontend::{
utils::{CubicFCircuit, CustomFCircuit, WrapperCircuit},
FCircuit,
};
fn prepare_instances<C: CurveGroup, CS: CommitmentScheme<C>, R: Rng>(
mut rng: R,
r1cs: &R1CS<C::ScalarField>,
z: &[C::ScalarField],
) -> (Witness<C>, CommittedInstance<C>) {
let (w, x) = r1cs.split_z(z);
let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows)).unwrap();
let mut w = Witness::new::<false>(w, r1cs.A.n_rows, &mut rng);
w.E = r1cs.eval_at_z(z).unwrap();
let mut u = w.commit::<CS, false>(&cs_pp, x).unwrap();
u.u = z[0];
(w, u)
}
#[test]
fn test_relaxed_r1cs_small_gadget_handcrafted() {
let rng = &mut thread_rng();
let r1cs: R1CS<Fr> = get_test_r1cs();
let mut z = get_test_z(3);
z[0] = Fr::rand(rng);
let (w, u) = prepare_instances::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
let cs = ConstraintSystem::<Fr>::new_ref();
let wVar = WitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap();
let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(u)).unwrap();
let r1csVar =
R1CSMatricesVar::<Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
r1csVar.enforce_relation(&wVar, &uVar).unwrap();
assert!(cs.is_satisfied().unwrap());
}
// gets as input a circuit that implements the ConstraintSynthesizer trait, and that has been
// initialized.
fn test_relaxed_r1cs_gadget<CS: ConstraintSynthesizer<Fr>>(circuit: CS) {
let rng = &mut thread_rng();
let cs = ConstraintSystem::<Fr>::new_ref();
circuit.generate_constraints(cs.clone()).unwrap();
cs.finalize();
assert!(cs.is_satisfied().unwrap());
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fr>(&cs).unwrap();
let (w, x) = extract_w_x::<Fr>(&cs);
r1cs.check_relation(&w, &x).unwrap();
let mut z = [vec![Fr::one()], x, w].concat();
z[0] = Fr::rand(rng);
let (w, u) = prepare_instances::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
r1cs.check_relation(&w, &u).unwrap();
// set new CS for the circuit that checks the RelaxedR1CS of our original circuit
let cs = ConstraintSystem::<Fr>::new_ref();
// prepare the inputs for our circuit
let wVar = WitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap();
let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(u)).unwrap();
let r1csVar =
R1CSMatricesVar::<Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
r1csVar.enforce_relation(&wVar, &uVar).unwrap();
assert!(cs.is_satisfied().unwrap());
}
#[test]
fn test_relaxed_r1cs_small_gadget_arkworks() {
let z_i = vec![Fr::from(3_u32)];
let cubic_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
let circuit = WrapperCircuit::<Fr, CubicFCircuit<Fr>> {
FC: cubic_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(cubic_circuit.step_native(0, z_i, vec![]).unwrap()),
};
test_relaxed_r1cs_gadget(circuit);
}
struct Sha256TestCircuit<F: PrimeField> {
_f: PhantomData<F>,
pub x: Vec<u8>,
pub y: Vec<u8>,
}
impl<F: PrimeField> ConstraintSynthesizer<F> for Sha256TestCircuit<F> {
fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> Result<(), SynthesisError> {
let x = Vec::<UInt8<F>>::new_witness(cs.clone(), || Ok(self.x))?;
let y = Vec::<UInt8<F>>::new_input(cs.clone(), || Ok(self.y))?;
let unitVar = UnitVar::default();
let comp_y = <Sha256Gadget<F> as CRHSchemeGadget<Sha256, F>>::evaluate(&unitVar, &x)?;
comp_y.0.enforce_equal(&y)?;
Ok(())
}
}
#[test]
fn test_relaxed_r1cs_medium_gadget_arkworks() {
let x = Fr::from(5_u32).into_bigint().to_bytes_le();
let y = <Sha256 as CRHScheme>::evaluate(&(), x.clone()).unwrap();
let circuit = Sha256TestCircuit::<Fr> {
_f: PhantomData,
x,
y,
};
test_relaxed_r1cs_gadget(circuit);
}
#[test]
fn test_relaxed_r1cs_custom_circuit() {
let n_constraints = 10_000;
let custom_circuit = CustomFCircuit::<Fr>::new(n_constraints).unwrap();
let z_i = vec![Fr::from(5_u32)];
let circuit = WrapperCircuit::<Fr, CustomFCircuit<Fr>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()),
};
test_relaxed_r1cs_gadget(circuit);
}
#[test]
fn test_relaxed_r1cs_nonnative_circuit() {
let rng = &mut thread_rng();
let cs = ConstraintSystem::<Fq>::new_ref();
// in practice we would use CycleFoldCircuit, but is a very big circuit (when computed
// non-natively inside the RelaxedR1CS circuit), so in order to have a short test we use a
// custom circuit.
let custom_circuit = CustomFCircuit::<Fq>::new(10).unwrap();
let z_i = vec![Fq::from(5_u32)];
let circuit = WrapperCircuit::<Fq, CustomFCircuit<Fq>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()),
};
circuit.generate_constraints(cs.clone()).unwrap();
cs.finalize();
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fq>(&cs).unwrap();
let (w, x) = extract_w_x::<Fq>(&cs);
let z = [vec![Fq::rand(rng)], x, w].concat();
let (w, u) = prepare_instances::<_, Pedersen<Projective2>, _>(rng, &r1cs, &z);
// natively
let cs = ConstraintSystem::<Fq>::new_ref();
let wVar = WitnessVar::new_witness(cs.clone(), || Ok(&w)).unwrap();
let uVar = CommittedInstanceVar::new_witness(cs.clone(), || Ok(&u)).unwrap();
let r1csVar =
R1CSMatricesVar::<Fq, FpVar<Fq>>::new_witness(cs.clone(), || Ok(&r1cs)).unwrap();
r1csVar.enforce_relation(&wVar, &uVar).unwrap();
// non-natively
let cs = ConstraintSystem::<Fr>::new_ref();
let wVar = CycleFoldWitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap();
let uVar =
CycleFoldCommittedInstanceVar::<_, GVar2>::new_witness(cs.clone(), || Ok(u)).unwrap();
let r1csVar =
R1CSMatricesVar::<Fq, NonNativeUintVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs))
.unwrap();
r1csVar.enforce_relation(&wVar, &uVar).unwrap();
}
}

folding-schemes/src/arith/r1cs.rs → folding-schemes/src/arith/r1cs/mod.rs

@@ -10,6 +10,8 @@ use crate::utils::vec::{
};
use crate::Error;
+ pub mod circuits;
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CS<F: PrimeField> {
pub l: usize, // io len
@@ -121,8 +123,14 @@ impl From> for R1CS {
/// extracts arkworks ConstraintSystem matrices into crate::utils::vec::SparseMatrix format as R1CS
/// struct.
- pub fn extract_r1cs<F: PrimeField>(cs: &ConstraintSystem<F>) -> R1CS<F> {
- let m = cs.to_matrices().unwrap();
+ pub fn extract_r1cs<F: PrimeField>(cs: &ConstraintSystem<F>) -> Result<R1CS<F>, Error> {
+ let m = cs.to_matrices().ok_or_else(|| {
+ Error::ConversionError(
+ "ConstraintSystem".into(),
+ "ConstraintMatrices".into(),
+ "The matrices have not been generated yet".into(),
+ )
+ })?;
let n_rows = cs.num_constraints;
let n_cols = cs.num_instance_variables + cs.num_witness_variables; // cs.num_instance_variables already counts the 1
@@ -143,12 +151,12 @@ pub fn extract_r1cs(cs: &ConstraintSystem) -> R1CS {
coeffs: m.c,
};
- R1CS::<F> {
+ Ok(R1CS::<F> {
l: cs.num_instance_variables - 1, // -1 to subtract the first '1'
A,
B,
C,
- }
+ })
}
/// extracts the witness and the public inputs from arkworks ConstraintSystem.

+1 -5 folding-schemes/src/commitment/ipa.rs

@@ -13,7 +13,6 @@ use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
boolean::Boolean,
fields::{nonnative::NonNativeFieldVar, FieldVar},
- groups::GroupOpsBounds,
prelude::CurveVar,
ToBitsGadget,
};
@@ -492,9 +491,6 @@ impl IPAGadget
where
C: CurveGroup,
GC: CurveVar<C, CF<C>>,
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
/// Verify the IPA opening proof, K=log2(d), where d is the degree of the committed polynomial,
/// and H indicates if the commitment is in hiding mode and thus uses blinding factors, if not,
@@ -515,7 +511,7 @@ where
return Err(SynthesisError::Unsatisfiable);
}
- let P_ = P + U.scalar_mul_le(v.to_bits_le()?.iter())?;
+ let P_ = U.scalar_mul_le(v.to_bits_le()?.iter())? + P;
let mut q_0 = P_;
let mut r = r.clone();

+6 -9 folding-schemes/src/commitment/pedersen.rs

@@ -1,6 +1,6 @@
use ark_ec::CurveGroup;
use ark_ff::Field;
- use ark_r1cs_std::{boolean::Boolean, groups::GroupOpsBounds, prelude::CurveVar};
+ use ark_r1cs_std::{boolean::Boolean, prelude::CurveVar};
use ark_relations::r1cs::SynthesisError;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::Zero;
@@ -194,15 +194,12 @@ impl PedersenGadget
where
C: CurveGroup,
GC: CurveVar<C, CF<C>>,
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
pub fn commit(
- h: GC,
- g: Vec<GC>,
- v: Vec<Vec<Boolean<CF<C>>>>,
- r: Vec<Boolean<CF<C>>>,
+ h: &GC,
+ g: &[GC],
+ v: &[Vec<Boolean<CF<C>>>],
+ r: &[Boolean<CF<C>>],
) -> Result<GC, SynthesisError> {
let mut res = GC::zero();
if H {
@@ -303,7 +300,7 @@ mod tests {
// use the gadget
let cmVar =
- PedersenGadget::<Projective, GVar, hiding>::commit(hVar, gVar, vVar, rVar).unwrap();
+ PedersenGadget::<Projective, GVar, hiding>::commit(&hVar, &gVar, &vVar, &rVar).unwrap();
cmVar.enforce_equal(&expected_cmVar).unwrap();
}
}

+77 -34 folding-schemes/src/folding/circuits/cyclefold.rs

@@ -8,7 +8,6 @@ use ark_r1cs_std::{
boolean::Boolean,
eq::EqGadget,
fields::fp::FpVar,
- groups::GroupOpsBounds,
prelude::CurveVar,
ToConstraintFieldGadget,
};
@@ -17,16 +16,23 @@ use ark_relations::r1cs::{
};
use ark_std::fmt::Debug;
use ark_std::rand::RngCore;
- use ark_std::Zero;
+ use ark_std::{One, Zero};
use core::{borrow::Borrow, marker::PhantomData};
use super::{nonnative::uint::NonNativeUintVar, CF1, CF2};
- use crate::arith::r1cs::{extract_w_x, R1CS};
use crate::commitment::CommitmentScheme;
use crate::constants::NOVA_N_BITS_RO;
use crate::folding::nova::nifs::{nova::NIFS, NIFSTrait};
use crate::transcript::{AbsorbNonNative, AbsorbNonNativeGadget, Transcript, TranscriptVar};
+ use crate::utils::gadgets::{EquivalenceGadget, VectorGadget};
use crate::Error;
+ use crate::{
+ arith::{
+ r1cs::{circuits::R1CSMatricesVar, extract_w_x, R1CS},
+ ArithGadget,
+ },
+ folding::traits::Inputize,
+ };
use ark_crypto_primitives::sponge::poseidon::PoseidonSponge;
/// Re-export the Nova committed instance as `CycleFoldCommittedInstance` and
@@ -35,13 +41,39 @@ pub use crate::folding::nova::{
CommittedInstance as CycleFoldCommittedInstance, Witness as CycleFoldWitness,
};
+ impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>> Inputize<CF2<C>, CycleFoldCommittedInstanceVar<C, GC>>
+ for CycleFoldCommittedInstance<C>
+ {
+ fn inputize(&self) -> Vec<CF2<C>> {
+ let zero = (&C::BaseField::zero(), &C::BaseField::zero());
+ let cmE = self.cmE.into_affine();
+ let cmW = self.cmW.into_affine();
+ let (cmE_x, cmE_y) = cmE.xy().unwrap_or(zero);
+ let (cmW_x, cmW_y) = cmW.xy().unwrap_or(zero);
+ self.u
+ .inputize()
+ .into_iter()
+ .chain(self.x.iter().flat_map(|x| x.inputize()))
+ .chain(
+ [
+ *cmE_x,
+ *cmE_y,
+ C::BaseField::one(),
+ *cmW_x,
+ *cmW_y,
+ C::BaseField::one(),
+ ]
+ .into_iter()
+ .flat_map(|x| x.to_base_prime_field_elements()),
+ )
+ .collect()
+ }
+ }
/// CycleFoldCommittedInstanceVar is the CycleFold CommittedInstance represented
/// in folding verifier circuit
#[derive(Debug, Clone)]
- pub struct CycleFoldCommittedInstanceVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
- where
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
- {
+ pub struct CycleFoldCommittedInstanceVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
pub cmE: GC,
pub u: NonNativeUintVar<CF2<C>>,
pub cmW: GC,
@@ -51,8 +83,6 @@ impl AllocVar, CF2> for CycleFoldCommitt
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
fn new_variable<T: Borrow<CycleFoldCommittedInstance<C>>>(
cs: impl Into<Namespace<CF2<C>>>,
@@ -101,8 +131,7 @@ impl AbsorbNonNativeGadget for CycleFoldCommittedInstanceVa
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>> + ToConstraintFieldGadget<CF2<C>>,
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField + Absorb,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
+ C::BaseField: PrimeField + Absorb,
{
/// Extracts the underlying field elements from `CycleFoldCommittedInstanceVar`, in the order
/// of `u`, `x`, `cmE.x`, `cmE.y`, `cmW.x`, `cmW.y`, `cmE.is_inf || cmW.is_inf` (|| is for
@@ -132,7 +161,7 @@ where
impl<C: CurveGroup> CycleFoldCommittedInstance<C>
where
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField + Absorb,
+ C::BaseField: PrimeField + Absorb,
{
/// hash_cyclefold implements the committed instance hash compatible with the
/// in-circuit implementation `CycleFoldCommittedInstanceVar::hash`.
@@ -153,8 +182,7 @@ impl CycleFoldCommittedInstanceVar
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>> + ToConstraintFieldGadget<CF2<C>>,
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField + Absorb,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
+ C::BaseField: PrimeField + Absorb,
{
/// hash implements the committed instance hash compatible with the native
/// implementation `CycleFoldCommittedInstance::hash_cyclefold`.
@@ -165,7 +193,7 @@ where
/// (reconstraining) them.
#[allow(clippy::type_complexity)]
pub fn hash<S: CryptographicSponge, T: TranscriptVar<CF2<C>, S>>(
- self,
+ &self,
sponge: &T,
pp_hash: FpVar<CF2<C>>, // public params hash
) -> Result<(FpVar<CF2<C>>, Vec<FpVar<CF2<C>>>), SynthesisError> {
@@ -173,7 +201,11 @@ where
let U_vec = self.to_native_sponge_field_elements()?;
sponge.absorb(&pp_hash)?;
sponge.absorb(&U_vec)?;
- Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec))
+ Ok((
+ // `unwrap` is safe because the sponge is guaranteed to return a single element
+ sponge.squeeze_field_elements(1)?.pop().unwrap(),
+ U_vec,
+ ))
}
}
@@ -182,10 +214,7 @@ where
/// represented as native points, which are folded on the auxiliary curve constraints field (E2::Fr
/// = E1::Fq).
#[derive(Debug, Clone)]
- pub struct CommittedInstanceInCycleFoldVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
- where
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
- {
+ pub struct CommittedInstanceInCycleFoldVar<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
_c: PhantomData<C>,
pub cmE: GC,
pub cmW: GC,
@@ -196,7 +225,6 @@ impl AllocVar, CF2>
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
fn new_variable<T: Borrow<CycleFoldCommittedInstance<C>>>(
cs: impl Into<Namespace<CF2<C>>>,
@@ -232,7 +260,7 @@ pub struct CycleFoldWitnessVar {
impl<C> AllocVar<CycleFoldWitness<C>, CF2<C>> for CycleFoldWitnessVar<C>
where
C: CurveGroup,
- <C as ark_ec::CurveGroup>::BaseField: PrimeField,
+ C::BaseField: PrimeField,
{
fn new_variable<T: Borrow<CycleFoldWitness<C>>>(
cs: impl Into<Namespace<CF2<C>>>,
@@ -265,8 +293,7 @@ impl>> NIFSFullGadget
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>>,
- <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
+ C::BaseField: PrimeField,
{
pub fn fold_committed_instance(
r_bits: Vec<Boolean<CF2<C>>>,
@@ -284,13 +311,13 @@ where
Ok(CycleFoldCommittedInstanceVar {
cmE: cmT.scalar_mul_le(r_bits.iter())? + ci1.cmE,
cmW: ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?,
- u: ci1.u.add_no_align(&r_nonnat).modulo::<CF1<C>>()?,
+ u: ci1.u.add_no_align(&r_nonnat)?.modulo::<CF1<C>>()?,
x: ci1
.x
.iter()
.zip(ci2.x)
.map(|(a, b)| {
- a.add_no_align(&r_nonnat.mul_no_align(&b)?)
+ a.add_no_align(&r_nonnat.mul_no_align(&b)?)?
.modulo::<CF1<C>>()
})
.collect::<Result<Vec<_>, _>>()?,
@@ -319,6 +346,29 @@ where
}
}
+ impl<C: CurveGroup, GC: CurveVar<C, CF2<C>>>
+ ArithGadget<CycleFoldWitnessVar<C>, CycleFoldCommittedInstanceVar<C, GC>>
+ for R1CSMatricesVar<CF1<C>, NonNativeUintVar<CF2<C>>>
+ {
+ type Evaluation = (Vec<NonNativeUintVar<CF2<C>>>, Vec<NonNativeUintVar<CF2<C>>>);
+ fn eval_relation(
+ &self,
+ w: &CycleFoldWitnessVar<C>,
+ u: &CycleFoldCommittedInstanceVar<C, GC>,
+ ) -> Result<Self::Evaluation, SynthesisError> {
+ self.eval_at_z(&[&[u.u.clone()][..], &u.x, &w.W].concat())
+ }
+ fn enforce_evaluation(
+ w: &CycleFoldWitnessVar<C>,
+ _u: &CycleFoldCommittedInstanceVar<C, GC>,
+ (AzBz, uCz): Self::Evaluation,
+ ) -> Result<(), SynthesisError> {
+ EquivalenceGadget::<CF1<C>>::enforce_equivalent(&AzBz[..], &uCz.add(&w.E)?[..])
+ }
+ }
/// CycleFoldChallengeGadget computes the RO challenge used for the CycleFold instances NIFS, it contains a
/// rust-native and a in-circuit compatible versions.
pub struct CycleFoldChallengeGadget<C: CurveGroup, GC: CurveVar<C, CF2<C>>> {
@@ -329,9 +379,7 @@ impl CycleFoldChallengeGadget
where
C: CurveGroup,
GC: CurveVar<C, CF2<C>> + ToConstraintFieldGadget<CF2<C>>,
- <C as CurveGroup>::BaseField: PrimeField,
- <C as CurveGroup>::BaseField: Absorb,
- for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
+ C::BaseField: PrimeField + Absorb,
{
pub fn get_challenge_native<T: Transcript<C::BaseField>>(
transcript: &mut T,
@@ -425,7 +473,6 @@ impl> ConstraintSynthesizer
where
GC: ToConstraintFieldGadget<CFG::F>,
CFG::F: PrimeField,
- for<'a> &'a GC: GroupOpsBounds<'a, CFG::C, GC>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CFG::F>) -> Result<(), SynthesisError> {
let r_bits = Vec::<Boolean<CFG::F>>::new_witness(cs.clone(), || {
@@ -510,7 +557,6 @@ pub struct CycleFoldNIFS<
> where
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
- for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
_c1: PhantomData<C1>,
_c2: PhantomData<C2>,
@@ -526,7 +572,6 @@ where
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
- for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
fn prove(
cf_r_Fq: C2::ScalarField, // C2::Fr==C1::Fq
@@ -597,8 +642,6 @@ where
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
- for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
- for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
cf_circuit.generate_constraints(cs2.clone())?;

+211 -0 folding-schemes/src/folding/circuits/decider/mod.rs

@@ -0,0 +1,211 @@
use ark_crypto_primitives::sponge::{
poseidon::constraints::PoseidonSpongeVar, CryptographicSponge,
};
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_poly::Polynomial;
use ark_r1cs_std::{
fields::{fp::FpVar, FieldVar},
poly::{domain::Radix2DomainVar, evaluations::univariate::EvaluationsVar},
ToConstraintFieldGadget,
};
use ark_relations::r1cs::SynthesisError;
use ark_std::log2;
use crate::folding::traits::{CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps};
use crate::transcript::{Transcript, TranscriptVar};
use crate::utils::vec::poly_from_vec;
use crate::Error;
use crate::{arith::Arith, folding::circuits::CF1};
pub mod off_chain;
pub mod on_chain;
/// Gadget that computes the KZG challenges.
/// It also offers the rust native implementation compatible with the gadget.
pub struct KZGChallengesGadget {}
impl KZGChallengesGadget {
pub fn get_challenges_native<
C: CurveGroup,
T: Transcript<CF1<C>>,
U: CommittedInstanceOps<C>,
>(
transcript: &mut T,
U_i: &U,
) -> Vec<CF1<C>> {
let mut challenges = vec![];
for cm in U_i.get_commitments() {
transcript.absorb_nonnative(&cm);
challenges.push(transcript.get_challenge());
}
challenges
}
pub fn get_challenges_gadget<
C: CurveGroup,
S: CryptographicSponge,
T: TranscriptVar<CF1<C>, S>,
U: CommittedInstanceVarOps<C>,
>(
transcript: &mut T,
U_i: &U,
) -> Result<Vec<FpVar<CF1<C>>>, SynthesisError> {
let mut challenges = vec![];
for cm in U_i.get_commitments() {
transcript.absorb(&cm.to_constraint_field()?)?;
challenges.push(transcript.get_challenge()?);
}
Ok(challenges)
}
}
/// Gadget that interpolates the polynomial from the given vector and returns
/// its evaluation at the given point.
/// It also offers the rust native implementation compatible with the gadget.
pub struct EvalGadget {}
impl EvalGadget {
pub fn evaluate_native<F: PrimeField>(v: &[F], point: F) -> Result<F, Error> {
let mut v = v.to_vec();
v.resize(v.len().next_power_of_two(), F::zero());
Ok(poly_from_vec(v)?.evaluate(&point))
}
pub fn evaluate_gadget<F: PrimeField>(
v: &[FpVar<F>],
point: &FpVar<F>,
) -> Result<FpVar<F>, SynthesisError> {
let mut v = v.to_vec();
v.resize(v.len().next_power_of_two(), FpVar::zero());
let n = v.len() as u64;
let gen = F::get_root_of_unity(n).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
// `unwrap` below is safe because `Radix2DomainVar::new` only fails if
// `offset.enforce_not_equal(&FpVar::zero())` returns an error.
// But in our case, `offset` is `FpVar::one()`, i.e., both operands of
// `enforce_not_equal` are constants.
// Consequently, `FpVar`'s implementation of `enforce_not_equal` will
// always return `Ok(())`.
let domain = Radix2DomainVar::new(gen, log2(v.len()) as u64, FpVar::one()).unwrap();
let evaluations_var = EvaluationsVar::from_vec_and_domain(v, domain, true);
evaluations_var.interpolate_and_evaluate(point)
}
}
/// This is a temporary workaround for step 6 (running NIFS.V for group elements
/// in circuit) in an NIFS-agnostic way, because different folding schemes have
/// different interfaces of folding verification now.
///
/// In the future, we may introduce a better solution that uses a trait for all
/// folding schemes that specifies their native and in-circuit behaviors.
pub trait DeciderEnabledNIFS<
C: CurveGroup,
RU: CommittedInstanceOps<C>, // Running instance
IU: CommittedInstanceOps<C>, // Incoming instance
W: WitnessOps<CF1<C>>,
A: Arith<W, RU>,
>
{
type ProofDummyCfg;
type Proof: Dummy<Self::ProofDummyCfg>;
type RandomnessDummyCfg;
type Randomness: Dummy<Self::RandomnessDummyCfg>;
/// Fold the field elements in `U` and `u` inside the circuit.
///
/// `U_vec` is `U` expressed as a vector of `FpVar`s, which can be reused
/// before or after calling this function to save constraints.
#[allow(clippy::too_many_arguments)]
fn fold_field_elements_gadget(
arith: &A,
transcript: &mut PoseidonSpongeVar<CF1<C>>,
pp_hash: FpVar<CF1<C>>,
U: RU::Var,
U_vec: Vec<FpVar<CF1<C>>>,
u: IU::Var,
proof: Self::Proof,
randomness: Self::Randomness,
) -> Result<RU::Var, SynthesisError>;
/// Fold the group elements (i.e., commitments) in `U` and `u` outside the
/// circuit.
fn fold_group_elements_native(
U_commitments: &[C],
u_commitments: &[C],
proof: Option<Self::Proof>,
randomness: Self::Randomness,
) -> Result<Vec<C>, Error>;
}
#[cfg(test)]
pub mod tests {
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar, poseidon::PoseidonSponge,
};
use ark_pallas::{Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::UniformRand;
use super::*;
use crate::folding::nova::{circuits::CommittedInstanceVar, CommittedInstance};
use crate::transcript::poseidon::poseidon_canonical_config;
// checks that the gadget and native implementations of the challenge computation match
#[test]
fn test_kzg_challenge_gadget() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
let mut transcript = PoseidonSponge::<Fr>::new(&poseidon_config);
let U_i = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
// compute the challenge natively
let challenges = KZGChallengesGadget::get_challenges_native(&mut transcript, &U_i);
let cs = ConstraintSystem::<Fr>::new_ref();
let U_iVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(U_i.clone()))
.unwrap();
let mut transcript_var = PoseidonSpongeVar::<Fr>::new(cs.clone(), &poseidon_config);
let challenges_var =
KZGChallengesGadget::get_challenges_gadget(&mut transcript_var, &U_iVar).unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed hashes match
assert_eq!(challenges_var.value().unwrap(), challenges);
}
#[test]
fn test_polynomial_interpolation() {
let mut rng = ark_std::test_rng();
let n = 12;
let l = 1 << n;
let v: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(&mut rng))
.take(l)
.collect();
let challenge = Fr::rand(&mut rng);
use ark_poly::Polynomial;
let polynomial = poly_from_vec(v.to_vec()).unwrap();
let eval = polynomial.evaluate(&challenge);
let cs = ConstraintSystem::<Fr>::new_ref();
let vVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(v)).unwrap();
let challengeVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(challenge)).unwrap();
let evalVar = EvalGadget::evaluate_gadget(&vVar, &challengeVar).unwrap();
assert_eq!(evalVar.value().unwrap(), eval);
assert!(cs.is_satisfied().unwrap());
}
}

+316 -0 folding-schemes/src/folding/circuits/decider/off_chain.rs

@@ -0,0 +1,316 @@
/// This file implements a generic offchain decider circuit.
/// For ethereum use cases, use the `GenericOnchainDeciderCircuit`.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html
use ark_crypto_primitives::sponge::{
constraints::{AbsorbGadget, CryptographicSpongeVar},
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig},
Absorb,
};
use ark_ec::CurveGroup;
use ark_r1cs_std::{
alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, prelude::CurveVar, ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError};
use ark_std::{marker::PhantomData, Zero};
use crate::{
arith::{
r1cs::{circuits::R1CSMatricesVar, R1CS},
Arith, ArithGadget,
},
folding::{
circuits::{
cyclefold::{
CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, CycleFoldWitness,
},
decider::{EvalGadget, KZGChallengesGadget},
nonnative::affine::NonNativeAffineVar,
CF1, CF2,
},
nova::{circuits::CommittedInstanceVar, decider_eth_circuit::WitnessVar},
traits::{CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps, WitnessVarOps},
},
};
use super::DeciderEnabledNIFS;
/// Circuit that implements part of the in-circuit checks needed for the offchain verification over
/// the Curve2's BaseField (=Curve1's ScalarField).
pub struct GenericOffchainDeciderCircuit1<
C1: CurveGroup,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
RU: CommittedInstanceOps<C1>, // Running instance
IU: CommittedInstanceOps<C1>, // Incoming instance
W: WitnessOps<CF1<C1>>, // Witness
A: Arith<W, RU>, // Constraint system
AVar: ArithGadget<W::Var, RU::Var>, // In-circuit representation of `A`
D: DeciderEnabledNIFS<C1, RU, IU, W, A>,
> {
pub _gc2: PhantomData<GC2>,
pub _avar: PhantomData<AVar>,
/// Constraint system of the Augmented Function circuit
pub arith: A,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
/// public params hash
pub pp_hash: CF1<C1>,
pub i: CF1<C1>,
/// initial state
pub z_0: Vec<CF1<C1>>,
/// current i-th state
pub z_i: Vec<CF1<C1>>,
/// Folding scheme instances
pub U_i: RU,
pub W_i: W,
pub u_i: IU,
pub w_i: W,
pub U_i1: RU,
pub W_i1: W,
/// Helper for folding verification
pub proof: D::Proof,
pub randomness: D::Randomness,
/// CycleFold running instance
pub cf_U_i: CycleFoldCommittedInstance<C2>,
/// KZG challenges
pub kzg_challenges: Vec<CF1<C1>>,
pub kzg_evaluations: Vec<CF1<C1>>,
}
impl<
C1: CurveGroup,
C2: CurveGroup<ScalarField = CF2<C1>, BaseField = CF1<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
RU: CommittedInstanceOps<C1> + for<'a> Dummy<&'a A>,
IU: CommittedInstanceOps<C1> + for<'a> Dummy<&'a A>,
W: WitnessOps<CF1<C1>> + for<'a> Dummy<&'a A>,
A: Arith<W, RU>,
AVar: ArithGadget<W::Var, RU::Var> + AllocVar<A, CF1<C1>>,
D: DeciderEnabledNIFS<C1, RU, IU, W, A>,
>
Dummy<(
A,
&R1CS<CF1<C2>>,
PoseidonConfig<CF1<C1>>,
D::ProofDummyCfg,
D::RandomnessDummyCfg,
usize,
usize,
)> for GenericOffchainDeciderCircuit1<C1, C2, GC2, RU, IU, W, A, AVar, D>
{
fn dummy(
(
arith,
cf_arith,
poseidon_config,
proof_config,
randomness_config,
state_len,
num_commitments,
): (
A,
&R1CS<CF1<C2>>,
PoseidonConfig<CF1<C1>>,
D::ProofDummyCfg,
D::RandomnessDummyCfg,
usize,
usize,
),
) -> Self {
Self {
_gc2: PhantomData,
_avar: PhantomData,
poseidon_config,
pp_hash: Zero::zero(),
i: Zero::zero(),
z_0: vec![Zero::zero(); state_len],
z_i: vec![Zero::zero(); state_len],
U_i: RU::dummy(&arith),
W_i: W::dummy(&arith),
u_i: IU::dummy(&arith),
w_i: W::dummy(&arith),
U_i1: RU::dummy(&arith),
W_i1: W::dummy(&arith),
proof: D::Proof::dummy(proof_config),
randomness: D::Randomness::dummy(randomness_config),
cf_U_i: CycleFoldCommittedInstance::dummy(cf_arith),
kzg_challenges: vec![Zero::zero(); num_commitments],
kzg_evaluations: vec![Zero::zero(); num_commitments],
arith,
}
}
}
impl<
C1: CurveGroup,
C2: CurveGroup<ScalarField = CF2<C1>, BaseField = CF1<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
RU: CommittedInstanceOps<C1>,
IU: CommittedInstanceOps<C1>,
W: WitnessOps<CF1<C1>>,
A: Arith<W, RU>,
AVar: ArithGadget<W::Var, RU::Var> + AllocVar<A, CF1<C1>>,
D: DeciderEnabledNIFS<C1, RU, IU, W, A>,
> ConstraintSynthesizer<CF1<C1>>
for GenericOffchainDeciderCircuit1<C1, C2, GC2, RU, IU, W, A, AVar, D>
where
RU::Var: AbsorbGadget<CF1<C1>> + CommittedInstanceVarOps<C1, PointVar = NonNativeAffineVar<C1>>,
CF1<C1>: Absorb,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let arith = AVar::new_witness(cs.clone(), || Ok(&self.arith))?;
let pp_hash = FpVar::new_input(cs.clone(), || Ok(self.pp_hash))?;
let i = FpVar::new_input(cs.clone(), || Ok(self.i))?;
let z_0 = Vec::new_input(cs.clone(), || Ok(self.z_0))?;
let z_i = Vec::new_input(cs.clone(), || Ok(self.z_i))?;
let u_i = IU::Var::new_witness(cs.clone(), || Ok(self.u_i))?;
let U_i = RU::Var::new_witness(cs.clone(), || Ok(self.U_i))?;
// here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i))
let U_i1_commitments = Vec::<NonNativeAffineVar<C1>>::new_input(cs.clone(), || {
Ok(self.U_i1.get_commitments())
})?;
let U_i1 = RU::Var::new_witness(cs.clone(), || Ok(self.U_i1))?;
let W_i1 = W::Var::new_witness(cs.clone(), || Ok(self.W_i1))?;
U_i1.get_commitments().enforce_equal(&U_i1_commitments)?;
let cf_U_i =
CycleFoldCommittedInstanceVar::<C2, GC2>::new_input(cs.clone(), || Ok(self.cf_U_i))?;
// allocate the inputs for the checks 7.1 and 7.2
let kzg_challenges = Vec::new_input(cs.clone(), || Ok(self.kzg_challenges))?;
let kzg_evaluations = Vec::new_input(cs.clone(), || Ok(self.kzg_evaluations))?;
// `sponge` is for digest computation.
let sponge = PoseidonSpongeVar::new(cs.clone(), &self.poseidon_config);
// `transcript` is for challenge generation.
let mut transcript = sponge.clone();
// notice that the `pp_hash` is absorbed inside the ChallengeGadget::get_challenge_gadget call
// 1. enforce `U_{i+1}` and `W_{i+1}` satisfy `arith`
arith.enforce_relation(&W_i1, &U_i1)?;
// 2. enforce `u_i` is an incoming instance
u_i.enforce_incoming()?;
// 3. u_i.x[0] == H(i, z_0, z_i, U_i), u_i.x[1] == H(cf_U_i)
let (u_i_x, U_i_vec) = U_i.hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
let (cf_u_i_x, _) = cf_U_i.hash(&sponge, pp_hash.clone())?;
u_i.get_public_inputs().enforce_equal(&[u_i_x, cf_u_i_x])?;
// 6.1. partially enforce `NIFS.V(U_i, u_i) = U_{i+1}`.
D::fold_field_elements_gadget(
&self.arith,
&mut transcript,
pp_hash,
U_i,
U_i_vec,
u_i,
self.proof,
self.randomness,
)?
.enforce_partial_equal(&U_i1)?;
// 7.1. compute and check KZG challenges
KZGChallengesGadget::get_challenges_gadget(&mut transcript, &U_i1)?
.enforce_equal(&kzg_challenges)?;
// 7.2. check the claimed evaluations
for (((v, _r), c), e) in W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.zip(&kzg_evaluations)
{
// The randomness `_r` is currently not used.
EvalGadget::evaluate_gadget(v, c)?.enforce_equal(e)?;
}
Ok(())
}
}
/// Circuit that implements part of the in-circuit checks needed for the offchain verification over
/// the Curve1's BaseField (=Curve2's ScalarField).
pub struct GenericOffchainDeciderCircuit2<C2: CurveGroup> {
/// R1CS of the CycleFold circuit
pub cf_arith: R1CS<CF1<C2>>,
pub poseidon_config: PoseidonConfig<CF1<C2>>,
/// public params hash
pub pp_hash: CF1<C2>,
/// CycleFold running instance
pub cf_U_i: CycleFoldCommittedInstance<C2>,
pub cf_W_i: CycleFoldWitness<C2>,
/// KZG challenges
pub kzg_challenges: Vec<CF1<C2>>,
pub kzg_evaluations: Vec<CF1<C2>>,
}
impl<C2: CurveGroup> Dummy<(R1CS<CF1<C2>>, PoseidonConfig<CF1<C2>>, usize)>
for GenericOffchainDeciderCircuit2<C2>
{
fn dummy(
(cf_arith, poseidon_config, num_commitments): (
R1CS<CF1<C2>>,
PoseidonConfig<CF1<C2>>,
usize,
),
) -> Self {
Self {
poseidon_config,
pp_hash: Zero::zero(),
cf_U_i: CycleFoldCommittedInstance::dummy(&cf_arith),
cf_W_i: CycleFoldWitness::dummy(&cf_arith),
kzg_challenges: vec![Zero::zero(); num_commitments],
kzg_evaluations: vec![Zero::zero(); num_commitments],
cf_arith,
}
}
}
impl<C2: CurveGroup> ConstraintSynthesizer<CF1<C2>> for GenericOffchainDeciderCircuit2<C2> {
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C2>>) -> Result<(), SynthesisError> {
let cf_r1cs = R1CSMatricesVar::<CF1<C2>, FpVar<CF1<C2>>>::new_witness(cs.clone(), || {
Ok(self.cf_arith.clone())
})?;
let pp_hash = FpVar::new_input(cs.clone(), || Ok(self.pp_hash))?;
let cf_U_i = CommittedInstanceVar::new_input(cs.clone(), || Ok(self.cf_U_i))?;
let cf_W_i = WitnessVar::new_witness(cs.clone(), || Ok(self.cf_W_i))?;
// allocate the inputs for the checks 4.1 and 4.2
let kzg_challenges = Vec::new_input(cs.clone(), || Ok(self.kzg_challenges))?;
let kzg_evaluations = Vec::new_input(cs.clone(), || Ok(self.kzg_evaluations))?;
// `transcript` is for challenge generation.
let mut transcript = PoseidonSpongeVar::new(cs.clone(), &self.poseidon_config);
transcript.absorb(&pp_hash)?;
// 5. enforce `cf_U_i` and `cf_W_i` satisfy `cf_r1cs`
cf_r1cs.enforce_relation(&cf_W_i, &cf_U_i)?;
// 4.1. compute and check KZG challenges
KZGChallengesGadget::get_challenges_gadget(&mut transcript, &cf_U_i)?
.enforce_equal(&kzg_challenges)?;
// 4.2. check the claimed evaluations
for (((v, _r), c), e) in cf_W_i
.get_openings()
.iter()
.zip(&kzg_challenges)
.zip(&kzg_evaluations)
{
// The randomness `_r` is currently not used.
EvalGadget::evaluate_gadget(v, c)?.enforce_equal(e)?;
}
Ok(())
}
}

+325 -0 folding-schemes/src/folding/circuits/decider/on_chain.rs

@@ -0,0 +1,325 @@
/// This file implements the onchain (Ethereum's EVM) decider circuit. For non-ethereum use cases,
/// other more efficient approaches can be used.
use ark_crypto_primitives::sponge::{
constraints::{AbsorbGadget, CryptographicSpongeVar},
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig},
Absorb,
};
use ark_ec::CurveGroup;
use ark_r1cs_std::{
alloc::AllocVar, eq::EqGadget, fields::fp::FpVar, prelude::CurveVar, ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError};
use ark_std::{marker::PhantomData, Zero};
use crate::{
arith::{r1cs::R1CS, Arith, ArithGadget},
commitment::pedersen::Params as PedersenParams,
folding::{
circuits::{
cyclefold::{
CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, CycleFoldWitness,
},
decider::{EvalGadget, KZGChallengesGadget},
nonnative::affine::NonNativeAffineVar,
CF1, CF2,
},
traits::{CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps, WitnessVarOps},
},
};
use super::DeciderEnabledNIFS;
/// A generic circuit tailored for the onchain (Ethereum's EVM) verification of
/// IVC proofs, where we support IVC built upon any folding scheme.
///
/// Specifically, `GenericOnchainDeciderCircuit` implements the in-circuit version
/// of the IVC verification algorithm, which essentially checks the following:
/// - `R_arith(W_i, U_i)`:
/// The running instance `U_i` and witness `W_i` satisfy `arith`,
/// and the commitments in `U_i` open to the values in `W_i`.
/// - `R_arith(w_i, u_i)`:
/// The incoming instance `u_i` and witness `w_i` satisfy `arith`,
/// and the commitments in `u_i` open to the values in `w_i`.
/// - `R_cf_arith(cf_W_i, cf_U_i)`:
/// The CycleFold instance `cf_U_i` and witness `cf_W_i` satisfy `cf_arith`,
/// and the commitments in `cf_U_i` open to the values in `cf_W_i`.
/// - `u_i` contains the correct hash of the initial and final states.
///
/// To reduce the number of relation checks, the prover, before invoking the
/// circuit, further folds `U_i, u_i` into `U_{i+1}`, and `W_i, w_i` into
/// `W_{i+1}`.
/// Now, the circuit only needs to perform two relation checks, i.e.,
/// `R_arith(W_{i+1}, U_{i+1})` and `R_cf_arith(cf_W_i, cf_U_i)`, plus a few
/// constraints for enforcing the correct hash in `u_i` and the correct folding
/// from `U_i, u_i` to `U_{i+1}`.
///
/// We further reduce the circuit size by avoiding the non-native commitment
/// checks involved in `R_arith(W_{i+1}, U_{i+1})`.
/// Now, we only check the satisfiability of the constraint system `arith`
/// with the witness `W_{i+1}` and instance `U_{i+1}` in the circuit, but the
/// actual commitment checks are done with the help of KZG.
///
/// For more details, see [https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html].
pub struct GenericOnchainDeciderCircuit<
C1: CurveGroup,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
RU: CommittedInstanceOps<C1>, // Running instance
IU: CommittedInstanceOps<C1>, // Incoming instance
W: WitnessOps<CF1<C1>>, // Witness
A: Arith<W, RU>, // Constraint system
AVar: ArithGadget<W::Var, RU::Var>, // In-circuit representation of `A`
D: DeciderEnabledNIFS<C1, RU, IU, W, A>,
> {
pub _gc2: PhantomData<GC2>,
pub _avar: PhantomData<AVar>,
/// Constraint system of the Augmented Function circuit
pub arith: A,
/// R1CS of the CycleFold circuit
pub cf_arith: R1CS<CF1<C2>>,
/// CycleFold PedersenParams over C2
pub cf_pedersen_params: PedersenParams<C2>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
/// public params hash
pub pp_hash: CF1<C1>,
pub i: CF1<C1>,
/// initial state
pub z_0: Vec<CF1<C1>>,
/// current i-th state
pub z_i: Vec<CF1<C1>>,
/// Folding scheme instances
pub U_i: RU,
pub W_i: W,
pub u_i: IU,
pub w_i: W,
pub U_i1: RU,
pub W_i1: W,
/// Helper for folding verification
pub proof: D::Proof,
pub randomness: D::Randomness,
/// CycleFold running instance
pub cf_U_i: CycleFoldCommittedInstance<C2>,
pub cf_W_i: CycleFoldWitness<C2>,
/// KZG challenges
pub kzg_challenges: Vec<CF1<C1>>,
pub kzg_evaluations: Vec<CF1<C1>>,
}
impl<
C1: CurveGroup,
C2: CurveGroup<ScalarField = CF2<C1>, BaseField = CF1<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
RU: CommittedInstanceOps<C1> + for<'a> Dummy<&'a A>,
IU: CommittedInstanceOps<C1> + for<'a> Dummy<&'a A>,
W: WitnessOps<CF1<C1>> + for<'a> Dummy<&'a A>,
A: Arith<W, RU>,
AVar: ArithGadget<W::Var, RU::Var> + AllocVar<A, CF1<C1>>,
D: DeciderEnabledNIFS<C1, RU, IU, W, A>,
>
Dummy<(
A,
R1CS<CF1<C2>>,
PedersenParams<C2>,
PoseidonConfig<CF1<C1>>,
D::ProofDummyCfg,
D::RandomnessDummyCfg,
usize,
usize,
)> for GenericOnchainDeciderCircuit<C1, C2, GC2, RU, IU, W, A, AVar, D>
{
fn dummy(
(
arith,
cf_arith,
cf_pedersen_params,
poseidon_config,
proof_config,
randomness_config,
state_len,
num_commitments,
): (
A,
R1CS<CF1<C2>>,
PedersenParams<C2>,
PoseidonConfig<CF1<C1>>,
D::ProofDummyCfg,
D::RandomnessDummyCfg,
usize,
usize,
),
) -> Self {
Self {
_gc2: PhantomData,
_avar: PhantomData,
cf_pedersen_params,
poseidon_config,
pp_hash: Zero::zero(),
i: Zero::zero(),
z_0: vec![Zero::zero(); state_len],
z_i: vec![Zero::zero(); state_len],
U_i: RU::dummy(&arith),
W_i: W::dummy(&arith),
u_i: IU::dummy(&arith),
w_i: W::dummy(&arith),
U_i1: RU::dummy(&arith),
W_i1: W::dummy(&arith),
proof: D::Proof::dummy(proof_config),
randomness: D::Randomness::dummy(randomness_config),
cf_U_i: CycleFoldCommittedInstance::dummy(&cf_arith),
cf_W_i: CycleFoldWitness::dummy(&cf_arith),
kzg_challenges: vec![Zero::zero(); num_commitments],
kzg_evaluations: vec![Zero::zero(); num_commitments],
arith,
cf_arith,
}
}
}
impl<
C1: CurveGroup,
C2: CurveGroup<ScalarField = CF2<C1>, BaseField = CF1<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
RU: CommittedInstanceOps<C1>,
IU: CommittedInstanceOps<C1>,
W: WitnessOps<CF1<C1>>,
A: Arith<W, RU>,
AVar: ArithGadget<W::Var, RU::Var> + AllocVar<A, CF1<C1>>,
D: DeciderEnabledNIFS<C1, RU, IU, W, A>,
> ConstraintSynthesizer<CF1<C1>>
for GenericOnchainDeciderCircuit<C1, C2, GC2, RU, IU, W, A, AVar, D>
where
RU::Var: AbsorbGadget<CF1<C1>> + CommittedInstanceVarOps<C1, PointVar = NonNativeAffineVar<C1>>,
CF1<C1>: Absorb,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let arith = AVar::new_witness(cs.clone(), || Ok(&self.arith))?;
let pp_hash = FpVar::new_input(cs.clone(), || Ok(self.pp_hash))?;
let i = FpVar::new_input(cs.clone(), || Ok(self.i))?;
let z_0 = Vec::new_input(cs.clone(), || Ok(self.z_0))?;
let z_i = Vec::new_input(cs.clone(), || Ok(self.z_i))?;
let u_i = IU::Var::new_witness(cs.clone(), || Ok(self.u_i))?;
let U_i = RU::Var::new_witness(cs.clone(), || Ok(self.U_i))?;
// here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i))
let U_i1_commitments = Vec::<NonNativeAffineVar<C1>>::new_input(cs.clone(), || {
Ok(self.U_i1.get_commitments())
})?;
let U_i1 = RU::Var::new_witness(cs.clone(), || Ok(self.U_i1))?;
let W_i1 = W::Var::new_witness(cs.clone(), || Ok(self.W_i1))?;
U_i1.get_commitments().enforce_equal(&U_i1_commitments)?;
let cf_U_i =
CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || Ok(self.cf_U_i))?;
// allocate the inputs for checks 7.1 and 7.2
let kzg_challenges = Vec::new_input(cs.clone(), || Ok(self.kzg_challenges))?;
let kzg_evaluations = Vec::new_input(cs.clone(), || Ok(self.kzg_evaluations))?;
// `sponge` is for digest computation.
let sponge = PoseidonSpongeVar::new(cs.clone(), &self.poseidon_config);
// `transcript` is for challenge generation.
let mut transcript = sponge.clone();
// NOTE: we use the same enumeration as in
// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
// in order to make it easier to reason about.
// 1. enforce `U_{i+1}` and `W_{i+1}` satisfy `arith`
arith.enforce_relation(&W_i1, &U_i1)?;
// 2. enforce `u_i` is an incoming instance
u_i.enforce_incoming()?;
// 3. u_i.x[0] == H(i, z_0, z_i, U_i), u_i.x[1] == H(cf_U_i)
let (u_i_x, U_i_vec) = U_i.hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
let (cf_u_i_x, _) = cf_U_i.hash(&sponge, pp_hash.clone())?;
u_i.get_public_inputs().enforce_equal(&[u_i_x, cf_u_i_x])?;
#[cfg(feature = "light-test")]
log::warn!("[WARNING]: Running with the 'light-test' feature, skipping the big part of the DeciderEthCircuit.\n Only for testing purposes.");
// The following two checks (and their respective allocations) are disabled for normal
// tests, since they add several million constraints and would take several minutes
// (and a lot of RAM) to run. They are active by default and are only skipped when the
// 'light-test' feature is enabled.
#[cfg(not(feature = "light-test"))]
{
// imports here instead of at the top of the file, so we avoid having multiple
// `#[cfg(not(feature = "light-test"))]` attributes
use crate::{
arith::r1cs::circuits::R1CSMatricesVar,
commitment::pedersen::PedersenGadget,
folding::circuits::{
cyclefold::CycleFoldWitnessVar, nonnative::uint::NonNativeUintVar,
},
};
use ark_r1cs_std::ToBitsGadget;
let cf_W_i = CycleFoldWitnessVar::<C2>::new_witness(cs.clone(), || Ok(self.cf_W_i))?;
// 4. check Pedersen commitments of cf_U_i.{cmE, cmW}
let H = GC2::constant(self.cf_pedersen_params.h);
let G = self
.cf_pedersen_params
.generators
.iter()
.map(|&g| GC2::constant(g.into()))
.collect::<Vec<_>>();
let cf_W_i_E_bits = cf_W_i
.E
.iter()
.map(|E_i| E_i.to_bits_le())
.collect::<Result<Vec<_>, _>>()?;
let cf_W_i_W_bits = cf_W_i
.W
.iter()
.map(|W_i| W_i.to_bits_le())
.collect::<Result<Vec<_>, _>>()?;
PedersenGadget::<C2, GC2>::commit(&H, &G, &cf_W_i_E_bits, &cf_W_i.rE.to_bits_le()?)?
.enforce_equal(&cf_U_i.cmE)?;
PedersenGadget::<C2, GC2>::commit(&H, &G, &cf_W_i_W_bits, &cf_W_i.rW.to_bits_le()?)?
.enforce_equal(&cf_U_i.cmW)?;
let cf_r1cs = R1CSMatricesVar::<CF1<C2>, NonNativeUintVar<CF2<C2>>>::new_constant(
ConstraintSystemRef::None,
self.cf_arith,
)?;
// 5. enforce `cf_U_i` and `cf_W_i` satisfy `cf_r1cs`
cf_r1cs.enforce_relation(&cf_W_i, &cf_U_i)?;
}
// 6.1. partially enforce `NIFS.V(U_i, u_i) = U_{i+1}`.
D::fold_field_elements_gadget(
&self.arith,
&mut transcript,
pp_hash,
U_i,
U_i_vec,
u_i,
self.proof,
self.randomness,
)?
.enforce_partial_equal(&U_i1)?;
// 7.1. compute and check KZG challenges
KZGChallengesGadget::get_challenges_gadget(&mut transcript, &U_i1)?
.enforce_equal(&kzg_challenges)?;
// 7.2. check the claimed evaluations
for (((v, _r), c), e) in W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.zip(&kzg_evaluations)
{
// The randomness `_r` is currently not used.
EvalGadget::evaluate_gadget(v, c)?.enforce_equal(e)?;
}
Ok(())
}
}
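
As described in the doc comment of `GenericOnchainDeciderCircuit`, the prover folds the running and incoming instances one last time before entering the circuit, so that only the folded instance has to satisfy the relation in-circuit. The toy sketch below shows the shape of such a fold as a random linear combination over plain field vectors; it is not the crate's NIFS (which also folds commitments and, depending on the scheme, error/cross terms), just an illustration of why a single relation check on `U_{i+1}` suffices.

use ark_bn254::Fr;
use ark_std::UniformRand;

/// Toy "instance": a vector of field elements (no commitments).
#[derive(Clone, Debug)]
struct ToyInstance(Vec<Fr>);

/// Fold the running and incoming instances with the verifier challenge
/// `rho`: each coordinate becomes `running + rho * incoming`.
fn fold(running: &ToyInstance, incoming: &ToyInstance, rho: Fr) -> ToyInstance {
    ToyInstance(
        running
            .0
            .iter()
            .zip(&incoming.0)
            .map(|(u, v)| *u + rho * v)
            .collect(),
    )
}

fn main() {
    let mut rng = ark_std::test_rng();
    let u_running = ToyInstance((0..4).map(|_| Fr::rand(&mut rng)).collect());
    let u_incoming = ToyInstance((0..4).map(|_| Fr::rand(&mut rng)).collect());
    let rho = Fr::rand(&mut rng);
    // The decider circuit then checks a single folded instance instead of
    // the running and incoming instances separately.
    println!("{:?}", fold(&u_running, &u_incoming, rho));
}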

+ 3
- 2
folding-schemes/src/folding/circuits/mod.rs

@ -1,8 +1,9 @@
/// Circuits and gadgets shared across the different folding schemes.
use ark_ec::{AffineRepr, CurveGroup};
use ark_ec::{CurveGroup, Group};
use ark_ff::Field;
pub mod cyclefold;
pub mod decider;
pub mod nonnative;
pub mod sum_check;
pub mod utils;
@ -10,7 +11,7 @@ pub mod utils;
/// CF1 uses the ScalarField of the given C. CF1 represents the ConstraintField used for the main
/// folding circuit which is over E1::Fr, where E1 is the main curve where we do the folding.
/// In CF1, the points of C can not be natively represented.
pub type CF1<C> = <<C as CurveGroup>::Affine as AffineRepr>::ScalarField;
pub type CF1<C> = <C as Group>::ScalarField;
/// CF2 uses the BaseField of the given C. CF2 represents the ConstraintField used for the
/// CycleFold circuit which is over E2::Fr=E1::Fq, where E2 is the auxiliary curve (from
/// [CycleFold](https://eprint.iacr.org/2023/1192.pdf) approach) where we check the folding of the
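
For a concrete cycle such as BN254/Grumpkin (the pair used throughout the examples in this repository), the two aliases resolve as in the sketch below; the bindings are only illustrative.

use ark_bn254::{Fq, Fr, G1Projective};
use ark_ec::{CurveGroup, Group};

fn main() {
    // CF1<G1Projective>: the scalar field of the main curve, i.e. BN254's Fr.
    let _cf1: <G1Projective as Group>::ScalarField = Fr::from(1u64);
    // CF2<G1Projective>: the base field of the main curve, i.e. BN254's Fq,
    // which is also the scalar field of the auxiliary curve Grumpkin.
    let _cf2: <G1Projective as CurveGroup>::BaseField = Fq::from(1u64);
}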

+ 78
- 14
folding-schemes/src/folding/circuits/nonnative/affine.rs

@ -2,7 +2,9 @@ use ark_ec::{short_weierstrass::SWFlags, AffineRepr, CurveGroup};
use ark_ff::{Field, PrimeField};
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
eq::EqGadget,
fields::fp::FpVar,
prelude::Boolean,
R1CSVar, ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
@ -10,7 +12,10 @@ use ark_serialize::{CanonicalSerialize, CanonicalSerializeWithFlags};
use ark_std::Zero;
use core::borrow::Borrow;
use crate::transcript::{AbsorbNonNative, AbsorbNonNativeGadget};
use crate::{
folding::traits::Inputize,
transcript::{AbsorbNonNative, AbsorbNonNativeGadget},
};
use super::uint::{nonnative_field_to_field_elements, NonNativeUintVar};
@ -63,8 +68,15 @@ impl R1CSVar for NonNativeAffineVar {
let y = <C::BaseField as Field>::BasePrimeField::from_le_bytes_mod_order(
&self.y.value()?.to_bytes_le(),
);
// Below is a workaround to convert the `x` and `y` coordinates to a
// point. This is because the `CurveGroup` trait does not provide a
// method to construct a point from `BaseField` elements.
let mut bytes = vec![];
// `unwrap` below is safe because serialization of a `PrimeField` value
// only fails if the serialization flag has more than 8 bits, but here
// we call `serialize_uncompressed` which uses an empty flag.
x.serialize_uncompressed(&mut bytes).unwrap();
// `unwrap` below is also safe, because the bit size of `SWFlags` is 2.
y.serialize_with_flags(
&mut bytes,
if x.is_zero() && y.is_zero() {
@ -76,6 +88,9 @@ impl R1CSVar for NonNativeAffineVar {
},
)
.unwrap();
// `unwrap` below is safe because `bytes` is constructed from the `x`
// and `y` coordinates of a valid point, and these coordinates are
// serialized in the same way as the `CurveGroup` implementation.
Ok(C::deserialize_uncompressed_unchecked(&bytes[..]).unwrap())
}
}
@ -90,6 +105,53 @@ impl ToConstraintFieldGadget for NonNativeAffineV
}
}
impl<C: CurveGroup> EqGadget<C::ScalarField> for NonNativeAffineVar<C> {
fn is_eq(&self, other: &Self) -> Result<Boolean<C::ScalarField>, SynthesisError> {
let mut result = Boolean::TRUE;
if self.x.0.len() != other.x.0.len() {
return Err(SynthesisError::Unsatisfiable);
}
if self.y.0.len() != other.y.0.len() {
return Err(SynthesisError::Unsatisfiable);
}
for (l, r) in self
.x
.0
.iter()
.chain(&self.y.0)
.zip(other.x.0.iter().chain(&other.y.0))
{
if l.ub != r.ub {
return Err(SynthesisError::Unsatisfiable);
}
result = result.and(&l.v.is_eq(&r.v)?)?;
}
Ok(result)
}
fn enforce_equal(&self, other: &Self) -> Result<(), SynthesisError> {
if self.x.0.len() != other.x.0.len() {
return Err(SynthesisError::Unsatisfiable);
}
if self.y.0.len() != other.y.0.len() {
return Err(SynthesisError::Unsatisfiable);
}
for (l, r) in self
.x
.0
.iter()
.chain(&self.y.0)
.zip(other.x.0.iter().chain(&other.y.0))
{
if l.ub != r.ub {
return Err(SynthesisError::Unsatisfiable);
}
l.v.enforce_equal(&r.v)?;
}
Ok(())
}
}
/// The out-circuit counterpart of `NonNativeAffineVar::to_constraint_field`
#[allow(clippy::type_complexity)]
pub(crate) fn nonnative_affine_to_field_elements<C: CurveGroup>(
@ -104,22 +166,22 @@ pub(crate) fn nonnative_affine_to_field_elements(
(x, y)
}
impl<C: CurveGroup> NonNativeAffineVar<C> {
// Extracts a list of field elements of type `C::ScalarField` from the public input
// `p`, in exactly the same way as how `NonNativeAffineVar` is represented as limbs of type
// `FpVar` in-circuit.
#[allow(clippy::type_complexity)]
pub fn inputize(p: C) -> Result<(Vec<C::ScalarField>, Vec<C::ScalarField>), SynthesisError> {
let affine = p.into_affine();
impl<C: CurveGroup> Inputize<C::ScalarField, NonNativeAffineVar<C>> for C {
fn inputize(&self) -> Vec<C::ScalarField> {
let affine = self.into_affine();
let zero = (&C::BaseField::zero(), &C::BaseField::zero());
let (x, y) = affine.xy().unwrap_or(zero);
let x = NonNativeUintVar::inputize(*x);
let y = NonNativeUintVar::inputize(*y);
Ok((x, y))
let x = x.inputize();
let y = y.inputize();
[x, y].concat()
}
}
impl<C: CurveGroup> NonNativeAffineVar<C> {
pub fn zero() -> Self {
// `unwrap` below is safe because we are allocating a constant value,
// which is guaranteed to succeed.
Self::new_constant(ConstraintSystemRef::None, C::zero()).unwrap()
}
}
@ -179,9 +241,11 @@ mod tests {
let mut rng = ark_std::test_rng();
let p = Projective::rand(&mut rng);
let pVar = NonNativeAffineVar::<Projective>::new_witness(cs.clone(), || Ok(p)).unwrap();
let (x, y) = NonNativeAffineVar::inputize(p).unwrap();
let xy = p.inputize();
assert_eq!(pVar.x.0.value().unwrap(), x);
assert_eq!(pVar.y.0.value().unwrap(), y);
assert_eq!(
[pVar.x.0.value().unwrap(), pVar.y.0.value().unwrap()].concat(),
xy
);
}
}
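
The `inputize` implementations above all rely on the same flattening: a non-native base-field coordinate is split into little-endian limbs of a fixed number of bits, and those limbs become native field elements. Below is a standalone sketch of that decomposition with num_bigint; the limb width 55 and the limb count are placeholders, since the crate derives them from the two moduli.

use num_bigint::BigUint;
use num_traits::One;

/// Split `value` into little-endian limbs of `bits_per_limb` bits each.
fn to_limbs(value: &BigUint, bits_per_limb: usize, num_limbs: usize) -> Vec<BigUint> {
    let mask = (BigUint::one() << bits_per_limb) - BigUint::one();
    (0..num_limbs)
        .map(|i| (value >> (i * bits_per_limb)) & &mask)
        .collect()
}

fn main() {
    let x = BigUint::parse_bytes(b"1234567890123456789012345678901234567890", 10).unwrap();
    // e.g. 5 limbs of 55 bits are enough for a ~254-bit coordinate
    for (i, limb) in to_limbs(&x, 55, 5).iter().enumerate() {
        println!("limb {i}: {limb}");
    }
}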

+ 47
- 30
folding-schemes/src/folding/circuits/nonnative/uint.rs

@ -17,8 +17,9 @@ use num_bigint::BigUint;
use num_integer::Integer;
use crate::{
folding::traits::Inputize,
transcript::{AbsorbNonNative, AbsorbNonNativeGadget},
utils::gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget},
utils::gadgets::{EquivalenceGadget, MatrixGadget, SparseMatrixVar, VectorGadget},
};
/// `LimbVar` represents a single limb of a non-native unsigned integer in the
@ -36,6 +37,11 @@ pub struct LimbVar {
impl<F: PrimeField, B: AsRef<[Boolean<F>]>> From<B> for LimbVar<F> { impl<F: PrimeField, B: AsRef<[Boolean<F>]>> From<B> for LimbVar<F> {
fn from(bits: B) -> Self { fn from(bits: B) -> Self {
Self { Self {
// `Boolean::le_bits_to_fp_var` will return an error if the internal
// invocation of `Boolean::enforce_in_field_le` fails.
// However, this method is only called when the length of `bits` is
// greater than `F::MODULUS_BIT_SIZE`, which should not happen in
// our case where `bits` is guaranteed to be short.
v: Boolean::le_bits_to_fp_var(bits.as_ref()).unwrap(), v: Boolean::le_bits_to_fp_var(bits.as_ref()).unwrap(),
ub: (BigUint::one() << bits.as_ref().len()) - BigUint::one(), ub: (BigUint::one() << bits.as_ref().len()) - BigUint::one(),
} }
@ -219,7 +225,7 @@ impl AllocVar for NonNativeUintVar {
.collect::<Vec<_>>() .collect::<Vec<_>>()
.chunks(Self::bits_per_limb()) .chunks(Self::bits_per_limb())
{ {
let limb = F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap();
let limb = F::from(F::BigInt::from_bits_le(chunk));
let limb = FpVar::new_variable(cs.clone(), || Ok(limb), mode)?; let limb = FpVar::new_variable(cs.clone(), || Ok(limb), mode)?;
Self::enforce_bit_length(&limb, chunk.len())?; Self::enforce_bit_length(&limb, chunk.len())?;
limbs.push(LimbVar { limbs.push(LimbVar {
@ -241,12 +247,15 @@ impl AllocVar for NonNativeUintVar {
let cs = cs.into().cs(); let cs = cs.into().cs();
let v = f()?; let v = f()?;
assert_eq!(G::extension_degree(), 1); assert_eq!(G::extension_degree(), 1);
// `unwrap` is safe because `G` is a field with extension degree 1, and
// thus `G::to_base_prime_field_elements` should return an iterator with
// exactly one element.
let v = v.borrow().to_base_prime_field_elements().next().unwrap(); let v = v.borrow().to_base_prime_field_elements().next().unwrap();
let mut limbs = vec![]; let mut limbs = vec![];
for chunk in v.into_bigint().to_bits_le().chunks(Self::bits_per_limb()) { for chunk in v.into_bigint().to_bits_le().chunks(Self::bits_per_limb()) {
let limb = F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap();
let limb = F::from(F::BigInt::from_bits_le(chunk));
let limb = FpVar::new_variable(cs.clone(), || Ok(limb), mode)?; let limb = FpVar::new_variable(cs.clone(), || Ok(limb), mode)?;
Self::enforce_bit_length(&limb, chunk.len())?; Self::enforce_bit_length(&limb, chunk.len())?;
limbs.push(LimbVar { limbs.push(LimbVar {
@ -259,16 +268,19 @@ impl AllocVar for NonNativeUintVar {
} }
} }
impl<F: PrimeField> NonNativeUintVar<F> {
pub fn inputize<T: Field>(x: T) -> Vec<F> {
impl<F: PrimeField, T: Field> Inputize<F, NonNativeUintVar<F>> for T {
fn inputize(&self) -> Vec<F> {
assert_eq!(T::extension_degree(), 1); assert_eq!(T::extension_degree(), 1);
x.to_base_prime_field_elements()
// `unwrap` is safe because `T` is a field with extension degree 1, and
// thus `T::to_base_prime_field_elements` should return an iterator with
// exactly one element.
self.to_base_prime_field_elements()
.next() .next()
.unwrap() .unwrap()
.into_bigint() .into_bigint()
.to_bits_le() .to_bits_le()
.chunks(Self::bits_per_limb())
.map(|chunk| F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap())
.chunks(NonNativeUintVar::<F>::bits_per_limb())
.map(|chunk| F::from(F::BigInt::from_bits_le(chunk)))
.collect() .collect()
} }
} }
@ -450,7 +462,7 @@ impl NonNativeUintVar {
// (i.e., all of them are "non-negative"), implying that all // (i.e., all of them are "non-negative"), implying that all
// limbs should be zero to make the sum zero. // limbs should be zero to make the sum zero.
LimbVar::add_many(&remaining_limbs[1..]) LimbVar::add_many(&remaining_limbs[1..])
.unwrap()
.ok_or(SynthesisError::Unsatisfiable)?
.v .v
.enforce_equal(&FpVar::zero())?; .enforce_equal(&FpVar::zero())?;
remaining_limbs[0].v.clone() remaining_limbs[0].v.clone()
@ -621,15 +633,15 @@ impl NonNativeUintVar {
} }
/// Compute `self + other`, without aligning the limbs. /// Compute `self + other`, without aligning the limbs.
pub fn add_no_align(&self, other: &Self) -> Self {
pub fn add_no_align(&self, other: &Self) -> Result<Self, SynthesisError> {
let mut z = vec![LimbVar::zero(); max(self.0.len(), other.0.len())]; let mut z = vec![LimbVar::zero(); max(self.0.len(), other.0.len())];
for (i, v) in self.0.iter().enumerate() { for (i, v) in self.0.iter().enumerate() {
z[i] = z[i].add(v).unwrap();
z[i] = z[i].add(v).ok_or(SynthesisError::Unsatisfiable)?;
} }
for (i, v) in other.0.iter().enumerate() { for (i, v) in other.0.iter().enumerate() {
z[i] = z[i].add(v).unwrap();
z[i] = z[i].add(v).ok_or(SynthesisError::Unsatisfiable)?;
} }
Self(z)
Ok(Self(z))
} }
/// Compute `self * other`, without aligning the limbs. /// Compute `self * other`, without aligning the limbs.
@ -650,7 +662,7 @@ impl NonNativeUintVar {
) )
}) })
.collect::<Option<Vec<_>>>() .collect::<Option<Vec<_>>>()
.unwrap();
.ok_or(SynthesisError::Unsatisfiable)?;
return Ok(Self(z)); return Ok(Self(z));
} }
let cs = self.cs().or(other.cs()); let cs = self.cs().or(other.cs());
@ -753,7 +765,7 @@ impl NonNativeUintVar {
let m = Self::new_constant(cs.clone(), BoundedBigUint(m, M::MODULUS_BIT_SIZE as usize))?; let m = Self::new_constant(cs.clone(), BoundedBigUint(m, M::MODULUS_BIT_SIZE as usize))?;
// Enforce `self = q * m + r` // Enforce `self = q * m + r`
q.mul_no_align(&m)? q.mul_no_align(&m)?
.add_no_align(&r)
.add_no_align(&r)?
.enforce_equal_unaligned(self)?; .enforce_equal_unaligned(self)?;
// Enforce `r < m` (and `r >= 0` already holds) // Enforce `r < m` (and `r >= 0` already holds)
r.enforce_lt(&m)?; r.enforce_lt(&m)?;
@ -789,8 +801,8 @@ impl NonNativeUintVar {
let zero = Self::new_constant(cs.clone(), BoundedBigUint(BigUint::zero(), bits))?; let zero = Self::new_constant(cs.clone(), BoundedBigUint(BigUint::zero(), bits))?;
let m = Self::new_constant(cs.clone(), BoundedBigUint(m, M::MODULUS_BIT_SIZE as usize))?; let m = Self::new_constant(cs.clone(), BoundedBigUint(m, M::MODULUS_BIT_SIZE as usize))?;
let l = self.add_no_align(&is_ge.select(&zero, &q)?.mul_no_align(&m)?);
let r = other.add_no_align(&is_ge.select(&q, &zero)?.mul_no_align(&m)?);
let l = self.add_no_align(&is_ge.select(&zero, &q)?.mul_no_align(&m)?)?;
let r = other.add_no_align(&is_ge.select(&q, &zero)?.mul_no_align(&m)?)?;
// If `self >= other`, enforce `self = other + q * m` // If `self >= other`, enforce `self = other + q * m`
// Otherwise, enforce `self + q * m = other` // Otherwise, enforce `self + q * m = other`
// Soundness holds because if `self` and `other` are not congruent, then // Soundness holds because if `self` and `other` are not congruent, then
@ -799,6 +811,12 @@ impl NonNativeUintVar {
} }
} }
impl<F: PrimeField, M: PrimeField> EquivalenceGadget<M> for NonNativeUintVar<F> {
fn enforce_equivalent(&self, other: &Self) -> Result<(), SynthesisError> {
self.enforce_congruent::<M>(other)
}
}
impl<F: PrimeField, B: AsRef<[Boolean<F>]>> From<B> for NonNativeUintVar<F> { impl<F: PrimeField, B: AsRef<[Boolean<F>]>> From<B> for NonNativeUintVar<F> {
fn from(bits: B) -> Self { fn from(bits: B) -> Self {
Self( Self(
@ -834,6 +852,9 @@ pub(super) fn nonnative_field_to_field_elements
f: &TargetField, f: &TargetField,
) -> Vec<BaseField> { ) -> Vec<BaseField> {
assert_eq!(TargetField::extension_degree(), 1); assert_eq!(TargetField::extension_degree(), 1);
// `unwrap` is safe because `TargetField` is a field with extension degree
// 1, and thus `TargetField::to_base_prime_field_elements` should return an
// iterator with exactly one element.
let bits = f let bits = f
.to_base_prime_field_elements() .to_base_prime_field_elements()
.next() .next()
@ -864,11 +885,10 @@ pub(super) fn nonnative_field_to_field_elements
impl<F: PrimeField> VectorGadget<NonNativeUintVar<F>> for [NonNativeUintVar<F>] { impl<F: PrimeField> VectorGadget<NonNativeUintVar<F>> for [NonNativeUintVar<F>] {
fn add(&self, other: &Self) -> Result<Vec<NonNativeUintVar<F>>, SynthesisError> { fn add(&self, other: &Self) -> Result<Vec<NonNativeUintVar<F>>, SynthesisError> {
Ok(self
.iter()
self.iter()
.zip(other.iter()) .zip(other.iter())
.map(|(x, y)| x.add_no_align(y)) .map(|(x, y)| x.add_no_align(y))
.collect())
.collect()
} }
fn hadamard(&self, other: &Self) -> Result<Vec<NonNativeUintVar<F>>, SynthesisError> { fn hadamard(&self, other: &Self) -> Result<Vec<NonNativeUintVar<F>>, SynthesisError> {
@ -886,15 +906,12 @@ impl VectorGadget> for [NonNativeUintVar]
} }
} }
impl<F: PrimeField, CF: PrimeField> MatrixGadget<NonNativeUintVar<CF>>
for SparseMatrixVar<F, CF, NonNativeUintVar<CF>>
{
impl<CF: PrimeField> MatrixGadget<NonNativeUintVar<CF>> for SparseMatrixVar<NonNativeUintVar<CF>> {
fn mul_vector( fn mul_vector(
&self, &self,
v: &[NonNativeUintVar<CF>], v: &[NonNativeUintVar<CF>],
) -> Result<Vec<NonNativeUintVar<CF>>, SynthesisError> { ) -> Result<Vec<NonNativeUintVar<CF>>, SynthesisError> {
Ok(self
.coeffs
self.coeffs
.iter() .iter()
.map(|row| { .map(|row| {
let len = row let len = row
@ -906,7 +923,7 @@ impl MatrixGadget>
// that results in more flattened `LinearCombination`s. // that results in more flattened `LinearCombination`s.
// Consequently, `ConstraintSystem::inline_all_lcs` costs less // Consequently, `ConstraintSystem::inline_all_lcs` costs less
// time, thus making trusted setup and proof generation faster. // time, thus making trusted setup and proof generation faster.
let limbs = (0..len)
(0..len)
.map(|i| { .map(|i| {
LimbVar::add_many( LimbVar::add_many(
&row.iter() &row.iter()
@ -919,10 +936,10 @@ impl MatrixGadget>
) )
}) })
.collect::<Option<Vec<_>>>() .collect::<Option<Vec<_>>>()
.unwrap();
NonNativeUintVar(limbs)
.ok_or(SynthesisError::Unsatisfiable)
.map(NonNativeUintVar)
}) })
.collect())
.collect::<Result<Vec<_>, _>>()
} }
} }
@ -1041,7 +1058,7 @@ mod tests {
let mut r_var = let mut r_var =
NonNativeUintVar::new_constant(cs.clone(), BoundedBigUint(BigUint::zero(), 0))?; NonNativeUintVar::new_constant(cs.clone(), BoundedBigUint(BigUint::zero(), 0))?;
for (a, b) in a_var.into_iter().zip(b_var.into_iter()) { for (a, b) in a_var.into_iter().zip(b_var.into_iter()) {
r_var = r_var.add_no_align(&a.mul_no_align(&b)?);
r_var = r_var.add_no_align(&a.mul_no_align(&b)?)?;
} }
r_var.enforce_congruent::<Fq>(&c_var)?; r_var.enforce_congruent::<Fq>(&c_var)?;
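
The congruence/modular-reduction constraints exercised in this test boil down to exhibiting a quotient and remainder with x = q * m + r and r < m, which is what the gadget enforces limb-wise. A native sketch of the hint computation the prover performs, using the same num_bigint/num_integer crates (the concrete numbers are placeholders):

use num_bigint::BigUint;
use num_integer::Integer;

fn main() {
    // x is the unreduced value held in limbs, m the non-native modulus.
    let x = BigUint::parse_bytes(b"98765432109876543210", 10).unwrap();
    let m = BigUint::parse_bytes(b"1000000007", 10).unwrap();
    // The prover supplies q and r as hints; the circuit then enforces
    // x == q * m + r and r < m, which pins r down uniquely.
    let (q, r) = x.div_rem(&m);
    assert_eq!(&q * &m + &r, x);
    assert!(r < m);
    println!("q = {q}, r = {r}");
}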

+ 7
- 0
folding-schemes/src/folding/hypernova/cccs.rs

@ -10,6 +10,7 @@ use super::Witness;
use crate::arith::{ccs::CCS, Arith};
use crate::commitment::CommitmentScheme;
use crate::folding::circuits::CF1;
use crate::folding::traits::Inputize;
use crate::folding::traits::{CommittedInstanceOps, Dummy};
use crate::transcript::AbsorbNonNative;
use crate::utils::mle::dense_vec_to_dense_mle;
@ -152,6 +153,12 @@ impl CommittedInstanceOps for CCCS {
}
}
impl<C: CurveGroup> Inputize<C::ScalarField, CCCSVar<C>> for CCCS<C> {
fn inputize(&self) -> Vec<C::ScalarField> {
[&self.C.inputize()[..], &self.x].concat()
}
}
#[cfg(test)]
pub mod tests {
use ark_pallas::Fr;
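
The `Inputize` implementation added above follows a simple pattern: an out-of-circuit value knows how to flatten itself into exactly the list of native field elements that its in-circuit counterpart exposes as public inputs. The sketch below only illustrates that shape; it is not the crate's actual trait definition, and the struct and field names are hypothetical.

use ark_bn254::Fr;

/// Minimal sketch of the pattern behind `Inputize` (NOT the crate's trait).
trait InputizeSketch {
    fn inputize(&self) -> Vec<Fr>;
}

struct ToyInstance {
    commitment_limbs: Vec<Fr>, // e.g. the non-native limbs of a point
    x: Vec<Fr>,                // public inputs of the instance
}

impl InputizeSketch for ToyInstance {
    fn inputize(&self) -> Vec<Fr> {
        // Same ordering as the in-circuit allocation: commitment first,
        // then the public input vector.
        [&self.commitment_limbs[..], &self.x[..]].concat()
    }
}

fn main() {
    let inst = ToyInstance {
        commitment_limbs: vec![Fr::from(1u64), Fr::from(2u64)],
        x: vec![Fr::from(3u64)],
    };
    assert_eq!(inst.inputize().len(), 3);
}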

+ 9
- 20
folding-schemes/src/folding/hypernova/circuits.rs

@ -12,7 +12,6 @@ use ark_r1cs_std::{
boolean::Boolean, boolean::Boolean,
eq::EqGadget, eq::EqGadget,
fields::{fp::FpVar, FieldVar}, fields::{fp::FpVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar, prelude::CurveVar,
uint8::UInt8, uint8::UInt8,
R1CSVar, ToConstraintFieldGadget, R1CSVar, ToConstraintFieldGadget,
@ -195,7 +194,6 @@ pub struct ProofVar {
impl<C> AllocVar<NIMFSProof<C>, CF1<C>> for ProofVar<C> impl<C> AllocVar<NIMFSProof<C>, CF1<C>> for ProofVar<C>
where where
C: CurveGroup, C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: PrimeField,
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
{ {
fn new_variable<T: Borrow<NIMFSProof<C>>>( fn new_variable<T: Borrow<NIMFSProof<C>>>(
@ -237,10 +235,7 @@ where
pub struct NIMFSGadget<C: CurveGroup> { pub struct NIMFSGadget<C: CurveGroup> {
_c: PhantomData<C>, _c: PhantomData<C>,
} }
impl<C: CurveGroup> NIMFSGadget<C>
where
<C as CurveGroup>::BaseField: PrimeField,
{
impl<C: CurveGroup> NIMFSGadget<C> {
/// Runs (in-circuit) the NIMFS.V, which outputs the new folded LCCCS instance together with /// Runs (in-circuit) the NIMFS.V, which outputs the new folded LCCCS instance together with
/// the rho_powers, which will be used in other parts of the AugmentedFCircuit /// the rho_powers, which will be used in other parts of the AugmentedFCircuit
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
@ -492,9 +487,7 @@ pub struct AugmentedFCircuit<
FC: FCircuit<CF1<C1>>, FC: FCircuit<CF1<C1>>,
const MU: usize, const MU: usize,
const NU: usize, const NU: usize,
> where
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
> {
pub(super) _c2: PhantomData<C2>, pub(super) _c2: PhantomData<C2>,
pub(super) _gc2: PhantomData<GC2>, pub(super) _gc2: PhantomData<GC2>,
pub(super) poseidon_config: PoseidonConfig<CF1<C1>>, pub(super) poseidon_config: PoseidonConfig<CF1<C1>>,
@ -532,7 +525,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
pub fn default( pub fn default(
poseidon_config: &PoseidonConfig<CF1<C1>>, poseidon_config: &PoseidonConfig<CF1<C1>>,
@ -588,11 +580,9 @@ where
M: vec![], M: vec![],
}; };
let mut augmented_f_circuit = Self::default(poseidon_config, F, initial_ccs)?; let mut augmented_f_circuit = Self::default(poseidon_config, F, initial_ccs)?;
if ccs.is_some() {
augmented_f_circuit.ccs = ccs.unwrap();
} else {
augmented_f_circuit.ccs = augmented_f_circuit.upper_bound_ccs()?;
}
augmented_f_circuit.ccs = ccs
.ok_or(())
.or_else(|_| augmented_f_circuit.upper_bound_ccs())?;
Ok(augmented_f_circuit) Ok(augmented_f_circuit)
} }
@ -601,7 +591,7 @@ where
/// For a stable FCircuit circuit, the CCS parameters can be computed in advance and can be /// For a stable FCircuit circuit, the CCS parameters can be computed in advance and can be
/// feed in as parameter for the AugmentedFCircuit::empty method to avoid computing them there. /// feed in as parameter for the AugmentedFCircuit::empty method to avoid computing them there.
pub fn upper_bound_ccs(&self) -> Result<CCS<C1::ScalarField>, Error> { pub fn upper_bound_ccs(&self) -> Result<CCS<C1::ScalarField>, Error> {
let r1cs = get_r1cs_from_cs::<CF1<C1>>(self.clone()).unwrap();
let r1cs = get_r1cs_from_cs::<CF1<C1>>(self.clone())?;
let mut ccs = CCS::from(r1cs); let mut ccs = CCS::from(r1cs);
let z_0 = vec![C1::ScalarField::zero(); self.F.state_len()]; let z_0 = vec![C1::ScalarField::zero(); self.F.state_len()];
@ -690,7 +680,7 @@ where
self.clone().generate_constraints(cs.clone())?; self.clone().generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
let r1cs = extract_r1cs::<C1::ScalarField>(&cs)?;
let ccs = CCS::from(r1cs); let ccs = CCS::from(r1cs);
Ok((cs, ccs)) Ok((cs, ccs))
@ -709,7 +699,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> { fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let pp_hash = FpVar::<CF1<C1>>::new_witness(cs.clone(), || { let pp_hash = FpVar::<CF1<C1>>::new_witness(cs.clone(), || {
@ -883,7 +872,7 @@ where
// Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1})
let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?;
let (cf_u_i1_x_base, _) = let (cf_u_i1_x_base, _) =
CycleFoldCommittedInstanceVar::new_constant(cs.clone(), cf_u_dummy)?
CycleFoldCommittedInstanceVar::<C2, GC2>::new_constant(cs.clone(), cf_u_dummy)?
.hash(&sponge, pp_hash)?; .hash(&sponge, pp_hash)?;
let cf_x = FpVar::new_input(cs.clone(), || { let cf_x = FpVar::new_input(cs.clone(), || {
Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?))
@ -1232,7 +1221,7 @@ mod tests {
.into_inner() .into_inner()
.ok_or(Error::NoInnerConstraintSystem) .ok_or(Error::NoInnerConstraintSystem)
.unwrap(); .unwrap();
let cf_r1cs = extract_r1cs::<Fq>(&cs2);
let cf_r1cs = extract_r1cs::<Fq>(&cs2).unwrap();
println!("CF m x n: {} x {}", cf_r1cs.A.n_rows, cf_r1cs.A.n_cols); println!("CF m x n: {} x {}", cf_r1cs.A.n_rows, cf_r1cs.A.n_cols);
let (pedersen_params, _) = let (pedersen_params, _) =

+ 66
- 68
folding-schemes/src/folding/hypernova/decider_eth.rs

@ -2,7 +2,7 @@
use ark_crypto_primitives::sponge::Absorb; use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_snark::SNARK; use ark_snark::SNARK;
use ark_std::rand::{CryptoRng, RngCore}; use ark_std::rand::{CryptoRng, RngCore};
@ -10,12 +10,15 @@ use ark_std::{One, Zero};
use core::marker::PhantomData; use core::marker::PhantomData;
pub use super::decider_eth_circuit::DeciderEthCircuit; pub use super::decider_eth_circuit::DeciderEthCircuit;
use super::{lcccs::LCCCS, HyperNova};
use super::decider_eth_circuit::DeciderHyperNovaGadget;
use super::HyperNova;
use crate::commitment::{ use crate::commitment::{
kzg::Proof as KZGProof, pedersen::Params as PedersenParams, CommitmentScheme, kzg::Proof as KZGProof, pedersen::Params as PedersenParams, CommitmentScheme,
}; };
use crate::folding::circuits::{nonnative::affine::NonNativeAffineVar, CF2};
use crate::folding::circuits::decider::DeciderEnabledNIFS;
use crate::folding::circuits::CF2;
use crate::folding::nova::decider_eth::VerifierParam; use crate::folding::nova::decider_eth::VerifierParam;
use crate::folding::traits::{Inputize, WitnessOps};
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::Error; use crate::Error;
use crate::{Decider as DeciderTrait, FoldingScheme}; use crate::{Decider as DeciderTrait, FoldingScheme};
@ -31,7 +34,6 @@ where
kzg_proof: CS1::Proof,
// rho used at the last fold, U_{i+1}=NIMFS.V(rho, U_i, u_i), it is checked in-circuit
rho: C1::ScalarField,
U_i1: LCCCS<C1>, // U_{i+1}, which is checked in-circuit
// the KZG challenge is provided by the prover, but in-circuit it is checked to match
// the in-circuit computed one.
kzg_challenge: C1::ScalarField,
@ -75,8 +77,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC1: GroupOpsBounds<'b, C1, GC1>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
// constrain FS into HyperNova, since this is a Decider specifically for HyperNova // constrain FS into HyperNova, since this is a Decider specifically for HyperNova
HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, false>: From<FS>, HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, false>: From<FS>,
crate::folding::hypernova::ProverParams<C1, C2, CS1, CS2, false>: crate::folding::hypernova::ProverParams<C1, C2, CS1, CS2, false>:
@ -89,21 +89,18 @@ where
type Proof = Proof<C1, CS1, S>; type Proof = Proof<C1, CS1, S>;
type VerifierParam = VerifierParam<C1, CS1::VerifierParams, S::VerifyingKey>; type VerifierParam = VerifierParam<C1, CS1::VerifierParams, S::VerifyingKey>;
type PublicInput = Vec<C1::ScalarField>; type PublicInput = Vec<C1::ScalarField>;
type CommittedInstance = ();
type CommittedInstance = Vec<C1>;
fn preprocess( fn preprocess(
mut rng: impl RngCore + CryptoRng, mut rng: impl RngCore + CryptoRng,
prep_param: Self::PreprocessorParam, prep_param: Self::PreprocessorParam,
fs: FS, fs: FS,
) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> {
let circuit =
DeciderEthCircuit::<C1, GC1, C2, GC2, CS1, CS2>::from_hypernova::<FC, MU, NU>(
fs.into(),
)
.unwrap();
let circuit = DeciderEthCircuit::<C1, C2, GC2>::try_from(HyperNova::from(fs))?;
// get the Groth16 specific setup for the circuit // get the Groth16 specific setup for the circuit
let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng).unwrap();
let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng)
.map_err(|e| Error::SNARKSetupFail(e.to_string()))?;
// get the FoldingScheme prover & verifier params from HyperNova // get the FoldingScheme prover & verifier params from HyperNova
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
@ -122,7 +119,7 @@ where
let pp = (g16_pk, hypernova_pp.cs_pp); let pp = (g16_pk, hypernova_pp.cs_pp);
let vp = VerifierParam {
let vp = Self::VerifierParam {
pp_hash, pp_hash,
snark_vp: g16_vk, snark_vp: g16_vk,
cs_vp: hypernova_vp.cs_vp, cs_vp: hypernova_vp.cs_vp,
@ -137,40 +134,37 @@ where
) -> Result<Self::Proof, Error> { ) -> Result<Self::Proof, Error> {
let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp; let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp;
let circuit = DeciderEthCircuit::<C1, GC1, C2, GC2, CS1, CS2>::from_hypernova::<FC, MU, NU>(
folding_scheme.into(),
)?;
let circuit = DeciderEthCircuit::<C1, C2, GC2>::try_from(HyperNova::from(folding_scheme))?;
let snark_proof = S::prove(&snark_pk, circuit.clone(), &mut rng)
.map_err(|e| Error::Other(e.to_string()))?;
// Notice that since the `circuit` has been constructed at the `from_hypernova` call, which
// in case of failure it would have returned an error there, the next two unwraps should
// never reach an error.
let rho_Fr = circuit.rho.ok_or(Error::Empty)?;
let W_i1 = circuit.W_i1.ok_or(Error::Empty)?;
let rho = circuit.randomness;
// get the challenges that have been already computed when preparing the circuit inputs in // get the challenges that have been already computed when preparing the circuit inputs in
// the above `from_hypernova` call
let challenge_W = circuit
.kzg_challenge
.ok_or(Error::MissingValue("kzg_challenge".to_string()))?;
// the above `try_from` call
let kzg_challenges = circuit.kzg_challenges.clone();
// generate KZG proofs // generate KZG proofs
let U_cmW_proof = CS1::prove_with_challenge(
&cs_pk,
challenge_W,
&W_i1.w,
&C1::ScalarField::zero(),
None,
)?;
let kzg_proofs = circuit
.W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| {
CS1::prove_with_challenge(&cs_pk, c, v, &C1::ScalarField::zero(), None)
})
.collect::<Result<Vec<_>, _>>()?;
let snark_proof =
S::prove(&snark_pk, circuit, &mut rng).map_err(|e| Error::Other(e.to_string()))?;
Ok(Self::Proof { Ok(Self::Proof {
snark_proof, snark_proof,
kzg_proof: U_cmW_proof,
rho: rho_Fr,
U_i1: circuit.U_i1.ok_or(Error::Empty)?,
kzg_challenge: challenge_W,
rho,
kzg_proof: (kzg_proofs.len() == 1)
.then(|| kzg_proofs[0].clone())
.ok_or(Error::NotExpectedLength(kzg_proofs.len(), 1))?,
kzg_challenge: (kzg_challenges.len() == 1)
.then(|| kzg_challenges[0])
.ok_or(Error::NotExpectedLength(kzg_challenges.len(), 1))?,
}) })
} }
@ -180,45 +174,48 @@ where
z_0: Vec<C1::ScalarField>, z_0: Vec<C1::ScalarField>,
z_i: Vec<C1::ScalarField>, z_i: Vec<C1::ScalarField>,
// we don't use the instances at the verifier level, since we check them in-circuit // we don't use the instances at the verifier level, since we check them in-circuit
_running_instance: &Self::CommittedInstance,
_incoming_instance: &Self::CommittedInstance,
running_commitments: &Self::CommittedInstance,
incoming_commitments: &Self::CommittedInstance,
proof: &Self::Proof, proof: &Self::Proof,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
if i <= C1::ScalarField::one() { if i <= C1::ScalarField::one() {
return Err(Error::NotEnoughSteps); return Err(Error::NotEnoughSteps);
} }
let (pp_hash, snark_vk, cs_vk): (C1::ScalarField, S::VerifyingKey, CS1::VerifierParams) =
(vp.pp_hash, vp.snark_vp, vp.cs_vp);
let Self::VerifierParam {
pp_hash,
snark_vp,
cs_vp,
} = vp;
// 6.2. Fold the commitments
let C = DeciderHyperNovaGadget::fold_group_elements_native(
running_commitments,
incoming_commitments,
None,
proof.rho,
)?[0];
// Note: the NIMFS proof is checked inside the DeciderEthCircuit, which ensures that the // Note: the NIMFS proof is checked inside the DeciderEthCircuit, which ensures that the
// 'proof.U_i1' is correctly computed // 'proof.U_i1' is correctly computed
let (cmC_x, cmC_y) = NonNativeAffineVar::inputize(proof.U_i1.C)?;
let public_input: Vec<C1::ScalarField> = [ let public_input: Vec<C1::ScalarField> = [
vec![pp_hash, i],
z_0,
z_i,
// U_i+1:
cmC_x,
cmC_y,
vec![proof.U_i1.u],
proof.U_i1.x.clone(),
proof.U_i1.r_x.clone(),
proof.U_i1.v.clone(),
vec![proof.kzg_challenge, proof.kzg_proof.eval, proof.rho],
&[pp_hash, i][..],
&z_0,
&z_i,
&C.inputize(),
&[proof.kzg_challenge, proof.kzg_proof.eval, proof.rho],
] ]
.concat(); .concat();
let snark_v = S::verify(&snark_vk, &public_input, &proof.snark_proof)
let snark_v = S::verify(&snark_vp, &public_input, &proof.snark_proof)
.map_err(|e| Error::Other(e.to_string()))?; .map_err(|e| Error::Other(e.to_string()))?;
if !snark_v { if !snark_v {
return Err(Error::SNARKVerificationFail); return Err(Error::SNARKVerificationFail);
} }
// 7.3. Verify the KZG proof
// we're at the Ethereum EVM case, so the CS1 is KZG commitments // we're at the Ethereum EVM case, so the CS1 is KZG commitments
CS1::verify_with_challenge(&cs_vk, proof.kzg_challenge, &proof.U_i1.C, &proof.kzg_proof)?;
CS1::verify_with_challenge(&cs_vp, proof.kzg_challenge, &C, &proof.kzg_proof)?;
Ok(true) Ok(true)
} }
@ -229,13 +226,14 @@ pub mod tests {
use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective}; use ark_bn254::{constraints::GVar, Bn254, Fr, G1Projective as Projective};
use ark_groth16::Groth16; use ark_groth16::Groth16;
use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2}; use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2};
use ark_serialize::{Compress, Validate};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};
use super::*; use super::*;
use crate::commitment::{kzg::KZG, pedersen::Pedersen}; use crate::commitment::{kzg::KZG, pedersen::Pedersen};
use crate::folding::hypernova::cccs::CCCS; use crate::folding::hypernova::cccs::CCCS;
use crate::folding::hypernova::lcccs::LCCCS;
use crate::folding::hypernova::PreprocessorParam; use crate::folding::hypernova::PreprocessorParam;
use crate::folding::nova::decider_eth::VerifierParam;
use crate::folding::traits::CommittedInstanceOps;
use crate::frontend::utils::CubicFCircuit; use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config; use crate::transcript::poseidon::poseidon_canonical_config;
@ -300,8 +298,8 @@ pub mod tests {
hypernova.i, hypernova.i,
hypernova.z_0, hypernova.z_0,
hypernova.z_i, hypernova.z_i,
&(),
&(),
&hypernova.U_i.get_commitments(),
&hypernova.u_i.get_commitments(),
&proof, &proof,
) )
.unwrap(); .unwrap();
@ -403,8 +401,8 @@ pub mod tests {
hypernova.i, hypernova.i,
hypernova.z_0.clone(), hypernova.z_0.clone(),
hypernova.z_i.clone(), hypernova.z_i.clone(),
&(),
&(),
&hypernova.U_i.get_commitments(),
&hypernova.u_i.get_commitments(),
&proof, &proof,
) )
.unwrap(); .unwrap();
@ -470,8 +468,8 @@ pub mod tests {
i_deserialized, i_deserialized,
z_0_deserialized.clone(), z_0_deserialized.clone(),
z_i_deserialized.clone(), z_i_deserialized.clone(),
&(),
&(),
&hypernova.U_i.get_commitments(),
&hypernova.u_i.get_commitments(),
&proof_deserialized, &proof_deserialized,
) )
.unwrap(); .unwrap();
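
The `verify` function above rebuilds the decider circuit's public inputs by concatenation. The helper below sketches that ordering in isolation; the argument names are placeholders, and `folded_commitment_limbs` stands for the output of `C.inputize()`.

use ark_bn254::Fr;

/// Assemble the public inputs for the decider's SNARK proof in the same
/// order as the circuit allocates them: (pp_hash, i), z_0, z_i, the limbs
/// of the folded commitment(s), and finally the KZG challenge, claimed
/// evaluation and folding randomness.
fn public_inputs(
    pp_hash: Fr,
    i: Fr,
    z_0: &[Fr],
    z_i: &[Fr],
    folded_commitment_limbs: &[Fr],
    kzg_challenge: Fr,
    kzg_eval: Fr,
    rho: Fr,
) -> Vec<Fr> {
    [
        &[pp_hash, i][..],
        z_0,
        z_i,
        folded_commitment_limbs,
        &[kzg_challenge, kzg_eval, rho][..],
    ]
    .concat()
}

fn main() {
    let one = Fr::from(1u64);
    let pi = public_inputs(one, one, &[one], &[one], &[one; 10], one, one, one);
    println!("{} public inputs", pi.len());
}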

+ 145
- 422
folding-schemes/src/folding/hypernova/decider_eth_circuit.rs

@ -2,50 +2,73 @@
/// other more efficient approaches can be used. /// other more efficient approaches can be used.
use ark_crypto_primitives::sponge::{ use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar, constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
Absorb, CryptographicSponge, Absorb, CryptographicSponge,
}; };
use ark_ec::{CurveGroup, Group};
use ark_ec::CurveGroup;
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_poly::Polynomial;
use ark_r1cs_std::{ use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode}, alloc::{AllocVar, AllocationMode},
boolean::Boolean, boolean::Boolean,
eq::EqGadget, eq::EqGadget,
fields::fp::FpVar, fields::fp::FpVar,
groups::GroupOpsBounds,
prelude::CurveVar, prelude::CurveVar,
ToConstraintFieldGadget, ToConstraintFieldGadget,
}; };
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_std::Zero;
use core::{borrow::Borrow, marker::PhantomData};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{borrow::Borrow, log2, marker::PhantomData};
use super::{ use super::{
circuits::{CCCSVar, LCCCSVar, NIMFSGadget, ProofVar as NIMFSProofVar}, circuits::{CCCSVar, LCCCSVar, NIMFSGadget, ProofVar as NIMFSProofVar},
nimfs::{NIMFSProof, NIMFS}, nimfs::{NIMFSProof, NIMFS},
HyperNova, Witness, CCCS, LCCCS, HyperNova, Witness, CCCS, LCCCS,
}; };
use crate::folding::circuits::{
cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness},
CF1, CF2,
};
use crate::folding::circuits::{decider::on_chain::GenericOnchainDeciderCircuit, CF1, CF2};
use crate::folding::traits::{WitnessOps, WitnessVarOps};
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::transcript::{Transcript, TranscriptVar};
use crate::utils::{
gadgets::{eval_mle, MatrixGadget, SparseMatrixVar},
vec::poly_from_vec,
};
use crate::utils::gadgets::{eval_mle, MatrixGadget};
use crate::Error; use crate::Error;
use crate::{ use crate::{
arith::{ccs::CCS, r1cs::R1CS},
folding::traits::{CommittedInstanceVarOps, Dummy, WitnessVarOps},
arith::{
ccs::{circuits::CCSMatricesVar, CCS},
ArithGadget,
},
folding::circuits::decider::{EvalGadget, KZGChallengesGadget},
}; };
use crate::{ use crate::{
commitment::{pedersen::Params as PedersenParams, CommitmentScheme}, commitment::{pedersen::Params as PedersenParams, CommitmentScheme},
folding::nova::decider_eth_circuit::evaluate_gadget,
folding::circuits::decider::DeciderEnabledNIFS,
}; };
impl<C: CurveGroup> ArithGadget<WitnessVar<CF1<C>>, LCCCSVar<C>> for CCSMatricesVar<CF1<C>> {
type Evaluation = Vec<FpVar<CF1<C>>>;
fn eval_relation(
&self,
w: &WitnessVar<CF1<C>>,
u: &LCCCSVar<C>,
) -> Result<Self::Evaluation, SynthesisError> {
let z = [&[u.u.clone()][..], &u.x, &w.w].concat();
self.M
.iter()
.map(|M_j| {
let s = log2(M_j.n_rows) as usize;
let Mz = M_j.mul_vector(&z)?;
Ok(eval_mle(s, Mz, u.r_x.clone()))
})
.collect()
}
fn enforce_evaluation(
_w: &WitnessVar<CF1<C>>,
u: &LCCCSVar<C>,
v: Self::Evaluation,
) -> Result<(), SynthesisError> {
v.enforce_equal(&u.v)
}
}
/// In-circuit representation of the Witness associated to the CommittedInstance. /// In-circuit representation of the Witness associated to the CommittedInstance.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct WitnessVar<F: PrimeField> { pub struct WitnessVar<F: PrimeField> {
@ -77,135 +100,38 @@ impl WitnessVarOps for WitnessVar {
} }
} }
/// CCSMatricesVar contains the matrices 'M' of the CCS without the rest of CCS parameters.
#[derive(Debug, Clone)]
pub struct CCSMatricesVar<F: PrimeField> {
// we only need native representation, so the constraint field==F
pub M: Vec<SparseMatrixVar<F, F, FpVar<F>>>,
}
impl<F> AllocVar<CCS<F>, F> for CCSMatricesVar<F>
where
F: PrimeField,
{
fn new_variable<T: Borrow<CCS<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let M: Vec<SparseMatrixVar<F, F, FpVar<F>>> = val
.borrow()
.M
.iter()
.map(|M| SparseMatrixVar::<F, F, FpVar<F>>::new_constant(cs.clone(), M.clone()))
.collect::<Result<_, SynthesisError>>()?;
Ok(Self { M })
})
}
}
/// Gadget to check the LCCCS relation both over the native constraint field and over the
/// non-native constraint field.
#[derive(Debug, Clone)]
pub struct LCCCSCheckerGadget {}
impl LCCCSCheckerGadget {
/// performs in-circuit the RelaxedR1CS check for native variables (Az∘Bz==uCz+E)
pub fn check<F: PrimeField>(
s: usize,
ccs_mat: CCSMatricesVar<F>,
z: Vec<FpVar<F>>,
// LCCCS values
r_x: Vec<FpVar<F>>,
v: Vec<FpVar<F>>,
) -> Result<(), SynthesisError> {
let computed_v: Vec<FpVar<F>> = ccs_mat
.M
.iter()
.map(|M_j| {
let Mz = M_j.mul_vector(&z)?;
Ok(eval_mle(s, Mz, r_x.clone()))
})
.collect::<Result<Vec<FpVar<F>>, SynthesisError>>()?;
(computed_v).enforce_equal(&v)?;
Ok(())
}
}
/// Circuit that implements the in-circuit checks needed for the HyperNova's onchain (Ethereum's
/// EVM) verification.
#[derive(Clone, Debug)]
pub struct DeciderEthCircuit<C1, GC1, C2, GC2, CS1, CS2, const H: bool = false>
pub type DeciderEthCircuit<C1, C2, GC2> = GenericOnchainDeciderCircuit<
C1,
C2,
GC2,
LCCCS<C1>,
CCCS<C1>,
Witness<CF1<C1>>,
CCS<CF1<C1>>,
CCSMatricesVar<CF1<C1>>,
DeciderHyperNovaGadget,
>;
impl<
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, H>,
// enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider
CS2: CommitmentScheme<C2, H, ProverParams = PedersenParams<C2>>,
const MU: usize,
const NU: usize,
const H: bool,
> TryFrom<HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, H>>
for DeciderEthCircuit<C1, C2, GC2>
where where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
CF1<C1>: Absorb,
{ {
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
_cs1: PhantomData<CS1>,
_cs2: PhantomData<CS2>,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// CCS of the Augmented Function circuit
pub ccs: CCS<C1::ScalarField>,
/// R1CS of the CycleFold circuit
pub cf_r1cs: R1CS<C2::ScalarField>,
/// CycleFold PedersenParams over C2
pub cf_pedersen_params: PedersenParams<C2>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
/// public params hash
pub pp_hash: Option<C1::ScalarField>,
pub i: Option<CF1<C1>>,
/// initial state
pub z_0: Option<Vec<C1::ScalarField>>,
/// current i-th state
pub z_i: Option<Vec<C1::ScalarField>>,
/// Nova instances
pub U_i: Option<LCCCS<C1>>,
pub W_i: Option<Witness<C1::ScalarField>>,
pub u_i: Option<CCCS<C1>>,
pub w_i: Option<Witness<C1::ScalarField>>,
pub U_i1: Option<LCCCS<C1>>,
pub W_i1: Option<Witness<C1::ScalarField>>,
pub nimfs_proof: Option<NIMFSProof<C1>>,
// rho is the 'random' value used for the fold of the last 2 instances
pub rho: Option<C1::ScalarField>,
/// CycleFold running instance
pub cf_U_i: Option<CycleFoldCommittedInstance<C2>>,
pub cf_W_i: Option<CycleFoldWitness<C2>>,
/// KZG challenge & eval
pub kzg_challenge: Option<C1::ScalarField>,
pub eval_W: Option<C1::ScalarField>,
}
type Error = Error;
impl<C1, GC1, C2, GC2, CS1, CS2, const H: bool> DeciderEthCircuit<C1, GC1, C2, GC2, CS1, CS2, H>
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
CS1: CommitmentScheme<C1, H>,
// enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider
CS2: CommitmentScheme<C2, H, ProverParams = PedersenParams<C2>>,
<C1 as Group>::ScalarField: Absorb,
<C1 as CurveGroup>::BaseField: PrimeField,
{
/// returns an instance of the DeciderEthCircuit from the given HyperNova struct
pub fn from_hypernova<FC: FCircuit<C1::ScalarField>, const MU: usize, const NU: usize>(
hn: HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, H>,
) -> Result<Self, Error> {
fn try_from(hn: HyperNova<C1, GC1, C2, GC2, FC, CS1, CS2, MU, NU, H>) -> Result<Self, Error> {
// compute the U_{i+1}, W_{i+1}, by folding the last running & incoming instances // compute the U_{i+1}, W_{i+1}, by folding the last running & incoming instances
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&hn.poseidon_config); let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&hn.poseidon_config);
transcript.absorb(&hn.pp_hash); transcript.absorb(&hn.pp_hash);
@ -219,288 +145,92 @@ where
)?; )?;
// compute the KZG challenges used as inputs in the circuit // compute the KZG challenges used as inputs in the circuit
let kzg_challenge =
KZGChallengeGadget::<C1>::get_challenge_native(&mut transcript, U_i1.clone())?;
let kzg_challenges = KZGChallengesGadget::get_challenges_native(&mut transcript, &U_i1);
// get KZG evals // get KZG evals
let mut W = W_i1.w.clone();
W.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.w.len().next_power_of_two() - W_i1.w.len()),
);
let p_W = poly_from_vec(W.to_vec())?;
let eval_W = p_W.evaluate(&kzg_challenge);
let kzg_evaluations = W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| EvalGadget::evaluate_native(v, c))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self { Ok(Self {
_c1: PhantomData,
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData, _gc2: PhantomData,
_cs1: PhantomData,
_cs2: PhantomData,
cf_E_len: hn.cf_W_i.E.len(),
ccs: hn.ccs,
cf_r1cs: hn.cf_r1cs,
_avar: PhantomData,
arith: hn.ccs,
cf_arith: hn.cf_r1cs,
cf_pedersen_params: hn.cf_cs_pp,
poseidon_config: hn.poseidon_config,
pp_hash: Some(hn.pp_hash),
i: Some(hn.i),
z_0: Some(hn.z_0),
z_i: Some(hn.z_i),
U_i: Some(hn.U_i),
W_i: Some(hn.W_i),
u_i: Some(hn.u_i),
w_i: Some(hn.w_i),
U_i1: Some(U_i1),
W_i1: Some(W_i1),
nimfs_proof: Some(nimfs_proof),
rho: Some(rho),
cf_U_i: Some(hn.cf_U_i),
cf_W_i: Some(hn.cf_W_i),
kzg_challenge: Some(kzg_challenge),
eval_W: Some(eval_W),
pp_hash: hn.pp_hash,
i: hn.i,
z_0: hn.z_0,
z_i: hn.z_i,
U_i: hn.U_i,
W_i: hn.W_i,
u_i: hn.u_i,
w_i: hn.w_i,
U_i1,
W_i1,
proof: nimfs_proof,
randomness: rho,
cf_U_i: hn.cf_U_i,
cf_W_i: hn.cf_W_i,
kzg_challenges,
kzg_evaluations,
})
}
}
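// For reference, a minimal native sketch of the "KZG challenge & evaluation" step above,
// written directly against ark-poly. `eval_opening` is an illustrative helper, not part of
// this crate, and the exact interpolation performed by `EvalGadget::evaluate_native` /
// `poly_from_vec` may differ in detail.
fn eval_opening<F: ark_ff::PrimeField>(mut v: Vec<F>, challenge: F) -> F {
    use ark_ff::Zero;
    use ark_poly::{EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial};
    // pad the opening vector with zeros up to the next power of two, as done for W_{i+1}
    v.resize(v.len().next_power_of_two(), F::zero());
    let domain = GeneralEvaluationDomain::<F>::new(v.len()).expect("power-of-two domain");
    // interpret the padded vector as evaluations over the domain, interpolate, and evaluate
    // the resulting polynomial at the transcript challenge
    Evaluations::from_vec_and_domain(v, domain)
        .interpolate()
        .evaluate(&challenge)
}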
impl<C1, GC1, C2, GC2, CS1, CS2> ConstraintSynthesizer<CF1<C1>>
for DeciderEthCircuit<C1, GC1, C2, GC2, CS1, CS2>
pub struct DeciderHyperNovaGadget;
impl<C: CurveGroup> DeciderEnabledNIFS<C, LCCCS<C>, CCCS<C>, Witness<C::ScalarField>, CCS<CF1<C>>>
for DeciderHyperNovaGadget
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
CS1: CommitmentScheme<C1>,
CS2: CommitmentScheme<C2>,
C1::ScalarField: PrimeField,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
CF1<C>: Absorb,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let ccs_matrices =
CCSMatricesVar::<C1::ScalarField>::new_witness(cs.clone(), || Ok(self.ccs.clone()))?;
let pp_hash = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.pp_hash.unwrap_or_else(CF1::<C1>::zero))
})?;
let i =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::<C1>::zero)))?;
let z_0 = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let z_i = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let U_dummy_native = LCCCS::<C1>::dummy(&self.ccs);
let u_dummy_native = CCCS::<C1>::dummy(&self.ccs);
let w_dummy_native = Witness::<C1::ScalarField>::new(
vec![C1::ScalarField::zero(); self.ccs.n - 3 /* (3=2+1, since u_i.x.len=2) */],
);
let U_i = LCCCSVar::<C1>::new_witness(cs.clone(), || {
Ok(self.U_i.unwrap_or(U_dummy_native.clone()))
})?;
let u_i = CCCSVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
// here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i))
let U_i1 = LCCCSVar::<C1>::new_input(cs.clone(), || {
Ok(self.U_i1.unwrap_or(U_dummy_native.clone()))
})?;
let W_i1 = WitnessVar::<C1::ScalarField>::new_witness(cs.clone(), || {
Ok(self.W_i1.unwrap_or(w_dummy_native.clone()))
})?;
let nimfs_proof_dummy = NIMFSProof::<C1>::dummy((&self.ccs, 1, 1)); // mu=1 & nu=1 because the last fold is 2-to-1
let nimfs_proof = NIMFSProofVar::<C1>::new_witness(cs.clone(), || {
Ok(self.nimfs_proof.unwrap_or(nimfs_proof_dummy))
})?;
// allocate the inputs for the check 6
let kzg_challenge = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.kzg_challenge.unwrap_or_else(CF1::<C1>::zero))
})?;
let eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero))
})?;
// `sponge` is for digest computation.
let sponge = PoseidonSpongeVar::<C1::ScalarField>::new(cs.clone(), &self.poseidon_config);
// `transcript` is for challenge generation.
let mut transcript = sponge.clone();
type ProofDummyCfg = (usize, usize, usize, usize);
type Proof = NIMFSProof<C>;
type Randomness = CF1<C>;
type RandomnessDummyCfg = ();
fn fold_field_elements_gadget(
arith: &CCS<CF1<C>>,
transcript: &mut PoseidonSpongeVar<CF1<C>>,
pp_hash: FpVar<CF1<C>>,
U: LCCCSVar<C>,
_U_vec: Vec<FpVar<CF1<C>>>,
u: CCCSVar<C>,
proof: Self::Proof,
randomness: Self::Randomness,
) -> Result<LCCCSVar<C>, SynthesisError> {
let cs = transcript.cs();
transcript.absorb(&pp_hash)?;
// NOTE: we use the same enumeration as in Nova's DeciderEthCircuit described at
// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
// in order to make it easier to reason about.
// 1. check LCCCS relation of U_{i+1}
let z_U1: Vec<FpVar<CF1<C1>>> =
[vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.w.to_vec()].concat();
LCCCSCheckerGadget::check(
self.ccs.s,
ccs_matrices,
z_U1,
U_i1.r_x.clone(),
U_i1.v.clone(),
)?;
// 3.a u_i.x[0] == H(i, z_0, z_i, U_i)
let (u_i_x, _) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
(u_i.x[0]).enforce_equal(&u_i_x)?;
#[cfg(feature = "light-test")]
log::warn!("[WARNING]: Running with the 'light-test' feature, skipping the big part of the DeciderEthCircuit.\n Only for testing purposes.");
// The following two checks (and their respective allocations) are disabled for normal
// tests, since they take several million constraints and would need several minutes
// (and a lot of RAM) to run. They are active by default, and only skipped when the
// 'light-test' feature is used.
#[cfg(not(feature = "light-test"))]
{
// imports here instead of at the top of the file, so we avoid having multiple
// `#[cfg(not(test))]`
use crate::commitment::pedersen::PedersenGadget;
use crate::folding::circuits::nonnative::uint::NonNativeUintVar;
use crate::folding::nova::decider_eth_circuit::{R1CSVar, RelaxedR1CSGadget};
use crate::folding::{
circuits::cyclefold::{
CycleFoldCommittedInstanceVar, CycleFoldConfig, CycleFoldWitnessVar,
},
nova::NovaCycleFoldConfig,
};
use ark_r1cs_std::ToBitsGadget;
let cf_u_dummy_native =
CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN);
let cf_w_dummy_native = CycleFoldWitness::<C2>::dummy(&self.cf_r1cs);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_W_i = CycleFoldWitnessVar::<C2>::new_witness(cs.clone(), || {
Ok(self.cf_W_i.unwrap_or(cf_w_dummy_native.clone()))
})?;
// 3.b u_i.x[1] == H(cf_U_i)
let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
(u_i.x[1]).enforce_equal(&cf_u_i_x)?;
// 4. check Pedersen commitments of cf_U_i.{cmE, cmW}
let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?;
let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?;
let cf_W_i_E_bits: Result<Vec<Vec<Boolean<CF1<C1>>>>, SynthesisError> =
cf_W_i.E.iter().map(|E_i| E_i.to_bits_le()).collect();
let cf_W_i_W_bits: Result<Vec<Vec<Boolean<CF1<C1>>>>, SynthesisError> =
cf_W_i.W.iter().map(|W_i| W_i.to_bits_le()).collect();
let computed_cmE = PedersenGadget::<C2, GC2>::commit(
H.clone(),
G.clone(),
cf_W_i_E_bits?,
cf_W_i.rE.to_bits_le()?,
)?;
cf_U_i.cmE.enforce_equal(&computed_cmE)?;
let computed_cmW =
PedersenGadget::<C2, GC2>::commit(H, G, cf_W_i_W_bits?, cf_W_i.rW.to_bits_le()?)?;
cf_U_i.cmW.enforce_equal(&computed_cmW)?;
let cf_r1cs =
R1CSVar::<C1::BaseField, CF1<C1>, NonNativeUintVar<CF1<C1>>>::new_witness(
cs.clone(),
|| Ok(self.cf_r1cs.clone()),
)?;
// 5. check RelaxedR1CS of cf_U_i
let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?;
}
// The following steps are not in increasing order, because `computed_U_i1` is computed
// at step 8 and later used at step 6. Notice that in Nova we compute U_{i+1} outside of the
// circuit, in the smart contract, but here we compute it in-circuit, and we reuse the
// `rho_bits` obtained along the way of computing `computed_U_i1` for the later `rho_powers`
// check (6.b).
// 8.a verify the NIMFS.V of the final fold, and check that the rho_powers obtained from the
// transcript match the ones given as public input (so we avoid the onchain logic of the
// verifier computing them).
// Notice that the NIMFSGadget performs all the logic except checking the fold of the
// instances' C parameter, which would require non-native arithmetic, hence we perform
// that check outside the circuit.
let (computed_U_i1, rho_bits) = NIMFSGadget::<C1>::verify(
let nimfs_proof = NIMFSProofVar::<C>::new_witness(cs.clone(), || Ok(proof))?;
let rho = FpVar::<CF1<C>>::new_input(cs.clone(), || Ok(randomness))?;
let (computed_U_i1, rho_bits) = NIMFSGadget::<C>::verify(
cs.clone(),
&self.ccs.clone(),
&mut transcript,
&[U_i],
&[u_i],
arith,
transcript,
&[U],
&[u],
nimfs_proof,
Boolean::TRUE, // enabled
)?;
// 6.a check KZG challenges
// Notice that this step is done after the NIMFS.V, to follow the transcript absorb order
// used outside the circuit, where computing the challenge first requires computing
// U_{i+1} through the NIMFS.V
let incircuit_challenge =
KZGChallengeGadget::<C1>::get_challenge_gadget(&mut transcript, U_i1.clone())?;
incircuit_challenge.enforce_equal(&kzg_challenge)?;
// 6.b check that the U_{i+1} obtained from the NIMFS.V matches the U_{i+1} from the input,
// except for the C parameter, whose folding would require non-native logic in-circuit, so
// we defer that check to outside the circuit.
computed_U_i1.u.enforce_equal(&U_i1.u)?;
computed_U_i1.r_x.enforce_equal(&U_i1.r_x)?;
computed_U_i1.v.enforce_equal(&U_i1.v)?;
// 7. check eval_W==p_W(c_W)
let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.w, incircuit_challenge)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
// 8.b check that the in-circuit computed r is equal to the inputted r.
let rho = Boolean::le_bits_to_fp_var(&rho_bits)?;
let external_rho =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.rho.unwrap_or(CF1::<C1>::zero())))?;
rho.enforce_equal(&external_rho)?;
Ok(())
Boolean::le_bits_to_fp_var(&rho_bits)?.enforce_equal(&rho)?;
Ok(computed_U_i1)
}
}
/// Gadget that computes the KZG challenges, also offers the rust native implementation compatible
/// with the gadget.
pub struct KZGChallengeGadget<C: CurveGroup> {
_c: PhantomData<C>,
}
#[allow(clippy::type_complexity)]
impl<C> KZGChallengeGadget<C>
where
C: CurveGroup,
C::ScalarField: PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
C::ScalarField: Absorb,
{
pub fn get_challenge_native<T: Transcript<C::ScalarField>>(
transcript: &mut T,
U_i: LCCCS<C>,
) -> Result<C::ScalarField, Error> {
// compute the KZG challenges, which are computed in-circuit and checked that it matches
// the inputted one
transcript.absorb_nonnative(&U_i.C);
Ok(transcript.get_challenge())
}
// compatible with the native get_challenge_native
pub fn get_challenge_gadget<S: CryptographicSponge, T: TranscriptVar<CF1<C>, S>>(
transcript: &mut T,
U_i: LCCCSVar<C>,
) -> Result<FpVar<C::ScalarField>, SynthesisError> {
transcript.absorb(&U_i.C.to_constraint_field()?)?;
transcript.get_challenge()
fn fold_group_elements_native(
U_commitments: &[C],
u_commitments: &[C],
_: Option<Self::Proof>,
r: Self::Randomness,
) -> Result<Vec<C>, Error> {
let U_C = U_commitments[0];
let u_C = u_commitments[0];
let C = U_C + u_C.mul(r);
Ok(vec![C])
}
}
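// A hedged usage sketch (illustrative, not code from this PR) of how the two halves of the
// last fold are expected to be combined: `fold_field_elements_gadget` folds the field part
// of U_{i+1} in-circuit, while the commitment is folded natively with
// `fold_group_elements_native` (defined above), exactly as the onchain verifier does, and is
// then handed back to the circuit as a public input.
fn fold_last_commitments<C: CurveGroup>(
    U_i: &LCCCS<C>,
    u_i: &CCCS<C>,
    rho: C::ScalarField,
) -> Result<Vec<C>, Error>
where
    CF1<C>: Absorb,
{
    use crate::folding::traits::CommittedInstanceOps;
    // computes [U_i.C + rho * u_i.C]; HyperNova's last fold needs no auxiliary group
    // element, hence the `None` where Nova would pass `cmT`
    DeciderHyperNovaGadget::fold_group_elements_native(
        &U_i.get_commitments(),
        &u_i.get_commitments(),
        None,
        rho,
    )
}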
@ -508,10 +238,11 @@ where
pub mod tests {
use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective};
use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2};
use ark_relations::r1cs::ConstraintSystem;
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use ark_std::{test_rng, UniformRand};
use super::*;
use crate::arith::r1cs::R1CS;
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::PreprocessorParam;
use crate::frontend::utils::CubicFCircuit;
@ -530,7 +261,7 @@ pub mod tests {
let (pedersen_params, _) =
Pedersen::<Projective>::setup(&mut rng, ccs.n - ccs.l - 1).unwrap();
let (lcccs, _) = ccs
let (lcccs, w) = ccs
.to_lcccs::<_, Projective, Pedersen<Projective>, false>(&mut rng, &pedersen_params, &z)
.unwrap();
@ -538,11 +269,10 @@ pub mod tests {
// CCS's (sparse) matrices are constants in the circuit
let ccs_mat = CCSMatricesVar::<Fr>::new_constant(cs.clone(), ccs.clone()).unwrap();
let zVar = Vec::<FpVar<Fr>>::new_input(cs.clone(), || Ok(z)).unwrap();
let r_xVar = Vec::<FpVar<Fr>>::new_input(cs.clone(), || Ok(lcccs.r_x)).unwrap();
let vVar = Vec::<FpVar<Fr>>::new_input(cs.clone(), || Ok(lcccs.v)).unwrap();
let w_var = WitnessVar::new_witness(cs.clone(), || Ok(w)).unwrap();
let lcccs_var = LCCCSVar::new_input(cs.clone(), || Ok(lcccs)).unwrap();
LCCCSCheckerGadget::check(ccs.s, ccs_mat, zVar, r_xVar, vVar).unwrap();
ccs_mat.enforce_relation(&w_var, &lcccs_var).unwrap();
assert!(cs.is_satisfied().unwrap());
}
@ -588,15 +318,8 @@ pub mod tests {
HN::verify(hn_params.1, ivc_proof).unwrap();
// load the DeciderEthCircuit from the generated HyperNova instance
let decider_circuit = DeciderEthCircuit::<
Projective,
GVar,
Projective2,
GVar2,
Pedersen<Projective>,
Pedersen<Projective2>,
>::from_hypernova(hypernova)
.unwrap();
let decider_circuit =
DeciderEthCircuit::<Projective, Projective2, GVar2>::try_from(hypernova).unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();

+ 15
- 1
folding-schemes/src/folding/hypernova/lcccs.rs

@ -14,6 +14,7 @@ use crate::arith::ccs::CCS;
use crate::arith::Arith;
use crate::commitment::CommitmentScheme;
use crate::folding::circuits::CF1;
use crate::folding::traits::Inputize;
use crate::folding::traits::{CommittedInstanceOps, Dummy};
use crate::transcript::AbsorbNonNative;
use crate::utils::mle::dense_vec_to_dense_mle;
@ -72,7 +73,7 @@ impl CCS {
Ok((
LCCCS::<C> {
C,
u: C::ScalarField::one(),
u: z[0],
x: z[1..(1 + self.l)].to_vec(),
r_x,
v,
@ -154,6 +155,19 @@ impl CommittedInstanceOps for LCCCS {
}
}
impl<C: CurveGroup> Inputize<C::ScalarField, LCCCSVar<C>> for LCCCS<C> {
fn inputize(&self) -> Vec<C::ScalarField> {
[
&self.C.inputize(),
&[self.u][..],
&self.x,
&self.r_x,
&self.v,
]
.concat()
}
}
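// A hedged sketch (illustrative helper, not part of this PR) of how `Inputize` is meant to
// be used: it flattens the LCCCS into scalar-field elements in the same order in which
// `LCCCSVar::new_input` allocates its fields (C, u, x, r_x, v), so a native public-input
// vector built from it lines up with the circuit's inputs.
fn lcccs_public_inputs<C: CurveGroup>(pp_hash: CF1<C>, i: CF1<C>, U: &LCCCS<C>) -> Vec<CF1<C>> {
    [&[pp_hash, i][..], &U.inputize()].concat()
}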
#[cfg(test)]
pub mod tests {
use ark_pallas::{Fr, Projective};

+ 25
- 47
folding-schemes/src/folding/hypernova/mod.rs

@ -5,7 +5,7 @@ use ark_crypto_primitives::sponge::{
}; };
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError};
use ark_std::{fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero}; use ark_std::{fmt::Debug, marker::PhantomData, rand::RngCore, One, Zero};
@ -293,8 +293,6 @@ where
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
type RunningInstance = (LCCCS<C1>, Witness<C1::ScalarField>); type RunningInstance = (LCCCS<C1>, Witness<C1::ScalarField>);
type IncomingInstance = (CCCS<C1>, Witness<C1::ScalarField>); type IncomingInstance = (CCCS<C1>, Witness<C1::ScalarField>);
@ -360,8 +358,6 @@ where
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
/// internal helper for new_running_instance & new_incoming_instance methods, returns the R1CS
/// z=[u,x,w] vector to be used to create the LCCCS & CCCS fresh instances.
@ -467,8 +463,6 @@ where
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
/// Reuse Nova's PreprocessorParam.
type PreprocessorParam = PreprocessorParam<C1, C2, FC, CS1, CS2, H>;
@ -567,23 +561,14 @@ where
let cf_r1cs = get_r1cs_from_cs::<C2::ScalarField>(cf_circuit)?;
// if cs params exist, use them, if not, generate new ones
let cs_pp: CS1::ProverParams;
let cs_vp: CS1::VerifierParams;
let cf_cs_pp: CS2::ProverParams;
let cf_cs_vp: CS2::VerifierParams;
if prep_param.cs_pp.is_some()
&& prep_param.cf_cs_pp.is_some()
&& prep_param.cs_vp.is_some()
&& prep_param.cf_cs_vp.is_some()
{
cs_pp = prep_param.clone().cs_pp.unwrap();
cs_vp = prep_param.clone().cs_vp.unwrap();
cf_cs_pp = prep_param.clone().cf_cs_pp.unwrap();
cf_cs_vp = prep_param.clone().cf_cs_vp.unwrap();
} else {
(cs_pp, cs_vp) = CS1::setup(&mut rng, ccs.n - ccs.l - 1)?;
(cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, cf_r1cs.A.n_cols - cf_r1cs.l - 1)?;
}
let (cs_pp, cs_vp) = match (&prep_param.cs_pp, &prep_param.cs_vp) {
(Some(cs_pp), Some(cs_vp)) => (cs_pp.clone(), cs_vp.clone()),
_ => CS1::setup(&mut rng, ccs.n - ccs.l - 1)?,
};
let (cf_cs_pp, cf_cs_vp) = match (&prep_param.cf_cs_pp, &prep_param.cf_cs_vp) {
(Some(cf_cs_pp), Some(cf_cs_vp)) => (cf_cs_pp.clone(), cf_cs_vp.clone()),
_ => CS2::setup(&mut rng, cf_r1cs.A.n_cols - cf_r1cs.l - 1)?,
};
let pp = ProverParams::<C1, C2, CS1, CS2, H> { let pp = ProverParams::<C1, C2, CS1, CS2, H> {
poseidon_config: prep_param.poseidon_config.clone(), poseidon_config: prep_param.poseidon_config.clone(),
@ -726,9 +711,9 @@ where
let (Us, Ws): (Vec<LCCCS<C1>>, Vec<Witness<C1::ScalarField>>) = let (Us, Ws): (Vec<LCCCS<C1>>, Vec<Witness<C1::ScalarField>>) =
lcccs.into_iter().unzip(); lcccs.into_iter().unzip();
let (us, ws): (Vec<CCCS<C1>>, Vec<Witness<C1::ScalarField>>) = cccs.into_iter().unzip(); let (us, ws): (Vec<CCCS<C1>>, Vec<Witness<C1::ScalarField>>) = cccs.into_iter().unzip();
(Some(Us), Some(Ws), Some(us), Some(ws))
(Us, Ws, us, ws)
} else { } else {
(None, None, None, None)
(vec![], vec![], vec![], vec![])
}; };
let augmented_f_circuit: AugmentedFCircuit<C1, C2, GC2, FC, MU, NU>; let augmented_f_circuit: AugmentedFCircuit<C1, C2, GC2, FC, MU, NU>;
@ -807,9 +792,9 @@ where
z_i: Some(self.z_i.clone()), z_i: Some(self.z_i.clone()),
external_inputs: Some(external_inputs.clone()), external_inputs: Some(external_inputs.clone()),
U_i: Some(self.U_i.clone()), U_i: Some(self.U_i.clone()),
Us: Us.clone(),
Us: Some(Us),
u_i_C: Some(self.u_i.C), u_i_C: Some(self.u_i.C),
us: us.clone(),
us: Some(us),
U_i1_C: Some(U_i1.C), U_i1_C: Some(U_i1.C),
F: self.F.clone(), F: self.F.clone(),
x: Some(u_i1_x), x: Some(u_i1_x),
@ -826,21 +811,12 @@ where
PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config); PoseidonSponge::<C1::ScalarField>::new(&self.poseidon_config);
transcript_p.absorb(&self.pp_hash); transcript_p.absorb(&self.pp_hash);
let (all_Us, all_us, all_Ws, all_ws) = if MU > 1 || NU > 1 {
(
[vec![self.U_i.clone()], Us.clone().unwrap()].concat(),
[vec![self.u_i.clone()], us.clone().unwrap()].concat(),
[vec![self.W_i.clone()], Ws.unwrap()].concat(),
[vec![self.w_i.clone()], ws.unwrap()].concat(),
)
} else {
(
vec![self.U_i.clone()],
vec![self.u_i.clone()],
vec![self.W_i.clone()],
vec![self.w_i.clone()],
)
};
let (all_Us, all_us, all_Ws, all_ws) = (
[&[self.U_i.clone()][..], &Us].concat(),
[&[self.u_i.clone()][..], &us].concat(),
[vec![self.W_i.clone()], Ws].concat(),
[vec![self.w_i.clone()], ws].concat(),
);
let (rho, nimfs_proof); let (rho, nimfs_proof);
(nimfs_proof, U_i1, W_i1, rho) = NIMFS::<C1, PoseidonSponge<C1::ScalarField>>::prove( (nimfs_proof, U_i1, W_i1, rho) = NIMFS::<C1, PoseidonSponge<C1::ScalarField>>::prove(
@ -865,7 +841,9 @@ where
); );
let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec(); let rho_bits = rho.into_bigint().to_bits_le()[..NOVA_N_BITS_RO].to_vec();
let rho_Fq = C1::BaseField::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap();
let rho_Fq = C1::BaseField::from(<C1::BaseField as PrimeField>::BigInt::from_bits_le(
&rho_bits,
));
// CycleFold part:
// get the vector used as public inputs 'x' in the CycleFold circuit.
@ -935,9 +913,9 @@ where
z_i: Some(self.z_i.clone()), z_i: Some(self.z_i.clone()),
external_inputs: Some(external_inputs), external_inputs: Some(external_inputs),
U_i: Some(self.U_i.clone()), U_i: Some(self.U_i.clone()),
Us: Us.clone(),
Us: Some(Us),
u_i_C: Some(self.u_i.C), u_i_C: Some(self.u_i.C),
us: us.clone(),
us: Some(us),
U_i1_C: Some(U_i1.C), U_i1_C: Some(U_i1.C),
F: self.F.clone(), F: self.F.clone(),
x: Some(u_i1_x), x: Some(u_i1_x),
@ -1030,7 +1008,7 @@ where
} = ivc_proof; } = ivc_proof;
let (pp, vp) = params; let (pp, vp) = params;
let f_circuit = FC::new(fcircuit_params).unwrap();
let f_circuit = FC::new(fcircuit_params)?;
let augmented_f_circuit = AugmentedFCircuit::<C1, C2, GC2, FC, MU, NU>::empty( let augmented_f_circuit = AugmentedFCircuit::<C1, C2, GC2, FC, MU, NU>::empty(
&pp.poseidon_config, &pp.poseidon_config,
f_circuit.clone(), f_circuit.clone(),

+ 19
- 12
folding-schemes/src/folding/hypernova/nimfs.rs

@ -31,28 +31,34 @@ pub struct NIMFSProof {
pub sigmas_thetas: SigmasThetas<C::ScalarField>, pub sigmas_thetas: SigmasThetas<C::ScalarField>,
} }
impl<C: CurveGroup> Dummy<(&CCS<CF1<C>>, usize, usize)> for NIMFSProof<C> {
fn dummy((ccs, mu, nu): (&CCS<CF1<C>>, usize, usize)) -> Self {
impl<C: CurveGroup> Dummy<(usize, usize, usize, usize)> for NIMFSProof<C> {
fn dummy((s, t, mu, nu): (usize, usize, usize, usize)) -> Self {
// use 'C::ScalarField::one()' instead of 'zero()' to enforce the NIMFSProof to have the
// same in-circuit representation to match the number of constraints of an actual proof.
NIMFSProof::<C> { NIMFSProof::<C> {
sc_proof: SumCheckProof::<C::ScalarField> { sc_proof: SumCheckProof::<C::ScalarField> {
point: vec![C::ScalarField::one(); ccs.s],
point: vec![C::ScalarField::one(); s],
proofs: vec![ proofs: vec![
IOPProverMessage { IOPProverMessage {
coeffs: vec![C::ScalarField::one(); ccs.t + 1]
coeffs: vec![C::ScalarField::one(); t + 1]
}; };
ccs.s
s
], ],
}, },
sigmas_thetas: SigmasThetas( sigmas_thetas: SigmasThetas(
vec![vec![C::ScalarField::one(); ccs.t]; mu],
vec![vec![C::ScalarField::one(); ccs.t]; nu],
vec![vec![C::ScalarField::one(); t]; mu],
vec![vec![C::ScalarField::one(); t]; nu],
), ),
} }
} }
} }
impl<C: CurveGroup> Dummy<(&CCS<CF1<C>>, usize, usize)> for NIMFSProof<C> {
fn dummy((ccs, mu, nu): (&CCS<CF1<C>>, usize, usize)) -> Self {
NIMFSProof::dummy((ccs.s, ccs.t, mu, nu))
}
}
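// A hedged usage sketch (illustrative, not part of this PR): at setup time the decider can
// synthesize its circuit without a real proof by allocating a dummy one of the right shape,
// with mu = nu = 1 since the decider's last fold is 2-to-1.
fn dummy_decider_proof<C: CurveGroup>(ccs: &CCS<CF1<C>>) -> NIMFSProof<C> {
    // equivalent to NIMFSProof::<C>::dummy((ccs.s, ccs.t, 1, 1))
    NIMFSProof::<C>::dummy((ccs, 1, 1))
}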
#[derive(Clone, Debug, Eq, PartialEq)] #[derive(Clone, Debug, Eq, PartialEq)]
pub struct SigmasThetas<F: PrimeField>(pub Vec<Vec<F>>, pub Vec<Vec<F>>); pub struct SigmasThetas<F: PrimeField>(pub Vec<Vec<F>>, pub Vec<Vec<F>>);
@ -67,7 +73,6 @@ pub struct NIMFS> {
impl<C: CurveGroup, T: Transcript<C::ScalarField>> NIMFS<C, T> impl<C: CurveGroup, T: Transcript<C::ScalarField>> NIMFS<C, T>
where where
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
C::BaseField: PrimeField,
{ {
pub fn fold( pub fn fold(
lcccs: &[LCCCS<C>], lcccs: &[LCCCS<C>],
@ -258,8 +263,9 @@ where
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho"); let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
transcript.absorb(&rho_scalar); transcript.absorb(&rho_scalar);
let rho_bits: Vec<bool> = transcript.get_challenge_nbits(NOVA_N_BITS_RO); let rho_bits: Vec<bool> = transcript.get_challenge_nbits(NOVA_N_BITS_RO);
let rho: C::ScalarField =
C::ScalarField::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap();
let rho: C::ScalarField = C::ScalarField::from(
<C::ScalarField as PrimeField>::BigInt::from_bits_le(&rho_bits),
);
// Step 7: Create the folded instance
let folded_lcccs = Self::fold(
@ -377,8 +383,9 @@ where
let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho"); let rho_scalar = C::ScalarField::from_le_bytes_mod_order(b"rho");
transcript.absorb(&rho_scalar); transcript.absorb(&rho_scalar);
let rho_bits: Vec<bool> = transcript.get_challenge_nbits(NOVA_N_BITS_RO); let rho_bits: Vec<bool> = transcript.get_challenge_nbits(NOVA_N_BITS_RO);
let rho: C::ScalarField =
C::ScalarField::from_bigint(BigInteger::from_bits_le(&rho_bits)).unwrap();
let rho: C::ScalarField = C::ScalarField::from(
<C::ScalarField as PrimeField>::BigInt::from_bits_le(&rho_bits),
);
// Step 7: Compute the folded instance
Ok(Self::fold(

+ 4
- 20
folding-schemes/src/folding/nova/circuits.rs

@ -11,7 +11,6 @@ use ark_r1cs_std::{
boolean::Boolean, boolean::Boolean,
eq::EqGadget, eq::EqGadget,
fields::{fp::FpVar, FieldVar}, fields::{fp::FpVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar, prelude::CurveVar,
uint8::UInt8, uint8::UInt8,
R1CSVar, ToConstraintFieldGadget, R1CSVar, ToConstraintFieldGadget,
@ -73,11 +72,7 @@ where
} }
} }
impl<C> AbsorbGadget<C::ScalarField> for CommittedInstanceVar<C>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
impl<C: CurveGroup> AbsorbGadget<C::ScalarField> for CommittedInstanceVar<C> {
fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> { fn to_sponge_bytes(&self) -> Result<Vec<UInt8<C::ScalarField>>, SynthesisError> {
FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?) FpVar::batch_to_sponge_bytes(&self.to_sponge_field_elements()?)
} }
@ -124,11 +119,7 @@ pub struct NIFSGadget {
_c: PhantomData<C>, _c: PhantomData<C>,
} }
impl<C: CurveGroup> NIFSGadget<C>
where
C: CurveGroup,
<C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
{
impl<C: CurveGroup> NIFSGadget<C> {
pub fn fold_committed_instance( pub fn fold_committed_instance(
r: FpVar<CF1<C>>, r: FpVar<CF1<C>>,
ci1: CommittedInstanceVar<C>, // U_i ci1: CommittedInstanceVar<C>, // U_i
@ -174,8 +165,6 @@ pub struct ChallengeGadget {
} }
impl<C: CurveGroup, CI: Absorb> ChallengeGadget<C, CI> impl<C: CurveGroup, CI: Absorb> ChallengeGadget<C, CI>
where where
C: CurveGroup,
<C as CurveGroup>::BaseField: PrimeField,
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
{ {
pub fn get_challenge_native<T: Transcript<C::ScalarField>>( pub fn get_challenge_native<T: Transcript<C::ScalarField>>(
@ -232,9 +221,7 @@ pub struct AugmentedFCircuit<
C2: CurveGroup, C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>, GC2: CurveVar<C2, CF2<C2>>,
FC: FCircuit<CF1<C1>>, FC: FCircuit<CF1<C1>>,
> where
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{
> {
pub(super) _gc2: PhantomData<GC2>, pub(super) _gc2: PhantomData<GC2>,
pub(super) poseidon_config: PoseidonConfig<CF1<C1>>, pub(super) poseidon_config: PoseidonConfig<CF1<C1>>,
pub(super) pp_hash: Option<CF1<C1>>, pub(super) pp_hash: Option<CF1<C1>>,
@ -264,8 +251,6 @@ pub struct AugmentedFCircuit<
impl<C1: CurveGroup, C2: CurveGroup, GC2: CurveVar<C2, CF2<C2>>, FC: FCircuit<CF1<C1>>> impl<C1: CurveGroup, C2: CurveGroup, GC2: CurveVar<C2, CF2<C2>>, FC: FCircuit<CF1<C1>>>
AugmentedFCircuit<C1, C2, GC2, FC> AugmentedFCircuit<C1, C2, GC2, FC>
where
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
pub fn empty(poseidon_config: &PoseidonConfig<CF1<C1>>, F_circuit: FC) -> Self { pub fn empty(poseidon_config: &PoseidonConfig<CF1<C1>>, F_circuit: FC) -> Self {
Self { Self {
@ -306,7 +291,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> { fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let pp_hash = FpVar::<CF1<C1>>::new_witness(cs.clone(), || { let pp_hash = FpVar::<CF1<C1>>::new_witness(cs.clone(), || {
@ -508,7 +492,7 @@ where
// Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1}) // Non-base case: u_{i+1}.x[1] == H(cf_U_{i+1})
let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?; let (cf_u_i1_x, _) = cf_U_i1.clone().hash(&sponge, pp_hash.clone())?;
let (cf_u_i1_x_base, _) = let (cf_u_i1_x_base, _) =
CycleFoldCommittedInstanceVar::new_constant(cs.clone(), cf_u_dummy)?
CycleFoldCommittedInstanceVar::<C2, GC2>::new_constant(cs.clone(), cf_u_dummy)?
.hash(&sponge, pp_hash)?; .hash(&sponge, pp_hash)?;
let cf_x = FpVar::new_input(cs.clone(), || { let cf_x = FpVar::new_input(cs.clone(), || {
Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?)) Ok(self.cf_x.unwrap_or(cf_u_i1_x_base.value()?))

+ 110
- 179
folding-schemes/src/folding/nova/decider.rs

@ -2,8 +2,8 @@
/// DeciderEth from decider_eth.rs file.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, Absorb, CryptographicSponge};
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget}; use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
@ -13,18 +13,16 @@ use ark_std::{One, Zero};
use core::marker::PhantomData; use core::marker::PhantomData;
use super::decider_circuits::{DeciderCircuit1, DeciderCircuit2}; use super::decider_circuits::{DeciderCircuit1, DeciderCircuit2};
use super::{
nifs::{nova::NIFS, NIFSTrait},
CommittedInstance, Nova,
};
use super::decider_eth_circuit::DeciderNovaGadget;
use super::Nova;
use crate::commitment::CommitmentScheme; use crate::commitment::CommitmentScheme;
use crate::folding::circuits::decider::DeciderEnabledNIFS;
use crate::folding::circuits::{ use crate::folding::circuits::{
cyclefold::CycleFoldCommittedInstance,
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
cyclefold::{CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar},
CF2, CF2,
}; };
use crate::folding::traits::{CommittedInstanceOps, Inputize, WitnessOps};
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::Error; use crate::Error;
use crate::{Decider as DeciderTrait, FoldingScheme}; use crate::{Decider as DeciderTrait, FoldingScheme};
@ -45,8 +43,9 @@ where
// cmT and r are values for the last fold, U_{i+1}=NIFS.V(r, U_i, u_i, cmT), and they are
// checked in-circuit
cmT: C1,
r: C1::ScalarField,
// cyclefold committed instance
cf_U_i: CycleFoldCommittedInstance<C2>,
cf_U_final: CycleFoldCommittedInstance<C2>,
// the CS challenges are provided by the prover, but in-circuit they are checked to match the
// in-circuit computed ones.
cs1_challenges: [C1::ScalarField; 2],
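// A hedged sketch (illustrative, not this crate's API) of what the verifier recomputes from
// `cmT` and `r`: the group part of U_{i+1} is folded natively, outside the SNARKs, following
// Nova's NIFS.V (done via `DeciderNovaGadget::fold_group_elements_native` in `verify` below).
// The incoming instance is fresh, so its cmE is zero and only the cross-term commitment cmT
// contributes to the folded error commitment.
fn fold_nova_commitments<C: CurveGroup>(
    U_cmW: C,
    U_cmE: C,
    u_cmW: C,
    cmT: C,
    r: C::ScalarField,
) -> (C, C) {
    (U_cmW + u_cmW * r, U_cmE + cmT * r)
}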
@ -147,22 +146,21 @@ where
S2::VerifyingKey, S2::VerifyingKey,
>; >;
type PublicInput = Vec<C1::ScalarField>; type PublicInput = Vec<C1::ScalarField>;
type CommittedInstance = CommittedInstance<C1>;
type CommittedInstance = Vec<C1>;
fn preprocess( fn preprocess(
mut rng: impl RngCore + CryptoRng, mut rng: impl RngCore + CryptoRng,
prep_param: Self::PreprocessorParam, prep_param: Self::PreprocessorParam,
fs: FS, fs: FS,
) -> Result<(Self::ProverParam, Self::VerifierParam), Error> { ) -> Result<(Self::ProverParam, Self::VerifierParam), Error> {
let circuit1 = DeciderCircuit1::<C1, C2, GC2>::from_nova::<GC1, CS1, CS2, false, FC>(
fs.clone().into(),
)?;
let circuit2 =
DeciderCircuit2::<C1, GC1, C2>::from_nova::<GC2, CS1, CS2, false, FC>(fs.into())?;
let circuit1 = DeciderCircuit1::<C1, C2, GC2>::try_from(Nova::from(fs.clone()))?;
let circuit2 = DeciderCircuit2::<C2>::try_from(Nova::from(fs))?;
// get the Groth16 specific setup for the circuits
let (c1_g16_pk, c1_g16_vk) = S1::circuit_specific_setup(circuit1, &mut rng).unwrap();
let (c2_g16_pk, c2_g16_vk) = S2::circuit_specific_setup(circuit2, &mut rng).unwrap();
let (c1_g16_pk, c1_g16_vk) = S1::circuit_specific_setup(circuit1, &mut rng)
.map_err(|e| Error::SNARKSetupFail(e.to_string()))?;
let (c2_g16_pk, c2_g16_vk) = S2::circuit_specific_setup(circuit2, &mut rng)
.map_err(|e| Error::SNARKSetupFail(e.to_string()))?;
// get the FoldingScheme prover & verifier params from Nova
#[allow(clippy::type_complexity)]
@ -200,76 +198,57 @@ where
pp: Self::ProverParam, pp: Self::ProverParam,
fs: FS, fs: FS,
) -> Result<Self::Proof, Error> { ) -> Result<Self::Proof, Error> {
let circuit1 = DeciderCircuit1::<C1, C2, GC2>::from_nova::<GC1, CS1, CS2, false, FC>(
fs.clone().into(),
)?;
let circuit2 =
DeciderCircuit2::<C1, GC1, C2>::from_nova::<GC2, CS1, CS2, false, FC>(fs.into())?;
let c1_snark_proof = S1::prove(&pp.c1_snark_pp, circuit1.clone(), &mut rng)
let circuit1 = DeciderCircuit1::<C1, C2, GC2>::try_from(Nova::from(fs.clone()))?;
let circuit2 = DeciderCircuit2::<C2>::try_from(Nova::from(fs))?;
let cmT = circuit1.proof;
let r = circuit1.randomness;
let cf_U_final = circuit1.cf_U_i.clone();
let c1_kzg_challenges = circuit1.kzg_challenges.clone();
let c1_kzg_proofs = circuit1
.W_i1
.get_openings()
.iter()
.zip(&c1_kzg_challenges)
.map(|((v, _), &c)| {
CS1::prove_with_challenge(&pp.c1_cs_pp, c, v, &C1::ScalarField::zero(), None)
})
.collect::<Result<Vec<_>, _>>()?;
let c2_kzg_challenges = circuit2.kzg_challenges.clone();
let c2_kzg_proofs = circuit2
.cf_W_i
.get_openings()
.iter()
.zip(&c2_kzg_challenges)
.map(|((v, _), &c)| {
CS2::prove_with_challenge(&pp.c2_cs_pp, c, v, &C2::ScalarField::zero(), None)
})
.collect::<Result<Vec<_>, _>>()?;
let c1_snark_proof = S1::prove(&pp.c1_snark_pp, circuit1, &mut rng)
.map_err(|e| Error::Other(e.to_string()))?; .map_err(|e| Error::Other(e.to_string()))?;
let c2_snark_proof = S2::prove(&pp.c2_snark_pp, circuit2.clone(), &mut rng)
let c2_snark_proof = S2::prove(&pp.c2_snark_pp, circuit2, &mut rng)
.map_err(|e| Error::Other(e.to_string()))?; .map_err(|e| Error::Other(e.to_string()))?;
let cmT = circuit1.cmT.unwrap();
let W_i1 = circuit1.W_i1.unwrap();
let cf_W_i = circuit2.cf_W_i.unwrap();
// get the challenges that have been already computed when preparing the circuits inputs in
// the above `from_nova` calls
let challenge_W = circuit1
.cs_c_W
.ok_or(Error::MissingValue("cs_c_W".to_string()))?;
let challenge_E = circuit1
.cs_c_E
.ok_or(Error::MissingValue("cs_c_E".to_string()))?;
let c2_challenge_W = circuit2
.cs_c_W
.ok_or(Error::MissingValue("c2's cs_c_W".to_string()))?;
let c2_challenge_E = circuit2
.cs_c_E
.ok_or(Error::MissingValue("c2's cs_c_E".to_string()))?;
// generate CommitmentScheme proofs for the main instance
let U_cmW_proof = CS1::prove_with_challenge(
&pp.c1_cs_pp,
challenge_W,
&W_i1.W,
&C1::ScalarField::zero(),
None,
)?;
let U_cmE_proof = CS1::prove_with_challenge(
&pp.c1_cs_pp,
challenge_E,
&W_i1.E,
&C1::ScalarField::zero(),
None,
)?;
// CS proofs for the CycleFold instance
let cf_cmW_proof = CS2::prove_with_challenge(
&pp.c2_cs_pp,
c2_challenge_W,
&cf_W_i.W,
&C2::ScalarField::zero(),
None,
)?;
let cf_cmE_proof = CS2::prove_with_challenge(
&pp.c2_cs_pp,
c2_challenge_E,
&cf_W_i.E,
&C2::ScalarField::zero(),
None,
)?;
Ok(Self::Proof { Ok(Self::Proof {
c1_snark_proof, c1_snark_proof,
c2_snark_proof, c2_snark_proof,
cs1_proofs: [U_cmW_proof, U_cmE_proof],
cs2_proofs: [cf_cmW_proof, cf_cmE_proof],
cs1_proofs: c1_kzg_proofs
.try_into()
.map_err(|e: Vec<_>| Error::NotExpectedLength(e.len(), 2))?,
cs2_proofs: c2_kzg_proofs
.try_into()
.map_err(|e: Vec<_>| Error::NotExpectedLength(e.len(), 2))?,
cmT, cmT,
cf_U_i: circuit1.cf_U_i.unwrap(),
cs1_challenges: [challenge_W, challenge_E],
cs2_challenges: [c2_challenge_W, c2_challenge_E],
r,
cf_U_final,
cs1_challenges: c1_kzg_challenges
.try_into()
.map_err(|e: Vec<_>| Error::NotExpectedLength(e.len(), 2))?,
cs2_challenges: c2_kzg_challenges
.try_into()
.map_err(|e: Vec<_>| Error::NotExpectedLength(e.len(), 2))?,
}) })
} }
@ -278,72 +257,38 @@ where
i: C1::ScalarField, i: C1::ScalarField,
z_0: Vec<C1::ScalarField>, z_0: Vec<C1::ScalarField>,
z_i: Vec<C1::ScalarField>, z_i: Vec<C1::ScalarField>,
running_instance: &Self::CommittedInstance,
incoming_instance: &Self::CommittedInstance,
// we don't use the instances at the verifier level, since we check them in-circuit
running_commitments: &Self::CommittedInstance,
incoming_commitments: &Self::CommittedInstance,
proof: &Self::Proof, proof: &Self::Proof,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
if i <= C1::ScalarField::one() { if i <= C1::ScalarField::one() {
return Err(Error::NotEnoughSteps); return Err(Error::NotEnoughSteps);
} }
// compute U = U_{d+1}= NIFS.V(U_d, u_d, cmT)
let poseidon_config = poseidon_canonical_config::<C1::ScalarField>();
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&poseidon_config);
let (U, r_bits) = NIFS::<C1, CS1, PoseidonSponge<C1::ScalarField>>::verify(
&mut transcript,
vp.pp_hash,
running_instance,
incoming_instance,
&proof.cmT,
// 6.2. Fold the commitments
let U_final_commitments = DeciderNovaGadget::fold_group_elements_native(
running_commitments,
incoming_commitments,
Some(proof.cmT),
proof.r,
)?; )?;
let r = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?;
let (cmE_x, cmE_y) = NonNativeAffineVar::inputize(U.cmE)?;
let (cmW_x, cmW_y) = NonNativeAffineVar::inputize(U.cmW)?;
let (cmT_x, cmT_y) = NonNativeAffineVar::inputize(proof.cmT)?;
let zero = (&C2::BaseField::zero(), &C2::BaseField::zero());
let cmE_affine = proof.cf_U_i.cmE.into_affine();
let cmW_affine = proof.cf_U_i.cmW.into_affine();
let (cf_cmE_x, cf_cmE_y) = cmE_affine.xy().unwrap_or(zero);
let cf_cmE_z = C1::ScalarField::one();
let (cf_cmW_x, cf_cmW_y) = cmW_affine.xy().unwrap_or(zero);
let cf_cmW_z = C1::ScalarField::one();
let cf_U = proof.cf_U_final.clone();
// snark proof 1
let c1_public_input: Vec<C1::ScalarField> = [
vec![vp.pp_hash, i],
z_0,
z_i,
// U_{i+1} values:
vec![U.u],
U.x.clone(),
cmE_x,
cmE_y,
cmW_x,
cmW_y,
// CS1 values:
proof.cs1_challenges.to_vec(), // c_W, c_E
vec![
proof.cs1_proofs[0].eval, // eval_W
proof.cs1_proofs[1].eval, // eval_E
],
// cf_U_i values
NonNativeUintVar::<CF2<C2>>::inputize(proof.cf_U_i.u),
proof
.cf_U_i
.x
let c1_public_input = [
&[vp.pp_hash, i][..],
&z_0,
&z_i,
&U_final_commitments
.iter() .iter()
.flat_map(|&x_i| NonNativeUintVar::<CF2<C2>>::inputize(x_i))
.collect::<Vec<C1::ScalarField>>(),
vec![
*cf_cmE_x, *cf_cmE_y, cf_cmE_z, *cf_cmW_x, *cf_cmW_y, cf_cmW_z,
],
// NIFS values:
cmT_x,
cmT_y,
vec![r],
.flat_map(|c| c.inputize())
.collect::<Vec<_>>(),
&Inputize::<CF2<C2>, CycleFoldCommittedInstanceVar<C2, GC2>>::inputize(&cf_U),
&proof.cs1_challenges,
&proof.cs1_proofs.iter().map(|p| p.eval).collect::<Vec<_>>(),
&proof.cmT.inputize(),
&[proof.r],
] ]
.concat(); .concat();
@ -353,26 +298,15 @@ where
return Err(Error::SNARKVerificationFail); return Err(Error::SNARKVerificationFail);
} }
let (cf2_cmE_x, cf2_cmE_y) = NonNativeAffineVar::inputize(proof.cf_U_i.cmE)?;
let (cf2_cmW_x, cf2_cmW_y) = NonNativeAffineVar::inputize(proof.cf_U_i.cmW)?;
// snark proof 2
// migrate pp_hash from C1::Fr to C1::Fq
let pp_hash_Fq =
C2::ScalarField::from_le_bytes_mod_order(&vp.pp_hash.into_bigint().to_bytes_le());
let c2_public_input: Vec<C2::ScalarField> = [
vec![pp_hash_Fq],
vec![proof.cf_U_i.u],
proof.cf_U_i.x.clone(),
cf2_cmE_x,
cf2_cmE_y,
cf2_cmW_x,
cf2_cmW_y,
proof.cs2_challenges.to_vec(),
vec![
proof.cs2_proofs[0].eval, // eval_W
proof.cs2_proofs[1].eval, // eval_E
],
&[pp_hash_Fq][..],
&cf_U.inputize(),
&proof.cs2_challenges,
&proof.cs2_proofs.iter().map(|p| p.eval).collect::<Vec<_>>(),
] ]
.concat(); .concat();
@ -382,33 +316,24 @@ where
return Err(Error::SNARKVerificationFail); return Err(Error::SNARKVerificationFail);
} }
// check C1 commitments (main instance commitments)
CS1::verify_with_challenge(
&vp.c1_cs_vp,
proof.cs1_challenges[0],
&U.cmW,
&proof.cs1_proofs[0],
)?;
CS1::verify_with_challenge(
&vp.c1_cs_vp,
proof.cs1_challenges[1],
&U.cmE,
&proof.cs1_proofs[1],
)?;
// 7.3. check C1 commitments (main instance commitments)
for ((cm, &c), pi) in U_final_commitments
.iter()
.zip(&proof.cs1_challenges)
.zip(&proof.cs1_proofs)
{
CS1::verify_with_challenge(&vp.c1_cs_vp, c, cm, pi)?;
}
// check C2 commitments (CycleFold instance commitments)
CS2::verify_with_challenge(
&vp.c2_cs_vp,
proof.cs2_challenges[0],
&proof.cf_U_i.cmW,
&proof.cs2_proofs[0],
)?;
CS2::verify_with_challenge(
&vp.c2_cs_vp,
proof.cs2_challenges[1],
&proof.cf_U_i.cmE,
&proof.cs2_proofs[1],
)?;
// 4.3. check C2 commitments (CycleFold instance commitments)
for ((cm, &c), pi) in cf_U
.get_commitments()
.iter()
.zip(&proof.cs2_challenges)
.zip(&proof.cs2_proofs)
{
CS2::verify_with_challenge(&vp.c2_cs_vp, c, cm, pi)?;
}
Ok(true) Ok(true)
} }
@ -494,7 +419,13 @@ pub mod tests {
// decider proof verification
let start = Instant::now();
let verified = D::verify(
decider_vp, nova.i, nova.z_0, nova.z_i, &nova.U_i, &nova.u_i, &proof,
decider_vp,
nova.i,
nova.z_0,
nova.z_i,
&nova.U_i.get_commitments(),
&nova.u_i.get_commitments(),
&proof,
) )
.unwrap(); .unwrap();
assert!(verified); assert!(verified);

+ 100
- 416
folding-schemes/src/folding/nova/decider_circuits.rs

@ -2,116 +2,63 @@
/// DeciderEthCircuit.
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-offchain.html
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::{CurveGroup, Group};
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, Absorb, CryptographicSponge};
use ark_ec::CurveGroup;
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_poly::Polynomial;
use ark_r1cs_std::{
alloc::AllocVar,
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError};
use ark_std::Zero;
use ark_r1cs_std::{fields::fp::FpVar, prelude::CurveVar, ToConstraintFieldGadget};
use core::marker::PhantomData; use core::marker::PhantomData;
use super::{ use super::{
circuits::{ChallengeGadget, CommittedInstanceVar},
decider_eth_circuit::{
evaluate_gadget, KZGChallengesGadget, R1CSVar, RelaxedR1CSGadget, WitnessVar,
},
decider_eth_circuit::DeciderNovaGadget,
nifs::{nova::NIFS, NIFSTrait}, nifs::{nova::NIFS, NIFSTrait},
CommittedInstance, Nova, Witness, CommittedInstance, Nova, Witness,
}; };
use crate::arith::r1cs::R1CS;
use crate::commitment::CommitmentScheme;
use crate::folding::circuits::{
cyclefold::{
CycleFoldCommittedInstance, CycleFoldCommittedInstanceVar, CycleFoldConfig,
CycleFoldWitness,
},
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
CF1, CF2,
use crate::folding::{
circuits::{CF1, CF2},
traits::WitnessOps,
}; };
use crate::folding::nova::NovaCycleFoldConfig;
use crate::folding::traits::{CommittedInstanceVarOps, Dummy};
use crate::frontend::FCircuit; use crate::frontend::FCircuit;
use crate::utils::vec::poly_from_vec;
use crate::Error; use crate::Error;
use crate::{
arith::r1cs::{circuits::R1CSMatricesVar, R1CS},
folding::circuits::decider::{
off_chain::{GenericOffchainDeciderCircuit1, GenericOffchainDeciderCircuit2},
EvalGadget, KZGChallengesGadget,
},
};
use crate::{commitment::CommitmentScheme, transcript::poseidon::poseidon_canonical_config};
/// Circuit that implements part of the in-circuit checks needed for the offchain verification over
/// the Curve2's BaseField (=Curve1's ScalarField).
#[derive(Clone, Debug)]
pub struct DeciderCircuit1<C1, C2, GC2>
where
C1: CurveGroup,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
{
_c1: PhantomData<C1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
/// E vector's length of the Nova instance witness
pub E_len: usize,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// R1CS of the Augmented Function circuit
pub r1cs: R1CS<C1::ScalarField>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
/// public params hash
pub pp_hash: Option<C1::ScalarField>,
pub i: Option<CF1<C1>>,
/// initial state
pub z_0: Option<Vec<C1::ScalarField>>,
/// current i-th state
pub z_i: Option<Vec<C1::ScalarField>>,
/// Nova instances
pub u_i: Option<CommittedInstance<C1>>,
pub w_i: Option<Witness<C1>>,
pub U_i: Option<CommittedInstance<C1>>,
pub W_i: Option<Witness<C1>>,
pub U_i1: Option<CommittedInstance<C1>>,
pub W_i1: Option<Witness<C1>>,
pub cmT: Option<C1>,
pub r: Option<C1::ScalarField>,
/// CycleFold running instance
pub cf_U_i: Option<CycleFoldCommittedInstance<C2>>,
/// Commitment Scheme challenges
pub cs_c_W: Option<C1::ScalarField>,
pub cs_c_E: Option<C1::ScalarField>,
/// Evaluations of the committed polynomials at the challenge
pub eval_W: Option<C1::ScalarField>,
pub eval_E: Option<C1::ScalarField>,
}
impl<C1, C2, GC2> DeciderCircuit1<C1, C2, GC2>
where
C1: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
{
pub fn from_nova<GC1, CS1, CS2, const H: bool, FC: FCircuit<C1::ScalarField>>(
nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>,
) -> Result<Self, Error>
where
C2: CurveGroup,
pub type DeciderCircuit1<C1, C2, GC2> = GenericOffchainDeciderCircuit1<
C1,
C2,
GC2,
CommittedInstance<C1>,
CommittedInstance<C1>,
Witness<C1>,
R1CS<CF1<C1>>,
R1CSMatricesVar<CF1<C1>, FpVar<CF1<C1>>>,
DeciderNovaGadget,
>;
impl<
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>, GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>, GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, H>, CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>, CS2: CommitmentScheme<C2, H>,
{
const H: bool,
> TryFrom<Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>> for DeciderCircuit1<C1, C2, GC2>
where
CF1<C1>: Absorb,
{
type Error = Error;
fn try_from(nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>) -> Result<Self, Error> {
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&nova.poseidon_config);
// pp_hash is absorbed to transcript at the NIFS::prove call
@ -129,360 +76,98 @@ where
let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits)) let r_Fr = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?; .ok_or(Error::OutOfBounds)?;
// compute the commitment scheme challenges used as inputs in the circuit
let (cs_challenge_W, cs_challenge_E) =
KZGChallengesGadget::<C1>::get_challenges_native(&mut transcript, U_i1.clone());
// compute the KZG challenges used as inputs in the circuit
let kzg_challenges = KZGChallengesGadget::get_challenges_native(&mut transcript, &U_i1);
// get evals of the committed polys at the challenges
let mut W = W_i1.W.clone();
W.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.W.len().next_power_of_two() - W_i1.W.len()),
);
let mut E = W_i1.E.clone();
E.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.E.len().next_power_of_two() - W_i1.E.len()),
);
let p_W = poly_from_vec(W.to_vec())?;
let eval_W = p_W.evaluate(&cs_challenge_W);
let p_E = poly_from_vec(E.to_vec())?;
let eval_E = p_E.evaluate(&cs_challenge_E);
// get KZG evals
let kzg_evaluations = W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| EvalGadget::evaluate_native(v, c))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self { Ok(Self {
_c1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData, _gc2: PhantomData,
E_len: nova.W_i.E.len(),
cf_E_len: nova.cf_W_i.E.len(),
r1cs: nova.r1cs,
_avar: PhantomData,
arith: nova.r1cs,
poseidon_config: nova.poseidon_config, poseidon_config: nova.poseidon_config,
pp_hash: Some(nova.pp_hash),
i: Some(nova.i),
z_0: Some(nova.z_0),
z_i: Some(nova.z_i),
u_i: Some(nova.u_i),
w_i: Some(nova.w_i),
U_i: Some(nova.U_i),
W_i: Some(nova.W_i),
U_i1: Some(U_i1),
W_i1: Some(W_i1),
cmT: Some(cmT),
r: Some(r_Fr),
cf_U_i: Some(nova.cf_U_i),
cs_c_W: Some(cs_challenge_W),
cs_c_E: Some(cs_challenge_E),
eval_W: Some(eval_W),
eval_E: Some(eval_E),
pp_hash: nova.pp_hash,
i: nova.i,
z_0: nova.z_0,
z_i: nova.z_i,
U_i: nova.U_i,
W_i: nova.W_i,
u_i: nova.u_i,
w_i: nova.w_i,
U_i1,
W_i1,
proof: cmT,
randomness: r_Fr,
cf_U_i: nova.cf_U_i,
kzg_challenges,
kzg_evaluations,
}) })
} }
} }
impl<C1, C2, GC2> ConstraintSynthesizer<CF1<C1>> for DeciderCircuit1<C1, C2, GC2>
where
C1: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
C2: CurveGroup,
<C2 as CurveGroup>::BaseField: PrimeField,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let r1cs =
R1CSVar::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
Ok(self.r1cs.clone())
})?;
let pp_hash = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.pp_hash.unwrap_or_else(CF1::<C1>::zero))
})?;
let i =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::<C1>::zero)))?;
let z_0 = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let z_i = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let u_dummy_native = CommittedInstance::<C1>::dummy(&self.r1cs);
let w_dummy_native = Witness::<C1>::dummy(&self.r1cs);
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
})?;
// here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i))
let U_i1 = CommittedInstanceVar::<C1>::new_input(cs.clone(), || {
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
})?;
let W_i1 = WitnessVar::<C1>::new_witness(cs.clone(), || {
Ok(self.W_i1.unwrap_or(w_dummy_native.clone()))
})?;
// allocate the inputs for the check 6
let cs_c_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.cs_c_W.unwrap_or_else(CF1::<C1>::zero))
})?;
let cs_c_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.cs_c_E.unwrap_or_else(CF1::<C1>::zero))
})?;
let eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero))
})?;
let eval_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_E.unwrap_or_else(CF1::<C1>::zero))
})?;
// `sponge` is for digest computation.
let sponge = PoseidonSpongeVar::<C1::ScalarField>::new(cs.clone(), &self.poseidon_config);
// `transcript` is for challenge generation.
let mut transcript = sponge.clone();
// notice that the `pp_hash` is absorbed inside the ChallengeGadget::get_challenge_gadget call
// 2. u_i.cmE==cm(0), u_i.u==1
// Here zero is the x & y coordinates of the zero point affine representation.
let zero = NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::zero())?;
u_i.cmE.x.enforce_equal_unaligned(&zero)?;
u_i.cmE.y.enforce_equal_unaligned(&zero)?;
(u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
// 3.a u_i.x[0] == H(i, z_0, z_i, U_i)
let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
(u_i.x[0]).enforce_equal(&u_i_x)?;
// 3.b u_i.x[1] == H(cf_U_i)
let cf_u_dummy_native =
CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_input(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
(u_i.x[1]).enforce_equal(&cf_u_i_x)?;
// 4. check RelaxedR1CS of U_{i+1}
let z_U1: Vec<FpVar<CF1<C1>>> =
[vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?;
// 1.1.a, 5.1 compute NIFS.V and Commitment Scheme challenges.
// We need to ensure the order of challenge generation is the same as
// the native counterpart, so we first compute the challenges here and
// do the actual checks later.
let cmT =
NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?;
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_gadget(
&mut transcript,
pp_hash,
U_i_vec,
u_i.clone(),
Some(cmT.clone()),
)?;
// 5.1.
let (incircuit_c_W, incircuit_c_E) =
KZGChallengesGadget::<C1>::get_challenges_gadget(&mut transcript, U_i1.clone())?;
incircuit_c_W.enforce_equal(&cs_c_W)?;
incircuit_c_E.enforce_equal(&cs_c_E)?;
// 5.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.W, incircuit_c_W)?;
let incircuit_eval_E = evaluate_gadget::<CF1<C1>>(W_i1.E, incircuit_c_E)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
incircuit_eval_E.enforce_equal(&eval_E)?;
// 1.1.b check that the NIFS.V challenge matches the one from the public input (so we avoid
// the verifier computing it)
let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?;
// check that the in-circuit computed r is equal to the inputted r
let r =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.r.unwrap_or_else(CF1::<C1>::zero)))?;
r_Fr.enforce_equal(&r)?;
Ok(())
}
}
/// Circuit that implements part of the in-circuit checks needed for the offchain verification over
/// the Curve1's BaseField (=Curve2's ScalarField).
#[derive(Clone, Debug)]
pub struct DeciderCircuit2<C1, GC1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
{
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// R1CS of the CycleFold circuit
pub cf_r1cs: R1CS<C2::ScalarField>,
pub poseidon_config: PoseidonConfig<CF1<C2>>,
/// public params hash
pub pp_hash: Option<C2::ScalarField>,
pub type DeciderCircuit2<C2> = GenericOffchainDeciderCircuit2<C2>;
/// CycleFold running instance. Notice that here we use Nova's CommittedInstance (instead of
/// CycleFoldCommittedInstance), since we are over C2::Fr, so that the CycleFold instances can
/// be computed natively
pub cf_U_i: Option<CommittedInstance<C2>>,
pub cf_W_i: Option<CycleFoldWitness<C2>>,
/// Commitment Scheme challenges
pub cs_c_W: Option<C2::ScalarField>,
pub cs_c_E: Option<C2::ScalarField>,
/// Evaluations of the committed polynomials at the challenge
pub eval_W: Option<C2::ScalarField>,
pub eval_E: Option<C2::ScalarField>,
}
impl<C1, GC1, C2> DeciderCircuit2<C1, GC1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as CurveGroup>::BaseField: PrimeField,
<C2 as Group>::ScalarField: Absorb,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
{
pub fn from_nova<GC2, CS1, CS2, const H: bool, FC: FCircuit<C1::ScalarField>>(
nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>,
) -> Result<Self, Error>
where
impl<
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
{
const H: bool,
> TryFrom<Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>> for DeciderCircuit2<C2>
where
CF1<C2>: Absorb,
{
type Error = Error;
fn try_from(nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>) -> Result<Self, Error> {
// compute the Commitment Scheme challenges of the CycleFold instance commitments, used as
// inputs in the circuit
let poseidon_config =
crate::transcript::poseidon::poseidon_canonical_config::<C2::ScalarField>();
let poseidon_config = poseidon_canonical_config::<C2::ScalarField>();
let mut transcript = PoseidonSponge::<C2::ScalarField>::new(&poseidon_config);
let pp_hash_Fq =
C2::ScalarField::from_le_bytes_mod_order(&nova.pp_hash.into_bigint().to_bytes_le());
transcript.absorb(&pp_hash_Fq);
let (cs_challenge_W, cs_challenge_E) =
KZGChallengesGadget::<C2>::get_challenges_native(&mut transcript, nova.cf_U_i.clone());
// compute the KZG challenges used as inputs in the circuit
let kzg_challenges =
KZGChallengesGadget::get_challenges_native(&mut transcript, &nova.cf_U_i);
// get evals of the committed polynomials at the challenge
let mut W = nova.cf_W_i.W.clone();
W.extend(
std::iter::repeat(C2::ScalarField::zero())
.take(nova.cf_W_i.W.len().next_power_of_two() - nova.cf_W_i.W.len()),
);
let mut E = nova.cf_W_i.E.clone();
E.extend(
std::iter::repeat(C2::ScalarField::zero())
.take(nova.cf_W_i.E.len().next_power_of_two() - nova.cf_W_i.E.len()),
);
let p_W = poly_from_vec(W.to_vec())?;
let eval_W = p_W.evaluate(&cs_challenge_W);
let p_E = poly_from_vec(E.to_vec())?;
let eval_E = p_E.evaluate(&cs_challenge_E);
// get KZG evals
let kzg_evaluations = nova
.cf_W_i
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| EvalGadget::evaluate_native(v, c))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
_c1: PhantomData,
_gc1: PhantomData,
_c2: PhantomData,
cf_E_len: nova.cf_W_i.E.len(),
cf_r1cs: nova.cf_r1cs,
cf_arith: nova.cf_r1cs,
poseidon_config,
pp_hash: Some(pp_hash_Fq),
cf_U_i: Some(nova.cf_U_i),
cf_W_i: Some(nova.cf_W_i),
// CycleFold instance commitments challenges
cs_c_W: Some(cs_challenge_W),
cs_c_E: Some(cs_challenge_E),
eval_W: Some(eval_W),
eval_E: Some(eval_E),
pp_hash: pp_hash_Fq,
cf_U_i: nova.cf_U_i,
cf_W_i: nova.cf_W_i,
kzg_challenges,
kzg_evaluations,
})
}
}
impl<C1, GC1, C2> ConstraintSynthesizer<CF1<C2>> for DeciderCircuit2<C1, GC1, C2>
where
C1: CurveGroup,
C2: CurveGroup,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C2>>) -> Result<(), SynthesisError> {
let pp_hash = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.pp_hash.unwrap_or_else(CF1::<C2>::zero))
})?;
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(&self.cf_r1cs);
let w_dummy_native = Witness::<C2>::dummy(&self.cf_r1cs);
let cf_U_i = CommittedInstanceVar::<C2>::new_input(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_W_i = WitnessVar::<C2>::new_witness(cs.clone(), || {
Ok(self.cf_W_i.unwrap_or(w_dummy_native.clone()))
})?;
let cf_r1cs =
R1CSVar::<C2::ScalarField, CF1<C2>, FpVar<CF1<C2>>>::new_witness(cs.clone(), || {
Ok(self.cf_r1cs.clone())
})?;
// 6. check RelaxedR1CS of cf_U_i
let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(cf_r1cs, cf_W_i.E.clone(), cf_U_i.u.clone(), cf_z_U)?;
// `transcript` is for challenge generation.
let mut transcript =
PoseidonSpongeVar::<C2::ScalarField>::new(cs.clone(), &self.poseidon_config);
transcript.absorb(&pp_hash)?;
// allocate the inputs for the check 7.1
let cs_c_W = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.cs_c_W.unwrap_or_else(CF1::<C2>::zero))
})?;
let cs_c_E = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.cs_c_E.unwrap_or_else(CF1::<C2>::zero))
})?;
// allocate the inputs for the check 7.2
let eval_W = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C2>::zero))
})?;
let eval_E = FpVar::<CF1<C2>>::new_input(cs.clone(), || {
Ok(self.eval_E.unwrap_or_else(CF1::<C2>::zero))
})?;
// 7.1. check the commitment scheme challenges correct computation
let (incircuit_c_W, incircuit_c_E) =
KZGChallengesGadget::<C2>::get_challenges_gadget(&mut transcript, cf_U_i.clone())?;
incircuit_c_W.enforce_equal(&cs_c_W)?;
incircuit_c_E.enforce_equal(&cs_c_E)?;
// 7.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
let incircuit_eval_W = evaluate_gadget::<CF1<C2>>(cf_W_i.W, incircuit_c_W)?;
let incircuit_eval_E = evaluate_gadget::<CF1<C2>>(cf_W_i.E, incircuit_c_E)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
incircuit_eval_E.enforce_equal(&eval_E)?;
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_relations::r1cs::ConstraintSystem;
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use super::*;
@ -530,9 +215,8 @@ pub mod tests {
// load the DeciderCircuit 1 & 2 from the Nova instance
let decider_circuit1 =
DeciderCircuit1::<Projective, Projective2, GVar2>::from_nova(nova.clone()).unwrap();
let decider_circuit2 =
DeciderCircuit2::<Projective, GVar, Projective2>::from_nova(nova).unwrap();
DeciderCircuit1::<Projective, Projective2, GVar2>::try_from(nova.clone()).unwrap();
let decider_circuit2 = DeciderCircuit2::<Projective2>::try_from(nova).unwrap();
// generate the constraints of both circuits and check that they are satisfied by the inputs
let cs1 = ConstraintSystem::<Fr>::new_ref();

+ 102
- 156
folding-schemes/src/folding/nova/decider_eth.rs

@ -3,11 +3,11 @@
/// More details can be found at the documentation page:
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
use ark_bn254::Bn254;
use ark_crypto_primitives::sponge::{poseidon::PoseidonSponge, Absorb, CryptographicSponge};
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField};
use ark_groth16::Groth16;
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_snark::SNARK;
use ark_std::rand::{CryptoRng, RngCore};
@ -15,37 +15,36 @@ use ark_std::{One, Zero};
use core::marker::PhantomData;
pub use super::decider_eth_circuit::DeciderEthCircuit;
use super::{
nifs::{nova::NIFS, NIFSTrait},
CommittedInstance, Nova,
};
use super::decider_eth_circuit::DeciderNovaGadget;
use super::{CommittedInstance, Nova};
use crate::commitment::{
kzg::{Proof as KZGProof, KZG},
pedersen::Params as PedersenParams,
CommitmentScheme,
};
use crate::folding::circuits::{nonnative::affine::NonNativeAffineVar, CF2};
use crate::folding::nova::circuits::ChallengeGadget;
use crate::folding::circuits::decider::DeciderEnabledNIFS;
use crate::folding::circuits::CF2;
use crate::folding::traits::{Inputize, WitnessOps};
use crate::frontend::FCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::Error;
use crate::{Decider as DeciderTrait, FoldingScheme};
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct Proof<C1, CS1, S>
pub struct Proof<C, CS, S>
where
C1: CurveGroup,
CS1: CommitmentScheme<C1, ProverChallenge = C1::ScalarField, Challenge = C1::ScalarField>,
S: SNARK<C1::ScalarField>,
C: CurveGroup,
CS: CommitmentScheme<C, ProverChallenge = C::ScalarField, Challenge = C::ScalarField>,
S: SNARK<C::ScalarField>,
{
snark_proof: S::Proof,
kzg_proofs: [CS1::Proof; 2],
kzg_proofs: [CS::Proof; 2],
// cmT and r are values for the last fold, U_{i+1}=NIFS.V(r, U_i, u_i, cmT), and they are
// checked in-circuit
cmT: C1,
cmT: C,
r: C::ScalarField,
// the KZG challenges are provided by the prover, but in-circuit they are checked to match
// the in-circuit computed ones.
kzg_challenges: [C1::ScalarField; 2],
kzg_challenges: [C::ScalarField; 2],
}
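The comment in the struct above pins down what `cmT` and `r` encode: the verifier re-derives the commitments of U_{i+1} as cmW_{i+1} = U_i.cmW + r * u_i.cmW and cmE_{i+1} = U_i.cmE + r * cmT (an incoming instance has cmE = 0). A minimal sketch of that step with plain arkworks group arithmetic; the helper name and argument order here are illustrative, not part of this PR:

use ark_ec::CurveGroup;

// Sketch of the commitment-level part of the last fold U_{i+1} = NIFS.V(r, U_i, u_i, cmT).
fn fold_last_step_commitments<C: CurveGroup>(
    running_cmW: C,  // U_i.cmW
    running_cmE: C,  // U_i.cmE
    incoming_cmW: C, // u_i.cmW (u_i.cmE is zero for an incoming instance, so it does not appear)
    cmT: C,
    r: C::ScalarField,
) -> (C, C) {
    // cmW_{i+1} = U_i.cmW + r * u_i.cmW ; cmE_{i+1} = U_i.cmE + r * cmT
    (running_cmW + incoming_cmW * r, running_cmE + cmT * r)
}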
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
@ -98,8 +97,6 @@ where
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC1: GroupOpsBounds<'b, C1, GC1>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
// constrain FS into Nova, since this is a Decider specifically for Nova
Nova<C1, GC1, C2, GC2, FC, CS1, CS2, false>: From<FS>,
crate::folding::nova::ProverParams<C1, C2, CS1, CS2, false>:
@ -112,18 +109,18 @@ where
type Proof = Proof<C1, CS1, S>;
type VerifierParam = VerifierParam<C1, CS1::VerifierParams, S::VerifyingKey>;
type PublicInput = Vec<C1::ScalarField>;
type CommittedInstance = CommittedInstance<C1>;
type CommittedInstance = Vec<C1>;
fn preprocess(
mut rng: impl RngCore + CryptoRng,
prep_param: Self::PreprocessorParam,
fs: FS,
) -> Result<(Self::ProverParam, Self::VerifierParam), Error> {
let circuit =
DeciderEthCircuit::<C1, GC1, C2, GC2, CS1, CS2>::from_nova::<FC>(fs.into()).unwrap();
let circuit = DeciderEthCircuit::<C1, C2, GC2>::try_from(Nova::from(fs))?;
// get the Groth16 specific setup for the circuit
let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng).unwrap();
let (g16_pk, g16_vk) = S::circuit_specific_setup(circuit, &mut rng)
.map_err(|e| Error::SNARKSetupFail(e.to_string()))?;
// get the FoldingScheme prover & verifier params from Nova
#[allow(clippy::type_complexity)]
@ -156,46 +153,39 @@ where
) -> Result<Self::Proof, Error> {
let (snark_pk, cs_pk): (S::ProvingKey, CS1::ProverParams) = pp;
let circuit = DeciderEthCircuit::<C1, GC1, C2, GC2, CS1, CS2>::from_nova::<FC>(
folding_scheme.into(),
)?;
let circuit = DeciderEthCircuit::<C1, C2, GC2>::try_from(Nova::from(folding_scheme))?;
let snark_proof = S::prove(&snark_pk, circuit.clone(), &mut rng)
.map_err(|e| Error::Other(e.to_string()))?;
let cmT = circuit.cmT.unwrap();
let W_i1 = circuit.W_i1.unwrap();
let cmT = circuit.proof;
let r = circuit.randomness;
// get the challenges that have been already computed when preparing the circuit inputs in
// the above `from_nova` call
let challenge_W = circuit
.kzg_c_W
.ok_or(Error::MissingValue("kzg_c_W".to_string()))?;
let challenge_E = circuit
.kzg_c_E
.ok_or(Error::MissingValue("kzg_c_E".to_string()))?;
// the above `try_from` call
let kzg_challenges = circuit.kzg_challenges.clone();
// generate KZG proofs
let U_cmW_proof = CS1::prove_with_challenge(
&cs_pk,
challenge_W,
&W_i1.W,
&C1::ScalarField::zero(),
None,
)?;
let U_cmE_proof = CS1::prove_with_challenge(
&cs_pk,
challenge_E,
&W_i1.E,
&C1::ScalarField::zero(),
None,
)?;
let kzg_proofs = circuit
.W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| {
CS1::prove_with_challenge(&cs_pk, c, v, &C1::ScalarField::zero(), None)
})
.collect::<Result<Vec<_>, _>>()?;
let snark_proof =
S::prove(&snark_pk, circuit, &mut rng).map_err(|e| Error::Other(e.to_string()))?;
Ok(Self::Proof {
snark_proof,
kzg_proofs: [U_cmW_proof, U_cmE_proof],
cmT,
kzg_challenges: [challenge_W, challenge_E],
r,
kzg_proofs: kzg_proofs
.try_into()
.map_err(|e: Vec<_>| Error::NotExpectedLength(e.len(), 2))?,
kzg_challenges: kzg_challenges
.try_into()
.map_err(|e: Vec<_>| Error::NotExpectedLength(e.len(), 2))?,
})
}
@ -204,71 +194,59 @@ where
i: C1::ScalarField,
z_0: Vec<C1::ScalarField>,
z_i: Vec<C1::ScalarField>,
running_instance: &Self::CommittedInstance,
incoming_instance: &Self::CommittedInstance,
// we don't use the instances at the verifier level, since we check them in-circuit
running_commitments: &Self::CommittedInstance,
incoming_commitments: &Self::CommittedInstance,
proof: &Self::Proof,
) -> Result<bool, Error> {
if i <= C1::ScalarField::one() {
return Err(Error::NotEnoughSteps);
}
// compute U = U_{d+1}= NIFS.V(U_d, u_d, cmT)
let poseidon_config = poseidon_canonical_config::<C1::ScalarField>();
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&poseidon_config);
let (U, r_bits) = NIFS::<C1, CS1, PoseidonSponge<C1::ScalarField>>::verify(
&mut transcript,
vp.pp_hash,
running_instance,
incoming_instance,
&proof.cmT,
let Self::VerifierParam {
pp_hash,
snark_vp,
cs_vp,
} = vp;
// 6.2. Fold the commitments
let U_final_commitments = DeciderNovaGadget::fold_group_elements_native(
running_commitments,
incoming_commitments,
Some(proof.cmT),
proof.r,
)?;
let r = C1::ScalarField::from_bigint(BigInteger::from_bits_le(&r_bits))
.ok_or(Error::OutOfBounds)?;
let (cmE_x, cmE_y) = NonNativeAffineVar::inputize(U.cmE)?;
let (cmW_x, cmW_y) = NonNativeAffineVar::inputize(U.cmW)?;
let (cmT_x, cmT_y) = NonNativeAffineVar::inputize(proof.cmT)?;
let public_input: Vec<C1::ScalarField> = [
vec![vp.pp_hash, i],
z_0,
z_i,
vec![U.u],
U.x.clone(),
cmE_x,
cmE_y,
cmW_x,
cmW_y,
proof.kzg_challenges.to_vec(),
vec![
proof.kzg_proofs[0].eval, // eval_W
proof.kzg_proofs[1].eval, // eval_E
],
cmT_x,
cmT_y,
vec![r],
let public_input = [
&[pp_hash, i][..],
&z_0,
&z_i,
&U_final_commitments
.iter()
.flat_map(|c| c.inputize())
.collect::<Vec<_>>(),
&proof.kzg_challenges,
&proof.kzg_proofs.iter().map(|p| p.eval).collect::<Vec<_>>(),
&proof.cmT.inputize(),
&[proof.r],
]
.concat();
let snark_v = S::verify(&vp.snark_vp, &public_input, &proof.snark_proof)
let snark_v = S::verify(&snark_vp, &public_input, &proof.snark_proof)
.map_err(|e| Error::Other(e.to_string()))?;
if !snark_v {
return Err(Error::SNARKVerificationFail);
}
// we're at the Ethereum EVM case, so the CS1 is KZG commitments
CS1::verify_with_challenge(
&vp.cs_vp,
proof.kzg_challenges[0],
&U.cmW,
&proof.kzg_proofs[0],
)?;
CS1::verify_with_challenge(
&vp.cs_vp,
proof.kzg_challenges[1],
&U.cmE,
&proof.kzg_proofs[1],
)?;
// 7.3. Verify the KZG proofs
for ((cm, &c), pi) in U_final_commitments
.iter()
.zip(&proof.kzg_challenges)
.zip(&proof.kzg_proofs)
{
// we're at the Ethereum EVM case, so the CS1 is KZG commitments
CS1::verify_with_challenge(&cs_vp, c, cm, pi)?;
}
Ok(true)
}
@ -278,7 +256,6 @@ where
#[allow(clippy::too_many_arguments)]
pub fn prepare_calldata(
function_signature_check: [u8; 4],
pp_hash: ark_bn254::Fr,
i: ark_bn254::Fr,
z_0: Vec<ark_bn254::Fr>,
z_i: Vec<ark_bn254::Fr>,
@ -286,22 +263,6 @@ pub fn prepare_calldata(
incoming_instance: &CommittedInstance<ark_bn254::G1Projective>,
proof: Proof<ark_bn254::G1Projective, KZG<'static, Bn254>, Groth16<Bn254>>,
) -> Result<Vec<u8>, Error> {
// compute the challenge r
let poseidon_config = poseidon_canonical_config::<ark_bn254::Fr>();
let mut transcript = PoseidonSponge::<ark_bn254::Fr>::new(&poseidon_config);
let r_bits = ChallengeGadget::<
ark_bn254::G1Projective,
CommittedInstance<ark_bn254::G1Projective>,
>::get_challenge_native(
&mut transcript,
pp_hash,
running_instance,
incoming_instance,
Some(&proof.cmT),
);
let r =
ark_bn254::Fr::from_bigint(BigInteger::from_bits_le(&r_bits)).ok_or(Error::OutOfBounds)?;
Ok(vec![
function_signature_check.to_vec(),
i.into_bigint().to_bytes_be(), // i
@ -311,26 +272,14 @@ pub fn prepare_calldata(
z_i.iter()
.flat_map(|v| v.into_bigint().to_bytes_be())
.collect::<Vec<u8>>(), // z_i
point_to_eth_format(running_instance.cmW.into_affine())?, // U_i_cmW
point_to_eth_format(running_instance.cmE.into_affine())?, // U_i_cmE
running_instance.u.into_bigint().to_bytes_be(), // U_i_u
incoming_instance.u.into_bigint().to_bytes_be(), // u_i_u
r.into_bigint().to_bytes_be(), // r
running_instance
.x
.iter()
.flat_map(|v| v.into_bigint().to_bytes_be())
.collect::<Vec<u8>>(), // U_i_x
point_to_eth_format(incoming_instance.cmW.into_affine())?, // u_i_cmW
incoming_instance
.x
.iter()
.flat_map(|v| v.into_bigint().to_bytes_be())
.collect::<Vec<u8>>(), // u_i_x
point_to_eth_format(running_instance.cmW.into_affine())?,
point_to_eth_format(running_instance.cmE.into_affine())?,
point_to_eth_format(incoming_instance.cmW.into_affine())?,
point_to_eth_format(proof.cmT.into_affine())?, // cmT
point_to_eth_format(proof.snark_proof.a)?, // pA
point2_to_eth_format(proof.snark_proof.b)?, // pB
point_to_eth_format(proof.snark_proof.c)?, // pC
proof.r.into_bigint().to_bytes_be(), // r
point_to_eth_format(proof.snark_proof.a)?, // pA
point2_to_eth_format(proof.snark_proof.b)?, // pB
point_to_eth_format(proof.snark_proof.c)?, // pC
proof.kzg_challenges[0].into_bigint().to_bytes_be(), // challenge_W
proof.kzg_challenges[1].into_bigint().to_bytes_be(), // challenge_E
proof.kzg_proofs[0].eval.into_bigint().to_bytes_be(), // eval W
@ -373,6 +322,7 @@ pub mod tests {
use super::*;
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::{PreprocessorParam, ProverParams as NovaProverParams};
use crate::folding::traits::CommittedInstanceOps;
use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
@ -434,8 +384,8 @@ pub mod tests {
nova.i,
nova.z_0.clone(),
nova.z_i.clone(),
&nova.U_i,
&nova.u_i,
&nova.U_i.get_commitments(),
&nova.u_i.get_commitments(),
&proof,
)
.unwrap();
@ -444,7 +394,13 @@ pub mod tests {
// decider proof verification using the deserialized data
let verified = D::verify(
decider_vp, nova.i, nova.z_0, nova.z_i, &nova.U_i, &nova.u_i, &proof,
decider_vp,
nova.i,
nova.z_0,
nova.z_i,
&nova.U_i.get_commitments(),
&nova.u_i.get_commitments(),
&proof,
)
.unwrap();
assert!(verified);
@ -550,8 +506,8 @@ pub mod tests {
nova.i,
nova.z_0.clone(),
nova.z_i.clone(),
&nova.U_i,
&nova.u_i,
&nova.U_i.get_commitments(),
&nova.u_i.get_commitments(),
&proof,
)
.unwrap();
@ -579,12 +535,6 @@ pub mod tests {
nova.z_i
.serialize_compressed(&mut public_inputs_serialized)
.unwrap();
nova.U_i
.serialize_compressed(&mut public_inputs_serialized)
.unwrap();
nova.u_i
.serialize_compressed(&mut public_inputs_serialized)
.unwrap();
// deserialize back the verifier_params, proof and public inputs
let decider_vp_deserialized =
@ -605,10 +555,6 @@ pub mod tests {
let i_deserialized = Fr::deserialize_compressed(&mut reader).unwrap();
let z_0_deserialized = Vec::<Fr>::deserialize_compressed(&mut reader).unwrap();
let z_i_deserialized = Vec::<Fr>::deserialize_compressed(&mut reader).unwrap();
let U_i_deserialized =
CommittedInstance::<Projective>::deserialize_compressed(&mut reader).unwrap();
let u_i_deserialized =
CommittedInstance::<Projective>::deserialize_compressed(&mut reader).unwrap();
// decider proof verification using the deserialized data
let verified = D::verify(
@ -616,8 +562,8 @@ pub mod tests {
i_deserialized,
z_0_deserialized,
z_i_deserialized,
&U_i_deserialized,
&u_i_deserialized,
&nova.U_i.get_commitments(),
&nova.u_i.get_commitments(),
&proof_deserialized,
)
.unwrap();

+ 123
- 724
folding-schemes/src/folding/nova/decider_eth_circuit.rs

@ -4,130 +4,42 @@
/// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig, PoseidonSponge},
poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::{CurveGroup, Group};
use ark_ec::CurveGroup;
use ark_ff::{BigInteger, PrimeField};
use ark_poly::Polynomial;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
groups::GroupOpsBounds,
poly::{domain::Radix2DomainVar, evaluations::univariate::EvaluationsVar},
fields::fp::FpVar,
prelude::CurveVar,
ToConstraintFieldGadget,
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_std::{log2, Zero};
use core::{borrow::Borrow, marker::PhantomData};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{borrow::Borrow, marker::PhantomData};
use super::{
circuits::{ChallengeGadget, CommittedInstanceVar},
circuits::{ChallengeGadget, CommittedInstanceVar, NIFSGadget},
nifs::{nova::NIFS, NIFSTrait},
CommittedInstance, Nova, Witness,
};
use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme};
use crate::folding::circuits::{
cyclefold::{CycleFoldCommittedInstance, CycleFoldWitness},
nonnative::{affine::NonNativeAffineVar, uint::NonNativeUintVar},
CF1, CF2,
use crate::folding::{
circuits::{
decider::on_chain::GenericOnchainDeciderCircuit, nonnative::affine::NonNativeAffineVar,
CF1, CF2,
},
traits::{WitnessOps, WitnessVarOps},
}; };
use crate::frontend::FCircuit;
use crate::transcript::{Transcript, TranscriptVar};
use crate::utils::{
gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget},
vec::poly_from_vec,
};
use crate::Error;
use crate::{
arith::r1cs::R1CS,
folding::traits::{CommittedInstanceVarOps, Dummy, WitnessVarOps},
arith::r1cs::{circuits::R1CSMatricesVar, R1CS},
folding::circuits::decider::{DeciderEnabledNIFS, EvalGadget, KZGChallengesGadget},
};
#[derive(Debug, Clone)]
pub struct RelaxedR1CSGadget {}
impl RelaxedR1CSGadget {
/// performs the RelaxedR1CS check for native variables (Az∘Bz==uCz+E)
pub fn check_native<F: PrimeField>(
r1cs: R1CSVar<F, F, FpVar<F>>,
E: Vec<FpVar<F>>,
u: FpVar<F>,
z: Vec<FpVar<F>>,
) -> Result<(), SynthesisError> {
let Az = r1cs.A.mul_vector(&z)?;
let Bz = r1cs.B.mul_vector(&z)?;
let Cz = r1cs.C.mul_vector(&z)?;
let uCzE = Cz.mul_scalar(&u)?.add(&E)?;
let AzBz = Az.hadamard(&Bz)?;
AzBz.enforce_equal(&uCzE)?;
Ok(())
}
/// performs the RelaxedR1CS check for non-native variables (Az∘Bz==uCz+E)
pub fn check_nonnative<F: PrimeField, CF: PrimeField>(
r1cs: R1CSVar<F, CF, NonNativeUintVar<CF>>,
E: Vec<NonNativeUintVar<CF>>,
u: NonNativeUintVar<CF>,
z: Vec<NonNativeUintVar<CF>>,
) -> Result<(), SynthesisError> {
// First we do addition and multiplication without mod F's order
let Az = r1cs.A.mul_vector(&z)?;
let Bz = r1cs.B.mul_vector(&z)?;
let Cz = r1cs.C.mul_vector(&z)?;
let uCzE = Cz.mul_scalar(&u)?.add(&E)?;
let AzBz = Az.hadamard(&Bz)?;
// Then we compare the results by checking if they are congruent
// modulo the field order
AzBz.into_iter()
.zip(uCzE)
.try_for_each(|(a, b)| a.enforce_congruent::<F>(&b))
}
}
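For reference, the relation both checks above enforce, Az ∘ Bz == u·Cz + E, can also be verified natively with plain field arithmetic; the dense-matrix helper below is only an illustrative sketch (the crate itself works over its own sparse-matrix and gadget types):

use ark_ff::PrimeField;

// Sketch of the relaxed R1CS check Az ∘ Bz == u*Cz + E over dense matrices.
fn check_relaxed_r1cs_native<F: PrimeField>(
    a: &[Vec<F>],
    b: &[Vec<F>],
    c: &[Vec<F>],
    z: &[F],
    u: F,
    e: &[F],
) -> bool {
    // Matrix-vector product over the field.
    let mat_vec = |m: &[Vec<F>]| -> Vec<F> {
        m.iter()
            .map(|row| row.iter().zip(z).map(|(m_ij, z_j)| *m_ij * z_j).sum())
            .collect()
    };
    let (az, bz, cz) = (mat_vec(a), mat_vec(b), mat_vec(c));
    // Entry-wise: (Az)_i * (Bz)_i == u * (Cz)_i + E_i
    az.iter()
        .zip(&bz)
        .zip(cz.iter().zip(e))
        .all(|((az_i, bz_i), (cz_i, e_i))| *az_i * bz_i == u * cz_i + e_i)
}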
#[derive(Debug, Clone)]
pub struct R1CSVar<F: PrimeField, CF: PrimeField, FV: AllocVar<F, CF>> {
_f: PhantomData<F>,
_cf: PhantomData<CF>,
_fv: PhantomData<FV>,
pub A: SparseMatrixVar<F, CF, FV>,
pub B: SparseMatrixVar<F, CF, FV>,
pub C: SparseMatrixVar<F, CF, FV>,
}
impl<F, CF, FV> AllocVar<R1CS<F>, CF> for R1CSVar<F, CF, FV>
where
F: PrimeField,
CF: PrimeField,
FV: AllocVar<F, CF>,
{
fn new_variable<T: Borrow<R1CS<F>>>(
cs: impl Into<Namespace<CF>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let A = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().A)?;
let B = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().B)?;
let C = SparseMatrixVar::<F, CF, FV>::new_constant(cs.clone(), &val.borrow().C)?;
Ok(Self {
_f: PhantomData,
_cf: PhantomData,
_fv: PhantomData,
A,
B,
C,
})
})
}
}
/// In-circuit representation of the Witness associated to the CommittedInstance.
#[derive(Debug, Clone)]
pub struct WitnessVar<C: CurveGroup> {
@ -166,82 +78,40 @@ where
impl<C: CurveGroup> WitnessVarOps<C::ScalarField> for WitnessVar<C> {
fn get_openings(&self) -> Vec<(&[FpVar<C::ScalarField>], FpVar<C::ScalarField>)> {
vec![(&self.E, self.rE.clone()), (&self.W, self.rW.clone())]
vec![(&self.W, self.rW.clone()), (&self.E, self.rE.clone())]
}
}
/// Circuit that implements the in-circuit checks needed for the onchain (Ethereum's EVM)
/// verification.
#[derive(Clone, Debug)]
pub struct DeciderEthCircuit<C1, GC1, C2, GC2, CS1, CS2, const H: bool = false>
pub type DeciderEthCircuit<C1, C2, GC2> = GenericOnchainDeciderCircuit<
C1,
C2,
GC2,
CommittedInstance<C1>,
CommittedInstance<C1>,
Witness<C1>,
R1CS<CF1<C1>>,
R1CSMatricesVar<CF1<C1>, FpVar<CF1<C1>>>,
DeciderNovaGadget,
>;
/// returns an instance of the DeciderEthCircuit from the given Nova struct
impl<
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, H>,
// enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider
CS2: CommitmentScheme<C2, H, ProverParams = PedersenParams<C2>>,
const H: bool,
> TryFrom<Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>> for DeciderEthCircuit<C1, C2, GC2>
where
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>>,
CS1: CommitmentScheme<C1, H>,
CS2: CommitmentScheme<C2, H>,
CF1<C1>: Absorb,
{
_c1: PhantomData<C1>,
_gc1: PhantomData<GC1>,
_c2: PhantomData<C2>,
_gc2: PhantomData<GC2>,
_cs1: PhantomData<CS1>,
_cs2: PhantomData<CS2>,
/// E vector's length of the Nova instance witness
pub E_len: usize,
/// E vector's length of the CycleFold instance witness
pub cf_E_len: usize,
/// R1CS of the Augmented Function circuit
pub r1cs: R1CS<C1::ScalarField>,
/// R1CS of the CycleFold circuit
pub cf_r1cs: R1CS<C2::ScalarField>,
/// CycleFold PedersenParams over C2
pub cf_pedersen_params: PedersenParams<C2>,
pub poseidon_config: PoseidonConfig<CF1<C1>>,
/// public params hash
pub pp_hash: Option<C1::ScalarField>,
pub i: Option<CF1<C1>>,
/// initial state
pub z_0: Option<Vec<C1::ScalarField>>,
/// current i-th state
pub z_i: Option<Vec<C1::ScalarField>>,
/// Nova instances
pub u_i: Option<CommittedInstance<C1>>,
pub w_i: Option<Witness<C1>>,
pub U_i: Option<CommittedInstance<C1>>,
pub W_i: Option<Witness<C1>>,
pub U_i1: Option<CommittedInstance<C1>>,
pub W_i1: Option<Witness<C1>>,
pub cmT: Option<C1>,
pub r: Option<C1::ScalarField>,
/// CycleFold running instance
pub cf_U_i: Option<CycleFoldCommittedInstance<C2>>,
pub cf_W_i: Option<CycleFoldWitness<C2>>,
type Error = Error;
/// KZG challenges
pub kzg_c_W: Option<C1::ScalarField>,
pub kzg_c_E: Option<C1::ScalarField>,
pub eval_W: Option<C1::ScalarField>,
pub eval_E: Option<C1::ScalarField>,
}
impl<C1, GC1, C2, GC2, CS1, CS2, const H: bool> DeciderEthCircuit<C1, GC1, C2, GC2, CS1, CS2, H>
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
CS1: CommitmentScheme<C1, H>,
// enforce that the CS2 is Pedersen commitment scheme, since we're at Ethereum's EVM decider
CS2: CommitmentScheme<C2, H, ProverParams = PedersenParams<C2>>,
<C1 as Group>::ScalarField: Absorb,
<C1 as CurveGroup>::BaseField: PrimeField,
{
/// returns an instance of the DeciderEthCircuit from the given Nova struct
pub fn from_nova<FC: FCircuit<C1::ScalarField>>(
nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>,
) -> Result<Self, Error> {
fn try_from(nova: Nova<C1, GC1, C2, GC2, FC, CS1, CS2, H>) -> Result<Self, Error> {
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&nova.poseidon_config);
// compute the U_{i+1}, W_{i+1}
@ -259,517 +129,116 @@ where
.ok_or(Error::OutOfBounds)?;
// compute the KZG challenges used as inputs in the circuit
let (kzg_challenge_W, kzg_challenge_E) =
KZGChallengesGadget::<C1>::get_challenges_native(&mut transcript, U_i1.clone());
let kzg_challenges = KZGChallengesGadget::get_challenges_native(&mut transcript, &U_i1);
// get KZG evals
let mut W = W_i1.W.clone();
W.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.W.len().next_power_of_two() - W_i1.W.len()),
);
let mut E = W_i1.E.clone();
E.extend(
std::iter::repeat(C1::ScalarField::zero())
.take(W_i1.E.len().next_power_of_two() - W_i1.E.len()),
);
let p_W = poly_from_vec(W.to_vec())?;
let eval_W = p_W.evaluate(&kzg_challenge_W);
let p_E = poly_from_vec(E.to_vec())?;
let eval_E = p_E.evaluate(&kzg_challenge_E);
let kzg_evaluations = W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| EvalGadget::evaluate_native(v, c))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
_c1: PhantomData,
_gc1: PhantomData,
_c2: PhantomData,
_gc2: PhantomData,
_cs1: PhantomData,
_cs2: PhantomData,
E_len: nova.W_i.E.len(),
cf_E_len: nova.cf_W_i.E.len(),
r1cs: nova.r1cs,
cf_r1cs: nova.cf_r1cs,
_avar: PhantomData,
arith: nova.r1cs,
cf_arith: nova.cf_r1cs,
cf_pedersen_params: nova.cf_cs_pp,
poseidon_config: nova.poseidon_config,
pp_hash: Some(nova.pp_hash),
i: Some(nova.i),
z_0: Some(nova.z_0),
z_i: Some(nova.z_i),
u_i: Some(nova.u_i),
w_i: Some(nova.w_i),
U_i: Some(nova.U_i),
W_i: Some(nova.W_i),
U_i1: Some(U_i1),
W_i1: Some(W_i1),
cmT: Some(cmT),
r: Some(r_Fr),
cf_U_i: Some(nova.cf_U_i),
cf_W_i: Some(nova.cf_W_i),
kzg_c_W: Some(kzg_challenge_W),
kzg_c_E: Some(kzg_challenge_E),
eval_W: Some(eval_W),
eval_E: Some(eval_E),
pp_hash: nova.pp_hash,
i: nova.i,
z_0: nova.z_0,
z_i: nova.z_i,
U_i: nova.U_i,
W_i: nova.W_i,
u_i: nova.u_i,
w_i: nova.w_i,
U_i1,
W_i1,
proof: cmT,
randomness: r_Fr,
cf_U_i: nova.cf_U_i,
cf_W_i: nova.cf_W_i,
kzg_challenges,
kzg_evaluations,
})
}
}
impl<C1, GC1, C2, GC2, CS1, CS2> ConstraintSynthesizer<CF1<C1>>
for DeciderEthCircuit<C1, GC1, C2, GC2, CS1, CS2>
pub struct DeciderNovaGadget;
impl<C: CurveGroup>
DeciderEnabledNIFS<C, CommittedInstance<C>, CommittedInstance<C>, Witness<C>, R1CS<CF1<C>>>
for DeciderNovaGadget
where
C1: CurveGroup,
C2: CurveGroup,
GC1: CurveVar<C1, CF2<C1>>,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
CS1: CommitmentScheme<C1>,
CS2: CommitmentScheme<C2>,
<C1 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField,
<C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'b> &'b GC2: GroupOpsBounds<'b, C2, GC2>,
CF1<C>: Absorb,
{
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let r1cs =
R1CSVar::<C1::ScalarField, CF1<C1>, FpVar<CF1<C1>>>::new_witness(cs.clone(), || {
Ok(self.r1cs.clone())
})?;
let pp_hash = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.pp_hash.unwrap_or_else(CF1::<C1>::zero))
})?;
let i =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.i.unwrap_or_else(CF1::<C1>::zero)))?;
let z_0 = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_0.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let z_i = Vec::<FpVar<CF1<C1>>>::new_input(cs.clone(), || {
Ok(self.z_i.unwrap_or(vec![CF1::<C1>::zero()]))
})?;
let u_dummy_native = CommittedInstance::<C1>::dummy(&self.r1cs);
let w_dummy_native = Witness::<C1>::dummy(&self.r1cs);
let u_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.u_i.unwrap_or(u_dummy_native.clone()))
})?;
let U_i = CommittedInstanceVar::<C1>::new_witness(cs.clone(), || {
Ok(self.U_i.unwrap_or(u_dummy_native.clone()))
})?;
// here (U_i1, W_i1) = NIFS.P( (U_i,W_i), (u_i,w_i))
let U_i1 = CommittedInstanceVar::<C1>::new_input(cs.clone(), || {
Ok(self.U_i1.unwrap_or(u_dummy_native.clone()))
})?;
let W_i1 = WitnessVar::<C1>::new_witness(cs.clone(), || {
Ok(self.W_i1.unwrap_or(w_dummy_native.clone()))
})?;
// allocate the inputs for the check 5.1
let kzg_c_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.kzg_c_W.unwrap_or_else(CF1::<C1>::zero))
})?;
let kzg_c_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.kzg_c_E.unwrap_or_else(CF1::<C1>::zero))
})?;
// allocate the inputs for the check 5.2
let eval_W = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_W.unwrap_or_else(CF1::<C1>::zero))
})?;
let eval_E = FpVar::<CF1<C1>>::new_input(cs.clone(), || {
Ok(self.eval_E.unwrap_or_else(CF1::<C1>::zero))
})?;
// `sponge` is for digest computation.
let sponge = PoseidonSpongeVar::<C1::ScalarField>::new(cs.clone(), &self.poseidon_config);
// `transcript` is for challenge generation.
let mut transcript = sponge.clone();
// The following enumeration of the steps matches the one used at the documentation page
// https://privacy-scaling-explorations.github.io/sonobe-docs/design/nova-decider-onchain.html
// 2. u_i.cmE==cm(0), u_i.u==1
// Here zero is the x & y coordinates of the zero point affine representation.
let zero = NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::zero())?;
u_i.cmE.x.enforce_equal_unaligned(&zero)?;
u_i.cmE.y.enforce_equal_unaligned(&zero)?;
(u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?;
// 3.a u_i.x[0] == H(i, z_0, z_i, U_i)
let (u_i_x, U_i_vec) = U_i.clone().hash(&sponge, &pp_hash, &i, &z_0, &z_i)?;
(u_i.x[0]).enforce_equal(&u_i_x)?;
// 4. check RelaxedR1CS of U_{i+1}
let z_U1: Vec<FpVar<CF1<C1>>> =
[vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat();
RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?;
#[cfg(feature = "light-test")]
log::warn!("[WARNING]: Running with the 'light-test' feature, skipping the big part of the DeciderEthCircuit.\n Only for testing purposes.");
// The following two checks (and their respective allocations) are disabled for normal
// tests since they take several millions of constraints and would take several minutes
// (and RAM) to run the test. It is active by default, and not active only when
// 'light-test' feature is used.
#[cfg(not(feature = "light-test"))]
{
// imports here instead of at the top of the file, so we avoid having multiple
// `#[cfg(not(test))]`
use crate::commitment::pedersen::PedersenGadget;
use crate::folding::{
circuits::cyclefold::{
CycleFoldCommittedInstanceVar, CycleFoldConfig, CycleFoldWitnessVar,
},
nova::NovaCycleFoldConfig,
};
use ark_r1cs_std::ToBitsGadget;
let cf_u_dummy_native =
CycleFoldCommittedInstance::<C2>::dummy(NovaCycleFoldConfig::<C1>::IO_LEN);
let w_dummy_native = CycleFoldWitness::<C2>::dummy(&self.cf_r1cs);
let cf_U_i = CycleFoldCommittedInstanceVar::<C2, GC2>::new_witness(cs.clone(), || {
Ok(self.cf_U_i.unwrap_or_else(|| cf_u_dummy_native.clone()))
})?;
let cf_W_i = CycleFoldWitnessVar::<C2>::new_witness(cs.clone(), || {
Ok(self.cf_W_i.unwrap_or(w_dummy_native.clone()))
})?;
// 3.b u_i.x[1] == H(cf_U_i)
let (cf_u_i_x, _) = cf_U_i.clone().hash(&sponge, pp_hash.clone())?;
(u_i.x[1]).enforce_equal(&cf_u_i_x)?;
// 7. check Pedersen commitments of cf_U_i.{cmE, cmW}
let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?;
let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?;
let cf_W_i_E_bits: Result<Vec<Vec<Boolean<CF1<C1>>>>, SynthesisError> =
cf_W_i.E.iter().map(|E_i| E_i.to_bits_le()).collect();
let cf_W_i_W_bits: Result<Vec<Vec<Boolean<CF1<C1>>>>, SynthesisError> =
cf_W_i.W.iter().map(|W_i| W_i.to_bits_le()).collect();
let computed_cmE = PedersenGadget::<C2, GC2>::commit(
H.clone(),
G.clone(),
cf_W_i_E_bits?,
cf_W_i.rE.to_bits_le()?,
)?;
cf_U_i.cmE.enforce_equal(&computed_cmE)?;
let computed_cmW =
PedersenGadget::<C2, GC2>::commit(H, G, cf_W_i_W_bits?, cf_W_i.rW.to_bits_le()?)?;
cf_U_i.cmW.enforce_equal(&computed_cmW)?;
let cf_r1cs =
R1CSVar::<C1::BaseField, CF1<C1>, NonNativeUintVar<CF1<C1>>>::new_witness(
cs.clone(),
|| Ok(self.cf_r1cs.clone()),
)?;
// 6. check RelaxedR1CS of cf_U_i (CycleFold instance)
let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?;
}
// 1.1.a, 5.1. compute NIFS.V and KZG challenges.
// We need to ensure the order of challenge generation is the same as
// the native counterpart, so we first compute the challenges here and
// do the actual checks later.
let cmT =
NonNativeAffineVar::new_input(cs.clone(), || Ok(self.cmT.unwrap_or_else(C1::zero)))?;
// 1.1.a
let r_bits = ChallengeGadget::<C1, CommittedInstance<C1>>::get_challenge_gadget(
&mut transcript,
type ProofDummyCfg = ();
type Proof = C;
type RandomnessDummyCfg = ();
type Randomness = CF1<C>;
fn fold_field_elements_gadget(
_arith: &R1CS<CF1<C>>,
transcript: &mut PoseidonSpongeVar<CF1<C>>,
pp_hash: FpVar<CF1<C>>,
U: CommittedInstanceVar<C>,
U_vec: Vec<FpVar<CF1<C>>>,
u: CommittedInstanceVar<C>,
proof: C,
randomness: CF1<C>,
) -> Result<CommittedInstanceVar<C>, SynthesisError> {
let cs = transcript.cs();
let cmT = NonNativeAffineVar::new_input(cs.clone(), || Ok(proof))?;
let r = FpVar::new_input(cs.clone(), || Ok(randomness))?;
let r_bits = ChallengeGadget::<C, CommittedInstance<C>>::get_challenge_gadget(
transcript,
pp_hash,
U_i_vec,
u_i.clone(),
U_vec,
u.clone(),
Some(cmT),
)?;
// 5.1.
let (incircuit_c_W, incircuit_c_E) =
KZGChallengesGadget::<C1>::get_challenges_gadget(&mut transcript, U_i1.clone())?;
incircuit_c_W.enforce_equal(&kzg_c_W)?;
incircuit_c_E.enforce_equal(&kzg_c_E)?;
// 5.2. check eval_W==p_W(c_W) and eval_E==p_E(c_E)
let incircuit_eval_W = evaluate_gadget::<CF1<C1>>(W_i1.W, incircuit_c_W)?;
let incircuit_eval_E = evaluate_gadget::<CF1<C1>>(W_i1.E, incircuit_c_E)?;
incircuit_eval_W.enforce_equal(&eval_W)?;
incircuit_eval_E.enforce_equal(&eval_E)?;
// 1.1.b check that the NIFS.V challenge matches the one from the public input (so we avoid
// the verifier computing it)
let r_Fr = Boolean::le_bits_to_fp_var(&r_bits)?;
// check that the in-circuit computed r is equal to the inputted r
let r =
FpVar::<CF1<C1>>::new_input(cs.clone(), || Ok(self.r.unwrap_or_else(CF1::<C1>::zero)))?;
r_Fr.enforce_equal(&r)?;
Ok(())
}
}
/// Interpolates the polynomial from the given vector, and then returns its evaluation at the
/// given point.
#[allow(unused)] // unused while check 7 is disabled
pub fn evaluate_gadget<F: PrimeField>(
mut v: Vec<FpVar<F>>,
point: FpVar<F>,
) -> Result<FpVar<F>, SynthesisError> {
v.resize(v.len().next_power_of_two(), FpVar::zero());
let n = v.len() as u64;
let gen = F::get_root_of_unity(n).unwrap();
let domain = Radix2DomainVar::new(gen, log2(v.len()) as u64, FpVar::one()).unwrap();
Boolean::le_bits_to_fp_var(&r_bits)?.enforce_equal(&r)?;
let evaluations_var = EvaluationsVar::from_vec_and_domain(v, domain, true);
evaluations_var.interpolate_and_evaluate(&point)
}
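The same pad-interpolate-evaluate computation can be reproduced natively with ark_poly alone; the sketch below is illustrative only (the crate itself routes the native side through its `poly_from_vec` helper):

use ark_ff::PrimeField;
use ark_poly::{
    univariate::DensePolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
};
use ark_std::Zero;

// Native counterpart of `evaluate_gadget`: pad to the next power of two, treat the
// vector as evaluations over a radix-2 domain, interpolate, and evaluate at `point`.
fn evaluate_native<F: PrimeField>(mut v: Vec<F>, point: F) -> Option<F> {
    v.resize(v.len().next_power_of_two(), F::zero());
    let domain = GeneralEvaluationDomain::<F>::new(v.len())?;
    let poly: DensePolynomial<F> = Evaluations::from_vec_and_domain(v, domain).interpolate();
    Some(poly.evaluate(&point))
}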
/// Gadget that computes the KZG challenges, also offers the rust native implementation compatible
/// with the gadget.
pub struct KZGChallengesGadget<C: CurveGroup> {
_c: PhantomData<C>,
}
#[allow(clippy::type_complexity)]
impl<C> KZGChallengesGadget<C>
where
C: CurveGroup,
C::ScalarField: PrimeField,
<C as CurveGroup>::BaseField: PrimeField,
C::ScalarField: Absorb,
{
pub fn get_challenges_native<T: Transcript<C::ScalarField>>(
transcript: &mut T,
U_i: CommittedInstance<C>,
) -> (C::ScalarField, C::ScalarField) {
// compute the KZG challenges, which are computed in-circuit and checked that it matches
// the inputted one
transcript.absorb_nonnative(&U_i.cmW);
let challenge_W = transcript.get_challenge();
transcript.absorb_nonnative(&U_i.cmE);
let challenge_E = transcript.get_challenge();
(challenge_W, challenge_E)
NIFSGadget::<C>::fold_committed_instance(r, U, u)
}
// compatible with the native get_challenges_native
pub fn get_challenges_gadget<S: CryptographicSponge, T: TranscriptVar<CF1<C>, S>>(
transcript: &mut T,
U_i: CommittedInstanceVar<C>,
) -> Result<(FpVar<C::ScalarField>, FpVar<C::ScalarField>), SynthesisError> {
transcript.absorb(&U_i.cmW.to_constraint_field()?)?;
let challenge_W = transcript.get_challenge()?;
transcript.absorb(&U_i.cmE.to_constraint_field()?)?;
let challenge_E = transcript.get_challenge()?;
Ok((challenge_W, challenge_E))
fn fold_group_elements_native(
U_commitments: &[C],
u_commitments: &[C],
cmT: Option<Self::Proof>,
r: Self::Randomness,
) -> Result<Vec<C>, Error> {
let cmT = cmT.ok_or(Error::Empty)?;
let U_cmW = U_commitments[0];
let U_cmE = U_commitments[1];
let u_cmW = u_commitments[0];
let u_cmE = u_commitments[1];
if !u_cmE.is_zero() {
return Err(Error::NotIncomingCommittedInstance);
}
let cmW = U_cmW + u_cmW.mul(r);
let cmE = U_cmE + cmT.mul(r);
Ok(vec![cmW, cmE])
}
}
#[cfg(test)]
pub mod tests {
use std::cmp::max;
use ark_crypto_primitives::crh::{
sha256::{
constraints::{Sha256Gadget, UnitVar},
Sha256,
},
CRHScheme, CRHSchemeGadget,
};
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_r1cs_std::bits::uint8::UInt8;
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{
rand::{thread_rng, Rng},
UniformRand,
};
use ark_pallas::{constraints::GVar, Fr, Projective};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use ark_vesta::{constraints::GVar as GVar2, Projective as Projective2};
use super::*;
use crate::arith::{
r1cs::{
extract_r1cs, extract_w_x,
tests::{get_test_r1cs, get_test_z},
},
Arith,
};
use crate::commitment::pedersen::Pedersen;
use crate::folding::nova::PreprocessorParam;
use crate::frontend::utils::{CubicFCircuit, CustomFCircuit, WrapperCircuit};
use crate::frontend::utils::CubicFCircuit;
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::FoldingScheme;
// Convert `z` to a witness-instance pair for the relaxed R1CS
fn prepare_relaxed_witness_instance<C: CurveGroup, CS: CommitmentScheme<C>, R: Rng>(
mut rng: R,
r1cs: &R1CS<C::ScalarField>,
z: &[C::ScalarField],
) -> (Witness<C>, CommittedInstance<C>) {
let (w, x) = r1cs.split_z(z);
let (cs_pp, _) = CS::setup(&mut rng, max(w.len(), r1cs.A.n_rows)).unwrap();
let mut w = Witness::new::<false>(w, r1cs.A.n_rows, &mut rng);
w.E = r1cs.eval_at_z(z).unwrap();
let mut u = w.commit::<CS, false>(&cs_pp, x).unwrap();
u.u = z[0];
(w, u)
}
#[test]
fn test_relaxed_r1cs_small_gadget_handcrafted() {
let rng = &mut thread_rng();
let r1cs: R1CS<Fr> = get_test_r1cs();
let mut z = get_test_z(3);
z[0] = Fr::rand(rng); // Randomize `z[0]` (i.e. `u.u`) to test the relaxed R1CS
let (w, u) = prepare_relaxed_witness_instance::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
let cs = ConstraintSystem::<Fr>::new_ref();
let zVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
let EVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(w.E)).unwrap();
let uVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(u.u)).unwrap();
let r1csVar = R1CSVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
RelaxedR1CSGadget::check_native(r1csVar, EVar, uVar, zVar).unwrap();
assert!(cs.is_satisfied().unwrap());
}
// gets as input a circuit that implements the ConstraintSynthesizer trait, and that has been
// initialized.
fn test_relaxed_r1cs_gadget<CS: ConstraintSynthesizer<Fr>>(circuit: CS) {
let rng = &mut thread_rng();
let cs = ConstraintSystem::<Fr>::new_ref();
circuit.generate_constraints(cs.clone()).unwrap();
cs.finalize();
assert!(cs.is_satisfied().unwrap());
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fr>(&cs);
let (w, x) = extract_w_x::<Fr>(&cs);
r1cs.check_relation(&w, &x).unwrap();
let z = [vec![Fr::rand(rng)], x, w].concat();
let (w, u) = prepare_relaxed_witness_instance::<_, Pedersen<Projective>, _>(rng, &r1cs, &z);
r1cs.check_relation(&w, &u).unwrap();
// set new CS for the circuit that checks the RelaxedR1CS of our original circuit
let cs = ConstraintSystem::<Fr>::new_ref();
// prepare the inputs for our circuit
let zVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(z)).unwrap();
let EVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(w.E)).unwrap();
let uVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(u.u)).unwrap();
let r1csVar = R1CSVar::<Fr, Fr, FpVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
RelaxedR1CSGadget::check_native(r1csVar, EVar, uVar, zVar).unwrap();
assert!(cs.is_satisfied().unwrap());
}
#[test]
fn test_relaxed_r1cs_small_gadget_arkworks() {
let z_i = vec![Fr::from(3_u32)];
let cubic_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
let circuit = WrapperCircuit::<Fr, CubicFCircuit<Fr>> {
FC: cubic_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(cubic_circuit.step_native(0, z_i, vec![]).unwrap()),
};
test_relaxed_r1cs_gadget(circuit);
}
struct Sha256TestCircuit<F: PrimeField> {
_f: PhantomData<F>,
pub x: Vec<u8>,
pub y: Vec<u8>,
}
impl<F: PrimeField> ConstraintSynthesizer<F> for Sha256TestCircuit<F> {
fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> Result<(), SynthesisError> {
let x = Vec::<UInt8<F>>::new_witness(cs.clone(), || Ok(self.x))?;
let y = Vec::<UInt8<F>>::new_input(cs.clone(), || Ok(self.y))?;
let unitVar = UnitVar::default();
let comp_y = <Sha256Gadget<F> as CRHSchemeGadget<Sha256, F>>::evaluate(&unitVar, &x)?;
comp_y.0.enforce_equal(&y)?;
Ok(())
}
}
#[test]
fn test_relaxed_r1cs_medium_gadget_arkworks() {
let x = Fr::from(5_u32).into_bigint().to_bytes_le();
let y = <Sha256 as CRHScheme>::evaluate(&(), x.clone()).unwrap();
let circuit = Sha256TestCircuit::<Fr> {
_f: PhantomData,
x,
y,
};
test_relaxed_r1cs_gadget(circuit);
}
#[test]
fn test_relaxed_r1cs_custom_circuit() {
let n_constraints = 10_000;
let custom_circuit = CustomFCircuit::<Fr>::new(n_constraints).unwrap();
let z_i = vec![Fr::from(5_u32)];
let circuit = WrapperCircuit::<Fr, CustomFCircuit<Fr>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()),
};
test_relaxed_r1cs_gadget(circuit);
}
#[test]
fn test_relaxed_r1cs_nonnative_circuit() {
let rng = &mut thread_rng();
let cs = ConstraintSystem::<Fq>::new_ref();
// in practice we would use CycleFoldCircuit, but it is a very big circuit (when computed
// non-natively inside the RelaxedR1CS circuit), so in order to have a short test we use a
// custom circuit.
let custom_circuit = CustomFCircuit::<Fq>::new(10).unwrap();
let z_i = vec![Fq::from(5_u32)];
let circuit = WrapperCircuit::<Fq, CustomFCircuit<Fq>> {
FC: custom_circuit,
z_i: Some(z_i.clone()),
z_i1: Some(custom_circuit.step_native(0, z_i, vec![]).unwrap()),
};
circuit.generate_constraints(cs.clone()).unwrap();
cs.finalize();
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fq>(&cs);
let (w, x) = extract_w_x::<Fq>(&cs);
let z = [vec![Fq::rand(rng)], x, w].concat();
let (w, u) =
prepare_relaxed_witness_instance::<_, Pedersen<Projective2>, _>(rng, &r1cs, &z);
// natively
let cs = ConstraintSystem::<Fq>::new_ref();
let zVar = Vec::<FpVar<Fq>>::new_witness(cs.clone(), || Ok(z.clone())).unwrap();
let EVar = Vec::<FpVar<Fq>>::new_witness(cs.clone(), || Ok(w.E.clone())).unwrap();
let uVar = FpVar::<Fq>::new_witness(cs.clone(), || Ok(u.u)).unwrap();
let r1csVar =
R1CSVar::<Fq, Fq, FpVar<Fq>>::new_witness(cs.clone(), || Ok(r1cs.clone())).unwrap();
RelaxedR1CSGadget::check_native(r1csVar, EVar, uVar, zVar).unwrap();
// non-natively
let cs = ConstraintSystem::<Fr>::new_ref();
let zVar = Vec::new_witness(cs.clone(), || Ok(z)).unwrap();
let EVar = Vec::new_witness(cs.clone(), || Ok(w.E)).unwrap();
let uVar = NonNativeUintVar::<Fr>::new_witness(cs.clone(), || Ok(u.u)).unwrap();
let r1csVar =
R1CSVar::<Fq, Fr, NonNativeUintVar<Fr>>::new_witness(cs.clone(), || Ok(r1cs)).unwrap();
RelaxedR1CSGadget::check_nonnative(r1csVar, EVar, uVar, zVar).unwrap();
}
#[test]
fn test_decider_eth_circuit() {
fn test_decider_circuit() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
@ -803,84 +272,14 @@ pub mod tests {
let ivc_proof = nova.ivc_proof();
N::verify(nova_params.1, ivc_proof).unwrap();
// load the DeciderEthCircuit from the Nova instance
let decider_eth_circuit = DeciderEthCircuit::<
Projective,
GVar,
Projective2,
GVar2,
Pedersen<Projective>,
Pedersen<Projective2>,
>::from_nova(nova)
.unwrap();
// load the DeciderEthCircuit from the generated Nova instance
let decider_circuit =
DeciderEthCircuit::<Projective, Projective2, GVar2>::try_from(nova).unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();
// generate the constraints and check that they are satisfied by the inputs
decider_eth_circuit
.generate_constraints(cs.clone())
.unwrap();
assert!(cs.is_satisfied().unwrap());
}
// checks that the gadget and native implementations of the challenge computation match
#[test]
fn test_kzg_challenge_gadget() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
let mut transcript = PoseidonSponge::<Fr>::new(&poseidon_config);
let U_i = CommittedInstance::<Projective> {
cmE: Projective::rand(&mut rng),
u: Fr::rand(&mut rng),
cmW: Projective::rand(&mut rng),
x: vec![Fr::rand(&mut rng); 1],
};
// compute the challenge natively
let (challenge_W, challenge_E) =
KZGChallengesGadget::<Projective>::get_challenges_native(&mut transcript, U_i.clone());
let cs = ConstraintSystem::<Fr>::new_ref();
let U_iVar =
CommittedInstanceVar::<Projective>::new_witness(cs.clone(), || Ok(U_i.clone()))
.unwrap();
let mut transcript_var = PoseidonSpongeVar::<Fr>::new(cs.clone(), &poseidon_config);
let (challenge_W_Var, challenge_E_Var) =
KZGChallengesGadget::<Projective>::get_challenges_gadget(&mut transcript_var, U_iVar)
.unwrap();
assert!(cs.is_satisfied().unwrap());
// check that the natively computed and in-circuit computed hashes match
use ark_r1cs_std::R1CSVar;
assert_eq!(challenge_W_Var.value().unwrap(), challenge_W);
assert_eq!(challenge_E_Var.value().unwrap(), challenge_E);
}
#[test]
fn test_polynomial_interpolation() {
let mut rng = ark_std::test_rng();
let n = 12;
let l = 1 << n;
let v: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(&mut rng))
.take(l)
.collect();
let challenge = Fr::rand(&mut rng);
use ark_poly::Polynomial;
let polynomial = poly_from_vec(v.to_vec()).unwrap();
let eval = polynomial.evaluate(&challenge);
let cs = ConstraintSystem::<Fr>::new_ref();
let vVar = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(v)).unwrap();
let challengeVar = FpVar::<Fr>::new_witness(cs.clone(), || Ok(challenge)).unwrap();
let evalVar = evaluate_gadget::<Fr>(vVar, challengeVar).unwrap();
use ark_r1cs_std::R1CSVar;
assert_eq!(evalVar.value().unwrap(), eval);
decider_circuit.generate_constraints(cs.clone()).unwrap();
assert!(cs.is_satisfied().unwrap()); assert!(cs.is_satisfied().unwrap());
} }
} }

+ 30
- 35
folding-schemes/src/folding/nova/mod.rs

@ -10,7 +10,7 @@ use ark_crypto_primitives::sponge::{
}; };
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_ff::{BigInteger, PrimeField}; use ark_ff::{BigInteger, PrimeField};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget};
use ark_r1cs_std::{prelude::CurveVar, ToConstraintFieldGadget};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Valid}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Valid};
use ark_std::fmt::Debug; use ark_std::fmt::Debug;
@ -56,7 +56,7 @@ pub mod decider_circuits;
pub mod decider_eth; pub mod decider_eth;
pub mod decider_eth_circuit; pub mod decider_eth_circuit;
use super::traits::{CommittedInstanceOps, WitnessOps};
use super::traits::{CommittedInstanceOps, Inputize, WitnessOps};
/// Configuration for Nova's CycleFold circuit /// Configuration for Nova's CycleFold circuit
pub struct NovaCycleFoldConfig<C: CurveGroup> { pub struct NovaCycleFoldConfig<C: CurveGroup> {
@ -137,6 +137,18 @@ impl CommittedInstanceOps for CommittedInstance {
} }
} }
impl<C: CurveGroup> Inputize<C::ScalarField, CommittedInstanceVar<C>> for CommittedInstance<C> {
fn inputize(&self) -> Vec<C::ScalarField> {
[
&[self.u][..],
&self.x,
&self.cmE.inputize(),
&self.cmW.inputize(),
]
.concat()
}
}
#[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] #[derive(Debug, Clone, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct Witness<C: CurveGroup> { pub struct Witness<C: CurveGroup> {
pub E: Vec<C::ScalarField>, pub E: Vec<C::ScalarField>,
@ -477,8 +489,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
type PreprocessorParam = PreprocessorParam<C1, C2, FC, CS1, CS2, H>; type PreprocessorParam = PreprocessorParam<C1, C2, FC, CS1, CS2, H>;
type ProverParam = ProverParams<C1, C2, CS1, CS2, H>; type ProverParam = ProverParams<C1, C2, CS1, CS2, H>;
@ -518,7 +528,7 @@ where
augmented_F_circuit.generate_constraints(cs.clone())?; augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
let r1cs = extract_r1cs::<C1::ScalarField>(&cs)?;
// CycleFold circuit R1CS // CycleFold circuit R1CS
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref(); let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
@ -526,7 +536,7 @@ where
cf_circuit.generate_constraints(cs2.clone())?; cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize(); cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2)?;
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
@ -548,23 +558,14 @@ where
get_r1cs::<C1, GC1, C2, GC2, FC>(&prep_param.poseidon_config, prep_param.F.clone())?; get_r1cs::<C1, GC1, C2, GC2, FC>(&prep_param.poseidon_config, prep_param.F.clone())?;
// if cs params exist, use them, if not, generate new ones // if cs params exist, use them, if not, generate new ones
let cs_pp: CS1::ProverParams;
let cs_vp: CS1::VerifierParams;
let cf_cs_pp: CS2::ProverParams;
let cf_cs_vp: CS2::VerifierParams;
if prep_param.cs_pp.is_some()
&& prep_param.cf_cs_pp.is_some()
&& prep_param.cs_vp.is_some()
&& prep_param.cf_cs_vp.is_some()
{
cs_pp = prep_param.clone().cs_pp.unwrap();
cs_vp = prep_param.clone().cs_vp.unwrap();
cf_cs_pp = prep_param.clone().cf_cs_pp.unwrap();
cf_cs_vp = prep_param.clone().cf_cs_vp.unwrap();
} else {
(cs_pp, cs_vp) = CS1::setup(&mut rng, r1cs.A.n_rows)?;
(cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, cf_r1cs.A.n_rows)?;
}
let (cs_pp, cs_vp) = match (&prep_param.cs_pp, &prep_param.cs_vp) {
(Some(cs_pp), Some(cs_vp)) => (cs_pp.clone(), cs_vp.clone()),
_ => CS1::setup(&mut rng, r1cs.A.n_rows)?,
};
let (cf_cs_pp, cf_cs_vp) = match (&prep_param.cf_cs_pp, &prep_param.cf_cs_vp) {
(Some(cf_cs_pp), Some(cf_cs_vp)) => (cf_cs_pp.clone(), cf_cs_vp.clone()),
_ => CS2::setup(&mut rng, cf_r1cs.A.n_rows)?,
};
let prover_params = ProverParams::<C1, C2, CS1, CS2, H> { let prover_params = ProverParams::<C1, C2, CS1, CS2, H> {
poseidon_config: prep_param.poseidon_config.clone(), poseidon_config: prep_param.poseidon_config.clone(),
@ -601,12 +602,12 @@ where
augmented_F_circuit.generate_constraints(cs.clone())?; augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
let r1cs = extract_r1cs::<C1::ScalarField>(&cs)?;
cf_circuit.generate_constraints(cs2.clone())?; cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize(); cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2)?;
// compute the public params hash // compute the public params hash
let pp_hash = vp.pp_hash()?; let pp_hash = vp.pp_hash()?;
@ -946,7 +947,7 @@ where
} = ivc_proof; } = ivc_proof;
let (pp, vp) = params; let (pp, vp) = params;
let f_circuit = FC::new(fcircuit_params).unwrap();
let f_circuit = FC::new(fcircuit_params)?;
let cs = ConstraintSystem::<C1::ScalarField>::new_ref(); let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref(); let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
let augmented_F_circuit = let augmented_F_circuit =
@ -956,12 +957,12 @@ where
augmented_F_circuit.generate_constraints(cs.clone())?; augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
let r1cs = extract_r1cs::<C1::ScalarField>(&cs)?;
cf_circuit.generate_constraints(cs2.clone())?; cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize(); cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2)?;
Ok(Self { Ok(Self {
_gc1: PhantomData, _gc1: PhantomData,
@ -1056,8 +1057,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
// folds the given cyclefold circuit and its instances // folds the given cyclefold circuit and its instances
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
@ -1102,7 +1101,7 @@ pub fn get_r1cs_from_cs(
circuit.generate_constraints(cs.clone())?; circuit.generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<F>(&cs);
let r1cs = extract_r1cs::<F>(&cs)?;
Ok(r1cs) Ok(r1cs)
} }
@ -1123,8 +1122,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
let augmented_F_circuit = let augmented_F_circuit =
AugmentedFCircuit::<C1, C2, GC2, FC>::empty(poseidon_config, F_circuit); AugmentedFCircuit::<C1, C2, GC2, FC>::empty(poseidon_config, F_circuit);
@ -1151,8 +1148,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
let (r1cs, cf_r1cs) = get_r1cs::<C1, GC1, C2, GC2, FC>(poseidon_config, F_circuit)?; let (r1cs, cf_r1cs) = get_r1cs::<C1, GC1, C2, GC2, FC>(poseidon_config, F_circuit)?;
Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows)) Ok((r1cs.A.n_rows, cf_r1cs.A.n_rows))

+ 0
- 3
folding-schemes/src/folding/nova/nifs/nova.rs

@ -35,8 +35,6 @@
NIFSTrait<C, CS, T, H> for NIFS<C, CS, T, H> NIFSTrait<C, CS, T, H> for NIFS<C, CS, T, H>
where where
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: PrimeField,
<C as Group>::ScalarField: PrimeField,
{ {
type CommittedInstance = CommittedInstance<C>; type CommittedInstance = CommittedInstance<C>;
type Witness = Witness<C>; type Witness = Witness<C>;
@ -150,7 +148,6 @@
NIFS<C, CS, T, H> NIFS<C, CS, T, H>
where where
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: PrimeField,
{ {
/// compute_T: compute cross-terms T /// compute_T: compute cross-terms T
pub fn compute_T( pub fn compute_T(

+ 31
- 2
folding-schemes/src/folding/nova/traits.rs

@ -1,11 +1,18 @@
use ark_ec::CurveGroup; use ark_ec::CurveGroup;
use ark_r1cs_std::fields::fp::FpVar;
use ark_relations::r1cs::SynthesisError;
use ark_std::{rand::RngCore, UniformRand}; use ark_std::{rand::RngCore, UniformRand};
use super::circuits::CommittedInstanceVar;
use super::decider_eth_circuit::WitnessVar;
use super::{CommittedInstance, Witness}; use super::{CommittedInstance, Witness};
use crate::arith::ArithSampler;
use crate::arith::{r1cs::R1CS, Arith};
use crate::arith::{
r1cs::{circuits::R1CSMatricesVar, R1CS},
Arith, ArithGadget, ArithSampler,
};
use crate::commitment::CommitmentScheme; use crate::commitment::CommitmentScheme;
use crate::folding::circuits::CF1; use crate::folding::circuits::CF1;
use crate::utils::gadgets::{EquivalenceGadget, VectorGadget};
use crate::Error; use crate::Error;
/// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and /// Implements `Arith` for R1CS, where the witness is of type [`Witness`], and
@ -95,3 +102,25 @@ impl ArithSampler, CommittedInstance> for R1CS
Ok((witness, cm_witness)) Ok((witness, cm_witness))
} }
} }
impl<C: CurveGroup> ArithGadget<WitnessVar<C>, CommittedInstanceVar<C>>
for R1CSMatricesVar<C::ScalarField, FpVar<C::ScalarField>>
{
type Evaluation = (Vec<FpVar<C::ScalarField>>, Vec<FpVar<C::ScalarField>>);
fn eval_relation(
&self,
w: &WitnessVar<C>,
u: &CommittedInstanceVar<C>,
) -> Result<Self::Evaluation, SynthesisError> {
self.eval_at_z(&[&[u.u.clone()][..], &u.x, &w.W].concat())
}
fn enforce_evaluation(
w: &WitnessVar<C>,
_u: &CommittedInstanceVar<C>,
(AzBz, uCz): Self::Evaluation,
) -> Result<(), SynthesisError> {
EquivalenceGadget::<C::ScalarField>::enforce_equivalent(&AzBz[..], &uCz.add(&w.E)?[..])
}
}
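For reference (restating the gadget above, not part of the diff): with `z` built as in `eval_relation`, the enforced relation is the relaxed R1CS check

Az \circ Bz = u \cdot Cz + E, \qquad z = (u,\; x,\; W)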

+ 1
- 6
folding-schemes/src/folding/nova/zk.rs

@ -43,10 +43,7 @@ use ark_crypto_primitives::sponge::{
Absorb, CryptographicSponge, Absorb, CryptographicSponge,
}; };
use ark_ec::{CurveGroup, Group}; use ark_ec::{CurveGroup, Group};
use ark_r1cs_std::{
groups::{CurveVar, GroupOpsBounds},
ToConstraintFieldGadget,
};
use ark_r1cs_std::{groups::CurveVar, ToConstraintFieldGadget};
use crate::{commitment::CommitmentScheme, folding::circuits::CF2, frontend::FCircuit, Error}; use crate::{commitment::CommitmentScheme, folding::circuits::CF2, frontend::FCircuit, Error};
@ -90,7 +87,6 @@ where
<C2 as Group>::ScalarField: PrimeField, <C2 as Group>::ScalarField: PrimeField,
<C2 as CurveGroup>::BaseField: PrimeField, <C2 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: Absorb, <C2 as CurveGroup>::BaseField: Absorb,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
GC2: ToConstraintFieldGadget<<C2 as CurveGroup>::BaseField>, GC2: ToConstraintFieldGadget<<C2 as CurveGroup>::BaseField>,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
{ {
@ -161,7 +157,6 @@ where
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
<C2 as CurveGroup>::BaseField: PrimeField, <C2 as CurveGroup>::BaseField: PrimeField,
<C2 as CurveGroup>::BaseField: Absorb, <C2 as CurveGroup>::BaseField: Absorb,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
GC2: ToConstraintFieldGadget<<C2 as CurveGroup>::BaseField>, GC2: ToConstraintFieldGadget<<C2 as CurveGroup>::BaseField>,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
{ {

+ 8
- 16
folding-schemes/src/folding/protogalaxy/circuits.rs

@ -11,7 +11,7 @@ use ark_r1cs_std::{
boolean::Boolean, boolean::Boolean,
eq::EqGadget, eq::EqGadget,
fields::{fp::FpVar, FieldVar}, fields::{fp::FpVar, FieldVar},
groups::{CurveVar, GroupOpsBounds},
groups::CurveVar,
poly::polynomial::univariate::dense::DensePolynomialVar, poly::polynomial::univariate::dense::DensePolynomialVar,
R1CSVar, ToBitsGadget, ToConstraintFieldGadget, R1CSVar, ToBitsGadget, ToConstraintFieldGadget,
}; };
@ -77,7 +77,8 @@ impl FoldingGadget {
let betas_star = betas_star_var(&instance.betas, &deltas, &alpha); let betas_star = betas_star_var(&instance.betas, &deltas, &alpha);
let k = vec_instances.len(); let k = vec_instances.len();
let H = GeneralEvaluationDomain::new(k + 1).unwrap();
let H =
GeneralEvaluationDomain::new(k + 1).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
let L_X = lagrange_polys(H) let L_X = lagrange_polys(H)
.into_iter() .into_iter()
.map(|poly| { .map(|poly| {
@ -183,7 +184,6 @@ impl AugmentationGadget {
) -> Result<CycleFoldCommittedInstanceVar<C2, GC2>, SynthesisError> ) -> Result<CycleFoldCommittedInstanceVar<C2, GC2>, SynthesisError>
where where
C2::BaseField: PrimeField + Absorb, C2::BaseField: PrimeField + Absorb,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
assert_eq!(cf_u_cmWs.len(), cf_u_xs.len()); assert_eq!(cf_u_cmWs.len(), cf_u_xs.len());
assert_eq!(cf_u_xs.len(), cf_cmTs.len()); assert_eq!(cf_u_xs.len(), cf_cmTs.len());
@ -268,8 +268,6 @@ pub struct AugmentedFCircuit<
impl<C1: CurveGroup, C2: CurveGroup, GC2: CurveVar<C2, CF2<C2>>, FC: FCircuit<CF1<C1>>> impl<C1: CurveGroup, C2: CurveGroup, GC2: CurveVar<C2, CF2<C2>>, FC: FCircuit<CF1<C1>>>
AugmentedFCircuit<C1, C2, GC2, FC> AugmentedFCircuit<C1, C2, GC2, FC>
where
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
pub fn empty( pub fn empty(
poseidon_config: &PoseidonConfig<CF1<C1>>, poseidon_config: &PoseidonConfig<CF1<C1>>,
@ -317,7 +315,6 @@ where
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>, GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<CF1<C1>>, FC: FCircuit<CF1<C1>>,
C2::BaseField: PrimeField + Absorb, C2::BaseField: PrimeField + Absorb,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> { fn generate_constraints(self, cs: ConstraintSystemRef<CF1<C1>>) -> Result<(), SynthesisError> {
let pp_hash = FpVar::<CF1<C1>>::new_witness(cs.clone(), || Ok(self.pp_hash))?; let pp_hash = FpVar::<CF1<C1>>::new_witness(cs.clone(), || Ok(self.pp_hash))?;
@ -498,7 +495,7 @@ mod tests {
let mut transcript_p = PoseidonSponge::new(&poseidon_config); let mut transcript_p = PoseidonSponge::new(&poseidon_config);
let mut transcript_v = PoseidonSponge::new(&poseidon_config); let mut transcript_v = PoseidonSponge::new(&poseidon_config);
let (_, _, F_coeffs, K_coeffs, _, _) = Folding::<Projective>::prove(
let (_, _, proof, _) = Folding::<Projective>::prove(
&mut transcript_p, &mut transcript_p,
&r1cs, &r1cs,
&instance, &instance,
@ -507,20 +504,15 @@ mod tests {
&witnesses, &witnesses,
)?; )?;
let folded_instance = Folding::<Projective>::verify(
&mut transcript_v,
&instance,
&instances,
F_coeffs.clone(),
K_coeffs.clone(),
)?;
let folded_instance =
Folding::<Projective>::verify(&mut transcript_v, &instance, &instances, proof.clone())?;
let cs = ConstraintSystem::new_ref(); let cs = ConstraintSystem::new_ref();
let mut transcript_var = PoseidonSpongeVar::new(cs.clone(), &poseidon_config); let mut transcript_var = PoseidonSpongeVar::new(cs.clone(), &poseidon_config);
let instance_var = CommittedInstanceVar::new_witness(cs.clone(), || Ok(instance))?; let instance_var = CommittedInstanceVar::new_witness(cs.clone(), || Ok(instance))?;
let instances_var = Vec::new_witness(cs.clone(), || Ok(instances))?; let instances_var = Vec::new_witness(cs.clone(), || Ok(instances))?;
let F_coeffs_var = Vec::new_witness(cs.clone(), || Ok(F_coeffs))?;
let K_coeffs_var = Vec::new_witness(cs.clone(), || Ok(K_coeffs))?;
let F_coeffs_var = Vec::new_witness(cs.clone(), || Ok(proof.F_coeffs))?;
let K_coeffs_var = Vec::new_witness(cs.clone(), || Ok(proof.K_coeffs))?;
let (folded_instance_var, _) = FoldingGadget::fold_committed_instance( let (folded_instance_var, _) = FoldingGadget::fold_committed_instance(
&mut transcript_var, &mut transcript_var,

+ 252
- 0
folding-schemes/src/folding/protogalaxy/decider_eth_circuit.rs

@ -0,0 +1,252 @@
/// This file implements the onchain (Ethereum's EVM) decider circuit. For non-Ethereum use cases,
/// other, more efficient approaches can be used.
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
Absorb, CryptographicSponge,
};
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
eq::EqGadget,
fields::fp::FpVar,
groups::CurveVar,
ToConstraintFieldGadget,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{borrow::Borrow, marker::PhantomData};
use crate::{
arith::r1cs::{circuits::R1CSMatricesVar, R1CS},
commitment::{pedersen::Params as PedersenParams, CommitmentScheme},
folding::{
circuits::{
decider::{
on_chain::GenericOnchainDeciderCircuit, DeciderEnabledNIFS, EvalGadget,
KZGChallengesGadget,
},
CF1, CF2,
},
traits::{WitnessOps, WitnessVarOps},
},
frontend::FCircuit,
Error,
};
use super::{
circuits::FoldingGadget,
constants::{INCOMING, RUNNING},
folding::{Folding, ProtoGalaxyProof},
CommittedInstance, CommittedInstanceVar, ProtoGalaxy, Witness,
};
/// In-circuit representation of the Witness associated with the CommittedInstance.
#[derive(Debug, Clone)]
pub struct WitnessVar<F: PrimeField> {
pub W: Vec<FpVar<F>>,
pub rW: FpVar<F>,
}
impl<F: PrimeField> AllocVar<Witness<F>, F> for WitnessVar<F> {
fn new_variable<T: Borrow<Witness<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|val| {
let cs = cs.into();
let W = Vec::new_variable(cs.clone(), || Ok(val.borrow().w.to_vec()), mode)?;
let rW = FpVar::new_variable(cs.clone(), || Ok(val.borrow().r_w), mode)?;
Ok(Self { W, rW })
})
}
}
impl<F: PrimeField> WitnessVarOps<F> for WitnessVar<F> {
fn get_openings(&self) -> Vec<(&[FpVar<F>], FpVar<F>)> {
vec![(&self.W, self.rW.clone())]
}
}
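A hypothetical allocation sketch (not part of the diff): it assumes the types above and the `WitnessVarOps` trait are in scope, and uses `ark_bn254::Fr` for concreteness, to show how a `WitnessVar` can be allocated from a native `Witness` and its single (W, rW) opening read back.

use ark_bn254::Fr;
use ark_r1cs_std::alloc::AllocVar;
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};

fn allocate_witness(w: &Witness<Fr>) -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    // allocates both the witness vector W and the commitment randomness rW
    let w_var = WitnessVar::<Fr>::new_witness(cs.clone(), || Ok(w.clone()))?;
    // `get_openings` exposes the committed vector together with its blinding factor
    let _openings = w_var.get_openings();
    Ok(())
}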
pub type DeciderEthCircuit<C1, C2, GC2> = GenericOnchainDeciderCircuit<
C1,
C2,
GC2,
CommittedInstance<C1, RUNNING>,
CommittedInstance<C1, INCOMING>,
Witness<CF1<C1>>,
R1CS<CF1<C1>>,
R1CSMatricesVar<CF1<C1>, FpVar<CF1<C1>>>,
DeciderProtoGalaxyGadget,
>;
/// Returns an instance of the DeciderEthCircuit from the given ProtoGalaxy struct
impl<
C1: CurveGroup,
GC1: CurveVar<C1, CF2<C1>> + ToConstraintFieldGadget<CF2<C1>>,
C2: CurveGroup,
GC2: CurveVar<C2, CF2<C2>> + ToConstraintFieldGadget<CF2<C2>>,
FC: FCircuit<C1::ScalarField>,
CS1: CommitmentScheme<C1, false>,
// enforce that CS2 is the Pedersen commitment scheme, since we target Ethereum's EVM decider
CS2: CommitmentScheme<C2, false, ProverParams = PedersenParams<C2>>,
> TryFrom<ProtoGalaxy<C1, GC1, C2, GC2, FC, CS1, CS2>> for DeciderEthCircuit<C1, C2, GC2>
where
CF1<C1>: Absorb,
{
type Error = Error;
fn try_from(protogalaxy: ProtoGalaxy<C1, GC1, C2, GC2, FC, CS1, CS2>) -> Result<Self, Error> {
let mut transcript = PoseidonSponge::<C1::ScalarField>::new(&protogalaxy.poseidon_config);
let (U_i1, W_i1, proof, aux) = Folding::prove(
&mut transcript,
&protogalaxy.r1cs,
&protogalaxy.U_i,
&protogalaxy.W_i,
&[protogalaxy.u_i.clone()],
&[protogalaxy.w_i.clone()],
)?;
// compute the KZG challenges used as inputs in the circuit
let kzg_challenges = KZGChallengesGadget::get_challenges_native(&mut transcript, &U_i1);
// get KZG evals
let kzg_evaluations = W_i1
.get_openings()
.iter()
.zip(&kzg_challenges)
.map(|((v, _), &c)| EvalGadget::evaluate_native(v, c))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
_gc2: PhantomData,
_avar: PhantomData,
arith: protogalaxy.r1cs,
cf_arith: protogalaxy.cf_r1cs,
cf_pedersen_params: protogalaxy.cf_cs_params,
poseidon_config: protogalaxy.poseidon_config,
pp_hash: protogalaxy.pp_hash,
i: protogalaxy.i,
z_0: protogalaxy.z_0,
z_i: protogalaxy.z_i,
U_i: protogalaxy.U_i,
W_i: protogalaxy.W_i,
u_i: protogalaxy.u_i,
w_i: protogalaxy.w_i,
U_i1,
W_i1,
proof,
randomness: aux.L_X_evals,
cf_U_i: protogalaxy.cf_U_i,
cf_W_i: protogalaxy.cf_W_i,
kzg_challenges,
kzg_evaluations,
})
}
}
pub struct DeciderProtoGalaxyGadget;
impl<C: CurveGroup>
DeciderEnabledNIFS<
C,
CommittedInstance<C, RUNNING>,
CommittedInstance<C, INCOMING>,
Witness<CF1<C>>,
R1CS<CF1<C>>,
> for DeciderProtoGalaxyGadget
{
type Proof = ProtoGalaxyProof<CF1<C>>;
type ProofDummyCfg = (usize, usize, usize);
type Randomness = Vec<CF1<C>>;
type RandomnessDummyCfg = usize;
fn fold_field_elements_gadget(
_arith: &R1CS<CF1<C>>,
transcript: &mut PoseidonSpongeVar<CF1<C>>,
_pp_hash: FpVar<CF1<C>>,
U: CommittedInstanceVar<C, RUNNING>,
_U_vec: Vec<FpVar<CF1<C>>>,
u: CommittedInstanceVar<C, INCOMING>,
proof: Self::Proof,
randomness: Self::Randomness,
) -> Result<CommittedInstanceVar<C, RUNNING>, SynthesisError> {
let cs = transcript.cs();
let F_coeffs = Vec::new_witness(cs.clone(), || Ok(&proof.F_coeffs[..]))?;
let K_coeffs = Vec::new_witness(cs.clone(), || Ok(&proof.K_coeffs[..]))?;
let randomness = Vec::new_input(cs.clone(), || Ok(randomness))?;
let (U_next, L_X_evals) =
FoldingGadget::fold_committed_instance(transcript, &U, &[u], F_coeffs, K_coeffs)?;
L_X_evals.enforce_equal(&randomness)?;
Ok(U_next)
}
fn fold_group_elements_native(
U_commitments: &[C],
u_commitments: &[C],
_: Option<Self::Proof>,
L_X_evals: Self::Randomness,
) -> Result<Vec<C>, Error> {
let U_phi = U_commitments[0];
let u_phi = u_commitments[0];
Ok(vec![U_phi * L_X_evals[0] + u_phi * L_X_evals[1]])
}
}
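Restated (not new code): for a single incoming instance, the commitment folding above computes the folded commitment as a combination weighted by the Lagrange basis evaluations carried in `Randomness` (the prover's `L_X_evals`), i.e.

\phi^{\star} = L_0 \cdot \phi_{U} + L_1 \cdot \phi_{u}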
#[cfg(test)]
pub mod tests {
use ark_bn254::{constraints::GVar, Fr, G1Projective as Projective};
use ark_grumpkin::{constraints::GVar as GVar2, Projective as Projective2};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use super::*;
use crate::commitment::pedersen::Pedersen;
use crate::folding::protogalaxy::ProtoGalaxy;
use crate::frontend::{utils::CubicFCircuit, FCircuit};
use crate::transcript::poseidon::poseidon_canonical_config;
use crate::FoldingScheme;
#[test]
fn test_decider_circuit() {
let mut rng = ark_std::test_rng();
let poseidon_config = poseidon_canonical_config::<Fr>();
let F_circuit = CubicFCircuit::<Fr>::new(()).unwrap();
let z_0 = vec![Fr::from(3_u32)];
type PG = ProtoGalaxy<
Projective,
GVar,
Projective2,
GVar2,
CubicFCircuit<Fr>,
Pedersen<Projective>,
Pedersen<Projective2>,
>;
let pg_params = PG::preprocess(&mut rng, &(poseidon_config, F_circuit)).unwrap();
// generate a ProtoGalaxy instance and do a step of it
let mut protogalaxy = PG::init(&pg_params, F_circuit, z_0.clone()).unwrap();
protogalaxy.prove_step(&mut rng, vec![], None).unwrap();
let ivc_proof = protogalaxy.ivc_proof();
PG::verify(pg_params.1, ivc_proof).unwrap();
// load the DeciderEthCircuit from the generated ProtoGalaxy instance
let decider_circuit =
DeciderEthCircuit::<Projective, Projective2, GVar2>::try_from(protogalaxy).unwrap();
let cs = ConstraintSystem::<Fr>::new_ref();
// generate the constraints and check that they are satisfied by the inputs
decider_circuit.generate_constraints(cs.clone()).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
}

+ 53
- 43
folding-schemes/src/folding/protogalaxy/folding.rs

@ -15,10 +15,32 @@ use super::ProtoGalaxyError;
use super::{CommittedInstance, Witness}; use super::{CommittedInstance, Witness};
use crate::arith::r1cs::R1CS; use crate::arith::r1cs::R1CS;
use crate::folding::traits::Dummy;
use crate::transcript::Transcript; use crate::transcript::Transcript;
use crate::utils::vec::*; use crate::utils::vec::*;
use crate::Error; use crate::Error;
#[derive(Debug, Clone)]
pub struct ProtoGalaxyProof<F: PrimeField> {
pub F_coeffs: Vec<F>,
pub K_coeffs: Vec<F>,
}
impl<F: PrimeField> Dummy<(usize, usize, usize)> for ProtoGalaxyProof<F> {
fn dummy((t, d, k): (usize, usize, usize)) -> Self {
Self {
F_coeffs: vec![F::zero(); t],
K_coeffs: vec![F::zero(); d * k + 1],
}
}
}
#[derive(Debug, Clone)]
pub struct ProtoGalaxyAux<C: CurveGroup> {
pub L_X_evals: Vec<C::ScalarField>,
pub phi_stars: Vec<C>,
}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
/// Implements the protocol described in section 4 of /// Implements the protocol described in section 4 of
/// [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf) /// [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
@ -28,7 +50,6 @@ pub struct Folding {
impl<C: CurveGroup> Folding<C> impl<C: CurveGroup> Folding<C>
where where
<C as Group>::ScalarField: Absorb, <C as Group>::ScalarField: Absorb,
<C as CurveGroup>::BaseField: Absorb,
{ {
#![allow(clippy::type_complexity)] #![allow(clippy::type_complexity)]
/// implements the non-interactive Prover from the folding scheme described in section 4 /// implements the non-interactive Prover from the folding scheme described in section 4
@ -45,10 +66,8 @@ where
( (
CommittedInstance<C, true>, CommittedInstance<C, true>,
Witness<C::ScalarField>, Witness<C::ScalarField>,
Vec<C::ScalarField>, // F_X coeffs
Vec<C::ScalarField>, // K_X coeffs
Vec<C::ScalarField>, // L_X evals
Vec<C>, // phi_stars
ProtoGalaxyProof<C::ScalarField>,
ProtoGalaxyAux<C>,
), ),
Error, Error,
> { > {
@ -243,10 +262,11 @@ where
w: w_star, w: w_star,
r_w: r_w_star, r_w: r_w_star,
}, },
F_coeffs,
K_coeffs,
L_X_evals,
phi_stars,
ProtoGalaxyProof { F_coeffs, K_coeffs },
ProtoGalaxyAux {
L_X_evals,
phi_stars,
},
)) ))
} }
@ -258,8 +278,7 @@ where
// incoming instances // incoming instances
vec_instances: &[CommittedInstance<C, false>], vec_instances: &[CommittedInstance<C, false>],
// polys from P // polys from P
F_coeffs: Vec<C::ScalarField>,
K_coeffs: Vec<C::ScalarField>,
proof: ProtoGalaxyProof<C::ScalarField>,
) -> Result<CommittedInstance<C, true>, Error> { ) -> Result<CommittedInstance<C, true>, Error> {
let t = instance.betas.len(); let t = instance.betas.len();
@ -270,20 +289,20 @@ where
let delta = transcript.get_challenge(); let delta = transcript.get_challenge();
let deltas = exponential_powers(delta, t); let deltas = exponential_powers(delta, t);
transcript.absorb(&F_coeffs);
transcript.absorb(&proof.F_coeffs);
let alpha = transcript.get_challenge(); let alpha = transcript.get_challenge();
let alphas = all_powers(alpha, t); let alphas = all_powers(alpha, t);
// F(alpha) = e + \sum_t F_i * alpha^i // F(alpha) = e + \sum_t F_i * alpha^i
let mut F_alpha = instance.e; let mut F_alpha = instance.e;
for (i, F_i) in F_coeffs.iter().skip(1).enumerate() {
for (i, F_i) in proof.F_coeffs.iter().skip(1).enumerate() {
F_alpha += *F_i * alphas[i + 1]; F_alpha += *F_i * alphas[i + 1];
} }
let betas_star = betas_star(&instance.betas, &deltas, alpha); let betas_star = betas_star(&instance.betas, &deltas, alpha);
transcript.absorb(&K_coeffs);
transcript.absorb(&proof.K_coeffs);
let k = vec_instances.len(); let k = vec_instances.len();
let H = let H =
@ -291,7 +310,7 @@ where
let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H); let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into(); let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
let K_X: DensePolynomial<C::ScalarField> = let K_X: DensePolynomial<C::ScalarField> =
DensePolynomial::<C::ScalarField>::from_coefficients_vec(K_coeffs);
DensePolynomial::<C::ScalarField>::from_coefficients_vec(proof.K_coeffs);
let gamma = transcript.get_challenge(); let gamma = transcript.get_challenge();
@ -483,27 +502,20 @@ pub mod tests {
let mut transcript_p = PoseidonSponge::<Fr>::new(&poseidon_config); let mut transcript_p = PoseidonSponge::<Fr>::new(&poseidon_config);
let mut transcript_v = PoseidonSponge::<Fr>::new(&poseidon_config); let mut transcript_v = PoseidonSponge::<Fr>::new(&poseidon_config);
let (folded_instance, folded_witness, F_coeffs, K_coeffs, _, _) =
Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&instance,
&witness,
&instances,
&witnesses,
)
.unwrap();
// verifier
let folded_instance_v = Folding::<Projective>::verify(
&mut transcript_v,
let (folded_instance, folded_witness, proof, _) = Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&instance, &instance,
&witness,
&instances, &instances,
F_coeffs,
K_coeffs,
&witnesses,
) )
.unwrap(); .unwrap();
// verifier
let folded_instance_v =
Folding::<Projective>::verify(&mut transcript_v, &instance, &instances, proof).unwrap();
// check that prover & verifier folded instances are the same values // check that prover & verifier folded instances are the same values
assert_eq!(folded_instance.phi, folded_instance_v.phi); assert_eq!(folded_instance.phi, folded_instance_v.phi);
assert_eq!(folded_instance.betas, folded_instance_v.betas); assert_eq!(folded_instance.betas, folded_instance_v.betas);
@ -533,24 +545,22 @@ pub mod tests {
// generate the instances to be folded // generate the instances to be folded
let (_, _, witnesses, instances) = prepare_inputs(k); let (_, _, witnesses, instances) = prepare_inputs(k);
let (folded_instance, folded_witness, F_coeffs, K_coeffs, _, _) =
Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&running_instance,
&running_witness,
&instances,
&witnesses,
)
.unwrap();
let (folded_instance, folded_witness, proof, _) = Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&running_instance,
&running_witness,
&instances,
&witnesses,
)
.unwrap();
// verifier // verifier
let folded_instance_v = Folding::<Projective>::verify( let folded_instance_v = Folding::<Projective>::verify(
&mut transcript_v, &mut transcript_v,
&running_instance, &running_instance,
&instances, &instances,
F_coeffs,
K_coeffs,
proof,
) )
.unwrap(); .unwrap();

+ 34
- 34
folding-schemes/src/folding/protogalaxy/mod.rs

@ -9,7 +9,7 @@ use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode}, alloc::{AllocVar, AllocationMode},
eq::EqGadget, eq::EqGadget,
fields::{fp::FpVar, FieldVar}, fields::{fp::FpVar, FieldVar},
groups::{CurveVar, GroupOpsBounds},
groups::CurveVar,
R1CSVar, ToConstraintFieldGadget, R1CSVar, ToConstraintFieldGadget,
}; };
use ark_relations::r1cs::{ use ark_relations::r1cs::{
@ -44,6 +44,7 @@ use crate::{
pub mod circuits; pub mod circuits;
pub mod constants; pub mod constants;
pub mod decider_eth_circuit;
pub mod folding; pub mod folding;
pub mod traits; pub mod traits;
pub(crate) mod utils; pub(crate) mod utils;
@ -52,7 +53,7 @@ use circuits::AugmentedFCircuit;
use folding::Folding; use folding::Folding;
use super::traits::{ use super::traits::{
CommittedInstanceOps, CommittedInstanceVarOps, Dummy, WitnessOps, WitnessVarOps,
CommittedInstanceOps, CommittedInstanceVarOps, Dummy, Inputize, WitnessOps, WitnessVarOps,
}; };
/// Configuration for ProtoGalaxy's CycleFold circuit /// Configuration for ProtoGalaxy's CycleFold circuit
@ -121,6 +122,14 @@
} }
} }
impl<C: CurveGroup, const TYPE: bool> Inputize<C::ScalarField, CommittedInstanceVar<C, TYPE>>
for CommittedInstance<C, TYPE>
{
fn inputize(&self) -> Vec<C::ScalarField> {
[&self.phi.inputize(), &self.betas, &[self.e][..], &self.x].concat()
}
}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct CommittedInstanceVar<C: CurveGroup, const TYPE: bool> { pub struct CommittedInstanceVar<C: CurveGroup, const TYPE: bool> {
phi: NonNativeAffineVar<C>, phi: NonNativeAffineVar<C>,
@ -534,8 +543,6 @@ where
C1::ScalarField: Absorb, C1::ScalarField: Absorb,
C2::ScalarField: Absorb, C2::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
/// This method computes the parameter `t` in ProtoGalaxy for folding `F'`, /// This method computes the parameter `t` in ProtoGalaxy for folding `F'`,
/// the augmented circuit of `F` /// the augmented circuit of `F`
@ -544,7 +551,7 @@ where
F: &FC, F: &FC,
d: usize, d: usize,
k: usize, k: usize,
) -> Result<usize, SynthesisError> {
) -> Result<usize, Error> {
// In ProtoGalaxy, prover and verifier are parameterized by `t = log(n)` // In ProtoGalaxy, prover and verifier are parameterized by `t = log(n)`
// where `n` is the number of constraints in the circuit (known as the // where `n` is the number of constraints in the circuit (known as the
// mapping `f` in the paper). // mapping `f` in the paper).
@ -581,7 +588,7 @@ where
// Create a dummy circuit with the same state length and external inputs // Create a dummy circuit with the same state length and external inputs
// length as `F`, which replaces `F` in the augmented circuit `F'`. // length as `F`, which replaces `F` in the augmented circuit `F'`.
let dummy_circuit: DummyCircuit = let dummy_circuit: DummyCircuit =
FCircuit::<C1::ScalarField>::new((state_len, external_inputs_len)).unwrap();
FCircuit::<C1::ScalarField>::new((state_len, external_inputs_len))?;
// Compute `augmentation_constraints`, the size of `F'` without `F`. // Compute `augmentation_constraints`, the size of `F'` without `F`.
let cs = ConstraintSystem::<C1::ScalarField>::new_ref(); let cs = ConstraintSystem::<C1::ScalarField>::new_ref();
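An illustrative aside (not part of the diff) on the `t = log(n)` parameterization described in the comments of `compute_t` above: assuming `t` is taken as the ceiling of the base-2 logarithm of the (padded) number of constraints of the augmented circuit, and ignoring that the augmented circuit's size itself depends on `t`, the relation is just

use ark_std::log2;

// illustrative only: `ark_std::log2` returns the ceiling of log2
fn t_for_n_constraints(n: usize) -> usize {
    log2(n) as usize
}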
@ -640,8 +647,6 @@ where
C1::ScalarField: Absorb, C1::ScalarField: Absorb,
C2::ScalarField: Absorb, C2::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
type PreprocessorParam = (PoseidonConfig<CF1<C1>>, FC); type PreprocessorParam = (PoseidonConfig<CF1<C1>>, FC);
type ProverParam = ProverParams<C1, C2, CS1, CS2>; type ProverParam = ProverParams<C1, C2, CS1, CS2>;
@ -692,7 +697,7 @@ where
augmented_F_circuit.generate_constraints(cs.clone())?; augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
let r1cs = extract_r1cs::<C1::ScalarField>(&cs)?;
// CycleFold circuit R1CS // CycleFold circuit R1CS
let cs2 = ConstraintSystem::<C1::BaseField>::new_ref(); let cs2 = ConstraintSystem::<C1::BaseField>::new_ref();
@ -700,7 +705,7 @@ where
cf_circuit.generate_constraints(cs2.clone())?; cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize(); cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2)?;
let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; let cs_vp = CS1::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?; let cf_cs_vp = CS2::VerifierParams::deserialize_with_mode(&mut reader, compress, validate)?;
@ -740,12 +745,12 @@ where
augmented_F_circuit.generate_constraints(cs.clone())?; augmented_F_circuit.generate_constraints(cs.clone())?;
cs.finalize(); cs.finalize();
let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs = cs.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let r1cs = extract_r1cs::<C1::ScalarField>(&cs);
let r1cs = extract_r1cs::<C1::ScalarField>(&cs)?;
cf_circuit.generate_constraints(cs2.clone())?; cf_circuit.generate_constraints(cs2.clone())?;
cs2.finalize(); cs2.finalize();
let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?; let cs2 = cs2.into_inner().ok_or(Error::NoInnerConstraintSystem)?;
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2);
let cf_r1cs = extract_r1cs::<C1::BaseField>(&cs2)?;
let (cs_pp, cs_vp) = CS1::setup(&mut rng, r1cs.A.n_rows)?; let (cs_pp, cs_vp) = CS1::setup(&mut rng, r1cs.A.n_rows)?;
let (cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, max(cf_r1cs.A.n_rows, cf_r1cs.A.n_cols))?; let (cf_cs_pp, cf_cs_vp) = CS2::setup(&mut rng, max(cf_r1cs.A.n_rows, cf_r1cs.A.n_cols))?;
@ -898,7 +903,7 @@ where
} else { } else {
// Primary part: // Primary part:
// Compute `U_{i+1}` by folding `u_i` into `U_i`. // Compute `U_{i+1}` by folding `u_i` into `U_i`.
let (U_i1, W_i1, F_coeffs, K_coeffs, L_evals, phi_stars) = Folding::prove(
let (U_i1, W_i1, proof, aux) = Folding::prove(
&mut transcript_prover, &mut transcript_prover,
&self.r1cs, &self.r1cs,
&self.U_i, &self.U_i,
@ -909,8 +914,8 @@ where
// CycleFold part: // CycleFold part:
// get the vector used as public inputs 'x' in the CycleFold circuit // get the vector used as public inputs 'x' in the CycleFold circuit
let mut r0_bits = L_evals[0].into_bigint().to_bits_le();
let mut r1_bits = L_evals[1].into_bigint().to_bits_le();
let mut r0_bits = aux.L_X_evals[0].into_bigint().to_bits_le();
let mut r1_bits = aux.L_X_evals[1].into_bigint().to_bits_le();
r0_bits.resize(C1::ScalarField::MODULUS_BIT_SIZE as usize, false); r0_bits.resize(C1::ScalarField::MODULUS_BIT_SIZE as usize, false);
r1_bits.resize(C1::ScalarField::MODULUS_BIT_SIZE as usize, false); r1_bits.resize(C1::ScalarField::MODULUS_BIT_SIZE as usize, false);
@ -919,13 +924,12 @@ where
let cf1_u_i_x = [ let cf1_u_i_x = [
r0_bits r0_bits
.chunks(C1::BaseField::MODULUS_BIT_SIZE as usize - 1) .chunks(C1::BaseField::MODULUS_BIT_SIZE as usize - 1)
.map(BigInteger::from_bits_le)
.map(C1::BaseField::from_bigint)
.collect::<Option<Vec<_>>>()
.unwrap(),
.map(<C1::BaseField as PrimeField>::BigInt::from_bits_le)
.map(C1::BaseField::from)
.collect::<Vec<_>>(),
get_cm_coordinates(&C1::zero()), get_cm_coordinates(&C1::zero()),
get_cm_coordinates(&self.U_i.phi), get_cm_coordinates(&self.U_i.phi),
get_cm_coordinates(&phi_stars[0]),
get_cm_coordinates(&aux.phi_stars[0]),
] ]
.concat(); .concat();
let cf1_circuit = ProtoGalaxyCycleFoldCircuit::<C1, GC1> { let cf1_circuit = ProtoGalaxyCycleFoldCircuit::<C1, GC1> {
@ -941,11 +945,10 @@ where
let cf2_u_i_x = [ let cf2_u_i_x = [
r1_bits r1_bits
.chunks(C1::BaseField::MODULUS_BIT_SIZE as usize - 1) .chunks(C1::BaseField::MODULUS_BIT_SIZE as usize - 1)
.map(BigInteger::from_bits_le)
.map(C1::BaseField::from_bigint)
.collect::<Option<Vec<_>>>()
.unwrap(),
get_cm_coordinates(&phi_stars[0]),
.map(<C1::BaseField as PrimeField>::BigInt::from_bits_le)
.map(C1::BaseField::from)
.collect::<Vec<_>>(),
get_cm_coordinates(&aux.phi_stars[0]),
get_cm_coordinates(&self.u_i.phi), get_cm_coordinates(&self.u_i.phi),
get_cm_coordinates(&U_i1.phi), get_cm_coordinates(&U_i1.phi),
] ]
@ -953,7 +956,7 @@ where
let cf2_circuit = ProtoGalaxyCycleFoldCircuit::<C1, GC1> { let cf2_circuit = ProtoGalaxyCycleFoldCircuit::<C1, GC1> {
_gc: PhantomData, _gc: PhantomData,
r_bits: Some(r1_bits), r_bits: Some(r1_bits),
points: Some(vec![phi_stars[0], self.u_i.phi]),
points: Some(vec![aux.phi_stars[0], self.u_i.phi]),
x: Some(cf2_u_i_x.clone()), x: Some(cf2_u_i_x.clone()),
}; };
@ -998,9 +1001,9 @@ where
u_i_phi: self.u_i.phi, u_i_phi: self.u_i.phi,
U_i: self.U_i.clone(), U_i: self.U_i.clone(),
U_i1_phi: U_i1.phi, U_i1_phi: U_i1.phi,
F_coeffs: F_coeffs.clone(),
K_coeffs: K_coeffs.clone(),
phi_stars,
F_coeffs: proof.F_coeffs.clone(),
K_coeffs: proof.K_coeffs.clone(),
phi_stars: aux.phi_stars,
F: self.F.clone(), F: self.F.clone(),
x: Some(u_i1_x), x: Some(u_i1_x),
// cyclefold values // cyclefold values
@ -1020,8 +1023,7 @@ where
&mut transcript_verifier, &mut transcript_verifier,
&self.U_i, &self.U_i,
&[self.u_i.clone()], &[self.u_i.clone()],
F_coeffs,
K_coeffs
proof
)?, )?,
U_i1 U_i1
); );
@ -1108,7 +1110,7 @@ where
} = ivc_proof; } = ivc_proof;
let (pp, vp) = params; let (pp, vp) = params;
let f_circuit = FC::new(fcircuit_params).unwrap();
let f_circuit = FC::new(fcircuit_params)?;
Ok(Self { Ok(Self {
_gc1: PhantomData, _gc1: PhantomData,
@ -1195,8 +1197,6 @@ where
<C1 as Group>::ScalarField: Absorb, <C1 as Group>::ScalarField: Absorb,
<C2 as Group>::ScalarField: Absorb, <C2 as Group>::ScalarField: Absorb,
C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>, C1: CurveGroup<BaseField = C2::ScalarField, ScalarField = C2::BaseField>,
for<'a> &'a GC1: GroupOpsBounds<'a, C1, GC1>,
for<'a> &'a GC2: GroupOpsBounds<'a, C2, GC2>,
{ {
// folds the given cyclefold circuit and its instances // folds the given cyclefold circuit and its instances
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]

+ 46
- 3
folding-schemes/src/folding/protogalaxy/traits.rs

@ -1,14 +1,26 @@
use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb}; use ark_crypto_primitives::sponge::{constraints::AbsorbGadget, Absorb};
use ark_ec::CurveGroup; use ark_ec::CurveGroup;
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_r1cs_std::{fields::fp::FpVar, uint8::UInt8, ToConstraintFieldGadget};
use ark_r1cs_std::{
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
uint8::UInt8,
ToConstraintFieldGadget,
};
use ark_relations::r1cs::SynthesisError; use ark_relations::r1cs::SynthesisError;
use ark_std::{cfg_into_iter, log2, One}; use ark_std::{cfg_into_iter, log2, One};
use rayon::prelude::*; use rayon::prelude::*;
use super::{constants::RUNNING, utils::pow_i, CommittedInstance, CommittedInstanceVar, Witness};
use super::{
constants::RUNNING,
utils::{pow_i, pow_i_var},
CommittedInstance, CommittedInstanceVar, Witness, WitnessVar,
};
use crate::{ use crate::{
arith::{r1cs::R1CS, Arith},
arith::{
r1cs::{circuits::R1CSMatricesVar, R1CS},
Arith, ArithGadget,
},
folding::circuits::CF1, folding::circuits::CF1,
transcript::AbsorbNonNative, transcript::AbsorbNonNative,
utils::vec::is_zero_vec, utils::vec::is_zero_vec,
@ -99,6 +111,37 @@
} }
} }
/// Unlike its native counterpart, this gadget only needs to support running
/// instances, as the decider circuit only checks running instance satisfiability.
impl<C: CurveGroup> ArithGadget<WitnessVar<CF1<C>>, CommittedInstanceVar<C, RUNNING>>
for R1CSMatricesVar<CF1<C>, FpVar<CF1<C>>>
{
type Evaluation = (Vec<FpVar<CF1<C>>>, Vec<FpVar<CF1<C>>>);
fn eval_relation(
&self,
w: &WitnessVar<CF1<C>>,
u: &CommittedInstanceVar<C, RUNNING>,
) -> Result<Self::Evaluation, SynthesisError> {
self.eval_at_z(&[&[FpVar::one()][..], &u.x, &w.W].concat())
}
fn enforce_evaluation(
_w: &WitnessVar<C::ScalarField>,
u: &CommittedInstanceVar<C, RUNNING>,
(AzBz, uCz): Self::Evaluation,
) -> Result<(), SynthesisError> {
let mut e = vec![];
for (i, (l, r)) in AzBz.iter().zip(uCz).enumerate() {
e.push(pow_i_var(i, &u.betas) * (l - r));
}
// Call `sum` on a vector instead of computing the sum in the above loop
// to avoid stack overflow (the cause of this is similar to issue #80
// https://github.com/privacy-scaling-explorations/sonobe/issues/80)
e.iter().sum::<FpVar<_>>().enforce_equal(&u.e)
}
}
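In equation form, the check enforced above is (a restatement of the gadget, with `z` built from a constant 1, the public inputs, and the witness):

e = \sum_{i} \mathrm{pow}_i(\beta) \cdot \big( (Az \circ Bz)_i - (Cz)_i \big), \qquad z = (1,\; x,\; W)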
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::*; use super::*;

+ 19
- 2
folding-schemes/src/folding/traits.rs

@ -12,7 +12,7 @@ use crate::{transcript::Transcript, Error};
use super::circuits::CF1; use super::circuits::CF1;
pub trait CommittedInstanceOps<C: CurveGroup> {
pub trait CommittedInstanceOps<C: CurveGroup>: Inputize<CF1<C>, Self::Var> {
/// The in-circuit representation of the committed instance. /// The in-circuit representation of the committed instance.
type Var: AllocVar<Self, CF1<C>> + CommittedInstanceVarOps<C>; type Var: AllocVar<Self, CF1<C>> + CommittedInstanceVarOps<C>;
/// `hash` implements the committed instance hash compatible with the /// `hash` implements the committed instance hash compatible with the
@ -85,7 +85,11 @@ pub trait CommittedInstanceVarOps {
sponge.absorb(&z_0)?; sponge.absorb(&z_0)?;
sponge.absorb(&z_i)?; sponge.absorb(&z_i)?;
sponge.absorb(&U_vec)?; sponge.absorb(&U_vec)?;
Ok((sponge.squeeze_field_elements(1)?.pop().unwrap(), U_vec))
Ok((
// `unwrap` is safe because the sponge is guaranteed to return a single element
sponge.squeeze_field_elements(1)?.pop().unwrap(),
U_vec,
))
} }
/// Returns the commitments contained in the committed instance. /// Returns the commitments contained in the committed instance.
@ -129,3 +133,16 @@ impl Dummy for Vec {
vec![Default::default(); cfg] vec![Default::default(); cfg]
} }
} }
impl<T: Default> Dummy<()> for T {
fn dummy(_: ()) -> Self {
Default::default()
}
}
/// Converts a value `self` into a vector of field elements, ordered in the
/// same way that a variable of type `Var` is represented in the circuit.
/// This is useful for the verifier to compute the public inputs.
pub trait Inputize<F, Var> {
fn inputize(&self) -> Vec<F>;
}
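A hypothetical sketch (not in the diff): a toy native type `Point` and a placeholder `PointVar` standing in for its in-circuit counterpart, showing the ordering contract that `Inputize` is meant to uphold. Both names are made up for illustration.

use ark_ff::PrimeField;

struct Point<F: PrimeField> {
    x: F,
    y: F,
}

// placeholder standing in for the real in-circuit representation of `Point`
struct PointVar;

impl<F: PrimeField> Inputize<F, PointVar> for Point<F> {
    fn inputize(&self) -> Vec<F> {
        // same order in which `PointVar` would allocate its fields: x, then y
        vec![self.x, self.y]
    }
}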

+ 4
- 2
folding-schemes/src/lib.rs

@ -38,6 +38,8 @@ pub enum Error {
// Relation errors // Relation errors
#[error("Relation not satisfied")] #[error("Relation not satisfied")]
NotSatisfied, NotSatisfied,
#[error("SNARK setup failed: {0}")]
SNARKSetupFail(String),
#[error("SNARK verification failed")] #[error("SNARK verification failed")]
SNARKVerificationFail, SNARKVerificationFail,
#[error("IVC verification failed")] #[error("IVC verification failed")]
@ -102,8 +104,8 @@ pub enum Error {
MaxStep, MaxStep,
#[error("Witness calculation error: {0}")] #[error("Witness calculation error: {0}")]
WitnessCalculationError(String), WitnessCalculationError(String),
#[error("BigInt to PrimeField conversion error: {0}")]
BigIntConversionError(String),
#[error("Failed to convert {0} into {1}: {2}")]
ConversionError(String, String, String),
#[error("Failed to serde: {0}")] #[error("Failed to serde: {0}")]
JSONSerdeError(String), JSONSerdeError(String),
#[error("Multi instances folding not supported in this scheme")] #[error("Multi instances folding not supported in this scheme")]

+ 26
- 10
folding-schemes/src/utils/gadgets.rs

@ -1,14 +1,36 @@
use ark_ff::PrimeField; use ark_ff::PrimeField;
use ark_r1cs_std::{ use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode}, alloc::{AllocVar, AllocationMode},
eq::EqGadget,
fields::{fp::FpVar, FieldVar}, fields::{fp::FpVar, FieldVar},
R1CSVar, R1CSVar,
}; };
use ark_relations::r1cs::{Namespace, SynthesisError}; use ark_relations::r1cs::{Namespace, SynthesisError};
use core::{borrow::Borrow, marker::PhantomData};
use core::borrow::Borrow;
use crate::utils::vec::SparseMatrix; use crate::utils::vec::SparseMatrix;
/// `EquivalenceGadget` enforces that two in-circuit variables are equivalent,
/// where the equivalence relation is parameterized by `M`:
/// - For `FpVar`, it is simply an equality relation, and `M` is unused.
/// - For `NonNativeUintVar`, we consider equivalence as a congruence relation,
/// in terms of modular arithmetic, so `M` specifies the modulus.
pub trait EquivalenceGadget<M> {
fn enforce_equivalent(&self, other: &Self) -> Result<(), SynthesisError>;
}
impl<M, F: PrimeField> EquivalenceGadget<M> for FpVar<F> {
fn enforce_equivalent(&self, other: &Self) -> Result<(), SynthesisError> {
self.enforce_equal(other)
}
}
impl<M, T: EquivalenceGadget<M>> EquivalenceGadget<M> for [T] {
fn enforce_equivalent(&self, other: &Self) -> Result<(), SynthesisError> {
self.iter()
.zip(other)
.try_for_each(|(a, b)| a.enforce_equivalent(b))
}
}
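A minimal usage sketch (illustrative, not part of the diff; `ark_bn254::Fr` is assumed for concreteness and the `EquivalenceGadget` trait above is in scope): enforcing element-wise equivalence of two vectors of native field variables. For `FpVar` the modulus parameter is unused, so it is instantiated with the field itself, mirroring the call sites in the decider gadgets.

use ark_bn254::Fr;
use ark_r1cs_std::{alloc::AllocVar, fields::fp::FpVar};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};

fn enforce_vectors_equivalent(a: &[Fr], b: &[Fr]) -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let a_var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(a.to_vec()))?;
    let b_var = Vec::<FpVar<Fr>>::new_witness(cs.clone(), || Ok(b.to_vec()))?;
    // for `FpVar`, `enforce_equivalent` reduces to `enforce_equal`, element-wise
    EquivalenceGadget::<Fr>::enforce_equivalent(&a_var[..], &b_var[..])
}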
pub trait MatrixGadget<FV> { pub trait MatrixGadget<FV> {
fn mul_vector(&self, v: &[FV]) -> Result<Vec<FV>, SynthesisError>; fn mul_vector(&self, v: &[FV]) -> Result<Vec<FV>, SynthesisError>;
} }
@ -42,17 +64,14 @@
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct SparseMatrixVar<F: PrimeField, CF: PrimeField, FV: AllocVar<F, CF>> {
_f: PhantomData<F>,
_cf: PhantomData<CF>,
_fv: PhantomData<FV>,
pub struct SparseMatrixVar<FV> {
pub n_rows: usize, pub n_rows: usize,
pub n_cols: usize, pub n_cols: usize,
// same format as the native SparseMatrix (which follows ark_relations::r1cs::Matrix format // same format as the native SparseMatrix (which follows ark_relations::r1cs::Matrix format
pub coeffs: Vec<Vec<(FV, usize)>>, pub coeffs: Vec<Vec<(FV, usize)>>,
} }
impl<F, CF, FV> AllocVar<SparseMatrix<F>, CF> for SparseMatrixVar<F, CF, FV>
impl<F, CF, FV> AllocVar<SparseMatrix<F>, CF> for SparseMatrixVar<FV>
where where
F: PrimeField, F: PrimeField,
CF: PrimeField, CF: PrimeField,
@ -77,9 +96,6 @@ where
} }
Ok(Self { Ok(Self {
_f: PhantomData,
_cf: PhantomData,
_fv: PhantomData,
n_rows: val.borrow().n_rows, n_rows: val.borrow().n_rows,
n_cols: val.borrow().n_cols, n_cols: val.borrow().n_cols,
coeffs, coeffs,
@ -88,7 +104,7 @@ where
} }
} }
impl<F: PrimeField> MatrixGadget<FpVar<F>> for SparseMatrixVar<F, F, FpVar<F>> {
impl<F: PrimeField> MatrixGadget<FpVar<F>> for SparseMatrixVar<FpVar<F>> {
fn mul_vector(&self, v: &[FpVar<F>]) -> Result<Vec<FpVar<F>>, SynthesisError> { fn mul_vector(&self, v: &[FpVar<F>]) -> Result<Vec<FpVar<F>>, SynthesisError> {
Ok(self Ok(self
.coeffs .coeffs

+ 12
- 4
frontends/src/circom/utils.rs

@ -108,11 +108,19 @@ impl CircomWrapper {
// Converts a num_bigint::BigInt to a PrimeField::BigInt. // Converts a num_bigint::BigInt to a PrimeField::BigInt.
pub fn num_bigint_to_ark_bigint(&self, value: &BigInt) -> Result<F::BigInt, Error> { pub fn num_bigint_to_ark_bigint(&self, value: &BigInt) -> Result<F::BigInt, Error> {
let big_uint = value
.to_biguint()
.ok_or_else(|| Error::BigIntConversionError("BigInt is negative".to_string()))?;
let big_uint = value.to_biguint().ok_or_else(|| {
Error::ConversionError(
"BigInt".into(),
"BigUint".into(),
"BigInt is negative".into(),
)
})?;
F::BigInt::try_from(big_uint).map_err(|_| { F::BigInt::try_from(big_uint).map_err(|_| {
Error::BigIntConversionError("Failed to convert to PrimeField::BigInt".to_string())
Error::ConversionError(
"BigUint".into(),
"PrimeField::BigInt".into(),
"BigUint is too large to fit into PrimeField::BigInt".into(),
)
}) })
} }

+ 9
- 6
frontends/src/noir/mod.rs

@ -16,6 +16,7 @@ use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use folding_schemes::{frontend::FCircuit, utils::PathOrBin, Error}; use folding_schemes::{frontend::FCircuit, utils::PathOrBin, Error};
use noir_arkworks_backend::{ use noir_arkworks_backend::{
read_program_from_binary, read_program_from_file, sonobe_bridge::AcirCircuitSonobe, read_program_from_binary, read_program_from_file, sonobe_bridge::AcirCircuitSonobe,
FilesystemError,
}; };
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -216,10 +217,12 @@ impl FCircuit for NoirFCircuit {
} }
} }
pub fn load_noir_circuit<F: PrimeField>(path: String) -> Circuit<GenericFieldElement<F>> {
let program: Program<GenericFieldElement<F>> = read_program_from_file(path).unwrap();
pub fn load_noir_circuit<F: PrimeField>(
path: String,
) -> Result<Circuit<GenericFieldElement<F>>, FilesystemError> {
let program: Program<GenericFieldElement<F>> = read_program_from_file(path)?;
let circuit: Circuit<GenericFieldElement<F>> = program.functions[0].clone(); let circuit: Circuit<GenericFieldElement<F>> = program.functions[0].clone();
circuit
Ok(circuit)
} }
#[cfg(test)] #[cfg(test)]
@ -241,7 +244,7 @@ mod tests {
"{}/src/noir/test_folder/test_circuit/target/test_circuit.json", "{}/src/noir/test_folder/test_circuit/target/test_circuit.json",
cur_path.to_str().unwrap() cur_path.to_str().unwrap()
); );
let circuit = load_noir_circuit(circuit_path);
let circuit = load_noir_circuit(circuit_path).unwrap();
let noirfcircuit = NoirFCircuit { let noirfcircuit = NoirFCircuit {
circuit, circuit,
state_len: 2, state_len: 2,
@ -261,7 +264,7 @@ mod tests {
"{}/src/noir/test_folder/test_circuit/target/test_circuit.json", "{}/src/noir/test_folder/test_circuit/target/test_circuit.json",
cur_path.to_str().unwrap() cur_path.to_str().unwrap()
); );
let circuit = load_noir_circuit(circuit_path);
let circuit = load_noir_circuit(circuit_path).unwrap();
let noirfcircuit = NoirFCircuit { let noirfcircuit = NoirFCircuit {
circuit, circuit,
state_len: 2, state_len: 2,
@ -285,7 +288,7 @@ mod tests {
"{}/src/noir/test_folder/test_no_external_inputs/target/test_no_external_inputs.json", "{}/src/noir/test_folder/test_no_external_inputs/target/test_no_external_inputs.json",
cur_path.to_str().unwrap() cur_path.to_str().unwrap()
); );
let circuit = load_noir_circuit(circuit_path);
let circuit = load_noir_circuit(circuit_path).unwrap();
let noirfcircuit = NoirFCircuit { let noirfcircuit = NoirFCircuit {
circuit, circuit,
state_len: 2, state_len: 2,

+ 1
- 1
solidity-verifiers/src/utils/mod.rs

@@ -22,7 +22,7 @@ pub fn get_function_selector_for_nova_cyclefold_verifier(
     first_param_array_length: usize,
 ) -> [u8; 4] {
     let mut hasher = Sha3::keccak256();
-    let fn_sig = format!("verifyNovaProof(uint256[{}],uint256[4],uint256[3],uint256[4],uint256[4],uint256[2],uint256[2][2],uint256[2],uint256[4],uint256[2][2])", first_param_array_length);
+    let fn_sig = format!("verifyNovaProof(uint256[{}],uint256[4],uint256[2],uint256[3],uint256[2],uint256[2][2],uint256[2],uint256[4],uint256[2][2])", first_param_array_length);
     hasher.input_str(&fn_sig);
     let hash = &mut [0u8; 32];
     hasher.result(hash);
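The updated selector corresponds to the regrouped `verifyNovaProof` calldata (see the template changes below). As a sketch of the derivation, the selector is keccak256 of the canonical signature, truncated to its first 4 bytes; this reuses the `crypto` crate API visible in the diff, and only the free-function wrapper and its name are illustrative.

```rust
// Sketch of the 4-byte selector derivation used above: keccak256 of the
// canonical function signature, truncated to 4 bytes. Uses the same `crypto`
// crate API as the snippet in the diff; the wrapper name is illustrative.
use crypto::{digest::Digest, sha3::Sha3};

fn nova_cyclefold_selector(first_param_array_length: usize) -> [u8; 4] {
    let fn_sig = format!(
        "verifyNovaProof(uint256[{}],uint256[4],uint256[2],uint256[3],uint256[2],uint256[2][2],uint256[2],uint256[4],uint256[2][2])",
        first_param_array_length
    );
    let mut hasher = Sha3::keccak256();
    hasher.input_str(&fn_sig);
    let mut hash = [0u8; 32];
    hasher.result(&mut hash);
    // The Solidity function selector is the first 4 bytes of the hash.
    [hash[0], hash[1], hash[2], hash[3]]
}
```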

+ 8
- 7
solidity-verifiers/src/verifiers/nova_cyclefold.rs

@ -153,9 +153,12 @@ mod tests {
use folding_schemes::{ use folding_schemes::{
commitment::{kzg::KZG, pedersen::Pedersen}, commitment::{kzg::KZG, pedersen::Pedersen},
folding::nova::{
decider_eth::{prepare_calldata, Decider as DeciderEth},
Nova, PreprocessorParam,
folding::{
nova::{
decider_eth::{prepare_calldata, Decider as DeciderEth},
Nova, PreprocessorParam,
},
traits::CommittedInstanceOps,
}, },
frontend::FCircuit, frontend::FCircuit,
transcript::poseidon::poseidon_canonical_config, transcript::poseidon::poseidon_canonical_config,
@@ -366,7 +369,6 @@ mod tests {
         n_steps: usize,
     ) {
         let (decider_pp, decider_vp) = decider_params;
-        let pp_hash = fs_params.1.pp_hash().unwrap();

         let f_circuit = FC::new(()).unwrap();
@@ -389,8 +391,8 @@ mod tests {
             nova.i,
             nova.z_0.clone(),
             nova.z_i.clone(),
-            &nova.U_i,
-            &nova.u_i,
+            &nova.U_i.get_commitments(),
+            &nova.u_i.get_commitments(),
             &proof,
         )
         .unwrap();
@@ -401,7 +403,6 @@ mod tests {
         let calldata: Vec<u8> = prepare_calldata(
             function_selector,
-            pp_hash,
             nova.i,
             nova.z_0,
             nova.z_i,
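The tests now hand `Decider::verify` and the calldata builder only the instances' commitments, obtained via `CommittedInstanceOps::get_commitments`, rather than the full committed instances. The sketch below is purely hypothetical (the real trait lives in `folding_schemes::folding::traits` and may differ); it only illustrates the idea of exposing the group elements (`cmW`, `cmE` for Nova) that the on-chain verifier needs from `U_i` and `u_i`.

```rust
// Hypothetical sketch only: not the crate's definitions. It illustrates a
// `get_commitments`-style accessor that exposes just the group elements of a
// committed instance, which is what the on-chain verifier consumes.
use ark_ec::CurveGroup;

trait CommittedInstanceOpsSketch<C: CurveGroup> {
    /// The commitments held by the instance, e.g. `[cmW, cmE]` for Nova.
    fn get_commitments(&self) -> Vec<C>;
}

struct NovaCommittedInstanceSketch<C: CurveGroup> {
    cm_w: C,
    cm_e: C,
}

impl<C: CurveGroup> CommittedInstanceOpsSketch<C> for NovaCommittedInstanceSketch<C> {
    fn get_commitments(&self) -> Vec<C> {
        vec![self.cm_w, self.cm_e]
    }
}
```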

+ 32
- 45
solidity-verifiers/templates/nova_cyclefold_decider.askama.sol

@@ -63,9 +63,8 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier {
         // inputs are grouped to prevent errors due stack too deep
         uint256[{{ 1 + z_len * 2 }}] calldata i_z0_zi, // [i, z0, zi] where |z0| == |zi|
         uint256[4] calldata U_i_cmW_U_i_cmE, // [U_i_cmW[2], U_i_cmE[2]]
-        uint256[3] calldata U_i_u_u_i_u_r, // [U_i_u, u_i_u, r]
-        uint256[4] calldata U_i_x_u_i_cmW, // [U_i_x[2], u_i_cmW[2]]
-        uint256[4] calldata u_i_x_cmT, // [u_i_x[2], cmT[2]]
+        uint256[2] calldata u_i_cmW, // [u_i_cmW[2]]
+        uint256[3] calldata cmT_r, // [cmT[2], r]
         uint256[2] calldata pA, // groth16
         uint256[2][2] calldata pB, // groth16
         uint256[2] calldata pC, // groth16
@@ -86,20 +85,26 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier {
         }
         {
-            // U_i.u + r * u_i.u
-            uint256 u = rlc(U_i_u_u_i_u_r[0], U_i_u_u_i_u_r[2], U_i_u_u_i_u_r[1]);
-            // U_i.x + r * u_i.x
-            uint256 x0 = rlc(U_i_x_u_i_cmW[0], U_i_u_u_i_u_r[2], u_i_x_cmT[0]);
-            uint256 x1 = rlc(U_i_x_u_i_cmW[1], U_i_u_u_i_u_r[2], u_i_x_cmT[1]);
-            public_inputs[{{ z_len * 2 + 2 }}] = u;
-            public_inputs[{{ z_len * 2 + 3 }}] = x0;
-            public_inputs[{{ z_len * 2 + 4 }}] = x1;
+            // U_i.cmW + r * u_i.cmW
+            uint256[2] memory mulScalarPoint = super.mulScalar([u_i_cmW[0], u_i_cmW[1]], cmT_r[2]);
+            uint256[2] memory cmW = super.add([U_i_cmW_U_i_cmE[0], U_i_cmW_U_i_cmE[1]], mulScalarPoint);
+            {
+                uint256[{{num_limbs}}] memory cmW_x_limbs = LimbsDecomposition.decompose(cmW[0]);
+                uint256[{{num_limbs}}] memory cmW_y_limbs = LimbsDecomposition.decompose(cmW[1]);
+                for (uint8 k = 0; k < {{num_limbs}}; k++) {
+                    public_inputs[{{ z_len * 2 + 2 }} + k] = cmW_x_limbs[k];
+                    public_inputs[{{ z_len * 2 + 2 + num_limbs }} + k] = cmW_y_limbs[k];
+                }
+            }
+            require(this.check(cmW, kzg_proof[0], challenge_W_challenge_E_kzg_evals[0], challenge_W_challenge_E_kzg_evals[2]), "KZG: verifying proof for challenge W failed");
         }
         {
-            // U_i.cmE + r * u_i.cmT
-            uint256[2] memory mulScalarPoint = super.mulScalar([u_i_x_cmT[2], u_i_x_cmT[3]], U_i_u_u_i_u_r[2]);
+            // U_i.cmE + r * cmT
+            uint256[2] memory mulScalarPoint = super.mulScalar([cmT_r[0], cmT_r[1]], cmT_r[2]);
             uint256[2] memory cmE = super.add([U_i_cmW_U_i_cmE[2], U_i_cmW_U_i_cmE[3]], mulScalarPoint);
             {
@@ -107,53 +112,35 @@ contract NovaDecider is Groth16Verifier, KZG10Verifier {
                 uint256[{{num_limbs}}] memory cmE_y_limbs = LimbsDecomposition.decompose(cmE[1]);
                 for (uint8 k = 0; k < {{num_limbs}}; k++) {
-                    public_inputs[{{ z_len * 2 + 5 }} + k] = cmE_x_limbs[k];
-                    public_inputs[{{ z_len * 2 + 5 + num_limbs }} + k] = cmE_y_limbs[k];
+                    public_inputs[{{ z_len * 2 + 2 + num_limbs * 2 }} + k] = cmE_x_limbs[k];
+                    public_inputs[{{ z_len * 2 + 2 + num_limbs * 3 }} + k] = cmE_y_limbs[k];
                 }
             }
             require(this.check(cmE, kzg_proof[1], challenge_W_challenge_E_kzg_evals[1], challenge_W_challenge_E_kzg_evals[3]), "KZG: verifying proof for challenge E failed");
         }
-        {
-            // U_i.cmW + r * u_i.cmW
-            uint256[2] memory mulScalarPoint = super.mulScalar([U_i_x_u_i_cmW[2], U_i_x_u_i_cmW[3]], U_i_u_u_i_u_r[2]);
-            uint256[2] memory cmW = super.add([U_i_cmW_U_i_cmE[0], U_i_cmW_U_i_cmE[1]], mulScalarPoint);
-            {
-                uint256[{{num_limbs}}] memory cmW_x_limbs = LimbsDecomposition.decompose(cmW[0]);
-                uint256[{{num_limbs}}] memory cmW_y_limbs = LimbsDecomposition.decompose(cmW[1]);
-                for (uint8 k = 0; k < {{num_limbs}}; k++) {
-                    public_inputs[{{ z_len * 2 + 5 + num_limbs * 2 }} + k] = cmW_x_limbs[k];
-                    public_inputs[{{ z_len * 2 + 5 + num_limbs * 3 }} + k] = cmW_y_limbs[k];
-                }
-            }
-            require(this.check(cmW, kzg_proof[0], challenge_W_challenge_E_kzg_evals[0], challenge_W_challenge_E_kzg_evals[2]), "KZG: verifying proof for challenge W failed");
-        }
         {
             // add challenges
-            public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 }}] = challenge_W_challenge_E_kzg_evals[0];
-            public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 + 1 }}] = challenge_W_challenge_E_kzg_evals[1];
-            public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 + 2 }}] = challenge_W_challenge_E_kzg_evals[2];
-            public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 + 3 }}] = challenge_W_challenge_E_kzg_evals[3];
+            public_inputs[{{ z_len * 2 + 2 + num_limbs * 4 }}] = challenge_W_challenge_E_kzg_evals[0];
+            public_inputs[{{ z_len * 2 + 2 + num_limbs * 4 + 1 }}] = challenge_W_challenge_E_kzg_evals[1];
+            public_inputs[{{ z_len * 2 + 2 + num_limbs * 4 + 2 }}] = challenge_W_challenge_E_kzg_evals[2];
+            public_inputs[{{ z_len * 2 + 2 + num_limbs * 4 + 3 }}] = challenge_W_challenge_E_kzg_evals[3];
             uint256[{{num_limbs}}] memory cmT_x_limbs;
             uint256[{{num_limbs}}] memory cmT_y_limbs;
-            cmT_x_limbs = LimbsDecomposition.decompose(u_i_x_cmT[2]);
-            cmT_y_limbs = LimbsDecomposition.decompose(u_i_x_cmT[3]);
+            cmT_x_limbs = LimbsDecomposition.decompose(cmT_r[0]);
+            cmT_y_limbs = LimbsDecomposition.decompose(cmT_r[1]);
             for (uint8 k = 0; k < {{num_limbs}}; k++) {
-                public_inputs[{{ z_len * 2 + 5 + num_limbs * 4 }} + 4 + k] = cmT_x_limbs[k];
-                public_inputs[{{ z_len * 2 + 5 + num_limbs * 5}} + 4 + k] = cmT_y_limbs[k];
+                public_inputs[{{ z_len * 2 + 2 + num_limbs * 4 }} + 4 + k] = cmT_x_limbs[k];
+                public_inputs[{{ z_len * 2 + 2 + num_limbs * 5 }} + 4 + k] = cmT_y_limbs[k];
             }
             // last element of the groth16 proof's public inputs is `r`
-            public_inputs[{{ public_inputs_len - 2 }}] = U_i_u_u_i_u_r[2];
+            public_inputs[{{ public_inputs_len - 2 }}] = cmT_r[2];
             bool success_g16 = this.verifyProof(pA, pB, pC, public_inputs);
             require(success_g16 == true, "Groth16: verifying proof failed");
         }
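The point arithmetic the template performs on-chain is the random linear combination cmW' = U_i.cmW + r·u_i.cmW and cmE' = U_i.cmE + r·cmT, with `r` carried in the Groth16 public inputs. A minimal off-chain sketch of the same folding, assuming BN254 G1 commitments and arkworks types; the function and variable names are illustrative, not the crate's.

```rust
// Off-chain sketch of the random linear combination computed on-chain above:
// cmW' = U_i.cmW + r * u_i.cmW and cmE' = U_i.cmE + r * cmT.
// BN254 G1 and all names below are illustrative assumptions.
use ark_bn254::{Fr, G1Projective as G1};
use ark_std::UniformRand;

fn fold_commitments(u_cm_w: G1, u_cm_e: G1, incoming_cm_w: G1, cm_t: G1, r: Fr) -> (G1, G1) {
    let cm_w = u_cm_w + incoming_cm_w * r; // U_i.cmW + r * u_i.cmW
    let cm_e = u_cm_e + cm_t * r; // U_i.cmE + r * cmT
    (cm_w, cm_e)
}

fn main() {
    let mut rng = ark_std::test_rng();
    let (big_w, big_e, small_w, cm_t) = (
        G1::rand(&mut rng),
        G1::rand(&mut rng),
        G1::rand(&mut rng),
        G1::rand(&mut rng),
    );
    let r = Fr::rand(&mut rng);
    let (cm_w, cm_e) = fold_commitments(big_w, big_e, small_w, cm_t, r);
    assert_eq!(cm_w, big_w + small_w * r);
    assert_eq!(cm_e, big_e + cm_t * r);
}
```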
