Browse Source

Add IPA commitment scheme and the respective circuit verifier gadget (#72)

* Add IPA commitment native implementation

* Add IPA Gadget verifier

* polish Pedersen & IPA, add blind bool param to IPA

* Optimize IPA gadget constraints (and native):

- optimize <s,b> computation from linear to log time
- optimize s computation from k*2^k to k*(2^k)/2

* add small optimization: delegate u_i^-1 to prover and just check u_i*u_i^-1==1 in verifier circuit

* IPA polish and document

* Add 'BLIND' parameter to CommitmentProver trait (and to Pedersen and KZG impls). Fit IPA into CommitmentProver trait.

* rename 'BLIND' to 'H' (hiding) in commitment

* IPA: rm u_invs from Proof and compute them in-circuit

* Update IPA's build_s & gadget to use Halo2 approach following @han0110 's suggestion.

This reduced further the amount of constraints needed.
- for k=4: -9k constraints (-7%)
- for k=8: -473k constr (-31%)
- for k=9: -1123k constr (-35%)
- for k=10: -2578k constr (-39%)
And now IPA verification (without amortizing) is very close to Pedersen
verification (in-circuits).

* rm dbg!(cs.num_constraints()) from multiple tests

* IPA::prove remove intermediate v_lo,v_hi vectors, add doc to build_s_gadget

* move powers_of into utils/mod.rs, update iters to cfg_iter
main
arnaucube 6 months ago
committed by GitHub
parent
commit
b25037e34c
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
18 changed files with 796 additions and 96 deletions
  1. +1
    -1
      folding-schemes-solidity/src/verifiers/mod.rs
  2. +681
    -0
      folding-schemes/src/commitment/ipa.rs
  3. +9
    -8
      folding-schemes/src/commitment/kzg.rs
  4. +15
    -3
      folding-schemes/src/commitment/mod.rs
  5. +47
    -33
      folding-schemes/src/commitment/pedersen.rs
  6. +1
    -1
      folding-schemes/src/folding/hypernova/cccs.rs
  7. +3
    -3
      folding-schemes/src/folding/hypernova/circuit.rs
  8. +3
    -3
      folding-schemes/src/folding/hypernova/lcccs.rs
  9. +4
    -4
      folding-schemes/src/folding/hypernova/nimfs.rs
  10. +2
    -2
      folding-schemes/src/folding/hypernova/utils.rs
  11. +0
    -1
      folding-schemes/src/folding/nova/circuits.rs
  12. +0
    -2
      folding-schemes/src/folding/nova/cyclefold.rs
  13. +9
    -21
      folding-schemes/src/folding/nova/decider_eth_circuit.rs
  14. +3
    -3
      folding-schemes/src/folding/nova/nifs.rs
  15. +2
    -1
      folding-schemes/src/folding/protogalaxy/folding.rs
  16. +4
    -0
      folding-schemes/src/lib.rs
  17. +0
    -9
      folding-schemes/src/utils/bit.rs
  18. +12
    -1
      folding-schemes/src/utils/mod.rs

+ 1
- 1
folding-schemes-solidity/src/verifiers/mod.rs

@ -243,7 +243,7 @@ mod tests {
let v: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(rng)).take(n).collect();
let cm = KZGProver::<G1>::commit(&pk, &v, &Fr::zero()).unwrap();
let (eval, proof) =
KZGProver::<G1>::prove(&pk, transcript_p, &cm, &v, &Fr::zero()).unwrap();
KZGProver::<G1>::prove(&pk, transcript_p, &cm, &v, &Fr::zero(), None).unwrap();
let template = KZG10Verifier::from(
&vk,
&pk.powers_of_g[..5],

+ 681
- 0
folding-schemes/src/commitment/ipa.rs

@ -0,0 +1,681 @@
/// IPA implements the modified Inner Product Argument described in
/// [Halo](https://eprint.iacr.org/2019/1021.pdf). The variable names used follow the paper
/// notation in order to make it more readable.
///
/// The implementation does the following optimizations in order to reduce the amount of
/// constraints in the circuit:
/// i. <s, b> computation is done in log time following a modification of the equation 3 in section
/// 3.2 from the paper.
/// ii. s computation is done in 2^{k+1}-2 instead of k*2^k.
use ark_ec::{AffineRepr, CurveGroup};
use ark_ff::{Field, PrimeField};
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
boolean::Boolean,
fields::{nonnative::NonNativeFieldVar, FieldVar},
groups::GroupOpsBounds,
prelude::CurveVar,
ToBitsGadget,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use ark_std::{
cfg_iter,
rand::{Rng, RngCore},
UniformRand, Zero,
};
use core::{borrow::Borrow, marker::PhantomData};
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
use super::{pedersen::Params as PedersenParams, CommitmentProver};
use crate::transcript::Transcript;
use crate::utils::{
powers_of,
vec::{vec_add, vec_scalar_mul},
};
use crate::Error;
/// IPA implements the Inner Product Argument protocol. The `H` parameter indicates if to use the
/// commitment in hiding mode or not.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct IPA<C: CurveGroup, const H: bool = false> {
    _c: PhantomData<C>,
}

/// Proof of an IPA opening. Field names follow the Halo paper notation.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Proof<C: CurveGroup> {
    // final folded scalar
    a: C::ScalarField,
    // blinding factors used for the L points; empty when not in hiding mode
    l: Vec<C::ScalarField>,
    // blinding factors used for the R points; empty when not in hiding mode
    r: Vec<C::ScalarField>,
    // left cross-commitments, one per folding round
    L: Vec<C>,
    // right cross-commitments, one per folding round
    R: Vec<C>,
}
impl<C: CurveGroup, const H: bool> IPA<C, H> {
    /// Samples fresh Pedersen parameters (a vector of generators plus the hiding
    /// generator `h`) able to commit to vectors of length up to `max`, rounded up to
    /// the next power of two.
    pub fn new_params<R: Rng>(rng: &mut R, max: usize) -> PedersenParams<C> {
        let n = max.next_power_of_two();
        let mut generators: Vec<C::Affine> = Vec::with_capacity(n);
        for _ in 0..n {
            generators.push(C::Affine::rand(rng));
        }
        PedersenParams::<C> {
            h: C::rand(rng),
            generators,
        }
    }
}
// Implement the CommitmentProver trait for IPA
impl<C: CurveGroup, const H: bool> CommitmentProver<C, H> for IPA<C, H> {
    type Params = PedersenParams<C>;
    type Proof = Proof<C>;

    /// Commits to the vector `a` with the Pedersen parameters. When `H` (hiding) is
    /// enabled the blinding factor `r` is used, otherwise it is ignored.
    fn commit(
        params: &PedersenParams<C>,
        a: &[C::ScalarField],
        r: &C::ScalarField, // blinding factor
    ) -> Result<C, Error> {
        if params.generators.len() < a.len() {
            return Err(Error::PedersenParamsLen(params.generators.len(), a.len()));
        }
        // h⋅r + <g, a>
        // use msm_unchecked because we already ensured at the if that lengths match
        if !H {
            return Ok(C::msm_unchecked(&params.generators[..a.len()], a));
        }
        Ok(params.h.mul(r) + C::msm_unchecked(&params.generators[..a.len()], a))
    }

    /// Generates an IPA opening proof for the commitment `P` to the vector `a` at the
    /// evaluation point `x`. `a.len()` must be a power of two. When `H` (hiding) is
    /// enabled, `rng` must be provided to sample the blinding factors l_j, r_j.
    fn prove(
        params: &Self::Params,
        transcript: &mut impl Transcript<C>,
        P: &C, // commitment
        a: &[C::ScalarField],
        x: &C::ScalarField,
        rng: Option<&mut dyn RngCore>,
    ) -> Result<Self::Proof, Error> {
        if !a.len().is_power_of_two() {
            return Err(Error::NotPowerOfTwo("a".to_string(), a.len()));
        }
        let d = a.len();
        // d is a power of two (checked above), so log2(d) is exactly the number of
        // trailing zeros; this avoids the lossy float-based log2 computation.
        let k = d.trailing_zeros() as usize;

        if params.generators.len() < a.len() {
            return Err(Error::PedersenParamsLen(params.generators.len(), a.len()));
        }

        // blinding factors, one pair (l_j, r_j) per folding round
        let l: Vec<C::ScalarField>;
        let r: Vec<C::ScalarField>;
        if H {
            let rng = rng.ok_or(Error::MissingRandomness)?;
            l = std::iter::repeat_with(|| C::ScalarField::rand(rng))
                .take(k)
                .collect();
            r = std::iter::repeat_with(|| C::ScalarField::rand(rng))
                .take(k)
                .collect();
        } else {
            l = vec![];
            r = vec![];
        }

        // derive the auxiliary generator U from the transcript
        transcript.absorb_point(P)?;
        let s = transcript.get_challenge();
        let U = C::generator().mul(s);

        let mut a = a.to_owned();
        let mut b = powers_of(*x, d); // b = [1, x, x^2, ..., x^{d-1}]
        let mut G = params.generators.clone();

        let mut L: Vec<C> = vec![C::zero(); k];
        let mut R: Vec<C> = vec![C::zero(); k];
        // u challenges, one per folding round
        let mut u: Vec<C::ScalarField> = vec![C::ScalarField::zero(); k];
        for j in (0..k).rev() {
            let m = a.len() / 2;
            // cross terms of the round: L_j from (a_lo, b_hi, G_hi), R_j from
            // (a_hi, b_lo, G_lo); the h⋅l_j / h⋅r_j terms only in hiding mode
            if H {
                L[j] = C::msm_unchecked(&G[m..], &a[..m])
                    + params.h.mul(l[j])
                    + U.mul(inner_prod(&a[..m], &b[m..])?);
                R[j] = C::msm_unchecked(&G[..m], &a[m..])
                    + params.h.mul(r[j])
                    + U.mul(inner_prod(&a[m..], &b[..m])?);
            } else {
                L[j] = C::msm_unchecked(&G[m..], &a[..m]) + U.mul(inner_prod(&a[..m], &b[m..])?);
                R[j] = C::msm_unchecked(&G[..m], &a[m..]) + U.mul(inner_prod(&a[m..], &b[..m])?);
            }
            // get challenge for the j-th round
            transcript.absorb_point(&L[j])?;
            transcript.absorb_point(&R[j])?;
            u[j] = transcript.get_challenge();

            let uj = u[j];
            let uj_inv = u[j]
                .inverse()
                .ok_or(Error::Other("error on computing inverse".to_string()))?;

            // fold the vectors to half their length:
            // a_hi * uj^-1 + a_lo * uj
            a = vec_add(
                &vec_scalar_mul(&a[..m], &uj),
                &vec_scalar_mul(&a[m..], &uj_inv),
            )?;
            // b_lo * uj^-1 + b_hi * uj
            b = vec_add(
                &vec_scalar_mul(&b[..m], &uj_inv),
                &vec_scalar_mul(&b[m..], &uj),
            )?;
            // G_lo * uj^-1 + G_hi * uj
            G = cfg_iter!(G[..m])
                .map(|e| e.into_group().mul(uj_inv))
                .zip(cfg_iter!(G[m..]).map(|e| e.into_group().mul(uj)))
                .map(|(a, b)| (a + b).into_affine())
                .collect::<Vec<C::Affine>>();
        }

        // after k folding rounds all vectors must have collapsed to a single element
        if a.len() != 1 {
            return Err(Error::NotExpectedLength(a.len(), 1));
        }
        if b.len() != 1 {
            return Err(Error::NotExpectedLength(b.len(), 1));
        }
        if G.len() != 1 {
            return Err(Error::NotExpectedLength(G.len(), 1));
        }

        // l and r are moved into the proof directly (no need to clone them)
        Ok(Self::Proof {
            a: a[0],
            l,
            r,
            L,
            R,
        })
    }
}
impl<C: CurveGroup, const H: bool> IPA<C, H> {
    /// Verifies an IPA opening proof `p` for the commitment `P`, claiming that the
    /// committed vector evaluates to `v` at the point `x`, i.e. that
    /// v = <a, [1, x, x^2, ..., x^{d-1}]>. `r` is the blinding factor, only used when
    /// `H` (hiding) is enabled. Returns `Ok(true)` iff the proof verifies.
    #[allow(clippy::too_many_arguments)]
    pub fn verify(
        params: &PedersenParams<C>,
        transcript: &mut impl Transcript<C>,
        x: C::ScalarField, // evaluation point
        v: C::ScalarField, // value at evaluation point
        P: C,              // commitment
        p: Proof<C>,
        r: C::ScalarField, // blinding factor
        k: usize,          // k = log2(d), where d is the degree of the committed polynomial
    ) -> Result<bool, Error> {
        // sanity-check the proof's vector lengths against the hiding mode:
        // blinding vectors must be empty when not hiding, and of length k when hiding
        if !H && (!p.l.is_empty() || !p.r.is_empty()) {
            return Err(Error::CommitmentVerificationFail);
        }
        if H && (p.l.len() != k || p.r.len() != k) {
            return Err(Error::CommitmentVerificationFail);
        }
        if p.L.len() != k || p.R.len() != k {
            return Err(Error::CommitmentVerificationFail);
        }

        // absorbs & get challenges, replaying the prover's transcript interactions
        transcript.absorb_point(&P)?;
        let s = transcript.get_challenge();
        let U = C::generator().mul(s);
        let mut u: Vec<C::ScalarField> = vec![C::ScalarField::zero(); k];
        for i in (0..k).rev() {
            transcript.absorb_point(&p.L[i])?;
            transcript.absorb_point(&p.R[i])?;
            u[i] = transcript.get_challenge();
        }

        // P' = P + v⋅U
        let P = P + U.mul(v);
        let mut q_0 = P;
        let mut r = r;

        // compute u[i]^-1 once
        let mut u_invs = vec![C::ScalarField::zero(); u.len()];
        for (j, u_j) in u.iter().enumerate() {
            u_invs[j] = u_j
                .inverse()
                .ok_or(Error::Other("error on computing inverse".to_string()))?;
        }

        // compute b & G from s
        let s = build_s(&u, &u_invs, k)?;
        // b = <s, b_vec> = <s, [1, x, x^2, ..., x^d-1]>, computed in log time
        let b = s_b_inner(&u, &x)?;
        let d: usize = 2_u64.pow(k as u32) as usize;
        if params.generators.len() < d {
            return Err(Error::PedersenParamsLen(params.generators.len(), d));
        }
        let G = C::msm_unchecked(&params.generators, &s);

        // fold the L_j / R_j points into q_0 (and the blinding factors into r when
        // hiding), weighted by u_j^2 and u_j^-2 respectively
        for (j, u_j) in u.iter().enumerate() {
            let uj2 = u_j.square();
            let uj_inv2 = u_invs[j].square();

            q_0 = q_0 + p.L[j].mul(uj2) + p.R[j].mul(uj_inv2);
            if H {
                r = r + p.l[j] * uj2 + p.r[j] * uj_inv2;
            }
        }

        // q_1 = a⋅G + a⋅b⋅U, plus h⋅r only in hiding mode
        let q_1 = if H {
            G.mul(p.a) + params.h.mul(r) + U.mul(p.a * b)
        } else {
            G.mul(p.a) + U.mul(p.a * b)
        };
        Ok(q_0 == q_1)
    }
}
/// Computes s such that
/// s = (
///     u₁⁻¹ u₂⁻¹ … uₖ⁻¹,
///     u₁   u₂⁻¹ … uₖ⁻¹,
///     u₁⁻¹ u₂   … uₖ⁻¹,
///     u₁   u₂   … uₖ⁻¹,
///     ⋮    ⋮      ⋮
///     u₁   u₂   … uₖ
/// )
/// Uses Halo2 approach computing $g(X) = \prod\limits_{i=0}^{k-1} (1 + u_{k - 1 - i} X^{2^i})$,
/// taking 2^{k+1}-2.
/// src: https://github.com/zcash/halo2/blob/81729eca91ba4755e247f49c3a72a4232864ec9e/halo2_proofs/src/poly/commitment/verifier.rs#L156
fn build_s<F: PrimeField>(u: &[F], u_invs: &[F], k: usize) -> Result<Vec<F>, Error> {
    let d: usize = 2_u64.pow(k as u32) as usize;
    let mut s: Vec<F> = vec![F::one(); d];
    // round i works on a window of len = 2^i: the first `len` entries are duplicated
    // into the next `len` entries, then the left copy is scaled by u_i^-1 and the
    // right copy by u_i, doubling the filled prefix each round
    for (len, (u_j, u_j_inv)) in u
        .iter()
        .zip(u_invs)
        .enumerate()
        .map(|(i, u_j)| (1 << i, u_j))
    {
        let (left, right) = s.split_at_mut(len);
        // only the first `len` entries of the right part are touched this round
        let right = &mut right[0..len];
        right.copy_from_slice(left);
        for s in left {
            *s *= u_j_inv;
        }
        for s in right {
            *s *= u_j;
        }
    }
    Ok(s)
}
/// Computes (in-circuit) s such that
/// s = (
///     u₁⁻¹ u₂⁻¹ … uₖ⁻¹,
///     u₁   u₂⁻¹ … uₖ⁻¹,
///     u₁⁻¹ u₂   … uₖ⁻¹,
///     u₁   u₂   … uₖ⁻¹,
///     ⋮    ⋮      ⋮
///     u₁   u₂   … uₖ
/// )
/// Uses Halo2 approach computing $g(X) = \prod\limits_{i=0}^{k-1} (1 + u_{k - 1 - i} X^{2^i})$,
/// taking 2^{k+1}-2.
/// src: https://github.com/zcash/halo2/blob/81729eca91ba4755e247f49c3a72a4232864ec9e/halo2_proofs/src/poly/commitment/verifier.rs#L156
fn build_s_gadget<F: PrimeField, CF: PrimeField>(
    u: &[NonNativeFieldVar<F, CF>],
    u_invs: &[NonNativeFieldVar<F, CF>],
    k: usize,
) -> Result<Vec<NonNativeFieldVar<F, CF>>, SynthesisError> {
    let d: usize = 2_u64.pow(k as u32) as usize;
    let mut s: Vec<NonNativeFieldVar<F, CF>> = vec![NonNativeFieldVar::one(); d];
    // same doubling scheme as the native build_s: in round i the first 2^i entries are
    // cloned into the next 2^i entries, then left is scaled by u_i^-1 and right by u_i
    for (len, (u_j, u_j_inv)) in u
        .iter()
        .zip(u_invs)
        .enumerate()
        .map(|(i, u_j)| (1 << i, u_j))
    {
        let (left, right) = s.split_at_mut(len);
        // only the first `len` entries of the right part are touched this round
        let right = &mut right[0..len];
        // clone (not copy): NonNativeFieldVar is not Copy
        right.clone_from_slice(left);
        for s in left {
            *s *= u_j_inv;
        }
        for s in right {
            *s *= u_j;
        }
    }
    Ok(s)
}
fn inner_prod<F: PrimeField>(a: &[F], b: &[F]) -> Result<F, Error> {
if a.len() != b.len() {
return Err(Error::NotSameLength(
"a".to_string(),
a.len(),
"b".to_string(),
b.len(),
));
}
let c = cfg_iter!(a)
.zip(cfg_iter!(b))
.map(|(a_i, b_i)| *a_i * b_i)
.sum();
Ok(c)
}
// g(x, u_1, u_2, ..., u_k) = <s, b>, naively takes linear, but can compute in log time through
// g(x, u_1, u_2, ..., u_k) = \Prod u_i x^{2^i} + u_i^-1
fn s_b_inner<F: PrimeField>(u: &[F], x: &F) -> Result<F, Error> {
    let mut x_pow = *x; // tracks x^{2^i}, starting from x^{2^0} = x
    let mut acc = F::one();
    for u_i in u {
        let u_i_inv = u_i
            .inverse()
            .ok_or(Error::Other("error on computing inverse".to_string()))?;
        acc *= *u_i * x_pow + u_i_inv;
        x_pow.square_in_place();
    }
    Ok(acc)
}
// g(x, u_1, u_2, ..., u_k) = <s, b>, naively takes linear, but can compute in log time through
// g(x, u_1, u_2, ..., u_k) = \Prod u_i x^{2^i} + u_i^-1
fn s_b_inner_gadget<F: PrimeField, CF: PrimeField>(
    u: &[NonNativeFieldVar<F, CF>],
    x: &NonNativeFieldVar<F, CF>,
) -> Result<NonNativeFieldVar<F, CF>, SynthesisError> {
    let mut acc = NonNativeFieldVar::<F, CF>::one();
    let mut x_pow = x.clone(); // tracks x^{2^i}, starting from x^{2^0} = x
    for u_i in u {
        let u_i_inv = u_i.inverse()?;
        acc *= u_i.clone() * x_pow.clone() + u_i_inv;
        x_pow *= x_pow.clone();
    }
    Ok(acc)
}
/// CF: the constraint field of the circuit, i.e. the base field of the curve C.
pub type CF<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;

/// In-circuit representation of an IPA [`Proof`]. Scalar-field values are allocated as
/// non-native field elements, since the circuit works over C's base field.
pub struct ProofVar<C: CurveGroup, GC: CurveVar<C, CF<C>>> {
    // final folded scalar
    a: NonNativeFieldVar<C::ScalarField, CF<C>>,
    // blinding factors for the L points (empty when not in hiding mode)
    l: Vec<NonNativeFieldVar<C::ScalarField, CF<C>>>,
    // blinding factors for the R points (empty when not in hiding mode)
    r: Vec<NonNativeFieldVar<C::ScalarField, CF<C>>>,
    // left cross-commitments, one per folding round
    L: Vec<GC>,
    // right cross-commitments, one per folding round
    R: Vec<GC>,
}
impl<C, GC> AllocVar<Proof<C>, CF<C>> for ProofVar<C, GC>
where
    C: CurveGroup,
    GC: CurveVar<C, CF<C>>,
    <C as ark_ec::CurveGroup>::BaseField: PrimeField,
{
    /// Allocates a [`ProofVar`] from a native [`Proof`], allocating every field with
    /// the given allocation mode.
    fn new_variable<T: Borrow<Proof<C>>>(
        cs: impl Into<Namespace<CF<C>>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> Result<Self, SynthesisError> {
        f().and_then(|val| {
            let cs = cs.into();

            // scalar-field values are allocated as non-native field elements
            let a = NonNativeFieldVar::<C::ScalarField, CF<C>>::new_variable(
                cs.clone(),
                || Ok(val.borrow().a),
                mode,
            )?;
            let l: Vec<NonNativeFieldVar<C::ScalarField, CF<C>>> =
                Vec::new_variable(cs.clone(), || Ok(val.borrow().l.clone()), mode)?;
            let r: Vec<NonNativeFieldVar<C::ScalarField, CF<C>>> =
                Vec::new_variable(cs.clone(), || Ok(val.borrow().r.clone()), mode)?;
            // curve points are allocated through the curve gadget GC
            let L: Vec<GC> = Vec::new_variable(cs.clone(), || Ok(val.borrow().L.clone()), mode)?;
            let R: Vec<GC> = Vec::new_variable(cs.clone(), || Ok(val.borrow().R.clone()), mode)?;

            Ok(Self { a, l, r, L, R })
        })
    }
}
/// IPAGadget implements the circuit that verifies an IPA Proof. The `H` parameter indicates if to
/// use the commitment in hiding mode or not, reducing a bit the number of constraints needed in
/// the later case.
pub struct IPAGadget<C, GC, const H: bool = false>
where
    C: CurveGroup,
    GC: CurveVar<C, CF<C>>,
{
    // CF<C> is the constraint field (base field of C) the circuit operates over
    _cf: PhantomData<CF<C>>,
    _c: PhantomData<C>,
    _gc: PhantomData<GC>,
}
impl<C, GC, const H: bool> IPAGadget<C, GC, H>
where
    C: CurveGroup,
    GC: CurveVar<C, CF<C>>,
    <C as ark_ec::CurveGroup>::BaseField: ark_ff::PrimeField,
    for<'a> &'a GC: GroupOpsBounds<'a, C, GC>,
{
    /// Verify the IPA opening proof, K=log2(d), where d is the degree of the committed polynomial,
    /// and H indicates if the commitment is in hiding mode and thus uses blinding factors, if not,
    /// there are some constraints saved.
    ///
    /// The challenges `u` and `U` are expected to be computed outside the circuit
    /// (replaying the transcript) and passed in as inputs.
    #[allow(clippy::too_many_arguments)]
    pub fn verify<const K: usize>(
        g: &Vec<GC>,                                  // params.generators
        h: &GC,                                       // params.h
        x: &NonNativeFieldVar<C::ScalarField, CF<C>>, // evaluation point
        v: &NonNativeFieldVar<C::ScalarField, CF<C>>, // value at evaluation point
        P: &GC,                                       // commitment
        p: &ProofVar<C, GC>,
        r: &NonNativeFieldVar<C::ScalarField, CF<C>>, // blinding factor
        u: &[NonNativeFieldVar<C::ScalarField, CF<C>>; K], // challenges
        U: &GC,                                       // challenge
    ) -> Result<Boolean<CF<C>>, SynthesisError> {
        if p.L.len() != K || p.R.len() != K {
            return Err(SynthesisError::Unsatisfiable);
        }

        // P' = P + v⋅U
        let P_ = P + U.scalar_mul_le(v.to_bits_le()?.iter())?;
        let mut q_0 = P_;
        let mut r = r.clone();

        // compute u[i]^-1 once
        let mut u_invs = vec![NonNativeFieldVar::<C::ScalarField, CF<C>>::zero(); u.len()];
        for (j, u_j) in u.iter().enumerate() {
            u_invs[j] = u_j.inverse()?;
        }

        // compute b & G from s
        let s = build_s_gadget(u, &u_invs, K)?;
        // b = <s, b_vec> = <s, [1, x, x^2, ..., x^K-1]>
        let b = s_b_inner_gadget(u, x)?;
        // ensure that generators.len() >= s.len() (= 2^K). Note: the check must be
        // against s.len(), not K (the log of the vector length); comparing against K
        // would let an undersized generator vector through and panic at g[i] below.
        if g.len() < s.len() {
            return Err(SynthesisError::Unsatisfiable);
        }
        // msm: G=<G, s>
        let mut G = GC::zero();
        for (i, s_i) in s.iter().enumerate() {
            G += g[i].scalar_mul_le(s_i.to_bits_le()?.iter())?;
        }

        // fold the L_j / R_j points into q_0 (and blinding factors into r when hiding)
        for (j, u_j) in u.iter().enumerate() {
            let uj2 = u_j.square()?;
            let uj_inv2 = u_invs[j].square()?; // cheaper square than inversing the uj2

            q_0 = q_0
                + p.L[j].scalar_mul_le(uj2.to_bits_le()?.iter())?
                + p.R[j].scalar_mul_le(uj_inv2.to_bits_le()?.iter())?;
            if H {
                r = r + &p.l[j] * &uj2 + &p.r[j] * &uj_inv2;
            }
        }

        // q_1 = a⋅G + a⋅b⋅U, plus h⋅r only in hiding mode
        let q_1 = if H {
            G.scalar_mul_le(p.a.to_bits_le()?.iter())?
                + h.scalar_mul_le(r.to_bits_le()?.iter())?
                + U.scalar_mul_le((p.a.clone() * b).to_bits_le()?.iter())?
        } else {
            G.scalar_mul_le(p.a.to_bits_le()?.iter())?
                + U.scalar_mul_le((p.a.clone() * b).to_bits_le()?.iter())?
        };

        // q_0 == q_1
        q_0.is_eq(&q_1)
    }
}
#[cfg(test)]
mod tests {
    use ark_ec::Group;
    use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
    use ark_r1cs_std::{alloc::AllocVar, bits::boolean::Boolean, eq::EqGadget};
    use ark_relations::r1cs::ConstraintSystem;
    use ark_std::UniformRand;
    use std::ops::Mul;

    use super::*;
    use crate::transcript::poseidon::{poseidon_test_config, PoseidonTranscript};

    #[test]
    fn test_ipa() {
        // exercise both the non-hiding and the hiding modes
        test_ipa_opt::<false>();
        test_ipa_opt::<true>();
    }
    fn test_ipa_opt<const hiding: bool>() {
        let mut rng = ark_std::test_rng();

        // k = log2(d), d = length of the committed vector
        const k: usize = 4;
        const d: usize = 2_u64.pow(k as u32) as usize;

        // setup params
        let params = IPA::<Projective, hiding>::new_params(&mut rng, d);
        let poseidon_config = poseidon_test_config::<Fr>();
        // init Prover's transcript
        let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
        // init Verifier's transcript
        let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);

        let a: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(&mut rng))
            .take(d)
            .collect();
        let r_blind: Fr = Fr::rand(&mut rng);
        let cm = IPA::<Projective, hiding>::commit(&params, &a, &r_blind).unwrap();

        // evaluation point
        let x = Fr::rand(&mut rng);

        let proof = IPA::<Projective, hiding>::prove(
            &params,
            &mut transcript_p,
            &cm,
            &a,
            &x,
            Some(&mut rng),
        )
        .unwrap();

        // expected evaluation: v = <a, [1, x, x^2, ..., x^{d-1}]>
        let b = powers_of(x, d);
        let v = inner_prod(&a, &b).unwrap();
        assert!(IPA::<Projective, hiding>::verify(
            &params,
            &mut transcript_v,
            x,
            v,
            cm,
            proof,
            r_blind,
            k,
        )
        .unwrap());
    }

    #[test]
    fn test_ipa_gadget() {
        // exercise both the non-hiding and the hiding modes
        test_ipa_gadget_opt::<false>();
        test_ipa_gadget_opt::<true>();
    }
    fn test_ipa_gadget_opt<const hiding: bool>() {
        let mut rng = ark_std::test_rng();

        // k = log2(d), d = length of the committed vector
        const k: usize = 4;
        const d: usize = 2_u64.pow(k as u32) as usize;

        // setup params
        let params = IPA::<Projective, hiding>::new_params(&mut rng, d);
        let poseidon_config = poseidon_test_config::<Fr>();
        // init Prover's transcript
        let mut transcript_p = PoseidonTranscript::<Projective>::new(&poseidon_config);
        // init Verifier's transcript
        let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);

        // commit to a vector whose second half is zeroed
        let mut a: Vec<Fr> = std::iter::repeat_with(|| Fr::rand(&mut rng))
            .take(d / 2)
            .collect();
        a.extend(vec![Fr::zero(); d / 2]);
        let r_blind: Fr = Fr::rand(&mut rng);
        let cm = IPA::<Projective, hiding>::commit(&params, &a, &r_blind).unwrap();

        // evaluation point
        let x = Fr::rand(&mut rng);

        let proof = IPA::<Projective, hiding>::prove(
            &params,
            &mut transcript_p,
            &cm,
            &a,
            &x,
            Some(&mut rng),
        )
        .unwrap();

        // expected evaluation: v = <a, [1, x, x^2, ..., x^{d-1}]>
        let b = powers_of(x, d);
        let v = inner_prod(&a, &b).unwrap();
        // check the proof natively before checking it in-circuit
        assert!(IPA::<Projective, hiding>::verify(
            &params,
            &mut transcript_v,
            x,
            v,
            cm,
            proof.clone(),
            r_blind,
            k,
        )
        .unwrap());

        // circuit
        let cs = ConstraintSystem::<Fq>::new_ref();

        // the challenges (s, U, and the u_i) are computed outside the circuit by
        // replaying the verifier's transcript, and are passed in as witnesses
        let mut transcript_v = PoseidonTranscript::<Projective>::new(&poseidon_config);
        transcript_v.absorb_point(&cm).unwrap();
        let s = transcript_v.get_challenge();
        let U = Projective::generator().mul(s);
        let mut u: Vec<Fr> = vec![Fr::zero(); k];
        for i in (0..k).rev() {
            transcript_v.absorb_point(&proof.L[i]).unwrap();
            transcript_v.absorb_point(&proof.R[i]).unwrap();
            u[i] = transcript_v.get_challenge();
        }

        // prepare inputs
        let gVar = Vec::<GVar>::new_constant(cs.clone(), params.generators).unwrap();
        let hVar = GVar::new_constant(cs.clone(), params.h).unwrap();
        let xVar = NonNativeFieldVar::<Fr, Fq>::new_witness(cs.clone(), || Ok(x)).unwrap();
        let vVar = NonNativeFieldVar::<Fr, Fq>::new_witness(cs.clone(), || Ok(v)).unwrap();
        let cmVar = GVar::new_witness(cs.clone(), || Ok(cm)).unwrap();
        let proofVar = ProofVar::<Projective, GVar>::new_witness(cs.clone(), || Ok(proof)).unwrap();
        let r_blindVar =
            NonNativeFieldVar::<Fr, Fq>::new_witness(cs.clone(), || Ok(r_blind)).unwrap();
        let uVar_vec = Vec::<NonNativeFieldVar<Fr, Fq>>::new_witness(cs.clone(), || Ok(u)).unwrap();
        let uVar: [NonNativeFieldVar<Fr, Fq>; k] = uVar_vec.try_into().unwrap();
        let UVar = GVar::new_witness(cs.clone(), || Ok(U)).unwrap();

        let v = IPAGadget::<Projective, GVar, hiding>::verify::<k>(
            // &mut transcriptVar,
            &gVar,
            &hVar,
            &xVar,
            &vVar,
            &cmVar,
            &proofVar,
            &r_blindVar,
            &uVar,
            &UVar,
        )
        .unwrap();
        v.enforce_equal(&Boolean::TRUE).unwrap();
        assert!(cs.is_satisfied().unwrap());
    }
}

+ 9
- 8
folding-schemes/src/commitment/kzg.rs

@ -17,7 +17,7 @@ use ark_poly::{
DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
};
use ark_poly_commit::kzg10::{VerifierKey, KZG10};
use ark_std::rand::Rng;
use ark_std::rand::{Rng, RngCore};
use ark_std::{borrow::Cow, fmt::Debug};
use ark_std::{One, Zero};
use core::marker::PhantomData;
@ -66,11 +66,11 @@ where
/// KZGProver implements the CommitmentProver trait for the KZG commitment scheme.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct KZGProver<'a, C: CurveGroup> {
pub struct KZGProver<'a, C: CurveGroup, const H: bool = false> {
_a: PhantomData<&'a ()>,
_c: PhantomData<C>,
}
impl<'a, C> CommitmentProver<C> for KZGProver<'a, C>
impl<'a, C, const H: bool> CommitmentProver<C, H> for KZGProver<'a, C, H>
where
C: CurveGroup,
{
@ -87,8 +87,8 @@ where
v: &[C::ScalarField],
_blind: &C::ScalarField,
) -> Result<C, Error> {
if !_blind.is_zero() {
return Err(Error::NotSupportedYet("blinding factors".to_string()));
if !_blind.is_zero() || H {
return Err(Error::NotSupportedYet("hiding".to_string()));
}
let polynomial = poly_from_vec(v.to_vec())?;
@ -113,9 +113,10 @@ where
cm: &C,
v: &[C::ScalarField],
_blind: &C::ScalarField,
_rng: Option<&mut dyn RngCore>,
) -> Result<Self::Proof, Error> {
if !_blind.is_zero() {
return Err(Error::NotSupportedYet("blinding factors".to_string()));
if !_blind.is_zero() || H {
return Err(Error::NotSupportedYet("hiding".to_string()));
}
let polynomial = poly_from_vec(v.to_vec())?;
@ -213,7 +214,7 @@ mod tests {
let cm = KZGProver::<G1>::commit(&pk, &v, &Fr::zero()).unwrap();
let (eval, proof) =
KZGProver::<G1>::prove(&pk, transcript_p, &cm, &v, &Fr::zero()).unwrap();
KZGProver::<G1>::prove(&pk, transcript_p, &cm, &v, &Fr::zero(), None).unwrap();
// verify the proof:
// get evaluation challenge

+ 15
- 3
folding-schemes/src/commitment/mod.rs

@ -1,14 +1,17 @@
use ark_ec::CurveGroup;
use ark_std::fmt::Debug;
use ark_std::rand::RngCore;
use crate::transcript::Transcript;
use crate::Error;
pub mod ipa;
pub mod kzg;
pub mod pedersen;
/// CommitmentProver defines the vector commitment scheme prover trait.
pub trait CommitmentProver<C: CurveGroup>: Clone + Debug {
/// CommitmentProver defines the vector commitment scheme prover trait. Where `H` indicates if to
/// use the commitment in hiding mode or not.
pub trait CommitmentProver<C: CurveGroup, const H: bool = false>: Clone + Debug {
type Params: Clone + Debug;
type Proof: Clone + Debug;
@ -23,6 +26,7 @@ pub trait CommitmentProver: Clone + Debug {
cm: &C,
v: &[C::ScalarField],
blind: &C::ScalarField,
rng: Option<&mut dyn RngCore>,
) -> Result<Self::Proof, Error>;
}
@ -65,7 +69,15 @@ mod tests {
let v_3: Vec<C::ScalarField> = v_1.iter().zip(v_2).map(|(a, b)| *a + (r * b)).collect();
let transcript = &mut PoseidonTranscript::<C>::new(poseidon_config);
let proof = CP::prove(params, transcript, &cm_3, &v_3, &C::ScalarField::zero()).unwrap();
let proof = CP::prove(
params,
transcript,
&cm_3,
&v_3,
&C::ScalarField::zero(),
None,
)
.unwrap();
Ok((cm_3, proof))
}

+ 47
- 33
folding-schemes/src/commitment/pedersen.rs

@ -1,8 +1,12 @@
use ark_ec::CurveGroup;
use ark_ff::Field;
use ark_r1cs_std::{boolean::Boolean, groups::GroupOpsBounds, prelude::CurveVar};
use ark_r1cs_std::{groups::GroupOpsBounds, prelude::CurveVar};
use ark_relations::r1cs::SynthesisError;
use ark_std::{rand::Rng, UniformRand};
use ark_std::Zero;
use ark_std::{
rand::{Rng, RngCore},
UniformRand,
};
use core::marker::PhantomData;
use super::CommitmentProver;
@ -24,25 +28,24 @@ pub struct Params {
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Pedersen<C: CurveGroup> {
pub struct Pedersen<C: CurveGroup, const H: bool = false> {
_c: PhantomData<C>,
}
impl<C: CurveGroup> Pedersen<C> {
impl<C: CurveGroup, const H: bool> Pedersen<C, H> {
pub fn new_params<R: Rng>(rng: &mut R, max: usize) -> Params<C> {
let generators: Vec<C::Affine> = std::iter::repeat_with(|| C::Affine::rand(rng))
.take(max.next_power_of_two())
.collect();
let params: Params<C> = Params::<C> {
Params::<C> {
h: C::rand(rng),
generators,
};
params
}
}
}
// implement the CommitmentProver trait for Pedersen
impl<C: CurveGroup> CommitmentProver<C> for Pedersen<C> {
impl<C: CurveGroup, const H: bool> CommitmentProver<C, H> for Pedersen<C, H> {
type Params = Params<C>;
type Proof = Proof<C>;
fn commit(
@ -55,6 +58,9 @@ impl CommitmentProver for Pedersen {
}
// h⋅r + <g, v>
// use msm_unchecked because we already ensured at the if that lengths match
if !H {
return Ok(C::msm_unchecked(&params.generators[..v.len()], v));
}
Ok(params.h.mul(r) + C::msm_unchecked(&params.generators[..v.len()], v))
}
@ -64,6 +70,7 @@ impl CommitmentProver for Pedersen {
cm: &C,
v: &[C::ScalarField],
r: &C::ScalarField, // blinding factor
_rng: Option<&mut dyn RngCore>,
) -> Result<Self::Proof, Error> {
if params.generators.len() < v.len() {
return Err(Error::PedersenParamsLen(params.generators.len(), v.len()));
@ -75,7 +82,10 @@ impl CommitmentProver for Pedersen {
// R = h⋅r_1 + <g, d>
// use msm_unchecked because we already ensured at the if that lengths match
let R: C = params.h.mul(r1) + C::msm_unchecked(&params.generators[..d.len()], &d);
let mut R: C = C::msm_unchecked(&params.generators[..d.len()], &d);
if H {
R += params.h.mul(r1);
}
transcript.absorb_point(&R)?;
let e = transcript.get_challenge();
@ -83,13 +93,16 @@ impl CommitmentProver for Pedersen {
// u = d + v⋅e
let u = vec_add(&vec_scalar_mul(v, &e), &d)?;
// r_u = e⋅r + r_1
let r_u = e * r + r1;
let mut r_u = C::ScalarField::zero();
if H {
r_u = e * r + r1;
}
Ok(Self::Proof { R, u, r_u })
}
}
impl<C: CurveGroup> Pedersen<C> {
impl<C: CurveGroup, const H: bool> Pedersen<C, H> {
pub fn verify(
params: &Params<C>,
transcript: &mut impl Transcript<C>,
@ -112,8 +125,10 @@ impl Pedersen {
// check that: R + cm⋅e == h⋅r_u + <g, u>
let lhs = proof.R + cm.mul(e);
// use msm_unchecked because we already ensured at the if that lengths match
let rhs = params.h.mul(proof.r_u)
+ C::msm_unchecked(&params.generators[..proof.u.len()], &proof.u);
let mut rhs = C::msm_unchecked(&params.generators[..proof.u.len()], &proof.u);
if H {
rhs += params.h.mul(proof.r_u);
}
if lhs != rhs {
return Err(Error::CommitmentVerificationFail);
}
@ -123,7 +138,7 @@ impl Pedersen {
pub type CF<C> = <<C as CurveGroup>::BaseField as Field>::BasePrimeField;
pub struct PedersenGadget<C, GC>
pub struct PedersenGadget<C, GC, const H: bool = false>
where
C: CurveGroup,
GC: CurveVar<C, CF<C>>,
@ -133,7 +148,8 @@ where
_gc: PhantomData<GC>,
}
impl<C, GC> PedersenGadget<C, GC>
use ark_r1cs_std::{fields::nonnative::NonNativeFieldVar, ToBitsGadget};
impl<C, GC, const H: bool> PedersenGadget<C, GC, H>
where
C: CurveGroup,
GC: CurveVar<C, CF<C>>,
@ -144,13 +160,15 @@ where
pub fn commit(
h: GC,
g: Vec<GC>,
v: Vec<Vec<Boolean<CF<C>>>>,
r: Vec<Boolean<CF<C>>>,
v: Vec<NonNativeFieldVar<C::ScalarField, CF<C>>>,
r: NonNativeFieldVar<C::ScalarField, CF<C>>,
) -> Result<GC, SynthesisError> {
let mut res = GC::zero();
res += h.scalar_mul_le(r.iter())?;
if H {
res += h.scalar_mul_le(r.to_bits_le()?.iter())?;
}
for (i, v_i) in v.iter().enumerate() {
res += g[i].scalar_mul_le(v_i.iter())?;
res += g[i].scalar_mul_le(v_i.to_bits_le()?.iter())?;
}
Ok(res)
}
@ -158,9 +176,8 @@ where
#[cfg(test)]
mod tests {
use ark_ff::{BigInteger, PrimeField};
use ark_pallas::{constraints::GVar, Fq, Fr, Projective};
use ark_r1cs_std::{alloc::AllocVar, bits::boolean::Boolean, eq::EqGadget};
use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::UniformRand;
@ -186,15 +203,20 @@ mod tests {
.collect();
let r: Fr = Fr::rand(&mut rng);
let cm = Pedersen::<Projective>::commit(&params, &v, &r).unwrap();
let proof = Pedersen::<Projective>::prove(&params, &mut transcript_p, &cm, &v, &r).unwrap();
let proof =
Pedersen::<Projective>::prove(&params, &mut transcript_p, &cm, &v, &r, None).unwrap();
Pedersen::<Projective>::verify(&params, &mut transcript_v, cm, proof).unwrap();
}
#[test]
fn test_pedersen_circuit() {
test_pedersen_circuit_opt::<false>();
test_pedersen_circuit_opt::<true>();
}
fn test_pedersen_circuit_opt<const hiding: bool>() {
let mut rng = ark_std::test_rng();
let n: usize = 10;
let n: usize = 16;
// setup params
let params = Pedersen::<Projective>::new_params(&mut rng, n);
@ -207,17 +229,9 @@ mod tests {
// circuit
let cs = ConstraintSystem::<Fq>::new_ref();
let v_bits: Vec<Vec<bool>> = v.iter().map(|val| val.into_bigint().to_bits_le()).collect();
let r_bits: Vec<bool> = r.into_bigint().to_bits_le();
// prepare inputs
let vVar: Vec<Vec<Boolean<Fq>>> = v_bits
.iter()
.map(|val_bits| {
Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(val_bits.clone())).unwrap()
})
.collect();
let rVar = Vec::<Boolean<Fq>>::new_witness(cs.clone(), || Ok(r_bits)).unwrap();
let vVar = Vec::<NonNativeFieldVar<Fr, Fq>>::new_witness(cs.clone(), || Ok(v)).unwrap();
let rVar = NonNativeFieldVar::<Fr, Fq>::new_witness(cs.clone(), || Ok(r)).unwrap();
let gVar = Vec::<GVar>::new_witness(cs.clone(), || Ok(params.generators)).unwrap();
let hVar = GVar::new_witness(cs.clone(), || Ok(params.h)).unwrap();
let expected_cmVar = GVar::new_witness(cs.clone(), || Ok(cm)).unwrap();

+ 1
- 1
folding-schemes/src/folding/hypernova/cccs.rs

@ -112,7 +112,7 @@ impl CCCS {
) -> Result<(), Error> {
// check that C is the commitment of w. Notice that this is not verifying a Pedersen
// opening, but checking that the commitment comes from committing to the witness.
if self.C != Pedersen::commit(pedersen_params, &w.w, &w.r_w)? {
if self.C != Pedersen::<C>::commit(pedersen_params, &w.w, &w.r_w)? {
return Err(Error::NotSatisfied);
}

+ 3
- 3
folding-schemes/src/folding/hypernova/circuit.rs

@ -180,7 +180,7 @@ mod tests {
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
@ -224,7 +224,7 @@ mod tests {
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);
@ -267,7 +267,7 @@ mod tests {
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
compute_sigmas_and_thetas(&ccs, &[z1.clone()], &[z2.clone()], &r_x_prime);

+ 3
- 3
folding-schemes/src/folding/hypernova/lcccs.rs

@ -46,7 +46,7 @@ impl CCS {
) -> Result<(LCCCS<C>, Witness<C::ScalarField>), Error> {
let w: Vec<C::ScalarField> = z[(1 + self.l)..].to_vec();
let r_w = C::ScalarField::rand(rng);
let C = Pedersen::commit(pedersen_params, &w, &r_w)?;
let C = Pedersen::<C>::commit(pedersen_params, &w, &r_w)?;
let r_x: Vec<C::ScalarField> = (0..self.s).map(|_| C::ScalarField::rand(rng)).collect();
let v = self.compute_v_j(z, &r_x);
@ -96,8 +96,8 @@ impl LCCCS {
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
// check that C is the commitment of w. Notice that this is not verifying a Pedersen
// opening, but checking that the commitment comes from committing to the witness.
if self.C != Pedersen::commit(pedersen_params, &w.w, &w.r_w)? {
// opening, but checking that the Commmitment comes from committing to the witness.
if self.C != Pedersen::<C>::commit(pedersen_params, &w.w, &w.r_w)? {
return Err(Error::NotSatisfied);
}

+ 4
- 4
folding-schemes/src/folding/hypernova/nimfs.rs

@ -430,7 +430,7 @@ pub mod tests {
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
// Generate a satisfying witness
let z_1 = get_test_z(3);
@ -489,7 +489,7 @@ pub mod tests {
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
// LCCCS witness
let z_1 = get_test_z(2);
@ -557,7 +557,7 @@ pub mod tests {
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let mu = 10;
let nu = 15;
@ -639,7 +639,7 @@ pub mod tests {
// Create a basic CCS circuit
let ccs = get_test_ccs::<Projective>();
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let poseidon_config = poseidon_test_config::<Fr>();
// Prover's transcript

+ 2
- 2
folding-schemes/src/folding/hypernova/utils.rs

@ -290,7 +290,7 @@ pub mod tests {
let r_x_prime: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let sigmas_thetas =
@ -333,7 +333,7 @@ pub mod tests {
let beta: Vec<Fr> = (0..ccs.s).map(|_| Fr::rand(&mut rng)).collect();
// Initialize a multifolding object
let pedersen_params = Pedersen::new_params(&mut rng, ccs.n - ccs.l - 1);
let pedersen_params = Pedersen::<Projective>::new_params(&mut rng, ccs.n - ccs.l - 1);
let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &pedersen_params, &z1).unwrap();
let mut sum_v_j_gamma = Fr::zero();

+ 0
- 1
folding-schemes/src/folding/nova/circuits.rs

@ -654,7 +654,6 @@ pub mod tests {
.generate_constraints(cs.clone())
.unwrap();
cs.finalize();
println!("num_constraints={:?}", cs.num_constraints());
let cs = cs.into_inner().unwrap();
let r1cs = extract_r1cs::<Fr>(&cs);
let (w, x) = extract_w_x::<Fr>(&cs);

+ 0
- 2
folding-schemes/src/folding/nova/cyclefold.rs

@ -459,7 +459,6 @@ pub mod tests {
.unwrap();
nifs_cf_check.enforce_equal(&Boolean::<Fq>::TRUE).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
#[test]
@ -500,7 +499,6 @@ pub mod tests {
.unwrap();
nifs_check.enforce_equal(&Boolean::<Fq>::TRUE).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
#[test]

+ 9
- 21
folding-schemes/src/folding/nova/decider_eth_circuit.rs

@ -370,13 +370,6 @@ where
// `#[cfg(not(test))]`
use crate::commitment::pedersen::PedersenGadget;
use crate::folding::nova::cyclefold::{CycleFoldCommittedInstanceVar, CF_IO_LEN};
use ark_r1cs_std::ToBitsGadget;
let cf_r1cs = R1CSVar::<
C1::BaseField,
CF1<C1>,
NonNativeFieldVar<C1::BaseField, CF1<C1>>,
>::new_witness(cs.clone(), || Ok(self.cf_r1cs.clone()))?;
let cf_u_dummy_native = CommittedInstance::<C2>::dummy(CF_IO_LEN);
let w_dummy_native = Witness::<C2>::new(
@ -393,28 +386,24 @@ where
// 5. check Pedersen commitments of cf_U_i.{cmE, cmW}
let H = GC2::new_constant(cs.clone(), self.cf_pedersen_params.h)?;
let G = Vec::<GC2>::new_constant(cs.clone(), self.cf_pedersen_params.generators)?;
let cf_W_i_E_bits: Vec<Vec<Boolean<CF1<C1>>>> = cf_W_i
.E
.iter()
.map(|E_i| E_i.to_bits_le().unwrap())
.collect();
let cf_W_i_W_bits: Vec<Vec<Boolean<CF1<C1>>>> = cf_W_i
.W
.iter()
.map(|W_i| W_i.to_bits_le().unwrap())
.collect();
let computed_cmE = PedersenGadget::<C2, GC2>::commit(
H.clone(),
G.clone(),
cf_W_i_E_bits,
cf_W_i.rE.to_bits_le()?,
cf_W_i.E.clone(),
cf_W_i.rE,
)?;
cf_U_i.cmE.enforce_equal(&computed_cmE)?;
let computed_cmW =
PedersenGadget::<C2, GC2>::commit(H, G, cf_W_i_W_bits, cf_W_i.rW.to_bits_le()?)?;
PedersenGadget::<C2, GC2>::commit(H, G, cf_W_i.W.clone(), cf_W_i.rW)?;
cf_U_i.cmW.enforce_equal(&computed_cmW)?;
let cf_r1cs = R1CSVar::<
C1::BaseField,
CF1<C1>,
NonNativeFieldVar<C1::BaseField, CF1<C1>>,
>::new_witness(cs.clone(), || Ok(self.cf_r1cs.clone()))?;
// 6. check RelaxedR1CS of cf_U_i
let cf_z_U: Vec<NonNativeFieldVar<C2::ScalarField, CF1<C1>>> =
[vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat();
@ -681,6 +670,5 @@ pub mod tests {
// generate the constraints and check that are satisfied by the inputs
decider_circuit.generate_constraints(cs.clone()).unwrap();
assert!(cs.is_satisfied().unwrap());
dbg!(cs.num_constraints());
}
}

+ 3
- 3
folding-schemes/src/folding/nova/nifs.rs

@ -190,9 +190,9 @@ where
T: Vec<C::ScalarField>,
cmT: &C,
) -> Result<[CP::Proof; 3], Error> {
let cmE_proof = CP::prove(cm_prover_params, tr, &ci.cmE, &w.E, &w.rE)?;
let cmW_proof = CP::prove(cm_prover_params, tr, &ci.cmW, &w.W, &w.rW)?;
let cmT_proof = CP::prove(cm_prover_params, tr, cmT, &T, &C::ScalarField::one())?; // cm(T) is committed with rT=1
let cmE_proof = CP::prove(cm_prover_params, tr, &ci.cmE, &w.E, &w.rE, None)?;
let cmW_proof = CP::prove(cm_prover_params, tr, &ci.cmW, &w.W, &w.rW, None)?;
let cmT_proof = CP::prove(cm_prover_params, tr, cmT, &T, &C::ScalarField::one(), None)?; // cm(T) is committed with rT=1
Ok([cmE_proof, cmW_proof, cmT_proof])
}
}

+ 2
- 1
folding-schemes/src/folding/protogalaxy/folding.rs

@ -18,7 +18,8 @@ use super::{CommittedInstance, Witness};
use crate::ccs::r1cs::R1CS;
use crate::transcript::Transcript;
use crate::utils::{bit::bit_decompose, vec::*};
use crate::utils::vec::*;
use crate::utils::virtual_polynomial::bit_decompose;
use crate::Error;
#[derive(Clone, Debug)]

+ 4
- 0
folding-schemes/src/lib.rs

@ -40,10 +40,14 @@ pub enum Error {
NotSameLength(String, usize, String, usize),
#[error("Vector's length ({0}) is not the expected ({1})")]
NotExpectedLength(usize, usize),
#[error("Vector ({0}) length ({1}) is not a power of two")]
NotPowerOfTwo(String, usize),
#[error("Can not be empty")]
Empty,
#[error("Pedersen parameters length is not sufficient (generators.len={0} < vector.len={1} unsatisfied)")]
PedersenParamsLen(usize, usize),
#[error("Randomness for blinding not found")]
MissingRandomness,
#[error("Commitment verification failed")]
CommitmentVerificationFail,
#[error("IVC verification failed")]

+ 0
- 9
folding-schemes/src/utils/bit.rs

@ -1,9 +0,0 @@
pub fn bit_decompose(input: u64, n: usize) -> Vec<bool> {
let mut res = Vec::with_capacity(n);
let mut i = input;
for _ in 0..n {
res.push(i & 1 == 1);
i >>= 1;
}
res
}

+ 12
- 1
folding-schemes/src/utils/mod.rs

@ -1,4 +1,5 @@
pub mod bit;
use ark_ff::PrimeField;
pub mod gadgets;
pub mod hypercube;
pub mod lagrange_poly;
@ -10,3 +11,13 @@ pub mod espresso;
pub use crate::utils::espresso::multilinear_polynomial;
pub use crate::utils::espresso::sum_check;
pub use crate::utils::espresso::virtual_polynomial;
/// For a given x, returns [1, x^1, x^2, ..., x^n-1];
pub fn powers_of<F: PrimeField>(x: F, n: usize) -> Vec<F> {
let mut c: Vec<F> = vec![F::zero(); n];
c[0] = F::one();
for i in 1..n {
c[i] = c[i - 1] * x;
}
c
}

Loading…
Cancel
Save