
Protogalaxy verifier circuit (#95)

* Add in-circuit counterparts of util functions

* Support folding `CommittedInstance`s with `u` and `x`

* Initial implementation of protogalaxy verifier circuit

* Fix the edge case in `all_powers_var`

* Absorb `u` and `x` as well

* Fix imports

* Rename some test functions for clarity

* Format

* Avoid the use of magic numbers
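For context, the folded instance produced by the new verifier circuit (and by the native `Folding::verify`) follows the equations below, reconstructed from the code in this diff. Here k is the number of incoming instances, t = |β|, n = |F_coeffs|, Lᵢ / Z are the Lagrange basis and vanishing polynomial of a domain of size k + 1, and δ, α, γ are the transcript challenges:

δᵢ = δ^(2^i),  βᵢ* = βᵢ + α ⋅ δᵢ
F(α) = e + Σ_{i=1..n-1} Fᵢ ⋅ αⁱ
e* = F(α) ⋅ L₀(γ) + Z(γ) ⋅ K(γ)
u* = Σ_{i=0..k} Lᵢ(γ) ⋅ uᵢ,  x* = Σ_{i=0..k} Lᵢ(γ) ⋅ xᵢ

with (u₀, x₀) taken from the running instance. In-circuit, φ* is left as a constant zero because the commitment folding is deferred to CycleFold (see the comment in `circuits.rs`).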
update-nifs-interface
winderica authored 3 months ago, committed by GitHub · commit 6248a90e89
6 changed files with 453 additions and 70 deletions
  1. folding-schemes/src/arith/r1cs.rs (+16, −0)
  2. folding-schemes/src/folding/protogalaxy/circuits.rs (+179, −0)
  3. folding-schemes/src/folding/protogalaxy/folding.rs (+102, −66)
  4. folding-schemes/src/folding/protogalaxy/mod.rs (+35, −2)
  5. folding-schemes/src/folding/protogalaxy/traits.rs (+4, −0)
  6. folding-schemes/src/folding/protogalaxy/utils.rs (+117, −2)

folding-schemes/src/arith/r1cs.rs (+16, −0)

@ -179,6 +179,22 @@ pub mod tests {
])
}
pub fn get_test_z_split<F: PrimeField>(input: usize) -> (F, Vec<F>, Vec<F>) {
// z = (1, io, w)
(
F::one(),
to_F_vec(vec![
input, // io
]),
to_F_vec(vec![
input * input * input + input + 5, // x^3 + x + 5
input * input, // x^2
input * input * input, // x^2 * x
input * input * input + input, // x^3 + x
]),
)
}
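A quick worked instance of this helper (plain arithmetic, using input = 3 as in the pre-existing `get_test_z` calls):
// get_test_z_split::<Fr>(3) returns
// u = 1
// x = [3]
// w = [35, 9, 27, 30]  // i.e. 3^3 + 3 + 5, 3^2, 3^3, 3^3 + 3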
#[test]
fn test_check_relation() {
let r1cs = get_test_r1cs::<Fr>();

folding-schemes/src/folding/protogalaxy/circuits.rs (+179, −0)

@ -0,0 +1,179 @@
use ark_crypto_primitives::sponge::CryptographicSponge;
use ark_ec::CurveGroup;
use ark_poly::{univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain};
use ark_r1cs_std::{
alloc::AllocVar,
fields::{fp::FpVar, FieldVar},
poly::polynomial::univariate::dense::DensePolynomialVar,
};
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use super::{
folding::lagrange_polys,
utils::{all_powers_var, betas_star_var, exponential_powers_var},
CommittedInstanceVar,
};
use crate::{
folding::circuits::nonnative::affine::NonNativeAffineVar, transcript::TranscriptVar,
utils::gadgets::VectorGadget,
};
pub struct FoldingGadget {}
impl FoldingGadget {
pub fn fold_committed_instance<C: CurveGroup, S: CryptographicSponge>(
transcript: &mut impl TranscriptVar<C::ScalarField, S>,
// running instance
instance: &CommittedInstanceVar<C>,
// incoming instances
vec_instances: &[CommittedInstanceVar<C>],
// polys from P
F_coeffs: Vec<FpVar<C::ScalarField>>,
K_coeffs: Vec<FpVar<C::ScalarField>>,
) -> Result<CommittedInstanceVar<C>, SynthesisError> {
let t = instance.betas.len();
let n = F_coeffs.len();
// absorb the committed instances
transcript.absorb(instance)?;
transcript.absorb(&vec_instances)?;
let delta = transcript.get_challenge()?;
let deltas = exponential_powers_var(delta, t);
transcript.absorb(&F_coeffs)?;
let alpha = transcript.get_challenge()?;
let alphas = all_powers_var(alpha.clone(), n);
// F(alpha) = e + \sum_t F_i * alpha^i
let mut F_alpha = instance.e.clone();
for (i, F_i) in F_coeffs.iter().skip(1).enumerate() {
F_alpha += F_i * &alphas[i + 1];
}
let betas_star = betas_star_var(&instance.betas, &deltas, &alpha);
let k = vec_instances.len();
let H = GeneralEvaluationDomain::new(k + 1).unwrap();
let L_X = lagrange_polys(H)
.into_iter()
.map(|poly| {
DensePolynomialVar::from_coefficients_vec(
poly.coeffs
.into_iter()
.map(FpVar::constant)
.collect::<Vec<_>>(),
)
})
.collect::<Vec<_>>();
let Z_X = DensePolynomialVar::from_coefficients_vec(
DensePolynomial::from(H.vanishing_polynomial())
.coeffs
.into_iter()
.map(FpVar::constant)
.collect::<Vec<_>>(),
);
let K_X = DensePolynomialVar { coeffs: K_coeffs };
transcript.absorb(&K_X.coeffs)?;
let gamma = transcript.get_challenge()?;
let L_X_evals = L_X
.iter()
.take(k + 1)
.map(|L| L.evaluate(&gamma))
.collect::<Result<Vec<_>, _>>()?;
let e_star = F_alpha * &L_X_evals[0] + Z_X.evaluate(&gamma)? * K_X.evaluate(&gamma)?;
let mut u_star = &instance.u * &L_X_evals[0];
let mut x_star = instance.x.mul_scalar(&L_X_evals[0])?;
for i in 0..k {
u_star += &vec_instances[i].u * &L_X_evals[i + 1];
x_star = x_star.add(&vec_instances[i].x.mul_scalar(&L_X_evals[i + 1])?)?;
}
// return the folded instance
Ok(CommittedInstanceVar {
betas: betas_star,
// phi will be computed in CycleFold
phi: NonNativeAffineVar::new_constant(ConstraintSystemRef::None, C::zero())?,
e: e_star,
u: u_star,
x: x_star,
})
}
}
#[cfg(test)]
mod tests {
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
};
use ark_pallas::{Fr, Projective};
use ark_r1cs_std::R1CSVar;
use ark_relations::r1cs::ConstraintSystem;
use std::error::Error;
use super::*;
use crate::{
arith::r1cs::tests::get_test_r1cs,
folding::protogalaxy::folding::{tests::prepare_inputs, Folding},
transcript::poseidon::poseidon_canonical_config,
};
#[test]
fn test_fold_gadget() -> Result<(), Box<dyn Error>> {
let k = 7;
let (witness, instance, witnesses, instances) = prepare_inputs(k);
let r1cs = get_test_r1cs::<Fr>();
// init Prover & Verifier's transcript
let poseidon_config = poseidon_canonical_config::<Fr>();
let mut transcript_p = PoseidonSponge::new(&poseidon_config);
let mut transcript_v = PoseidonSponge::new(&poseidon_config);
let (_, _, F_coeffs, K_coeffs) = Folding::<Projective>::prove(
&mut transcript_p,
&r1cs,
&instance,
&witness,
&instances,
&witnesses,
)?;
let folded_instance = Folding::<Projective>::verify(
&mut transcript_v,
&r1cs,
&instance,
&instances,
F_coeffs.clone(),
K_coeffs.clone(),
)?;
let cs = ConstraintSystem::new_ref();
let mut transcript_var = PoseidonSpongeVar::new(cs.clone(), &poseidon_config);
let instance_var = CommittedInstanceVar::new_witness(cs.clone(), || Ok(instance))?;
let instances_var = Vec::new_witness(cs.clone(), || Ok(instances))?;
let F_coeffs_var = Vec::new_witness(cs.clone(), || Ok(F_coeffs))?;
let K_coeffs_var = Vec::new_witness(cs.clone(), || Ok(K_coeffs))?;
let folded_instance_var = FoldingGadget::fold_committed_instance(
&mut transcript_var,
&instance_var,
&instances_var,
F_coeffs_var,
K_coeffs_var,
)?;
assert_eq!(folded_instance.betas, folded_instance_var.betas.value()?);
assert_eq!(folded_instance.e, folded_instance_var.e.value()?);
assert_eq!(folded_instance.u, folded_instance_var.u.value()?);
assert_eq!(folded_instance.x, folded_instance_var.x.value()?);
assert!(cs.is_satisfied()?);
Ok(())
}
}

folding-schemes/src/folding/protogalaxy/folding.rs (+102, −66)

@ -63,10 +63,13 @@ where
let k = vec_instances.len();
let t = instance.betas.len();
let n = r1cs.A.n_cols;
if w.w.len() != n {
let z = [vec![instance.u], instance.x.clone(), w.w.clone()].concat();
if z.len() != n {
return Err(Error::NotSameLength(
"w.w.len()".to_string(),
w.w.len(),
"z.len()".to_string(),
z.len(),
"n".to_string(),
n,
));
@ -85,11 +88,11 @@ where
let delta = transcript.get_challenge();
let deltas = exponential_powers(delta, t);
let f_w = eval_f(r1cs, &w.w)?;
let f_z = eval_f(r1cs, &z)?;
// F(X)
let F_X: SparsePolynomial<C::ScalarField> =
calc_f_from_btree(&f_w, &instance.betas, &deltas).expect("Error calculating F[x]");
calc_f_from_btree(&f_z, &instance.betas, &deltas).expect("Error calculating F[x]");
let F_X_dense = DensePolynomial::from(F_X.clone());
transcript.absorb(&F_X_dense.coeffs);
@ -110,24 +113,28 @@ where
phi: instance.phi,
betas: betas_star.clone(),
e: F_alpha,
u: instance.u,
x: instance.x.clone(),
},
w,
)?;
let ws: Vec<Vec<C::ScalarField>> = std::iter::once(w.w.clone())
let zs: Vec<Vec<C::ScalarField>> = std::iter::once(z.clone())
.chain(
vec_w
.iter()
.map(|wj| {
if wj.w.len() != n {
.zip(vec_instances)
.map(|(wj, uj)| {
let zj = [vec![uj.u], uj.x.clone(), wj.w.clone()].concat();
if zj.len() != n {
return Err(Error::NotSameLength(
"wj.w.len()".to_string(),
wj.w.len(),
"zj.len()".to_string(),
zj.len(),
"n".to_string(),
n,
));
}
Ok(wj.w.clone())
Ok(zj)
})
.collect::<Result<Vec<Vec<C::ScalarField>>, Error>>()?,
)
@ -144,17 +151,17 @@ where
let mut G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); G_domain.size()];
for (hi, h) in G_domain.elements().enumerate() {
// each iteration evaluates G(h)
// inner = L_0(x) * w + \sum_k L_i(x) * w_j
let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); ws[0].len()];
for (i, w) in ws.iter().enumerate() {
// Li_w_h = (Li(X)*wj)(h) = Li(h) * wj
let mut Liw_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w.len()];
for (j, wj) in w.iter().enumerate() {
Liw_h[j] = (&L_X[i] * *wj).evaluate(&h);
// inner = L_0(x) * z + \sum_k L_i(x) * z_j
let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); zs[0].len()];
for (i, z) in zs.iter().enumerate() {
// Li_z_h = (Li(X)*zj)(h) = Li(h) * zj
let mut Liz_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); z.len()];
for (j, zj) in z.iter().enumerate() {
Liz_h[j] = (&L_X[i] * *zj).evaluate(&h);
}
for j in 0..inner.len() {
inner[j] += Liw_h[j];
inner[j] += Liz_h[j];
}
}
let f_ev = eval_f(r1cs, &inner)?;
@ -187,19 +194,27 @@ where
let gamma = transcript.get_challenge();
let e_star =
F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
let L_X_evals = L_X
.iter()
.take(k + 1)
.map(|L| L.evaluate(&gamma))
.collect::<Vec<_>>();
let e_star = F_alpha * L_X_evals[0] + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
let mut w_star = vec_scalar_mul(&w.w, &L_X_evals[0]);
let mut r_w_star = w.r_w * L_X_evals[0];
let mut phi_star = instance.phi * L_X_evals[0];
let mut u_star = instance.u * L_X_evals[0];
let mut x_star = vec_scalar_mul(&instance.x, &L_X_evals[0]);
for i in 0..k {
phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
}
let mut w_star: Vec<C::ScalarField> = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma));
let mut r_w_star: C::ScalarField = w.r_w * L_X[0].evaluate(&gamma);
for i in 0..k {
let L_X_at_i1 = L_X[i + 1].evaluate(&gamma);
w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_at_i1))?;
r_w_star += vec_w[i].r_w * L_X_at_i1;
w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_evals[i + 1]))?;
r_w_star += vec_w[i].r_w * L_X_evals[i + 1];
phi_star += vec_instances[i].phi * L_X_evals[i + 1];
u_star += vec_instances[i].u * L_X_evals[i + 1];
x_star = vec_add(
&x_star,
&vec_scalar_mul(&vec_instances[i].x, &L_X_evals[i + 1]),
)?;
}
Ok((
@ -207,6 +222,8 @@ where
betas: betas_star,
phi: phi_star,
e: e_star,
u: u_star,
x: x_star,
},
Witness {
w: w_star,
@ -264,12 +281,24 @@ where
let gamma = transcript.get_challenge();
let e_star =
F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
let L_X_evals = L_X
.iter()
.take(k + 1)
.map(|L| L.evaluate(&gamma))
.collect::<Vec<_>>();
let e_star = F_alpha * L_X_evals[0] + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
let mut phi_star = instance.phi * L_X_evals[0];
let mut u_star = instance.u * L_X_evals[0];
let mut x_star = vec_scalar_mul(&instance.x, &L_X_evals[0]);
for i in 0..k {
phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
phi_star += vec_instances[i].phi * L_X_evals[i + 1];
u_star += vec_instances[i].u * L_X_evals[i + 1];
x_star = vec_add(
&x_star,
&vec_scalar_mul(&vec_instances[i].x, &L_X_evals[i + 1]),
)?;
}
// return the folded instance
@ -277,6 +306,8 @@ where
betas: betas_star,
phi: phi_star,
e: e_star,
u: u_star,
x: x_star,
})
}
}
@ -350,7 +381,9 @@ fn calc_f_from_btree(
}
// lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
pub fn lagrange_polys<F: PrimeField>(
domain_n: GeneralEvaluationDomain<F>,
) -> Vec<DensePolynomial<F>> {
let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
for i in 0..domain_n.size() {
let evals: Vec<F> = cfg_into_iter!(0..domain_n.size())
@ -363,23 +396,23 @@ fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
// f(w) in R1CS context. For the moment we use R1CS, in the future we will abstract this with a
// trait
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Result<Vec<F>, Error> {
let Az = mat_vec_mul(&r1cs.A, w)?;
let Bz = mat_vec_mul(&r1cs.B, w)?;
let Cz = mat_vec_mul(&r1cs.C, w)?;
fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, z: &[F]) -> Result<Vec<F>, Error> {
let Az = mat_vec_mul(&r1cs.A, z)?;
let Bz = mat_vec_mul(&r1cs.B, z)?;
let Cz = mat_vec_mul(&r1cs.C, z)?;
let AzBz = hadamard(&Az, &Bz)?;
vec_sub(&AzBz, &Cz)
}
#[cfg(test)]
mod tests {
pub mod tests {
use super::*;
use ark_crypto_primitives::sponge::poseidon::PoseidonSponge;
use ark_crypto_primitives::sponge::CryptographicSponge;
use ark_pallas::{Fr, Projective};
use ark_std::UniformRand;
use ark_std::{rand::Rng, UniformRand};
use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z};
use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z, get_test_z_split};
use crate::commitment::{pedersen::Pedersen, CommitmentScheme};
use crate::transcript::poseidon::poseidon_canonical_config;
@ -388,20 +421,22 @@ mod tests {
instance: &CommittedInstance<C>,
w: &Witness<C::ScalarField>,
) -> Result<(), Error> {
if instance.betas.len() != log2(w.w.len()) as usize {
let z = [vec![instance.u], instance.x.clone(), w.w.clone()].concat();
if instance.betas.len() != log2(z.len()) as usize {
return Err(Error::NotSameLength(
"instance.betas.len()".to_string(),
instance.betas.len(),
"log2(w.w.len())".to_string(),
log2(w.w.len()) as usize,
"log2(z.len())".to_string(),
log2(z.len()) as usize,
));
}
let f_w = eval_f(r1cs, &w.w)?; // f(w)
let f_z = eval_f(r1cs, &z)?; // f(z)
let mut r = C::ScalarField::zero();
for (i, f_w_i) in f_w.iter().enumerate() {
r += pow_i(i, &instance.betas) * f_w_i;
for (i, f_z_i) in f_z.iter().enumerate() {
r += pow_i(i, &instance.betas) * f_z_i;
}
if instance.e == r {
return Ok(());
@ -426,8 +461,9 @@ mod tests {
#[test]
fn test_eval_f() {
let mut rng = ark_std::test_rng();
let r1cs = get_test_r1cs::<Fr>();
let mut z = get_test_z::<Fr>(3);
let mut z = get_test_z::<Fr>(rng.gen::<u16>() as usize);
let f_w = eval_f(&r1cs, &z).unwrap();
assert!(is_zero_vec(&f_w));
@ -439,7 +475,7 @@ mod tests {
// k represents the number of instances to be fold, apart from the running instance
#[allow(clippy::type_complexity)]
fn prepare_inputs(
pub fn prepare_inputs(
k: usize,
) -> (
Witness<Fr>,
@ -448,23 +484,19 @@ mod tests {
Vec<CommittedInstance<Projective>>,
) {
let mut rng = ark_std::test_rng();
let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, 100).unwrap(); // 100 is wip, will get it from actual vec
let z = get_test_z::<Fr>(3);
let mut zs: Vec<Vec<Fr>> = Vec::new();
for i in 0..k {
let z_i = get_test_z::<Fr>(i + 4);
zs.push(z_i);
}
let (u, x, w) = get_test_z_split::<Fr>(rng.gen::<u16>() as usize);
let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, w.len()).unwrap();
let n = z.len();
let n = 1 + x.len() + w.len();
let t = log2(n) as usize;
let beta = Fr::rand(&mut rng);
let betas = exponential_powers(beta, t);
let witness = Witness::<Fr> {
w: z.clone(),
w,
r_w: Fr::rand(&mut rng),
};
let phi = Pedersen::<Projective, true>::commit(&pedersen_params, &witness.w, &witness.r_w)
@ -473,14 +505,17 @@ mod tests {
phi,
betas: betas.clone(),
e: Fr::zero(),
u,
x,
};
// same for the other instances
let mut witnesses: Vec<Witness<Fr>> = Vec::new();
let mut instances: Vec<CommittedInstance<Projective>> = Vec::new();
#[allow(clippy::needless_range_loop)]
for i in 0..k {
for _ in 0..k {
let (u_i, x_i, w_i) = get_test_z_split::<Fr>(rng.gen::<u16>() as usize);
let witness_i = Witness::<Fr> {
w: zs[i].clone(),
w: w_i,
r_w: Fr::rand(&mut rng),
};
let phi_i = Pedersen::<Projective, true>::commit(
@ -493,6 +528,8 @@ mod tests {
phi: phi_i,
betas: vec![],
e: Fr::zero(),
u: u_i,
x: x_i,
};
witnesses.push(witness_i);
instances.push(instance_i);
@ -502,7 +539,7 @@ mod tests {
}
#[test]
fn test_fold_native_case() {
fn test_fold() {
let k = 7;
let (witness, instance, witnesses, instances) = prepare_inputs(k);
let r1cs = get_test_r1cs::<Fr>();
@ -584,9 +621,8 @@ mod tests {
.unwrap();
// check that prover & verifier folded instances are the same values
assert_eq!(folded_instance.phi, folded_instance_v.phi);
assert_eq!(folded_instance.betas, folded_instance_v.betas);
assert_eq!(folded_instance.e, folded_instance_v.e);
assert_eq!(folded_instance, folded_instance_v);
assert!(!folded_instance.e.is_zero());
// check that the folded instance satisfies the relation
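That relation, which the test helper earlier in this file evaluates natively, can be written (with z = (u, x, w) and f(z) = Az ∘ Bz − Cz the vector of R1CS error terms) as:

e = Σᵢ pow_i(β) ⋅ fᵢ(z)

where pow_i(β) = Πⱼ βⱼ^{bitⱼ(i)} is ProtoGalaxy's power-of-β term; its definition lives elsewhere in the crate and is not part of this diff.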

folding-schemes/src/folding/protogalaxy/mod.rs (+35, −2)

@ -1,20 +1,29 @@
use std::borrow::Borrow;
/// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_r1cs_std::fields::fp::FpVar;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
fields::fp::FpVar,
};
use ark_relations::r1cs::{Namespace, SynthesisError};
use thiserror::Error;
use super::circuits::nonnative::affine::NonNativeAffineVar;
pub mod circuits;
pub mod folding;
pub mod traits;
pub(crate) mod utils;
#[derive(Clone, Debug)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CommittedInstance<C: CurveGroup> {
phi: C,
betas: Vec<C::ScalarField>,
e: C::ScalarField,
u: C::ScalarField,
x: Vec<C::ScalarField>,
}
#[derive(Clone, Debug)]
@ -22,6 +31,30 @@ pub struct CommittedInstanceVar<C: CurveGroup> {
phi: NonNativeAffineVar<C>,
betas: Vec<FpVar<C::ScalarField>>,
e: FpVar<C::ScalarField>,
u: FpVar<C::ScalarField>,
x: Vec<FpVar<C::ScalarField>>,
}
impl<C: CurveGroup> AllocVar<CommittedInstance<C>, C::ScalarField> for CommittedInstanceVar<C> {
fn new_variable<T: Borrow<CommittedInstance<C>>>(
cs: impl Into<Namespace<C::ScalarField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|u| {
let cs = cs.into();
let u = u.borrow();
Ok(Self {
phi: NonNativeAffineVar::new_variable(cs.clone(), || Ok(u.phi), mode)?,
betas: Vec::new_variable(cs.clone(), || Ok(u.betas.clone()), mode)?,
e: FpVar::new_variable(cs.clone(), || Ok(u.e), mode)?,
u: FpVar::new_variable(cs.clone(), || Ok(u.u), mode)?,
x: Vec::new_variable(cs.clone(), || Ok(u.x.clone()), mode)?,
})
})
}
}
#[derive(Clone, Debug)]

folding-schemes/src/folding/protogalaxy/traits.rs (+4, −0)

@ -22,6 +22,8 @@ where
.to_sponge_field_elements(dest);
self.betas.to_sponge_field_elements(dest);
self.e.to_sponge_field_elements(dest);
self.u.to_sponge_field_elements(dest);
self.x.to_sponge_field_elements(dest);
}
}
@ -36,6 +38,8 @@ impl<C: CurveGroup> AbsorbGadget<C::ScalarField> for CommittedInstanceVar<C> {
self.phi.to_constraint_field()?,
self.betas.to_sponge_field_elements()?,
self.e.to_sponge_field_elements()?,
self.u.to_sponge_field_elements()?,
self.x.to_sponge_field_elements()?,
]
.concat())
}

folding-schemes/src/folding/protogalaxy/utils.rs (+117, −2)

@ -1,6 +1,7 @@
use ark_ff::PrimeField;
use ark_r1cs_std::fields::{fp::FpVar, FieldVar};
// returns (b, b^2, b^4, ..., b^{2^{t-1}})
/// Returns (b, b^2, b^4, ..., b^{2^{t-1}})
pub fn exponential_powers<F: PrimeField>(b: F, t: usize) -> Vec<F> {
let mut r = vec![F::zero(); t];
r[0] = b;
@ -9,6 +10,18 @@ pub fn exponential_powers<F: PrimeField>(b: F, t: usize) -> Vec<F> {
}
r
}
/// The in-circuit version of `exponential_powers`
pub fn exponential_powers_var<F: PrimeField>(b: FpVar<F>, t: usize) -> Vec<FpVar<F>> {
let mut r = vec![FpVar::zero(); t];
r[0] = b;
for i in 1..t {
r[i] = &r[i - 1] * &r[i - 1];
}
r
}
/// Returns (a, a^2, a^3, ..., a^{n-1})
pub fn all_powers<F: PrimeField>(a: F, n: usize) -> Vec<F> {
let mut r = vec![F::zero(); n];
for (i, r_i) in r.iter_mut().enumerate() {
@ -17,7 +30,20 @@ pub fn all_powers<F: PrimeField>(a: F, n: usize) -> Vec<F> {
r
}
// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ
/// The in-circuit version of `all_powers`
pub fn all_powers_var<F: PrimeField>(a: FpVar<F>, n: usize) -> Vec<FpVar<F>> {
if n == 0 {
return vec![];
}
let mut r = vec![FpVar::zero(); n];
r[0] = FpVar::one();
for i in 1..n {
r[i] = &r[i - 1] * &a;
}
r
}
/// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ
pub fn betas_star<F: PrimeField>(betas: &[F], deltas: &[F], alpha: F) -> Vec<F> {
betas
.iter()
@ -30,3 +56,92 @@ pub fn betas_star<F: PrimeField>(betas: &[F], deltas: &[F], alpha: F) -> Vec<F> {
.map(|(beta_i, delta_i_alpha)| *beta_i + delta_i_alpha)
.collect()
}
/// The in-circuit version of `betas_star`
pub fn betas_star_var<F: PrimeField>(
betas: &[FpVar<F>],
deltas: &[FpVar<F>],
alpha: &FpVar<F>,
) -> Vec<FpVar<F>> {
betas
.iter()
.zip(deltas)
.map(|(beta_i, delta_i)| beta_i + alpha * delta_i)
.collect::<Vec<FpVar<F>>>()
}
#[cfg(test)]
mod tests {
use std::error::Error;
use ark_bn254::Fr;
use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
use super::*;
#[test]
fn test_exponential_powers() -> Result<(), Box<dyn Error>> {
let rng = &mut test_rng();
for t in 1..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let b = Fr::rand(rng);
let b_var = FpVar::new_witness(cs.clone(), || Ok(b))?;
let r = exponential_powers(b, t);
let r_var = exponential_powers_var(b_var, t);
assert_eq!(r, r_var.value()?);
assert!(cs.is_satisfied()?);
}
Ok(())
}
#[test]
fn test_all_powers() -> Result<(), Box<dyn Error>> {
let rng = &mut test_rng();
for n in 1..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a = Fr::rand(rng);
let a_var = FpVar::new_witness(cs.clone(), || Ok(a))?;
let r = all_powers(a, n);
let r_var = all_powers_var(a_var, n);
assert_eq!(r, r_var.value()?);
assert!(cs.is_satisfied()?);
}
Ok(())
}
#[test]
fn test_betas_star() -> Result<(), Box<dyn Error>> {
let rng = &mut test_rng();
for t in 1..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let betas = (0..t).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let deltas = (0..t).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let alpha = Fr::rand(rng);
let betas_var = Vec::new_witness(cs.clone(), || Ok(betas.clone()))?;
let deltas_var = Vec::new_witness(cs.clone(), || Ok(deltas.clone()))?;
let alpha_var = FpVar::new_witness(cs.clone(), || Ok(alpha))?;
let r = betas_star(&betas, &deltas, alpha);
let r_var = betas_star_var(&betas_var, &deltas_var, &alpha_var);
assert_eq!(r, r_var.value()?);
assert!(cs.is_satisfied()?);
}
Ok(())
}
}
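The "edge case in `all_powers_var`" mentioned in the commit message is n = 0, where the write `r[0] = FpVar::one()` would index into an empty vector. A minimal sketch of an extra test for it (hypothetical, not part of this diff, following the style of the tests above):

#[test]
fn test_all_powers_empty() -> Result<(), Box<dyn Error>> {
    let rng = &mut test_rng();
    let cs = ConstraintSystem::<Fr>::new_ref();
    let a = Fr::rand(rng);
    let a_var = FpVar::new_witness(cs.clone(), || Ok(a))?;
    // For n = 0, both the native and the in-circuit versions should return an empty vector.
    assert!(all_powers(a, 0).is_empty());
    assert!(all_powers_var(a_var, 0).is_empty());
    Ok(())
}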
