diff --git a/folding-schemes/src/arith/r1cs.rs b/folding-schemes/src/arith/r1cs.rs
index a09e957..f510b67 100644
--- a/folding-schemes/src/arith/r1cs.rs
+++ b/folding-schemes/src/arith/r1cs.rs
@@ -179,6 +179,22 @@ pub mod tests {
         ])
     }
 
+    pub fn get_test_z_split<F: PrimeField>(input: usize) -> (F, Vec<F>, Vec<F>) {
+        // z = (1, io, w)
+        (
+            F::one(),
+            to_F_vec(vec![
+                input, // io
+            ]),
+            to_F_vec(vec![
+                input * input * input + input + 5, // x^3 + x + 5
+                input * input,                     // x^2
+                input * input * input,             // x^2 * x
+                input * input * input + input,     // x^3 + x
+            ]),
+        )
+    }
+
     #[test]
     fn test_check_relation() {
         let r1cs = get_test_r1cs::<Fr>();
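For orientation, a minimal sketch of what the new helper produces (a hypothetical test, not part of the patch; it assumes ark_pallas::Fr and that it is nested inside the existing tests module of arith/r1cs.rs): recombining the split as [u, io, w] gives back the full z vector of get_test_z for the x^3 + x + 5 = y test circuit.

    #[cfg(test)]
    mod z_split_sketch {
        use ark_pallas::Fr;

        use super::{get_test_z, get_test_z_split};

        #[test]
        fn split_recombines_into_z() {
            // (u, io, w) with u = 1 for a plain (non-relaxed) instance
            let (u, x, w) = get_test_z_split::<Fr>(3);
            // z = (u, io, w), matching the layout used by the folding code
            let z: Vec<Fr> = [vec![u], x, w].concat();
            assert_eq!(z, get_test_z::<Fr>(3));
        }
    }
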
diff --git a/folding-schemes/src/folding/protogalaxy/circuits.rs b/folding-schemes/src/folding/protogalaxy/circuits.rs
new file mode 100644
index 0000000..5f72690
--- /dev/null
+++ b/folding-schemes/src/folding/protogalaxy/circuits.rs
@@ -0,0 +1,179 @@
+use ark_crypto_primitives::sponge::CryptographicSponge;
+use ark_ec::CurveGroup;
+use ark_poly::{univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain};
+use ark_r1cs_std::{
+    alloc::AllocVar,
+    fields::{fp::FpVar, FieldVar},
+    poly::polynomial::univariate::dense::DensePolynomialVar,
+};
+use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
+
+use super::{
+    folding::lagrange_polys,
+    utils::{all_powers_var, betas_star_var, exponential_powers_var},
+    CommittedInstanceVar,
+};
+use crate::{
+    folding::circuits::nonnative::affine::NonNativeAffineVar, transcript::TranscriptVar,
+    utils::gadgets::VectorGadget,
+};
+
+pub struct FoldingGadget {}
+
+impl FoldingGadget {
+    pub fn fold_committed_instance<C: CurveGroup, S: CryptographicSponge>(
+        transcript: &mut impl TranscriptVar<C::ScalarField, S>,
+        // running instance
+        instance: &CommittedInstanceVar<C>,
+        // incoming instances
+        vec_instances: &[CommittedInstanceVar<C>],
+        // polys from P
+        F_coeffs: Vec<FpVar<C::ScalarField>>,
+        K_coeffs: Vec<FpVar<C::ScalarField>>,
+    ) -> Result<CommittedInstanceVar<C>, SynthesisError> {
+        let t = instance.betas.len();
+        let n = F_coeffs.len();
+
+        // absorb the committed instances
+        transcript.absorb(instance)?;
+        transcript.absorb(&vec_instances)?;
+
+        let delta = transcript.get_challenge()?;
+        let deltas = exponential_powers_var(delta, t);
+
+        transcript.absorb(&F_coeffs)?;
+
+        let alpha = transcript.get_challenge()?;
+        let alphas = all_powers_var(alpha.clone(), n);
+
+        // F(alpha) = e + \sum_t F_i * alpha^i
+        let mut F_alpha = instance.e.clone();
+        for (i, F_i) in F_coeffs.iter().skip(1).enumerate() {
+            F_alpha += F_i * &alphas[i + 1];
+        }
+
+        let betas_star = betas_star_var(&instance.betas, &deltas, &alpha);
+
+        let k = vec_instances.len();
+        let H = GeneralEvaluationDomain::new(k + 1).unwrap();
+        let L_X = lagrange_polys(H)
+            .into_iter()
+            .map(|poly| {
+                DensePolynomialVar::from_coefficients_vec(
+                    poly.coeffs
+                        .into_iter()
+                        .map(FpVar::constant)
+                        .collect::<Vec<_>>(),
+                )
+            })
+            .collect::<Vec<_>>();
+        let Z_X = DensePolynomialVar::from_coefficients_vec(
+            DensePolynomial::from(H.vanishing_polynomial())
+                .coeffs
+                .into_iter()
+                .map(FpVar::constant)
+                .collect::<Vec<_>>(),
+        );
+        let K_X = DensePolynomialVar { coeffs: K_coeffs };
+
+        transcript.absorb(&K_X.coeffs)?;
+
+        let gamma = transcript.get_challenge()?;
+
+        let L_X_evals = L_X
+            .iter()
+            .take(k + 1)
+            .map(|L| L.evaluate(&gamma))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let e_star = F_alpha * &L_X_evals[0] + Z_X.evaluate(&gamma)? * K_X.evaluate(&gamma)?;
+
+        let mut u_star = &instance.u * &L_X_evals[0];
+        let mut x_star = instance.x.mul_scalar(&L_X_evals[0])?;
+        for i in 0..k {
+            u_star += &vec_instances[i].u * &L_X_evals[i + 1];
+            x_star = x_star.add(&vec_instances[i].x.mul_scalar(&L_X_evals[i + 1])?)?;
+        }
+
+        // return the folded instance
+        Ok(CommittedInstanceVar {
+            betas: betas_star,
+            // phi will be computed in CycleFold
+            phi: NonNativeAffineVar::new_constant(ConstraintSystemRef::None, C::zero())?,
+            e: e_star,
+            u: u_star,
+            x: x_star,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use ark_crypto_primitives::sponge::{
+        constraints::CryptographicSpongeVar,
+        poseidon::{constraints::PoseidonSpongeVar, PoseidonSponge},
+    };
+    use ark_pallas::{Fr, Projective};
+    use ark_r1cs_std::R1CSVar;
+    use ark_relations::r1cs::ConstraintSystem;
+    use std::error::Error;
+
+    use super::*;
+    use crate::{
+        arith::r1cs::tests::get_test_r1cs,
+        folding::protogalaxy::folding::{tests::prepare_inputs, Folding},
+        transcript::poseidon::poseidon_canonical_config,
+    };
+
+    #[test]
+    fn test_fold_gadget() -> Result<(), Box<dyn Error>> {
+        let k = 7;
+        let (witness, instance, witnesses, instances) = prepare_inputs(k);
+        let r1cs = get_test_r1cs::<Fr>();
+
+        // init Prover & Verifier's transcript
+        let poseidon_config = poseidon_canonical_config::<Fr>();
+        let mut transcript_p = PoseidonSponge::new(&poseidon_config);
+        let mut transcript_v = PoseidonSponge::new(&poseidon_config);
+
+        let (_, _, F_coeffs, K_coeffs) = Folding::<Projective>::prove(
+            &mut transcript_p,
+            &r1cs,
+            &instance,
+            &witness,
+            &instances,
+            &witnesses,
+        )?;
+
+        let folded_instance = Folding::<Projective>::verify(
+            &mut transcript_v,
+            &r1cs,
+            &instance,
+            &instances,
+            F_coeffs.clone(),
+            K_coeffs.clone(),
+        )?;
+
+        let cs = ConstraintSystem::new_ref();
+        let mut transcript_var = PoseidonSpongeVar::new(cs.clone(), &poseidon_config);
+        let instance_var = CommittedInstanceVar::new_witness(cs.clone(), || Ok(instance))?;
+        let instances_var = Vec::new_witness(cs.clone(), || Ok(instances))?;
+        let F_coeffs_var = Vec::new_witness(cs.clone(), || Ok(F_coeffs))?;
+        let K_coeffs_var = Vec::new_witness(cs.clone(), || Ok(K_coeffs))?;
+
+        let folded_instance_var = FoldingGadget::fold_committed_instance(
+            &mut transcript_var,
+            &instance_var,
+            &instances_var,
+            F_coeffs_var,
+            K_coeffs_var,
+        )?;
+        assert_eq!(folded_instance.betas, folded_instance_var.betas.value()?);
+        assert_eq!(folded_instance.e, folded_instance_var.e.value()?);
+        assert_eq!(folded_instance.u, folded_instance_var.u.value()?);
+        assert_eq!(folded_instance.x, folded_instance_var.x.value()?);
+        assert!(cs.is_satisfied()?);
+
+        Ok(())
+    }
+}
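For intuition, a native-field sketch of the F(alpha) accumulation performed by the gadget above (illustrative only, not part of the patch; the function name f_at_alpha is hypothetical): the constant coefficient of the prover-supplied F polynomial is skipped and the running instance's error term e takes its place.

    use ark_ff::PrimeField;

    // Sketch: F(alpha) = e + \sum_{i>=1} F_i * alpha^i, mirroring the in-circuit
    // loop that starts from F_coeffs.iter().skip(1).
    fn f_at_alpha<F: PrimeField>(e: F, f_coeffs: &[F], alpha: F) -> F {
        let mut acc = e;
        let mut alpha_pow = alpha;
        for f_i in f_coeffs.iter().skip(1) {
            acc += *f_i * alpha_pow;
            alpha_pow *= alpha;
        }
        acc
    }
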
diff --git a/folding-schemes/src/folding/protogalaxy/folding.rs b/folding-schemes/src/folding/protogalaxy/folding.rs
index e9b78da..ef9b50d 100644
--- a/folding-schemes/src/folding/protogalaxy/folding.rs
+++ b/folding-schemes/src/folding/protogalaxy/folding.rs
@@ -63,10 +63,13 @@ where
         let k = vec_instances.len();
         let t = instance.betas.len();
         let n = r1cs.A.n_cols;
-        if w.w.len() != n {
+
+        let z = [vec![instance.u], instance.x.clone(), w.w.clone()].concat();
+
+        if z.len() != n {
             return Err(Error::NotSameLength(
-                "w.w.len()".to_string(),
-                w.w.len(),
+                "z.len()".to_string(),
+                z.len(),
                 "n".to_string(),
                 n,
             ));
         }
@@ -85,11 +88,11 @@
         let delta = transcript.get_challenge();
         let deltas = exponential_powers(delta, t);
 
-        let f_w = eval_f(r1cs, &w.w)?;
+        let f_z = eval_f(r1cs, &z)?;
 
         // F(X)
         let F_X: SparsePolynomial<C::ScalarField> =
-            calc_f_from_btree(&f_w, &instance.betas, &deltas).expect("Error calculating F[x]");
+            calc_f_from_btree(&f_z, &instance.betas, &deltas).expect("Error calculating F[x]");
         let F_X_dense = DensePolynomial::from(F_X.clone());
         transcript.absorb(&F_X_dense.coeffs);
@@ -110,24 +113,28 @@
                 phi: instance.phi,
                 betas: betas_star.clone(),
                 e: F_alpha,
+                u: instance.u,
+                x: instance.x.clone(),
             },
             w,
         )?;
 
-        let ws: Vec<Vec<C::ScalarField>> = std::iter::once(w.w.clone())
+        let zs: Vec<Vec<C::ScalarField>> = std::iter::once(z.clone())
             .chain(
                 vec_w
                     .iter()
-                    .map(|wj| {
-                        if wj.w.len() != n {
+                    .zip(vec_instances)
+                    .map(|(wj, uj)| {
+                        let zj = [vec![uj.u], uj.x.clone(), wj.w.clone()].concat();
+                        if zj.len() != n {
                             return Err(Error::NotSameLength(
-                                "wj.w.len()".to_string(),
-                                wj.w.len(),
+                                "zj.len()".to_string(),
+                                zj.len(),
                                 "n".to_string(),
                                 n,
                             ));
                         }
-                        Ok(wj.w.clone())
+                        Ok(zj)
                     })
                     .collect::<Result<Vec<Vec<C::ScalarField>>, Error>>()?,
             )
@@ -144,17 +151,17 @@
         let mut G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); G_domain.size()];
         for (hi, h) in G_domain.elements().enumerate() {
             // each iteration evaluates G(h)
-            // inner = L_0(x) * w + \sum_k L_i(x) * w_j
-            let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); ws[0].len()];
-            for (i, w) in ws.iter().enumerate() {
-                // Li_w_h = (Li(X)*wj)(h) = Li(h) * wj
-                let mut Liw_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w.len()];
-                for (j, wj) in w.iter().enumerate() {
-                    Liw_h[j] = (&L_X[i] * *wj).evaluate(&h);
+            // inner = L_0(x) * z + \sum_k L_i(x) * z_j
+            let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); zs[0].len()];
+            for (i, z) in zs.iter().enumerate() {
+                // Li_z_h = (Li(X)*zj)(h) = Li(h) * zj
+                let mut Liz_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); z.len()];
+                for (j, zj) in z.iter().enumerate() {
+                    Liz_h[j] = (&L_X[i] * *zj).evaluate(&h);
                 }
                 for j in 0..inner.len() {
-                    inner[j] += Liw_h[j];
+                    inner[j] += Liz_h[j];
                 }
             }
             let f_ev = eval_f(r1cs, &inner)?;
@@ -187,19 +194,27 @@
         let gamma = transcript.get_challenge();
 
-        let e_star =
-            F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
-
-        let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
+        let L_X_evals = L_X
+            .iter()
+            .take(k + 1)
+            .map(|L| L.evaluate(&gamma))
+            .collect::<Vec<_>>();
+
+        let e_star = F_alpha * L_X_evals[0] + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
+        let mut w_star = vec_scalar_mul(&w.w, &L_X_evals[0]);
+        let mut r_w_star = w.r_w * L_X_evals[0];
+        let mut phi_star = instance.phi * L_X_evals[0];
+        let mut u_star = instance.u * L_X_evals[0];
+        let mut x_star = vec_scalar_mul(&instance.x, &L_X_evals[0]);
         for i in 0..k {
-            phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
-        }
-        let mut w_star: Vec<C::ScalarField> = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma));
-        let mut r_w_star: C::ScalarField = w.r_w * L_X[0].evaluate(&gamma);
-        for i in 0..k {
-            let L_X_at_i1 = L_X[i + 1].evaluate(&gamma);
-            w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_at_i1))?;
-            r_w_star += vec_w[i].r_w * L_X_at_i1;
+            w_star = vec_add(&w_star, &vec_scalar_mul(&vec_w[i].w, &L_X_evals[i + 1]))?;
+            r_w_star += vec_w[i].r_w * L_X_evals[i + 1];
+            phi_star += vec_instances[i].phi * L_X_evals[i + 1];
+            u_star += vec_instances[i].u * L_X_evals[i + 1];
+            x_star = vec_add(
+                &x_star,
+                &vec_scalar_mul(&vec_instances[i].x, &L_X_evals[i + 1]),
+            )?;
         }
 
         Ok((
@@ -207,6 +222,8 @@ where
                 betas: betas_star,
                 phi: phi_star,
                 e: e_star,
+                u: u_star,
+                x: x_star,
             },
             Witness {
                 w: w_star,
@@ -264,12 +281,24 @@
         let gamma = transcript.get_challenge();
 
-        let e_star =
-            F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
+        let L_X_evals = L_X
+            .iter()
+            .take(k + 1)
+            .map(|L| L.evaluate(&gamma))
+            .collect::<Vec<_>>();
+
+        let e_star = F_alpha * L_X_evals[0] + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
 
-        let mut phi_star: C = instance.phi * L_X[0].evaluate(&gamma);
+        let mut phi_star = instance.phi * L_X_evals[0];
+        let mut u_star = instance.u * L_X_evals[0];
+        let mut x_star = vec_scalar_mul(&instance.x, &L_X_evals[0]);
         for i in 0..k {
-            phi_star += vec_instances[i].phi * L_X[i + 1].evaluate(&gamma);
+            phi_star += vec_instances[i].phi * L_X_evals[i + 1];
+            u_star += vec_instances[i].u * L_X_evals[i + 1];
+            x_star = vec_add(
+                &x_star,
+                &vec_scalar_mul(&vec_instances[i].x, &L_X_evals[i + 1]),
+            )?;
         }
 
         // return the folded instance
@@ -277,6 +306,8 @@
             betas: betas_star,
             phi: phi_star,
             e: e_star,
+            u: u_star,
+            x: x_star,
         })
     }
 }
@@ -350,7 +381,9 @@
 }
 
 // lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
-fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
+pub fn lagrange_polys<F: PrimeField>(
+    domain_n: GeneralEvaluationDomain<F>,
+) -> Vec<DensePolynomial<F>> {
     let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
     for i in 0..domain_n.size() {
         let evals: Vec<F> = cfg_into_iter!(0..domain_n.size())
@@ -363,23 +396,23 @@
-fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Result<Vec<F>, Error> {
-    let Az = mat_vec_mul(&r1cs.A, w)?;
-    let Bz = mat_vec_mul(&r1cs.B, w)?;
-    let Cz = mat_vec_mul(&r1cs.C, w)?;
+fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, z: &[F]) -> Result<Vec<F>, Error> {
+    let Az = mat_vec_mul(&r1cs.A, z)?;
+    let Bz = mat_vec_mul(&r1cs.B, z)?;
+    let Cz = mat_vec_mul(&r1cs.C, z)?;
     let AzBz = hadamard(&Az, &Bz)?;
     vec_sub(&AzBz, &Cz)
 }
 
 #[cfg(test)]
-mod tests {
+pub mod tests {
     use super::*;
     use ark_crypto_primitives::sponge::poseidon::PoseidonSponge;
     use ark_crypto_primitives::sponge::CryptographicSponge;
     use ark_pallas::{Fr, Projective};
-    use ark_std::UniformRand;
+    use ark_std::{rand::Rng, UniformRand};
 
-    use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z};
+    use crate::arith::r1cs::tests::{get_test_r1cs, get_test_z, get_test_z_split};
     use crate::commitment::{pedersen::Pedersen, CommitmentScheme};
     use crate::transcript::poseidon::poseidon_canonical_config;
@@ -388,20 +421,22 @@
         instance: &CommittedInstance<C>,
         w: &Witness<C::ScalarField>,
     ) -> Result<(), Error> {
-        if instance.betas.len() != log2(w.w.len()) as usize {
+        let z = [vec![instance.u], instance.x.clone(), w.w.clone()].concat();
+
+        if instance.betas.len() != log2(z.len()) as usize {
             return Err(Error::NotSameLength(
                 "instance.betas.len()".to_string(),
                 instance.betas.len(),
-                "log2(w.w.len())".to_string(),
-                log2(w.w.len()) as usize,
+                "log2(z.len())".to_string(),
+                log2(z.len()) as usize,
             ));
         }
 
-        let f_w = eval_f(r1cs, &w.w)?; // f(w)
+        let f_z = eval_f(r1cs, &z)?; // f(z)
 
         let mut r = C::ScalarField::zero();
-        for (i, f_w_i) in f_w.iter().enumerate() {
-            r += pow_i(i, &instance.betas) * f_w_i;
+        for (i, f_z_i) in f_z.iter().enumerate() {
+            r += pow_i(i, &instance.betas) * f_z_i;
         }
         if instance.e == r {
             return Ok(());
@@ -426,8 +461,9 @@
     #[test]
     fn test_eval_f() {
+        let mut rng = ark_std::test_rng();
         let r1cs = get_test_r1cs::<Fr>();
-        let mut z = get_test_z::<Fr>(3);
+        let mut z = get_test_z::<Fr>(rng.gen::<u16>() as usize);
 
         let f_w = eval_f(&r1cs, &z).unwrap();
         assert!(is_zero_vec(&f_w));
@@ -439,7 +475,7 @@
     // k represents the number of instances to be fold, apart from the running instance
     #[allow(clippy::type_complexity)]
-    fn prepare_inputs(
+    pub fn prepare_inputs(
        k: usize,
    ) -> (
        Witness<Fr>,
        CommittedInstance<Projective>,
        Vec<Witness<Fr>>,
        Vec<CommittedInstance<Projective>>,
    ) {
        let mut rng = ark_std::test_rng();
-        let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, 100).unwrap(); // 100 is wip, will get it from actual vec
-        let z = get_test_z::<Fr>(3);
-        let mut zs: Vec<Vec<Fr>> = Vec::new();
-        for i in 0..k {
-            let z_i = get_test_z::<Fr>(i + 4);
-            zs.push(z_i);
-        }
+        let (u, x, w) = get_test_z_split::<Fr>(rng.gen::<u16>() as usize);
+
+        let (pedersen_params, _) = Pedersen::<Projective>::setup(&mut rng, w.len()).unwrap();
 
-        let n = z.len();
+        let n = 1 + x.len() + w.len();
         let t = log2(n) as usize;
 
         let beta = Fr::rand(&mut rng);
         let betas = exponential_powers(beta, t);
 
         let witness = Witness::<Fr> {
-            w: z.clone(),
+            w,
             r_w: Fr::rand(&mut rng),
         };
         let phi = Pedersen::<Projective>::commit(&pedersen_params, &witness.w, &witness.r_w)
             .unwrap();
@@ -473,14 +505,17 @@
             phi,
             betas: betas.clone(),
             e: Fr::zero(),
+            u,
+            x,
         };
 
         // same for the other instances
         let mut witnesses: Vec<Witness<Fr>> = Vec::new();
         let mut instances: Vec<CommittedInstance<Projective>> = Vec::new();
         #[allow(clippy::needless_range_loop)]
-        for i in 0..k {
+        for _ in 0..k {
+            let (u_i, x_i, w_i) = get_test_z_split::<Fr>(rng.gen::<u16>() as usize);
             let witness_i = Witness::<Fr> {
-                w: zs[i].clone(),
+                w: w_i,
                 r_w: Fr::rand(&mut rng),
             };
             let phi_i = Pedersen::<Projective>::commit(
@@ -493,6 +528,8 @@
                 phi: phi_i,
                 betas: vec![],
                 e: Fr::zero(),
+                u: u_i,
+                x: x_i,
             };
             witnesses.push(witness_i);
             instances.push(instance_i);
@@ -502,7 +539,7 @@
     }
 
     #[test]
-    fn test_fold_native_case() {
+    fn test_fold() {
         let k = 7;
         let (witness, instance, witnesses, instances) = prepare_inputs(k);
         let r1cs = get_test_r1cs::<Fr>();
@@ -584,9 +621,8 @@
             .unwrap();
 
         // check that prover & verifier folded instances are the same values
-        assert_eq!(folded_instance.phi, folded_instance_v.phi);
-        assert_eq!(folded_instance.betas, folded_instance_v.betas);
-        assert_eq!(folded_instance.e, folded_instance_v.e);
+        assert_eq!(folded_instance, folded_instance_v);
+        assert!(!folded_instance.e.is_zero());
 
         // check that the folded instance satisfies the relation
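Since lagrange_polys is now pub and shared with the new gadget, a quick sanity sketch of its contract (a hypothetical test, not part of the patch; the module path and placement are assumptions): the returned polynomials form the Lagrange basis over the evaluation domain, i.e. L_i(w_j) = 1 when i == j and 0 otherwise.

    #[cfg(test)]
    mod lagrange_sketch {
        use ark_ff::{One, Zero};
        use ark_pallas::Fr;
        use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, Polynomial};

        use crate::folding::protogalaxy::folding::lagrange_polys;

        #[test]
        fn lagrange_basis_is_kronecker_delta() {
            let domain = GeneralEvaluationDomain::<Fr>::new(4).unwrap();
            let basis = lagrange_polys(domain);
            for (i, l_i) in basis.iter().enumerate() {
                for (j, w_j) in domain.elements().enumerate() {
                    let expected = if i == j { Fr::one() } else { Fr::zero() };
                    assert_eq!(l_i.evaluate(&w_j), expected);
                }
            }
        }
    }
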
diff --git a/folding-schemes/src/folding/protogalaxy/mod.rs b/folding-schemes/src/folding/protogalaxy/mod.rs
index 1d8ab6f..192f227 100644
--- a/folding-schemes/src/folding/protogalaxy/mod.rs
+++ b/folding-schemes/src/folding/protogalaxy/mod.rs
@@ -1,20 +1,29 @@
+use std::borrow::Borrow;
+
 /// Implements the scheme described in [ProtoGalaxy](https://eprint.iacr.org/2023/1106.pdf)
 use ark_ec::CurveGroup;
 use ark_ff::PrimeField;
-use ark_r1cs_std::fields::fp::FpVar;
+use ark_r1cs_std::{
+    alloc::{AllocVar, AllocationMode},
+    fields::fp::FpVar,
+};
+use ark_relations::r1cs::{Namespace, SynthesisError};
 use thiserror::Error;
 
 use super::circuits::nonnative::affine::NonNativeAffineVar;
 
+pub mod circuits;
 pub mod folding;
 pub mod traits;
 pub(crate) mod utils;
 
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct CommittedInstance<C: CurveGroup> {
     phi: C,
     betas: Vec<C::ScalarField>,
     e: C::ScalarField,
+    u: C::ScalarField,
+    x: Vec<C::ScalarField>,
 }
 
 #[derive(Clone, Debug)]
@@ -22,6 +31,30 @@ pub struct CommittedInstanceVar<C: CurveGroup> {
     phi: NonNativeAffineVar<C>,
     betas: Vec<FpVar<C::ScalarField>>,
     e: FpVar<C::ScalarField>,
+    u: FpVar<C::ScalarField>,
+    x: Vec<FpVar<C::ScalarField>>,
+}
+
+impl<C: CurveGroup> AllocVar<CommittedInstance<C>, C::ScalarField> for CommittedInstanceVar<C> {
+    fn new_variable<T: Borrow<CommittedInstance<C>>>(
+        cs: impl Into<Namespace<C::ScalarField>>,
+        f: impl FnOnce() -> Result<T, SynthesisError>,
+        mode: AllocationMode,
+    ) -> Result<Self, SynthesisError> {
+        f().and_then(|u| {
+            let cs = cs.into();
+
+            let u = u.borrow();
+
+            Ok(Self {
+                phi: NonNativeAffineVar::new_variable(cs.clone(), || Ok(u.phi), mode)?,
+                betas: Vec::new_variable(cs.clone(), || Ok(u.betas.clone()), mode)?,
+                e: FpVar::new_variable(cs.clone(), || Ok(u.e), mode)?,
+                u: FpVar::new_variable(cs.clone(), || Ok(u.u), mode)?,
+                x: Vec::new_variable(cs.clone(), || Ok(u.x.clone()), mode)?,
+            })
+        })
+    }
 }
 
 #[derive(Clone, Debug)]
diff --git a/folding-schemes/src/folding/protogalaxy/traits.rs b/folding-schemes/src/folding/protogalaxy/traits.rs
index db96638..ef45b31 100644
--- a/folding-schemes/src/folding/protogalaxy/traits.rs
+++ b/folding-schemes/src/folding/protogalaxy/traits.rs
@@ -22,6 +22,8 @@ where
             .to_sponge_field_elements(dest);
         self.betas.to_sponge_field_elements(dest);
         self.e.to_sponge_field_elements(dest);
+        self.u.to_sponge_field_elements(dest);
+        self.x.to_sponge_field_elements(dest);
     }
 }
 
@@ -36,6 +38,8 @@ impl<C: CurveGroup> AbsorbGadget<C::ScalarField> for CommittedInstanceVar<C> {
             self.phi.to_constraint_field()?,
             self.betas.to_sponge_field_elements()?,
             self.e.to_sponge_field_elements()?,
+            self.u.to_sponge_field_elements()?,
+            self.x.to_sponge_field_elements()?,
         ]
         .concat())
     }
diff --git a/folding-schemes/src/folding/protogalaxy/utils.rs b/folding-schemes/src/folding/protogalaxy/utils.rs
index 4910279..3ac31f1 100644
--- a/folding-schemes/src/folding/protogalaxy/utils.rs
+++ b/folding-schemes/src/folding/protogalaxy/utils.rs
@@ -1,6 +1,7 @@
 use ark_ff::PrimeField;
+use ark_r1cs_std::fields::{fp::FpVar, FieldVar};
 
-// returns (b, b^2, b^4, ..., b^{2^{t-1}})
+/// Returns (b, b^2, b^4, ..., b^{2^{t-1}})
 pub fn exponential_powers<F: PrimeField>(b: F, t: usize) -> Vec<F> {
     let mut r = vec![F::zero(); t];
     r[0] = b;
@@ -9,6 +10,18 @@ pub fn exponential_powers<F: PrimeField>(b: F, t: usize) -> Vec<F> {
     }
     r
 }
+
+/// The in-circuit version of `exponential_powers`
+pub fn exponential_powers_var<F: PrimeField>(b: FpVar<F>, t: usize) -> Vec<FpVar<F>> {
+    let mut r = vec![FpVar::zero(); t];
+    r[0] = b;
+    for i in 1..t {
+        r[i] = &r[i - 1] * &r[i - 1];
+    }
+    r
+}
+
+/// Returns (1, a, a^2, ..., a^{n-1})
 pub fn all_powers<F: PrimeField>(a: F, n: usize) -> Vec<F> {
     let mut r = vec![F::zero(); n];
     for (i, r_i) in r.iter_mut().enumerate() {
@@ -17,7 +30,20 @@
     r
 }
 
-// returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ
+/// The in-circuit version of `all_powers`
+pub fn all_powers_var<F: PrimeField>(a: FpVar<F>, n: usize) -> Vec<FpVar<F>> {
+    if n == 0 {
+        return vec![];
+    }
+    let mut r = vec![FpVar::zero(); n];
+    r[0] = FpVar::one();
+    for i in 1..n {
+        r[i] = &r[i - 1] * &a;
+    }
+    r
+}
+
+/// Returns a vector containing βᵢ* = βᵢ + α ⋅ δᵢ
 pub fn betas_star<F: PrimeField>(betas: &[F], deltas: &[F], alpha: F) -> Vec<F> {
     betas
         .iter()
@@ -30,3 +56,92 @@
         .map(|(beta_i, delta_i_alpha)| *beta_i + delta_i_alpha)
         .collect()
 }
+
+/// The in-circuit version of `betas_star`
+pub fn betas_star_var<F: PrimeField>(
+    betas: &[FpVar<F>],
+    deltas: &[FpVar<F>],
+    alpha: &FpVar<F>,
+) -> Vec<FpVar<F>> {
+    betas
+        .iter()
+        .zip(deltas)
+        .map(|(beta_i, delta_i)| beta_i + alpha * delta_i)
+        .collect::<Vec<FpVar<F>>>()
+}
+
+#[cfg(test)]
+mod tests {
+    use std::error::Error;
+
+    use ark_bn254::Fr;
+    use ark_r1cs_std::{alloc::AllocVar, R1CSVar};
+    use ark_relations::r1cs::ConstraintSystem;
+    use ark_std::{test_rng, UniformRand};
+
+    use super::*;
+
+    #[test]
+    fn test_exponential_powers() -> Result<(), Box<dyn Error>> {
+        let rng = &mut test_rng();
+
+        for t in 1..10 {
+            let cs = ConstraintSystem::<Fr>::new_ref();
+
+            let b = Fr::rand(rng);
+            let b_var = FpVar::new_witness(cs.clone(), || Ok(b))?;
+
+            let r = exponential_powers(b, t);
+            let r_var = exponential_powers_var(b_var, t);
+
+            assert_eq!(r, r_var.value()?);
+            assert!(cs.is_satisfied()?);
+        }
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_all_powers() -> Result<(), Box<dyn Error>> {
+        let rng = &mut test_rng();
+
+        for n in 1..10 {
+            let cs = ConstraintSystem::<Fr>::new_ref();
+
+            let a = Fr::rand(rng);
+            let a_var = FpVar::new_witness(cs.clone(), || Ok(a))?;
+
+            let r = all_powers(a, n);
+            let r_var = all_powers_var(a_var, n);
+
+            assert_eq!(r, r_var.value()?);
+            assert!(cs.is_satisfied()?);
+        }
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_betas_star() -> Result<(), Box<dyn Error>> {
+        let rng = &mut test_rng();
+
+        for t in 1..10 {
+            let cs = ConstraintSystem::<Fr>::new_ref();
+
+            let betas = (0..t).map(|_| Fr::rand(rng)).collect::<Vec<Fr>>();
+            let deltas = (0..t).map(|_| Fr::rand(rng)).collect::<Vec<Fr>>();
+            let alpha = Fr::rand(rng);
+
+            let betas_var = Vec::new_witness(cs.clone(), || Ok(betas.clone()))?;
+            let deltas_var = Vec::new_witness(cs.clone(), || Ok(deltas.clone()))?;
+            let alpha_var = FpVar::new_witness(cs.clone(), || Ok(alpha))?;
+
+            let r = betas_star(&betas, &deltas, alpha);
+            let r_var = betas_star_var(&betas_var, &deltas_var, &alpha_var);
+            assert_eq!(r, r_var.value()?);
+            assert!(cs.is_satisfied()?);
+        }
+
+        Ok(())
+    }
+}
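To make the helpers' conventions concrete, a small worked sketch (a hypothetical test, not part of the patch; it assumes ark_bn254::Fr and placement next to the existing utils tests): exponential_powers squares repeatedly, all_powers starts from a^0 = 1, and betas_star blends betas with alpha-scaled deltas.

    #[cfg(test)]
    mod worked_values_sketch {
        use ark_bn254::Fr;

        use super::*;

        #[test]
        fn helper_conventions() {
            let f = |x: u64| Fr::from(x);

            // exponential_powers(b, t) = (b, b^2, b^4, ..., b^{2^(t-1)})
            assert_eq!(exponential_powers(f(2), 4), vec![f(2), f(4), f(16), f(256)]);

            // all_powers(a, n) = (1, a, a^2, ..., a^(n-1))
            assert_eq!(all_powers(f(3), 4), vec![f(1), f(3), f(9), f(27)]);

            // betas_star(betas, deltas, alpha)[i] = betas[i] + alpha * deltas[i]
            assert_eq!(betas_star(&[f(2)], &[f(3)], f(5)), vec![f(17)]);
        }
    }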