From b648ddb300a76a9ed1f58209efaee0257216deaa Mon Sep 17 00:00:00 2001 From: winderica Date: Tue, 16 Apr 2024 15:50:19 +0100 Subject: [PATCH] Reduce the number of constraints in `DeciderEthCircuit` (#88) * Add a dedicated variant of `mat_vec_mul_sparse` for `NonNativeFieldVar` * Switch to a customized in-circuit nonnative implementation for efficiency * Comments and tests for `NonNativeUintVar` * Make `CycleFoldCircuit` a bit smaller * Faster trusted setup and proof generation by avoiding some nested LCs * Check the remaining limbs in a more safe way * Format * Disable the non-native checks in tests again * Clarify the group operation in `enforce_equal_unaligned` * Explain the rationale behind non-native mat-vec multiplication * Explain the difference with some other impls of `enforce_equal_unaligned` * Format --- folding-schemes/Cargo.toml | 1 + .../src/folding/circuits/nonnative.rs | 306 ----- .../src/folding/circuits/nonnative/affine.rs | 161 +++ .../src/folding/circuits/nonnative/mod.rs | 2 + .../src/folding/circuits/nonnative/uint.rs | 1019 +++++++++++++++++ folding-schemes/src/folding/nova/circuits.rs | 33 +- folding-schemes/src/folding/nova/cyclefold.rs | 68 +- .../src/folding/nova/decider_eth.rs | 2 +- .../src/folding/nova/decider_eth_circuit.rs | 147 ++- folding-schemes/src/folding/nova/mod.rs | 8 +- folding-schemes/src/utils/gadgets.rs | 99 +- 11 files changed, 1359 insertions(+), 487 deletions(-) delete mode 100644 folding-schemes/src/folding/circuits/nonnative.rs create mode 100644 folding-schemes/src/folding/circuits/nonnative/affine.rs create mode 100644 folding-schemes/src/folding/circuits/nonnative/mod.rs create mode 100644 folding-schemes/src/folding/circuits/nonnative/uint.rs diff --git a/folding-schemes/Cargo.toml b/folding-schemes/Cargo.toml index 9ccd8a1..c29958c 100644 --- a/folding-schemes/Cargo.toml +++ b/folding-schemes/Cargo.toml @@ -18,6 +18,7 @@ ark-circom = { git = "https://github.com/arnaucube/circom-compat.git" } thiserror = "1.0" rayon = "1.7.0" num-bigint = "0.4" +num-integer = "0.1" color-eyre = "=0.6.2" # tmp imports for espresso's sumcheck diff --git a/folding-schemes/src/folding/circuits/nonnative.rs b/folding-schemes/src/folding/circuits/nonnative.rs deleted file mode 100644 index 67ca365..0000000 --- a/folding-schemes/src/folding/circuits/nonnative.rs +++ /dev/null @@ -1,306 +0,0 @@ -use ark_ec::{AffineRepr, CurveGroup}; -use ark_ff::{BigInteger, PrimeField}; -use ark_r1cs_std::{ - alloc::{AllocVar, AllocationMode}, - boolean::Boolean, - fields::{ - fp::FpVar, - nonnative::{ - params::{get_params, OptimizationType}, - AllocatedNonNativeFieldVar, NonNativeFieldVar, - }, - FieldVar, - }, - ToBitsGadget, ToConstraintFieldGadget, -}; -use ark_relations::r1cs::{ConstraintSystemRef, Namespace, OptimizationGoal, SynthesisError}; -use ark_std::Zero; -use core::borrow::Borrow; -use std::marker::PhantomData; - -/// Compose a vector boolean into a `NonNativeFieldVar` -pub fn nonnative_field_var_from_le_bits( - cs: ConstraintSystemRef, - bits: &[Boolean], -) -> Result, SynthesisError> { - let params = get_params( - TargetField::MODULUS_BIT_SIZE as usize, - BaseField::MODULUS_BIT_SIZE as usize, - match cs.optimization_goal() { - OptimizationGoal::None => OptimizationType::Constraints, - OptimizationGoal::Constraints => OptimizationType::Constraints, - OptimizationGoal::Weight => OptimizationType::Weight, - }, - ); - - // push the lower limbs first - let mut limbs = bits - .chunks(params.bits_per_limb) - .map(Boolean::le_bits_to_fp_var) - .collect::, _>>()?; - 
limbs.resize(params.num_limbs, FpVar::zero()); - limbs.reverse(); - - Ok(AllocatedNonNativeFieldVar { - cs, - limbs, - num_of_additions_over_normal_form: BaseField::one(), - is_in_the_normal_form: false, - target_phantom: PhantomData, - } - .into()) -} - -/// A more efficient version of `NonNativeFieldVar::to_constraint_field` -pub fn nonnative_field_var_to_constraint_field( - f: &NonNativeFieldVar, -) -> Result>, SynthesisError> { - let bits = f.to_bits_le()?; - - let bits_per_limb = BaseField::MODULUS_BIT_SIZE as usize - 1; - let num_limbs = (TargetField::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); - - let mut limbs = bits - .chunks(bits_per_limb) - .map(|chunk| { - let mut limb = FpVar::::zero(); - let mut w = BaseField::one(); - for b in chunk.iter() { - limb += FpVar::from(b.clone()) * w; - w.double_in_place(); - } - limb - }) - .collect::>>(); - limbs.resize(num_limbs, FpVar::zero()); - limbs.reverse(); - - Ok(limbs) -} - -/// The out-circuit counterpart of `nonnative_field_var_to_constraint_field` -pub fn nonnative_field_to_field_elements( - f: &TargetField, -) -> Vec { - let bits = f.into_bigint().to_bits_le(); - - let bits_per_limb = BaseField::MODULUS_BIT_SIZE as usize - 1; - let num_limbs = (TargetField::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); - - let mut limbs = bits - .chunks(bits_per_limb) - .map(|chunk| { - let mut limb = BaseField::zero(); - let mut w = BaseField::one(); - for &b in chunk.iter() { - limb += BaseField::from(b) * w; - w.double_in_place(); - } - limb - }) - .collect::>(); - limbs.resize(num_limbs, BaseField::zero()); - limbs.reverse(); - - limbs -} - -/// NonNativeAffineVar represents an elliptic curve point in Affine representation in the non-native -/// field, over the constraint field. It is not intended to perform operations, but just to contain -/// the affine coordinates in order to perform hash operations of the point. -#[derive(Debug, Clone)] -pub struct NonNativeAffineVar -where - ::BaseField: ark_ff::PrimeField, -{ - pub x: NonNativeFieldVar, - pub y: NonNativeFieldVar, -} - -impl AllocVar for NonNativeAffineVar -where - C: CurveGroup, - ::BaseField: ark_ff::PrimeField, -{ - fn new_variable>( - cs: impl Into>, - f: impl FnOnce() -> Result, - mode: AllocationMode, - ) -> Result { - f().and_then(|val| { - let cs = cs.into(); - - let affine = val.borrow().into_affine(); - let zero_point = (&C::BaseField::zero(), &C::BaseField::zero()); - let xy = affine.xy().unwrap_or(zero_point); - - let x = NonNativeFieldVar::::new_variable( - cs.clone(), - || Ok(xy.0), - mode, - )?; - let y = NonNativeFieldVar::::new_variable( - cs.clone(), - || Ok(xy.1), - mode, - )?; - - Ok(Self { x, y }) - }) - } -} - -impl ToConstraintFieldGadget for NonNativeAffineVar -where - ::BaseField: ark_ff::PrimeField, -{ - // A more efficient version of `point_to_nonnative_limbs_custom_opt`. - // Used for converting `NonNativeAffineVar` to a vector of `FpVar` with minimum length in - // the circuit. 
- fn to_constraint_field(&self) -> Result>, SynthesisError> { - let x = nonnative_field_var_to_constraint_field(&self.x)?; - let y = nonnative_field_var_to_constraint_field(&self.y)?; - Ok([x, y].concat()) - } -} - -/// The out-circuit counterpart of `NonNativeAffineVar::to_constraint_field` -#[allow(clippy::type_complexity)] -pub fn nonnative_affine_to_field_elements( - p: C, -) -> Result<(Vec, Vec), SynthesisError> -where - ::BaseField: ark_ff::PrimeField, -{ - let affine = p.into_affine(); - if affine.is_zero() { - let x = nonnative_field_to_field_elements(&C::BaseField::zero()); - let y = nonnative_field_to_field_elements(&C::BaseField::zero()); - return Ok((x, y)); - } - - let (x, y) = affine.xy().unwrap(); - let x = nonnative_field_to_field_elements(x); - let y = nonnative_field_to_field_elements(y); - Ok((x, y)) -} - -impl NonNativeAffineVar -where - ::BaseField: ark_ff::PrimeField, -{ - // A wrapper of `point_to_nonnative_limbs_custom_opt` with constraints-focused optimization - // type (which is the default optimization type for arkworks' Groth16). - // Used for extracting a list of field elements of type `C::ScalarField` from the public input - // `p`, in exactly the same way as how `NonNativeAffineVar` is represented as limbs of type - // `FpVar` in-circuit. - #[allow(clippy::type_complexity)] - pub fn inputize(p: C) -> Result<(Vec, Vec), SynthesisError> { - point_to_nonnative_limbs_custom_opt(p, OptimizationType::Constraints) - } -} - -// Used to compute (outside the circuit) the limbs representation of a point. -// For `OptimizationType::Constraints`, the result matches the one used in-circuit. -// For `OptimizationType::Weight`, the result vector is more dense and is suitable for hashing. -// It is possible to further optimize the length of the result vector (see -// `nonnative_affine_to_field_elements`) -#[allow(clippy::type_complexity)] -fn point_to_nonnative_limbs_custom_opt( - p: C, - optimization_type: OptimizationType, -) -> Result<(Vec, Vec), SynthesisError> -where - ::BaseField: ark_ff::PrimeField, -{ - let affine = p.into_affine(); - if affine.is_zero() { - let x = - AllocatedNonNativeFieldVar::::get_limbs_representations( - &C::BaseField::zero(), - optimization_type, - )?; - let y = - AllocatedNonNativeFieldVar::::get_limbs_representations( - &C::BaseField::zero(), - optimization_type, - )?; - return Ok((x, y)); - } - - let (x, y) = affine.xy().unwrap(); - let x = AllocatedNonNativeFieldVar::::get_limbs_representations( - x, - optimization_type, - )?; - let y = AllocatedNonNativeFieldVar::::get_limbs_representations( - y, - optimization_type, - )?; - Ok((x, y)) -} - -#[cfg(test)] -mod tests { - use super::*; - use ark_pallas::{Fr, Projective}; - use ark_r1cs_std::R1CSVar; - use ark_relations::r1cs::ConstraintSystem; - use ark_std::UniformRand; - - #[test] - fn test_alloc_zero() { - let cs = ConstraintSystem::::new_ref(); - - // dealing with the 'zero' point should not panic when doing the unwrap - let p = Projective::zero(); - assert!(NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).is_ok()); - } - - #[test] - fn test_arkworks_to_constraint_field() { - let cs = ConstraintSystem::::new_ref(); - - // check that point_to_nonnative_limbs returns the expected values - let mut rng = ark_std::test_rng(); - let p = Projective::rand(&mut rng); - let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); - let (x, y) = point_to_nonnative_limbs_custom_opt(p, OptimizationType::Weight).unwrap(); - 
assert_eq!(pVar.x.to_constraint_field().unwrap().value().unwrap(), x); - assert_eq!(pVar.y.to_constraint_field().unwrap().value().unwrap(), y); - } - - #[test] - fn test_improved_to_constraint_field() { - let cs = ConstraintSystem::::new_ref(); - - // check that point_to_nonnative_limbs returns the expected values - let mut rng = ark_std::test_rng(); - let p = Projective::rand(&mut rng); - let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); - let (x, y) = nonnative_affine_to_field_elements(p).unwrap(); - assert_eq!( - pVar.to_constraint_field().unwrap().value().unwrap(), - [x, y].concat() - ); - } - - #[test] - fn test_inputize() { - let cs = ConstraintSystem::::new_ref(); - - // check that point_to_nonnative_limbs returns the expected values - let mut rng = ark_std::test_rng(); - let p = Projective::rand(&mut rng); - let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); - let (x, y) = NonNativeAffineVar::inputize(p).unwrap(); - - match (pVar.x, pVar.y) { - (NonNativeFieldVar::Var(p_x), NonNativeFieldVar::Var(p_y)) => { - assert_eq!(p_x.limbs.value().unwrap(), x); - assert_eq!(p_y.limbs.value().unwrap(), y); - } - _ => unreachable!(), - } - } -} diff --git a/folding-schemes/src/folding/circuits/nonnative/affine.rs b/folding-schemes/src/folding/circuits/nonnative/affine.rs new file mode 100644 index 0000000..a5e2af9 --- /dev/null +++ b/folding-schemes/src/folding/circuits/nonnative/affine.rs @@ -0,0 +1,161 @@ +use ark_ec::{AffineRepr, CurveGroup}; +use ark_ff::PrimeField; +use ark_r1cs_std::{ + alloc::{AllocVar, AllocationMode}, + fields::fp::FpVar, + ToConstraintFieldGadget, +}; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use ark_std::Zero; +use core::borrow::Borrow; + +use super::uint::{nonnative_field_to_field_elements, NonNativeUintVar}; + +/// NonNativeAffineVar represents an elliptic curve point in Affine representation in the non-native +/// field, over the constraint field. It is not intended to perform operations, but just to contain +/// the affine coordinates in order to perform hash operations of the point. +#[derive(Debug, Clone)] +pub struct NonNativeAffineVar +where + ::BaseField: ark_ff::PrimeField, +{ + pub x: NonNativeUintVar, + pub y: NonNativeUintVar, +} + +impl AllocVar for NonNativeAffineVar +where + C: CurveGroup, + ::BaseField: ark_ff::PrimeField, +{ + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|val| { + let cs = cs.into(); + + let affine = val.borrow().into_affine(); + let zero_point = (&C::BaseField::zero(), &C::BaseField::zero()); + let xy = affine.xy().unwrap_or(zero_point); + + let x = NonNativeUintVar::new_variable(cs.clone(), || Ok(*xy.0), mode)?; + let y = NonNativeUintVar::new_variable(cs.clone(), || Ok(*xy.1), mode)?; + + Ok(Self { x, y }) + }) + } +} + +impl ToConstraintFieldGadget for NonNativeAffineVar +where + ::BaseField: ark_ff::PrimeField, +{ + // Used for converting `NonNativeAffineVar` to a vector of `FpVar` with minimum length in + // the circuit. 
+ fn to_constraint_field(&self) -> Result>, SynthesisError> { + let x = self.x.to_constraint_field()?; + let y = self.y.to_constraint_field()?; + Ok([x, y].concat()) + } +} + +/// The out-circuit counterpart of `NonNativeAffineVar::to_constraint_field` +#[allow(clippy::type_complexity)] +pub fn nonnative_affine_to_field_elements( + p: C, +) -> Result<(Vec, Vec), SynthesisError> +where + ::BaseField: ark_ff::PrimeField, +{ + let affine = p.into_affine(); + if affine.is_zero() { + let x = nonnative_field_to_field_elements(&C::BaseField::zero()); + let y = nonnative_field_to_field_elements(&C::BaseField::zero()); + return Ok((x, y)); + } + + let (x, y) = affine.xy().unwrap(); + let x = nonnative_field_to_field_elements(x); + let y = nonnative_field_to_field_elements(y); + Ok((x, y)) +} + +impl NonNativeAffineVar +where + ::BaseField: ark_ff::PrimeField, +{ + // A wrapper of `point_to_nonnative_limbs_custom_opt` with constraints-focused optimization + // type (which is the default optimization type for arkworks' Groth16). + // Used for extracting a list of field elements of type `C::ScalarField` from the public input + // `p`, in exactly the same way as how `NonNativeAffineVar` is represented as limbs of type + // `FpVar` in-circuit. + #[allow(clippy::type_complexity)] + pub fn inputize(p: C) -> Result<(Vec, Vec), SynthesisError> { + let affine = p.into_affine(); + if affine.is_zero() { + let x = NonNativeUintVar::inputize( + &(C::ScalarField::zero()).into(), + C::ScalarField::MODULUS_BIT_SIZE as usize, + ); + let y = NonNativeUintVar::inputize( + &(C::ScalarField::zero()).into(), + C::ScalarField::MODULUS_BIT_SIZE as usize, + ); + return Ok((x, y)); + } + + let (x, y) = affine.xy().unwrap(); + let x = NonNativeUintVar::inputize(&(*x).into(), C::ScalarField::MODULUS_BIT_SIZE as usize); + let y = NonNativeUintVar::inputize(&(*y).into(), C::ScalarField::MODULUS_BIT_SIZE as usize); + Ok((x, y)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_pallas::{Fr, Projective}; + use ark_r1cs_std::R1CSVar; + use ark_relations::r1cs::ConstraintSystem; + use ark_std::UniformRand; + + #[test] + fn test_alloc_zero() { + let cs = ConstraintSystem::::new_ref(); + + // dealing with the 'zero' point should not panic when doing the unwrap + let p = Projective::zero(); + assert!(NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).is_ok()); + } + + #[test] + fn test_improved_to_constraint_field() { + let cs = ConstraintSystem::::new_ref(); + + // check that point_to_nonnative_limbs returns the expected values + let mut rng = ark_std::test_rng(); + let p = Projective::rand(&mut rng); + let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); + let (x, y) = nonnative_affine_to_field_elements(p).unwrap(); + assert_eq!( + pVar.to_constraint_field().unwrap().value().unwrap(), + [x, y].concat() + ); + } + + #[test] + fn test_inputize() { + let cs = ConstraintSystem::::new_ref(); + + // check that point_to_nonnative_limbs returns the expected values + let mut rng = ark_std::test_rng(); + let p = Projective::rand(&mut rng); + let pVar = NonNativeAffineVar::::new_witness(cs.clone(), || Ok(p)).unwrap(); + let (x, y) = NonNativeAffineVar::inputize(p).unwrap(); + + assert_eq!(pVar.x.0.value().unwrap(), x); + assert_eq!(pVar.y.0.value().unwrap(), y); + } +} diff --git a/folding-schemes/src/folding/circuits/nonnative/mod.rs b/folding-schemes/src/folding/circuits/nonnative/mod.rs new file mode 100644 index 0000000..497b987 --- /dev/null +++ b/folding-schemes/src/folding/circuits/nonnative/mod.rs 
@@ -0,0 +1,2 @@ +pub mod affine; +pub mod uint; diff --git a/folding-schemes/src/folding/circuits/nonnative/uint.rs b/folding-schemes/src/folding/circuits/nonnative/uint.rs new file mode 100644 index 0000000..49f5fe0 --- /dev/null +++ b/folding-schemes/src/folding/circuits/nonnative/uint.rs @@ -0,0 +1,1019 @@ +use std::{ + borrow::Borrow, + cmp::{max, min}, +}; + +use ark_ff::{BigInteger, One, PrimeField, Zero}; +use ark_r1cs_std::{ + alloc::{AllocVar, AllocationMode}, + boolean::Boolean, + fields::{fp::FpVar, FieldVar}, + prelude::EqGadget, + select::CondSelectGadget, + R1CSVar, ToBitsGadget, ToConstraintFieldGadget, +}; +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; +use num_bigint::BigUint; +use num_integer::Integer; + +use crate::utils::gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget}; + +/// `LimbVar` represents a single limb of a non-native unsigned integer in the +/// circuit. +/// The limb value `v` should be small enough to fit into `FpVar`, and we also +/// store an upper bound `ub` for the limb value, which is treated as a constant +/// in the circuit and is used for efficient equality checks and some arithmetic +/// operations. +#[derive(Debug, Clone)] +pub struct LimbVar { + pub v: FpVar, + pub ub: BigUint, +} + +impl]>> From for LimbVar { + fn from(bits: B) -> Self { + Self { + v: Boolean::le_bits_to_fp_var(bits.as_ref()).unwrap(), + ub: (BigUint::one() << bits.as_ref().len()) - BigUint::one(), + } + } +} + +impl Default for LimbVar { + fn default() -> Self { + Self { + v: FpVar::zero(), + ub: BigUint::zero(), + } + } +} + +impl R1CSVar for LimbVar { + type Value = F; + + fn cs(&self) -> ConstraintSystemRef { + self.v.cs() + } + + fn value(&self) -> Result { + self.v.value() + } +} + +impl CondSelectGadget for LimbVar { + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + // We only allow selecting between two values with the same upper bound + assert_eq!(true_value.ub, false_value.ub); + Ok(Self { + v: cond.select(&true_value.v, &false_value.v)?, + ub: true_value.ub.clone(), + }) + } +} + +impl LimbVar { + /// Add two `LimbVar`s. + /// Returns `None` if the upper bound of the sum is too large, i.e., + /// greater than `F::MODULUS_MINUS_ONE_DIV_TWO`. + /// Otherwise, returns the sum as a `LimbVar`. + pub fn add(&self, other: &Self) -> Option { + let ubound = &self.ub + &other.ub; + if ubound < F::MODULUS_MINUS_ONE_DIV_TWO.into() { + Some(Self { + v: &self.v + &other.v, + ub: ubound, + }) + } else { + None + } + } + + /// Add multiple `LimbVar`s. + /// Returns `None` if the upper bound of the sum is too large, i.e., + /// greater than `F::MODULUS_MINUS_ONE_DIV_TWO`. + /// Otherwise, returns the sum as a `LimbVar`. + pub fn add_many(limbs: &[Self]) -> Option { + let ubound = limbs.iter().map(|l| &l.ub).sum(); + if ubound < F::MODULUS_MINUS_ONE_DIV_TWO.into() { + Some(Self { + v: if limbs.is_constant() { + FpVar::constant(limbs.value().unwrap_or_default().into_iter().sum()) + } else { + limbs.iter().map(|l| &l.v).sum() + }, + ub: ubound, + }) + } else { + None + } + } + + /// Multiply two `LimbVar`s. + /// Returns `None` if the upper bound of the product is too large, i.e., + /// greater than `F::MODULUS_MINUS_ONE_DIV_TWO`. + /// Otherwise, returns the product as a `LimbVar`. 
+ pub fn mul(&self, other: &Self) -> Option { + let ubound = &self.ub * &other.ub; + if ubound < F::MODULUS_MINUS_ONE_DIV_TWO.into() { + Some(Self { + v: &self.v * &other.v, + ub: ubound, + }) + } else { + None + } + } + + pub fn zero() -> Self { + Self::default() + } + + pub fn constant(v: F) -> Self { + Self { + v: FpVar::constant(v), + ub: v.into(), + } + } +} + +impl ToBitsGadget for LimbVar { + fn to_bits_le(&self) -> Result>, SynthesisError> { + let cs = self.cs(); + + let bits = &self + .v + .value() + .unwrap_or_default() + .into_bigint() + .to_bits_le()[..self.ub.bits() as usize]; + let bits = if cs.is_none() { + Vec::new_constant(cs, bits)? + } else { + Vec::new_witness(cs, || Ok(bits))? + }; + + Boolean::le_bits_to_fp_var(&bits)?.enforce_equal(&self.v)?; + + Ok(bits) + } +} + +/// `NonNativeUintVar` represents a non-native unsigned integer (BigUint) in the +/// circuit. +/// We apply [xJsnark](https://akosba.github.io/papers/xjsnark.pdf)'s techniques +/// for efficient operations on `NonNativeUintVar`. +/// Note that `NonNativeUintVar` is different from arkworks' `NonNativeFieldVar` +/// in that the latter runs the expensive `reduce` (`align` + `modulo` in our +/// terminology) after each arithmetic operation, while the former only reduces +/// the integer when explicitly called. +#[derive(Debug, Clone)] +pub struct NonNativeUintVar(pub Vec>); + +impl NonNativeUintVar { + const fn bits_per_limb() -> usize { + assert!(F::MODULUS_BIT_SIZE > 250); + // For a `F` with order > 250 bits, 55 is chosen for optimizing the most + // expensive part `Az∘Bz` when checking the R1CS relation for CycleFold. + // Consider using `NonNativeUintVar` to represent the base field `Fq`. + // Since 250 / 55 = 4.46, the `NonNativeUintVar` has 5 limbs. + // Now, the multiplication of two `NonNativeUintVar`s has 9 limbs, and + // each limb has at most 2^{55 * 2} * 5 = 112.3 bits. + // For a 1400x1400 matrix `A`, the multiplication of `A`'s row and `z` + // is the sum of 1400 `NonNativeUintVar`s, each with 9 limbs. + // Thus, the maximum bit length of limbs of each element in `Az` is + // 2^{55 * 2} * 5 * 1400 = 122.7 bits. + // Finally, in the hadamard product of `Az` and `Bz`, every element has + // 17 limbs, whose maximum bit length is (2^{55 * 2} * 5 * 1400)^2 * 9 + // = 248.7 bits and is less than the native field `Fr`. + // Thus, 55 allows us to compute `Az∘Bz` without the expensive alignment + // operation. 
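As a rough sanity check of the bound analysis above (an editorial sketch, not part of this PR; plain num-bigint arithmetic with the limb/row counts taken from the comment):

    use num_bigint::BigUint;
    let limb_max = (BigUint::from(1u32) << 55u32) - BigUint::from(1u32); // a fully packed 55-bit limb
    let prod_limb = &limb_max * &limb_max * BigUint::from(5u32);         // one limb of a 5-limb x 5-limb product
    let az_limb = &prod_limb * BigUint::from(1400u32);                   // one limb of a row of `Az` (1400 columns)
    let azbz_limb = &az_limb * &az_limb * BigUint::from(9u32);           // one limb of `Az∘Bz` (9-limb operands)
    assert!(azbz_limb.bits() <= 250);                                    // stays below the >250-bit native modulus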
+ // + // TODO (@winderica): either make it a global const, or compute an + // optimal value based on the modulus size + 55 + } +} + +struct BoundedBigUint(BigUint, usize); + +impl AllocVar for NonNativeUintVar { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let cs = cs.into().cs(); + let v = f()?; + let BoundedBigUint(x, l) = v.borrow(); + + let mut limbs = vec![]; + for chunk in (0..*l) + .map(|i| x.bit(i as u64)) + .collect::>() + .chunks(Self::bits_per_limb()) + { + let limb = F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap(); + let limb = FpVar::new_variable(cs.clone(), || Ok(limb), mode)?; + Self::enforce_bit_length(&limb, chunk.len())?; + limbs.push(LimbVar { + v: limb, + ub: (BigUint::one() << chunk.len()) - BigUint::one(), + }); + } + + Ok(Self(limbs)) + } +} + +impl AllocVar for NonNativeUintVar { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let cs = cs.into().cs(); + let v = f()?; + let v = v.borrow(); + + let mut limbs = vec![]; + + for chunk in v.into_bigint().to_bits_le().chunks(Self::bits_per_limb()) { + let limb = F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap(); + let limb = FpVar::new_variable(cs.clone(), || Ok(limb), mode)?; + Self::enforce_bit_length(&limb, chunk.len())?; + limbs.push(LimbVar { + v: limb, + ub: (BigUint::one() << chunk.len()) - BigUint::one(), + }); + } + + Ok(Self(limbs)) + } +} + +impl NonNativeUintVar { + pub fn inputize(x: &BigUint, l: usize) -> Vec { + (0..l) + .map(|i| x.bit(i as u64)) + .collect::>() + .chunks(Self::bits_per_limb()) + .map(|chunk| F::from_bigint(F::BigInt::from_bits_le(chunk)).unwrap()) + .collect() + } +} + +impl R1CSVar for NonNativeUintVar { + type Value = BigUint; + + fn cs(&self) -> ConstraintSystemRef { + self.0.cs() + } + + fn value(&self) -> Result { + let mut r = BigUint::zero(); + + for limb in self.0.value()?.into_iter().rev() { + r <<= Self::bits_per_limb(); + r += Into::::into(limb); + } + + Ok(r) + } +} + +impl NonNativeUintVar { + /// Enforce `self` to be less than `other`, where `self` and `other` should + /// be aligned. + /// Adapted from https://github.com/akosba/jsnark/blob/0955389d0aae986ceb25affc72edf37a59109250/JsnarkCircuitBuilder/src/circuit/auxiliary/LongElement.java#L801-L872 + pub fn enforce_lt(&self, other: &Self) -> Result<(), SynthesisError> { + let len = max(self.0.len(), other.0.len()); + let zero = LimbVar::zero(); + + // Compute the difference between limbs of `other` and `self`. + // Denote a positive limb by `+`, a negative limb by `-`, a zero limb by + // `0`, and an unknown limb by `?`. + // Then, for `self < other`, `delta` should look like: + // ? ? ... ? ? + 0 0 ... 0 0 + let delta = (0..len) + .map(|i| { + let x = &self.0.get(i).unwrap_or(&zero).v; + let y = &other.0.get(i).unwrap_or(&zero).v; + y - x + }) + .collect::>(); + + // `helper` is a vector of booleans that indicates if the corresponding + // limb of `delta` is the first (searching from MSB) positive limb. + // For example, if `delta` is: + // - + ... + - + 0 0 ... 0 0 + // <---- search in this direction -------- + // Then `helper` should be: + // F F ... F F T F F ... 
F F + let helper = { + let cs = self.cs().or(other.cs()); + let mut helper = vec![false; len]; + for i in (0..len).rev() { + let delta = delta[i].value().unwrap_or_default().into_bigint(); + if !delta.is_zero() && delta < F::MODULUS_MINUS_ONE_DIV_TWO { + helper[i] = true; + break; + } + } + if cs.is_none() { + Vec::>::new_constant(cs, helper)? + } else { + Vec::new_witness(cs, || Ok(helper))? + } + }; + + // `p` is the first positive limb in `delta`. + let mut p = FpVar::::zero(); + // `r` is the sum of all bits in `helper`, which should be 1 when `self` + // is less than `other`, as there should be more than one positive limb + // in `delta`, and thus exactly one true bit in `helper`. + let mut r = FpVar::zero(); + for (b, d) in helper.into_iter().zip(delta) { + // Choose the limb `d` only if `b` is true. + p += b.select(&d, &FpVar::zero())?; + // Either `r` or `d` should be zero. + // Consider the same example as above: + // - + ... + - + 0 0 ... 0 0 + // F F ... F F T F F ... F F + // |-----------| + // `r = 0` in this range (before/when we meet the first positive limb) + // |---------| + // `d = 0` in this range (after we meet the first positive limb) + // This guarantees that for every bit after the true bit in `helper`, + // the corresponding limb in `delta` is zero. + (&r * &d).enforce_equal(&FpVar::zero())?; + // Add the current bit to `r`. + r += FpVar::from(b); + } + + // Ensure that `r` is exactly 1. This guarantees that there is exactly + // one true value in `helper`. + r.enforce_equal(&FpVar::one())?; + // Ensure that `p` is positive, i.e., + // `0 <= p - 1 < 2^bits_per_limb < F::MODULUS_MINUS_ONE_DIV_TWO`. + // This guarantees that the true value in `helper` corresponds to a + // positive limb in `delta`. + Self::enforce_bit_length(&(p - FpVar::one()), Self::bits_per_limb())?; + + Ok(()) + } + + /// Enforce `self` to be equal to `other`, where `self` and `other` are not + /// necessarily aligned. + /// + /// Adapted from https://github.com/akosba/jsnark/blob/0955389d0aae986ceb25affc72edf37a59109250/JsnarkCircuitBuilder/src/circuit/auxiliary/LongElement.java#L562-L798 + /// Similar implementations can also be found in https://github.com/alex-ozdemir/bellman-bignat/blob/0585b9d90154603a244cba0ac80b9aafe1d57470/src/mp/bignat.rs#L566-L661 + /// and https://github.com/arkworks-rs/r1cs-std/blob/4020fbc22625621baa8125ede87abaeac3c1ca26/src/fields/emulated_fp/reduce.rs#L201-L323 + pub fn enforce_equal_unaligned(&self, other: &Self) -> Result<(), SynthesisError> { + let len = min(self.0.len(), other.0.len()); + + // Group the limbs of `self` and `other` so that each group nearly + // reaches the capacity `F::MODULUS_MINUS_ONE_DIV_TWO`. + // By saying group, we mean the operation `Σ x_i 2^{i * W}`, where `W` + // is the initial number of bits in a limb, just as what we do in grade + // school arithmetic, e.g., + // 5 9 + // x 7 3 + // ------------- + // 15 27 + // 35 63 + // ------------- <- When grouping 35, 15 + 63, and 27, we are computing + // 4 3 0 7 35 * 100 + (15 + 63) * 10 + 27 = 4307 + // Note that this is different from the concatenation `x_0 || x_1 ...`, + // since the bit-length of each limb is not necessarily the initial size + // `W`. + let (steps, x, y, rest) = { + // `steps` stores the size of each grouped limb. + let mut steps = vec![]; + // `x_grouped` stores the grouped limbs of `self`. + let mut x_grouped = vec![]; + // `y_grouped` stores the grouped limbs of `other`. 
+ let mut y_grouped = vec![]; + let mut i = 0; + while i < len { + let mut j = i; + // The current grouped limbs of `self` and `other`. + let mut xx = LimbVar::zero(); + let mut yy = LimbVar::zero(); + while j < len { + let shift = BigUint::one() << (Self::bits_per_limb() * (j - i)); + assert!(shift < F::MODULUS_MINUS_ONE_DIV_TWO.into()); + let shift = LimbVar::constant(shift.into()); + match ( + // Try to group `x` and `y` into `xx` and `yy`. + self.0[j].mul(&shift).and_then(|x| xx.add(&x)), + other.0[j].mul(&shift).and_then(|y| yy.add(&y)), + ) { + // Update the result if successful. + (Some(x), Some(y)) => (xx, yy) = (x, y), + // Break the loop if the upper bound of the result exceeds + // the maximum capacity. + _ => break, + } + j += 1; + } + // Store the grouped limbs and their size. + steps.push((j - i) * Self::bits_per_limb()); + x_grouped.push(xx); + y_grouped.push(yy); + // Start the next group + i = j; + } + let remaining_limbs = &(if i < self.0.len() { self } else { other }).0[i..]; + let rest = if remaining_limbs.is_empty() { + FpVar::zero() + } else { + // If there is any remaining limb, the first one should be the + // final carry (which will be checked later), and the following + // ones should be zero. + + // Enforce the remaining limbs to be zero. + // Instead of doing that one by one, we check if their sum is + // zero using a single constraint. + // This is sound, as the upper bounds of the limbs and their sum + // are guaranteed to be less than `F::MODULUS_MINUS_ONE_DIV_TWO` + // (i.e., all of them are "non-negative"), implying that all + // limbs should be zero to make the sum zero. + LimbVar::add_many(&remaining_limbs[1..]) + .unwrap() + .v + .enforce_equal(&FpVar::zero())?; + remaining_limbs[0].v.clone() + }; + (steps, x_grouped, y_grouped, rest) + }; + let n = steps.len(); + // `c` stores the current carry of `x_i - y_i` + let mut c = FpVar::::zero(); + // For each group, check the last `step_i` bits of `x_i` and `y_i` are + // equal. + // The intuition is to check `diff = x_i - y_i = 0 (mod 2^step_i)`. + // However, this is only true for `i = 0`, and we need to consider carry + // values `diff >> step_i` for `i > 0`. + // Therefore, we actually check `diff = x_i - y_i + c = 0 (mod 2^step_i)` + // and derive the next `c` by computing `diff >> step_i`. + // To enforce `diff = 0 (mod 2^step_i)`, we compute `diff / 2^step_i` + // and enforce it to be small (soundness holds because for `a` that does + // not divide `b`, `b / a` in the field will be very large. + for i in 0..n { + let step = steps[i]; + c = (&x[i].v - &y[i].v + &c) + .mul_by_inverse_unchecked(&FpVar::constant(F::from(BigUint::one() << step)))?; + if i != n - 1 { + // Unlike the code mentioned above which add some offset to the + // diff `x_i - y_i + c` to make it always positive, we directly + // check if the absolute value of the diff is small. + Self::enforce_abs_bit_length( + &c, + (max(&x[i].ub, &y[i].ub).bits() as usize) + .checked_sub(step) + .unwrap_or_default(), + )?; + } else { + // For the final carry, we need to ensure that it equals the + // remaining limb `rest`. + c.enforce_equal(&rest)?; + } + } + + Ok(()) + } +} + +impl ToBitsGadget for NonNativeUintVar { + fn to_bits_le(&self) -> Result>, SynthesisError> { + Ok(self + .0 + .iter() + .map(|limb| limb.to_bits_le()) + .collect::, _>>()? 
+ .concat()) + } +} + +impl ToConstraintFieldGadget for NonNativeUintVar { + fn to_constraint_field(&self) -> Result>, SynthesisError> { + let bits_per_limb = F::MODULUS_BIT_SIZE as usize - 1; + + let limbs = self + .to_bits_le()? + .chunks(bits_per_limb) + .map(Boolean::le_bits_to_fp_var) + .collect::, _>>()?; + + Ok(limbs) + } +} + +impl CondSelectGadget for NonNativeUintVar { + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + assert_eq!(true_value.0.len(), false_value.0.len()); + let mut v = vec![]; + for i in 0..true_value.0.len() { + v.push(cond.select(&true_value.0[i], &false_value.0[i])?); + } + Ok(Self(v)) + } +} + +impl NonNativeUintVar { + pub fn ubound(&self) -> BigUint { + let mut r = BigUint::zero(); + + for i in self.0.iter().rev() { + r <<= Self::bits_per_limb(); + r += &i.ub; + } + + r + } + + fn enforce_bit_length(x: &FpVar, length: usize) -> Result>, SynthesisError> { + let cs = x.cs(); + + let bits = &x.value().unwrap_or_default().into_bigint().to_bits_le()[..length]; + let bits = if cs.is_none() { + Vec::new_constant(cs, bits)? + } else { + Vec::new_witness(cs, || Ok(bits))? + }; + + Boolean::le_bits_to_fp_var(&bits)?.enforce_equal(x)?; + + Ok(bits) + } + + fn enforce_abs_bit_length( + x: &FpVar, + length: usize, + ) -> Result>, SynthesisError> { + let cs = x.cs(); + let mode = if cs.is_none() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + + let is_neg = Boolean::new_variable( + cs.clone(), + || Ok(x.value().unwrap_or_default().into_bigint() > F::MODULUS_MINUS_ONE_DIV_TWO), + mode, + )?; + let bits = Vec::new_variable( + cs.clone(), + || { + Ok({ + let x = x.value().unwrap_or_default(); + let mut bits = if is_neg.value().unwrap_or_default() { + -x + } else { + x + } + .into_bigint() + .to_bits_le(); + bits.resize(length, false); + bits + }) + }, + mode, + )?; + + // Below is equivalent to but more efficient than + // `Boolean::le_bits_to_fp_var(&bits)?.enforce_equal(&is_neg.select(&x.negate()?, &x)?)?` + // Note that this enforces: + // 1. The claimed absolute value `is_neg.select(&x.negate()?, &x)?` has + // exactly `length` bits. + // 2. `is_neg` is indeed the sign of `x`, i.e., `is_neg = false` when + // `0 <= x < (|F| - 1) / 2`, and `is_neg = true` when + // `(|F| - 1) / 2 <= x < F`, thus the claimed absolute value is + // correct. + // If `is_neg` is incorrect, then: + // a. `0 <= x < (|F| - 1) / 2`, but `is_neg = true`, then + // `is_neg.select(&x.negate()?, &x)?` returns `|F| - x`, + // which is greater than `(|F| - 1) / 2` and cannot fit in + // `length` bits (given that `length` is small). + // b. `(|F| - 1) / 2 <= x < F`, but `is_neg = false`, then + // `is_neg.select(&x.negate()?, &x)?` returns `x`, which is + // greater than `(|F| - 1) / 2` and cannot fit in `length` + // bits. + FpVar::from(is_neg).mul_equals(&x.double()?, &(x - Boolean::le_bits_to_fp_var(&bits)?))?; + + Ok(bits) + } + + /// Compute `self + other`, without aligning the limbs. + pub fn add_no_align(&self, other: &Self) -> Self { + let mut z = vec![LimbVar::zero(); max(self.0.len(), other.0.len())]; + for (i, v) in self.0.iter().enumerate() { + z[i] = z[i].add(v).unwrap(); + } + for (i, v) in other.0.iter().enumerate() { + z[i] = z[i].add(v).unwrap(); + } + Self(z) + } + + /// Compute `self * other`, without aligning the limbs. 
+ /// Implements the O(n) approach described in xJsnark, Section IV.B.1) + pub fn mul_no_align(&self, other: &Self) -> Result { + let len = self.0.len() + other.0.len() - 1; + if self.is_constant() || other.is_constant() { + // Use the naive approach for constant operands, which costs no + // constraints. + let z = (0..len) + .map(|i| { + let start = max(i + 1, other.0.len()) - other.0.len(); + let end = min(i + 1, self.0.len()); + LimbVar::add_many( + &(start..end) + .map(|j| self.0[j].mul(&other.0[i - j])) + .collect::>>()?, + ) + }) + .collect::>>() + .unwrap(); + return Ok(Self(z)); + } + let cs = self.cs().or(other.cs()); + let mode = if cs.is_none() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + + // Compute the result `z` outside the circuit and provide it as hints. + let z = { + let mut z = vec![(F::zero(), BigUint::zero()); len]; + for i in 0..self.0.len() { + for j in 0..other.0.len() { + z[i + j].0 += self.0[i].value().unwrap_or_default() + * other.0[j].value().unwrap_or_default(); + z[i + j].1 += &self.0[i].ub * &other.0[j].ub; + } + } + z.into_iter() + .map(|(v, ub)| { + assert!(ub < F::MODULUS_MINUS_ONE_DIV_TWO.into()); + Ok(LimbVar { + v: FpVar::new_variable(cs.clone(), || Ok(v), mode)?, + ub, + }) + }) + .collect::, _>>()? + }; + for c in 1..=len { + let c = F::from(c as u64); + let mut t = F::one(); + let mut c_powers = vec![]; + for _ in 0..len { + c_powers.push(t); + t *= c; + } + // `l = Σ self[i] c^i` + let l = self + .0 + .iter() + .zip(&c_powers) + .map(|(v, t)| (&v.v * *t)) + .collect::>() + .iter() + .sum::>(); + // `r = Σ other[i] c^i` + let r = other + .0 + .iter() + .zip(&c_powers) + .map(|(v, t)| (&v.v * *t)) + .collect::>() + .iter() + .sum::>(); + // `o = Σ z[i] c^i` + let o = z + .iter() + .zip(&c_powers) + .map(|(v, t)| &v.v * *t) + .collect::>() + .iter() + .sum::>(); + // Enforce `o = l * r` + l.mul_equals(&r, &o)?; + } + + Ok(Self(z)) + } + + /// Convert `Self` to an element in `M`, i.e., compute `Self % M::MODULUS`. + pub fn modulo(&self) -> Result { + let cs = self.cs(); + let mode = if cs.is_none() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + let m: BigUint = M::MODULUS.into(); + // Provide the quotient and remainder as hints + let (q, r) = { + let v = self.value().unwrap_or_default(); + let (q, r) = v.div_rem(&m); + let q_ubound = self.ubound().div_ceil(&m); + let r_ubound = &m; + ( + Self::new_variable( + cs.clone(), + || Ok(BoundedBigUint(q, q_ubound.bits() as usize)), + mode, + )?, + Self::new_variable( + cs.clone(), + || Ok(BoundedBigUint(r, r_ubound.bits() as usize)), + mode, + )?, + ) + }; + + let m = Self::new_constant(cs.clone(), BoundedBigUint(m, M::MODULUS_BIT_SIZE as usize))?; + // Enforce `self = q * m + r` + q.mul_no_align(&m)? + .add_no_align(&r) + .enforce_equal_unaligned(self)?; + // Enforce `r < m` (and `r >= 0` already holds) + r.enforce_lt(&m)?; + + Ok(r) + } + + /// Enforce that `self` is congruent to `other` modulo `M::MODULUS`. + pub fn enforce_congruent(&self, other: &Self) -> Result<(), SynthesisError> { + let cs = self.cs(); + let mode = if cs.is_none() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + let m: BigUint = M::MODULUS.into(); + let bits = (max(self.ubound(), other.ubound()) / &m).bits() as usize; + // Provide the quotient `|x - y| / m` and a boolean indicating if `x > y` + // as hints. 
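Out of circuit, the relation these hints make checkable reduces to the following plain-BigUint predicate (an illustrative sketch, not code from this patch):

    use num_bigint::BigUint;

    /// `x ≡ y (mod m)` iff the witnessed quotient `q = |x - y| / m` satisfies
    /// `x = y + q * m` (when `x >= y`) or `x + q * m = y` (when `x < y`).
    fn is_congruent(x: &BigUint, y: &BigUint, m: &BigUint) -> bool {
        let (q, is_ge) = if x > y {
            ((x - y) / m, true)
        } else {
            ((y - x) / m, false)
        };
        if is_ge {
            *x == y + &q * m
        } else {
            x + &q * m == *y
        }
    }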
+ let (q, is_ge) = { + let x = self.value().unwrap_or_default(); + let y = other.value().unwrap_or_default(); + let (d, b) = if x > y { + ((x - y) / &m, true) + } else { + ((y - x) / &m, false) + }; + ( + Self::new_variable(cs.clone(), || Ok(BoundedBigUint(d, bits)), mode)?, + Boolean::new_variable(cs.clone(), || Ok(b), mode)?, + ) + }; + + let zero = Self::new_constant(cs.clone(), BoundedBigUint(BigUint::zero(), bits))?; + let m = Self::new_constant(cs.clone(), BoundedBigUint(m, M::MODULUS_BIT_SIZE as usize))?; + let l = self.add_no_align(&is_ge.select(&zero, &q)?.mul_no_align(&m)?); + let r = other.add_no_align(&is_ge.select(&q, &zero)?.mul_no_align(&m)?); + // If `self >= other`, enforce `self = other + q * m` + // Otherwise, enforce `self + q * m = other` + // Soundness holds because if `self` and `other` are not congruent, then + // one can never find a `q` satisfying either equation above. + l.enforce_equal_unaligned(&r) + } +} + +impl]>> From for NonNativeUintVar { + fn from(bits: B) -> Self { + Self( + bits.as_ref() + .chunks(Self::bits_per_limb()) + .map(LimbVar::from) + .collect::>(), + ) + } +} + +/// The out-circuit counterpart of `NonNativeUintVar::to_constraint_field` +pub fn nonnative_field_to_field_elements( + f: &TargetField, +) -> Vec { + let bits = f.into_bigint().to_bits_le(); + + let bits_per_limb = BaseField::MODULUS_BIT_SIZE as usize - 1; + let num_limbs = (TargetField::MODULUS_BIT_SIZE as usize).div_ceil(bits_per_limb); + + let mut limbs = bits + .chunks(bits_per_limb) + .map(|chunk| { + let mut limb = BaseField::zero(); + let mut w = BaseField::one(); + for &b in chunk.iter() { + limb += BaseField::from(b) * w; + w.double_in_place(); + } + limb + }) + .collect::>(); + limbs.resize(num_limbs, BaseField::zero()); + + limbs +} + +impl VectorGadget> for [NonNativeUintVar] { + fn add(&self, other: &Self) -> Result>, SynthesisError> { + Ok(self + .iter() + .zip(other.iter()) + .map(|(x, y)| x.add_no_align(y)) + .collect()) + } + + fn hadamard(&self, other: &Self) -> Result>, SynthesisError> { + self.iter() + .zip(other.iter()) + .map(|(x, y)| x.mul_no_align(y)) + .collect() + } + + fn mul_scalar( + &self, + other: &NonNativeUintVar, + ) -> Result>, SynthesisError> { + self.iter().map(|x| x.mul_no_align(other)).collect() + } +} + +impl MatrixGadget> + for SparseMatrixVar> +{ + fn mul_vector( + &self, + v: &[NonNativeUintVar], + ) -> Result>, SynthesisError> { + Ok(self + .coeffs + .iter() + .map(|row| { + let len = row + .iter() + .map(|(value, col_i)| value.0.len() + v[*col_i].0.len() - 1) + .max() + .unwrap_or(0); + // This is a combination of `mul_no_align` and `add_no_align` + // that results in more flattened `LinearCombination`s. + // Consequently, `ConstraintSystem::inline_all_lcs` costs less + // time, thus making trusted setup and proof generation faster. 
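The limb bookkeeping below is ordinary schoolbook convolution; as a plain-integer analogue (an editorial sketch, not part of this PR, using BigUint limbs and ignoring the in-circuit upper-bound tracking):

    use num_bigint::BigUint;

    /// Multiply one sparse row by a vector of multi-limb integers, accumulating
    /// per-limb products without carrying or aligning, like `mul_vector` does in-circuit.
    fn row_times_vector(row: &[(Vec<BigUint>, usize)], v: &[Vec<BigUint>]) -> Vec<BigUint> {
        let len = row
            .iter()
            .map(|(a, c)| a.len() + v[*c].len() - 1)
            .max()
            .unwrap_or(0);
        let mut out = vec![BigUint::from(0u32); len];
        for (a, c) in row {
            for (i, ai) in a.iter().enumerate() {
                for (j, vj) in v[*c].iter().enumerate() {
                    out[i + j] += ai * vj; // limb i+j of the running sum
                }
            }
        }
        out
    }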
+ let limbs = (0..len) + .map(|i| { + LimbVar::add_many( + &row.iter() + .flat_map(|(value, col_i)| { + let start = max(i + 1, v[*col_i].0.len()) - v[*col_i].0.len(); + let end = min(i + 1, value.0.len()); + (start..end).map(|j| value.0[j].mul(&v[*col_i].0[i - j])) + }) + .collect::>>()?, + ) + }) + .collect::>>() + .unwrap(); + NonNativeUintVar(limbs) + }) + .collect()) + } +} + +#[cfg(test)] +mod tests { + use std::error::Error; + + use super::*; + use ark_ff::Field; + use ark_pallas::{Fq, Fr}; + use ark_relations::r1cs::ConstraintSystem; + use ark_std::{test_rng, UniformRand}; + use num_bigint::RandBigInt; + + #[test] + fn test_mul_biguint() -> Result<(), Box> { + let cs = ConstraintSystem::::new_ref(); + + let size = 256; + + let rng = &mut test_rng(); + let a = rng.gen_biguint(size as u64); + let b = rng.gen_biguint(size as u64); + let ab = &a * &b; + let aab = &a * &ab; + let abb = &ab * &b; + + let a_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(BoundedBigUint(a, size)))?; + let b_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(BoundedBigUint(b, size)))?; + let ab_var = + NonNativeUintVar::new_witness(cs.clone(), || Ok(BoundedBigUint(ab, size * 2)))?; + let aab_var = + NonNativeUintVar::new_witness(cs.clone(), || Ok(BoundedBigUint(aab, size * 3)))?; + let abb_var = + NonNativeUintVar::new_witness(cs.clone(), || Ok(BoundedBigUint(abb, size * 3)))?; + + a_var + .mul_no_align(&b_var)? + .enforce_equal_unaligned(&ab_var)?; + a_var + .mul_no_align(&ab_var)? + .enforce_equal_unaligned(&aab_var)?; + ab_var + .mul_no_align(&b_var)? + .enforce_equal_unaligned(&abb_var)?; + + assert!(cs.is_satisfied()?); + Ok(()) + } + + #[test] + fn test_mul_fq() -> Result<(), Box> { + let cs = ConstraintSystem::::new_ref(); + + let rng = &mut test_rng(); + let a = Fq::rand(rng); + let b = Fq::rand(rng); + let ab = a * b; + let aab = a * ab; + let abb = ab * b; + + let a_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(a))?; + let b_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(b))?; + let ab_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(ab))?; + let aab_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(aab))?; + let abb_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(abb))?; + + a_var + .mul_no_align(&b_var)? + .enforce_congruent::(&ab_var)?; + a_var + .mul_no_align(&ab_var)? + .enforce_congruent::(&aab_var)?; + ab_var + .mul_no_align(&b_var)? 
+ .enforce_congruent::(&abb_var)?; + + assert!(cs.is_satisfied()?); + Ok(()) + } + + #[test] + fn test_pow() -> Result<(), Box> { + let cs = ConstraintSystem::::new_ref(); + + let rng = &mut test_rng(); + + let a = Fq::rand(rng); + + let a_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(a))?; + + let mut r_var = a_var.clone(); + for _ in 0..16 { + r_var = r_var.mul_no_align(&r_var)?.modulo::()?; + } + r_var = r_var.mul_no_align(&a_var)?.modulo::()?; + assert_eq!(a.pow([65537u64]), Fq::from(r_var.value()?)); + assert!(cs.is_satisfied()?); + Ok(()) + } + + #[test] + fn test_vec_vec_mul() -> Result<(), Box> { + let cs = ConstraintSystem::::new_ref(); + + let len = 1000; + + let rng = &mut test_rng(); + let a = (0..len).map(|_| Fq::rand(rng)).collect::>(); + let b = (0..len).map(|_| Fq::rand(rng)).collect::>(); + let c = a.iter().zip(b.iter()).map(|(a, b)| a * b).sum::(); + + let a_var = Vec::>::new_witness(cs.clone(), || Ok(a))?; + let b_var = Vec::>::new_witness(cs.clone(), || Ok(b))?; + let c_var = NonNativeUintVar::new_witness(cs.clone(), || Ok(c))?; + + let mut r_var = + NonNativeUintVar::new_constant(cs.clone(), BoundedBigUint(BigUint::zero(), 0))?; + for (a, b) in a_var.into_iter().zip(b_var.into_iter()) { + r_var = r_var.add_no_align(&a.mul_no_align(&b)?); + } + r_var.enforce_congruent::(&c_var)?; + + assert!(cs.is_satisfied()?); + Ok(()) + } +} diff --git a/folding-schemes/src/folding/nova/circuits.rs b/folding-schemes/src/folding/nova/circuits.rs index b80093f..dbf4749 100644 --- a/folding-schemes/src/folding/nova/circuits.rs +++ b/folding-schemes/src/folding/nova/circuits.rs @@ -14,13 +14,13 @@ use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, eq::EqGadget, - fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar}, + fields::{fp::FpVar, FieldVar}, groups::GroupOpsBounds, prelude::CurveVar, R1CSVar, ToConstraintFieldGadget, }; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError}; -use ark_std::{fmt::Debug, Zero}; +use ark_std::{fmt::Debug, One, Zero}; use core::{borrow::Borrow, marker::PhantomData}; use super::{ @@ -29,9 +29,12 @@ use super::{ }, CommittedInstance, }; -use crate::folding::circuits::nonnative::{nonnative_affine_to_field_elements, NonNativeAffineVar}; +use crate::constants::N_BITS_RO; +use crate::folding::circuits::nonnative::{ + affine::{nonnative_affine_to_field_elements, NonNativeAffineVar}, + uint::NonNativeUintVar, +}; use crate::frontend::FCircuit; -use crate::{constants::N_BITS_RO, folding::circuits::nonnative::nonnative_field_var_from_le_bits}; /// CF1 represents the ConstraintField used for the main Nova circuit which is over E1::Fr, where /// E1 is the main curve where we do the folding. @@ -402,7 +405,11 @@ where )?; let r = Boolean::le_bits_to_fp_var(&r_bits)?; // Also convert r_bits to a `NonNativeFieldVar` - let r_nonnat = nonnative_field_var_from_le_bits(cs.clone(), &r_bits)?; + let r_nonnat = { + let mut bits = r_bits; + bits.resize(C1::BaseField::MODULUS_BIT_SIZE as usize, Boolean::FALSE); + NonNativeUintVar::from(&bits) + }; // Notice that NIFSGadget::fold_committed_instance does not fold cmE & cmW. 
// We set `U_i1.cmE` and `U_i1.cmW` to unconstrained witnesses `U_i1_cmE` and `U_i1_cmW` @@ -452,7 +459,7 @@ where // cf1_u_i.cmE = 0 cmE: GC2::zero(), // cf1_u_i.u = 1 - u: NonNativeFieldVar::one(), + u: NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::one())?, // cf1_u_i.cmW is provided by the prover as witness cmW: GC2::new_witness(cs.clone(), || Ok(self.cf1_u_i_cmW.unwrap_or(C2::zero())))?, // cf1_u_i.x is computed in step 1 @@ -462,7 +469,7 @@ where // cf2_u_i.cmE = 0 cmE: GC2::zero(), // cf2_u_i.u = 1 - u: NonNativeFieldVar::one(), + u: NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::one())?, // cf2_u_i.cmW is provided by the prover as witness cmW: GC2::new_witness(cs.clone(), || Ok(self.cf2_u_i_cmW.unwrap_or(C2::zero())))?, // cf2_u_i.x is computed in step 1 @@ -482,7 +489,11 @@ where cf1_cmT.clone(), )?; // Convert cf1_r_bits to a `NonNativeFieldVar` - let cf1_r_nonnat = nonnative_field_var_from_le_bits(cs.clone(), &cf1_r_bits)?; + let cf1_r_nonnat = { + let mut bits = cf1_r_bits.clone(); + bits.resize(C1::BaseField::MODULUS_BIT_SIZE as usize, Boolean::FALSE); + NonNativeUintVar::from(&bits) + }; // Fold cf1_u_i & cf_U_i into cf1_U_{i+1} let cf1_U_i1 = NIFSFullGadget::::fold_committed_instance( cf1_r_bits, @@ -500,7 +511,11 @@ where cf2_u_i.clone(), cf2_cmT.clone(), )?; - let cf2_r_nonnat = nonnative_field_var_from_le_bits(cs.clone(), &cf2_r_bits)?; + let cf2_r_nonnat = { + let mut bits = cf2_r_bits.clone(); + bits.resize(C1::BaseField::MODULUS_BIT_SIZE as usize, Boolean::FALSE); + NonNativeUintVar::from(&bits) + }; let cf_U_i1 = NIFSFullGadget::::fold_committed_instance( cf2_r_bits, cf2_r_nonnat, diff --git a/folding-schemes/src/folding/nova/cyclefold.rs b/folding-schemes/src/folding/nova/cyclefold.rs index 7bc87bb..c372303 100644 --- a/folding-schemes/src/folding/nova/cyclefold.rs +++ b/folding-schemes/src/folding/nova/cyclefold.rs @@ -16,7 +16,7 @@ use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, eq::EqGadget, - fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar}, + fields::{fp::FpVar, FieldVar}, groups::GroupOpsBounds, prelude::CurveVar, ToConstraintFieldGadget, @@ -29,7 +29,7 @@ use core::{borrow::Borrow, marker::PhantomData}; use super::circuits::CF2; use super::CommittedInstance; use crate::constants::N_BITS_RO; -use crate::folding::circuits::nonnative::nonnative_field_var_to_constraint_field; +use crate::folding::circuits::nonnative::uint::NonNativeUintVar; use crate::Error; // public inputs length for the CycleFoldCircuit: |[r, p1.x,y, p2.x,y, p3.x,y]| @@ -43,9 +43,9 @@ where for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, { pub cmE: GC, - pub u: NonNativeFieldVar>, + pub u: NonNativeUintVar>, pub cmW: GC, - pub x: Vec>>, + pub x: Vec>>, } impl AllocVar, CF2> for CycleFoldCommittedInstanceVar where @@ -64,16 +64,8 @@ where let cmE = GC::new_variable(cs.clone(), || Ok(val.borrow().cmE), mode)?; let cmW = GC::new_variable(cs.clone(), || Ok(val.borrow().cmW), mode)?; - let u = NonNativeFieldVar::>::new_variable( - cs.clone(), - || Ok(val.borrow().u), - mode, - )?; - let x = Vec::>>::new_variable( - cs.clone(), - || Ok(val.borrow().x.clone()), - mode, - )?; + let u = NonNativeUintVar::new_variable(cs.clone(), || Ok(val.borrow().u), mode)?; + let x = Vec::new_variable(cs.clone(), || Ok(val.borrow().x.clone()), mode)?; Ok(Self { cmE, u, cmW, x }) }) @@ -99,10 +91,10 @@ where let is_inf = cmE_is_inf.double()? 
+ cmW_is_inf; Ok([ - nonnative_field_var_to_constraint_field(&self.u)?, + self.u.to_constraint_field()?, self.x .iter() - .map(nonnative_field_var_to_constraint_field) + .map(|i| i.to_constraint_field()) .collect::, _>>()? .concat(), cmE_elems, @@ -193,7 +185,7 @@ where pub fn fold_committed_instance( // assumes that r_bits is equal to r_nonnat just that in a different format r_bits: Vec>>, - r_nonnat: NonNativeFieldVar>, + r_nonnat: NonNativeUintVar>, cmT: GC, ci1: CycleFoldCommittedInstanceVar, // ci2 is assumed to be always with cmE=0, u=1 (checks done previous to this method) @@ -202,20 +194,23 @@ where Ok(CycleFoldCommittedInstanceVar { cmE: cmT.scalar_mul_le(r_bits.iter())? + ci1.cmE, cmW: ci1.cmW + ci2.cmW.scalar_mul_le(r_bits.iter())?, - u: ci1.u + r_nonnat.clone(), + u: ci1.u.add_no_align(&r_nonnat).modulo::()?, x: ci1 .x .iter() .zip(ci2.x) - .map(|(a, b)| a + &r_nonnat * &b) - .collect::>(), + .map(|(a, b)| { + a.add_no_align(&r_nonnat.mul_no_align(&b)?) + .modulo::() + }) + .collect::, _>>()?, }) } pub fn verify( // assumes that r_bits is equal to r_nonnat just that in a different format r_bits: Vec>>, - r_nonnat: NonNativeFieldVar>, + r_nonnat: NonNativeUintVar>, cmT: GC, ci1: CycleFoldCommittedInstanceVar, // ci2 is assumed to be always with cmE=0, u=1 (checks done previous to this method) @@ -225,9 +220,11 @@ where let ci = Self::fold_committed_instance(r_bits, r_nonnat, cmT, ci1, ci2)?; ci.cmE.enforce_equal(&ci3.cmE)?; - ci.u.enforce_equal(&ci3.u)?; + ci.u.enforce_equal_unaligned(&ci3.u)?; ci.cmW.enforce_equal(&ci3.cmW)?; - ci.x.enforce_equal(&ci3.x)?; + for (x, y) in ci.x.iter().zip(ci3.x.iter()) { + x.enforce_equal_unaligned(y)?; + } Ok(()) } @@ -316,7 +313,6 @@ pub struct CycleFoldCircuit>> { pub r_bits: Option>, pub p1: Option, pub p2: Option, - pub p3: Option, pub x: Option>>, // public inputs (cf_u_{i+1}.x) } impl>> CycleFoldCircuit { @@ -326,7 +322,6 @@ impl>> CycleFoldCircuit { r_bits: None, p1: None, p2: None, - p3: None, x: None, } } @@ -344,7 +339,11 @@ where })?; let p1 = GC::new_witness(cs.clone(), || Ok(self.p1.unwrap_or(C::zero())))?; let p2 = GC::new_witness(cs.clone(), || Ok(self.p2.unwrap_or(C::zero())))?; - let p3 = GC::new_witness(cs.clone(), || Ok(self.p3.unwrap_or(C::zero())))?; + // Fold the original Nova instances natively in CycleFold + // For the cmW we're computing: U_i1.cmW = U_i.cmW + r * u_i.cmW + // For the cmE we're computing: U_i1.cmE = U_i.cmE + r * cmT + r^2 * u_i.cmE, where u_i.cmE + // is assumed to be 0, so, U_i1.cmE = U_i.cmE + r * cmT + let p3 = &p1 + p2.scalar_mul_le(r_bits.iter())?; let x = Vec::>>::new_input(cs.clone(), || { Ok(self.x.unwrap_or(vec![CF2::::zero(); CF_IO_LEN])) @@ -356,19 +355,13 @@ where let r: FpVar> = Boolean::le_bits_to_fp_var(&r_bits)?; let points_coords: Vec>> = [ vec![r], - p1.clone().to_constraint_field()?[..2].to_vec(), - p2.clone().to_constraint_field()?[..2].to_vec(), - p3.clone().to_constraint_field()?[..2].to_vec(), + p1.to_constraint_field()?[..2].to_vec(), + p2.to_constraint_field()?[..2].to_vec(), + p3.to_constraint_field()?[..2].to_vec(), ] .concat(); points_coords.enforce_equal(&x)?; - // Fold the original Nova instances natively in CycleFold - // For the cmW we're checking: U_i1.cmW == U_i.cmW + r * u_i.cmW - // For the cmE we're checking: U_i1.cmE == U_i.cmE + r * cmT + r^2 * u_i.cmE, where u_i.cmE - // is assumed to be 0, so, U_i1.cmE == U_i.cmE + r * cmT - p3.enforce_equal(&(p1 + p2.scalar_mul_le(r_bits.iter())?))?; - Ok(()) } } @@ -429,7 +422,6 @@ pub mod tests { r_bits: Some(r_bits.clone()), 
p1: Some(ci1.clone().cmW), p2: Some(ci2.clone().cmW), - p3: Some(ci3.clone().cmW), x: Some(cfW_u_i_x.clone()), }; cfW_circuit.generate_constraints(cs.clone()).unwrap(); @@ -449,7 +441,6 @@ pub mod tests { r_bits: Some(r_bits.clone()), p1: Some(ci1.clone().cmE), p2: Some(cmT), - p3: Some(ci3.clone().cmE), x: Some(cfE_u_i_x.clone()), }; cfE_circuit.generate_constraints(cs.clone()).unwrap(); @@ -462,8 +453,7 @@ pub mod tests { let cs = ConstraintSystem::::new_ref(); - let r_nonnatVar = - NonNativeFieldVar::::new_witness(cs.clone(), || Ok(r_Fr)).unwrap(); + let r_nonnatVar = NonNativeUintVar::::new_witness(cs.clone(), || Ok(r_Fr)).unwrap(); let r_bitsVar = Vec::>::new_witness(cs.clone(), || Ok(r_bits)).unwrap(); let ci1Var = diff --git a/folding-schemes/src/folding/nova/decider_eth.rs b/folding-schemes/src/folding/nova/decider_eth.rs index 1ae68c5..b6b36cb 100644 --- a/folding-schemes/src/folding/nova/decider_eth.rs +++ b/folding-schemes/src/folding/nova/decider_eth.rs @@ -13,7 +13,7 @@ use super::{circuits::CF2, nifs::NIFS, CommittedInstance, Nova}; use crate::commitment::{ kzg::Proof as KZGProof, pedersen::Params as PedersenParams, CommitmentScheme, }; -use crate::folding::circuits::nonnative::NonNativeAffineVar; +use crate::folding::circuits::nonnative::affine::NonNativeAffineVar; use crate::frontend::FCircuit; use crate::Error; use crate::{Decider as DeciderTrait, FoldingScheme}; diff --git a/folding-schemes/src/folding/nova/decider_eth_circuit.rs b/folding-schemes/src/folding/nova/decider_eth_circuit.rs index 335eb28..bf0032b 100644 --- a/folding-schemes/src/folding/nova/decider_eth_circuit.rs +++ b/folding-schemes/src/folding/nova/decider_eth_circuit.rs @@ -9,7 +9,7 @@ use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, boolean::Boolean, eq::EqGadget, - fields::{fp::FpVar, nonnative::NonNativeFieldVar, FieldVar}, + fields::{fp::FpVar, FieldVar}, groups::GroupOpsBounds, poly::{domain::Radix2DomainVar, evaluations::univariate::EvaluationsVar}, prelude::CurveVar, @@ -22,7 +22,10 @@ use core::{borrow::Borrow, marker::PhantomData}; use super::{circuits::ChallengeGadget, nifs::NIFS}; use crate::ccs::r1cs::R1CS; use crate::commitment::{pedersen::Params as PedersenParams, CommitmentScheme}; -use crate::folding::circuits::nonnative::{nonnative_affine_to_field_elements, NonNativeAffineVar}; +use crate::folding::circuits::nonnative::{ + affine::{nonnative_affine_to_field_elements, NonNativeAffineVar}, + uint::NonNativeUintVar, +}; use crate::folding::nova::{ circuits::{CommittedInstanceVar, CF1, CF2}, CommittedInstance, Nova, Witness, @@ -33,40 +36,54 @@ use crate::transcript::{ Transcript, TranscriptVar, }; use crate::utils::{ - gadgets::{hadamard, mat_vec_mul_sparse, vec_add, vec_scalar_mul, SparseMatrixVar}, + gadgets::{MatrixGadget, SparseMatrixVar, VectorGadget}, vec::poly_from_vec, }; use crate::Error; #[derive(Debug, Clone)] -pub struct RelaxedR1CSGadget> { - _f: PhantomData, - _cf: PhantomData, - _fv: PhantomData, -} -impl> RelaxedR1CSGadget { - /// performs the RelaxedR1CS check (Az∘Bz==uCz+E) - pub fn check( - r1cs: R1CSVar, - E: Vec, - u: FV, - z: Vec, +pub struct RelaxedR1CSGadget {} +impl RelaxedR1CSGadget { + /// performs the RelaxedR1CS check for native variables (Az∘Bz==uCz+E) + pub fn check_native( + r1cs: R1CSVar>, + E: Vec>, + u: FpVar, + z: Vec>, ) -> Result<(), SynthesisError> { - let Az = mat_vec_mul_sparse(r1cs.A, z.clone()); - let Bz = mat_vec_mul_sparse(r1cs.B, z.clone()); - let Cz = mat_vec_mul_sparse(r1cs.C, z.clone()); - let uCz = vec_scalar_mul(&Cz, &u); - let uCzE = 
vec_add(&uCz, &E)?; - let AzBz = hadamard(&Az, &Bz)?; - for i in 0..AzBz.len() { - AzBz[i].enforce_equal(&uCzE[i].clone())?; - } + let Az = r1cs.A.mul_vector(&z)?; + let Bz = r1cs.B.mul_vector(&z)?; + let Cz = r1cs.C.mul_vector(&z)?; + let uCzE = Cz.mul_scalar(&u)?.add(&E)?; + let AzBz = Az.hadamard(&Bz)?; + AzBz.enforce_equal(&uCzE)?; Ok(()) } + + /// performs the RelaxedR1CS check for non-native variables (Az∘Bz==uCz+E) + pub fn check_nonnative( + r1cs: R1CSVar>, + E: Vec>, + u: NonNativeUintVar, + z: Vec>, + ) -> Result<(), SynthesisError> { + // First we do addition and multiplication without mod F's order + let Az = r1cs.A.mul_vector(&z)?; + let Bz = r1cs.B.mul_vector(&z)?; + let Cz = r1cs.C.mul_vector(&z)?; + let uCzE = Cz.mul_scalar(&u)?.add(&E)?; + let AzBz = Az.hadamard(&Bz)?; + + // Then we compare the results by checking if they are congruent + // modulo the field order + AzBz.into_iter() + .zip(uCzE) + .try_for_each(|(a, b)| a.enforce_congruent::(&b)) + } } #[derive(Debug, Clone)] -pub struct R1CSVar> { +pub struct R1CSVar> { _f: PhantomData, _cf: PhantomData, _fv: PhantomData, @@ -79,7 +96,7 @@ impl AllocVar, CF> for R1CSVar where F: PrimeField, CF: PrimeField, - FV: FieldVar, + FV: AllocVar, { fn new_variable>>( cs: impl Into>, @@ -146,10 +163,10 @@ where /// non-native representation, since it is used to represent the CycleFold witness. #[derive(Debug, Clone)] pub struct CycleFoldWitnessVar { - pub E: Vec>>, - pub rE: NonNativeFieldVar>, - pub W: Vec>>, - pub rW: NonNativeFieldVar>, + pub E: Vec>>, + pub rE: NonNativeUintVar>, + pub W: Vec>>, + pub rW: NonNativeUintVar>, } impl AllocVar, CF2> for CycleFoldWitnessVar @@ -165,21 +182,11 @@ where f().and_then(|val| { let cs = cs.into(); - let E: Vec>> = - Vec::new_variable(cs.clone(), || Ok(val.borrow().E.clone()), mode)?; - let rE = NonNativeFieldVar::>::new_variable( - cs.clone(), - || Ok(val.borrow().rE), - mode, - )?; + let E = Vec::new_variable(cs.clone(), || Ok(val.borrow().E.clone()), mode)?; + let rE = NonNativeUintVar::new_variable(cs.clone(), || Ok(val.borrow().rE), mode)?; - let W: Vec>> = - Vec::new_variable(cs.clone(), || Ok(val.borrow().W.clone()), mode)?; - let rW = NonNativeFieldVar::>::new_variable( - cs.clone(), - || Ok(val.borrow().rW), - mode, - )?; + let W = Vec::new_variable(cs.clone(), || Ok(val.borrow().W.clone()), mode)?; + let rW = NonNativeUintVar::new_variable(cs.clone(), || Ok(val.borrow().rW), mode)?; Ok(Self { E, rE, W, rW }) }) @@ -404,18 +411,13 @@ where // 1. check RelaxedR1CS of U_{i+1} let z_U1: Vec>> = [vec![U_i1.u.clone()], U_i1.x.to_vec(), W_i1.W.to_vec()].concat(); - RelaxedR1CSGadget::, FpVar>>::check( - r1cs, - W_i1.E.clone(), - U_i1.u.clone(), - z_U1, - )?; + RelaxedR1CSGadget::check_native(r1cs, W_i1.E.clone(), U_i1.u.clone(), z_U1)?; // 2. u_i.cmE==cm(0), u_i.u==1 // Here zero is the x & y coordinates of the zero point affine representation. 
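// Illustrative sketch (not part of this patch): a standalone, out-of-circuit analogue of the
// congruence comparison used by `check_nonnative` above. Az∘Bz and u·Cz + E are computed
// without reducing modulo the non-native field order q, and the two (possibly overflowing)
// results are then compared modulo q. Uses the `num-bigint` crate; the helper, modulus and
// values below are made up purely for illustration.
use num_bigint::BigUint;

// a ≡ b (mod q)  ⟺  a mod q == b mod q
fn congruent(a: &BigUint, b: &BigUint, q: &BigUint) -> bool {
    a % q == b % q
}

fn main() {
    let q = BigUint::from(23u8); // toy stand-in for the non-native field order
    // one row of the relaxed R1CS relation, evaluated without modular reduction:
    let az_bz = BigUint::from(7u8) * BigUint::from(15u8); // (Az ∘ Bz)_i = 105, already > q
    let u_cz_e = BigUint::from(3u8) * BigUint::from(27u8) + BigUint::from(93u8); // (u·Cz + E)_i = 174
    // both sides reduce to 13 mod 23, so this row is satisfied over the non-native field
    assert!(congruent(&az_bz, &u_cz_e, &q));
}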
- let zero = NonNativeFieldVar::::zero(); - u_i.cmE.x.enforce_equal(&zero)?; - u_i.cmE.y.enforce_equal(&zero)?; + let zero = NonNativeUintVar::new_constant(cs.clone(), C1::BaseField::zero())?; + u_i.cmE.x.enforce_equal_unaligned(&zero)?; + u_i.cmE.y.enforce_equal_unaligned(&zero)?; (u_i.u.is_one()?).enforce_equal(&Boolean::TRUE)?; // 3.a u_i.x[0] == H(i, z_0, z_i, U_i) @@ -470,20 +472,15 @@ where PedersenGadget::::commit(H, G, cf_W_i_W_bits?, cf_W_i.rW.to_bits_le()?)?; cf_U_i.cmW.enforce_equal(&computed_cmW)?; - let cf_r1cs = R1CSVar::< - C1::BaseField, - CF1, - NonNativeFieldVar>, - >::new_witness(cs.clone(), || Ok(self.cf_r1cs.clone()))?; + let cf_r1cs = + R1CSVar::, NonNativeUintVar>>::new_witness( + cs.clone(), + || Ok(self.cf_r1cs.clone()), + )?; // 5. check RelaxedR1CS of cf_U_i - let cf_z_U: Vec>> = - [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat(); - RelaxedR1CSGadget::< - C2::ScalarField, - CF1, - NonNativeFieldVar>, - >::check(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?; + let cf_z_U = [vec![cf_U_i.u.clone()], cf_U_i.x.to_vec(), cf_W_i.W.to_vec()].concat(); + RelaxedR1CSGadget::check_nonnative(cf_r1cs, cf_W_i.E, cf_U_i.u.clone(), cf_z_U)?; } // 6. check KZG challenges @@ -632,7 +629,7 @@ pub mod tests { let uVar = FpVar::::new_witness(cs.clone(), || Ok(rel_r1cs.u)).unwrap(); let r1csVar = R1CSVar::>::new_witness(cs.clone(), || Ok(r1cs)).unwrap(); - RelaxedR1CSGadget::>::check(r1csVar, EVar, uVar, zVar).unwrap(); + RelaxedR1CSGadget::check_native(r1csVar, EVar, uVar, zVar).unwrap(); assert!(cs.is_satisfied().unwrap()); } @@ -663,7 +660,7 @@ pub mod tests { let uVar = FpVar::::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap(); let r1csVar = R1CSVar::>::new_witness(cs.clone(), || Ok(r1cs)).unwrap(); - RelaxedR1CSGadget::>::check(r1csVar, EVar, uVar, zVar).unwrap(); + RelaxedR1CSGadget::check_native(r1csVar, EVar, uVar, zVar).unwrap(); assert!(cs.is_satisfied().unwrap()); } @@ -752,20 +749,16 @@ pub mod tests { let uVar = FpVar::::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap(); let r1csVar = R1CSVar::>::new_witness(cs.clone(), || Ok(r1cs.clone())).unwrap(); - RelaxedR1CSGadget::>::check(r1csVar, EVar, uVar, zVar).unwrap(); + RelaxedR1CSGadget::check_native(r1csVar, EVar, uVar, zVar).unwrap(); // non-natively let cs = ConstraintSystem::::new_ref(); - let zVar = Vec::>::new_witness(cs.clone(), || Ok(z)).unwrap(); - let EVar = Vec::>::new_witness(cs.clone(), || Ok(relaxed_r1cs.E)) - .unwrap(); - let uVar = - NonNativeFieldVar::::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap(); + let zVar = Vec::new_witness(cs.clone(), || Ok(z)).unwrap(); + let EVar = Vec::new_witness(cs.clone(), || Ok(relaxed_r1cs.E)).unwrap(); + let uVar = NonNativeUintVar::::new_witness(cs.clone(), || Ok(relaxed_r1cs.u)).unwrap(); let r1csVar = - R1CSVar::>::new_witness(cs.clone(), || Ok(r1cs)) - .unwrap(); - RelaxedR1CSGadget::>::check(r1csVar, EVar, uVar, zVar) - .unwrap(); + R1CSVar::>::new_witness(cs.clone(), || Ok(r1cs)).unwrap(); + RelaxedR1CSGadget::check_nonnative(r1csVar, EVar, uVar, zVar).unwrap(); } #[test] diff --git a/folding-schemes/src/folding/nova/mod.rs b/folding-schemes/src/folding/nova/mod.rs index b6cc2d7..7a0894d 100644 --- a/folding-schemes/src/folding/nova/mod.rs +++ b/folding-schemes/src/folding/nova/mod.rs @@ -16,7 +16,7 @@ use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use crate::ccs::r1cs::{extract_r1cs, extract_w_x, R1CS}; use crate::commitment::CommitmentScheme; use crate::folding::circuits::nonnative::{ - 
nonnative_affine_to_field_elements, nonnative_field_to_field_elements, + affine::nonnative_affine_to_field_elements, uint::nonnative_field_to_field_elements, }; use crate::frontend::FCircuit; use crate::utils::vec::is_zero_vec; @@ -434,7 +434,6 @@ where r_bits: Some(r_bits.clone()), p1: Some(self.U_i.clone().cmW), p2: Some(self.u_i.clone().cmW), - p3: Some(U_i1.clone().cmW), x: Some(cfW_u_i_x.clone()), }; let cfE_circuit = CycleFoldCircuit:: { @@ -442,7 +441,6 @@ where r_bits: Some(r_bits.clone()), p1: Some(self.U_i.clone().cmE), p2: Some(cmT), - p3: Some(U_i1.clone().cmE), x: Some(cfE_u_i_x.clone()), }; @@ -482,8 +480,8 @@ where cf_x: Some(cf_u_i1_x), }; - self.cf_W_i = cf_W_i1.clone(); - self.cf_U_i = cf_U_i1.clone(); + self.cf_W_i = cf_W_i1; + self.cf_U_i = cf_U_i1; #[cfg(test)] { diff --git a/folding-schemes/src/utils/gadgets.rs b/folding-schemes/src/utils/gadgets.rs index dc6aa9c..6c22918 100644 --- a/folding-schemes/src/utils/gadgets.rs +++ b/folding-schemes/src/utils/gadgets.rs @@ -1,69 +1,48 @@ use ark_ff::PrimeField; use ark_r1cs_std::{ alloc::{AllocVar, AllocationMode}, - fields::FieldVar, + fields::{fp::FpVar, FieldVar}, + R1CSVar, }; use ark_relations::r1cs::{Namespace, SynthesisError}; use core::{borrow::Borrow, marker::PhantomData}; use crate::utils::vec::SparseMatrix; -pub fn mat_vec_mul_sparse>( - m: SparseMatrixVar, - v: Vec, -) -> Vec { - let mut res = vec![FV::zero(); m.n_rows]; - for (row_i, row) in m.coeffs.iter().enumerate() { - for (value, col_i) in row.iter() { - if value.value().unwrap() == F::one() { - // no need to multiply by 1 - res[row_i] += v[*col_i].clone(); - continue; - } - res[row_i] += value.clone().mul(&v[*col_i].clone()); - } - } - res +pub trait MatrixGadget { + fn mul_vector(&self, v: &[FV]) -> Result, SynthesisError>; } -pub fn vec_add>( - a: &Vec, - b: &Vec, -) -> Result, SynthesisError> { - if a.len() != b.len() { - return Err(SynthesisError::Unsatisfiable); - } - let mut r: Vec = vec![FV::zero(); a.len()]; - for i in 0..a.len() { - r[i] = a[i].clone() + b[i].clone(); - } - Ok(r) + +pub trait VectorGadget { + fn add(&self, other: &Self) -> Result, SynthesisError>; + + fn mul_scalar(&self, other: &FV) -> Result, SynthesisError>; + + fn hadamard(&self, other: &Self) -> Result, SynthesisError>; } -pub fn vec_scalar_mul>( - vec: &Vec, - c: &FV, -) -> Vec { - let mut result = vec![FV::zero(); vec.len()]; - for (i, a) in vec.iter().enumerate() { - result[i] = a.clone() * c; + +impl VectorGadget> for [FpVar] { + fn add(&self, other: &Self) -> Result>, SynthesisError> { + if self.len() != other.len() { + return Err(SynthesisError::Unsatisfiable); + } + Ok(self.iter().zip(other.iter()).map(|(a, b)| a + b).collect()) } - result -} -pub fn hadamard>( - a: &Vec, - b: &Vec, -) -> Result, SynthesisError> { - if a.len() != b.len() { - return Err(SynthesisError::Unsatisfiable); + + fn mul_scalar(&self, c: &FpVar) -> Result>, SynthesisError> { + Ok(self.iter().map(|a| a * c).collect()) } - let mut r: Vec = vec![FV::zero(); a.len()]; - for i in 0..a.len() { - r[i] = a[i].clone() * b[i].clone(); + + fn hadamard(&self, other: &Self) -> Result>, SynthesisError> { + if self.len() != other.len() { + return Err(SynthesisError::Unsatisfiable); + } + Ok(self.iter().zip(other.iter()).map(|(a, b)| a * b).collect()) } - Ok(r) } #[derive(Debug, Clone)] -pub struct SparseMatrixVar> { +pub struct SparseMatrixVar> { _f: PhantomData, _cf: PhantomData, _fv: PhantomData, @@ -77,7 +56,7 @@ impl AllocVar, CF> for SparseMatrixVar where F: PrimeField, CF: PrimeField, - FV: FieldVar, + 
FV: AllocVar, { fn new_variable>>( cs: impl Into>, @@ -108,3 +87,23 @@ where }) } } + +impl MatrixGadget> for SparseMatrixVar> { + fn mul_vector(&self, v: &[FpVar]) -> Result>, SynthesisError> { + Ok(self + .coeffs + .iter() + .map(|row| { + let products = row + .iter() + .map(|(value, col_i)| value * &v[*col_i]) + .collect::>(); + if products.is_constant() { + FpVar::constant(products.value().unwrap_or_default().into_iter().sum()) + } else { + products.iter().sum() + } + }) + .collect()) + } +}
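For reference, a minimal out-of-circuit sketch (not part of this patch) of the computation the new gadgets encode: each row of the sparse matrix is a list of (coefficient, column-index) pairs, mirroring `SparseMatrixVar::coeffs`, the matrix-vector product sums coeff·v[col] per row, and the relaxed R1CS relation Az∘Bz == u·Cz + E is then checked element-wise, as `RelaxedR1CSGadget::check_native` does in-circuit. The modulus, constraint and witness values below are made up for illustration; note also that the in-circuit `mul_vector` above additionally special-cases rows whose products are all constants, summing them natively and allocating a single `FpVar::constant`.

// Plain-integer analogue of `mul_vector` + the relaxed R1CS check (illustration only).
const P: u64 = 101; // toy prime standing in for the constraint field

type SparseRow = Vec<(u64, usize)>; // (coefficient, column index), as in `SparseMatrixVar::coeffs`

// row-wise sparse matrix-vector product: sum of coeff * v[col] per row, mod P
fn mul_vector(m: &[SparseRow], v: &[u64]) -> Vec<u64> {
    m.iter()
        .map(|row| row.iter().fold(0u64, |acc, &(coeff, col)| (acc + coeff * v[col]) % P))
        .collect()
}

fn main() {
    // one constraint, z[1] * z[2] == z[3], over the extended witness z = (u, x…, W…)
    let a: Vec<SparseRow> = vec![vec![(1, 1)]];
    let b: Vec<SparseRow> = vec![vec![(1, 2)]];
    let c: Vec<SparseRow> = vec![vec![(1, 3)]];
    let (u, e) = (2u64, 89u64); // relaxed instance: u ≠ 1, and the error term E absorbs the slack
    let z = [u, 3, 4, 12];

    let (az, bz, cz) = (mul_vector(&a, &z), mul_vector(&b, &z), mul_vector(&c, &z));
    for i in 0..az.len() {
        // Az ∘ Bz == u·Cz + E, element-wise over the field
        assert_eq!((az[i] * bz[i]) % P, (u * cz[i] + e) % P);
    }
}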