
Fix with latest arkworks version. (#95)

Co-authored-by: Pratyush Mishra <pratyushmishra@berkeley.edu>
Michele Orrù, 1 year ago (committed by GitHub)
parent · commit 6d64f379a2
35 changed files with 448 additions and 391 deletions
  1. Cargo.toml (+0, -1)
  2. benches/bench.rs (+5, -2)
  3. src/bits/boolean.rs (+2, -2)
  4. src/bits/uint.rs (+5, -6)
  5. src/bits/uint8.rs (+17, -10)
  6. src/fields/fp/cmp.rs (+1, -2)
  7. src/fields/fp/mod.rs (+14, -7)
  8. src/fields/fp12.rs (+5, -3)
  9. src/fields/fp3.rs (+4, -2)
  10. src/fields/fp4.rs (+4, -2)
  11. src/fields/fp6_2over3.rs (+1, -2)
  12. src/fields/fp6_3over2.rs (+4, -2)
  13. src/fields/mod.rs (+2, -2)
  14. src/fields/nonnative/allocated_field_var.rs (+39, -27)
  15. src/fields/nonnative/allocated_mul_result.rs (+13, -10)
  16. src/fields/nonnative/field_var.rs (+28, -23)
  17. src/fields/nonnative/mod.rs (+42, -20)
  18. src/fields/nonnative/mul_result.rs (+3, -2)
  19. src/fields/nonnative/params.rs (+3, -2)
  20. src/fields/nonnative/reduce.rs (+16, -12)
  21. src/groups/curves/short_weierstrass/bls12/mod.rs (+7, -4)
  22. src/groups/curves/short_weierstrass/mod.rs (+48, -38)
  23. src/groups/curves/short_weierstrass/non_zero_affine.rs (+28, -40)
  24. src/groups/curves/twisted_edwards/mod.rs (+8, -5)
  25. src/lib.rs (+5, -1)
  26. src/poly/domain/mod.rs (+18, -14)
  27. src/poly/domain/vanishing_poly.rs (+10, -10)
  28. src/poly/evaluations/univariate/lagrange_interpolator.rs (+25, -27)
  29. src/poly/evaluations/univariate/mod.rs (+56, -39)
  30. src/poly/polynomial/univariate/dense.rs (+12, -13)
  31. src/select.rs (+4, -2)
  32. tests/arithmetic_tests.rs (+11, -3)
  33. tests/from_test.rs (+5, -3)
  34. tests/to_bytes_test.rs (+0, -50)
  35. tests/to_constraint_field_test.rs (+3, -3)

+ 0
- 1
Cargo.toml

@ -58,7 +58,6 @@ ark-std = { git = "https://github.com/arkworks-rs/std" }
ark-ec = { git = "https://github.com/arkworks-rs/algebra" }
ark-ff = { git = "https://github.com/arkworks-rs/algebra" }
ark-poly = { git = "https://github.com/arkworks-rs/algebra" }
ark-serialize = { git = "https://github.com/arkworks-rs/algebra" }
ark-test-curves = { git = "https://github.com/arkworks-rs/algebra" }
ark-bls12-381 = { git = "https://github.com/arkworks-rs/curves" }
ark-bls12-377 = { git = "https://github.com/arkworks-rs/curves" }

+ 5
- 2
benches/bench.rs

@ -1,6 +1,9 @@
use ark_ff::PrimeField;
use ark_r1cs_std::fields::nonnative::NonNativeFieldVar;
use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::FieldVar};
use ark_r1cs_std::{
alloc::AllocVar,
eq::EqGadget,
fields::{nonnative::NonNativeFieldVar, FieldVar},
};
use ark_relations::{
ns,
r1cs::{ConstraintSystem, ConstraintSystemRef, OptimizationGoal},

+ 2
- 2
src/bits/boolean.rs

@ -608,7 +608,8 @@ impl Boolean {
}
}
/// Convert a little-endian bitwise representation of a field element to `FpVar<F>`
/// Convert a little-endian bitwise representation of a field element to
/// `FpVar<F>`
#[tracing::instrument(target = "r1cs", skip(bits))]
pub fn le_bits_to_fp_var(bits: &[Self]) -> Result<FpVar<F>, SynthesisError>
where
@ -761,7 +762,6 @@ impl Boolean {
/// # Ok(())
/// # }
/// ```
///
#[tracing::instrument(target = "r1cs", skip(first, second))]
pub fn select<T: CondSelectGadget<F>>(
&self,

+ 5
- 6
src/bits/uint.rs

@ -7,8 +7,7 @@ macro_rules! make_uint {
#[doc = " type."]
pub mod $mod_name {
use ark_ff::{Field, One, PrimeField, Zero};
use core::borrow::Borrow;
use core::convert::TryFrom;
use core::{borrow::Borrow, convert::TryFrom};
use num_bigint::BigUint;
use num_traits::cast::ToPrimitive;
@ -87,7 +86,6 @@ macro_rules! make_uint {
/// Construct `Self` from a slice of `Boolean`s.
///
/// # Panics
///
#[doc = "This method panics if `bits.len() != "]
#[doc = $num_bits_doc]
#[doc = "`."]
@ -142,8 +140,8 @@ macro_rules! make_uint {
/// Outputs `self ^ other`.
///
/// If at least one of `self` and `other` are constants, then this method
/// *does not* create any constraints or variables.
/// If at least one of `self` and `other` are constants, then this
/// method *does not* create any constraints or variables.
#[tracing::instrument(target = "r1cs", skip(self, other))]
pub fn xor(&self, other: &Self) -> Result<Self, SynthesisError> {
let mut result = self.clone();
@ -225,7 +223,8 @@ macro_rules! make_uint {
Boolean::Not(ref bit) => {
all_constants = false;
// Add coeff * (1 - bit_gadget) = coeff * ONE - coeff * bit_gadget
// Add coeff * (1 - bit_gadget) = coeff * ONE - coeff *
// bit_gadget
lc = lc + (coeff, Variable::One) - (coeff, bit.variable());
},
Boolean::Constant(bit) => {

+ 17
- 10
src/bits/uint8.rs

@ -2,8 +2,11 @@ use ark_ff::{Field, PrimeField, ToConstraintField};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use crate::fields::fp::{AllocatedFp, FpVar};
use crate::{prelude::*, Assignment, ToConstraintFieldGadget, Vec};
use crate::{
fields::fp::{AllocatedFp, FpVar},
prelude::*,
Assignment, ToConstraintFieldGadget, Vec,
};
use core::{borrow::Borrow, convert::TryFrom};
/// Represents an interpretation of 8 `Boolean` objects as an
@ -335,9 +338,9 @@ impl AllocVar for UInt8 {
}
}
/// Parses the `Vec<UInt8<ConstraintF>>` in fixed-sized `ConstraintF::MODULUS_BIT_SIZE - 1` chunks and
/// converts each chunk, which is assumed to be little-endian, to its `FpVar<ConstraintF>`
/// representation.
/// Parses the `Vec<UInt8<ConstraintF>>` in fixed-sized
/// `ConstraintF::MODULUS_BIT_SIZE - 1` chunks and converts each chunk, which is
/// assumed to be little-endian, to its `FpVar<ConstraintF>` representation.
/// This is the gadget counterpart to the `[u8]` implementation of
/// [ToConstraintField](ark_ff::ToConstraintField).
impl<ConstraintF: PrimeField> ToConstraintFieldGadget<ConstraintF> for [UInt8<ConstraintF>] {
@ -360,13 +363,17 @@ impl ToConstraintFieldGadget for Vec
#[cfg(test)]
mod test {
use super::UInt8;
use crate::fields::fp::FpVar;
use crate::prelude::AllocationMode::{Constant, Input, Witness};
use crate::{prelude::*, ToConstraintFieldGadget, Vec};
use crate::{
fields::fp::FpVar,
prelude::{
AllocationMode::{Constant, Input, Witness},
*,
},
ToConstraintFieldGadget, Vec,
};
use ark_ff::{PrimeField, ToConstraintField};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_std::rand::distributions::Uniform;
use ark_std::rand::Rng;
use ark_std::rand::{distributions::Uniform, Rng};
use ark_test_curves::bls12_381::Fr;
#[test]

+ 1
- 2
src/fields/fp/cmp.rs

@ -153,8 +153,7 @@ impl FpVar {
#[cfg(test)]
mod test {
use ark_std::cmp::Ordering;
use ark_std::rand::Rng;
use ark_std::{cmp::Ordering, rand::Rng};
use crate::{alloc::AllocVar, fields::fp::FpVar};
use ark_ff::{PrimeField, UniformRand};

+ 14
- 7
src/fields/fp/mod.rs

@ -127,7 +127,8 @@ impl AllocatedFp {
/// Add many allocated Fp elements together.
///
/// This does not create any constraints and only creates one linear combination.
/// This does not create any constraints and only creates one linear
/// combination.
pub fn addmany<'a, I: Iterator<Item = &'a Self>>(iter: I) -> Self {
let mut cs = ConstraintSystemRef::None;
let mut has_value = true;
@ -918,7 +919,9 @@ impl ToBytesGadget for FpVar {
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
match self {
Self::Constant(c) => Ok(UInt8::constant_vec(&ark_ff::to_bytes![c].unwrap())),
Self::Constant(c) => Ok(UInt8::constant_vec(
c.into_bigint().to_bytes_be().as_slice(),
)),
Self::Var(v) => v.to_bytes(),
}
}
@ -926,7 +929,9 @@ impl ToBytesGadget for FpVar {
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
match self {
Self::Constant(c) => Ok(UInt8::constant_vec(&ark_ff::to_bytes![c].unwrap())),
Self::Constant(c) => Ok(UInt8::constant_vec(
c.into_bigint().to_bytes_be().as_slice(),
)),
Self::Var(v) => v.to_non_unique_bytes(),
}
}
@ -1060,10 +1065,12 @@ impl<'a, F: PrimeField> Sum<&'a FpVar> for FpVar {
#[cfg(test)]
mod test {
use crate::alloc::{AllocVar, AllocationMode};
use crate::eq::EqGadget;
use crate::fields::fp::FpVar;
use crate::R1CSVar;
use crate::{
alloc::{AllocVar, AllocationMode},
eq::EqGadget,
fields::fp::FpVar,
R1CSVar,
};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{UniformRand, Zero};
use ark_test_curves::bls12_381::Fr;

+ 5
- 3
src/fields/fp12.rs

@ -1,7 +1,9 @@
use crate::fields::{fp2::Fp2Var, fp6_3over2::Fp6Var, quadratic_extension::*, FieldVar};
use ark_ff::fields::{fp12_2over3over2::*, Field};
use ark_ff::fp6_3over2::Fp6Config;
use ark_ff::QuadExtConfig;
use ark_ff::{
fields::{fp12_2over3over2::*, Field},
fp6_3over2::Fp6Config,
QuadExtConfig,
};
use ark_relations::r1cs::SynthesisError;
/// A degree-12 extension field constructed as the tower of a

+ 4
- 2
src/fields/fp3.rs

@ -1,6 +1,8 @@
use crate::fields::{cubic_extension::*, fp::FpVar};
use ark_ff::fields::{CubicExtConfig, Fp3ConfigWrapper};
use ark_ff::Fp3Config;
use ark_ff::{
fields::{CubicExtConfig, Fp3ConfigWrapper},
Fp3Config,
};
/// A cubic extension field constructed over a prime field.
/// This is the R1CS equivalent of `ark_ff::Fp3<P>`.

+ 4
- 2
src/fields/fp4.rs

@ -1,6 +1,8 @@
use crate::fields::{fp2::Fp2Var, quadratic_extension::*};
use ark_ff::fields::{Fp4ConfigWrapper, QuadExtConfig};
use ark_ff::Fp4Config;
use ark_ff::{
fields::{Fp4ConfigWrapper, QuadExtConfig},
Fp4Config,
};
/// A quartic extension field constructed as the tower of a
/// quadratic extension over a quadratic extension field.

+ 1
- 2
src/fields/fp6_2over3.rs

@ -1,6 +1,5 @@
use crate::fields::{fp3::Fp3Var, quadratic_extension::*};
use ark_ff::fields::fp6_2over3::*;
use ark_ff::QuadExtConfig;
use ark_ff::{fields::fp6_2over3::*, QuadExtConfig};
/// A sextic extension field constructed as the tower of a
/// quadratic extension over a cubic extension field.

+ 4
- 2
src/fields/fp6_3over2.rs

@ -1,6 +1,8 @@
use crate::fields::{cubic_extension::*, fp2::*};
use ark_ff::fields::{fp6_3over2::*, Fp2};
use ark_ff::CubicExtConfig;
use ark_ff::{
fields::{fp6_3over2::*, Fp2},
CubicExtConfig,
};
use ark_relations::r1cs::SynthesisError;
use ark_std::ops::MulAssign;

+ 2
- 2
src/fields/mod.rs

@ -20,8 +20,8 @@ pub mod quadratic_extension;
/// That is, it implements the R1CS equivalent of `ark_ff::Fp*`.
pub mod fp;
/// This module contains a generic implementation of "nonnative" prime field variables.
/// It emulates `Fp` arithmetic using `Fq` operations, where `p != q`.
/// This module contains a generic implementation of "nonnative" prime field
/// variables. It emulates `Fp` arithmetic using `Fq` operations, where `p != q`.
pub mod nonnative;
/// This module contains a generic implementation of the degree-12 tower

+ 39
- 27
src/fields/nonnative/allocated_field_var.rs

@ -1,18 +1,23 @@
use super::params::{get_params, OptimizationType};
use super::reduce::{bigint_to_basefield, limbs_to_bigint, Reducer};
use super::AllocatedNonNativeFieldMulResultVar;
use crate::fields::fp::FpVar;
use crate::prelude::*;
use crate::ToConstraintFieldGadget;
use super::{
params::{get_params, OptimizationType},
reduce::{bigint_to_basefield, limbs_to_bigint, Reducer},
AllocatedNonNativeFieldMulResultVar,
};
use crate::{fields::fp::FpVar, prelude::*, ToConstraintFieldGadget};
use ark_ff::{BigInteger, PrimeField};
use ark_relations::r1cs::{OptimizationGoal, Result as R1CSResult};
use ark_relations::{
ns,
r1cs::{ConstraintSystemRef, Namespace, SynthesisError},
r1cs::{
ConstraintSystemRef, Namespace, OptimizationGoal, Result as R1CSResult, SynthesisError,
},
};
use ark_std::{
borrow::Borrow,
cmp::{max, min},
marker::PhantomData,
vec,
vec::Vec,
};
use ark_std::cmp::{max, min};
use ark_std::marker::PhantomData;
use ark_std::{borrow::Borrow, vec, vec::Vec};
/// The allocated version of `NonNativeFieldVar` (introduced below)
#[derive(Debug)]
@ -22,9 +27,12 @@ pub struct AllocatedNonNativeFieldVar
pub cs: ConstraintSystemRef<BaseField>,
/// The limbs, each of which is a BaseField gadget.
pub limbs: Vec<FpVar<BaseField>>,
/// Number of additions done over this gadget, using which the gadget decides when to reduce.
/// Number of additions done over this gadget, using which the gadget
/// decides when to reduce.
pub num_of_additions_over_normal_form: BaseField,
/// Whether the limb representation is the normal form (using only the bits specified in the parameters, and the representation is strictly within the range of TargetField).
/// Whether the limb representation is the normal form (using only the bits
/// specified in the parameters, and the representation is strictly within
/// the range of TargetField).
pub is_in_the_normal_form: bool,
#[doc(hidden)]
pub target_phantom: PhantomData<TargetField>,
@ -51,8 +59,9 @@ impl
let mut base_repr: <TargetField as PrimeField>::BigInt = TargetField::one().into_bigint();
// Convert 2^{(params.bits_per_limb - 1)} into the TargetField and then double the base
// This is because 2^{(params.bits_per_limb)} might indeed be larger than the target field's prime.
// Convert 2^{(params.bits_per_limb - 1)} into the TargetField and then double
// the base This is because 2^{(params.bits_per_limb)} might indeed be
// larger than the target field's prime.
base_repr.muln((params.bits_per_limb - 1) as u32);
let mut base: TargetField = TargetField::from_bigint(base_repr).unwrap();
base = base + &base;
@ -303,7 +312,8 @@ impl
}
/// Convert a `TargetField` element into limbs (not constraints)
/// This is an internal function that would be reused by a number of other functions
/// This is an internal function that would be reused by a number of other
/// functions
pub fn get_limbs_representations(
elem: &TargetField,
optimization_type: OptimizationType,
@ -340,8 +350,10 @@ impl
Ok(limbs)
}
/// for advanced use, multiply and output the intermediate representations (without reduction)
/// This intermediate representations can be added with each other, and they can later be reduced back to the `NonNativeFieldVar`.
/// for advanced use, multiply and output the intermediate representations
/// (without reduction) This intermediate representations can be added
/// with each other, and they can later be reduced back to the
/// `NonNativeFieldVar`.
#[tracing::instrument(target = "r1cs")]
pub fn mul_without_reduce(
&self,
@ -532,7 +544,8 @@ impl
}
}
/// Allocates a new variable, but does not check that the allocation's limbs are in-range.
/// Allocates a new variable, but does not check that the allocation's limbs
/// are in-range.
fn new_variable_unchecked<T: Borrow<TargetField>>(
cs: impl Into<Namespace<BaseField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
@ -579,8 +592,8 @@ impl
})
}
/// Check that this element is in-range; i.e., each limb is in-range, and the whole number is
/// less than the modulus.
/// Check that this element is in-range; i.e., each limb is in-range, and
/// the whole number is less than the modulus.
///
/// Returns the bits of the element, in little-endian form
fn enforce_in_range(
@ -620,9 +633,10 @@ impl
Ok(bits)
}
/// Allocates a new non-native field witness with value given by the function `f`. Enforces
/// that the field element has value in `[0, modulus)`, and returns the bits of its binary
/// representation. The bits are in little-endian (i.e., the bit at index 0 is the LSB) and the
/// Allocates a new non-native field witness with value given by the
/// function `f`. Enforces that the field element has value in `[0, modulus)`,
/// and returns the bits of its binary representation.
/// The bits are in little-endian (i.e., the bit at index 0 is the LSB) and the
/// bit-vector is empty in non-witness allocation modes.
pub fn new_witness_with_le_bits<T: Borrow<TargetField>>(
cs: impl Into<Namespace<BaseField>>,
@ -902,9 +916,7 @@ impl ToConstraintFieldGadget
}
}
/*
* Implementation of a few traits
*/
// Implementation of a few traits
impl<TargetField: PrimeField, BaseField: PrimeField> Clone
for AllocatedNonNativeFieldVar<TargetField, BaseField>

+ 13
- 10
src/fields/nonnative/allocated_mul_result.rs

@ -1,13 +1,15 @@
use super::params::{get_params, OptimizationType};
use super::reduce::{bigint_to_basefield, limbs_to_bigint, Reducer};
use super::AllocatedNonNativeFieldVar;
use crate::fields::fp::FpVar;
use crate::prelude::*;
use super::{
params::{get_params, OptimizationType},
reduce::{bigint_to_basefield, limbs_to_bigint, Reducer},
AllocatedNonNativeFieldVar,
};
use crate::{fields::fp::FpVar, prelude::*};
use ark_ff::PrimeField;
use ark_relations::r1cs::{OptimizationGoal, Result as R1CSResult};
use ark_relations::{ns, r1cs::ConstraintSystemRef};
use ark_std::marker::PhantomData;
use ark_std::vec::Vec;
use ark_relations::{
ns,
r1cs::{ConstraintSystemRef, OptimizationGoal, Result as R1CSResult},
};
use ark_std::{marker::PhantomData, vec::Vec};
use num_bigint::BigUint;
/// The allocated form of `NonNativeFieldMulResultVar` (introduced below)
@ -84,7 +86,8 @@ impl
Ok(res)
}
/// Constraints for reducing the result of a multiplication mod p, to get an original representation.
/// Constraints for reducing the result of a multiplication mod p, to get an
/// original representation.
pub fn reduce(&self) -> R1CSResult<AllocatedNonNativeFieldVar<TargetField, BaseField>> {
let params = get_params(
TargetField::MODULUS_BIT_SIZE as usize,

+ 28
- 23
src/fields/nonnative/field_var.rs

@ -1,18 +1,20 @@
use super::params::OptimizationType;
use super::{AllocatedNonNativeFieldVar, NonNativeFieldMulResultVar};
use crate::boolean::Boolean;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::prelude::*;
use crate::{R1CSVar, ToConstraintFieldGadget};
use ark_ff::to_bytes;
use ark_ff::PrimeField;
use ark_relations::r1cs::Result as R1CSResult;
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use ark_std::hash::{Hash, Hasher};
use ark_std::{borrow::Borrow, vec::Vec};
/// A gadget for representing non-native (`TargetField`) field elements over the constraint field (`BaseField`).
use super::{params::OptimizationType, AllocatedNonNativeFieldVar, NonNativeFieldMulResultVar};
use crate::{
boolean::Boolean,
fields::{fp::FpVar, FieldVar},
prelude::*,
R1CSVar, ToConstraintFieldGadget,
};
use ark_ff::{BigInteger, PrimeField};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, Result as R1CSResult, SynthesisError};
use ark_std::{
borrow::Borrow,
hash::{Hash, Hasher},
vec::Vec,
};
/// A gadget for representing non-native (`TargetField`) field elements over the
/// constraint field (`BaseField`).
#[derive(Clone, Debug)]
#[must_use]
pub enum NonNativeFieldVar<TargetField: PrimeField, BaseField: PrimeField> {
@ -144,9 +146,6 @@ impl FieldVar
}
}
/****************************************************************************/
/****************************************************************************/
impl_bounded_ops!(
NonNativeFieldVar<TargetField, BaseField>,
TargetField,
@ -213,8 +212,8 @@ impl_bounded_ops!(
(TargetField: PrimeField, BaseField: PrimeField),
);
/****************************************************************************/
/****************************************************************************/
/// *************************************************************************
/// *************************************************************************
impl<TargetField: PrimeField, BaseField: PrimeField> EqGadget<BaseField>
for NonNativeFieldVar<TargetField, BaseField>
@ -313,7 +312,10 @@ impl ToBytesGadget
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> R1CSResult<Vec<UInt8<BaseField>>> {
match self {
Self::Constant(c) => Ok(UInt8::constant_vec(&to_bytes![c].unwrap())),
Self::Constant(c) => Ok(UInt8::constant_vec(
c.into_bigint().to_bytes_be().as_slice(),
)),
Self::Var(v) => v.to_bytes(),
}
}
@ -321,7 +323,9 @@ impl ToBytesGadget
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> R1CSResult<Vec<UInt8<BaseField>>> {
match self {
Self::Constant(c) => Ok(UInt8::constant_vec(&to_bytes![c].unwrap())),
Self::Constant(c) => Ok(UInt8::constant_vec(
c.into_bigint().to_bytes_be().as_slice(),
)),
Self::Var(v) => v.to_non_unique_bytes(),
}
}
@ -440,7 +444,8 @@ impl ToConstraintFieldGadget
fn to_constraint_field(&self) -> R1CSResult<Vec<FpVar<BaseField>>> {
// Use one group element to represent the optimization type.
//
// By default, the constant is converted in the weight-optimized type, because it results in fewer elements.
// By default, the constant is converted in the weight-optimized type, because
// it results in fewer elements.
match self {
Self::Constant(c) => Ok(AllocatedNonNativeFieldVar::get_limbs_representations(
c,

+ 42
- 20
src/fields/nonnative/mod.rs

@ -1,18 +1,27 @@
//!
//! ## Overview
//!
//! This module implements a field gadget for a prime field `Fp` over another prime field `Fq` where `p != q`.
//! This module implements a field gadget for a prime field `Fp` over another
//! prime field `Fq` where `p != q`.
//!
//! When writing constraint systems for many cryptographic proofs, we are restricted to a native field (e.g., the scalar field of the pairing-friendly curve).
//! This can be inconvenient; for example, the recursive composition of proofs via cycles of curves requires the verifier to compute over a non-native field.
//! When writing constraint systems for many cryptographic proofs, we are
//! restricted to a native field (e.g., the scalar field of the pairing-friendly
//! curve). This can be inconvenient; for example, the recursive composition of
//! proofs via cycles of curves requires the verifier to compute over a
//! non-native field.
//!
//! The library makes it possible to write computations over a non-native field in the same way one would write computations over the native field. This naturally introduces additional overhead, which we minimize using a variety of optimizations. (Nevertheless, the overhead is still substantial, and native fields should be used where possible.)
//! The library makes it possible to write computations over a non-native field
//! in the same way one would write computations over the native field. This
//! naturally introduces additional overhead, which we minimize using a variety
//! of optimizations. (Nevertheless, the overhead is still substantial, and
//! native fields should be used where possible.)
//!
//! ## Usage
//!
//! Because [`NonNativeFieldVar`] implements the [`FieldVar`] trait in arkworks, we can treat it like a native field variable ([`FpVar`]).
//! Because [`NonNativeFieldVar`] implements the [`FieldVar`] trait in arkworks,
//! we can treat it like a native field variable ([`FpVar`]).
//!
//! We can do the standard field operations, such as `+`, `-`, and `*`. See the following example:
//! We can do the standard field operations, such as `+`, `-`, and `*`. See the
//! following example:
//!
//! ```rust
//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
@ -47,15 +56,21 @@
//!
//! ## Advanced optimization
//!
//! After each multiplication, our library internally performs a *reduce* operation,
//! which reduces an intermediate type [`NonNativeFieldMulResultVar`] to the normalized type [`NonNativeFieldVar`].
//! This enables a user to seamlessly perform a sequence of operations without worrying about the underlying details.
//! After each multiplication, our library internally performs a *reduce*
//! operation, which reduces an intermediate type [`NonNativeFieldMulResultVar`]
//! to the normalized type [`NonNativeFieldVar`]. This enables a user to
//! seamlessly perform a sequence of operations without worrying about the
//! underlying details.
//!
//! However, this operation is expensive and is sometimes avoidable. We can reduce the number of constraints by using this intermediate type, which only supports additions. To multiply, it must be reduced back to [`NonNativeFieldVar`]. See below for a skeleton example.
//! However, this operation is expensive and is sometimes avoidable. We can
//! reduce the number of constraints by using this intermediate type, which only
//! supports additions. To multiply, it must be reduced back to
//! [`NonNativeFieldVar`]. See below for a skeleton example.
//!
//! ---
//!
//! To compute `a * b + c * d`, the straightforward (but more expensive) implementation is as follows:
//! To compute `a * b + c * d`, the straightforward (but more expensive)
//! implementation is as follows:
//!
//! ```ignore
//! let a_times_b = &a * &b;
@ -67,7 +82,8 @@
//!
//! ---
//!
//! We can save one reduction by using the [`NonNativeFieldMulResultVar`], as follows:
//! We can save one reduction by using the [`NonNativeFieldMulResultVar`], as
//! follows:
//!
//! ```ignore
//! let a_times_b = a.mul_without_reduce(&b)?;
@ -75,26 +91,31 @@
//! let res = (&a_times_b + &c_times_d)?.reduce()?;
//! ```
//!
//! It performs only one *reduce* operation and is roughly 2x faster than the first implementation.
//! It performs only one *reduce* operation and is roughly 2x faster than the
//! first implementation.
//!
//! ## Inspiration and basic design
//!
//! This implementation employs the standard idea of using multiple **limbs** to represent an element of the target field. For example, an element in the TargetField may be represented by three BaseField elements (i.e., the limbs).
//! This implementation employs the standard idea of using multiple **limbs** to
//! represent an element of the target field. For example, an element in the
//! TargetField may be represented by three BaseField elements (i.e., the
//! limbs).
//!
//! ```text
//! TargetField -> limb 1, limb 2, and limb 3 (each is a BaseField element)
//! ```
//!
//! After some computation, the limbs become saturated and need to be **reduced**, in order to engage in more computation.
//! After some computation, the limbs become saturated and need to be
//! **reduced**, in order to engage in more computation.
//!
//! We heavily use the optimization techniques in [\[KPS18\]](https://akosba.github.io/papers/xjsnark.pdf) and [\[OWWB20\]](https://eprint.iacr.org/2019/1494).
//! Both works have their own open-source libraries:
//! [xJsnark](https://github.com/akosba/xjsnark) and
//! [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat).
//! Compared with these, this module works with the `arkworks` ecosystem.
//! It also provides the option (based on an `optimization_goal` for the constraint system) to optimize
//! for constraint density instead of number of constraints, which improves efficiency in
//! proof systems like [Marlin](https://github.com/arkworks-rs/marlin).
//! It also provides the option (based on an `optimization_goal` for the
//! constraint system) to optimize for constraint density instead of number of
//! constraints, which improves efficiency in proof systems like [Marlin](https://github.com/arkworks-rs/marlin).
//!
//! ## References
//! \[KPS18\]: A. E. Kosba, C. Papamanthou, and E. Shi. "xJsnark: a framework for efficient verifiable computation," in *Proceedings of the 39th Symposium on Security and Privacy*, ser. S&P ’18, 2018, pp. 944–961.
@ -160,7 +181,8 @@ pub(crate) use overhead;
/// Parameters for a specific `NonNativeFieldVar` instantiation
#[derive(Clone, Debug)]
pub struct NonNativeFieldConfig {
/// The number of limbs (`BaseField` elements) used to represent a `TargetField` element. Highest limb first.
/// The number of limbs (`BaseField` elements) used to represent a
/// `TargetField` element. Highest limb first.
pub num_limbs: usize,
/// The number of bits of the limb
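
The `NonNativeFieldConfig` above fixes `num_limbs` and `bits_per_limb`; the limb decomposition it describes (highest limb first) can be illustrated without any arkworks types. A toy sketch over `u128`:

```rust
/// Toy illustration of the limb layout described above: split a value into
/// `num_limbs` chunks of `bits_per_limb` bits each, highest limb first.
/// The real gadget does this on field elements via `get_limbs_representations`.
fn to_limbs(mut value: u128, num_limbs: usize, bits_per_limb: usize) -> Vec<u128> {
    assert!(bits_per_limb < 128);
    let mask = (1u128 << bits_per_limb) - 1;
    let mut limbs = vec![0u128; num_limbs];
    // Fill from the last (least-significant) limb backwards, so that index 0
    // ends up holding the most-significant limb.
    for limb in limbs.iter_mut().rev() {
        *limb = value & mask;
        value >>= bits_per_limb;
    }
    limbs
}

fn main() {
    // 0xABCD split into four 4-bit limbs, highest first.
    assert_eq!(to_limbs(0xABCD, 4, 4), vec![0xA, 0xB, 0xC, 0xD]);
}
```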

+ 3
- 2
src/fields/nonnative/mul_result.rs

@ -2,8 +2,9 @@ use super::{AllocatedNonNativeFieldMulResultVar, NonNativeFieldVar};
use ark_ff::PrimeField;
use ark_relations::r1cs::Result as R1CSResult;
/// An intermediate representation especially for the result of a multiplication, containing more limbs.
/// It is intended for advanced usage to improve the efficiency.
/// An intermediate representation especially for the result of a
/// multiplication, containing more limbs. It is intended for advanced usage to
/// improve the efficiency.
///
/// That is, instead of calling `mul`, one can call `mul_without_reduce` to
/// obtain this intermediate representation, which can still be added.

+ 3
- 2
src/fields/nonnative/params.rs

@ -1,6 +1,7 @@
use super::NonNativeFieldConfig;
/// Obtain the parameters from a `ConstraintSystem`'s cache or generate a new one
/// Obtain the parameters from a `ConstraintSystem`'s cache or generate a new
/// one
#[must_use]
pub const fn get_params(
target_field_size: usize,
@ -64,7 +65,7 @@ pub const fn find_parameters(
OptimizationType::Constraints => {
this_cost += target_field_prime_bit_length; // allocation of k
this_cost += target_field_prime_bit_length + num_of_limbs; // allocation of r
//this_cost += 2 * num_of_limbs - 1; // compute kp
// this_cost += 2 * num_of_limbs - 1; // compute kp
this_cost += num_of_groups + (num_of_groups - 1) * (limb_size * 2 + surfeit) + 1;
// equality check
},

+ 16
- 12
src/fields/nonnative/reduce.rs

@ -1,10 +1,11 @@
use super::overhead;
use super::params::get_params;
use super::AllocatedNonNativeFieldVar;
use crate::eq::EqGadget;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::{alloc::AllocVar, boolean::Boolean, R1CSVar};
use super::{overhead, params::get_params, AllocatedNonNativeFieldVar};
use crate::{
alloc::AllocVar,
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
R1CSVar,
};
use ark_ff::{biginteger::BigInteger, BitIteratorBE, One, PrimeField, Zero};
use ark_relations::{
ns,
@ -61,9 +62,10 @@ pub struct Reducer {
}
impl<TargetField: PrimeField, BaseField: PrimeField> Reducer<TargetField, BaseField> {
/// convert limbs to bits (take at most `BaseField::MODULUS_BIT_SIZE as usize - 1` bits)
/// This implementation would be more efficient than the original `to_bits`
/// or `to_non_unique_bits` since we enforce that some bits are always zero.
/// convert limbs to bits (take at most `BaseField::MODULUS_BIT_SIZE as
/// usize - 1` bits) This implementation would be more efficient than
/// the original `to_bits` or `to_non_unique_bits` since we enforce that
/// some bits are always zero.
#[tracing::instrument(target = "r1cs")]
pub fn limb_to_bits(
limb: &FpVar<BaseField>,
@ -147,7 +149,8 @@ impl Reducer
}
}
/// Reduction used before multiplication to reduce the representations in a way that allows efficient multiplication
/// Reduction used before multiplication to reduce the representations in a
/// way that allows efficient multiplication
#[tracing::instrument(target = "r1cs")]
pub fn pre_mul_reduce(
elem: &mut AllocatedNonNativeFieldVar<TargetField, BaseField>,
@ -244,7 +247,8 @@ impl Reducer
};
for (left_limb, right_limb) in left.iter().zip(right.iter()).rev() {
// note: the `rev` operation is here, so that the first limb (and the first groupped limb) will be the least significant limb.
// note: the `rev` operation is here, so that the first limb (and the first
// groupped limb) will be the least significant limb.
limb_pairs.push((left_limb.clone(), right_limb.clone()));
}

+ 7
- 4
src/groups/curves/short_weierstrass/bls12/mod.rs

@ -1,6 +1,6 @@
use ark_ec::{
bls12::{Bls12Parameters, G1Prepared, G2Prepared, TwistType},
short_weierstrass_jacobian::GroupAffine,
short_weierstrass::Affine as GroupAffine,
};
use ark_ff::{BitIteratorBE, Field, One};
use ark_relations::r1cs::{Namespace, SynthesisError};
@ -42,8 +42,11 @@ impl<P: Bls12Parameters> G1PreparedVar<P> {
let x = self.0.x.value()?;
let y = self.0.y.value()?;
let infinity = self.0.infinity.value()?;
let g = GroupAffine::new(x, y, infinity);
Ok(g.into())
let g = infinity
.then_some(GroupAffine::zero())
.unwrap_or(GroupAffine::new(x, y))
.into();
Ok(g)
}
/// Constructs `Self` from a `G1Var`.
@ -139,7 +142,7 @@ impl<P: Bls12Parameters> AllocVar<G2Prepared<P>, P::Fp> for G2PreparedVar<P> {
TwistType::D => {
let mut z_s = projective_coeffs
.iter()
.map(|(z, _, _)| *z)
.map(|(z, ..)| *z)
.collect::<Vec<_>>();
ark_ff::fields::batch_inversion(&mut z_s);
projective_coeffs
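
The `value()` rewrite above shows the affine-constructor change applied throughout this commit: the constructor no longer takes an infinity flag, so the identity is produced separately. A small sketch of that pattern, assuming the `short_weierstrass::{Affine, SWCurveConfig}` types named in the new imports, with the point at infinity obtained through the `Zero` trait as in the `short_weierstrass/mod.rs` hunks below:

```rust
use ark_ec::short_weierstrass::{Affine as SWAffine, SWCurveConfig};
use ark_ff::Zero;

/// Hypothetical helper: rebuild an affine point from its (x, y, infinity)
/// parts under the new two-argument constructor.
fn affine_from_parts<P: SWCurveConfig>(
    x: P::BaseField,
    y: P::BaseField,
    infinity: bool,
) -> SWAffine<P> {
    if infinity {
        SWAffine::zero()
    } else {
        SWAffine::new(x, y)
    }
}
```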

+ 48
- 38
src/groups/curves/short_weierstrass/mod.rs

@ -1,6 +1,8 @@
use ark_ec::{
short_weierstrass_jacobian::{GroupAffine as SWAffine, GroupProjective as SWProjective},
AffineCurve, ProjectiveCurve, SWModelParameters,
short_weierstrass::{
Affine as SWAffine, Projective as SWProjective, SWCurveConfig as SWModelParameters,
},
AffineCurve, ProjectiveCurve,
};
use ark_ff::{BigInteger, BitIteratorBE, Field, One, PrimeField, Zero};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
@ -22,12 +24,14 @@ pub mod mnt4;
/// family of bilinear groups.
pub mod mnt6;
/// This module provides a generic implementation of elliptic curve operations for points on
/// short-weierstrass curves in affine coordinates that **are not** equal to zero.
/// This module provides a generic implementation of elliptic curve operations
/// for points on short-weierstrass curves in affine coordinates that **are
/// not** equal to zero.
///
/// Note: this module is **unsafe** in general: it can synthesize unsatisfiable or
/// underconstrained constraint systems when a represented point _is_ equal to zero.
/// The [ProjectiveVar] gadget is the recommended way of working with elliptic curve points.
/// Note: this module is **unsafe** in general: it can synthesize unsatisfiable
/// or underconstrained constraint systems when a represented point _is_ equal
/// to zero. The [ProjectiveVar] gadget is the recommended way of working with
/// elliptic curve points.
pub mod non_zero_affine;
/// An implementation of arithmetic for Short Weierstrass curves that relies on
/// the complete formulae derived in the paper of
@ -89,11 +93,10 @@ where
/// Returns the value assigned to `self` in the underlying
/// constraint system.
pub fn value(&self) -> Result<SWAffine<P>, SynthesisError> {
Ok(SWAffine::new(
self.x.value()?,
self.y.value()?,
self.infinity.value()?,
))
Ok(match self.infinity.value()? {
true => SWAffine::zero(),
false => SWAffine::new(self.x.value()?, self.y.value()?),
})
}
}
@ -132,7 +135,7 @@ where
fn value(&self) -> Result<Self::Value, SynthesisError> {
let (x, y, z) = (self.x.value()?, self.y.value()?, self.z.value()?);
let result = if let Some(z_inv) = z.inverse() {
SWAffine::new(x * &z_inv, y * &z_inv, false)
SWAffine::new(x * &z_inv, y * &z_inv)
} else {
SWAffine::zero()
};
@ -169,8 +172,8 @@ where
let infinity = self.is_zero()?;
let zero_x = F::zero();
let zero_y = F::one();
// Allocate a variable whose value is either `self.z.inverse()` if the inverse exists,
// and is zero otherwise.
// Allocate a variable whose value is either `self.z.inverse()` if the inverse
// exists, and is zero otherwise.
let z_inv = F::new_witness(ark_relations::ns!(cs, "z_inverse"), || {
Ok(self.z.value()?.inverse().unwrap_or_else(P::BaseField::zero))
})?;
@ -230,7 +233,8 @@ where
Ok(Self::new(x, y, z))
}
/// Mixed addition, which is useful when `other = (x2, y2)` is known to have z = 1.
/// Mixed addition, which is useful when `other = (x2, y2)` is known to have
/// z = 1.
#[tracing::instrument(target = "r1cs", skip(self, other))]
pub(crate) fn add_mixed(&self, other: &NonZeroAffineVar<P, F>) -> Result<Self, SynthesisError> {
// Complete mixed addition formula from Renes-Costello-Batina 2015
@ -271,7 +275,8 @@ where
Ok(ProjectiveVar::new(x, y, z))
}
/// Computes a scalar multiplication with a little-endian scalar of size `P::ScalarField::MODULUS_BITS`.
/// Computes a scalar multiplication with a little-endian scalar of size
/// `P::ScalarField::MODULUS_BITS`.
#[tracing::instrument(
target = "r1cs",
skip(self, mul_result, multiple_of_power_of_two, bits)
@ -293,27 +298,30 @@ where
// We rely on *incomplete* affine formulae for partially computing this.
// However, we avoid exceptional edge cases because we partition the scalar
// into two chunks: one guaranteed to be less than p - 2, and the rest.
// We only use incomplete formulae for the first chunk, which means we avoid exceptions:
// We only use incomplete formulae for the first chunk, which means we avoid
// exceptions:
//
// `add_unchecked(a, b)` is incomplete when either `b.is_zero()`, or when
// `b = ±a`. During scalar multiplication, we don't hit either case:
// * `b = ±a`: `b = accumulator = k * a`, where `2 <= k < p - 1`.
// This implies that `k != p ± 1`, and so `b != (p ± 1) * a`.
// Because the group is finite, this in turn means that `b != ±a`, as required.
// * `a` or `b` is zero: for `a`, we handle the zero case after the loop; for `b`, notice
// that it is monotonically increasing, and furthermore, equals `k * a`, where
// `k != p = 0 mod p`.
// Unlike normal double-and-add, here we start off with a non-zero `accumulator`,
// because `NonZeroAffineVar::add_unchecked` doesn't support addition with `zero`.
// In more detail, we initialize `accumulator` to be the initial value of
// `multiple_of_power_of_two`. This ensures that all unchecked additions of `accumulator`
// with later values of `multiple_of_power_of_two` are safe.
// However, to do this correctly, we need to perform two steps:
// * We must skip the LSB, and instead proceed assuming that it was 1. Later, we will
// conditionally subtract the initial value of `accumulator`:
// if LSB == 0: subtract initial_acc_value; else, subtract 0.
// * Because we are assuming the first bit, we must double `multiple_of_power_of_two`.
// * `b = ±a`: `b = accumulator = k * a`, where `2 <= k < p - 1`. This implies
// that `k != p ± 1`, and so `b != (p ± 1) * a`. Because the group is finite,
// this in turn means that `b != ±a`, as required.
// * `a` or `b` is zero: for `a`, we handle the zero case after the loop; for
// `b`, notice that it is monotonically increasing, and furthermore, equals `k
// * a`, where `k != p = 0 mod p`.
// Unlike normal double-and-add, here we start off with a non-zero
// `accumulator`, because `NonZeroAffineVar::add_unchecked` doesn't
// support addition with `zero`. In more detail, we initialize
// `accumulator` to be the initial value of `multiple_of_power_of_two`.
// This ensures that all unchecked additions of `accumulator` with later
// values of `multiple_of_power_of_two` are safe. However, to do this
// correctly, we need to perform two steps:
// * We must skip the LSB, and instead proceed assuming that it was 1. Later, we
// will conditionally subtract the initial value of `accumulator`: if LSB ==
// 0: subtract initial_acc_value; else, subtract 0.
// * Because we are assuming the first bit, we must double
// `multiple_of_power_of_two`.
let mut accumulator = multiple_of_power_of_two.clone();
let initial_acc_value = accumulator.into_projective();
@ -321,7 +329,8 @@ where
// The powers start at 2 (instead of 1) because we're skipping the first bit.
multiple_of_power_of_two.double_in_place()?;
// As mentioned, we will skip the LSB, and will later handle it via a conditional subtraction.
// As mentioned, we will skip the LSB, and will later handle it via a
// conditional subtraction.
for bit in affine_bits.iter().skip(1) {
if bit.is_constant() {
if *bit == &Boolean::TRUE {
@ -335,8 +344,9 @@ where
}
// Perform conditional subtraction:
// We can convert to projective safely because the result is guaranteed to be non-zero
// by the condition on `affine_bits.len()`, and by the fact that `accumulator` is non-zero
// We can convert to projective safely because the result is guaranteed to be
// non-zero by the condition on `affine_bits.len()`, and by the fact
// that `accumulator` is non-zero
let result = accumulator.into_projective();
// If bits[0] is 0, then we have to subtract `self`; else, we subtract zero.
let subtrahend = bits[0].select(&Self::zero(), &initial_acc_value)?;
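
The comment block above explains the shape of the scalar-multiplication loop: seed the accumulator with the base as if the LSB were 1, skip that bit in the loop, and conditionally subtract the seed afterwards. The same control flow over plain integers, purely illustrative (no curve or gadget types):

```rust
/// Illustration of the double-and-add variant described above, written over
/// plain integers instead of `NonZeroAffineVar`/`Boolean` gadgets.
fn scalar_mul_skip_lsb(base: i128, bits_le: &[bool]) -> i128 {
    // Pretend the LSB is 1: the accumulator starts at `base`, and the running
    // multiple starts at 2 * base because the first bit is skipped.
    let mut accumulator = base;
    let mut multiple_of_power_of_two = base * 2;
    for &bit in &bits_le[1..] {
        if bit {
            accumulator += multiple_of_power_of_two;
        }
        multiple_of_power_of_two *= 2;
    }
    // If the real LSB was 0, undo the assumed contribution of `base`.
    if !bits_le[0] {
        accumulator -= base;
    }
    accumulator
}

fn main() {
    // 13 = 0b1101, i.e. little-endian bits [1, 0, 1, 1].
    assert_eq!(scalar_mul_skip_lsb(7, &[true, false, true, true]), 7 * 13);
}
```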

+ 28
- 40
src/groups/curves/short_weierstrass/non_zero_affine.rs

@ -55,8 +55,9 @@ where
// y3 = lambda * (x1 - x3) - y1
let numerator = y2 - y1;
let denominator = x2 - x1;
// It's okay to use `unchecked` here, because the precondition of `add_unchecked` is that
// self != ±other, which means that `numerator` and `denominator` are both non-zero.
// It's okay to use `unchecked` here, because the precondition of
// `add_unchecked` is that self != ±other, which means that
// `numerator` and `denominator` are both non-zero.
let lambda = numerator.mul_by_inverse_unchecked(&denominator)?;
let x3 = lambda.square()? - x1 - x2;
let y3 = lambda * &(x1 - &x3) - y1;
@ -82,8 +83,8 @@ where
// y3 = lambda * (x1 - x3) - y1
let numerator = x1_sqr.double()? + &x1_sqr + P::COEFF_A;
let denominator = y1.double()?;
// It's okay to use `unchecked` here, because the precondition of `double` is that
// self != zero.
// It's okay to use `unchecked` here, because the precondition of `double` is
// that self != zero.
let lambda = numerator.mul_by_inverse_unchecked(&denominator)?;
let x3 = lambda.square()? - x1.double()?;
let y3 = lambda * &(x1 - &x3) - y1;
@ -91,8 +92,9 @@ where
}
}
/// Computes `(self + other) + self`. This method requires only 5 constraints,
/// less than the 7 required when computing via `self.double() + other`.
/// Computes `(self + other) + self`. This method requires only 5
/// constraints, less than the 7 required when computing via
/// `self.double() + other`.
///
/// This follows the formulae from [\[ELM03\]](https://arxiv.org/abs/math/0208038).
#[tracing::instrument(target = "r1cs", skip(self))]
@ -100,7 +102,8 @@ where
if [self].is_constant() || other.is_constant() {
self.double()?.add_unchecked(other)
} else {
// It's okay to use `unchecked` the precondition is that `self != ±other` (i.e. same logic as in `add_unchecked`)
// It's okay to use `unchecked` the precondition is that `self != ±other` (i.e.
// same logic as in `add_unchecked`)
let (x1, y1) = (&self.x, &self.y);
let (x2, y2) = (&other.x, &other.y);
@ -145,7 +148,7 @@ where
}
fn value(&self) -> Result<SWAffine<P>, SynthesisError> {
Ok(SWAffine::new(self.x.value()?, self.y.value()?, false))
Ok(SWAffine::new(self.x.value()?, self.y.value()?))
}
}
@ -223,14 +226,17 @@ where
#[cfg(test)]
mod test_non_zero_affine {
use crate::alloc::AllocVar;
use crate::eq::EqGadget;
use crate::fields::fp::{AllocatedFp, FpVar};
use crate::groups::curves::short_weierstrass::non_zero_affine::NonZeroAffineVar;
use crate::groups::curves::short_weierstrass::ProjectiveVar;
use crate::groups::CurveVar;
use crate::R1CSVar;
use ark_ec::{ProjectiveCurve, SWModelParameters};
use crate::{
alloc::AllocVar,
eq::EqGadget,
fields::fp::{AllocatedFp, FpVar},
groups::{
curves::short_weierstrass::{non_zero_affine::NonZeroAffineVar, ProjectiveVar},
CurveVar,
},
R1CSVar,
};
use ark_ec::{models::short_weierstrass::SWCurveConfig, ProjectiveCurve};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{vec::Vec, One};
use ark_test_curves::bls12_381::{g1::Parameters as G1Parameters, Fq};
@ -240,16 +246,10 @@ mod test_non_zero_affine {
let cs = ConstraintSystem::<Fq>::new_ref();
let x = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.0)
})
.unwrap(),
AllocatedFp::<Fq>::new_witness(cs.clone(), || Ok(G1Parameters::GENERATOR.x)).unwrap(),
);
let y = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.1)
})
.unwrap(),
AllocatedFp::<Fq>::new_witness(cs.clone(), || Ok(G1Parameters::GENERATOR.y)).unwrap(),
);
// The following code uses `double` and `add` (`add_unchecked`) to compute
@ -307,16 +307,10 @@ mod test_non_zero_affine {
let cs = ConstraintSystem::<Fq>::new_ref();
let x = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.0)
})
.unwrap(),
AllocatedFp::<Fq>::new_witness(cs.clone(), || Ok(G1Parameters::GENERATOR.x)).unwrap(),
);
let y = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.1)
})
.unwrap(),
AllocatedFp::<Fq>::new_witness(cs.clone(), || Ok(G1Parameters::GENERATOR.y)).unwrap(),
);
// The following code tests `double_and_add`.
@ -359,16 +353,10 @@ mod test_non_zero_affine {
let cs = ConstraintSystem::<Fq>::new_ref();
let x = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.0)
})
.unwrap(),
AllocatedFp::<Fq>::new_witness(cs.clone(), || Ok(G1Parameters::GENERATOR.x)).unwrap(),
);
let y = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.1)
})
.unwrap(),
AllocatedFp::<Fq>::new_witness(cs.clone(), || Ok(G1Parameters::GENERATOR.y)).unwrap(),
);
let a = NonZeroAffineVar::<G1Parameters, FpVar<Fq>>::new(x, y);

+ 8
- 5
src/groups/curves/twisted_edwards/mod.rs

@ -1,6 +1,9 @@
use ark_ec::{
twisted_edwards_extended::{GroupAffine as TEAffine, GroupProjective as TEProjective},
AffineCurve, MontgomeryModelParameters, ProjectiveCurve, TEModelParameters,
twisted_edwards::{
Affine as TEAffine, MontCurveConfig as MontgomeryModelParameter,
Projective as TEProjective, TECurveConfig as TEModelParameters,
},
AffineCurve, ProjectiveCurve,
};
use ark_ff::{BigInteger, BitIteratorBE, Field, One, PrimeField, Zero};
@ -36,7 +39,7 @@ pub struct MontgomeryAffineVar<
mod montgomery_affine_impl {
use super::*;
use ark_ec::twisted_edwards_extended::GroupAffine;
use ark_ec::twisted_edwards::Affine as GroupAffine;
use ark_ff::Field;
use core::ops::Add;
@ -173,8 +176,8 @@ mod montgomery_affine_impl {
AllocationMode::Witness
};
let coeff_b = P::MontgomeryModelParameters::COEFF_B;
let coeff_a = P::MontgomeryModelParameters::COEFF_A;
let coeff_b = P::MontCurveConfig::COEFF_B;
let coeff_a = P::MontCurveConfig::COEFF_A;
let lambda = F::new_variable(
ark_relations::ns!(cs, "lambda"),

+ 5
- 1
src/lib.rs

@ -13,6 +13,9 @@
#[macro_use]
extern crate ark_std;
#[macro_use]
extern crate ark_ff;
#[macro_use]
extern crate ark_relations;
@ -49,7 +52,8 @@ pub mod pairing;
pub mod alloc;
/// This module describes a trait for checking equality of variables.
pub mod eq;
/// This module implements functions for manipulating polynomial variables over finite fields.
/// This module implements functions for manipulating polynomial variables over
/// finite fields.
pub mod poly;
/// This module describes traits for conditionally selecting a variable from a
/// list of variables.

+ 18
- 14
src/poly/domain/mod.rs

@ -1,7 +1,8 @@
use crate::boolean::Boolean;
use crate::eq::EqGadget;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::{
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
};
use ark_ff::PrimeField;
use ark_relations::r1cs::SynthesisError;
use ark_std::vec::Vec;
@ -9,11 +10,11 @@ use ark_std::vec::Vec;
pub mod vanishing_poly;
#[derive(Clone, Debug)]
/// Defines an evaluation domain over a prime field. The domain is a coset of size `1<<dim`.
///
/// Native code corresponds to `ark-poly::univariate::domain::radix2`, but `ark-poly` only supports
/// subgroup for now.
/// Defines an evaluation domain over a prime field. The domain is a coset of
/// size `1<<dim`.
///
/// Native code corresponds to `ark-poly::univariate::domain::radix2`, but
/// `ark-poly` only supports subgroup for now.
// TODO: support cosets in `ark-poly`.
pub struct Radix2DomainVar<F: PrimeField> {
/// generator of subgroup g
@ -72,12 +73,14 @@ impl Radix2DomainVar {
1 << self.dim
}
/// For domain `h<g>` with dimension `n`, `position` represented by `query_pos` in big endian form,
/// returns all points of `h*g^{position}<g^{2^{n-coset_dim}}>`. The result is the query coset at index `query_pos`
/// for the FRI protocol.
/// For domain `h<g>` with dimension `n`, `position` represented by
/// `query_pos` in big endian form, returns all points of
/// `h*g^{position}<g^{2^{n-coset_dim}}>`. The result is the query coset at
/// index `query_pos` for the FRI protocol.
///
/// # Panics
/// This function panics when `query_pos.len() != coset_dim` or `query_pos.len() != self.dim`.
/// This function panics when `query_pos.len() != coset_dim` or
/// `query_pos.len() != self.dim`.
pub fn query_position_to_coset_elements(
&self,
query_pos: &[Boolean<F>],
@ -88,8 +91,9 @@ impl Radix2DomainVar {
.elements())
}
/// For domain `h<g>` with dimension `n`, `position` represented by `query_pos` in big endian form,
/// returns all points of `h*g^{position}<g^{n-query_pos.len()}>`
/// For domain `h<g>` with dimension `n`, `position` represented by
/// `query_pos` in big endian form, returns all points of
/// `h*g^{position}<g^{n-query_pos.len()}>`
///
/// # Panics
/// This function panics when `query_pos.len() < log2_num_cosets`.

+ 10
- 10
src/poly/domain/vanishing_poly.rs

@ -1,12 +1,11 @@
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::fields::{fp::FpVar, FieldVar};
use ark_ff::{Field, PrimeField};
use ark_relations::r1cs::SynthesisError;
use ark_std::ops::Sub;
/// Struct describing vanishing polynomial for a multiplicative coset H where |H| is a power of 2.
/// As H is a coset, every element can be described as h*g^i and therefore
/// has vanishing polynomial Z_H(x) = x^|H| - h^|H|
/// Struct describing vanishing polynomial for a multiplicative coset H where
/// |H| is a power of 2. As H is a coset, every element can be described as
/// h*g^i and therefore has vanishing polynomial Z_H(x) = x^|H| - h^|H|
#[derive(Clone)]
pub struct VanishingPolynomial<F: Field> {
/// h^|H|
@ -37,7 +36,8 @@ impl VanishingPolynomial {
}
/// Evaluates the constraints and just gives you the gadget for the result.
/// Caution for use in holographic lincheck: The output has 2 entries in one matrix
/// Caution for use in holographic lincheck: The output has 2 entries in one
/// matrix
pub fn evaluate_constraints(&self, x: &FpVar<F>) -> Result<FpVar<F>, SynthesisError> {
if self.dim_h == 1 {
let result = x.sub(x);
@ -55,10 +55,10 @@ impl VanishingPolynomial {
#[cfg(test)]
mod tests {
use crate::alloc::AllocVar;
use crate::fields::fp::FpVar;
use crate::poly::domain::vanishing_poly::VanishingPolynomial;
use crate::R1CSVar;
use crate::{
alloc::AllocVar, fields::fp::FpVar, poly::domain::vanishing_poly::VanishingPolynomial,
R1CSVar,
};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::{test_rng, UniformRand};
use ark_test_curves::bls12_381::Fr;
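
The polynomial documented above, Z_H(x) = x^|H| - h^|H| with |H| = 2^dim_h, can be evaluated with dim_h squarings. A toy version over a small prime modulus, just to show the shape of the computation:

```rust
/// Toy evaluation of Z_H(x) = x^(2^dim) - h^(2^dim) over a small prime
/// modulus `p`, standing in for the field arithmetic used by the gadget.
fn vanishing_poly_eval(x: u64, h: u64, dim: u32, p: u64) -> u64 {
    let pow_2_dim = |mut b: u64| -> u64 {
        // b^(2^dim) by `dim` repeated squarings.
        for _ in 0..dim {
            b = ((b as u128 * b as u128) % p as u128) as u64;
        }
        b
    };
    (pow_2_dim(x) + p - pow_2_dim(h)) % p
}

fn main() {
    // In F_17, take H = 3 * <4>, a coset of size 4 (4 has order 4 mod 17),
    // so H = {3, 12, 14, 5}; Z_H must vanish on every element of H.
    for elem in [3u64, 12, 14, 5] {
        assert_eq!(vanishing_poly_eval(elem, 3, 2, 17), 0);
    }
}
```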

+ 25
- 27
src/poly/evaluations/univariate/lagrange_interpolator.rs

@ -31,16 +31,14 @@ impl LagrangeInterpolator {
cur_elem *= domain_generator;
all_domain_elems.push(cur_elem);
}
/*
By computing the following elements as constants,
we can further reduce the interpolation costs.
m = order of the interpolation domain
v_inv[i] = prod_{j != i} h(g^i - g^j)
We use the following facts to compute this:
v_inv[0] = m*h^{m-1}
v_inv[i] = g^{-1} * v_inv[i-1]
*/
// By computing the following elements as constants,
// we can further reduce the interpolation costs.
//
// m = order of the interpolation domain
// v_inv[i] = prod_{j != i} h(g^i - g^j)
// We use the following facts to compute this:
// v_inv[0] = m*h^{m-1}
// v_inv[i] = g^{-1} * v_inv[i-1]
// TODO: Include proof of the above two points
let g_inv = domain_generator.inverse().unwrap();
let m = F::from((1 << domain_dim) as u128);
@ -64,16 +62,14 @@ impl LagrangeInterpolator {
}
pub(crate) fn compute_lagrange_coefficients(&self, interpolation_point: F) -> Vec<F> {
/*
* Let t be the interpolation point, H be the multiplicative coset, with elements of the form h*g^i.
Compute each L_{i,H}(t) as Z_{H}(t) * v_i / (t- h g^i)
where:
- Z_{H}(t) = \prod_{j} (t-h*g^j) = (t^m-h^m), and
- v_{i} = 1 / \prod_{j \neq i} h(g^i-g^j).
Below we use the fact that v_{0} = 1/(m * h^(m-1)) and v_{i+1} = g * v_{i}.
We first compute the inverse of each coefficient, except for the Z_H(t) term.
We then batch invert the entire result, and multiply by Z_H(t).
*/
// Let t be the interpolation point, H be the multiplicative coset, with
// elements of the form h*g^i. Compute each L_{i,H}(t) as Z_{H}(t) * v_i
// / (t- h g^i) where:
// - Z_{H}(t) = \prod_{j} (t-h*g^j) = (t^m-h^m), and
// - v_{i} = 1 / \prod_{j \neq i} h(g^i-g^j).
// Below we use the fact that v_{0} = 1/(m * h^(m-1)) and v_{i+1} = g * v_{i}.
// We first compute the inverse of each coefficient, except for the Z_H(t) term.
// We then batch invert the entire result, and multiply by Z_H(t).
let mut inverted_lagrange_coeffs: Vec<F> = Vec::with_capacity(self.all_domain_elems.len());
for i in 0..self.domain_order {
let l = self.v_inv_elems[i];
@ -98,14 +94,16 @@ impl LagrangeInterpolator {
#[cfg(test)]
mod tests {
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::poly::domain::Radix2DomainVar;
use crate::poly::evaluations::univariate::lagrange_interpolator::LagrangeInterpolator;
use crate::R1CSVar;
use crate::{
fields::{fp::FpVar, FieldVar},
poly::{
domain::Radix2DomainVar,
evaluations::univariate::lagrange_interpolator::LagrangeInterpolator,
},
R1CSVar,
};
use ark_ff::{FftField, Field, One};
use ark_poly::univariate::DensePolynomial;
use ark_poly::{Polynomial, UVPolynomial};
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_std::{test_rng, UniformRand};
use ark_test_curves::bls12_381::Fr;
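
For reference on the two facts stated (and left as a TODO) in the comment above, with `v_inv[i] = prod_{j != i} h*(g^i - g^j)`, `g` of order `m`, and `h` the coset offset:

- `v_inv[0] = prod_{j=1}^{m-1} h*(1 - g^j) = h^{m-1} * prod_{j=1}^{m-1} (1 - g^j) = m * h^{m-1}`, because `prod_{j=1}^{m-1} (x - g^j) = (x^m - 1)/(x - 1) = 1 + x + ... + x^{m-1}`, which evaluates to `m` at `x = 1`.
- `v_inv[i+1] = prod_{j != i+1} h*(g^{i+1} - g^j) = prod_{j' != i} h*g*(g^i - g^{j'}) = g^{m-1} * v_inv[i] = g^{-1} * v_inv[i]`, after reindexing `j' = j - 1 (mod m)` and using `g^m = 1`.

These are exactly the constants the interpolator precomputes above before batch inversion.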

+ 56
- 39
src/poly/evaluations/univariate/mod.rs

@ -1,16 +1,21 @@
pub mod lagrange_interpolator;
use crate::alloc::AllocVar;
use crate::eq::EqGadget;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::poly::domain::Radix2DomainVar;
use crate::poly::evaluations::univariate::lagrange_interpolator::LagrangeInterpolator;
use crate::R1CSVar;
use crate::{
alloc::AllocVar,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
poly::{
domain::Radix2DomainVar,
evaluations::univariate::lagrange_interpolator::LagrangeInterpolator,
},
R1CSVar,
};
use ark_ff::{batch_inversion, PrimeField};
use ark_relations::r1cs::SynthesisError;
use ark_std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign};
use ark_std::vec::Vec;
use ark_std::{
ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign},
vec::Vec,
};
#[derive(Clone)]
/// Stores a UV polynomial in evaluation form.
@ -22,8 +27,9 @@ pub struct EvaluationsVar {
domain: Radix2DomainVar<F>,
/// Contains all domain elements of `domain.base_domain`.
///
/// This is a cache for lagrange interpolation when offset is non-constant. Will be `None` if offset is constant
/// or `interpolate` is set to `false`.
/// This is a cache for lagrange interpolation when offset is non-constant.
/// Will be `None` if offset is constant or `interpolate` is set to
/// `false`.
subgroup_points: Option<Vec<F>>,
}
@ -54,7 +60,8 @@ impl EvaluationsVar {
ev
}
/// Precompute necessary calculation for lagrange interpolation and mark it ready to interpolate
/// Precompute necessary calculation for lagrange interpolation and mark it
/// ready to interpolate
pub fn generate_interpolation_cache(&mut self) {
if self.domain.offset().is_constant() {
let poly_evaluations_val: Vec<_> =
@ -67,7 +74,8 @@ impl EvaluationsVar {
};
self.lagrange_interpolator = Some(lagrange_interpolator)
} else {
// calculate all elements of base subgroup so that in later part we don't need to calculate the exponents again
// calculate all elements of base subgroup so that in later part we don't need
// to calculate the exponents again
let mut subgroup_points = Vec::with_capacity(self.domain.size() as usize);
subgroup_points.push(F::one());
for i in 1..self.domain.size() as usize {
@ -77,8 +85,8 @@ impl EvaluationsVar {
}
}
/// Compute lagrange coefficients for each evaluation, given `interpolation_point`.
/// Only valid if the domain offset is constant.
/// Compute lagrange coefficients for each evaluation, given
/// `interpolation_point`. Only valid if the domain offset is constant.
fn compute_lagrange_coefficients(
&self,
interpolation_point: &FpVar<F>,
@ -94,8 +102,8 @@ impl EvaluationsVar {
let lagrange_coeffs =
lagrange_interpolator.compute_lagrange_coefficients(t.value().unwrap());
let mut lagrange_coeffs_fg = Vec::new();
// Now we convert these lagrange coefficients to gadgets, and then constrain them.
// The i-th lagrange coefficients constraint is:
// Now we convert these lagrange coefficients to gadgets, and then constrain
// them. The i-th lagrange coefficients constraint is:
// (v_inv[i] * t - v_inv[i] * domain_elem[i]) * (coeff) = 1/Z_I(t)
let vp_t = lagrange_interpolator.domain_vp.evaluate_constraints(t)?;
// let inv_vp_t = vp_t.inverse()?;
@ -124,16 +132,19 @@ impl EvaluationsVar {
Ok(lagrange_coeffs_fg)
}
/// Returns constraints for Interpolating and evaluating at `interpolation_point`
/// Returns constraints for Interpolating and evaluating at
/// `interpolation_point`
pub fn interpolate_and_evaluate(
&self,
interpolation_point: &FpVar<F>,
) -> Result<FpVar<F>, SynthesisError> {
// specialize: if domain offset is constant, we can optimize to have fewer constraints
// specialize: if domain offset is constant, we can optimize to have fewer
// constraints
if self.domain.offset().is_constant() {
self.lagrange_interpolate_with_constant_offset(interpolation_point)
} else {
// if domain offset is not constant, then we use standard lagrange interpolation code
// if domain offset is not constant, then we use standard lagrange interpolation
// code
self.lagrange_interpolate_with_non_constant_offset(interpolation_point)
}
}
@ -156,7 +167,8 @@ impl EvaluationsVar {
Ok(interpolation)
}
/// Generate interpolation constraints. We assume at compile time we know the base coset (i.e. `gen`) but not know `offset`.
/// Generate interpolation constraints. We assume at compile time we know
/// the base coset (i.e. `gen`) but do not know `offset`.
fn lagrange_interpolate_with_non_constant_offset(
&self,
interpolation_point: &FpVar<F>, // = alpha in the following code
@ -167,10 +179,12 @@ impl EvaluationsVar {
Call `self.generate_interpolation_cache` first or set `interpolate` to true in constructor. ");
// Let us denote interpolation_point as alpha.
// Lagrange polynomial for coset element `a` is
// \frac{1}{size * offset ^ size} * \frac{alpha^size - offset^size}{alpha * a^{-1} - 1}
// Notice that a = (offset * a') where a' is the corresponding element of base coset
// \frac{1}{size * offset ^ size} * \frac{alpha^size - offset^size}{alpha *
// a^{-1} - 1} Notice that a = (offset * a') where a' is the
// corresponding element of base coset
// let `lhs` be \frac{alpha^size - offset^size}{size * offset ^ size}. This part is shared by all lagrange polynomials
// let `lhs` be \frac{alpha^size - offset^size}{size * offset ^ size}. This part
// is shared by all lagrange polynomials
let coset_offset_to_size = self
.domain
.offset()
@ -181,14 +195,16 @@ impl EvaluationsVar {
// This also means that the denominator is
lhs_numerator.enforce_not_equal(&FpVar::zero())?;
// `domain.offset()` is non-zero by construction, so `coset_offset_to_size` is also non-zero, which means `lhs_denominator` is non-zero
// `domain.offset()` is non-zero by construction, so `coset_offset_to_size` is
// also non-zero, which means `lhs_denominator` is non-zero
let lhs_denominator = &coset_offset_to_size * FpVar::constant(F::from(self.domain.size()));
// unchecked is okay because the denominator is non-zero.
let lhs = lhs_numerator.mul_by_inverse_unchecked(&lhs_denominator)?;
// `rhs` for coset element `a` is \frac{1}{alpha * a^{-1} - 1} = \frac{1}{alpha * offset^{-1} * a'^{-1} - 1}
// domain.offset() is non-zero by construction.
// `rhs` for coset element `a` is \frac{1}{alpha * a^{-1} - 1} = \frac{1}{alpha
// * offset^{-1} * a'^{-1} - 1} domain.offset() is non-zero by
// construction.
let alpha_coset_offset_inv =
interpolation_point.mul_by_inverse_unchecked(&self.domain.offset())?;
@ -204,12 +220,14 @@ impl EvaluationsVar {
let lag_denom = &alpha_coset_offset_inv * subgroup_point_inv - F::one();
// lag_denom cannot be zero, so we use `unchecked`.
//
// Proof: lag_denom is zero if and only if alpha * (coset_offset * subgroup_point)^{-1} == 1.
// This can happen only if `alpha` is itself in the coset.
// Proof: lag_denom is zero if and only if alpha * (coset_offset *
// subgroup_point)^{-1} == 1. This can happen only if `alpha` is
// itself in the coset.
//
// Earlier we asserted that `lhs_numerator` is not zero.
// Since `lhs_numerator` is just the vanishing polynomial for the coset evaluated at `alpha`,
// and since this is non-zero, `alpha` is not in the coset.
// Since `lhs_numerator` is just the vanishing polynomial for the coset
// evaluated at `alpha`, and since this is non-zero, `alpha` is not
// in the coset.
let lag_coeff = lhs.mul_by_inverse_unchecked(&lag_denom)?;
let lag_interpoland = &self.evals[i] * lag_coeff;
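The split used above can be sanity-checked outside the constraint system. The helper below is a minimal sketch, not part of the crate (its name, signature, and the plain `ark_ff::Field` arithmetic are assumptions): it computes the shared `lhs` term and the per-element `lag_denom` term exactly as described in the comments, with `offset * gen^i` standing in for the coset element `a`. The `unwrap()`ed inverses mirror the `unchecked` inverses above, which the comments justify by arguing the denominators cannot be zero.

```rust
use ark_ff::{Field, One};

/// Sketch only: Lagrange coefficients at `alpha` for the order-`size` coset
/// `offset * <gen>`, following the formula
///   coeff_i = lhs / (alpha * (offset * gen^i)^{-1} - 1),
///   lhs     = (alpha^size - offset^size) / (size * offset^size).
fn lagrange_coeffs_on_coset<F: Field>(alpha: F, offset: F, gen: F, size: u64) -> Vec<F> {
    let offset_to_size = offset.pow([size]);
    // Shared part: the coset's vanishing polynomial at `alpha`, scaled by
    // 1 / (size * offset^size).
    let lhs = (alpha.pow([size]) - offset_to_size)
        * (offset_to_size * F::from(size)).inverse().unwrap();
    let alpha_offset_inv = alpha * offset.inverse().unwrap();
    let mut coeffs = Vec::with_capacity(size as usize);
    let mut subgroup_point = F::one();
    for _ in 0..size {
        // lag_denom = alpha * a^{-1} - 1, with a = offset * subgroup_point.
        let lag_denom = alpha_offset_inv * subgroup_point.inverse().unwrap() - F::one();
        coeffs.push(lhs * lag_denom.inverse().unwrap());
        subgroup_point *= gen;
    }
    coeffs
}
```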
@ -346,15 +364,14 @@ impl<'a, F: PrimeField> DivAssign<&'a EvaluationsVar> for EvaluationsVar {
#[cfg(test)]
mod tests {
use crate::alloc::AllocVar;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::poly::domain::Radix2DomainVar;
use crate::poly::evaluations::univariate::EvaluationsVar;
use crate::R1CSVar;
use crate::{
alloc::AllocVar,
fields::{fp::FpVar, FieldVar},
poly::{domain::Radix2DomainVar, evaluations::univariate::EvaluationsVar},
R1CSVar,
};
use ark_ff::{FftField, Field, One, UniformRand};
use ark_poly::polynomial::univariate::DensePolynomial;
use ark_poly::{Polynomial, UVPolynomial};
use ark_poly::{polynomial::univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::test_rng;
use ark_test_curves::bls12_381::Fr;

+ 12
- 13
src/poly/polynomial/univariate/dense.rs

@ -1,11 +1,11 @@
use ark_ff::PrimeField;
use ark_relations::r1cs::SynthesisError;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::fields::{fp::FpVar, FieldVar};
use ark_std::vec::Vec;
/// Stores a polynomial in coefficient form, where coeffcient is represented by a list of `Fpvar<F>`.
/// Stores a polynomial in coefficient form, where the coefficients are represented
/// by a list of `FpVar<F>`.
pub struct DensePolynomialVar<F: PrimeField> {
/// The coefficient of `x^i` is stored at location `i` in `self.coeffs`.
pub coeffs: Vec<FpVar<F>>,
@ -22,8 +22,9 @@ impl DensePolynomialVar {
Self { coeffs }
}
/// Evaluates `self` at the given `point` and just gives you the gadget for the result.
/// Caution for use in holographic lincheck: The output has 2 entries in one matrix
/// Evaluates `self` at the given `point` and just gives you the gadget for
/// the result. Caution for use in holographic lincheck: The output has
/// 2 entries in one matrix
pub fn evaluate(&self, point: &FpVar<F>) -> Result<FpVar<F>, SynthesisError> {
let mut result: FpVar<F> = FpVar::zero();
// current power of point
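The "current power of point" strategy hinted at by this hunk can be summarized outside the constraint system as follows; this is an illustrative sketch, not the gadget's actual code:

```rust
use ark_ff::{Field, One, Zero};

// Sketch only: evaluate sum_i coeffs[i] * point^i with a running power of `point`.
fn evaluate_poly_sketch<F: Field>(coeffs: &[F], point: F) -> F {
    let mut result = F::zero();
    let mut power = F::one(); // current power of `point`
    for c in coeffs {
        result += power * c;
        power *= point;
    }
    result
}
```

If the gadget performs the same accumulation over `FpVar<F>` values, the constraint count grows linearly in the number of coefficients.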
@ -40,15 +41,13 @@ impl DensePolynomialVar {
#[cfg(test)]
mod tests {
use crate::alloc::AllocVar;
use crate::fields::fp::FpVar;
use crate::poly::polynomial::univariate::dense::DensePolynomialVar;
use crate::R1CSVar;
use ark_poly::polynomial::univariate::DensePolynomial;
use ark_poly::{Polynomial, UVPolynomial};
use crate::{
alloc::AllocVar, fields::fp::FpVar,
poly::polynomial::univariate::dense::DensePolynomialVar, R1CSVar,
};
use ark_poly::{polynomial::univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::vec::Vec;
use ark_std::{test_rng, UniformRand};
use ark_std::{test_rng, vec::Vec, UniformRand};
use ark_test_curves::bls12_381::Fr;
#[test]

+ 4
- 2
src/select.rs

@ -21,10 +21,12 @@ where
) -> Result<Self, SynthesisError>;
/// Returns an element of `values` whose index is represented by `position`.
/// `position` is an array of boolean that represents an unsigned integer in big endian order.
/// `position` is an array of booleans that represents an unsigned integer in
/// big-endian order.
///
/// # Example
/// To get the 6th element of `values`, convert unsigned integer 6 (`0b110`) to `position = [True, True, False]`,
/// To get the 6th element of `values`, convert unsigned integer 6 (`0b110`)
/// to `position = [True, True, False]`,
/// and call `conditionally_select_power_of_two_vector(position, values)`.
fn conditionally_select_power_of_two_vector(
position: &[Boolean<ConstraintF>],
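A hedged usage sketch of the doc example above: only the `position` parameter is visible in this hunk, so the `values: &[Self]` argument, the helper name `select_sixth`, and the exact imports are assumptions rather than the crate's own code.

```rust
use ark_r1cs_std::{
    fields::{fp::FpVar, FieldVar},
    prelude::*,
};
use ark_relations::r1cs::SynthesisError;
use ark_test_curves::bls12_381::Fr;

// Select `values[6]` using the big-endian bits of 6 (0b110).
fn select_sixth() -> Result<FpVar<Fr>, SynthesisError> {
    let values: Vec<FpVar<Fr>> = (0..8u64).map(|i| FpVar::constant(Fr::from(i))).collect();
    let position = [
        Boolean::Constant(true),
        Boolean::Constant(true),
        Boolean::Constant(false),
    ];
    FpVar::conditionally_select_power_of_two_vector(&position, &values)
}
```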

+ 11
- 3
tests/arithmetic_tests.rs

@ -6,8 +6,15 @@ use ark_mnt4_753::MNT4_753;
use ark_mnt6_298::MNT6_298;
use ark_mnt6_753::MNT6_753;
use ark_r1cs_std::fields::nonnative::{AllocatedNonNativeFieldVar, NonNativeFieldVar};
use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::FieldVar, R1CSVar};
use ark_r1cs_std::{
alloc::AllocVar,
eq::EqGadget,
fields::{
nonnative::{AllocatedNonNativeFieldVar, NonNativeFieldVar},
FieldVar,
},
R1CSVar,
};
use ark_relations::r1cs::{ConstraintSystem, ConstraintSystemRef};
use ark_std::rand::RngCore;
@ -464,7 +471,8 @@ fn double_stress_test_1
|| Ok(num_native),
)
.unwrap();
// Add to at least BaseField::size_in_bits() to ensure that we teat the overflowing
// Add to at least BaseField::size_in_bits() to ensure that we test the
// overflowing case
for _ in 0..TEST_COUNT + BaseField::MODULUS_BIT_SIZE as usize {
// double
num_native = num_native + &num_native;

+ 5
- 3
tests/from_test.rs

@ -1,6 +1,8 @@
use ark_r1cs_std::alloc::AllocVar;
use ark_r1cs_std::fields::nonnative::{NonNativeFieldMulResultVar, NonNativeFieldVar};
use ark_r1cs_std::R1CSVar;
use ark_r1cs_std::{
alloc::AllocVar,
fields::nonnative::{NonNativeFieldMulResultVar, NonNativeFieldVar},
R1CSVar,
};
use ark_relations::r1cs::ConstraintSystem;
use ark_std::UniformRand;

+ 0
- 50
tests/to_bytes_test.rs

@ -1,50 +0,0 @@
use ark_ec::PairingEngine;
use ark_ff::{to_bytes, Zero};
use ark_mnt4_298::MNT4_298;
use ark_mnt6_298::MNT6_298;
use ark_r1cs_std::alloc::AllocVar;
use ark_r1cs_std::fields::nonnative::NonNativeFieldVar;
use ark_r1cs_std::{R1CSVar, ToBitsGadget, ToBytesGadget};
use ark_relations::r1cs::ConstraintSystem;
#[test]
fn to_bytes_test() {
let cs = ConstraintSystem::<<MNT6_298 as PairingEngine>::Fr>::new_ref();
let target_test_elem = <MNT4_298 as PairingEngine>::Fr::from(123456u128);
let target_test_gadget = NonNativeFieldVar::<
<MNT4_298 as PairingEngine>::Fr,
<MNT6_298 as PairingEngine>::Fr,
>::new_witness(cs, || Ok(target_test_elem))
.unwrap();
let target_to_bytes: Vec<u8> = target_test_gadget
.to_bytes()
.unwrap()
.iter()
.map(|v| v.value().unwrap())
.collect();
// 123456 = 65536 + 226 * 256 + 64
assert_eq!(target_to_bytes[0], 64);
assert_eq!(target_to_bytes[1], 226);
assert_eq!(target_to_bytes[2], 1);
for byte in target_to_bytes.iter().skip(3) {
assert_eq!(*byte, 0);
}
assert_eq!(to_bytes!(target_test_elem).unwrap(), target_to_bytes);
}
#[test]
fn to_bits_test() {
type F = ark_bls12_377::Fr;
type CF = ark_bls12_377::Fq;
let cs = ConstraintSystem::<CF>::new_ref();
let f = F::zero();
let f_var = NonNativeFieldVar::<F, CF>::new_input(cs.clone(), || Ok(f)).unwrap();
f_var.to_bits_le().unwrap();
}

+ 3
- 3
tests/to_constraint_field_test.rs

@ -1,6 +1,6 @@
use ark_r1cs_std::alloc::AllocVar;
use ark_r1cs_std::fields::nonnative::NonNativeFieldVar;
use ark_r1cs_std::{R1CSVar, ToConstraintFieldGadget};
use ark_r1cs_std::{
alloc::AllocVar, fields::nonnative::NonNativeFieldVar, R1CSVar, ToConstraintFieldGadget,
};
use ark_relations::r1cs::ConstraintSystem;
#[test]
