Rename NonNativeFieldVar -> EmulatedFpVar (#135)

This commit is contained in:
Pratyush Mishra
2023-12-28 13:14:08 -05:00
committed by GitHub
parent 1ff3a902bd
commit ed2d55e6ff
18 changed files with 666 additions and 737 deletions

View File

@@ -0,0 +1,914 @@
use super::{
params::{get_params, OptimizationType},
reduce::{bigint_to_basefield, limbs_to_bigint, Reducer},
AllocatedMulResultVar,
};
use crate::{fields::fp::FpVar, prelude::*, ToConstraintFieldGadget};
use ark_ff::{BigInteger, PrimeField};
use ark_relations::{
ns,
r1cs::{
ConstraintSystemRef, Namespace, OptimizationGoal, Result as R1CSResult, SynthesisError,
},
};
use ark_std::{
borrow::Borrow,
cmp::{max, min},
marker::PhantomData,
vec,
vec::Vec,
};
/// The allocated version of `EmulatedFpVar` (introduced below).
///
/// Represents a `TargetF` element as a vector of `BaseF` limbs, together with
/// bookkeeping that tracks how far the limbs may have drifted from the
/// canonical ("normal") form, so operations know when a reduction is needed.
#[derive(Debug)]
#[must_use]
pub struct AllocatedEmulatedFpVar<TargetF: PrimeField, BaseF: PrimeField> {
    /// Constraint system reference
    pub cs: ConstraintSystemRef<BaseF>,
    /// The limbs, each of which is a BaseF gadget.
    /// Stored most-significant limb first ("big limb first" order; see
    /// `get_limbs_representations_from_big_integer`).
    pub limbs: Vec<FpVar<BaseF>>,
    /// Number of additions done over this gadget, using which the gadget
    /// decides when to reduce.
    pub num_of_additions_over_normal_form: BaseF,
    /// Whether the limb representation is the normal form (using only the bits
    /// specified in the parameters, and the representation is strictly within
    /// the range of TargetF).
    pub is_in_the_normal_form: bool,
    #[doc(hidden)]
    pub target_phantom: PhantomData<TargetF>,
}
impl<TargetF: PrimeField, BaseF: PrimeField> AllocatedEmulatedFpVar<TargetF, BaseF> {
    /// Return cs
    pub fn cs(&self) -> ConstraintSystemRef<BaseF> {
        self.cs.clone()
    }

    /// Obtain the value of limbs
    ///
    /// Interprets `limbs` (most-significant limb first) as an element of
    /// `TargetF` by accumulating `limb_value * base^i` from the least
    /// significant limb upward, where `base = 2^bits_per_limb` computed inside
    /// `TargetF`.
    pub fn limbs_to_value(limbs: Vec<BaseF>, optimization_type: OptimizationType) -> TargetF {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            optimization_type,
        );

        // Convert 2^{(params.bits_per_limb - 1)} into the TargetF and then double
        // the base. This is because 2^{(params.bits_per_limb)} might indeed be
        // larger than the target field's prime.
        let base_repr = TargetF::ONE.into_bigint() << (params.bits_per_limb - 1) as u32;
        let mut base = TargetF::from_bigint(base_repr).unwrap();
        base.double_in_place();

        let mut result = TargetF::zero();
        let mut power = TargetF::one();

        // Limbs are big-endian, so iterate from the last (least significant)
        // limb upwards.
        for limb in limbs.iter().rev() {
            let mut val = TargetF::zero();
            let mut cur = TargetF::one();

            // Re-interpret the BaseF limb bit-by-bit (LSB first) inside TargetF.
            for bit in limb.into_bigint().to_bits_be().iter().rev() {
                if *bit {
                    val += &cur;
                }
                cur.double_in_place();
            }
            result += &(val * power);
            power *= &base;
        }

        result
    }

    /// Obtain the value of an emulated field element
    pub fn value(&self) -> R1CSResult<TargetF> {
        let mut limbs = Vec::new();
        for limb in self.limbs.iter() {
            limbs.push(limb.value()?);
        }

        Ok(Self::limbs_to_value(limbs, self.get_optimization_type()))
    }

    /// Obtain the emulated field element of a constant value
    pub fn constant(cs: ConstraintSystemRef<BaseF>, value: TargetF) -> R1CSResult<Self> {
        let optimization_type = match cs.optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        };

        let limbs_value = Self::get_limbs_representations(&value, optimization_type)?;

        let mut limbs = Vec::new();

        // Each limb becomes a circuit constant; no witnesses are created.
        for limb_value in limbs_value.iter() {
            limbs.push(FpVar::<BaseF>::new_constant(ns!(cs, "limb"), limb_value)?);
        }

        Ok(Self {
            cs,
            limbs,
            num_of_additions_over_normal_form: BaseF::zero(),
            is_in_the_normal_form: true,
            target_phantom: PhantomData,
        })
    }

    /// Obtain the emulated field element of one
    pub fn one(cs: ConstraintSystemRef<BaseF>) -> R1CSResult<Self> {
        Self::constant(cs, TargetF::one())
    }

    /// Obtain the emulated field element of zero
    pub fn zero(cs: ConstraintSystemRef<BaseF>) -> R1CSResult<Self> {
        Self::constant(cs, TargetF::zero())
    }

    /// Add an emulated field element
    #[tracing::instrument(target = "r1cs")]
    pub fn add(&self, other: &Self) -> R1CSResult<Self> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        // Limb-wise addition; no carries are propagated here. Overflow is
        // accounted for by the addition counter and the reduction below.
        let mut limbs = Vec::new();
        for (this_limb, other_limb) in self.limbs.iter().zip(other.limbs.iter()) {
            limbs.push(this_limb + other_limb);
        }

        let mut res = Self {
            cs: self.cs(),
            limbs,
            num_of_additions_over_normal_form: self
                .num_of_additions_over_normal_form
                .add(&other.num_of_additions_over_normal_form)
                .add(&BaseF::one()),
            is_in_the_normal_form: false,
            target_phantom: PhantomData,
        };

        Reducer::<TargetF, BaseF>::post_add_reduce(&mut res)?;
        Ok(res)
    }

    /// Add a constant
    #[tracing::instrument(target = "r1cs")]
    pub fn add_constant(&self, other: &TargetF) -> R1CSResult<Self> {
        let other_limbs = Self::get_limbs_representations(other, self.get_optimization_type())?;

        // Fold each constant limb into the corresponding variable limb.
        let mut limbs = Vec::new();
        for (this_limb, other_limb) in self.limbs.iter().zip(other_limbs.iter()) {
            limbs.push(this_limb + *other_limb);
        }

        let mut res = Self {
            cs: self.cs(),
            limbs,
            num_of_additions_over_normal_form: self
                .num_of_additions_over_normal_form
                .add(&BaseF::one()),
            is_in_the_normal_form: false,
            target_phantom: PhantomData,
        };

        Reducer::<TargetF, BaseF>::post_add_reduce(&mut res)?;

        Ok(res)
    }

    /// Subtract an emulated field element, without the final reduction step
    #[tracing::instrument(target = "r1cs")]
    pub fn sub_without_reduce(&self, other: &Self) -> R1CSResult<Self> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Step 1: reduce the `other` if needed
        // (i.e. when the padding computed below would not fit in a BaseF
        // element; reducing shrinks the surfeit).
        let mut surfeit = overhead!(other.num_of_additions_over_normal_form + BaseF::one()) + 1;
        let mut other = other.clone();
        if (surfeit + params.bits_per_limb > BaseF::MODULUS_BIT_SIZE as usize - 1)
            || (surfeit
                + (TargetF::MODULUS_BIT_SIZE as usize
                    - params.bits_per_limb * (params.num_limbs - 1))
                > BaseF::MODULUS_BIT_SIZE as usize - 1)
        {
            Reducer::reduce(&mut other)?;
            surfeit = overhead!(other.num_of_additions_over_normal_form + BaseF::ONE) + 1;
        }

        // Step 2: construct the padding
        // The padding is chosen large enough that limb-wise
        // `pad - other_limb` cannot underflow in BaseF.
        let mut pad_non_top_limb = BaseF::ONE.into_bigint();
        let mut pad_top_limb = pad_non_top_limb;

        pad_non_top_limb <<= (surfeit + params.bits_per_limb) as u32;
        let pad_non_top_limb = BaseF::from_bigint(pad_non_top_limb).unwrap();

        // The top limb holds fewer bits, so it gets its own padding size.
        pad_top_limb <<= (surfeit + TargetF::MODULUS_BIT_SIZE as usize
            - params.bits_per_limb * (params.num_limbs - 1)) as u32;
        let pad_top_limb = BaseF::from_bigint(pad_top_limb).unwrap();

        let mut pad_limbs = Vec::with_capacity(self.limbs.len());
        pad_limbs.push(pad_top_limb);
        for _ in 0..self.limbs.len() - 1 {
            pad_limbs.push(pad_non_top_limb);
        }

        // Step 3: prepare to pad the padding to k * p for some k,
        // so that adding the padding does not change the value mod p.
        let pad_to_kp_gap = Self::limbs_to_value(pad_limbs, self.get_optimization_type()).neg();
        let pad_to_kp_limbs =
            Self::get_limbs_representations(&pad_to_kp_gap, self.get_optimization_type())?;

        // Step 4: the result is self + pad + pad_to_kp - other
        let mut limbs = Vec::with_capacity(self.limbs.len());
        for (i, ((this_limb, other_limb), pad_to_kp_limb)) in self
            .limbs
            .iter()
            .zip(&other.limbs)
            .zip(&pad_to_kp_limbs)
            .enumerate()
        {
            // Index 0 is the top limb and uses the top-limb padding.
            if i != 0 {
                limbs.push(this_limb + pad_non_top_limb + *pad_to_kp_limb - other_limb);
            } else {
                limbs.push(this_limb + pad_top_limb + *pad_to_kp_limb - other_limb);
            }
        }

        let result = AllocatedEmulatedFpVar::<TargetF, BaseF> {
            cs: self.cs(),
            limbs,
            num_of_additions_over_normal_form: self.num_of_additions_over_normal_form
                + (other.num_of_additions_over_normal_form + BaseF::one())
                + (other.num_of_additions_over_normal_form + BaseF::one()),
            is_in_the_normal_form: false,
            target_phantom: PhantomData,
        };

        Ok(result)
    }

    /// Subtract an emulated field element
    #[tracing::instrument(target = "r1cs")]
    pub fn sub(&self, other: &Self) -> R1CSResult<Self> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        let mut result = self.sub_without_reduce(other)?;
        Reducer::<TargetF, BaseF>::post_add_reduce(&mut result)?;
        Ok(result)
    }

    /// Subtract a constant
    #[tracing::instrument(target = "r1cs")]
    pub fn sub_constant(&self, other: &TargetF) -> R1CSResult<Self> {
        self.sub(&Self::constant(self.cs(), *other)?)
    }

    /// Multiply an emulated field element
    #[tracing::instrument(target = "r1cs")]
    pub fn mul(&self, other: &Self) -> R1CSResult<Self> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        self.mul_without_reduce(&other)?.reduce()
    }

    /// Multiply a constant
    pub fn mul_constant(&self, other: &TargetF) -> R1CSResult<Self> {
        self.mul(&Self::constant(self.cs(), *other)?)
    }

    /// Compute the negation of an emulated field element
    #[tracing::instrument(target = "r1cs")]
    pub fn negate(&self) -> R1CSResult<Self> {
        Self::zero(self.cs())?.sub(self)
    }

    /// Compute the inverse of an emulated field element
    #[tracing::instrument(target = "r1cs")]
    pub fn inverse(&self) -> R1CSResult<Self> {
        // Allocate the claimed inverse as a witness (zero if the value is not
        // invertible) and enforce `self * inverse == 1`; a zero input thus
        // yields an unsatisfiable constraint system.
        let inverse = Self::new_witness(self.cs(), || {
            Ok(self.value()?.inverse().unwrap_or_else(TargetF::zero))
        })?;
        let actual_result = self.clone().mul(&inverse)?;
        actual_result.conditional_enforce_equal(&Self::one(self.cs())?, &Boolean::TRUE)?;
        Ok(inverse)
    }

    /// Convert a `TargetF` element into limbs (not constraints)
    /// This is an internal function that would be reused by a number of other
    /// functions
    pub fn get_limbs_representations(
        elem: &TargetF,
        optimization_type: OptimizationType,
    ) -> R1CSResult<Vec<BaseF>> {
        Self::get_limbs_representations_from_big_integer(&elem.into_bigint(), optimization_type)
    }

    /// Obtain the limbs directly from a big int
    pub fn get_limbs_representations_from_big_integer(
        elem: &<TargetF as PrimeField>::BigInt,
        optimization_type: OptimizationType,
    ) -> R1CSResult<Vec<BaseF>> {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            optimization_type,
        );

        // push the lower limbs first
        let mut limbs: Vec<BaseF> = Vec::new();
        let mut cur = *elem;
        for _ in 0..params.num_limbs {
            let cur_bits = cur.to_bits_be(); // `to_bits` is big endian
            let cur_mod_r = <BaseF as PrimeField>::BigInt::from_bits_be(
                &cur_bits[cur_bits.len() - params.bits_per_limb..],
            ); // therefore, the lowest `bits_per_limb` bits is what we want.
            limbs.push(BaseF::from_bigint(cur_mod_r).unwrap());
            cur >>= params.bits_per_limb as u32;
        }

        // then we reverse, so that the limbs are ``big limb first''
        limbs.reverse();

        Ok(limbs)
    }

    /// for advanced use, multiply and output the intermediate representations
    /// (without reduction). These intermediate representations can be added
    /// with each other, and they can later be reduced back to the
    /// `EmulatedFpVar`.
    #[tracing::instrument(target = "r1cs")]
    pub fn mul_without_reduce(
        &self,
        other: &Self,
    ) -> R1CSResult<AllocatedMulResultVar<TargetF, BaseF>> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Step 1: reduce `self` and `other` if necessary
        let mut self_reduced = self.clone();
        let mut other_reduced = other.clone();
        Reducer::<TargetF, BaseF>::pre_mul_reduce(&mut self_reduced, &mut other_reduced)?;

        let mut prod_limbs = Vec::new();
        if self.get_optimization_type() == OptimizationType::Weight {
            // Weight-optimized path: compute the product limbs directly as a
            // schoolbook convolution of the input limbs.
            let zero = FpVar::<BaseF>::zero();

            for _ in 0..2 * params.num_limbs - 1 {
                prod_limbs.push(zero.clone());
            }

            for i in 0..params.num_limbs {
                for j in 0..params.num_limbs {
                    prod_limbs[i + j] =
                        &prod_limbs[i + j] + (&self_reduced.limbs[i] * &other_reduced.limbs[j]);
                }
            }
        } else {
            // Constraint-optimized path: allocate each product limb as a
            // witness computed out-of-circuit...
            let cs = self.cs().or(other.cs());

            for z_index in 0..2 * params.num_limbs - 1 {
                prod_limbs.push(FpVar::new_witness(ns!(cs, "limb product"), || {
                    let mut z_i = BaseF::zero();
                    for i in 0..=min(params.num_limbs - 1, z_index) {
                        let j = z_index - i;
                        if j < params.num_limbs {
                            z_i += &self_reduced.limbs[i]
                                .value()?
                                .mul(&other_reduced.limbs[j].value()?);
                        }
                    }

                    Ok(z_i)
                })?);
            }

            // ...and then verify the convolution by evaluating both sides of
            // the limb-polynomial identity x(c) * y(c) = z(c) at the points
            // c + 1 = 1, 2, ..., 2 * num_limbs - 1.
            for c in 0..(2 * params.num_limbs - 1) {
                let c_pows: Vec<_> = (0..(2 * params.num_limbs - 1))
                    .map(|i| BaseF::from((c + 1) as u128).pow(&vec![i as u64]))
                    .collect();

                let x = self_reduced
                    .limbs
                    .iter()
                    .zip(c_pows.iter())
                    .map(|(var, c_pow)| var * *c_pow)
                    .fold(FpVar::zero(), |sum, i| sum + i);

                let y = other_reduced
                    .limbs
                    .iter()
                    .zip(c_pows.iter())
                    .map(|(var, c_pow)| var * *c_pow)
                    .fold(FpVar::zero(), |sum, i| sum + i);

                let z = prod_limbs
                    .iter()
                    .zip(c_pows.iter())
                    .map(|(var, c_pow)| var * *c_pow)
                    .fold(FpVar::zero(), |sum, i| sum + i);

                z.enforce_equal(&(x * y))?;
            }
        }

        Ok(AllocatedMulResultVar {
            cs: self.cs(),
            limbs: prod_limbs,
            prod_of_num_of_additions: (self_reduced.num_of_additions_over_normal_form
                + BaseF::one())
                * (other_reduced.num_of_additions_over_normal_form + BaseF::one()),
            target_phantom: PhantomData,
        })
    }

    /// The Frobenius map. A no-op here: `TargetF` is a prime field, so
    /// `x^p == x` for every element.
    pub(crate) fn frobenius_map(&self, _power: usize) -> R1CSResult<Self> {
        Ok(self.clone())
    }

    /// Conditionally enforce that `self` and `other` represent the same
    /// target-field value, by showing their difference is a multiple of the
    /// modulus `p`.
    pub(crate) fn conditional_enforce_equal(
        &self,
        other: &Self,
        should_enforce: &Boolean<BaseF>,
    ) -> R1CSResult<()> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Get p
        let p_representations =
            AllocatedEmulatedFpVar::<TargetF, BaseF>::get_limbs_representations_from_big_integer(
                &<TargetF as PrimeField>::MODULUS,
                self.get_optimization_type(),
            )?;
        let p_bigint = limbs_to_bigint(params.bits_per_limb, &p_representations);

        let mut p_gadget_limbs = Vec::new();
        for limb in p_representations.iter() {
            p_gadget_limbs.push(FpVar::<BaseF>::Constant(*limb));
        }
        let p_gadget = AllocatedEmulatedFpVar::<TargetF, BaseF> {
            cs: self.cs(),
            limbs: p_gadget_limbs,
            num_of_additions_over_normal_form: BaseF::one(),
            is_in_the_normal_form: false,
            target_phantom: PhantomData,
        };

        // Get delta = self - other
        let cs = self.cs().or(other.cs()).or(should_enforce.cs());
        let mut delta = self.sub_without_reduce(other)?;
        // If the check is disabled, substitute zero so the equation below is
        // trivially satisfiable.
        delta = should_enforce.select(&delta, &Self::zero(cs.clone())?)?;

        // Allocate k = delta / p
        let k_gadget = FpVar::<BaseF>::new_witness(ns!(cs, "k"), || {
            let mut delta_limbs_values = Vec::<BaseF>::new();
            for limb in delta.limbs.iter() {
                delta_limbs_values.push(limb.value()?);
            }

            let delta_bigint = limbs_to_bigint(params.bits_per_limb, &delta_limbs_values);

            Ok(bigint_to_basefield::<BaseF>(&(delta_bigint / p_bigint)))
        })?;

        let surfeit = overhead!(delta.num_of_additions_over_normal_form + BaseF::one()) + 1;
        // Decompose k into `surfeit` bits, bounding its size.
        Reducer::<TargetF, BaseF>::limb_to_bits(&k_gadget, surfeit)?;

        // Compute k * p
        let mut kp_gadget_limbs = Vec::new();
        for limb in p_gadget.limbs.iter() {
            kp_gadget_limbs.push(limb * &k_gadget);
        }

        // Enforce delta = kp
        Reducer::<TargetF, BaseF>::group_and_check_equality(
            surfeit,
            params.bits_per_limb,
            params.bits_per_limb,
            &delta.limbs,
            &kp_gadget_limbs,
        )?;

        Ok(())
    }

    /// Conditionally enforce `self != other` by exhibiting an inverse of their
    /// difference (which exists iff the difference is nonzero).
    #[tracing::instrument(target = "r1cs")]
    pub(crate) fn conditional_enforce_not_equal(
        &self,
        other: &Self,
        should_enforce: &Boolean<BaseF>,
    ) -> R1CSResult<()> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        let cs = self.cs().or(other.cs()).or(should_enforce.cs());
        // When the check is disabled, substitute one, which always has an
        // inverse.
        let _ = should_enforce
            .select(&self.sub(other)?, &Self::one(cs)?)?
            .inverse()?;
        Ok(())
    }

    /// Map the constraint system's optimization goal to the corresponding
    /// limb-parameter optimization type (`None` defaults to `Constraints`).
    pub(crate) fn get_optimization_type(&self) -> OptimizationType {
        match self.cs().optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        }
    }

    /// Allocates a new variable, but does not check that the allocation's limbs
    /// are in-range.
    fn new_variable_unchecked<T: Borrow<TargetF>>(
        cs: impl Into<Namespace<BaseF>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> R1CSResult<Self> {
        let ns = cs.into();
        let cs = ns.cs();

        let optimization_type = match cs.optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        };

        let zero = TargetF::zero();

        // Fall back to zero when the assignment is unavailable (e.g. during
        // setup).
        let elem = match f() {
            Ok(t) => *(t.borrow()),
            Err(_) => zero,
        };
        let elem_representations = Self::get_limbs_representations(&elem, optimization_type)?;
        let mut limbs = Vec::new();

        for limb in elem_representations.iter() {
            limbs.push(FpVar::<BaseF>::new_variable(
                ark_relations::ns!(cs, "alloc"),
                || Ok(limb),
                mode,
            )?);
        }

        // Non-witness allocations are treated as already in normal form;
        // witnesses are untrusted, so they start with a nonzero counter.
        let num_of_additions_over_normal_form = if mode != AllocationMode::Witness {
            BaseF::zero()
        } else {
            BaseF::one()
        };

        Ok(Self {
            cs,
            limbs,
            num_of_additions_over_normal_form,
            is_in_the_normal_form: mode != AllocationMode::Witness,
            target_phantom: PhantomData,
        })
    }

    /// Check that this element is in-range; i.e., each limb is in-range, and
    /// the whole number is less than the modulus.
    ///
    /// Returns the bits of the element, in little-endian form
    fn enforce_in_range(&self, cs: impl Into<Namespace<BaseF>>) -> R1CSResult<Vec<Boolean<BaseF>>> {
        let ns = cs.into();
        let cs = ns.cs();
        let optimization_type = match cs.optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        };
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            optimization_type,
        );

        // Decompose every non-top limb into exactly `bits_per_limb` bits.
        // Limbs are big-endian, so iterate in reverse to emit LSB-first bits.
        let mut bits = Vec::new();
        for limb in self.limbs.iter().rev().take(params.num_limbs - 1) {
            bits.extend(
                Reducer::<TargetF, BaseF>::limb_to_bits(limb, params.bits_per_limb)?
                    .into_iter()
                    .rev(),
            );
        }

        // The top limb only gets the leftover bit budget, bounding the bit
        // length of the whole element.
        bits.extend(
            Reducer::<TargetF, BaseF>::limb_to_bits(
                &self.limbs[0],
                TargetF::MODULUS_BIT_SIZE as usize - (params.num_limbs - 1) * params.bits_per_limb,
            )?
            .into_iter()
            .rev(),
        );
        Ok(bits)
    }

    /// Allocates a new non-native field witness with value given by the
    /// function `f`. Enforces that the field element has value in `[0, modulus)`,
    /// and returns the bits of its binary representation.
    /// The bits are in little-endian (i.e., the bit at index 0 is the LSB) and the
    /// bit-vector is empty in non-witness allocation modes.
    pub fn new_witness_with_le_bits<T: Borrow<TargetF>>(
        cs: impl Into<Namespace<BaseF>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
    ) -> R1CSResult<(Self, Vec<Boolean<BaseF>>)> {
        let ns = cs.into();
        let cs = ns.cs();

        let this = Self::new_variable_unchecked(ns!(cs, "alloc"), f, AllocationMode::Witness)?;
        let bits = this.enforce_in_range(ns!(cs, "bits"))?;
        Ok((this, bits))
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ToBitsGadget<BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    /// Outputs a little-endian bit decomposition of the element, enforcing
    /// that the decomposed value is smaller than the target-field modulus so
    /// the representation is unique.
    #[tracing::instrument(target = "r1cs")]
    fn to_bits_le(&self) -> R1CSResult<Vec<Boolean<BaseF>>> {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Reduce to the normal form
        // Though, a malicious prover can make it slightly larger than p
        let mut self_normal = self.clone();
        Reducer::<TargetF, BaseF>::pre_eq_reduce(&mut self_normal)?;

        // Therefore, we convert it to bits and enforce that it is in the field
        let mut bits = Vec::<Boolean<BaseF>>::new();
        for limb in self_normal.limbs.iter() {
            bits.extend_from_slice(&Reducer::<TargetF, BaseF>::limb_to_bits(
                &limb,
                params.bits_per_limb,
            )?);
        }
        // Limbs (and their bits) were big-endian; flip to little-endian.
        bits.reverse();

        // b = p - 1, used as the upper bound for the comparison below.
        let mut b = TargetF::characteristic().to_vec();
        assert_eq!(b[0] % 2, 1);
        b[0] -= 1; // This works, because the LSB is one, so there's no borrows.
        let run = Boolean::<BaseF>::enforce_smaller_or_equal_than_le(&bits, b)?;

        // We should always end in a "run" of zeros, because
        // the characteristic is an odd prime. So, this should
        // be empty.
        assert!(run.is_empty());

        Ok(bits)
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ToBytesGadget<BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    /// Serializes the element to bytes: take the canonical little-endian bit
    /// decomposition, zero-pad it to the full width of `TargetF`'s backing
    /// big integer, and pack groups of eight bits into bytes.
    #[tracing::instrument(target = "r1cs")]
    fn to_bytes(&self) -> R1CSResult<Vec<UInt8<BaseF>>> {
        // Full bit capacity of the target field's big-integer representation.
        let capacity = TargetF::BigInt::NUM_LIMBS * 64;
        let mut bit_vec = self.to_bits_le()?;
        assert!(bit_vec.len() <= capacity);
        // Zero-pad so the chunking below divides evenly.
        while bit_vec.len() < capacity {
            bit_vec.push(Boolean::constant(false));
        }
        Ok(bit_vec.chunks(8).map(UInt8::from_bits_le).collect())
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> CondSelectGadget<BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    /// Selects between two emulated elements limb by limb.
    #[tracing::instrument(target = "r1cs")]
    fn conditionally_select(
        cond: &Boolean<BaseF>,
        true_value: &Self,
        false_value: &Self,
    ) -> R1CSResult<Self> {
        assert_eq!(
            true_value.get_optimization_type(),
            false_value.get_optimization_type()
        );

        // Each limb position is selected independently with the same condition.
        let limbs = true_value
            .limbs
            .iter()
            .zip(&false_value.limbs)
            .map(|(t, f)| FpVar::<BaseF>::conditionally_select(cond, t, f))
            .collect::<R1CSResult<Vec<_>>>()?;

        Ok(Self {
            cs: true_value.cs().or(false_value.cs()),
            limbs,
            // Track the worse of the two branches, since either may be chosen.
            num_of_additions_over_normal_form: max(
                true_value.num_of_additions_over_normal_form,
                false_value.num_of_additions_over_normal_form,
            ),
            is_in_the_normal_form: true_value.is_in_the_normal_form
                && false_value.is_in_the_normal_form,
            target_phantom: PhantomData,
        })
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> TwoBitLookupGadget<BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    type TableConstant = TargetF;

    /// Performs a two-bit lookup into a four-entry table of `TargetF`
    /// constants, running one native lookup per limb position.
    #[tracing::instrument(target = "r1cs")]
    fn two_bit_lookup(
        bits: &[Boolean<BaseF>],
        constants: &[Self::TableConstant],
    ) -> R1CSResult<Self> {
        debug_assert!(bits.len() == 2);
        debug_assert!(constants.len() == 4);

        let cs = bits.cs();
        let optimization_type = match cs.optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        };
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            optimization_type,
        );

        // Transpose the table: build one column of four entries per limb
        // position.
        let mut table_per_limb = vec![Vec::new(); params.num_limbs];
        for constant in constants {
            let representation = Self::get_limbs_representations(constant, optimization_type)?;
            for (column, limb) in table_per_limb.iter_mut().zip(&representation) {
                column.push(*limb);
            }
        }

        // Run an independent native two-bit lookup for every limb position.
        let mut limbs = Vec::with_capacity(params.num_limbs);
        for column in &table_per_limb {
            limbs.push(FpVar::<BaseF>::two_bit_lookup(bits, column)?);
        }

        Ok(Self {
            cs,
            limbs,
            num_of_additions_over_normal_form: BaseF::zero(),
            is_in_the_normal_form: true,
            target_phantom: PhantomData,
        })
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ThreeBitCondNegLookupGadget<BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    type TableConstant = TargetF;

    /// Performs a three-bit conditional-negation lookup into a four-entry
    /// table of `TargetF` constants, running one native lookup per limb
    /// position.
    #[tracing::instrument(target = "r1cs")]
    fn three_bit_cond_neg_lookup(
        bits: &[Boolean<BaseF>],
        b0b1: &Boolean<BaseF>,
        constants: &[Self::TableConstant],
    ) -> R1CSResult<Self> {
        debug_assert!(bits.len() == 3);
        debug_assert!(constants.len() == 4);

        let cs = bits.cs().or(b0b1.cs());
        let optimization_type = match cs.optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        };
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            optimization_type,
        );

        // Transpose the table: build one column of four entries per limb
        // position.
        let mut table_per_limb = vec![Vec::new(); params.num_limbs];
        for constant in constants {
            let representation = Self::get_limbs_representations(constant, optimization_type)?;
            for (column, limb) in table_per_limb.iter_mut().zip(&representation) {
                column.push(*limb);
            }
        }

        // Run an independent native lookup for every limb position.
        let mut limbs = Vec::with_capacity(params.num_limbs);
        for column in &table_per_limb {
            limbs.push(FpVar::<BaseF>::three_bit_cond_neg_lookup(bits, b0b1, column)?);
        }

        Ok(Self {
            cs,
            limbs,
            num_of_additions_over_normal_form: BaseF::zero(),
            is_in_the_normal_form: true,
            target_phantom: PhantomData,
        })
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> AllocVar<TargetF, BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    /// Allocates the element and, for witness allocations only, enforces that
    /// its limbs are in range.
    fn new_variable<T: Borrow<TargetF>>(
        cs: impl Into<Namespace<BaseF>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> R1CSResult<Self> {
        let namespace = cs.into();
        let cs = namespace.cs();
        // Allocate first without range checks...
        let var = Self::new_variable_unchecked(ns!(cs, "alloc"), f, mode)?;
        // ...then range-check witnesses, which are not trusted to be canonical.
        if matches!(mode, AllocationMode::Witness) {
            var.enforce_in_range(ns!(cs, "bits"))?;
        }
        Ok(var)
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ToConstraintFieldGadget<BaseF>
    for AllocatedEmulatedFpVar<TargetF, BaseF>
{
    /// Produces a unique representation of the emulated element as native
    /// field elements, by bit-decomposing it and re-packing the bits with
    /// weight-optimized limb parameters.
    fn to_constraint_field(&self) -> R1CSResult<Vec<FpVar<BaseF>>> {
        // Step 1: a canonical little-endian bit decomposition of the element.
        let bits = self.to_bits_le()?;

        // Step 2: the weight-optimized parameters often yield fewer limbs.
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            OptimizationType::Weight,
        );

        // Step 3: pack each chunk of bits into one native element, weighting
        // bits by successive powers of two (LSB first within the chunk).
        let mut limbs: Vec<FpVar<BaseF>> = bits
            .chunks(params.bits_per_limb)
            .map(|chunk| {
                let mut packed = FpVar::<BaseF>::zero();
                let mut weight = BaseF::one();
                for bit in chunk {
                    packed += FpVar::from(bit.clone()) * weight;
                    weight.double_in_place();
                }
                packed
            })
            .collect();

        // Step 4: flip to big-limb-first order and output.
        limbs.reverse();
        Ok(limbs)
    }
}
// Implementation of a few traits
impl<TargetF: PrimeField, BaseF: PrimeField> Clone for AllocatedEmulatedFpVar<TargetF, BaseF> {
    fn clone(&self) -> Self {
        // Field-by-field copy: `cs()` hands back a clone of the
        // constraint-system reference, and the limb gadgets are cloned.
        Self {
            cs: self.cs(),
            limbs: self.limbs.clone(),
            num_of_additions_over_normal_form: self.num_of_additions_over_normal_form,
            is_in_the_normal_form: self.is_in_the_normal_form,
            target_phantom: PhantomData,
        }
    }
}

View File

@@ -0,0 +1,286 @@
use super::{
params::{get_params, OptimizationType},
reduce::{bigint_to_basefield, limbs_to_bigint, Reducer},
AllocatedEmulatedFpVar,
};
use crate::{fields::fp::FpVar, prelude::*};
use ark_ff::PrimeField;
use ark_relations::{
ns,
r1cs::{ConstraintSystemRef, OptimizationGoal, Result as R1CSResult},
};
use ark_std::{marker::PhantomData, vec::Vec};
use num_bigint::BigUint;
/// The allocated form of `MulResultVar` (introduced below).
///
/// Holds the unreduced, wider (up to `2 * num_limbs - 1` limbs) representation
/// produced by multiplying two `AllocatedEmulatedFpVar`s; `reduce` brings it
/// back to the standard representation.
#[derive(Debug)]
#[must_use]
pub struct AllocatedMulResultVar<TargetF: PrimeField, BaseF: PrimeField> {
    /// Constraint system reference
    pub cs: ConstraintSystemRef<BaseF>,
    /// Limbs of the intermediate representations
    pub limbs: Vec<FpVar<BaseF>>,
    /// The cumulative num of additions
    pub prod_of_num_of_additions: BaseF,
    #[doc(hidden)]
    pub target_phantom: PhantomData<TargetF>,
}
impl<TargetF: PrimeField, BaseF: PrimeField> From<&AllocatedEmulatedFpVar<TargetF, BaseF>>
    for AllocatedMulResultVar<TargetF, BaseF>
{
    /// Promotes an (unmultiplied) emulated element into the wider
    /// multiplication-result representation by zero-padding it to
    /// `2 * num_limbs - 1` limbs.
    fn from(src: &AllocatedEmulatedFpVar<TargetF, BaseF>) -> Self {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            src.get_optimization_type(),
        );

        // Pad with zero limbs on the most-significant side: reverse to
        // little-endian limb order, grow with zeros at the end, then reverse
        // back to big-limb-first order.
        let mut limbs = src.limbs.clone();
        limbs.reverse();
        limbs.resize(2 * params.num_limbs - 1, FpVar::<BaseF>::zero());
        limbs.reverse();

        // Shift the additive counter by one, matching the `(x + 1)` convention
        // that `mul_without_reduce` uses for `prod_of_num_of_additions`.
        let prod_of_num_of_additions = src.num_of_additions_over_normal_form + &BaseF::one();

        Self {
            cs: src.cs(),
            limbs,
            prod_of_num_of_additions,
            target_phantom: PhantomData,
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> AllocatedMulResultVar<TargetF, BaseF> {
    /// Get the CS
    pub fn cs(&self) -> ConstraintSystemRef<BaseF> {
        self.cs.clone()
    }

    /// Get the value of the multiplication result
    pub fn value(&self) -> R1CSResult<TargetF> {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Recover the modulus p as a big integer.
        let p_representations =
            AllocatedEmulatedFpVar::<TargetF, BaseF>::get_limbs_representations_from_big_integer(
                &<TargetF as PrimeField>::MODULUS,
                self.get_optimization_type(),
            )?;
        let p_bigint = limbs_to_bigint(params.bits_per_limb, &p_representations);

        let mut limbs_values = Vec::<BaseF>::new();
        for limb in self.limbs.iter() {
            limbs_values.push(limb.value().unwrap_or_default());
        }
        let value_bigint = limbs_to_bigint(params.bits_per_limb, &limbs_values);

        // Reduce the wide integer mod p back into the target field
        // (`bigint_to_basefield` is instantiated at `TargetF` here, despite
        // its name).
        let res = bigint_to_basefield::<TargetF>(&(value_bigint % p_bigint));
        Ok(res)
    }

    /// Constraints for reducing the result of a multiplication mod p, to get an
    /// original representation.
    pub fn reduce(&self) -> R1CSResult<AllocatedEmulatedFpVar<TargetF, BaseF>> {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Step 1: get p
        let p_representations =
            AllocatedEmulatedFpVar::<TargetF, BaseF>::get_limbs_representations_from_big_integer(
                &<TargetF as PrimeField>::MODULUS,
                self.get_optimization_type(),
            )?;
        let p_bigint = limbs_to_bigint(params.bits_per_limb, &p_representations);

        let mut p_gadget_limbs = Vec::new();
        for limb in p_representations.iter() {
            p_gadget_limbs.push(FpVar::<BaseF>::new_constant(self.cs(), limb)?);
        }
        let p_gadget = AllocatedEmulatedFpVar::<TargetF, BaseF> {
            cs: self.cs(),
            limbs: p_gadget_limbs,
            num_of_additions_over_normal_form: BaseF::one(),
            is_in_the_normal_form: false,
            target_phantom: PhantomData,
        };

        // Step 2: compute surfeit
        let surfeit = overhead!(self.prod_of_num_of_additions + BaseF::one()) + 1 + 1;

        // Step 3: allocate k
        // Out of circuit, k = value / p; in circuit it is committed bit by bit
        // (LSB first) so its magnitude is bounded.
        let k_bits = {
            let mut res = Vec::new();

            let mut limbs_values = Vec::<BaseF>::new();
            for limb in self.limbs.iter() {
                limbs_values.push(limb.value().unwrap_or_default());
            }

            let value_bigint = limbs_to_bigint(params.bits_per_limb, &limbs_values);
            let mut k_cur = value_bigint / p_bigint;

            let total_len = TargetF::MODULUS_BIT_SIZE as usize + surfeit;

            for _ in 0..total_len {
                res.push(Boolean::<BaseF>::new_witness(self.cs(), || {
                    Ok(&k_cur % 2u64 == BigUint::from(1u64))
                })?);
                k_cur /= 2u64;
            }

            res
        };

        // Re-pack k's bits into limbs (big limb first), mirroring the layout
        // of `AllocatedEmulatedFpVar`.
        let k_limbs = {
            let zero = FpVar::Constant(BaseF::zero());
            let mut limbs = Vec::new();

            let mut k_bits_cur = k_bits.clone();

            for i in 0..params.num_limbs {
                let this_limb_size = if i != params.num_limbs - 1 {
                    params.bits_per_limb
                } else {
                    // The final (most significant) chunk takes all the
                    // leftover bits.
                    k_bits.len() - (params.num_limbs - 1) * params.bits_per_limb
                };

                let this_limb_bits = k_bits_cur[0..this_limb_size].to_vec();
                k_bits_cur = k_bits_cur[this_limb_size..].to_vec();

                let mut limb = zero.clone();
                let mut cur = BaseF::one();

                // Weight the bits by successive powers of two (LSB first).
                for bit in this_limb_bits.iter() {
                    limb += &(FpVar::<BaseF>::from(bit.clone()) * cur);
                    cur.double_in_place();
                }
                limbs.push(limb);
            }

            limbs.reverse();
            limbs
        };

        let k_gadget = AllocatedEmulatedFpVar::<TargetF, BaseF> {
            cs: self.cs(),
            limbs: k_limbs,
            num_of_additions_over_normal_form: self.prod_of_num_of_additions,
            is_in_the_normal_form: false,
            target_phantom: PhantomData,
        };

        let cs = self.cs();

        // Allocate the remainder r = self mod p as a fresh witness element.
        let r_gadget = AllocatedEmulatedFpVar::<TargetF, BaseF>::new_witness(ns!(cs, "r"), || {
            Ok(self.value()?)
        })?;

        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            self.get_optimization_type(),
        );

        // Compute the limbs of k * p via a schoolbook convolution.
        let mut prod_limbs = Vec::new();
        let zero = FpVar::<BaseF>::zero();

        for _ in 0..2 * params.num_limbs - 1 {
            prod_limbs.push(zero.clone());
        }

        for i in 0..params.num_limbs {
            for j in 0..params.num_limbs {
                prod_limbs[i + j] = &prod_limbs[i + j] + (&p_gadget.limbs[i] * &k_gadget.limbs[j]);
            }
        }

        let mut kp_plus_r_gadget = Self {
            cs,
            limbs: prod_limbs,
            prod_of_num_of_additions: (p_gadget.num_of_additions_over_normal_form + BaseF::one())
                * (k_gadget.num_of_additions_over_normal_form + BaseF::one()),
            target_phantom: PhantomData,
        };

        // Add r into k * p, aligning limbs at the least-significant end.
        let kp_plus_r_limbs_len = kp_plus_r_gadget.limbs.len();
        for (i, limb) in r_gadget.limbs.iter().rev().enumerate() {
            kp_plus_r_gadget.limbs[kp_plus_r_limbs_len - 1 - i] += limb;
        }

        // Enforce self == k * p + r, which establishes r == self mod p.
        Reducer::<TargetF, BaseF>::group_and_check_equality(
            surfeit,
            2 * params.bits_per_limb,
            params.bits_per_limb,
            &self.limbs,
            &kp_plus_r_gadget.limbs,
        )?;

        Ok(r_gadget)
    }

    /// Add unreduced elements.
    #[tracing::instrument(target = "r1cs")]
    pub fn add(&self, other: &Self) -> R1CSResult<Self> {
        assert_eq!(self.get_optimization_type(), other.get_optimization_type());

        // Limb-wise addition; overflow is tracked via the addition counters.
        let mut new_limbs = Vec::new();

        for (l1, l2) in self.limbs.iter().zip(other.limbs.iter()) {
            let new_limb = l1 + l2;
            new_limbs.push(new_limb);
        }

        Ok(Self {
            cs: self.cs(),
            limbs: new_limbs,
            prod_of_num_of_additions: self.prod_of_num_of_additions
                + other.prod_of_num_of_additions,
            target_phantom: PhantomData,
        })
    }

    /// Add native constant elem
    #[tracing::instrument(target = "r1cs")]
    pub fn add_constant(&self, other: &TargetF) -> R1CSResult<Self> {
        let mut other_limbs = AllocatedEmulatedFpVar::<TargetF, BaseF>::get_limbs_representations(
            other,
            self.get_optimization_type(),
        )?;
        // Reverse to little-endian so the constant's (narrower) limbs line up
        // with this wider representation at the least-significant end.
        other_limbs.reverse();

        let mut new_limbs = Vec::new();

        for (i, limb) in self.limbs.iter().rev().enumerate() {
            if i < other_limbs.len() {
                new_limbs.push(limb + other_limbs[i]);
            } else {
                new_limbs.push((*limb).clone());
            }
        }

        new_limbs.reverse();

        Ok(Self {
            cs: self.cs(),
            limbs: new_limbs,
            prod_of_num_of_additions: self.prod_of_num_of_additions + BaseF::one(),
            target_phantom: PhantomData,
        })
    }

    /// Map the constraint system's optimization goal to the corresponding
    /// limb-parameter optimization type (`None` defaults to `Constraints`).
    pub(crate) fn get_optimization_type(&self) -> OptimizationType {
        match self.cs().optimization_goal() {
            OptimizationGoal::None => OptimizationType::Constraints,
            OptimizationGoal::Constraints => OptimizationType::Constraints,
            OptimizationGoal::Weight => OptimizationType::Weight,
        }
    }
}

View File

@@ -0,0 +1,472 @@
use super::{params::OptimizationType, AllocatedEmulatedFpVar, MulResultVar};
use crate::{
boolean::Boolean,
fields::{fp::FpVar, FieldVar},
prelude::*,
R1CSVar, ToConstraintFieldGadget,
};
use ark_ff::{BigInteger, PrimeField};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, Result as R1CSResult, SynthesisError};
use ark_std::{
borrow::Borrow,
hash::{Hash, Hasher},
vec::Vec,
};
/// A gadget for representing non-native (`TargetF`) field elements over the
/// constraint field (`BaseF`).
///
/// Arithmetic between constants is folded off-circuit; only the `Var`
/// variant carries allocated limbs and generates constraints.
#[derive(Clone, Debug)]
#[must_use]
pub enum EmulatedFpVar<TargetF: PrimeField, BaseF: PrimeField> {
    /// A constant known at circuit-construction time; costs no constraints.
    Constant(TargetF),
    /// An allocated gadget, represented as limbs over the base field.
    Var(AllocatedEmulatedFpVar<TargetF, BaseF>),
}
impl<TargetF: PrimeField, BaseF: PrimeField> PartialEq for EmulatedFpVar<TargetF, BaseF> {
    /// Compares the underlying *values*. Note that two variables whose values
    /// cannot be retrieved both fall back to the default value and therefore
    /// compare equal.
    fn eq(&self, other: &Self) -> bool {
        self.value().unwrap_or_default() == other.value().unwrap_or_default()
    }
}
// `eq` above compares (defaulted) underlying values, which is reflexive.
impl<TargetF: PrimeField, BaseF: PrimeField> Eq for EmulatedFpVar<TargetF, BaseF> {}
impl<TargetF: PrimeField, BaseF: PrimeField> Hash for EmulatedFpVar<TargetF, BaseF> {
    /// Hashes the underlying value (default if unavailable), consistent with
    /// the `PartialEq` implementation.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.value().unwrap_or_default().hash(state);
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> R1CSVar<BaseF> for EmulatedFpVar<TargetF, BaseF> {
    type Value = TargetF;
    /// Returns the constraint system this variable lives in; `None` for
    /// constants, which are not allocated anywhere.
    fn cs(&self) -> ConstraintSystemRef<BaseF> {
        match self {
            Self::Constant(_) => ConstraintSystemRef::None,
            Self::Var(a) => a.cs(),
        }
    }
    /// Returns the underlying `TargetF` value of this variable.
    fn value(&self) -> R1CSResult<Self::Value> {
        match self {
            Self::Constant(v) => Ok(*v),
            Self::Var(v) => v.value(),
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> From<Boolean<BaseF>>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// Converts a boolean gadget into the emulated field element `0` or `1`.
    fn from(other: Boolean<BaseF>) -> Self {
        match other {
            // A constant boolean maps directly to a constant field element.
            Boolean::Constant(b) => Self::Constant(TargetF::from(b as u128)),
            // A variable boolean selects between the constants one and zero.
            cond => {
                let one = Self::Constant(TargetF::one());
                let zero = Self::Constant(TargetF::zero());
                Self::conditionally_select(&cond, &one, &zero).unwrap()
            },
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> From<AllocatedEmulatedFpVar<TargetF, BaseF>>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// Wraps an allocated gadget in the `Var` variant.
    fn from(other: AllocatedEmulatedFpVar<TargetF, BaseF>) -> Self {
        Self::Var(other)
    }
}
// Marker impls that enable the blanket arithmetic-operator bounds for both
// owned values and references of `EmulatedFpVar`.
impl<'a, TargetF: PrimeField, BaseF: PrimeField> FieldOpsBounds<'a, TargetF, Self>
    for EmulatedFpVar<TargetF, BaseF>
{
}
impl<'a, TargetF: PrimeField, BaseF: PrimeField>
    FieldOpsBounds<'a, TargetF, EmulatedFpVar<TargetF, BaseF>>
    for &'a EmulatedFpVar<TargetF, BaseF>
{
}
impl<TargetF: PrimeField, BaseF: PrimeField> FieldVar<TargetF, BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// The additive identity, as a constant (no allocation).
    fn zero() -> Self {
        Self::Constant(TargetF::zero())
    }
    /// The multiplicative identity, as a constant (no allocation).
    fn one() -> Self {
        Self::Constant(TargetF::one())
    }
    /// Embeds a native `TargetF` value as a constant gadget.
    fn constant(v: TargetF) -> Self {
        Self::Constant(v)
    }
    #[tracing::instrument(target = "r1cs")]
    fn negate(&self) -> R1CSResult<Self> {
        match self {
            Self::Constant(c) => Ok(Self::Constant(-*c)),
            Self::Var(v) => Ok(Self::Var(v.negate()?)),
        }
    }
    #[tracing::instrument(target = "r1cs")]
    fn inverse(&self) -> R1CSResult<Self> {
        match self {
            // Inverting a zero constant yields zero (via `unwrap_or_default`)
            // instead of an error.
            Self::Constant(c) => Ok(Self::Constant(c.inverse().unwrap_or_default())),
            Self::Var(v) => Ok(Self::Var(v.inverse()?)),
        }
    }
    #[tracing::instrument(target = "r1cs")]
    fn frobenius_map(&self, power: usize) -> R1CSResult<Self> {
        match self {
            Self::Constant(c) => Ok(Self::Constant({
                let mut tmp = *c;
                tmp.frobenius_map_in_place(power);
                tmp
            })),
            Self::Var(v) => Ok(Self::Var(v.frobenius_map(power)?)),
        }
    }
}
// `+` / `+=` for `EmulatedFpVar` (and mixed with native `TargetF` values):
// constant + constant folds off-circuit; otherwise delegates to the allocated
// gadget's `add` / `add_constant`.
impl_bounded_ops!(
    EmulatedFpVar<TargetF, BaseF>,
    TargetF,
    Add,
    add,
    AddAssign,
    add_assign,
    |this: &'a EmulatedFpVar<TargetF, BaseF>, other: &'a EmulatedFpVar<TargetF, BaseF>| {
        use EmulatedFpVar::*;
        match (this, other) {
            (Constant(c1), Constant(c2)) => Constant(*c1 + c2),
            (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.add_constant(c).unwrap()),
            (Var(v1), Var(v2)) => Var(v1.add(v2).unwrap()),
        }
    },
    |this: &'a EmulatedFpVar<TargetF, BaseF>, other: TargetF| { this + &EmulatedFpVar::Constant(other) },
    (TargetF: PrimeField, BaseF: PrimeField),
);
// `-` / `-=` for `EmulatedFpVar` (and mixed with native `TargetF` values).
// `constant - var` is computed as `-(var - constant)`, since the allocated
// gadget only provides `sub_constant`.
impl_bounded_ops!(
    EmulatedFpVar<TargetF, BaseF>,
    TargetF,
    Sub,
    sub,
    SubAssign,
    sub_assign,
    |this: &'a EmulatedFpVar<TargetF, BaseF>, other: &'a EmulatedFpVar<TargetF, BaseF>| {
        use EmulatedFpVar::*;
        match (this, other) {
            (Constant(c1), Constant(c2)) => Constant(*c1 - c2),
            (Var(v), Constant(c)) => Var(v.sub_constant(c).unwrap()),
            (Constant(c), Var(v)) => Var(v.sub_constant(c).unwrap().negate().unwrap()),
            (Var(v1), Var(v2)) => Var(v1.sub(v2).unwrap()),
        }
    },
    |this: &'a EmulatedFpVar<TargetF, BaseF>, other: TargetF| {
        this - &EmulatedFpVar::Constant(other)
    },
    (TargetF: PrimeField, BaseF: PrimeField),
);
// `*` / `*=` for `EmulatedFpVar` (and mixed with native `TargetF` values).
// Multiplication by a native zero short-circuits to the zero constant,
// avoiding any constraint generation.
impl_bounded_ops!(
    EmulatedFpVar<TargetF, BaseF>,
    TargetF,
    Mul,
    mul,
    MulAssign,
    mul_assign,
    |this: &'a EmulatedFpVar<TargetF, BaseF>, other: &'a EmulatedFpVar<TargetF, BaseF>| {
        use EmulatedFpVar::*;
        match (this, other) {
            (Constant(c1), Constant(c2)) => Constant(*c1 * c2),
            (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.mul_constant(c).unwrap()),
            (Var(v1), Var(v2)) => Var(v1.mul(v2).unwrap()),
        }
    },
    |this: &'a EmulatedFpVar<TargetF, BaseF>, other: TargetF| {
        if other.is_zero() {
            EmulatedFpVar::zero()
        } else {
            this * &EmulatedFpVar::Constant(other)
        }
    },
    (TargetF: PrimeField, BaseF: PrimeField),
);
/// *************************************************************************
/// *************************************************************************
impl<TargetF: PrimeField, BaseF: PrimeField> EqGadget<BaseF> for EmulatedFpVar<TargetF, BaseF> {
    /// Returns a boolean that is true iff `self` and `other` hold the same
    /// value. For non-constants, the claimed result is allocated as a
    /// witness and then enforced in both directions.
    #[tracing::instrument(target = "r1cs")]
    fn is_eq(&self, other: &Self) -> R1CSResult<Boolean<BaseF>> {
        let cs = self.cs().or(other.cs());
        if cs == ConstraintSystemRef::None {
            // Both operands are constants: compare off-circuit.
            Ok(Boolean::Constant(self.value()? == other.value()?))
        } else {
            // Witness the comparison result, then constrain it: the operands
            // must be equal when it is true and unequal when it is false.
            let should_enforce_equal =
                Boolean::new_witness(cs, || Ok(self.value()? == other.value()?))?;
            self.conditional_enforce_equal(other, &should_enforce_equal)?;
            self.conditional_enforce_not_equal(other, &should_enforce_equal.not())?;
            Ok(should_enforce_equal)
        }
    }
    /// Enforces `self == other` whenever `should_enforce` is true.
    #[tracing::instrument(target = "r1cs")]
    fn conditional_enforce_equal(
        &self,
        other: &Self,
        should_enforce: &Boolean<BaseF>,
    ) -> R1CSResult<()> {
        match (self, other) {
            (Self::Constant(c1), Self::Constant(c2)) => {
                // Unequal constants: the condition itself must be false.
                if c1 != c2 {
                    should_enforce.enforce_equal(&Boolean::FALSE)?;
                }
                Ok(())
            },
            (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => {
                // Promote the constant to an allocated gadget, then compare.
                let cs = v.cs();
                let c = AllocatedEmulatedFpVar::new_constant(cs, c)?;
                c.conditional_enforce_equal(v, should_enforce)
            },
            (Self::Var(v1), Self::Var(v2)) => v1.conditional_enforce_equal(v2, should_enforce),
        }
    }
    /// Enforces `self != other` whenever `should_enforce` is true.
    #[tracing::instrument(target = "r1cs")]
    fn conditional_enforce_not_equal(
        &self,
        other: &Self,
        should_enforce: &Boolean<BaseF>,
    ) -> R1CSResult<()> {
        match (self, other) {
            (Self::Constant(c1), Self::Constant(c2)) => {
                // Equal constants: the condition itself must be false.
                if c1 == c2 {
                    should_enforce.enforce_equal(&Boolean::FALSE)?;
                }
                Ok(())
            },
            (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => {
                // Promote the constant to an allocated gadget, then compare.
                let cs = v.cs();
                let c = AllocatedEmulatedFpVar::new_constant(cs, c)?;
                c.conditional_enforce_not_equal(v, should_enforce)
            },
            (Self::Var(v1), Self::Var(v2)) => v1.conditional_enforce_not_equal(v2, should_enforce),
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ToBitsGadget<BaseF> for EmulatedFpVar<TargetF, BaseF> {
    /// Outputs the unique little-endian bit decomposition of `self`.
    #[tracing::instrument(target = "r1cs")]
    fn to_bits_le(&self) -> R1CSResult<Vec<Boolean<BaseF>>> {
        match self {
            // A constant's decomposition is already unique.
            Self::Constant(_) => self.to_non_unique_bits_le(),
            Self::Var(v) => v.to_bits_le(),
        }
    }
    /// Outputs a (possibly non-unique) little-endian bit decomposition,
    /// truncated to `TargetF::MODULUS_BIT_SIZE` bits.
    #[tracing::instrument(target = "r1cs")]
    fn to_non_unique_bits_le(&self) -> R1CSResult<Vec<Boolean<BaseF>>> {
        use ark_ff::BitIteratorLE;
        match self {
            Self::Constant(c) => Ok(BitIteratorLE::new(&c.into_bigint())
                .take((TargetF::MODULUS_BIT_SIZE) as usize)
                .map(Boolean::constant)
                .collect::<Vec<_>>()),
            Self::Var(v) => v.to_non_unique_bits_le(),
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ToBytesGadget<BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// Outputs the unique byte decomposition of `self` in *little-endian*
    /// form.
    #[tracing::instrument(target = "r1cs")]
    fn to_bytes(&self) -> R1CSResult<Vec<UInt8<BaseF>>> {
        match self {
            Self::Constant(c) => Ok(UInt8::constant_vec(
                c.into_bigint().to_bytes_le().as_slice(),
            )),
            Self::Var(v) => v.to_bytes(),
        }
    }
    /// Outputs a (possibly non-unique) little-endian byte decomposition.
    /// For constants this is identical to `to_bytes`.
    #[tracing::instrument(target = "r1cs")]
    fn to_non_unique_bytes(&self) -> R1CSResult<Vec<UInt8<BaseF>>> {
        match self {
            Self::Constant(c) => Ok(UInt8::constant_vec(
                c.into_bigint().to_bytes_le().as_slice(),
            )),
            Self::Var(v) => v.to_non_unique_bytes(),
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> CondSelectGadget<BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// Returns `true_value` if `cond` is true, else `false_value`.
    #[tracing::instrument(target = "r1cs")]
    fn conditionally_select(
        cond: &Boolean<BaseF>,
        true_value: &Self,
        false_value: &Self,
    ) -> R1CSResult<Self> {
        match cond {
            // A constant condition picks a branch off-circuit.
            Boolean::Constant(true) => Ok(true_value.clone()),
            Boolean::Constant(false) => Ok(false_value.clone()),
            _ => {
                // Promote constant branches to allocated gadgets so the
                // selection can be done limb-wise.
                let cs = cond.cs();
                let true_value = match true_value {
                    Self::Constant(f) => AllocatedEmulatedFpVar::new_constant(cs.clone(), f)?,
                    Self::Var(v) => v.clone(),
                };
                let false_value = match false_value {
                    Self::Constant(f) => AllocatedEmulatedFpVar::new_constant(cs, f)?,
                    Self::Var(v) => v.clone(),
                };
                cond.select(&true_value, &false_value).map(Self::Var)
            },
        }
    }
}
/// Uses two bits to perform a lookup into a table
/// `b` is little-endian: `b[0]` is LSB.
impl<TargetF: PrimeField, BaseF: PrimeField> TwoBitLookupGadget<BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    type TableConstant = TargetF;
    /// Looks up `c[b[0] + 2*b[1]]`.
    #[tracing::instrument(target = "r1cs")]
    fn two_bit_lookup(b: &[Boolean<BaseF>], c: &[Self::TableConstant]) -> R1CSResult<Self> {
        debug_assert_eq!(b.len(), 2);
        debug_assert_eq!(c.len(), 4);
        if b.cs().is_none() {
            // We're in the constant case: index the table directly.
            let lsb = b[0].value()? as usize;
            let msb = b[1].value()? as usize;
            let index = lsb + (msb << 1);
            Ok(Self::Constant(c[index]))
        } else {
            AllocatedEmulatedFpVar::two_bit_lookup(b, c).map(Self::Var)
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ThreeBitCondNegLookupGadget<BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    type TableConstant = TargetF;
    /// Looks up `c[b[0] + 2*b[1]]` and negates the result when `b[2]` is set.
    #[tracing::instrument(target = "r1cs")]
    fn three_bit_cond_neg_lookup(
        b: &[Boolean<BaseF>],
        b0b1: &Boolean<BaseF>,
        c: &[Self::TableConstant],
    ) -> R1CSResult<Self> {
        debug_assert_eq!(b.len(), 3);
        debug_assert_eq!(c.len(), 4);
        if b.cs().or(b0b1.cs()).is_none() {
            // We're in the constant case: index the table and conditionally
            // negate off-circuit.
            let lsb = b[0].value()? as usize;
            let msb = b[1].value()? as usize;
            let index = lsb + (msb << 1);
            let intermediate = c[index];
            let is_negative = b[2].value()?;
            let y = if is_negative {
                -intermediate
            } else {
                intermediate
            };
            Ok(Self::Constant(y))
        } else {
            AllocatedEmulatedFpVar::three_bit_cond_neg_lookup(b, b0b1, c).map(Self::Var)
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> AllocVar<TargetF, BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// Allocates a new variable. Constant-mode allocation (or allocation
    /// without a constraint system) produces an unallocated `Constant`.
    fn new_variable<T: Borrow<TargetF>>(
        cs: impl Into<Namespace<BaseF>>,
        f: impl FnOnce() -> Result<T, SynthesisError>,
        mode: AllocationMode,
    ) -> R1CSResult<Self> {
        let ns = cs.into();
        let cs = ns.cs();
        if cs == ConstraintSystemRef::None || mode == AllocationMode::Constant {
            Ok(Self::Constant(*f()?.borrow()))
        } else {
            AllocatedEmulatedFpVar::new_variable(cs, f, mode).map(Self::Var)
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> ToConstraintFieldGadget<BaseF>
    for EmulatedFpVar<TargetF, BaseF>
{
    /// Decomposes `self` into a vector of base-field variables (its limbs).
    #[tracing::instrument(target = "r1cs")]
    fn to_constraint_field(&self) -> R1CSResult<Vec<FpVar<BaseF>>> {
        // Constants are converted using the weight-optimized parameters,
        // because those produce fewer limbs and thus fewer field elements.
        match self {
            Self::Constant(c) => Ok(AllocatedEmulatedFpVar::get_limbs_representations(
                c,
                OptimizationType::Weight,
            )?
            .into_iter()
            .map(FpVar::constant)
            .collect()),
            Self::Var(v) => v.to_constraint_field(),
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> EmulatedFpVar<TargetF, BaseF> {
    /// The `mul_without_reduce` for `EmulatedFpVar`: multiplies without the
    /// final reduction, returning the wider `MulResultVar` representation.
    #[tracing::instrument(target = "r1cs")]
    pub fn mul_without_reduce(&self, other: &Self) -> R1CSResult<MulResultVar<TargetF, BaseF>> {
        match self {
            Self::Constant(c) => match other {
                // Both constants: fold the product off-circuit.
                Self::Constant(other_c) => Ok(MulResultVar::Constant(*c * other_c)),
                Self::Var(other_v) => {
                    // NOTE(review): `self` is a constant here, so `self.cs()`
                    // is `ConstraintSystemRef::None`; presumably
                    // `new_constant` tolerates a `None` cs — confirm, or
                    // consider `other_v.cs()` instead.
                    let self_v =
                        AllocatedEmulatedFpVar::<TargetF, BaseF>::new_constant(self.cs(), c)?;
                    Ok(MulResultVar::Var(other_v.mul_without_reduce(&self_v)?))
                },
            },
            Self::Var(v) => {
                // Promote a constant right-hand side to an allocated gadget.
                let other_v = match other {
                    Self::Constant(other_c) => {
                        AllocatedEmulatedFpVar::<TargetF, BaseF>::new_constant(self.cs(), other_c)?
                    },
                    Self::Var(other_v) => other_v.clone(),
                };
                Ok(MulResultVar::Var(v.mul_without_reduce(&other_v)?))
            },
        }
    }
}

View File

@@ -0,0 +1,202 @@
//! ## Overview
//!
//! This module implements a field gadget for a prime field `Fp` over another
//! prime field `Fq` where `p != q`.
//!
//! When writing constraint systems for many cryptographic proofs, we are
//! restricted to a native field (e.g., the scalar field of the pairing-friendly
//! curve). This can be inconvenient; for example, the recursive composition of
//! proofs via cycles of curves requires the verifier to compute over a
//! non-native field.
//!
//! The library makes it possible to write computations over a non-native field
//! in the same way one would write computations over the native field. This
//! naturally introduces additional overhead, which we minimize using a variety
//! of optimizations. (Nevertheless, the overhead is still substantial, and
//! native fields should be used where possible.)
//!
//! ## Usage
//!
//! Because [`EmulatedFpVar`] implements the [`FieldVar`] trait in arkworks,
//! we can treat it like a native prime field variable ([`FpVar`]).
//!
//! We can do the standard field operations, such as `+`, `-`, and `*`. See the
//! following example:
//!
//! ```rust
//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
//! # use ark_std::UniformRand;
//! # use ark_relations::{ns, r1cs::ConstraintSystem};
//! # use ark_r1cs_std::prelude::*;
//! use ark_r1cs_std::fields::emulated_fp::EmulatedFpVar;
//! use ark_bls12_377::{Fr, Fq};
//!
//! # let mut rng = ark_std::test_rng();
//! # let a_value = Fr::rand(&mut rng);
//! # let b_value = Fr::rand(&mut rng);
//! # let cs = ConstraintSystem::<Fq>::new_ref();
//!
//! let a = EmulatedFpVar::<Fr, Fq>::new_witness(ns!(cs, "a"), || Ok(a_value))?;
//! let b = EmulatedFpVar::<Fr, Fq>::new_witness(ns!(cs, "b"), || Ok(b_value))?;
//!
//! // add
//! let a_plus_b = &a + &b;
//!
//! // sub
//! let a_minus_b = &a - &b;
//!
//! // multiply
//! let a_times_b = &a * &b;
//!
//! // enforce equality
//! a.enforce_equal(&b)?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Advanced optimization
//!
//! After each multiplication, our library internally performs a *reduce*
//! operation, which reduces an intermediate type [`MulResultVar`]
//! to the normalized type [`EmulatedFpVar`]. This enables a user to
//! seamlessly perform a sequence of operations without worrying about the
//! underlying details.
//!
//! However, this operation is expensive and is sometimes avoidable. We can
//! reduce the number of constraints by using this intermediate type, which only
//! supports additions. To multiply, it must be reduced back to
//! [`EmulatedFpVar`]. See below for a skeleton example.
//!
//! ---
//!
//! To compute `a * b + c * d`, the straightforward (but more expensive)
//! implementation is as follows:
//!
//! ```ignore
//! let a_times_b = &a * &b;
//! let c_times_d = &c * &d;
//! let res = &a_times_b + &c_times_d;
//! ```
//!
//! This performs two *reduce* operations in total, one for each multiplication.
//!
//! ---
//!
//! We can save one reduction by using [`MulResultVar`], as
//! follows:
//!
//! ```ignore
//! let a_times_b = a.mul_without_reduce(&b)?;
//! let c_times_d = c.mul_without_reduce(&d)?;
//! let res = (&a_times_b + &c_times_d)?.reduce()?;
//! ```
//!
//! It performs only one *reduce* operation and is roughly 2x faster than the
//! first implementation.
//!
//! ## Inspiration and basic design
//!
//! This implementation employs the standard idea of using multiple **limbs** to
//! represent an element of the target field. For example, an element in the
//! TargetF may be represented by three BaseF elements (i.e., the
//! limbs).
//!
//! ```text
//! TargetF -> limb 1, limb 2, and limb 3 (each is a BaseF element)
//! ```
//!
//! After some computation, the limbs become saturated and need to be
//! **reduced**, in order to engage in more computation.
//!
//! We heavily use the optimization techniques in [\[KPS18\]](https://akosba.github.io/papers/xjsnark.pdf) and [\[OWWB20\]](https://eprint.iacr.org/2019/1494).
//! Both works have their own open-source libraries:
//! [xJsnark](https://github.com/akosba/xjsnark) and
//! [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat).
//! Compared with these, this module works with the `arkworks` ecosystem.
//! It also provides the option (based on an `optimization_goal` for the
//! constraint system) to optimize for constraint density instead of number of
//! constraints, which improves efficiency in proof systems like [Marlin](https://github.com/arkworks-rs/marlin).
//!
//! ## References
//! \[KPS18\]: A. E. Kosba, C. Papamanthou, and E. Shi. "xJsnark: a framework for efficient verifiable computation," in *Proceedings of the 39th Symposium on Security and Privacy*, ser. S&P '18, 2018, pp. 944–961.
//!
//! \[OWWB20\]: A. Ozdemir, R. S. Wahby, B. Whitehat, and D. Boneh. "Scaling verifiable computation using efficient set accumulators," in *Proceedings of the 29th USENIX Security Symposium*, ser. Security '20, 2020.
//!
//! [`EmulatedFpVar`]: crate::fields::emulated_fp::EmulatedFpVar
//! [`MulResultVar`]: crate::fields::emulated_fp::MulResultVar
//! [`FpVar`]: crate::fields::fp::FpVar
#![allow(
clippy::redundant_closure_call,
clippy::enum_glob_use,
clippy::missing_errors_doc,
clippy::cast_possible_truncation,
clippy::unseparated_literal_suffix
)]
use ark_std::fmt::Debug;
/// Utilities for sampling parameters for non-native field gadgets
///
/// - `BaseF`: the constraint field
/// - `TargetF`: the field being simulated
/// - `num_limbs`: how many limbs are used
/// - `bits_per_limb`: the size of the limbs
pub mod params;
/// How are non-native elements reduced?
pub(crate) mod reduce;
/// A macro computing, for a field element `x`, its bit length plus one when
/// `x` is not a power of two — i.e. `ceil(log2(x)) + 1` for `x >= 1`. This is
/// used as a conservative bit-size bound ("overhead") on `x`.
macro_rules! overhead {
    ($x:expr) => {{
        use ark_ff::BigInteger;
        let num = $x;
        // Big-endian bit decomposition of the full-width representation.
        let num_bits = num.into_bigint().to_bits_be();
        // Count the leading zero bits of the fixed-width representation.
        let mut skipped_bits = 0;
        for b in num_bits.iter() {
            if *b {
                break;
            }
            skipped_bits += 1;
        }
        // `num` is a power of two iff no bit below the leading one is set.
        let mut is_power_of_2 = true;
        for b in num_bits.iter().skip(skipped_bits + 1) {
            if *b {
                is_power_of_2 = false;
            }
        }
        // Bit length of `num`, rounded up by one for non-powers of two.
        if is_power_of_2 {
            num_bits.len() - skipped_bits
        } else {
            num_bits.len() - skipped_bits + 1
        }
    }};
}
pub(crate) use overhead;
/// Parameters for a specific `EmulatedFpVar` instantiation
// NOTE(review): this name still carries the pre-rename `NonNative` prefix;
// consider renaming for consistency with `EmulatedFpVar` in a follow-up.
#[derive(Clone, Debug)]
pub struct NonNativeFieldConfig {
    /// The number of limbs (`BaseF` elements) used to represent a
    /// `TargetF` element. Highest limb first.
    pub num_limbs: usize,
    /// The number of bits of the limb
    pub bits_per_limb: usize,
}
mod allocated_field_var;
pub use allocated_field_var::*;
mod allocated_mul_result;
pub use allocated_mul_result::*;
mod field_var;
pub use field_var::*;
mod mul_result;
pub use mul_result::*;

View File

@@ -0,0 +1,73 @@
use super::{AllocatedMulResultVar, EmulatedFpVar};
use ark_ff::PrimeField;
use ark_relations::r1cs::Result as R1CSResult;
/// An intermediate representation especially for the result of a
/// multiplication, containing more limbs. It is intended for advanced usage to
/// improve the efficiency.
///
/// That is, instead of calling `mul`, one can call `mul_without_reduce` to
/// obtain this intermediate representation, which can still be added.
/// Then, one can call `reduce` to reduce it back to `EmulatedFpVar`.
/// This may help cut the number of reduce operations.
#[derive(Debug)]
#[must_use]
pub enum MulResultVar<TargetF: PrimeField, BaseF: PrimeField> {
    /// A product known at circuit-construction time; costs no constraints.
    Constant(TargetF),
    /// An allocated, unreduced product gadget with widened limbs.
    Var(AllocatedMulResultVar<TargetF, BaseF>),
}
impl<TargetF: PrimeField, BaseF: PrimeField> MulResultVar<TargetF, BaseF> {
    /// Create a zero `MulResultVar` (used for additions).
    pub fn zero() -> Self {
        Self::constant(TargetF::zero())
    }
    /// Create a `MulResultVar` from a native constant.
    pub fn constant(v: TargetF) -> Self {
        Self::Constant(v)
    }
    /// Reduce the `MulResultVar` back to an `EmulatedFpVar`; constants pass
    /// through unchanged.
    #[tracing::instrument(target = "r1cs")]
    pub fn reduce(&self) -> R1CSResult<EmulatedFpVar<TargetF, BaseF>> {
        match self {
            Self::Constant(c) => Ok(EmulatedFpVar::Constant(*c)),
            Self::Var(v) => v.reduce().map(EmulatedFpVar::Var),
        }
    }
}
impl<TargetF: PrimeField, BaseF: PrimeField> From<&EmulatedFpVar<TargetF, BaseF>>
    for MulResultVar<TargetF, BaseF>
{
    /// Lifts an `EmulatedFpVar` into the wider multiplication-result form.
    fn from(src: &EmulatedFpVar<TargetF, BaseF>) -> Self {
        match src {
            EmulatedFpVar::Constant(c) => Self::Constant(*c),
            EmulatedFpVar::Var(v) => {
                Self::Var(AllocatedMulResultVar::<TargetF, BaseF>::from(v))
            },
        }
    }
}
// `+` / `+=` for unreduced products: constants fold off-circuit; otherwise
// delegate to the allocated gadget's `add` / `add_constant`.
impl_bounded_ops!(
    MulResultVar<TargetF, BaseF>,
    TargetF,
    Add,
    add,
    AddAssign,
    add_assign,
    |this: &'a MulResultVar<TargetF, BaseF>, other: &'a MulResultVar<TargetF, BaseF>| {
        use MulResultVar::*;
        match (this, other) {
            (Constant(c1), Constant(c2)) => Constant(*c1 + c2),
            (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.add_constant(c).unwrap()),
            (Var(v1), Var(v2)) => Var(v1.add(v2).unwrap()),
        }
    },
    |this: &'a MulResultVar<TargetF, BaseF>, other: TargetF| { this + &MulResultVar::Constant(other) },
    (TargetF: PrimeField, BaseF: PrimeField),
);

View File

@@ -0,0 +1,97 @@
use super::NonNativeFieldConfig;
/// Compute the limb parameters (`num_limbs`, `bits_per_limb`) for emulating a
/// `target_field_size`-bit field over a `base_field_size`-bit field, tuned
/// for the given optimization target.
#[must_use]
pub const fn get_params(
    target_field_size: usize,
    base_field_size: usize,
    optimization_type: OptimizationType,
) -> NonNativeFieldConfig {
    let (num_limbs, bits_per_limb) =
        find_parameters(base_field_size, target_field_size, optimization_type);
    NonNativeFieldConfig {
        num_limbs,
        bits_per_limb,
    }
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// The type of optimization target for the parameters searching
pub enum OptimizationType {
    /// Optimize for the total number of constraints.
    Constraints,
    /// Optimize for constraint-system weight (density of non-zero entries).
    Weight,
}
/// Search for the cheapest `(num_limbs, bits_per_limb)` pair for emulated
/// field gadgets, by trying every feasible limb size and evaluating a cost
/// model for the chosen optimization target.
pub const fn find_parameters(
    base_field_prime_length: usize,
    target_field_prime_bit_length: usize,
    optimization_type: OptimizationType,
) -> (usize, usize) {
    let surfeit = 10;
    // Cap the limb size so that limb products (plus slack bits) still fit in
    // the base field; never use more bits than the target field itself has.
    let mut max_limb_size = (base_field_prime_length - 1 - surfeit - 1) / 2 - 1;
    if max_limb_size > target_field_prime_bit_length {
        max_limb_size = target_field_prime_bit_length;
    }
    let mut found = false;
    let mut best_cost = 0usize;
    let mut best_limb_size = 0usize;
    let mut best_num_limbs = 0usize;
    let mut limb_size = 1;
    while limb_size <= max_limb_size {
        // Number of limbs needed to cover the target field (ceiling division).
        let num_limbs = (target_field_prime_bit_length + limb_size - 1) / limb_size;
        let group_size =
            (base_field_prime_length - 1 - surfeit - 1 - 1 - limb_size + limb_size - 1) / limb_size;
        let num_groups = (2 * num_limbs - 1 + group_size - 1) / group_size;
        // Cost model: multiplication cost, allocation of `k` and `r`, and the
        // grouped equality check, per optimization target.
        let cost = match optimization_type {
            OptimizationType::Constraints => {
                (2 * num_limbs - 1)
                    + target_field_prime_bit_length
                    + (target_field_prime_bit_length + num_limbs)
                    + num_groups
                    + (num_groups - 1) * (limb_size * 2 + surfeit)
                    + 1
            },
            OptimizationType::Weight => {
                6 * num_limbs * num_limbs
                    + (target_field_prime_bit_length * 3 + target_field_prime_bit_length)
                    + (target_field_prime_bit_length * 3
                        + target_field_prime_bit_length
                        + num_limbs)
                    + (num_limbs * num_limbs + 2 * (2 * num_limbs - 1))
                    + (num_limbs
                        + num_groups
                        + 6 * num_groups
                        + (num_groups - 1) * (2 * limb_size + surfeit) * 4
                        + 2)
            },
        };
        // Keep the cheapest configuration seen so far.
        if !found || cost < best_cost {
            found = true;
            best_cost = cost;
            best_limb_size = limb_size;
            best_num_limbs = num_limbs;
        }
        limb_size += 1;
    }
    (best_num_limbs, best_limb_size)
}

View File

@@ -0,0 +1,324 @@
use super::{overhead, params::get_params, AllocatedEmulatedFpVar};
use crate::{
alloc::AllocVar,
boolean::Boolean,
eq::EqGadget,
fields::{fp::FpVar, FieldVar},
R1CSVar,
};
use ark_ff::{biginteger::BigInteger, BitIteratorBE, One, PrimeField, Zero};
use ark_relations::{
ns,
r1cs::{ConstraintSystemRef, Result as R1CSResult},
};
use ark_std::{cmp::min, marker::PhantomData, vec, vec::Vec};
use num_bigint::BigUint;
use num_integer::Integer;
/// Recompose a big integer from limbs given highest-limb-first, where each
/// limb occupies `bits_per_limb` bits of the result.
pub fn limbs_to_bigint<BaseF: PrimeField>(bits_per_limb: usize, limbs: &[BaseF]) -> BigUint {
    let mut acc = BigUint::zero();
    // Weight of the current limb position, growing by 2^bits_per_limb.
    let mut limb_weight = BigUint::one();
    let radix = BigUint::from(2u32).pow(bits_per_limb as u32);
    for limb in limbs.iter().rev() {
        // Accumulate this limb bit by bit, doubling the bit weight each step.
        let mut bit_weight = limb_weight.clone();
        for bit in limb.into_bigint().to_bits_le() {
            if bit {
                acc += &bit_weight;
            }
            bit_weight *= 2u32;
        }
        limb_weight *= &radix;
    }
    acc
}
/// Map a big integer into the base field by Horner-style accumulation of its
/// big-endian bytes (i.e. reduction modulo the base field's prime).
pub fn bigint_to_basefield<BaseF: PrimeField>(bigint: &BigUint) -> BaseF {
    let base_256 = BaseF::from_bigint(<BaseF as PrimeField>::BigInt::from(256u64)).unwrap();
    let mut acc = BaseF::zero();
    // Weight of the current byte position, growing by a factor of 256.
    let mut weight = BaseF::one();
    for byte in bigint.to_bytes_be().iter().rev() {
        acc += weight * BaseF::from(*byte as u128);
        weight *= &base_256;
    }
    acc
}
/// A collection of methods for reducing the limb representations of
/// non-native field elements.
pub struct Reducer<TargetF: PrimeField, BaseF: PrimeField> {
    pub target_phantom: PhantomData<TargetF>,
    pub base_phantom: PhantomData<BaseF>,
}
impl<TargetF: PrimeField, BaseF: PrimeField> Reducer<TargetF, BaseF> {
    /// convert limbs to bits (take at most `BaseF::MODULUS_BIT_SIZE as
    /// usize - 1` bits) This implementation would be more efficient than
    /// the original `to_bits` or `to_non_unique_bits` since we enforce that
    /// some bits are always zero.
    #[tracing::instrument(target = "r1cs")]
    pub fn limb_to_bits(limb: &FpVar<BaseF>, num_bits: usize) -> R1CSResult<Vec<Boolean<BaseF>>> {
        let cs = limb.cs();
        // Never take more bits than are uniquely representable in BaseF.
        let num_bits = min(BaseF::MODULUS_BIT_SIZE as usize - 1, num_bits);
        let mut bits_considered = Vec::with_capacity(num_bits);
        let limb_value = limb.value().unwrap_or_default();
        // The BigInt backing store may be wider than the modulus; skip those
        // unused high bits, plus everything above the requested `num_bits`.
        let num_bits_to_shave = BaseF::BigInt::NUM_LIMBS * 64 - (BaseF::MODULUS_BIT_SIZE as usize);
        for b in BitIteratorBE::new(limb_value.into_bigint())
            .skip(num_bits_to_shave + (BaseF::MODULUS_BIT_SIZE as usize - num_bits))
        {
            bits_considered.push(b);
        }
        if cs == ConstraintSystemRef::None {
            // Constant context: just materialize the bits, no constraints.
            let mut bits = vec![];
            for b in bits_considered {
                bits.push(Boolean::<BaseF>::Constant(b));
            }
            Ok(bits)
        } else {
            // Witness each bit, then enforce that the weighted sum of the
            // bits recomposes exactly to `limb`.
            let mut bits = vec![];
            for b in bits_considered {
                bits.push(Boolean::<BaseF>::new_witness(
                    ark_relations::ns!(cs, "bit"),
                    || Ok(b),
                )?);
            }
            let mut bit_sum = FpVar::<BaseF>::zero();
            let mut coeff = BaseF::one();
            // `bits` is most-significant-first, so iterate in reverse while
            // doubling the coefficient from 1 upward.
            for bit in bits.iter().rev() {
                bit_sum += <FpVar<BaseF> as From<Boolean<BaseF>>>::from((*bit).clone()) * coeff;
                coeff.double_in_place();
            }
            bit_sum.enforce_equal(limb)?;
            Ok(bits)
        }
    }
/// Reduction to the normal form
#[tracing::instrument(target = "r1cs")]
pub fn reduce(elem: &mut AllocatedEmulatedFpVar<TargetF, BaseF>) -> R1CSResult<()> {
let new_elem = AllocatedEmulatedFpVar::new_witness(ns!(elem.cs(), "normal_form"), || {
Ok(elem.value().unwrap_or_default())
})?;
elem.conditional_enforce_equal(&new_elem, &Boolean::TRUE)?;
*elem = new_elem;
Ok(())
}
    /// Reduction to be enforced after additions
    ///
    /// Only reduces when the additions accumulated so far would leave too
    /// little headroom in the base field for a subsequent multiplication.
    #[tracing::instrument(target = "r1cs")]
    pub fn post_add_reduce(elem: &mut AllocatedEmulatedFpVar<TargetF, BaseF>) -> R1CSResult<()> {
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            elem.get_optimization_type(),
        );
        // Bound on the extra bits contributed by the accumulated additions.
        let surfeit = overhead!(elem.num_of_additions_over_normal_form + BaseF::one()) + 1;
        if BaseF::MODULUS_BIT_SIZE as usize > 2 * params.bits_per_limb + surfeit + 1 {
            // Still enough headroom: no reduction needed yet.
            Ok(())
        } else {
            Self::reduce(elem)
        }
    }
    /// Reduction used before multiplication to reduce the representations in a
    /// way that allows efficient multiplication
    #[tracing::instrument(target = "r1cs")]
    pub fn pre_mul_reduce(
        elem: &mut AllocatedEmulatedFpVar<TargetF, BaseF>,
        elem_other: &mut AllocatedEmulatedFpVar<TargetF, BaseF>,
    ) -> R1CSResult<()> {
        assert_eq!(
            elem.get_optimization_type(),
            elem_other.get_optimization_type()
        );
        let params = get_params(
            TargetF::MODULUS_BIT_SIZE as usize,
            BaseF::MODULUS_BIT_SIZE as usize,
            elem.get_optimization_type(),
        );
        // If even fully-reduced operands would overflow a base-field element
        // when multiplied limb-wise, no amount of reduction can help.
        if 2 * params.bits_per_limb + ark_std::log2(params.num_limbs) as usize
            > BaseF::MODULUS_BIT_SIZE as usize - 1
        {
            panic!("The current limb parameters do not support multiplication.");
        }
        // Repeatedly reduce the more-saturated operand until a product limb
        // is guaranteed to fit within the base field.
        loop {
            let prod_of_num_of_additions = (elem.num_of_additions_over_normal_form + BaseF::one())
                * (elem_other.num_of_additions_over_normal_form + BaseF::one());
            // Extra bits contributed by cross-limb accumulation in a product.
            let overhead_limb = overhead!(prod_of_num_of_additions.mul(
                &BaseF::from_bigint(<BaseF as PrimeField>::BigInt::from(
                    (params.num_limbs) as u64
                ))
                .unwrap()
            ));
            let bits_per_mulresult_limb = 2 * (params.bits_per_limb + 1) + overhead_limb;
            if bits_per_mulresult_limb < BaseF::MODULUS_BIT_SIZE as usize {
                break;
            }
            // Reduce whichever operand carries more unreduced additions.
            if elem.num_of_additions_over_normal_form
                >= elem_other.num_of_additions_over_normal_form
            {
                Self::reduce(elem)?;
            } else {
                Self::reduce(elem_other)?;
            }
        }
        Ok(())
    }
/// Reduction to the normal form
#[tracing::instrument(target = "r1cs")]
pub fn pre_eq_reduce(elem: &mut AllocatedEmulatedFpVar<TargetF, BaseF>) -> R1CSResult<()> {
if elem.is_in_the_normal_form {
return Ok(());
}
Self::reduce(elem)
}
/// Group and check equality
#[tracing::instrument(target = "r1cs")]
pub fn group_and_check_equality(
surfeit: usize,
bits_per_limb: usize,
shift_per_limb: usize,
left: &[FpVar<BaseF>],
right: &[FpVar<BaseF>],
) -> R1CSResult<()> {
let cs = left.cs().or(right.cs());
let zero = FpVar::<BaseF>::zero();
let mut limb_pairs = Vec::<(FpVar<BaseF>, FpVar<BaseF>)>::new();
let num_limb_in_a_group = (BaseF::MODULUS_BIT_SIZE as usize
- 1
- surfeit
- 1
- 1
- 1
- (bits_per_limb - shift_per_limb))
/ shift_per_limb;
let shift_array = {
let mut array = Vec::new();
let mut cur = BaseF::one().into_bigint();
for _ in 0..num_limb_in_a_group {
array.push(BaseF::from_bigint(cur).unwrap());
cur <<= shift_per_limb as u32;
}
array
};
for (left_limb, right_limb) in left.iter().zip(right.iter()).rev() {
            // note: the `rev` operation is here, so that the first limb (and the first
            // grouped limb) will be the least significant limb.
limb_pairs.push((left_limb.clone(), right_limb.clone()));
}
let mut groupped_limb_pairs = Vec::<(FpVar<BaseF>, FpVar<BaseF>, usize)>::new();
for limb_pairs_in_a_group in limb_pairs.chunks(num_limb_in_a_group) {
let mut left_total_limb = zero.clone();
let mut right_total_limb = zero.clone();
for ((left_limb, right_limb), shift) in
limb_pairs_in_a_group.iter().zip(shift_array.iter())
{
left_total_limb += &(left_limb * *shift);
right_total_limb += &(right_limb * *shift);
}
groupped_limb_pairs.push((
left_total_limb,
right_total_limb,
limb_pairs_in_a_group.len(),
));
}
// This part we mostly use the techniques in bellman-bignat
// The following code is adapted from https://github.com/alex-ozdemir/bellman-bignat/blob/master/src/mp/bignat.rs#L567
let mut carry_in = zero;
let mut carry_in_value = BaseF::zero();
let mut accumulated_extra = BigUint::zero();
for (group_id, (left_total_limb, right_total_limb, num_limb_in_this_group)) in
groupped_limb_pairs.iter().enumerate()
{
let mut pad_limb_repr = BaseF::ONE.into_bigint();
pad_limb_repr <<= (surfeit
+ (bits_per_limb - shift_per_limb)
+ shift_per_limb * num_limb_in_this_group
+ 1
+ 1) as u32;
let pad_limb = BaseF::from_bigint(pad_limb_repr).unwrap();
let left_total_limb_value = left_total_limb.value().unwrap_or_default();
let right_total_limb_value = right_total_limb.value().unwrap_or_default();
let mut carry_value =
left_total_limb_value + carry_in_value + pad_limb - right_total_limb_value;
let carry_repr =
carry_value.into_bigint() >> (shift_per_limb * num_limb_in_this_group) as u32;
carry_value = BaseF::from_bigint(carry_repr).unwrap();
let carry = FpVar::new_witness(cs.clone(), || Ok(carry_value))?;
accumulated_extra += limbs_to_bigint(bits_per_limb, &[pad_limb]);
let (new_accumulated_extra, remainder) = accumulated_extra.div_rem(
&BigUint::from(2u64).pow((shift_per_limb * num_limb_in_this_group) as u32),
);
let remainder_limb = bigint_to_basefield::<BaseF>(&remainder);
// Now check
// left_total_limb + pad_limb + carry_in - right_total_limb
// = carry shift by (shift_per_limb * num_limb_in_this_group) + remainder
let eqn_left = left_total_limb + pad_limb + &carry_in - right_total_limb;
let eqn_right = &carry
* BaseF::from(2u64).pow(&[(shift_per_limb * num_limb_in_this_group) as u64])
+ remainder_limb;
eqn_left.conditional_enforce_equal(&eqn_right, &Boolean::<BaseF>::TRUE)?;
accumulated_extra = new_accumulated_extra;
carry_in = carry.clone();
carry_in_value = carry_value;
if group_id == groupped_limb_pairs.len() - 1 {
carry.enforce_equal(&FpVar::<BaseF>::Constant(bigint_to_basefield(
&accumulated_extra,
)))?;
} else {
Reducer::<TargetF, BaseF>::limb_to_bits(&carry, surfeit + bits_per_limb)?;
}
}
Ok(())
}
}