
Introduce `mul_by_inverse_unchecked`, and use it (#75)

Pratyush Mishra committed 3 years ago (committed by GitHub)
Branch: master, commit b6e7e94521
7 changed files with 126 additions and 67 deletions
  1. CHANGELOG.md (+1, -0)
  2. src/fields/fp/mod.rs (+8, -0)
  3. src/fields/mod.rs (+31, -3)
  4. src/groups/curves/short_weierstrass/non_zero_affine.rs (+12, -29)
  5. src/poly/domain/mod.rs (+18, -2)
  6. src/poly/evaluations/univariate/lagrange_interpolator.rs (+7, -6)
  7. src/poly/evaluations/univariate/mod.rs (+49, -27)

CHANGELOG.md (+1, -0)

@@ -17,6 +17,7 @@
### Features
- [\#71](https://github.com/arkworks-rs/r1cs-std/pull/71) Implement the `Sum` trait for `FpVar`.
- [\#75](https://github.com/arkworks-rs/r1cs-std/pull/75) Introduce `mul_by_inverse_unchecked` for `FieldVar`. This accompanies the bug fix in [\#70](https://github.com/arkworks-rs/r1cs-std/pull/70).
### Bug Fixes

src/fields/fp/mod.rs (+8, -0)

@@ -421,6 +421,14 @@ impl AllocatedFp {
other: &Self,
should_enforce: &Boolean<F>,
) -> Result<(), SynthesisError> {
// The high level logic is as follows:
// We want to check that self - other != 0. We do this by checking that
// (self - other).inverse() exists. In more detail, we check the following:
// If `should_enforce == true`, then we set `multiplier = (self - other).inverse()`,
// and check that (self - other) * multiplier == 1. (i.e., that the inverse exists)
//
// If `should_enforce == false`, then we set `multiplier == 0`, and check that
// (self - other) * 0 == 0, which is always satisfied.
let multiplier = Self::new_witness(self.cs.clone(), || {
if should_enforce.value()? {
(self.value.get()? - other.value.get()?).inverse().get()
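
The same strategy, written out as a self-contained sketch against the crate's public `FpVar` API (the free-standing name `enforce_not_equal_if` and the use of `conditionally_select` are illustrative choices, not the internal `AllocatedFp` code):

use ark_ff::PrimeField;
use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
use ark_relations::r1cs::SynthesisError;

// Conditionally enforce `a != b` via the multiplier trick described in the comment above.
// (Sketch: assumes at least one of `a`, `b` is a non-constant variable.)
fn enforce_not_equal_if<F: PrimeField>(
    a: &FpVar<F>,
    b: &FpVar<F>,
    should_enforce: &Boolean<F>,
) -> Result<(), SynthesisError> {
    let difference = a - b;
    // `multiplier` is `(a - b)^{-1}` if `should_enforce`, and `0` otherwise.
    let multiplier = FpVar::new_witness(difference.cs(), || {
        if should_enforce.value()? {
            // If `a == b`, no inverse exists; the fallback value cannot satisfy the
            // constraint below, so the system becomes unsatisfiable, as desired.
            Ok(difference.value()?.inverse().unwrap_or(F::zero()))
        } else {
            Ok(F::zero())
        }
    })?;
    // Enforce `(a - b) * multiplier == (should_enforce ? 1 : 0)`.
    let expected = FpVar::conditionally_select(should_enforce, &FpVar::one(), &FpVar::zero())?;
    difference.mul_equals(&multiplier, &expected)
}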

src/fields/mod.rs (+31, -3)

@@ -1,5 +1,5 @@
use ark_ff::{prelude::*, BitIteratorBE};
use ark_relations::r1cs::SynthesisError;
use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
use core::{
fmt::Debug,
ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign},
@@ -158,8 +158,36 @@ pub trait FieldVar:
/// Returns `(self / d)`.
/// The constraint system will be unsatisfiable when `d = 0`.
fn mul_by_inverse(&self, d: &Self) -> Result<Self, SynthesisError> {
let d_inv = d.inverse()?;
Ok(d_inv * self)
// Enforce that `d` is not zero.
d.enforce_not_equal(&Self::zero())?;
self.mul_by_inverse_unchecked(d)
}
/// Returns `(self / d)`.
///
/// The precondition for this method is that `d != 0`. If `d == 0`, this
/// method offers no guarantees about the soundness of the resulting
/// constraint system. For example, if `self == d == 0`, the current
/// implementation leaves the result unconstrained: the check `result * d == self`
/// degenerates to `result * 0 == 0`, which any value of `result` satisfies.
fn mul_by_inverse_unchecked(&self, d: &Self) -> Result<Self, SynthesisError> {
let cs = self.cs().or(d.cs());
match cs {
// If we're in the constant case, we just allocate a new constant whose value is
// `self * d.inverse()`.
ConstraintSystemRef::None => Self::new_constant(
cs,
self.value()? * d.value()?.inverse().expect("division by zero"),
),
// If not, we allocate `result` as a new witness having value `self * d.inverse()`,
// and check that `result * d = self`.
_ => {
let result = Self::new_witness(ark_relations::ns!(cs, "self * d_inv"), || {
Ok(self.value()? * &d.value()?.inverse().unwrap_or(F::zero()))
})?;
result.mul_equals(d, self)?;
Ok(result)
}
}
}
/// Computes the frobenius map over `self`.
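
A small end-to-end sketch of the intended calling pattern for the two division helpers (illustrative only; `Fr` from `ark_test_curves` and the helper name `division_example` are arbitrary choices):

use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;

fn division_example() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let num = FpVar::new_witness(cs.clone(), || Ok(Fr::from(6u64)))?;
    let den = FpVar::new_witness(cs.clone(), || Ok(Fr::from(3u64)))?;

    // Checked version: additionally enforces `den != 0`.
    let q_checked = num.mul_by_inverse(&den)?;
    // Unchecked version: the caller guarantees `den != 0`; only the single
    // constraint `q_unchecked * den == num` is generated.
    let q_unchecked = num.mul_by_inverse_unchecked(&den)?;

    assert_eq!(q_checked.value()?, Fr::from(2u64));
    assert_eq!(q_unchecked.value()?, Fr::from(2u64));
    assert!(cs.is_satisfied()?);
    Ok(())
}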

src/groups/curves/short_weierstrass/non_zero_affine.rs (+12, -29)

@@ -55,7 +55,9 @@ where
// y3 = lambda * (x1 - x3) - y1
let numerator = y2 - y1;
let denominator = x2 - x1;
let lambda = numerator.mul_by_inverse(&denominator)?;
// It's okay to use `unchecked` here, because the precondition of `add_unchecked` is that
// self != ±other, which means that `x2 != x1`, and hence that `denominator` is non-zero.
let lambda = numerator.mul_by_inverse_unchecked(&denominator)?;
let x3 = lambda.square()? - x1 - x2;
let y3 = lambda * &(x1 - &x3) - y1;
Ok(Self::new(x3, y3))
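
For comparison, the same chord rule in native (non-R1CS) field arithmetic; this is only an illustrative sketch (the `Fq` base field from `ark_test_curves` and the helper name are arbitrary), but it shows why the precondition makes the inversion safe:

use ark_ff::Field;
use ark_test_curves::bls12_381::Fq;

// Native chord rule mirroring the gadget above.
fn affine_add_native(x1: Fq, y1: Fq, x2: Fq, y2: Fq) -> (Fq, Fq) {
    // Precondition: the points are distinct and not negatives of each other, so x2 != x1.
    let lambda = (y2 - y1) * (x2 - x1).inverse().expect("x2 != x1 by precondition");
    let x3 = lambda.square() - x1 - x2;
    let y3 = lambda * (x1 - x3) - y1;
    (x3, y3)
}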
@@ -80,7 +82,9 @@ where
// y3 = lambda * (x1 - x3) - y1
let numerator = x1_sqr.double()? + &x1_sqr + P::COEFF_A;
let denominator = y1.double()?;
let lambda = numerator.mul_by_inverse(&denominator)?;
// It's okay to use `unchecked` here, because the precondition of `double` is that
// self != zero; in the intended use these points lie in an odd-order subgroup, so
// `y1 != 0` and the denominator `2 * y1` is non-zero.
let lambda = numerator.mul_by_inverse_unchecked(&denominator)?;
let x3 = lambda.square()? - x1.double()?;
let y3 = lambda * &(x1 - &x3) - y1;
Ok(Self::new(x3, y3))
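
Likewise, the tangent rule used by `double`, again as a native sketch; `coeff_a` stands in for the curve coefficient `P::COEFF_A` used by the gadget:

use ark_ff::Field;
use ark_test_curves::bls12_381::Fq;

// Native tangent rule mirroring the gadget above.
fn affine_double_native(x1: Fq, y1: Fq, coeff_a: Fq) -> (Fq, Fq) {
    let x1_sqr = x1.square();
    // Precondition: `y1 != 0` (a non-zero point of odd order), so `2 * y1` is invertible.
    let numerator = x1_sqr.double() + x1_sqr + coeff_a;
    let lambda = numerator * y1.double().inverse().expect("y1 != 0 by precondition");
    let x3 = lambda.square() - x1.double();
    let y3 = lambda * (x1 - x3) - y1;
    (x3, y3)
}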
@@ -92,10 +96,11 @@ where
///
/// This follows the formulae from [\[ELM03\]](https://arxiv.org/abs/math/0208038).
#[tracing::instrument(target = "r1cs", skip(self))]
pub(crate) fn double_and_add(&self, other: &Self) -> Result<Self, SynthesisError> {
pub(crate) fn double_and_add_unchecked(&self, other: &Self) -> Result<Self, SynthesisError> {
if [self].is_constant() || other.is_constant() {
self.double()?.add_unchecked(other)
} else {
// It's okay to use `unchecked` here because the precondition is that `self != ±other`
// (i.e. the same logic as in `add_unchecked`).
let (x1, y1) = (&self.x, &self.y);
let (x2, y2) = (&other.x, &other.y);
@@ -105,12 +110,13 @@ where
// y3 = lambda * (x1 - x3) - y1
let numerator = y2 - y1;
let denominator = x2 - x1;
let lambda_1 = numerator.mul_by_inverse(&denominator)?;
let lambda_1 = numerator.mul_by_inverse_unchecked(&denominator)?;
let x3 = lambda_1.square()? - x1 - x2;
// Calculate final addition slope:
let lambda_2 = (lambda_1 + y1.double()?.mul_by_inverse(&(&x3 - x1))?).negate()?;
let lambda_2 =
(lambda_1 + y1.double()?.mul_by_inverse_unchecked(&(&x3 - x1))?).negate()?;
let x4 = lambda_2.square()? - x1 - x3;
let y4 = lambda_2 * &(x1 - &x4) - y1;
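
Why the second slope needs no `y3`: the final addition goes through `(x1, y1)` and `(x3, y3)`, and substituting `y3 = lambda_1 * (x1 - x3) - y1` into the usual chord slope gives

\lambda_2 = \frac{y_3 - y_1}{x_3 - x_1} = \frac{\lambda_1 (x_1 - x_3) - 2 y_1}{x_3 - x_1} = -\left(\lambda_1 + \frac{2 y_1}{x_3 - x_1}\right),

which is exactly the expression computed above. The `unchecked` division here additionally relies on `x3 != x1`, i.e. on the intermediate sum `self + other` not being equal to `±self`.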
@@ -284,7 +290,7 @@ mod test_non_zero_affine {
let mut cur = a.double().unwrap();
for _ in 1..10 {
cur = cur.double_and_add(&a).unwrap();
cur = cur.double_and_add_unchecked(&a).unwrap();
}
(cur.x.value().unwrap(), cur.y.value().unwrap())
@@ -294,27 +300,4 @@ mod test_non_zero_affine {
assert_eq!(sum_a.0, sum_b.0);
assert_eq!(sum_a.1, sum_b.1);
}
#[test]
fn soundness_test() {
let cs = ConstraintSystem::<Fq>::new_ref();
let x = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.0)
})
.unwrap(),
);
let y = FpVar::Var(
AllocatedFp::<Fq>::new_witness(cs.clone(), || {
Ok(G1Parameters::AFFINE_GENERATOR_COEFFS.1)
})
.unwrap(),
);
let a = NonZeroAffineVar::<G1Parameters, FpVar<Fq>>::new(x, y);
let _ = a.double_and_add(&a).unwrap();
assert!(!cs.is_satisfied().unwrap());
}
}

src/poly/domain/mod.rs (+18, -2)

@@ -14,15 +14,31 @@ pub mod vanishing_poly;
/// Native code corresponds to `ark-poly::univariate::domain::radix2`, but `ark-poly` only supports
/// subgroups (not cosets) for now.
///
/// TODO: support cosets in `ark-poly`.
// TODO: support cosets in `ark-poly`.
pub struct Radix2DomainVar<F: PrimeField> {
/// generator of subgroup g
pub gen: F,
/// index of the quotient group (i.e. the `offset`)
pub offset: FpVar<F>,
offset: FpVar<F>,
/// dimension of evaluation domain
pub dim: u64,
}
impl<F: PrimeField> Radix2DomainVar<F> {
/// Construct an evaluation domain with the given offset.
pub fn new(gen: F, dimension: u64, offset: FpVar<F>) -> Result<Self, SynthesisError> {
offset.enforce_not_equal(&FpVar::zero())?;
Ok(Self {
gen,
offset,
dim: dimension,
})
}
/// What is the offset of `self`?
pub fn offset(&self) -> &FpVar<F> {
&self.offset
}
}
impl<F: PrimeField> EqGadget<F> for Radix2DomainVar<F> {
fn is_eq(&self, other: &Self) -> Result<Boolean<F>, SynthesisError> {
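
A minimal construction sketch using the new API (illustrative; it assumes the crate-external path `ark_r1cs_std::poly::domain::Radix2DomainVar` and the `Fr` field from `ark_test_curves`):

use ark_ff::FftField;
use ark_r1cs_std::{fields::fp::FpVar, poly::domain::Radix2DomainVar, prelude::*};
use ark_relations::r1cs::SynthesisError;
use ark_test_curves::bls12_381::Fr;

fn build_domain() -> Result<(), SynthesisError> {
    // A 2^4-point coset `offset * <gen>`, with `gen` a 16th root of unity.
    let gen = Fr::get_root_of_unity(1 << 4).unwrap();
    let offset = FpVar::constant(Fr::multiplicative_generator());
    // `new` additionally enforces `offset != 0`.
    let domain = Radix2DomainVar::new(gen, 4, offset)?;
    // The offset is now read through the getter rather than the (now private) field.
    let _offset_value = domain.offset().value()?;
    Ok(())
}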

src/poly/evaluations/univariate/lagrange_interpolator.rs (+7, -6)

@@ -115,13 +115,14 @@ mod tests {
let poly = DensePolynomial::rand(15, &mut rng);
let gen = Fr::get_root_of_unity(1 << 4).unwrap();
assert_eq!(gen.pow(&[1 << 4]), Fr::one());
let domain = Radix2DomainVar {
let domain = Radix2DomainVar::new(
gen,
offset: FpVar::constant(Fr::multiplicative_generator()),
dim: 4, // 2^4 = 16
};
4, // 2^4 = 16
FpVar::constant(Fr::multiplicative_generator()),
)
.unwrap();
// generate evaluations of `poly` on this domain
let mut coset_point = domain.offset.value().unwrap();
let mut coset_point = domain.offset().value().unwrap();
let mut oracle_evals = Vec::new();
for _ in 0..(1 << 4) {
oracle_evals.push(poly.evaluate(&coset_point));
@@ -129,7 +130,7 @@
}
let interpolator = LagrangeInterpolator::new(
domain.offset.value().unwrap(),
domain.offset().value().unwrap(),
domain.gen,
domain.dim,
oracle_evals,

src/poly/evaluations/univariate/mod.rs (+49, -27)

@@ -1,6 +1,7 @@
pub mod lagrange_interpolator;
use crate::alloc::AllocVar;
use crate::eq::EqGadget;
use crate::fields::fp::FpVar;
use crate::fields::FieldVar;
use crate::poly::domain::Radix2DomainVar;
@@ -55,11 +56,11 @@ impl EvaluationsVar {
/// Precompute the values needed for Lagrange interpolation and mark the cache as ready to interpolate.
pub fn generate_interpolation_cache(&mut self) {
if self.domain.offset.is_constant() {
if self.domain.offset().is_constant() {
let poly_evaluations_val: Vec<_> =
self.evals.iter().map(|v| v.value().unwrap()).collect();
let domain = &self.domain;
let lagrange_interpolator = if let FpVar::Constant(x) = domain.offset {
let lagrange_interpolator = if let &FpVar::Constant(x) = domain.offset() {
LagrangeInterpolator::new(x, domain.gen, domain.dim, poly_evaluations_val)
} else {
panic!("Domain offset needs to be constant.")
@@ -129,7 +130,7 @@ impl EvaluationsVar {
interpolation_point: &FpVar<F>,
) -> Result<FpVar<F>, SynthesisError> {
// specialize: if domain offset is constant, we can optimize to have fewer constraints
if self.domain.offset.is_constant() {
if self.domain.offset().is_constant() {
self.lagrange_interpolate_with_constant_offset(interpolation_point)
} else {
// if domain offset is not constant, then we use standard lagrange interpolation code
@@ -158,7 +159,7 @@ impl EvaluationsVar {
/// Generate interpolation constraints. We assume that at compile time we know the base coset (i.e. `gen`) but do not know `offset`.
fn lagrange_interpolate_with_non_constant_offset(
&self,
interpolation_point: &FpVar<F>,
interpolation_point: &FpVar<F>, // = alpha in the following code
) -> Result<FpVar<F>, SynthesisError> {
// first, make sure `subgroup_points` is made
let subgroup_points = self.subgroup_points.as_ref()
@@ -169,16 +170,27 @@
// \frac{1}{size * offset ^ size} * \frac{alpha^size - offset^size}{alpha * a^{-1} - 1}
// Notice that a = (offset * a') where a' is the corresponding element of base coset
// let `lhs` become \frac{alpha^size - offset^size}{size * offset ^ size}. This part is shared by all lagrange polynomials
let coset_offset_to_size = self.domain.offset.pow_by_constant(&[self.domain.size()])?; // offset^size
let alpha_to_s = interpolation_point.pow_by_constant(&[self.domain.size()])?;
let lhs_numerator = &alpha_to_s - &coset_offset_to_size;
// let `lhs` be \frac{alpha^size - offset^size}{size * offset ^ size}. This part is shared by all lagrange polynomials
let coset_offset_to_size = self
.domain
.offset()
.pow_by_constant(&[self.domain.size()])?; // offset^size
let alpha_to_size = interpolation_point.pow_by_constant(&[self.domain.size()])?;
let lhs_numerator = &alpha_to_size - &coset_offset_to_size;
// This enforces that `alpha` is not in the coset.
// This also means that each denominator `alpha * a^{-1} - 1` (i.e. `lag_denom` in the loop below) is non-zero.
lhs_numerator.enforce_not_equal(&FpVar::zero())?;
// `domain.offset()` is non-zero by construction, so `coset_offset_to_size` is also non-zero, which means `lhs_denominator` is non-zero
let lhs_denominator = &coset_offset_to_size * FpVar::constant(F::from(self.domain.size()));
let lhs = lhs_numerator.mul_by_inverse(&lhs_denominator)?;
// unchecked is okay because the denominator is non-zero.
let lhs = lhs_numerator.mul_by_inverse_unchecked(&lhs_denominator)?;
// `rhs` for coset element `a` is \frac{1}{alpha * a^{-1} - 1} = \frac{1}{alpha * offset^{-1} * a'^{-1} - 1}
let alpha_coset_offset_inv = interpolation_point.mul_by_inverse(&self.domain.offset)?;
// domain.offset() is non-zero by construction.
let alpha_coset_offset_inv =
interpolation_point.mul_by_inverse_unchecked(&self.domain.offset())?;
// `res` stores the sum of all lagrange polynomials evaluated at alpha
let mut res = FpVar::<F>::zero();
@@ -189,8 +201,16 @@ impl EvaluationsVar {
let subgroup_point_inv = subgroup_points[(domain_size - i) % domain_size];
debug_assert_eq!(subgroup_points[i] * subgroup_point_inv, F::one());
// alpha * offset^{-1} * a'^{-1} - 1
let lag_donom = &alpha_coset_offset_inv * subgroup_point_inv - F::one();
let lag_coeff = lhs.mul_by_inverse(&lag_donom)?;
let lag_denom = &alpha_coset_offset_inv * subgroup_point_inv - F::one();
// lag_denom cannot be zero, so we use `unchecked`.
//
// Proof: lag_denom is zero if and only if alpha * (coset_offset * subgroup_point)^{-1} == 1.
// This can happen only if `alpha` is itself in the coset.
//
// Earlier we asserted that `lhs_numerator` is not zero.
// Since `lhs_numerator` is just the vanishing polynomial for the coset evaluated at `alpha`,
// and since this is non-zero, `alpha` is not in the coset.
let lag_coeff = lhs.mul_by_inverse_unchecked(&lag_denom)?;
let lag_interpoland = &self.evals[i] * lag_coeff;
res += lag_interpoland
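
For reference, the coefficient accumulated in this loop follows from the standard identity L_i(X) = Z(X) / (Z'(a_i) * (X - a_i)), where Z(X) = X^size - offset^size is the vanishing polynomial of the coset and a_i = offset * a'_i (notation as in the comments above):

\frac{Z(\alpha)}{Z'(a_i)\,(\alpha - a_i)}
  = \frac{\alpha^{size} - offset^{size}}{size \cdot a_i^{size - 1} (\alpha - a_i)}
  = \frac{\alpha^{size} - offset^{size}}{size \cdot offset^{size}} \cdot \frac{1}{\alpha \cdot a_i^{-1} - 1},

using a_i^{size} = offset^{size} (the base-coset element a'_i has order dividing `size`). The first factor is `lhs`, the second is `1 / lag_denom`, and the loop therefore accumulates \sum_i evals[i] * L_i(\alpha).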
@@ -345,12 +365,13 @@ mod tests {
let poly = DensePolynomial::rand(15, &mut rng);
let gen = Fr::get_root_of_unity(1 << 4).unwrap();
assert_eq!(gen.pow(&[1 << 4]), Fr::one());
let domain = Radix2DomainVar {
let domain = Radix2DomainVar::new(
gen,
offset: FpVar::constant(Fr::rand(&mut rng)),
dim: 4, // 2^4 = 16
};
let mut coset_point = domain.offset.value().unwrap();
4, // 2^4 = 16
FpVar::constant(Fr::rand(&mut rng)),
)
.unwrap();
let mut coset_point = domain.offset().value().unwrap();
let mut oracle_evals = Vec::new();
for _ in 0..(1 << 4) {
oracle_evals.push(poly.evaluate(&coset_point));
@@ -387,12 +408,13 @@ mod tests {
let gen = Fr::get_root_of_unity(1 << 4).unwrap();
assert_eq!(gen.pow(&[1 << 4]), Fr::one());
let cs = ConstraintSystem::new_ref();
let domain = Radix2DomainVar {
let domain = Radix2DomainVar::new(
gen,
offset: FpVar::new_witness(ns!(cs, "offset"), || Ok(Fr::rand(&mut rng))).unwrap(),
dim: 4, // 2^4 = 16
};
let mut coset_point = domain.offset.value().unwrap();
4, // 2^4 = 16
FpVar::new_witness(ns!(cs, "offset"), || Ok(Fr::rand(&mut rng))).unwrap(),
)
.unwrap();
let mut coset_point = domain.offset().value().unwrap();
let mut oracle_evals = Vec::new();
for _ in 0..(1 << 4) {
oracle_evals.push(poly.evaluate(&coset_point));
@@ -427,12 +449,12 @@ mod tests {
let mut rng = test_rng();
let gen = Fr::get_root_of_unity(1 << 4).unwrap();
assert_eq!(gen.pow(&[1 << 4]), Fr::one());
let domain = Radix2DomainVar {
let domain = Radix2DomainVar::new(
gen,
offset: FpVar::constant(Fr::multiplicative_generator()),
dim: 4, // 2^4 = 16
};
4, // 2^4 = 16
FpVar::constant(Fr::multiplicative_generator()),
)
.unwrap();
let cs = ConstraintSystem::new_ref();
let ev_a = EvaluationsVar::from_vec_and_domain(
