Update to arkworks libraries (#3)

Co-authored-by: Nicholas Ward <npward@berkeley.edu>
Pratyush Mishra
2020-10-19 11:07:59 -07:00
committed by GitHub
parent cf4301cb75
commit 636f93a3e5
122 changed files with 910 additions and 9328 deletions

src/alloc.rs (new file, 101 lines)

@@ -0,0 +1,101 @@
use crate::Vec;
use ark_ff::Field;
use ark_relations::r1cs::{Namespace, SynthesisError};
use core::borrow::Borrow;
/// Describes the mode in which a variable should be allocated within
/// a `ConstraintSystem`.
#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Copy, Clone)]
pub enum AllocationMode {
/// Indicate to the `ConstraintSystem` that the high-level variable should
/// be allocated as a constant. That is, no `Variable`s should be
/// generated.
Constant = 0,
/// Indicate to the `ConstraintSystem` that the high-level variable should
/// be allocated as a public input to the `ConstraintSystem`.
Input = 1,
/// Indicate to the `ConstraintSystem` that the high-level variable should
/// be allocated as a private witness to the `ConstraintSystem`.
Witness = 2,
}
impl AllocationMode {
/// Outputs the maximum according to the relation `Constant < Input <
/// Witness`.
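///
/// For example, `AllocationMode::Constant.max(AllocationMode::Witness)`
/// is `AllocationMode::Witness`.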
pub fn max(&self, other: Self) -> Self {
use AllocationMode::*;
match (self, other) {
(Constant, _) => other,
(Input, Constant) => *self,
(Input, _) => other,
(Witness, _) => *self,
}
}
}
/// Specifies how variables of type `Self` should be allocated in a
/// `ConstraintSystem`.
pub trait AllocVar<V, F: Field>
where
Self: Sized,
V: ?Sized,
{
/// Allocates a new variable of type `Self` in the `ConstraintSystem` `cs`.
/// The mode of allocation is decided by `mode`.
fn new_variable<T: Borrow<V>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError>;
/// Allocates a new constant of type `Self` in the `ConstraintSystem` `cs`.
///
/// This should *not* allocate any new variables or constraints in `cs`.
#[tracing::instrument(target = "r1cs", skip(cs, t))]
fn new_constant(
cs: impl Into<Namespace<F>>,
t: impl Borrow<V>,
) -> Result<Self, SynthesisError> {
Self::new_variable(cs, || Ok(t), AllocationMode::Constant)
}
/// Allocates a new public input of type `Self` in the `ConstraintSystem`
/// `cs`.
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_input<T: Borrow<V>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
) -> Result<Self, SynthesisError> {
Self::new_variable(cs, f, AllocationMode::Input)
}
/// Allocates a new private witness of type `Self` in the `ConstraintSystem`
/// `cs`.
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_witness<T: Borrow<V>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
) -> Result<Self, SynthesisError> {
Self::new_variable(cs, f, AllocationMode::Witness)
}
}
/// This blanket implementation just allocates variables in `Self`
/// element by element.
impl<I, F: Field, A: AllocVar<I, F>> AllocVar<[I], F> for Vec<A> {
fn new_variable<T: Borrow<[I]>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let mut vec = Vec::new();
for value in f()?.borrow().iter() {
vec.push(A::new_variable(cs.clone(), || Ok(value), mode)?);
}
Ok(vec)
}
}
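
A minimal usage sketch of this trait (illustration only, not part of the commit), assuming the `UInt8` gadget added later in this diff and the `ark_test_curves::bls12_381::Fr` field used by the doc-tests below:

use ark_r1cs_std::prelude::*;
use ark_relations::r1cs::ConstraintSystem;
use ark_test_curves::bls12_381::Fr;

fn alloc_example() -> Result<(), ark_relations::r1cs::SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    // A constant: no new variables or constraints are added to `cs`.
    let c = UInt8::new_constant(cs.clone(), 42u8)?;
    // A private witness: allocated via `AllocationMode::Witness`.
    let w = UInt8::new_witness(cs.clone(), || Ok(42u8))?;
    w.enforce_equal(&c)?;
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}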

src/bits/boolean.rs (new file, 1739 lines)

File diff suppressed because it is too large.

src/bits/mod.rs (new file, 128 lines)

@@ -0,0 +1,128 @@
use crate::{
bits::{boolean::Boolean, uint8::UInt8},
Vec,
};
use ark_ff::Field;
use ark_relations::r1cs::SynthesisError;
/// This module contains `Boolean`, an R1CS equivalent of the `bool` type.
pub mod boolean;
/// This module contains `UInt8`, an R1CS equivalent of the `u8` type.
pub mod uint8;
/// This module contains a macro for generating `UIntN` types, which are R1CS
/// equivalents of `N`-bit unsigned integers.
#[macro_use]
pub mod uint;
make_uint!(UInt16, 16, u16, uint16, "16");
make_uint!(UInt32, 32, u32, uint32, "32");
make_uint!(UInt64, 64, u64, uint64, "64");
/// Specifies constraints for conversion to a little-endian bit representation
/// of `self`.
pub trait ToBitsGadget<F: Field> {
/// Outputs the canonical little-endian bit-wise representation of `self`.
///
/// This is the correct default for 99% of use cases.
fn to_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError>;
/// Outputs a possibly non-unique little-endian bit-wise representation of
/// `self`.
///
/// If you're not absolutely certain that your use case can get away with a
/// non-canonical representation, please use `self.to_bits_le()` instead.
fn to_non_unique_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
self.to_bits_le()
}
/// Outputs the canonical big-endian bit-wise representation of `self`.
fn to_bits_be(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
let mut res = self.to_bits_le()?;
res.reverse();
Ok(res)
}
/// Outputs a possibly non-unique big-endian bit-wise representation of
/// `self`.
fn to_non_unique_bits_be(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
let mut res = self.to_non_unique_bits_le()?;
res.reverse();
Ok(res)
}
}
impl<F: Field> ToBitsGadget<F> for Boolean<F> {
fn to_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
Ok(vec![self.clone()])
}
}
impl<F: Field> ToBitsGadget<F> for [Boolean<F>] {
/// Outputs `self`.
fn to_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
Ok(self.to_vec())
}
}
impl<F: Field> ToBitsGadget<F> for UInt8<F> {
fn to_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
Ok(self.bits.to_vec())
}
}
impl<F: Field> ToBitsGadget<F> for [UInt8<F>] {
/// Interprets `self` as an integer, and outputs the little-endian
/// bit-wise decomposition of that integer.
fn to_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
let bits = self.iter().flat_map(|b| &b.bits).cloned().collect();
Ok(bits)
}
}
impl<F: Field, T> ToBitsGadget<F> for Vec<T>
where
[T]: ToBitsGadget<F>,
{
fn to_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
self.as_slice().to_bits_le().map(|v| v.to_vec())
}
fn to_non_unique_bits_le(&self) -> Result<Vec<Boolean<F>>, SynthesisError> {
self.as_slice().to_non_unique_bits_le().map(|v| v.to_vec())
}
}
/// Specifies constraints for conversion to a little-endian byte representation
/// of `self`.
pub trait ToBytesGadget<F: Field> {
/// Outputs a canonical, little-endian, byte decomposition of `self`.
///
/// This is the correct default for 99% of use cases.
fn to_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError>;
/// Outputs a possibly non-unique byte decomposition of `self`.
///
/// If you're not absolutely certain that your use case can get away with a
/// non-canonical representation, please use `self.to_bytes()` instead.
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
self.to_bytes()
}
}
impl<F: Field> ToBytesGadget<F> for [UInt8<F>] {
fn to_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
Ok(self.to_vec())
}
}
impl<'a, F: Field, T: 'a + ToBytesGadget<F>> ToBytesGadget<F> for &'a T {
fn to_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
(*self).to_bytes()
}
}
impl<'a, F: Field> ToBytesGadget<F> for &'a [UInt8<F>] {
fn to_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
Ok(self.to_vec())
}
}
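
A short sketch of the bit-order conventions (an illustration, not part of the commit), again assuming `UInt8` and the bls12-381 scalar field: `to_bits_be` is simply the reverse of the canonical little-endian decomposition.

use ark_r1cs_std::prelude::*;
use ark_test_curves::bls12_381::Fr;

fn bit_order_example() -> Result<(), ark_relations::r1cs::SynthesisError> {
    // 0b0000_0001: only the least significant bit is set.
    let byte = UInt8::<Fr>::constant(0b0000_0001);
    let le = byte.to_bits_le()?;
    let be = byte.to_bits_be()?;
    assert!(le.first().unwrap().value()?); // little-endian: LSB comes first
    assert!(be.last().unwrap().value()?);  // big-endian: LSB comes last
    Ok(())
}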

src/bits/uint.rs (new file, 539 lines)

@@ -0,0 +1,539 @@
macro_rules! make_uint {
($name:ident, $size:expr, $native:ident, $mod_name:ident, $native_doc_name:expr) => {
#[doc = "This module contains a `UInt"]
#[doc = $native_doc_name]
#[doc = "`, a R1CS equivalent of the `u"]
#[doc = $native_doc_name]
#[doc = "`type."]
pub mod $mod_name {
use ark_ff::{Field, FpParameters, PrimeField};
use core::borrow::Borrow;
use core::convert::TryFrom;
use ark_relations::r1cs::{
ConstraintSystemRef, LinearCombination, Namespace, SynthesisError, Variable,
};
use crate::{
boolean::{AllocatedBit, Boolean},
prelude::*,
Assignment, Vec,
};
#[doc = "This struct represent an unsigned"]
#[doc = $native_doc_name]
#[doc = "-bit integer as a sequence of "]
#[doc = $native_doc_name]
#[doc = " `Boolean`s\n"]
#[doc = "This is the R1CS equivalent of the native `u"]
#[doc = $native_doc_name]
#[doc = "` unsigned integer type."]
#[derive(Clone, Debug)]
pub struct $name<F: Field> {
// Least significant bit first
bits: Vec<Boolean<F>>,
value: Option<$native>,
}
impl<F: Field> R1CSVar<F> for $name<F> {
type Value = $native;
fn cs(&self) -> ConstraintSystemRef<F> {
self.bits.as_slice().cs()
}
fn value(&self) -> Result<Self::Value, SynthesisError> {
let mut value = None;
for (i, bit) in self.bits.iter().enumerate() {
let b = $native::from(bit.value()?);
value = match value {
Some(value) => Some(value + (b << i)),
None => Some(b << i),
};
}
debug_assert_eq!(self.value, value);
value.get()
}
}
impl<F: Field> $name<F> {
#[doc = "Construct a constant `UInt"]
#[doc = $native_doc_name]
#[doc = "` from the native `u"]
#[doc = $native_doc_name]
#[doc = "` type."]
pub fn constant(value: $native) -> Self {
let mut bits = Vec::with_capacity($size);
let mut tmp = value;
for _ in 0..$size {
if tmp & 1 == 1 {
bits.push(Boolean::constant(true))
} else {
bits.push(Boolean::constant(false))
}
tmp >>= 1;
}
$name {
bits,
value: Some(value),
}
}
/// Turns `self` into the underlying little-endian bits.
pub fn to_bits_le(&self) -> Vec<Boolean<F>> {
self.bits.clone()
}
/// Construct `Self` from a slice of `Boolean`s.
///
/// # Panics
///
#[doc = "This method panics if `bits.len() != "]
#[doc = $native_doc_name]
#[doc = "`."]
pub fn from_bits_le(bits: &[Boolean<F>]) -> Self {
assert_eq!(bits.len(), $size);
let bits = bits.to_vec();
let mut value = Some(0);
for b in bits.iter().rev() {
value.as_mut().map(|v| *v <<= 1);
match b {
&Boolean::Constant(b) => {
value.as_mut().map(|v| *v |= $native::from(b));
}
&Boolean::Is(ref b) => match b.value() {
Ok(b) => {
value.as_mut().map(|v| *v |= $native::from(b));
}
Err(_) => value = None,
},
&Boolean::Not(ref b) => match b.value() {
Ok(b) => {
value.as_mut().map(|v| *v |= $native::from(!b));
}
Err(_) => value = None,
},
}
}
Self { value, bits }
}
/// Rotates `self` to the right by `by` steps, wrapping around.
#[tracing::instrument(target = "r1cs", skip(self))]
pub fn rotr(&self, by: usize) -> Self {
let by = by % $size;
let new_bits = self
.bits
.iter()
.skip(by)
.chain(self.bits.iter())
.take($size)
.cloned()
.collect();
$name {
bits: new_bits,
value: self
.value
.map(|v| v.rotate_right(u32::try_from(by).unwrap())),
}
}
/// Outputs `self ^ other`.
///
/// If at least one of `self` and `other` is a constant, then this method
/// *does not* create any constraints or variables.
#[tracing::instrument(target = "r1cs", skip(self, other))]
pub fn xor(&self, other: &Self) -> Result<Self, SynthesisError> {
let new_value = match (self.value, other.value) {
(Some(a), Some(b)) => Some(a ^ b),
_ => None,
};
let bits = self
.bits
.iter()
.zip(other.bits.iter())
.map(|(a, b)| a.xor(b))
.collect::<Result<_, _>>()?;
Ok($name {
bits,
value: new_value,
})
}
/// Perform modular addition of `operands`.
///
/// The user must ensure that overflow does not occur.
#[tracing::instrument(target = "r1cs", skip(operands))]
pub fn addmany(operands: &[Self]) -> Result<Self, SynthesisError>
where
F: PrimeField,
{
// Make some arbitrary bounds for ourselves to avoid overflows
// in the scalar field
assert!(F::Params::MODULUS_BITS >= 2 * $size);
assert!(operands.len() >= 1);
assert!($size * operands.len() <= F::Params::MODULUS_BITS as usize);
if operands.len() == 1 {
return Ok(operands[0].clone());
}
// Compute the maximum value of the sum so we allocate enough bits for
// the result
let mut max_value = (operands.len() as u128) * u128::from($native::max_value());
// Keep track of the resulting value
let mut result_value = Some(0u128);
// This is a linear combination that we will enforce to be "zero"
let mut lc = LinearCombination::zero();
let mut all_constants = true;
// Iterate over the operands
for op in operands {
// Accumulate the value
match op.value {
Some(val) => {
result_value.as_mut().map(|v| *v += u128::from(val));
}
None => {
// If any of our operands have unknown value, we won't
// know the value of the result
result_value = None;
}
}
// Iterate over each bit_gadget of the operand and add the operand to
// the linear combination
let mut coeff = F::one();
for bit in &op.bits {
match *bit {
Boolean::Is(ref bit) => {
all_constants = false;
// Add coeff * bit_gadget
lc += (coeff, bit.variable());
}
Boolean::Not(ref bit) => {
all_constants = false;
// Add coeff * (1 - bit_gadget) = coeff * ONE - coeff * bit_gadget
lc = lc + (coeff, Variable::One) - (coeff, bit.variable());
}
Boolean::Constant(bit) => {
if bit {
lc += (coeff, Variable::One);
}
}
}
coeff.double_in_place();
}
}
// The value of the actual result is modulo 2^$size
let modular_value = result_value.map(|v| v as $native);
if all_constants && modular_value.is_some() {
// We can just return a constant, rather than
// unpacking the result into allocated bits.
return Ok($name::constant(modular_value.unwrap()));
}
let cs = operands.cs();
// Storage area for the resulting bits
let mut result_bits = vec![];
// Allocate each bit_gadget of the result
let mut coeff = F::one();
let mut i = 0;
while max_value != 0 {
// Allocate the bit_gadget
let b = AllocatedBit::new_witness(cs.clone(), || {
result_value.map(|v| (v >> i) & 1 == 1).get()
})?;
// Subtract this bit_gadget from the linear combination to ensure the sums
// balance out
lc = lc - (coeff, b.variable());
result_bits.push(b.into());
max_value >>= 1;
i += 1;
coeff.double_in_place();
}
// Enforce that the linear combination equals zero
cs.enforce_constraint(lc!(), lc!(), lc)?;
// Discard carry bits that we don't care about
result_bits.truncate($size);
Ok($name {
bits: result_bits,
value: modular_value,
})
}
}
impl<ConstraintF: Field> ToBytesGadget<ConstraintF> for $name<ConstraintF> {
#[tracing::instrument(target = "r1cs", skip(self))]
fn to_bytes(&self) -> Result<Vec<UInt8<ConstraintF>>, SynthesisError> {
Ok(self
.to_bits_le()
.chunks(8)
.map(UInt8::from_bits_le)
.collect())
}
}
impl<ConstraintF: Field> EqGadget<ConstraintF> for $name<ConstraintF> {
#[tracing::instrument(target = "r1cs", skip(self))]
fn is_eq(&self, other: &Self) -> Result<Boolean<ConstraintF>, SynthesisError> {
self.bits.as_slice().is_eq(&other.bits)
}
#[tracing::instrument(target = "r1cs", skip(self))]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<ConstraintF>,
) -> Result<(), SynthesisError> {
self.bits.conditional_enforce_equal(&other.bits, condition)
}
#[tracing::instrument(target = "r1cs", skip(self))]
fn conditional_enforce_not_equal(
&self,
other: &Self,
condition: &Boolean<ConstraintF>,
) -> Result<(), SynthesisError> {
self.bits
.conditional_enforce_not_equal(&other.bits, condition)
}
}
impl<ConstraintF: Field> AllocVar<$native, ConstraintF> for $name<ConstraintF> {
fn new_variable<T: Borrow<$native>>(
cs: impl Into<Namespace<ConstraintF>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let value = f().map(|f| *f.borrow());
let values = match value {
Ok(val) => (0..$size).map(|i| Some((val >> i) & 1 == 1)).collect(),
_ => vec![None; $size],
};
let bits = values
.into_iter()
.map(|v| Boolean::new_variable(cs.clone(), || v.get(), mode))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
bits,
value: value.ok(),
})
}
}
#[cfg(test)]
mod test {
use super::$name;
use crate::{bits::boolean::Boolean, prelude::*, Vec};
use ark_test_curves::bls12_381::Fr;
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
#[test]
fn test_from_bits() -> Result<(), SynthesisError> {
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
for _ in 0..1000 {
let v = (0..$size)
.map(|_| Boolean::constant(rng.gen()))
.collect::<Vec<Boolean<Fr>>>();
let b = $name::from_bits_le(&v);
for (i, bit) in b.bits.iter().enumerate() {
match bit {
&Boolean::Constant(bit) => {
assert_eq!(bit, ((b.value()? >> i) & 1 == 1));
}
_ => unreachable!(),
}
}
let expected_to_be_same = b.to_bits_le();
for x in v.iter().zip(expected_to_be_same.iter()) {
match x {
(&Boolean::Constant(true), &Boolean::Constant(true)) => {}
(&Boolean::Constant(false), &Boolean::Constant(false)) => {}
_ => unreachable!(),
}
}
}
Ok(())
}
#[test]
fn test_xor() -> Result<(), SynthesisError> {
use Boolean::*;
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
for _ in 0..1000 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a: $native = rng.gen();
let b: $native = rng.gen();
let c: $native = rng.gen();
let mut expected = a ^ b ^ c;
let a_bit = $name::new_witness(cs.clone(), || Ok(a))?;
let b_bit = $name::constant(b);
let c_bit = $name::new_witness(cs.clone(), || Ok(c))?;
let r = a_bit.xor(&b_bit).unwrap();
let r = r.xor(&c_bit).unwrap();
assert!(cs.is_satisfied().unwrap());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
Is(b) => assert_eq!(b.value()?, (expected & 1 == 1)),
Not(b) => assert_eq!(!b.value()?, (expected & 1 == 1)),
Constant(b) => assert_eq!(*b, (expected & 1 == 1)),
}
expected >>= 1;
}
}
Ok(())
}
#[test]
fn test_addmany_constants() -> Result<(), SynthesisError> {
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
for _ in 0..1000 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a: $native = rng.gen();
let b: $native = rng.gen();
let c: $native = rng.gen();
let a_bit = $name::new_constant(cs.clone(), a)?;
let b_bit = $name::new_constant(cs.clone(), b)?;
let c_bit = $name::new_constant(cs.clone(), c)?;
let mut expected = a.wrapping_add(b).wrapping_add(c);
let r = $name::addmany(&[a_bit, b_bit, c_bit]).unwrap();
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
Boolean::Is(_) => unreachable!(),
Boolean::Not(_) => unreachable!(),
Boolean::Constant(b) => assert_eq!(*b, (expected & 1 == 1)),
}
expected >>= 1;
}
}
Ok(())
}
#[test]
fn test_addmany() -> Result<(), SynthesisError> {
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
for _ in 0..1000 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a: $native = rng.gen();
let b: $native = rng.gen();
let c: $native = rng.gen();
let d: $native = rng.gen();
let mut expected = (a ^ b).wrapping_add(c).wrapping_add(d);
let a_bit = $name::new_witness(ark_relations::ns!(cs, "a_bit"), || Ok(a))?;
let b_bit = $name::constant(b);
let c_bit = $name::constant(c);
let d_bit = $name::new_witness(ark_relations::ns!(cs, "d_bit"), || Ok(d))?;
let r = a_bit.xor(&b_bit).unwrap();
let r = $name::addmany(&[r, c_bit, d_bit]).unwrap();
assert!(cs.is_satisfied().unwrap());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
Boolean::Is(b) => assert_eq!(b.value()?, (expected & 1 == 1)),
Boolean::Not(b) => assert_eq!(!b.value()?, (expected & 1 == 1)),
Boolean::Constant(_) => unreachable!(),
}
expected >>= 1;
}
}
Ok(())
}
#[test]
fn test_rotr() -> Result<(), SynthesisError> {
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
let mut num = rng.gen();
let a: $name<Fr> = $name::constant(num);
for i in 0..$size {
let b = a.rotr(i);
assert!(b.value.unwrap() == num);
let mut tmp = num;
for b in &b.bits {
match b {
Boolean::Constant(b) => assert_eq!(*b, tmp & 1 == 1),
_ => unreachable!(),
}
tmp >>= 1;
}
num = num.rotate_right(1);
}
Ok(())
}
}
}
};
}
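
A sketch of how the generated gadgets compose (not part of the commit). It assumes `UInt32` is reachable at `ark_r1cs_std::bits::uint32::UInt32` (the `uint32` module is generated by the `make_uint!` invocation in `src/bits/mod.rs` above; the exact re-export path is an assumption here), over the bls12-381 scalar field:

// Assumed path for the macro-generated type.
use ark_r1cs_std::{bits::uint32::UInt32, prelude::*};
use ark_relations::r1cs::ConstraintSystem;
use ark_test_curves::bls12_381::Fr;

fn uint32_example() -> Result<(), ark_relations::r1cs::SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let a_val: u32 = 0xDEAD_BEEF;
    let b_val: u32 = 0x0123_4567;
    let a = UInt32::new_witness(cs.clone(), || Ok(a_val))?;
    let b = UInt32::constant(b_val);
    // XOR with a constant adds no constraints; `rotr` just re-indexes bits.
    let x = a.xor(&b)?.rotr(7);
    // `addmany` adds modulo 2^32.
    let sum = UInt32::addmany(&[x, UInt32::constant(1)])?;
    assert_eq!(sum.value()?, (a_val ^ b_val).rotate_right(7).wrapping_add(1));
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}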

src/bits/uint8.rs (new file, 422 lines)

@@ -0,0 +1,422 @@
use ark_ff::{Field, FpParameters, PrimeField, ToConstraintField};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use crate::{fields::fp::AllocatedFp, prelude::*, Assignment, Vec};
use core::borrow::Borrow;
/// Represents an interpretation of 8 `Boolean` objects as an
/// unsigned integer.
#[derive(Clone, Debug)]
pub struct UInt8<F: Field> {
/// Little-endian representation: least significant bit first
pub(crate) bits: Vec<Boolean<F>>,
pub(crate) value: Option<u8>,
}
impl<F: Field> R1CSVar<F> for UInt8<F> {
type Value = u8;
fn cs(&self) -> ConstraintSystemRef<F> {
self.bits.as_slice().cs()
}
fn value(&self) -> Result<Self::Value, SynthesisError> {
let mut value = None;
for (i, bit) in self.bits.iter().enumerate() {
let b = u8::from(bit.value()?);
value = match value {
Some(value) => Some(value + (b << i)),
None => Some(b << i),
};
}
debug_assert_eq!(self.value, value);
value.get()
}
}
impl<F: Field> UInt8<F> {
/// Construct a constant vector of `UInt8` from a slice of `u8`.
///
/// This *does not* create any new variables or constraints.
/// ```
/// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
/// // We'll use the BLS12-381 scalar field for our constraints.
/// use ark_test_curves::bls12_381::Fr;
/// use ark_relations::r1cs::*;
/// use ark_r1cs_std::prelude::*;
///
/// let cs = ConstraintSystem::<Fr>::new_ref();
/// let var = vec![UInt8::new_witness(cs.clone(), || Ok(2))?];
///
/// let constant = UInt8::constant_vec(&[2]);
/// var.enforce_equal(&constant)?;
/// assert!(cs.is_satisfied().unwrap());
/// # Ok(())
/// # }
/// ```
pub fn constant_vec(values: &[u8]) -> Vec<Self> {
let mut result = Vec::new();
for value in values {
result.push(UInt8::constant(*value));
}
result
}
/// Construct a constant `UInt8` from a `u8`
///
/// This *does not* create new variables or constraints.
///
/// ```
/// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
/// // We'll use the BLS12-381 scalar field for our constraints.
/// use ark_test_curves::bls12_381::Fr;
/// use ark_relations::r1cs::*;
/// use ark_r1cs_std::prelude::*;
///
/// let cs = ConstraintSystem::<Fr>::new_ref();
/// let var = UInt8::new_witness(cs.clone(), || Ok(2))?;
///
/// let constant = UInt8::constant(2);
/// var.enforce_equal(&constant)?;
/// assert!(cs.is_satisfied().unwrap());
/// # Ok(())
/// # }
/// ```
pub fn constant(value: u8) -> Self {
let mut bits = Vec::with_capacity(8);
let mut tmp = value;
for _ in 0..8 {
// Push the least significant bit of `tmp`.
bits.push(Boolean::constant(tmp & 1 == 1));
tmp >>= 1;
}
Self {
bits,
value: Some(value),
}
}
/// Allocates a slice of `u8`s as private witnesses.
pub fn new_witness_vec(
cs: impl Into<Namespace<F>>,
values: &[impl Into<Option<u8>> + Copy],
) -> Result<Vec<Self>, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let mut output_vec = Vec::with_capacity(values.len());
for value in values {
let byte: Option<u8> = Into::into(*value);
output_vec.push(Self::new_witness(cs.clone(), || byte.get())?);
}
Ok(output_vec)
}
/// Allocates a slice of `u8`s as public inputs by first packing them into
/// elements of `F` (thus reducing the number of input allocations),
/// allocating these field elements as public inputs, and then converting
/// the resulting `FpVar<F>` variables back into bytes.
///
/// From a user perspective, this trade-off adds constraints, but improves
/// verifier time and verification key size.
///
/// ```
/// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
/// // We'll use the BLS12-381 scalar field for our constraints.
/// use ark_test_curves::bls12_381::Fr;
/// use ark_relations::r1cs::*;
/// use ark_r1cs_std::prelude::*;
///
/// let cs = ConstraintSystem::<Fr>::new_ref();
/// let two = UInt8::new_witness(cs.clone(), || Ok(2))?;
/// let var = vec![two.clone(); 32];
///
/// let c = UInt8::new_input_vec(cs.clone(), &[2; 32])?;
/// var.enforce_equal(&c)?;
/// assert!(cs.is_satisfied().unwrap());
/// # Ok(())
/// # }
/// ```
pub fn new_input_vec(
cs: impl Into<Namespace<F>>,
values: &[u8],
) -> Result<Vec<Self>, SynthesisError>
where
F: PrimeField,
{
let ns = cs.into();
let cs = ns.cs();
let values_len = values.len();
let field_elements: Vec<F> = ToConstraintField::<F>::to_field_elements(values).unwrap();
let max_size = 8 * (F::Params::CAPACITY / 8) as usize;
let mut allocated_bits = Vec::new();
for field_element in field_elements.into_iter() {
let fe = AllocatedFp::new_input(cs.clone(), || Ok(field_element))?;
let fe_bits = fe.to_bits_le()?;
// Remove the most significant bit, because we know it should be zero:
// `values.to_field_elements()` only packs field elements up to the
// penultimate bit. That is, the most significant bit
// (the `ConstraintF::NUM_BITS`-th bit) is unset, so we can just pop it off.
allocated_bits.extend_from_slice(&fe_bits[0..max_size]);
}
// Chunk the bits into groups of 8 to form bytes.
Ok(allocated_bits[0..(8 * values_len)]
.chunks(8)
.map(Self::from_bits_le)
.collect())
}
/// Converts a little-endian byte order representation of bits into a
/// `UInt8`.
///
/// ```
/// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
/// // We'll use the BLS12-381 scalar field for our constraints.
/// use ark_test_curves::bls12_381::Fr;
/// use ark_relations::r1cs::*;
/// use ark_r1cs_std::prelude::*;
///
/// let cs = ConstraintSystem::<Fr>::new_ref();
/// let var = UInt8::new_witness(cs.clone(), || Ok(128))?;
///
/// let f = Boolean::FALSE;
/// let t = Boolean::TRUE;
///
/// // Construct [0, 0, 0, 0, 0, 0, 0, 1]
/// let mut bits = vec![f.clone(); 7];
/// bits.push(t);
///
/// let mut c = UInt8::from_bits_le(&bits);
/// var.enforce_equal(&c)?;
/// assert!(cs.is_satisfied().unwrap());
/// # Ok(())
/// # }
/// ```
#[tracing::instrument(target = "r1cs")]
pub fn from_bits_le(bits: &[Boolean<F>]) -> Self {
assert_eq!(bits.len(), 8);
let bits = bits.to_vec();
let mut value = Some(0u8);
for (i, b) in bits.iter().enumerate() {
value = match b.value().ok() {
Some(b) => value.map(|v| v + (u8::from(b) << i)),
None => None,
}
}
Self { value, bits }
}
/// Outputs `self ^ other`.
///
/// If at least one of `self` and `other` is a constant, then this method
/// *does not* create any constraints or variables.
///
/// ```
/// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
/// // We'll use the BLS12-381 scalar field for our constraints.
/// use ark_test_curves::bls12_381::Fr;
/// use ark_relations::r1cs::*;
/// use ark_r1cs_std::prelude::*;
///
/// let cs = ConstraintSystem::<Fr>::new_ref();
/// let a = UInt8::new_witness(cs.clone(), || Ok(16))?;
/// let b = UInt8::new_witness(cs.clone(), || Ok(17))?;
/// let c = UInt8::new_witness(cs.clone(), || Ok(1))?;
///
/// a.xor(&b)?.enforce_equal(&c)?;
/// assert!(cs.is_satisfied().unwrap());
/// # Ok(())
/// # }
/// ```
#[tracing::instrument(target = "r1cs")]
pub fn xor(&self, other: &Self) -> Result<Self, SynthesisError> {
let new_value = match (self.value, other.value) {
(Some(a), Some(b)) => Some(a ^ b),
_ => None,
};
let bits = self
.bits
.iter()
.zip(other.bits.iter())
.map(|(a, b)| a.xor(b))
.collect::<Result<_, _>>()?;
Ok(Self {
bits,
value: new_value,
})
}
}
impl<ConstraintF: Field> EqGadget<ConstraintF> for UInt8<ConstraintF> {
#[tracing::instrument(target = "r1cs")]
fn is_eq(&self, other: &Self) -> Result<Boolean<ConstraintF>, SynthesisError> {
self.bits.as_slice().is_eq(&other.bits)
}
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<ConstraintF>,
) -> Result<(), SynthesisError> {
self.bits.conditional_enforce_equal(&other.bits, condition)
}
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_not_equal(
&self,
other: &Self,
condition: &Boolean<ConstraintF>,
) -> Result<(), SynthesisError> {
self.bits
.conditional_enforce_not_equal(&other.bits, condition)
}
}
impl<ConstraintF: Field> AllocVar<u8, ConstraintF> for UInt8<ConstraintF> {
fn new_variable<T: Borrow<u8>>(
cs: impl Into<Namespace<ConstraintF>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let value = f().map(|f| *f.borrow());
let values = match value {
Ok(val) => (0..8).map(|i| Some((val >> i) & 1 == 1)).collect(),
_ => vec![None; 8],
};
let bits = values
.into_iter()
.map(|v| Boolean::new_variable(cs.clone(), || v.get(), mode))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
bits,
value: value.ok(),
})
}
}
#[cfg(test)]
mod test {
use super::UInt8;
use crate::{prelude::*, Vec};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
#[test]
fn test_uint8_from_bits_to_bits() -> Result<(), SynthesisError> {
let cs = ConstraintSystem::<Fr>::new_ref();
let byte_val = 0b01110001;
let byte =
UInt8::new_witness(ark_relations::ns!(cs, "alloc value"), || Ok(byte_val)).unwrap();
let bits = byte.to_bits_le()?;
for (i, bit) in bits.iter().enumerate() {
assert_eq!(bit.value()?, (byte_val >> i) & 1 == 1)
}
Ok(())
}
#[test]
fn test_uint8_new_input_vec() -> Result<(), SynthesisError> {
let cs = ConstraintSystem::<Fr>::new_ref();
let byte_vals = (64u8..128u8).collect::<Vec<_>>();
let bytes =
UInt8::new_input_vec(ark_relations::ns!(cs, "alloc value"), &byte_vals).unwrap();
dbg!(bytes.value())?;
for (native, variable) in byte_vals.into_iter().zip(bytes) {
let bits = variable.to_bits_le()?;
for (i, bit) in bits.iter().enumerate() {
assert_eq!(
bit.value()?,
(native >> i) & 1 == 1,
"native value {}: bit {:?}",
native,
i
)
}
}
Ok(())
}
#[test]
fn test_uint8_from_bits() -> Result<(), SynthesisError> {
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
for _ in 0..1000 {
let v = (0..8)
.map(|_| Boolean::<Fr>::Constant(rng.gen()))
.collect::<Vec<_>>();
let val = UInt8::from_bits_le(&v);
for (i, bit) in val.bits.iter().enumerate() {
match bit {
Boolean::Constant(b) => assert!(*b == ((val.value()? >> i) & 1 == 1)),
_ => unreachable!(),
}
}
let expected_to_be_same = val.to_bits_le()?;
for x in v.iter().zip(expected_to_be_same.iter()) {
match x {
(&Boolean::Constant(true), &Boolean::Constant(true)) => {}
(&Boolean::Constant(false), &Boolean::Constant(false)) => {}
_ => unreachable!(),
}
}
}
Ok(())
}
#[test]
fn test_uint8_xor() -> Result<(), SynthesisError> {
let mut rng = XorShiftRng::seed_from_u64(1231275789u64);
for _ in 0..1000 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a: u8 = rng.gen();
let b: u8 = rng.gen();
let c: u8 = rng.gen();
let mut expected = a ^ b ^ c;
let a_bit = UInt8::new_witness(ark_relations::ns!(cs, "a_bit"), || Ok(a)).unwrap();
let b_bit = UInt8::constant(b);
let c_bit = UInt8::new_witness(ark_relations::ns!(cs, "c_bit"), || Ok(c)).unwrap();
let r = a_bit.xor(&b_bit).unwrap();
let r = r.xor(&c_bit).unwrap();
assert!(cs.is_satisfied().unwrap());
assert!(r.value == Some(expected));
for b in r.bits.iter() {
match b {
Boolean::Is(b) => assert!(b.value()? == (expected & 1 == 1)),
Boolean::Not(b) => assert!(!b.value()? == (expected & 1 == 1)),
Boolean::Constant(b) => assert!(*b == (expected & 1 == 1)),
}
expected >>= 1;
}
}
Ok(())
}
}
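
A sketch contrasting the two vector allocation paths (hypothetical usage, not from the diff), assuming the bls12-381 scalar field: `new_input_vec` packs the bytes into field-element public inputs, `new_witness_vec` allocates them bit by bit as witnesses, and the two representations can be constrained to agree.

use ark_r1cs_std::prelude::*;
use ark_relations::r1cs::ConstraintSystem;
use ark_test_curves::bls12_381::Fr;

fn packed_input_example() -> Result<(), ark_relations::r1cs::SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let data = *b"32 bytes of public input data!!!";
    // Packed into a few field-element public inputs, then unpacked into bytes.
    let public = UInt8::new_input_vec(cs.clone(), &data)?;
    // Allocated bit by bit as private witnesses.
    let private = UInt8::new_witness_vec(cs.clone(), &data[..])?;
    // Both allocations represent the same bytes, so equality holds.
    public.enforce_equal(&private)?;
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}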

src/eq.rs (new file, 130 lines)

@@ -0,0 +1,130 @@
use crate::{prelude::*, Vec};
use ark_ff::Field;
use ark_relations::r1cs::SynthesisError;
/// Specifies how to generate constraints that check for equality for two
/// variables of type `Self`.
pub trait EqGadget<F: Field> {
/// Output a `Boolean` value representing whether `self.value() ==
/// other.value()`.
fn is_eq(&self, other: &Self) -> Result<Boolean<F>, SynthesisError>;
/// Output a `Boolean` value representing whether `self.value() !=
/// other.value()`.
///
/// By default, this is defined as `self.is_eq(other)?.not()`.
fn is_neq(&self, other: &Self) -> Result<Boolean<F>, SynthesisError> {
Ok(self.is_eq(other)?.not())
}
/// If `should_enforce == true`, enforce that `self` and `other` are equal;
/// else, enforce a vacuously true statement.
///
/// A safe default implementation is provided that generates the following
/// constraints: `self.is_eq(other)?.conditional_enforce_equal(&Boolean::TRUE,
/// should_enforce)`.
///
/// More efficient specialized implementations may be possible; implementors
/// are encouraged to carefully analyze the efficiency and safety of these.
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn conditional_enforce_equal(
&self,
other: &Self,
should_enforce: &Boolean<F>,
) -> Result<(), SynthesisError> {
self.is_eq(&other)?
.conditional_enforce_equal(&Boolean::constant(true), should_enforce)
}
/// Enforce that `self` and `other` are equal.
///
/// A safe default implementation is provided that generates the following
/// constraints: `self.conditional_enforce_equal(other,
/// &Boolean::TRUE)`.
///
/// More efficient specialized implementations may be possible; implementors
/// are encouraged to carefully analyze the efficiency and safety of these.
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn enforce_equal(&self, other: &Self) -> Result<(), SynthesisError> {
self.conditional_enforce_equal(other, &Boolean::constant(true))
}
/// If `should_enforce == true`, enforce that `self` and `other` are *not*
/// equal; else, enforce a vacuously true statement.
///
/// A safe default implementation is provided that generates the following
/// constraints: `self.is_neq(other)?.conditional_enforce_equal(&Boolean::TRUE,
/// should_enforce)`.
///
/// More efficient specialized implementations may be possible; implementors
/// are encouraged to carefully analyze the efficiency and safety of these.
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn conditional_enforce_not_equal(
&self,
other: &Self,
should_enforce: &Boolean<F>,
) -> Result<(), SynthesisError> {
self.is_neq(&other)?
.conditional_enforce_equal(&Boolean::constant(true), should_enforce)
}
/// Enforce that `self` and `other` are *not* equal.
///
/// A safe default implementation is provided that generates the following
/// constraints: `self.conditional_enforce_not_equal(other,
/// &Boolean::TRUE)`.
///
/// More efficient specialized implementations may be possible; implementors
/// are encouraged to carefully analyze the efficiency and safety of these.
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn enforce_not_equal(&self, other: &Self) -> Result<(), SynthesisError> {
self.conditional_enforce_not_equal(other, &Boolean::constant(true))
}
}
impl<T: EqGadget<F> + R1CSVar<F>, F: Field> EqGadget<F> for [T] {
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn is_eq(&self, other: &Self) -> Result<Boolean<F>, SynthesisError> {
assert_eq!(self.len(), other.len());
assert!(!self.is_empty());
let mut results = Vec::with_capacity(self.len());
for (a, b) in self.iter().zip(other) {
results.push(a.is_eq(b)?);
}
Boolean::kary_and(&results)
}
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<F>,
) -> Result<(), SynthesisError> {
assert_eq!(self.len(), other.len());
for (a, b) in self.iter().zip(other) {
a.conditional_enforce_equal(b, condition)?;
}
Ok(())
}
#[tracing::instrument(target = "r1cs", skip(self, other))]
fn conditional_enforce_not_equal(
&self,
other: &Self,
should_enforce: &Boolean<F>,
) -> Result<(), SynthesisError> {
assert_eq!(self.len(), other.len());
let some_are_different = self.is_neq(other)?;
if [&some_are_different, should_enforce].is_constant() {
assert!(some_are_different.value().unwrap());
Ok(())
} else {
let cs = [&some_are_different, should_enforce].cs();
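// Enforce `some_are_different * should_enforce = should_enforce`:
// when `should_enforce` is one, this forces `some_are_different` to be
// one as well; when it is zero, the constraint is trivially satisfied.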
cs.enforce_constraint(
some_are_different.lc(),
should_enforce.lc(),
should_enforce.lc(),
)
}
}
}
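
A sketch of conditional versus unconditional equality enforcement (illustrative only, not part of the commit), assuming `UInt8`, `Boolean::new_witness`, and the bls12-381 scalar field:

use ark_r1cs_std::prelude::*;
use ark_relations::r1cs::ConstraintSystem;
use ark_test_curves::bls12_381::Fr;

fn conditional_eq_example() -> Result<(), ark_relations::r1cs::SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let a = UInt8::new_witness(cs.clone(), || Ok(3u8))?;
    let b = UInt8::new_witness(cs.clone(), || Ok(4u8))?;
    // Gate the equality check on a condition that is false: the constraint
    // is vacuously satisfied even though 3 != 4.
    let cond = Boolean::new_witness(cs.clone(), || Ok(false))?;
    a.conditional_enforce_equal(&b, &cond)?;
    assert!(cs.is_satisfied().unwrap());
    // An unconditional check, by contrast, makes the system unsatisfiable.
    a.enforce_equal(&b)?;
    assert!(!cs.is_satisfied().unwrap());
    Ok(())
}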


@@ -0,0 +1,579 @@
use ark_ff::{
fields::{CubicExtField, CubicExtParameters, Field},
Zero,
};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use core::{borrow::Borrow, marker::PhantomData};
use crate::{
fields::{fp::FpVar, FieldOpsBounds, FieldVar},
prelude::*,
ToConstraintFieldGadget, Vec,
};
/// This struct is the `R1CS` equivalent of the cubic extension field type
/// in `ark-ff`, i.e. `ark_ff::CubicExtField`.
#[derive(Derivative)]
#[derivative(Debug(bound = "BF: core::fmt::Debug"), Clone(bound = "BF: Clone"))]
#[must_use]
pub struct CubicExtVar<BF: FieldVar<P::BaseField, P::BasePrimeField>, P: CubicExtVarParams<BF>>
where
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
{
/// The zero-th coefficient of this field element.
pub c0: BF,
/// The first coefficient of this field element.
pub c1: BF,
/// The second coefficient of this field element.
pub c2: BF,
#[derivative(Debug = "ignore")]
_params: PhantomData<P>,
}
/// This trait describes parameters that are used to implement arithmetic for
/// `CubicExtVar`.
pub trait CubicExtVarParams<BF: FieldVar<Self::BaseField, Self::BasePrimeField>>:
CubicExtParameters
where
for<'a> &'a BF: FieldOpsBounds<'a, Self::BaseField, BF>,
{
/// Multiplies the base-field coefficients `c1` and `c2` of a `CubicExtVar`
/// by the appropriate Frobenius coefficients. This is equivalent to
/// `Self::mul_base_field_by_frob_coeff(c1, c2, power)`.
fn mul_base_field_vars_by_frob_coeff(c1: &mut BF, c2: &mut BF, power: usize);
}
impl<BF: FieldVar<P::BaseField, P::BasePrimeField>, P: CubicExtVarParams<BF>> CubicExtVar<BF, P>
where
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
{
/// Constructs a `CubicExtVar` from the underlying coefficients.
#[inline]
pub fn new(c0: BF, c1: BF, c2: BF) -> Self {
let _params = PhantomData;
Self {
c0,
c1,
c2,
_params,
}
}
/// Multiplies a variable of the base field by the cubic nonresidue
/// `P::NONRESIDUE` that is used to construct the extension field.
#[inline]
pub fn mul_base_field_by_nonresidue(fe: &BF) -> Result<BF, SynthesisError> {
Ok(fe * P::NONRESIDUE)
}
/// Multiplies `self` by a constant from the base field.
#[inline]
pub fn mul_by_base_field_constant(&self, fe: P::BaseField) -> Self {
let c0 = &self.c0 * fe;
let c1 = &self.c1 * fe;
let c2 = &self.c2 * fe;
Self::new(c0, c1, c2)
}
/// Sets `self = self.mul_by_base_field_constant(fe)`.
#[inline]
pub fn mul_assign_by_base_field_constant(&mut self, fe: P::BaseField) {
*self = (&*self).mul_by_base_field_constant(fe);
}
}
impl<BF, P> R1CSVar<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
type Value = CubicExtField<P>;
fn cs(&self) -> ConstraintSystemRef<P::BasePrimeField> {
[&self.c0, &self.c1, &self.c2].cs()
}
#[inline]
fn value(&self) -> Result<Self::Value, SynthesisError> {
match (self.c0.value(), self.c1.value(), self.c2.value()) {
(Ok(c0), Ok(c1), Ok(c2)) => Ok(CubicExtField::new(c0, c1, c2)),
(..) => Err(SynthesisError::AssignmentMissing),
}
}
}
impl<BF, P> From<Boolean<P::BasePrimeField>> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
fn from(other: Boolean<P::BasePrimeField>) -> Self {
let c0 = BF::from(other);
let c1 = BF::zero();
let c2 = BF::zero();
Self::new(c0, c1, c2)
}
}
impl<'a, BF, P> FieldOpsBounds<'a, CubicExtField<P>, CubicExtVar<BF, P>> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
}
impl<'a, BF, P> FieldOpsBounds<'a, CubicExtField<P>, CubicExtVar<BF, P>> for &'a CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
}
impl<BF, P> FieldVar<CubicExtField<P>, P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
fn constant(other: CubicExtField<P>) -> Self {
let c0 = BF::constant(other.c0);
let c1 = BF::constant(other.c1);
let c2 = BF::constant(other.c2);
Self::new(c0, c1, c2)
}
fn zero() -> Self {
let c0 = BF::zero();
let c1 = BF::zero();
let c2 = BF::zero();
Self::new(c0, c1, c2)
}
fn one() -> Self {
let c0 = BF::one();
let c1 = BF::zero();
let c2 = BF::zero();
Self::new(c0, c1, c2)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn double(&self) -> Result<Self, SynthesisError> {
let c0 = self.c0.double()?;
let c1 = self.c1.double()?;
let c2 = self.c2.double()?;
Ok(Self::new(c0, c1, c2))
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn negate(&self) -> Result<Self, SynthesisError> {
let mut result = self.clone();
result.c0.negate_in_place()?;
result.c1.negate_in_place()?;
result.c2.negate_in_place()?;
Ok(result)
}
/// Uses the Chung-Hasan asymmetric squaring formula (CH-SQR2).
///
/// Reference:
/// "Multiplication and Squaring on Pairing-Friendly Fields",
/// Devegili, OhEigeartaigh, Scott, Dahab; Section 4.
#[inline]
#[tracing::instrument(target = "r1cs")]
fn square(&self) -> Result<Self, SynthesisError> {
let a = self.c0.clone();
let b = self.c1.clone();
let c = self.c2.clone();
let s0 = a.square()?;
let ab = &a * &b;
let s1 = ab.double()?;
let s2 = (&a - &b + &c).square()?;
let s3 = (&b * &c).double()?;
let s4 = c.square()?;
let c0 = Self::mul_base_field_by_nonresidue(&s3)? + &s0;
let c1 = Self::mul_base_field_by_nonresidue(&s4)? + &s1;
let c2 = s1 + &s2 + &s3 - &s0 - &s4;
Ok(Self::new(c0, c1, c2))
}
#[tracing::instrument(target = "r1cs")]
fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> {
// Karatsuba multiplication for cubic extensions:
// v0 = A.c0 * B.c0
// v1 = A.c1 * B.c1
// v2 = A.c2 * B.c2
// result.c0 = v0 + β((a1 + a2)(b1 + b2) - v1 - v2)
// result.c1 = (a0 + a1)(b0 + b1) - v0 - v1 + βv2
// result.c2 = (a0 + a2)(b0 + b2) - v0 + v1 - v2,
// We enforce this with six constraints:
//
// v0 = A.c0 * B.c0
// v1 = A.c1 * B.c1
// v2 = A.c2 * B.c2
//
// result.c0 - v0 + β(v1 + v2) = β(a1 + a2)(b1 + b2)
// result.c1 + v0 + v1 - βv2 = (a0 + a1)(b0 + b1)
// result.c2 + v0 - v1 + v2 = (a0 + a2)(b0 + b2)
// Reference:
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Devegili, OhEigeartaigh, Scott, Dahab
//
// This implementation adapted from
// https://github.com/ZencashOfficial/ginger-lib/blob/development/r1cs/gadgets/std/src/fields/fp3.rs
let v0 = &self.c0 * &other.c0;
let v1 = &self.c1 * &other.c1;
let v2 = &self.c2 * &other.c2;
// Check c0
let nr_a1_plus_a2 = (&self.c1 + &self.c2) * P::NONRESIDUE;
let b1_plus_b2 = &other.c1 + &other.c2;
let nr_v1 = &v1 * P::NONRESIDUE;
let nr_v2 = &v2 * P::NONRESIDUE;
let to_check = &result.c0 - &v0 + &nr_v1 + &nr_v2;
nr_a1_plus_a2.mul_equals(&b1_plus_b2, &to_check)?;
// Check c1
let a0_plus_a1 = &self.c0 + &self.c1;
let b0_plus_b1 = &other.c0 + &other.c1;
let to_check = &result.c1 - &nr_v2 + &v0 + &v1;
a0_plus_a1.mul_equals(&b0_plus_b1, &to_check)?;
// Check c2
let a0_plus_a2 = &self.c0 + &self.c2;
let b0_plus_b2 = &other.c0 + &other.c2;
let to_check = &result.c2 + &v0 - &v1 + &v2;
a0_plus_a2.mul_equals(&b0_plus_b2, &to_check)?;
Ok(())
}
#[tracing::instrument(target = "r1cs")]
fn frobenius_map(&self, power: usize) -> Result<Self, SynthesisError> {
let mut result = self.clone();
result.c0.frobenius_map_in_place(power)?;
result.c1.frobenius_map_in_place(power)?;
result.c2.frobenius_map_in_place(power)?;
P::mul_base_field_vars_by_frob_coeff(&mut result.c1, &mut result.c2, power);
Ok(result)
}
#[tracing::instrument(target = "r1cs")]
fn inverse(&self) -> Result<Self, SynthesisError> {
let mode = if self.is_constant() {
AllocationMode::Constant
} else {
AllocationMode::Witness
};
let inverse = Self::new_variable(
self.cs(),
|| {
self.value()
.map(|f| f.inverse().unwrap_or(CubicExtField::zero()))
},
mode,
)?;
self.mul_equals(&inverse, &Self::one())?;
Ok(inverse)
}
}
impl_bounded_ops!(
CubicExtVar<BF, P>,
CubicExtField<P>,
Add,
add,
AddAssign,
add_assign,
|this: &'a CubicExtVar<BF, P>, other: &'a CubicExtVar<BF, P>| {
let c0 = &this.c0 + &other.c0;
let c1 = &this.c1 + &other.c1;
let c2 = &this.c2 + &other.c2;
CubicExtVar::new(c0, c1, c2)
},
|this: &'a CubicExtVar<BF, P>, other: CubicExtField<P>| {
this + CubicExtVar::constant(other)
},
(BF: FieldVar<P::BaseField, P::BasePrimeField>, P: CubicExtVarParams<BF>),
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
);
impl_bounded_ops!(
CubicExtVar<BF, P>,
CubicExtField<P>,
Sub,
sub,
SubAssign,
sub_assign,
|this: &'a CubicExtVar<BF, P>, other: &'a CubicExtVar<BF, P>| {
let c0 = &this.c0 - &other.c0;
let c1 = &this.c1 - &other.c1;
let c2 = &this.c2 - &other.c2;
CubicExtVar::new(c0, c1, c2)
},
|this: &'a CubicExtVar<BF, P>, other: CubicExtField<P>| {
this - CubicExtVar::constant(other)
},
(BF: FieldVar<P::BaseField, P::BasePrimeField>, P: CubicExtVarParams<BF>),
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
);
impl_bounded_ops!(
CubicExtVar<BF, P>,
CubicExtField<P>,
Mul,
mul,
MulAssign,
mul_assign,
|this: &'a CubicExtVar<BF, P>, other: &'a CubicExtVar<BF, P>| {
// Karatsuba multiplication for cubic extensions:
// v0 = A.c0 * B.c0
// v1 = A.c1 * B.c1
// v2 = A.c2 * B.c2
// result.c0 = v0 + β((a1 + a2)(b1 + b2) - v1 - v2)
// result.c1 = (a0 + a1)(b0 + b1) - v0 - v1 + βv2
// result.c2 = (a0 + a2)(b0 + b2) - v0 + v1 - v2,
//
// Reference:
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Devegili, OhEigeartaigh, Scott, Dahab
let v0 = &this.c0 * &other.c0;
let v1 = &this.c1 * &other.c1;
let v2 = &this.c2 * &other.c2;
let c0 =
(((&this.c1 + &this.c2) * (&other.c1 + &other.c2) - &v1 - &v2) * P::NONRESIDUE) + &v0;
let c1 =
(&this.c0 + &this.c1) * (&other.c0 + &other.c1) - &v0 - &v1 + (&v2 * P::NONRESIDUE);
let c2 =
(&this.c0 + &this.c2) * (&other.c0 + &other.c2) - &v0 + &v1 - &v2;
CubicExtVar::new(c0, c1, c2)
},
|this: &'a CubicExtVar<BF, P>, other: CubicExtField<P>| {
this * CubicExtVar::constant(other)
},
(BF: FieldVar<P::BaseField, P::BasePrimeField>, P: CubicExtVarParams<BF>),
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
);
impl<BF, P> EqGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
#[tracing::instrument(target = "r1cs")]
fn is_eq(&self, other: &Self) -> Result<Boolean<P::BasePrimeField>, SynthesisError> {
let b0 = self.c0.is_eq(&other.c0)?;
let b1 = self.c1.is_eq(&other.c1)?;
let b2 = self.c2.is_eq(&other.c2)?;
b0.and(&b1)?.and(&b2)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<P::BasePrimeField>,
) -> Result<(), SynthesisError> {
self.c0.conditional_enforce_equal(&other.c0, condition)?;
self.c1.conditional_enforce_equal(&other.c1, condition)?;
self.c2.conditional_enforce_equal(&other.c2, condition)?;
Ok(())
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_not_equal(
&self,
other: &Self,
condition: &Boolean<P::BasePrimeField>,
) -> Result<(), SynthesisError> {
let is_equal = self.is_eq(other)?;
is_equal
.and(condition)?
.enforce_equal(&Boolean::Constant(false))
}
}
impl<BF, P> ToBitsGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bits_le(&self) -> Result<Vec<Boolean<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_bits_le()?;
let mut c1 = self.c1.to_bits_le()?;
let mut c2 = self.c2.to_bits_le()?;
c0.append(&mut c1);
c0.append(&mut c2);
Ok(c0)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bits_le(&self) -> Result<Vec<Boolean<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_non_unique_bits_le()?;
let mut c1 = self.c1.to_non_unique_bits_le()?;
let mut c2 = self.c2.to_non_unique_bits_le()?;
c0.append(&mut c1);
c0.append(&mut c2);
Ok(c0)
}
}
impl<BF, P> ToBytesGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_bytes()?;
let mut c1 = self.c1.to_bytes()?;
let mut c2 = self.c2.to_bytes()?;
c0.append(&mut c1);
c0.append(&mut c2);
Ok(c0)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_non_unique_bytes()?;
let mut c1 = self.c1.to_non_unique_bytes()?;
let mut c2 = self.c2.to_non_unique_bytes()?;
c0.append(&mut c1);
c0.append(&mut c2);
Ok(c0)
}
}
impl<BF, P> ToConstraintFieldGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
BF: ToConstraintFieldGadget<P::BasePrimeField>,
{
#[tracing::instrument(target = "r1cs")]
fn to_constraint_field(&self) -> Result<Vec<FpVar<P::BasePrimeField>>, SynthesisError> {
let mut res = Vec::new();
res.extend_from_slice(&self.c0.to_constraint_field()?);
res.extend_from_slice(&self.c1.to_constraint_field()?);
res.extend_from_slice(&self.c2.to_constraint_field()?);
Ok(res)
}
}
impl<BF, P> CondSelectGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditionally_select(
cond: &Boolean<P::BasePrimeField>,
true_value: &Self,
false_value: &Self,
) -> Result<Self, SynthesisError> {
let c0 = BF::conditionally_select(cond, &true_value.c0, &false_value.c0)?;
let c1 = BF::conditionally_select(cond, &true_value.c1, &false_value.c1)?;
let c2 = BF::conditionally_select(cond, &true_value.c2, &false_value.c2)?;
Ok(Self::new(c0, c1, c2))
}
}
impl<BF, P> TwoBitLookupGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>
+ TwoBitLookupGadget<P::BasePrimeField, TableConstant = P::BaseField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
type TableConstant = CubicExtField<P>;
#[tracing::instrument(target = "r1cs")]
fn two_bit_lookup(
b: &[Boolean<P::BasePrimeField>],
c: &[Self::TableConstant],
) -> Result<Self, SynthesisError> {
let c0s = c.iter().map(|f| f.c0).collect::<Vec<_>>();
let c1s = c.iter().map(|f| f.c1).collect::<Vec<_>>();
let c2s = c.iter().map(|f| f.c2).collect::<Vec<_>>();
let c0 = BF::two_bit_lookup(b, &c0s)?;
let c1 = BF::two_bit_lookup(b, &c1s)?;
let c2 = BF::two_bit_lookup(b, &c2s)?;
Ok(Self::new(c0, c1, c2))
}
}
impl<BF, P> ThreeBitCondNegLookupGadget<P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>
+ ThreeBitCondNegLookupGadget<P::BasePrimeField, TableConstant = P::BaseField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
type TableConstant = CubicExtField<P>;
#[tracing::instrument(target = "r1cs")]
fn three_bit_cond_neg_lookup(
b: &[Boolean<P::BasePrimeField>],
b0b1: &Boolean<P::BasePrimeField>,
c: &[Self::TableConstant],
) -> Result<Self, SynthesisError> {
let c0s = c.iter().map(|f| f.c0).collect::<Vec<_>>();
let c1s = c.iter().map(|f| f.c1).collect::<Vec<_>>();
let c2s = c.iter().map(|f| f.c2).collect::<Vec<_>>();
let c0 = BF::three_bit_cond_neg_lookup(b, b0b1, &c0s)?;
let c1 = BF::three_bit_cond_neg_lookup(b, b0b1, &c1s)?;
let c2 = BF::three_bit_cond_neg_lookup(b, b0b1, &c2s)?;
Ok(Self::new(c0, c1, c2))
}
}
impl<BF, P> AllocVar<CubicExtField<P>, P::BasePrimeField> for CubicExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: CubicExtVarParams<BF>,
{
fn new_variable<T: Borrow<CubicExtField<P>>>(
cs: impl Into<Namespace<P::BasePrimeField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
use SynthesisError::*;
let (c0, c1, c2) = match f() {
Ok(fe) => (Ok(fe.borrow().c0), Ok(fe.borrow().c1), Ok(fe.borrow().c2)),
Err(_) => (
Err(AssignmentMissing),
Err(AssignmentMissing),
Err(AssignmentMissing),
),
};
let c0 = BF::new_variable(ark_relations::ns!(cs, "c0"), || c0, mode)?;
let c1 = BF::new_variable(ark_relations::ns!(cs, "c1"), || c1, mode)?;
let c2 = BF::new_variable(ark_relations::ns!(cs, "c2"), || c2, mode)?;
Ok(Self::new(c0, c1, c2))
}
}
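
A generic sketch of how a caller might use `mul_equals` to verify a product directly, reusing the six-constraint check documented inside it, rather than computing the product and enforcing equality coefficient by coefficient. This is illustrative only and assumes the types above are exposed at `ark_r1cs_std::fields::cubic_extension`, a path not shown in this diff:

use ark_r1cs_std::fields::{FieldOpsBounds, FieldVar};
// Assumed module path for the types defined in this file; not shown in the diff.
use ark_r1cs_std::fields::cubic_extension::{CubicExtVar, CubicExtVarParams};
use ark_relations::r1cs::SynthesisError;

// Enforce `a * b == c` directly via the `mul_equals` check.
fn enforce_product<BF, P>(
    a: &CubicExtVar<BF, P>,
    b: &CubicExtVar<BF, P>,
    c: &CubicExtVar<BF, P>,
) -> Result<(), SynthesisError>
where
    BF: FieldVar<P::BaseField, P::BasePrimeField>,
    for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
    P: CubicExtVarParams<BF>,
{
    a.mul_equals(b, c)
}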

src/fields/fp/cmp.rs (new file, 247 lines)

@@ -0,0 +1,247 @@
use crate::{
boolean::Boolean,
fields::{fp::FpVar, FieldVar},
prelude::*,
ToBitsGadget,
};
use ark_ff::PrimeField;
use ark_relations::r1cs::{SynthesisError, Variable};
use core::cmp::Ordering;
impl<F: PrimeField> FpVar<F> {
/// This function enforces the ordering between `self` and `other`. The
/// constraint system will not be satisfied otherwise. If `self` should
/// also be checked for equality, e.g. `self <= other` instead of `self <
/// other`, set `should_also_check_equality` to `true`. This variant
/// verifies `self` and `other` are `<= (p-1)/2`.
#[tracing::instrument(target = "r1cs")]
pub fn enforce_cmp(
&self,
other: &FpVar<F>,
ordering: Ordering,
should_also_check_equality: bool,
) -> Result<(), SynthesisError> {
let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?;
left.enforce_smaller_than(&right)
}
/// This function enforces the ordering between `self` and `other`. The
/// constraint system will not be satisfied otherwise. If `self` should
/// also be checked for equality, e.g. `self <= other` instead of `self <
/// other`, set `should_also_check_equality` to `true`. This variant
/// assumes `self` and `other` are `<= (p-1)/2` and does not generate
/// constraints to verify that.
#[tracing::instrument(target = "r1cs")]
pub fn enforce_cmp_unchecked(
&self,
other: &FpVar<F>,
ordering: Ordering,
should_also_check_equality: bool,
) -> Result<(), SynthesisError> {
let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?;
left.enforce_smaller_than_unchecked(&right)
}
/// This function checks the ordering between `self` and `other`. It outputs
/// a `Boolean` that contains the result: `1` if true, `0`
/// otherwise. The constraint system will be satisfied in any case. If
/// `self` should also be checked for equality, e.g. `self <= other`
/// instead of `self < other`, set `should_also_check_equality` to
/// `true`. This variant verifies `self` and `other` are `<= (p-1)/2`.
#[tracing::instrument(target = "r1cs")]
pub fn is_cmp(
&self,
other: &FpVar<F>,
ordering: Ordering,
should_also_check_equality: bool,
) -> Result<Boolean<F>, SynthesisError> {
let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?;
left.is_smaller_than(&right)
}
/// This function checks the ordering between `self` and `other`. It outputs
/// a `Boolean` that contains the result: `1` if true, `0` otherwise.
/// The constraint system will be satisfied in any case. If `self`
/// should also be checked for equality, e.g. `self <= other` instead of
/// `self < other`, set `should_also_check_equality` to `true`. This
/// variant assumes `self` and `other` are `<= (p-1)/2` and does not
/// generate constraints to verify that.
#[tracing::instrument(target = "r1cs")]
pub fn is_cmp_unchecked(
&self,
other: &FpVar<F>,
ordering: Ordering,
should_also_check_equality: bool,
) -> Result<Boolean<F>, SynthesisError> {
let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?;
left.is_smaller_than_unchecked(&right)
}
fn process_cmp_inputs(
&self,
other: &Self,
ordering: Ordering,
should_also_check_equality: bool,
) -> Result<(Self, Self), SynthesisError> {
let (left, right) = match ordering {
Ordering::Less => (self, other),
Ordering::Greater => (other, self),
Ordering::Equal => Err(SynthesisError::Unsatisfiable)?,
};
let right_for_check = if should_also_check_equality {
right + F::one()
} else {
right.clone()
};
Ok((left.clone(), right_for_check))
}
/// Helper function to enforce that `self <= (p-1)/2`.
#[tracing::instrument(target = "r1cs")]
pub fn enforce_smaller_or_equal_than_mod_minus_one_div_two(
&self,
) -> Result<(), SynthesisError> {
// It's okay to use `to_non_unique_bits_le` here because we're enforcing
// self <= (p-1)/2, which implies self < p.
let _ = Boolean::enforce_smaller_or_equal_than_le(
&self.to_non_unique_bits_le()?,
F::modulus_minus_one_div_two(),
)?;
Ok(())
}
/// Helper function to check `self < other` and output a result bit. This
/// function verifies `self` and `other` are `<= (p-1)/2`.
fn is_smaller_than(&self, other: &FpVar<F>) -> Result<Boolean<F>, SynthesisError> {
self.enforce_smaller_or_equal_than_mod_minus_one_div_two()?;
other.enforce_smaller_or_equal_than_mod_minus_one_div_two()?;
self.is_smaller_than_unchecked(other)
}
/// Helper function to check `self < other` and output a result bit. This
/// function assumes `self` and `other` are `<= (p-1)/2` and does not
/// generate constraints to verify that.
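///
/// Why this works: if both values lie in `[0, (p-1)/2]`, then a
/// non-negative difference `self - other` doubles to an even field element
/// (no modular reduction occurs), whereas a negative difference is
/// represented as `p - d` and doubles to `p - 2d`, which is odd. Hence the
/// least significant bit of `2 * (self - other)` is 1 exactly when
/// `self < other`.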
fn is_smaller_than_unchecked(&self, other: &FpVar<F>) -> Result<Boolean<F>, SynthesisError> {
Ok((self - other)
.double()?
.to_bits_le()?
.first()
.unwrap()
.clone())
}
/// Helper function to enforce `self < other`. This function verifies `self`
/// and `other` are `<= (p-1)/2`.
fn enforce_smaller_than(&self, other: &FpVar<F>) -> Result<(), SynthesisError> {
self.enforce_smaller_or_equal_than_mod_minus_one_div_two()?;
other.enforce_smaller_or_equal_than_mod_minus_one_div_two()?;
self.enforce_smaller_than_unchecked(other)
}
/// Helper function to enforce `self < other`. This function assumes `self`
/// and `other` are `<= (p-1)/2` and does not generate constraints to
/// verify that.
fn enforce_smaller_than_unchecked(&self, other: &FpVar<F>) -> Result<(), SynthesisError> {
let is_smaller_than = self.is_smaller_than_unchecked(other)?;
let lc_one = lc!() + Variable::One;
[self, other]
.cs()
.enforce_constraint(is_smaller_than.lc(), lc_one.clone(), lc_one)
}
}
#[cfg(test)]
mod test {
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use std::cmp::Ordering;
use crate::{alloc::AllocVar, fields::fp::FpVar};
use ark_ff::{PrimeField, UniformRand};
use ark_relations::r1cs::ConstraintSystem;
use ark_test_curves::bls12_381::Fr;
#[test]
fn test_cmp() {
let mut rng = &mut XorShiftRng::from_seed([
0x5d, 0xbe, 0x62, 0x59, 0x8d, 0x31, 0x3d, 0x76, 0x32, 0x37, 0xdb, 0x17, 0xe5, 0xbc,
0x06, 0x54,
]);
fn rand_in_range<R: Rng>(rng: &mut R) -> Fr {
let pminusonedivtwo: Fr = Fr::modulus_minus_one_div_two().into();
let mut r;
loop {
r = Fr::rand(rng);
if r <= pminusonedivtwo {
break;
}
}
r
}
for i in 0..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a = rand_in_range(&mut rng);
let a_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(a)).unwrap();
let b = rand_in_range(&mut rng);
let b_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(b)).unwrap();
match a.cmp(&b) {
Ordering::Less => {
a_var.enforce_cmp(&b_var, Ordering::Less, false).unwrap();
a_var.enforce_cmp(&b_var, Ordering::Less, true).unwrap();
}
Ordering::Greater => {
a_var.enforce_cmp(&b_var, Ordering::Greater, false).unwrap();
a_var.enforce_cmp(&b_var, Ordering::Greater, true).unwrap();
}
_ => {}
}
if i == 0 {
println!("number of constraints: {}", cs.num_constraints());
}
assert!(cs.is_satisfied().unwrap());
}
println!("Finished with satisfaction tests");
for _i in 0..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a = rand_in_range(&mut rng);
let a_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(a)).unwrap();
let b = rand_in_range(&mut rng);
let b_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(b)).unwrap();
match b.cmp(&a) {
Ordering::Less => {
a_var.enforce_cmp(&b_var, Ordering::Less, false).unwrap();
a_var.enforce_cmp(&b_var, Ordering::Less, true).unwrap();
}
Ordering::Greater => {
a_var.enforce_cmp(&b_var, Ordering::Greater, false).unwrap();
a_var.enforce_cmp(&b_var, Ordering::Greater, true).unwrap();
}
_ => {}
}
assert!(!cs.is_satisfied().unwrap());
}
for _i in 0..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a = rand_in_range(&mut rng);
let a_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(a)).unwrap();
a_var.enforce_cmp(&a_var, Ordering::Less, false).unwrap();
assert!(!cs.is_satisfied().unwrap());
}
for _i in 0..10 {
let cs = ConstraintSystem::<Fr>::new_ref();
let a = rand_in_range(&mut rng);
let a_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(a)).unwrap();
a_var.enforce_cmp(&a_var, Ordering::Less, true).unwrap();
assert!(cs.is_satisfied().unwrap());
}
}
}
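// A hypothetical extra test (not part of the upstream file): unlike `enforce_cmp`,
// `is_cmp` only *outputs* the comparison result as a `Boolean`, so a circuit can
// branch on it without forcing the relation to hold.
#[cfg(test)]
mod is_cmp_example {
    use crate::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
    use ark_relations::r1cs::ConstraintSystem;
    use ark_test_curves::bls12_381::Fr;
    use std::cmp::Ordering;

    #[test]
    fn is_cmp_outputs_a_boolean() {
        let cs = ConstraintSystem::<Fr>::new_ref();
        // Both witnesses are well below (p-1)/2, as this variant requires.
        let a = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from(3u64))).unwrap();
        let b = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::from(5u64))).unwrap();
        // `a < b`, so the returned Boolean is assigned `true`.
        let lt = a.is_cmp(&b, Ordering::Less, false).unwrap();
        assert!(lt.value().unwrap());
        assert!(cs.is_satisfied().unwrap());
    }
}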

1015
src/fields/fp/mod.rs Normal file

File diff suppressed because it is too large Load Diff

171
src/fields/fp12.rs Normal file
View File

@@ -0,0 +1,171 @@
use crate::fields::{fp2::Fp2Var, fp6_3over2::Fp6Var, quadratic_extension::*, FieldVar};
use ark_ff::fields::{fp12_2over3over2::*, fp6_3over2::Fp6Parameters, Field, QuadExtParameters};
use ark_relations::r1cs::SynthesisError;
/// A degree-12 extension field constructed as the tower of a
/// quadratic extension over a cubic extension over a quadratic extension field.
/// This is the R1CS equivalent of `ark_ff::fp12_2over3over2::Fp12<P>`.
pub type Fp12Var<P> = QuadExtVar<Fp6Var<<P as Fp12Parameters>::Fp6Params>, Fp12ParamsWrapper<P>>;
type Fp2Params<P> = <<P as Fp12Parameters>::Fp6Params as Fp6Parameters>::Fp2Params;
impl<P: Fp12Parameters> QuadExtVarParams<Fp6Var<P::Fp6Params>> for Fp12ParamsWrapper<P> {
fn mul_base_field_var_by_frob_coeff(fe: &mut Fp6Var<P::Fp6Params>, power: usize) {
fe.c0 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
fe.c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
fe.c2 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
}
}
impl<P: Fp12Parameters> Fp12Var<P> {
/// Multiplies by a sparse element of the form `(c0 = (c0, c1, 0), c1 = (0,
/// d1, 0))`.
#[inline]
pub fn mul_by_014(
&self,
c0: &Fp2Var<Fp2Params<P>>,
c1: &Fp2Var<Fp2Params<P>>,
d1: &Fp2Var<Fp2Params<P>>,
) -> Result<Self, SynthesisError> {
let v0 = self.c0.mul_by_c0_c1_0(&c0, &c1)?;
let v1 = self.c1.mul_by_0_c1_0(&d1)?;
let new_c0 = Self::mul_base_field_by_nonresidue(&v1)? + &v0;
let new_c1 = (&self.c0 + &self.c1).mul_by_c0_c1_0(&c0, &(c1 + d1))? - &v0 - &v1;
Ok(Self::new(new_c0, new_c1))
}
/// Multiplies by a sparse element of the form `(c0 = (c0, 0, 0), c1 = (d0,
/// d1, 0))`.
#[inline]
pub fn mul_by_034(
&self,
c0: &Fp2Var<Fp2Params<P>>,
d0: &Fp2Var<Fp2Params<P>>,
d1: &Fp2Var<Fp2Params<P>>,
) -> Result<Self, SynthesisError> {
let a0 = &self.c0.c0 * c0;
let a1 = &self.c0.c1 * c0;
let a2 = &self.c0.c2 * c0;
let a = Fp6Var::new(a0, a1, a2);
let b = self.c1.mul_by_c0_c1_0(&d0, &d1)?;
let c0 = c0 + d0;
let c1 = d1;
let e = (&self.c0 + &self.c1).mul_by_c0_c1_0(&c0, &c1)?;
let new_c1 = e - (&a + &b);
let new_c0 = Self::mul_base_field_by_nonresidue(&b)? + &a;
Ok(Self::new(new_c0, new_c1))
}
/// Squares `self` when `self` is in the cyclotomic subgroup.
pub fn cyclotomic_square(&self) -> Result<Self, SynthesisError> {
if characteristic_square_mod_6_is_one(Fp12::<P>::characteristic()) {
let fp2_nr = <P::Fp6Params as Fp6Parameters>::NONRESIDUE;
let z0 = &self.c0.c0;
let z4 = &self.c0.c1;
let z3 = &self.c0.c2;
let z2 = &self.c1.c0;
let z1 = &self.c1.c1;
let z5 = &self.c1.c2;
// t0 + t1*y = (z0 + z1*y)^2 = a^2
let tmp = z0 * z1;
let t0 = {
let tmp1 = z0 + z1;
let tmp2 = z1 * fp2_nr + z0;
let tmp4 = &tmp * fp2_nr + &tmp;
tmp1 * tmp2 - tmp4
};
let t1 = tmp.double()?;
// t2 + t3*y = (z2 + z3*y)^2 = b^2
let tmp = z2 * z3;
let t2 = {
// (z2 + &z3) * &(z2 + &(fp2_nr * &z3)) - &tmp - &(tmp * &fp2_nr);
let tmp1 = z2 + z3;
let tmp2 = z3 * fp2_nr + z2;
let tmp4 = &tmp * fp2_nr + &tmp;
tmp1 * tmp2 - tmp4
};
let t3 = tmp.double()?;
// t4 + t5*y = (z4 + z5*y)^2 = c^2
let tmp = z4 * z5;
let t4 = {
// (z4 + &z5) * &(z4 + &(fp2_nr * &z5)) - &tmp - &(tmp * &fp2_nr);
let tmp1 = z4 + z5;
let tmp2 = (z5 * fp2_nr) + z4;
let tmp4 = (&tmp * fp2_nr) + &tmp;
(tmp1 * tmp2) - tmp4
};
let t5 = tmp.double()?;
// for A
// z0 = 3 * t0 - 2 * z0
let c0_c0 = (&t0 - z0).double()? + &t0;
// z1 = 3 * t1 + 2 * z1
let c1_c1 = (&t1 + z1).double()? + &t1;
// for B
// z2 = 3 * (xi * t5) + 2 * z2
let c1_c0 = {
let tmp = &t5 * fp2_nr;
(z2 + &tmp).double()? + &tmp
};
// z3 = 3 * t4 - 2 * z3
let c0_c2 = (&t4 - z3).double()? + &t4;
// for C
// z4 = 3 * t2 - 2 * z4
let c0_c1 = (&t2 - z4).double()? + &t2;
// z5 = 3 * t3 + 2 * z5
let c1_c2 = (&t3 + z5).double()? + &t3;
let c0 = Fp6Var::new(c0_c0, c0_c1, c0_c2);
let c1 = Fp6Var::new(c1_c0, c1_c1, c1_c2);
Ok(Self::new(c0, c1))
} else {
self.square()
}
}
/// Like `Self::cyclotomic_exp`, but additionally uses cyclotomic squaring.
pub fn optimized_cyclotomic_exp(
&self,
exponent: impl AsRef<[u64]>,
) -> Result<Self, SynthesisError> {
use ark_ff::biginteger::arithmetic::find_wnaf;
let mut res = Self::one();
let self_inverse = self.unitary_inverse()?;
let mut found_nonzero = false;
let naf = find_wnaf(exponent.as_ref());
for &value in naf.iter().rev() {
if found_nonzero {
res = res.cyclotomic_square()?;
}
if value != 0 {
found_nonzero = true;
if value > 0 {
res *= self;
} else {
res *= &self_inverse;
}
}
}
Ok(res)
}
}
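// A hypothetical usage sketch (not from the upstream file): `optimized_cyclotomic_exp`
// runs square-and-multiply over the WNAF digits of the exponent, swapping each generic
// squaring for the cheaper `cyclotomic_square`, so it should only be applied to elements
// already known to lie in the cyclotomic subgroup.
#[allow(dead_code)]
fn cyclotomic_exp_sketch<P: Fp12Parameters>(
    f: &Fp12Var<P>,
    exponent: &[u64],
) -> Result<Fp12Var<P>, SynthesisError> {
    // One explicit cheap squaring, then the full windowed exponentiation.
    let f2 = f.cyclotomic_square()?;
    f2.optimized_cyclotomic_exp(exponent)
}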

12
src/fields/fp2.rs Normal file
View File

@@ -0,0 +1,12 @@
use crate::fields::{fp::FpVar, quadratic_extension::*};
use ark_ff::fields::{Fp2Parameters, Fp2ParamsWrapper, QuadExtParameters};
/// A quadratic extension field constructed over a prime field.
/// This is the R1CS equivalent of `ark_ff::Fp2<P>`.
pub type Fp2Var<P> = QuadExtVar<FpVar<<P as Fp2Parameters>::Fp>, Fp2ParamsWrapper<P>>;
impl<P: Fp2Parameters> QuadExtVarParams<FpVar<P::Fp>> for Fp2ParamsWrapper<P> {
fn mul_base_field_var_by_frob_coeff(fe: &mut FpVar<P::Fp>, power: usize) {
*fe *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
}
}

17
src/fields/fp3.rs Normal file
View File

@@ -0,0 +1,17 @@
use crate::fields::{cubic_extension::*, fp::FpVar};
use ark_ff::fields::{CubicExtParameters, Fp3Parameters, Fp3ParamsWrapper};
/// A cubic extension field constructed over a prime field.
/// This is the R1CS equivalent of `ark_ff::Fp3<P>`.
pub type Fp3Var<P> = CubicExtVar<FpVar<<P as Fp3Parameters>::Fp>, Fp3ParamsWrapper<P>>;
impl<P: Fp3Parameters> CubicExtVarParams<FpVar<P::Fp>> for Fp3ParamsWrapper<P> {
fn mul_base_field_vars_by_frob_coeff(
c1: &mut FpVar<P::Fp>,
c2: &mut FpVar<P::Fp>,
power: usize,
) {
*c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
*c2 *= Self::FROBENIUS_COEFF_C2[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
}
}

14
src/fields/fp4.rs Normal file
View File

@@ -0,0 +1,14 @@
use crate::fields::{fp2::Fp2Var, quadratic_extension::*};
use ark_ff::fields::{Fp4Parameters, Fp4ParamsWrapper, QuadExtParameters};
/// A quartic extension field constructed as the tower of a
/// quadratic extension over a quadratic extension field.
/// This is the R1CS equivalent of `ark_ff::Fp4<P>`.
pub type Fp4Var<P> = QuadExtVar<Fp2Var<<P as Fp4Parameters>::Fp2Params>, Fp4ParamsWrapper<P>>;
impl<P: Fp4Parameters> QuadExtVarParams<Fp2Var<P::Fp2Params>> for Fp4ParamsWrapper<P> {
fn mul_base_field_var_by_frob_coeff(fe: &mut Fp2Var<P::Fp2Params>, power: usize) {
fe.c0 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
fe.c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
}
}

15
src/fields/fp6_2over3.rs Normal file
View File

@@ -0,0 +1,15 @@
use crate::fields::{fp3::Fp3Var, quadratic_extension::*};
use ark_ff::fields::{fp6_2over3::*, QuadExtParameters};
/// A sextic extension field constructed as the tower of a
/// quadratic extension over a cubic extension field.
/// This is the R1CS equivalent of `ark_ff::fp6_2over3::Fp6<P>`.
pub type Fp6Var<P> = QuadExtVar<Fp3Var<<P as Fp6Parameters>::Fp3Params>, Fp6ParamsWrapper<P>>;
impl<P: Fp6Parameters> QuadExtVarParams<Fp3Var<P::Fp3Params>> for Fp6ParamsWrapper<P> {
fn mul_base_field_var_by_frob_coeff(fe: &mut Fp3Var<P::Fp3Params>, power: usize) {
fe.c0 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
fe.c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
fe.c2 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
}
}

85
src/fields/fp6_3over2.rs Normal file
View File

@@ -0,0 +1,85 @@
use crate::fields::{cubic_extension::*, fp2::*};
use ark_ff::fields::{fp6_3over2::*, CubicExtParameters, Fp2};
use ark_relations::r1cs::SynthesisError;
use core::ops::MulAssign;
/// A sextic extension field constructed as the tower of a
/// cubic extension over a quadratic extension field.
/// This is the R1CS equivalent of `ark_ff::fp6_3over2::Fp6<P>`.
pub type Fp6Var<P> = CubicExtVar<Fp2Var<<P as Fp6Parameters>::Fp2Params>, Fp6ParamsWrapper<P>>;
impl<P: Fp6Parameters> CubicExtVarParams<Fp2Var<P::Fp2Params>> for Fp6ParamsWrapper<P> {
fn mul_base_field_vars_by_frob_coeff(
c1: &mut Fp2Var<P::Fp2Params>,
c2: &mut Fp2Var<P::Fp2Params>,
power: usize,
) {
*c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
*c2 *= Self::FROBENIUS_COEFF_C2[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
}
}
impl<P: Fp6Parameters> Fp6Var<P> {
/// Multiplies `self` by a sparse element which has `c0 == c2 == zero`.
pub fn mul_by_0_c1_0(&self, c1: &Fp2Var<P::Fp2Params>) -> Result<Self, SynthesisError> {
// Karatsuba multiplication
// v0 = a0 * b0 = 0
// v1 = a1 * b1
let v1 = &self.c1 * c1;
// v2 = a2 * b2 = 0
let a1_plus_a2 = &self.c1 + &self.c2;
let b1_plus_b2 = c1.clone();
let a0_plus_a1 = &self.c0 + &self.c1;
// c0 = (NONRESIDUE * ((a1 + a2)*(b1 + b2) - v1 - v2)) + v0
// = NONRESIDUE * ((a1 + a2) * b1 - v1)
let c0 = &(a1_plus_a2 * &b1_plus_b2 - &v1) * P::NONRESIDUE;
// c1 = (a0 + a1) * (b0 + b1) - v0 - v1 + NONRESIDUE * v2
// = (a0 + a1) * b1 - v1
let c1 = a0_plus_a1 * c1 - &v1;
// c2 = (a0 + a2) * (b0 + b2) - v0 - v2 + v1
// = v1
let c2 = v1;
Ok(Self::new(c0, c1, c2))
}
/// Multiplies `self` by a sparse element which has `c2 == zero`.
pub fn mul_by_c0_c1_0(
&self,
c0: &Fp2Var<P::Fp2Params>,
c1: &Fp2Var<P::Fp2Params>,
) -> Result<Self, SynthesisError> {
let v0 = &self.c0 * c0;
let v1 = &self.c1 * c1;
// v2 = 0.
let a1_plus_a2 = &self.c1 + &self.c2;
let a0_plus_a1 = &self.c0 + &self.c1;
let a0_plus_a2 = &self.c0 + &self.c2;
let b1_plus_b2 = c1.clone();
let b0_plus_b1 = c0 + c1;
let b0_plus_b2 = c0.clone();
let c0 = (&a1_plus_a2 * &b1_plus_b2 - &v1) * P::NONRESIDUE + &v0;
let c1 = a0_plus_a1 * &b0_plus_b1 - &v0 - &v1;
let c2 = a0_plus_a2 * &b0_plus_b2 - &v0 + &v1;
Ok(Self::new(c0, c1, c2))
}
}
impl<P: Fp6Parameters> MulAssign<Fp2<P::Fp2Params>> for Fp6Var<P> {
fn mul_assign(&mut self, other: Fp2<P::Fp2Params>) {
self.c0 *= other;
self.c1 *= other;
self.c2 *= other;
}
}
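// A hypothetical consistency check (not from the upstream file): multiplying by the
// sparse element `(c0, c1, 0)` via `mul_by_c0_c1_0` should agree with a dense
// multiplication by the same element once its zero coefficient is written out.
#[allow(dead_code)]
fn sparse_mul_matches_dense<P: Fp6Parameters>(
    a: &Fp6Var<P>,
    c0: &Fp2Var<P::Fp2Params>,
    c1: &Fp2Var<P::Fp2Params>,
) -> Result<(), SynthesisError> {
    use crate::{eq::EqGadget, fields::FieldVar};
    let sparse = a.mul_by_c0_c1_0(c0, c1)?;
    let dense = a * &Fp6Var::<P>::new(c0.clone(), c1.clone(), Fp2Var::<P::Fp2Params>::zero());
    sparse.enforce_equal(&dense)
}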

207
src/fields/mod.rs Normal file
View File

@@ -0,0 +1,207 @@
use ark_ff::{prelude::*, BitIteratorBE};
use ark_relations::r1cs::SynthesisError;
use core::{
fmt::Debug,
ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign},
};
use crate::{prelude::*, Assignment};
/// This module contains a generic implementation of cubic extension field
/// variables. That is, it implements the R1CS equivalent of
/// `ark_ff::CubicExtField`.
pub mod cubic_extension;
/// This module contains a generic implementation of quadratic extension field
/// variables. That is, it implements the R1CS equivalent of
/// `ark_ff::QuadExtField`.
pub mod quadratic_extension;
/// This module contains a generic implementation of prime field variables.
/// That is, it implements the R1CS equivalent of `ark_ff::Fp*`.
pub mod fp;
/// This module contains a generic implementation of the degree-12 tower
/// extension field. That is, it implements the R1CS equivalent of
/// `ark_ff::Fp12`
pub mod fp12;
/// This module contains a generic implementation of the degree-2 tower
/// extension field. That is, it implements the R1CS equivalent of
/// `ark_ff::Fp2`
pub mod fp2;
/// This module contains a generic implementation of the degree-3 tower
/// extension field. That is, it implements the R1CS equivalent of
/// `ark_ff::Fp3`
pub mod fp3;
/// This module contains a generic implementation of the degree-4 tower
/// extension field. That is, it implements the R1CS equivalent of
/// `ark_ff::Fp4`
pub mod fp4;
/// This module contains a generic implementation of the degree-6 tower
/// extension field. That is, it implements the R1CS equivalent of
/// `ark_ff::fp6_2over3::Fp6`
pub mod fp6_2over3;
/// This module contains a generic implementation of the degree-6 tower
/// extension field. That is, it implements the R1CS equivalent of
/// `ark_ff::fp6_3over2::Fp6`
pub mod fp6_3over2;
/// This trait is a hack used to work around the lack of implied bounds.
pub trait FieldOpsBounds<'a, F, T: 'a>:
Sized
+ Add<&'a T, Output = T>
+ Sub<&'a T, Output = T>
+ Mul<&'a T, Output = T>
+ Add<T, Output = T>
+ Sub<T, Output = T>
+ Mul<T, Output = T>
+ Add<F, Output = T>
+ Sub<F, Output = T>
+ Mul<F, Output = T>
{
}
/// A variable representing a field. Corresponds to the native type `F`.
pub trait FieldVar<F: Field, ConstraintF: Field>:
'static
+ Clone
+ From<Boolean<ConstraintF>>
+ R1CSVar<ConstraintF, Value = F>
+ EqGadget<ConstraintF>
+ ToBitsGadget<ConstraintF>
+ AllocVar<F, ConstraintF>
+ ToBytesGadget<ConstraintF>
+ CondSelectGadget<ConstraintF>
+ for<'a> FieldOpsBounds<'a, F, Self>
+ for<'a> AddAssign<&'a Self>
+ for<'a> SubAssign<&'a Self>
+ for<'a> MulAssign<&'a Self>
+ AddAssign<Self>
+ SubAssign<Self>
+ MulAssign<Self>
+ AddAssign<F>
+ SubAssign<F>
+ MulAssign<F>
+ Debug
{
/// Returns the constant `F::zero()`.
fn zero() -> Self;
/// Returns a `Boolean` representing whether `self == Self::zero()`.
fn is_zero(&self) -> Result<Boolean<ConstraintF>, SynthesisError> {
self.is_eq(&Self::zero())
}
/// Returns the constant `F::one()`.
fn one() -> Self;
/// Returns a `Boolean` representing whether `self == Self::one()`.
fn is_one(&self) -> Result<Boolean<ConstraintF>, SynthesisError> {
self.is_eq(&Self::one())
}
/// Returns a constant with value `v`.
///
/// This *should not* allocate any variables.
fn constant(v: F) -> Self;
/// Computes `self + self`.
fn double(&self) -> Result<Self, SynthesisError> {
Ok(self.clone() + self)
}
/// Sets `self = self + self`.
fn double_in_place(&mut self) -> Result<&mut Self, SynthesisError> {
*self = self.double()?;
Ok(self)
}
/// Computes `-self`.
fn negate(&self) -> Result<Self, SynthesisError>;
/// Sets `self = -self`.
#[inline]
fn negate_in_place(&mut self) -> Result<&mut Self, SynthesisError> {
*self = self.negate()?;
Ok(self)
}
/// Computes `self * self`.
///
/// A default implementation is provided which just invokes the underlying
/// multiplication routine. However, this method should be specialized
/// for extension fields, where faster algorithms exist for squaring.
fn square(&self) -> Result<Self, SynthesisError> {
Ok(self.clone() * self)
}
/// Sets `self = self.square()`.
fn square_in_place(&mut self) -> Result<&mut Self, SynthesisError> {
*self = self.square()?;
Ok(self)
}
/// Enforces that `self * other == result`.
fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> {
let actual_result = self.clone() * other;
result.enforce_equal(&actual_result)
}
/// Enforces that `self * self == result`.
fn square_equals(&self, result: &Self) -> Result<(), SynthesisError> {
let actual_result = self.square()?;
result.enforce_equal(&actual_result)
}
/// Computes `result` such that `self * result == Self::one()`.
fn inverse(&self) -> Result<Self, SynthesisError>;
/// Returns `self / denominator`, but requires fewer constraints than
/// computing `self * denominator.inverse()`.
/// It is up to the caller to ensure that `denominator` is non-zero,
/// since the result is unconstrained if it is zero.
fn mul_by_inverse(&self, denominator: &Self) -> Result<Self, SynthesisError> {
let result = Self::new_witness(self.cs(), || {
let denominator_inv_native = denominator.value()?.inverse().get()?;
let result = self.value()? * &denominator_inv_native;
Ok(result)
})?;
result.mul_equals(&denominator, &self)?;
Ok(result)
}
/// Computes the frobenius map over `self`.
fn frobenius_map(&self, power: usize) -> Result<Self, SynthesisError>;
/// Sets `self = self.frobenius_map(power)`.
fn frobenius_map_in_place(&mut self, power: usize) -> Result<&mut Self, SynthesisError> {
*self = self.frobenius_map(power)?;
Ok(self)
}
/// Computes `self^bits`, where `bits` is a *little-endian* bit-wise
/// decomposition of the exponent.
fn pow_le(&self, bits: &[Boolean<ConstraintF>]) -> Result<Self, SynthesisError> {
let mut res = Self::one();
let mut power = self.clone();
for bit in bits {
let tmp = res.clone() * &power;
res = bit.select(&tmp, &res)?;
power.square_in_place()?;
}
Ok(res)
}
/// Computes `self^S`, where S is interpreted as a little-endian
/// u64-decomposition of an integer.
fn pow_by_constant<S: AsRef<[u64]>>(&self, exp: S) -> Result<Self, SynthesisError> {
let mut res = Self::one();
for i in BitIteratorBE::without_leading_zeros(exp) {
res.square_in_place()?;
if i {
res *= self;
}
}
Ok(res)
}
}
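// A hypothetical helper (not from the upstream file): `pow_by_constant` performs
// square-and-multiply over the big-endian bits of the exponent, so a fixed small
// power such as `x^5` (binary `101`) costs two squarings and one multiplication.
#[allow(dead_code)]
fn fifth_power_sketch<F: Field, CF: Field, FV: FieldVar<F, CF>>(
    x: &FV,
) -> Result<FV, SynthesisError> {
    x.pow_by_constant([5u64])
}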

561
src/fields/quadratic_extension.rs Normal file
View File

@@ -0,0 +1,561 @@
use ark_ff::{
fields::{Field, QuadExtField, QuadExtParameters},
Zero,
};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use core::{borrow::Borrow, marker::PhantomData};
use crate::{
fields::{fp::FpVar, FieldOpsBounds, FieldVar},
prelude::*,
ToConstraintFieldGadget, Vec,
};
/// This struct is the `R1CS` equivalent of the quadratic extension field type
/// in `ark-ff`, i.e. `ark_ff::QuadExtField`.
#[derive(Derivative)]
#[derivative(Debug(bound = "BF: core::fmt::Debug"), Clone(bound = "BF: Clone"))]
#[must_use]
pub struct QuadExtVar<BF: FieldVar<P::BaseField, P::BasePrimeField>, P: QuadExtVarParams<BF>>
where
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
{
/// The zero-th coefficient of this field element.
pub c0: BF,
/// The first coefficient of this field element.
pub c1: BF,
#[derivative(Debug = "ignore")]
_params: PhantomData<P>,
}
/// This trait describes parameters that are used to implement arithmetic for
/// `QuadExtVar`.
pub trait QuadExtVarParams<BF: FieldVar<Self::BaseField, Self::BasePrimeField>>:
QuadExtParameters
where
for<'a> &'a BF: FieldOpsBounds<'a, Self::BaseField, BF>,
{
/// Multiply the base field of the `QuadExtVar` by the appropriate Frobenius
/// coefficient. This is equivalent to
/// `Self::mul_base_field_by_frob_coeff(power)`.
fn mul_base_field_var_by_frob_coeff(fe: &mut BF, power: usize);
}
impl<BF: FieldVar<P::BaseField, P::BasePrimeField>, P: QuadExtVarParams<BF>> QuadExtVar<BF, P>
where
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
{
/// Constructs a `QuadExtVar` from the underlying coefficients.
pub fn new(c0: BF, c1: BF) -> Self {
Self {
c0,
c1,
_params: PhantomData,
}
}
/// Multiplies a variable of the base field by the quadratic nonresidue
/// `P::NONRESIDUE` that is used to construct the extension field.
#[inline]
pub fn mul_base_field_by_nonresidue(fe: &BF) -> Result<BF, SynthesisError> {
Ok(fe * P::NONRESIDUE)
}
/// Multiplies `self` by a constant from the base field.
#[inline]
pub fn mul_by_base_field_constant(&self, fe: P::BaseField) -> Self {
let c0 = self.c0.clone() * fe;
let c1 = self.c1.clone() * fe;
QuadExtVar::new(c0, c1)
}
/// Sets `self = self.mul_by_base_field_constant(fe)`.
#[inline]
pub fn mul_assign_by_base_field_constant(&mut self, fe: P::BaseField) {
*self = (&*self).mul_by_base_field_constant(fe);
}
/// This is only to be used when the element is *known* to be in the
/// cyclotomic subgroup.
#[inline]
pub fn unitary_inverse(&self) -> Result<Self, SynthesisError> {
Ok(Self::new(self.c0.clone(), self.c1.negate()?))
}
/// This is only to be used when the element is *known* to be in the
/// cyclotomic subgroup.
#[inline]
#[tracing::instrument(target = "r1cs", skip(exponent))]
pub fn cyclotomic_exp(&self, exponent: impl AsRef<[u64]>) -> Result<Self, SynthesisError>
where
Self: FieldVar<QuadExtField<P>, P::BasePrimeField>,
{
let mut res = Self::one();
let self_inverse = self.unitary_inverse()?;
let mut found_nonzero = false;
let naf = ark_ff::biginteger::arithmetic::find_wnaf(exponent.as_ref());
for &value in naf.iter().rev() {
if found_nonzero {
res.square_in_place()?;
}
if value != 0 {
found_nonzero = true;
if value > 0 {
res *= self;
} else {
res *= &self_inverse;
}
}
}
Ok(res)
}
}
impl<BF, P> R1CSVar<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
type Value = QuadExtField<P>;
fn cs(&self) -> ConstraintSystemRef<P::BasePrimeField> {
[&self.c0, &self.c1].cs()
}
#[inline]
fn value(&self) -> Result<Self::Value, SynthesisError> {
match (self.c0.value(), self.c1.value()) {
(Ok(c0), Ok(c1)) => Ok(QuadExtField::new(c0, c1)),
(..) => Err(SynthesisError::AssignmentMissing),
}
}
}
impl<BF, P> From<Boolean<P::BasePrimeField>> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
fn from(other: Boolean<P::BasePrimeField>) -> Self {
let c0 = BF::from(other);
let c1 = BF::zero();
Self::new(c0, c1)
}
}
impl<'a, BF, P> FieldOpsBounds<'a, QuadExtField<P>, QuadExtVar<BF, P>> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
}
impl<'a, BF, P> FieldOpsBounds<'a, QuadExtField<P>, QuadExtVar<BF, P>> for &'a QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
}
impl<BF, P> FieldVar<QuadExtField<P>, P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
fn constant(other: QuadExtField<P>) -> Self {
let c0 = BF::constant(other.c0);
let c1 = BF::constant(other.c1);
Self::new(c0, c1)
}
fn zero() -> Self {
let c0 = BF::zero();
let c1 = BF::zero();
Self::new(c0, c1)
}
fn one() -> Self {
let c0 = BF::one();
let c1 = BF::zero();
Self::new(c0, c1)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn double(&self) -> Result<Self, SynthesisError> {
let c0 = self.c0.double()?;
let c1 = self.c1.double()?;
Ok(Self::new(c0, c1))
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn negate(&self) -> Result<Self, SynthesisError> {
let mut result = self.clone();
result.c0.negate_in_place()?;
result.c1.negate_in_place()?;
Ok(result)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn square(&self) -> Result<Self, SynthesisError> {
// From Libsnark/fp2_gadget.tcc
// Complex multiplication for Fp2:
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Devegili, OhEigeartaigh, Scott, Dahab
// v0 = c0 - c1
let mut v0 = &self.c0 - &self.c1;
// v3 = c0 - beta * c1
let v3 = &self.c0 - &Self::mul_base_field_by_nonresidue(&self.c1)?;
// v2 = c0 * c1
let v2 = &self.c0 * &self.c1;
// v0 = (v0 * v3) + v2
v0 *= &v3;
v0 += &v2;
let c0 = &v0 + &Self::mul_base_field_by_nonresidue(&v2)?;
let c1 = v2.double()?;
Ok(Self::new(c0, c1))
}
#[tracing::instrument(target = "r1cs")]
fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> {
// Karatsuba multiplication for Fp2:
// v0 = A.c0 * B.c0
// v1 = A.c1 * B.c1
// result.c0 = v0 + non_residue * v1
// result.c1 = (A.c0 + A.c1) * (B.c0 + B.c1) - v0 - v1
// Enforced with 3 constraints:
// A.c1 * B.c1 = v1
// A.c0 * B.c0 = result.c0 - non_residue * v1
// (A.c0+A.c1)*(B.c0+B.c1) = result.c1 + result.c0 + (1 - non_residue) * v1
// Reference:
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Devegili, OhEigeartaigh, Scott, Dahab
// Compute v1
let v1 = &self.c1 * &other.c1;
// Perform second check
let non_residue_times_v1 = Self::mul_base_field_by_nonresidue(&v1)?;
let rhs = &result.c0 - &non_residue_times_v1;
self.c0.mul_equals(&other.c0, &rhs)?;
// Last check
let a0_plus_a1 = &self.c0 + &self.c1;
let b0_plus_b1 = &other.c0 + &other.c1;
let one_minus_non_residue_v1 = &v1 - &non_residue_times_v1;
let tmp = &(&result.c1 + &result.c0) + &one_minus_non_residue_v1;
a0_plus_a1.mul_equals(&b0_plus_b1, &tmp)?;
Ok(())
}
#[tracing::instrument(target = "r1cs")]
fn frobenius_map(&self, power: usize) -> Result<Self, SynthesisError> {
let mut result = self.clone();
result.c0.frobenius_map_in_place(power)?;
result.c1.frobenius_map_in_place(power)?;
P::mul_base_field_var_by_frob_coeff(&mut result.c1, power);
Ok(result)
}
#[tracing::instrument(target = "r1cs")]
fn inverse(&self) -> Result<Self, SynthesisError> {
let mode = if self.is_constant() {
AllocationMode::Constant
} else {
AllocationMode::Witness
};
let inverse = Self::new_variable(
self.cs(),
|| {
self.value()
.map(|f| f.inverse().unwrap_or(QuadExtField::zero()))
},
mode,
)?;
self.mul_equals(&inverse, &Self::one())?;
Ok(inverse)
}
}
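// A hypothetical helper (not from the upstream file): enforcing a known product with
// `mul_equals` uses the three Karatsuba constraints documented above, instead of first
// materializing `a * b` and then calling `enforce_equal` on the result.
#[allow(dead_code)]
fn enforce_product_sketch<BF, P>(
    a: &QuadExtVar<BF, P>,
    b: &QuadExtVar<BF, P>,
    expected: &QuadExtVar<BF, P>,
) -> Result<(), SynthesisError>
where
    BF: FieldVar<P::BaseField, P::BasePrimeField>,
    for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
    P: QuadExtVarParams<BF>,
{
    a.mul_equals(b, expected)
}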
impl_bounded_ops!(
QuadExtVar<BF, P>,
QuadExtField<P>,
Add,
add,
AddAssign,
add_assign,
|this: &'a QuadExtVar<BF, P>, other: &'a QuadExtVar<BF, P>| {
let c0 = &this.c0 + &other.c0;
let c1 = &this.c1 + &other.c1;
QuadExtVar::new(c0, c1)
},
|this: &'a QuadExtVar<BF, P>, other: QuadExtField<P>| {
this + QuadExtVar::constant(other)
},
(BF: FieldVar<P::BaseField, P::BasePrimeField>, P: QuadExtVarParams<BF>),
for <'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>
);
impl_bounded_ops!(
QuadExtVar<BF, P>,
QuadExtField<P>,
Sub,
sub,
SubAssign,
sub_assign,
|this: &'a QuadExtVar<BF, P>, other: &'a QuadExtVar<BF, P>| {
let c0 = &this.c0 - &other.c0;
let c1 = &this.c1 - &other.c1;
QuadExtVar::new(c0, c1)
},
|this: &'a QuadExtVar<BF, P>, other: QuadExtField<P>| {
this - QuadExtVar::constant(other)
},
(BF: FieldVar<P::BaseField, P::BasePrimeField>, P: QuadExtVarParams<BF>),
for <'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>
);
impl_bounded_ops!(
QuadExtVar<BF, P>,
QuadExtField<P>,
Mul,
mul,
MulAssign,
mul_assign,
|this: &'a QuadExtVar<BF, P>, other: &'a QuadExtVar<BF, P>| {
// Karatsuba multiplication for Fp2:
// v0 = A.c0 * B.c0
// v1 = A.c1 * B.c1
// result.c0 = v0 + non_residue * v1
// result.c1 = (A.c0 + A.c1) * (B.c0 + B.c1) - v0 - v1
// Enforced with 3 constraints:
// A.c1 * B.c1 = v1
// A.c0 * B.c0 = result.c0 - non_residue * v1
// (A.c0+A.c1)*(B.c0+B.c1) = result.c1 + result.c0 + (1 - non_residue) * v1
// Reference:
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Devegili, OhEigeartaigh, Scott, Dahab
let mut result = this.clone();
let v0 = &this.c0 * &other.c0;
let v1 = &this.c1 * &other.c1;
result.c1 += &this.c0;
result.c1 *= &other.c0 + &other.c1;
result.c1 -= &v0;
result.c1 -= &v1;
result.c0 = v0 + &QuadExtVar::<BF, P>::mul_base_field_by_nonresidue(&v1).unwrap();
result
},
|this: &'a QuadExtVar<BF, P>, other: QuadExtField<P>| {
this * QuadExtVar::constant(other)
},
(BF: FieldVar<P::BaseField, P::BasePrimeField>, P: QuadExtVarParams<BF>),
for <'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>
);
impl<BF, P> EqGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
#[tracing::instrument(target = "r1cs")]
fn is_eq(&self, other: &Self) -> Result<Boolean<P::BasePrimeField>, SynthesisError> {
let b0 = self.c0.is_eq(&other.c0)?;
let b1 = self.c1.is_eq(&other.c1)?;
b0.and(&b1)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<P::BasePrimeField>,
) -> Result<(), SynthesisError> {
self.c0.conditional_enforce_equal(&other.c0, condition)?;
self.c1.conditional_enforce_equal(&other.c1, condition)?;
Ok(())
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_not_equal(
&self,
other: &Self,
condition: &Boolean<P::BasePrimeField>,
) -> Result<(), SynthesisError> {
let is_equal = self.is_eq(other)?;
is_equal
.and(condition)?
.enforce_equal(&Boolean::Constant(false))
}
}
impl<BF, P> ToBitsGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bits_le(&self) -> Result<Vec<Boolean<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_bits_le()?;
let mut c1 = self.c1.to_bits_le()?;
c0.append(&mut c1);
Ok(c0)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bits_le(&self) -> Result<Vec<Boolean<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_non_unique_bits_le()?;
let mut c1 = self.c1.to_non_unique_bits_le()?;
c0.append(&mut c1);
Ok(c0)
}
}
impl<BF, P> ToBytesGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_bytes()?;
let mut c1 = self.c1.to_bytes()?;
c0.append(&mut c1);
Ok(c0)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::BasePrimeField>>, SynthesisError> {
let mut c0 = self.c0.to_non_unique_bytes()?;
let mut c1 = self.c1.to_non_unique_bytes()?;
c0.append(&mut c1);
Ok(c0)
}
}
impl<BF, P> ToConstraintFieldGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
BF: ToConstraintFieldGadget<P::BasePrimeField>,
{
#[tracing::instrument(target = "r1cs")]
fn to_constraint_field(&self) -> Result<Vec<FpVar<P::BasePrimeField>>, SynthesisError> {
let mut res = Vec::new();
res.extend_from_slice(&self.c0.to_constraint_field()?);
res.extend_from_slice(&self.c1.to_constraint_field()?);
Ok(res)
}
}
impl<BF, P> CondSelectGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
#[inline]
fn conditionally_select(
cond: &Boolean<P::BasePrimeField>,
true_value: &Self,
false_value: &Self,
) -> Result<Self, SynthesisError> {
let c0 = BF::conditionally_select(cond, &true_value.c0, &false_value.c0)?;
let c1 = BF::conditionally_select(cond, &true_value.c1, &false_value.c1)?;
Ok(Self::new(c0, c1))
}
}
impl<BF, P> TwoBitLookupGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>
+ TwoBitLookupGadget<P::BasePrimeField, TableConstant = P::BaseField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
type TableConstant = QuadExtField<P>;
#[tracing::instrument(target = "r1cs")]
fn two_bit_lookup(
b: &[Boolean<P::BasePrimeField>],
c: &[Self::TableConstant],
) -> Result<Self, SynthesisError> {
let c0s = c.iter().map(|f| f.c0).collect::<Vec<_>>();
let c1s = c.iter().map(|f| f.c1).collect::<Vec<_>>();
let c0 = BF::two_bit_lookup(b, &c0s)?;
let c1 = BF::two_bit_lookup(b, &c1s)?;
Ok(Self::new(c0, c1))
}
}
impl<BF, P> ThreeBitCondNegLookupGadget<P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>
+ ThreeBitCondNegLookupGadget<P::BasePrimeField, TableConstant = P::BaseField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
type TableConstant = QuadExtField<P>;
#[tracing::instrument(target = "r1cs")]
fn three_bit_cond_neg_lookup(
b: &[Boolean<P::BasePrimeField>],
b0b1: &Boolean<P::BasePrimeField>,
c: &[Self::TableConstant],
) -> Result<Self, SynthesisError> {
let c0s = c.iter().map(|f| f.c0).collect::<Vec<_>>();
let c1s = c.iter().map(|f| f.c1).collect::<Vec<_>>();
let c0 = BF::three_bit_cond_neg_lookup(b, b0b1, &c0s)?;
let c1 = BF::three_bit_cond_neg_lookup(b, b0b1, &c1s)?;
Ok(Self::new(c0, c1))
}
}
impl<BF, P> AllocVar<QuadExtField<P>, P::BasePrimeField> for QuadExtVar<BF, P>
where
BF: FieldVar<P::BaseField, P::BasePrimeField>,
for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>,
P: QuadExtVarParams<BF>,
{
fn new_variable<T: Borrow<QuadExtField<P>>>(
cs: impl Into<Namespace<P::BasePrimeField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let (c0, c1) = match f() {
Ok(fe) => (Ok(fe.borrow().c0), Ok(fe.borrow().c1)),
Err(_) => (
Err(SynthesisError::AssignmentMissing),
Err(SynthesisError::AssignmentMissing),
),
};
let c0 = BF::new_variable(ark_relations::ns!(cs, "c0"), || c0, mode)?;
let c1 = BF::new_variable(ark_relations::ns!(cs, "c1"), || c1, mode)?;
Ok(Self::new(c0, c1))
}
}
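// A hypothetical helper (not from the upstream file): allocating a quadratic-extension
// witness goes through the `AllocVar` impl above, which allocates `c0` and `c1` in
// their own namespaces.
#[allow(dead_code)]
fn alloc_witness_sketch<BF, P>(
    cs: ConstraintSystemRef<P::BasePrimeField>,
    value: QuadExtField<P>,
) -> Result<QuadExtVar<BF, P>, SynthesisError>
where
    BF: FieldVar<P::BaseField, P::BasePrimeField>,
    for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
    P: QuadExtVarParams<BF>,
{
    QuadExtVar::new_witness(cs, || Ok(value))
}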

9
src/groups/curves/mod.rs Normal file
View File

@@ -0,0 +1,9 @@
/// This module generically implements arithmetic for Short
/// Weierstrass elliptic curves by following the complete formulae of
/// [[Renes, Costello, Batina 2015]](https://eprint.iacr.org/2015/1060).
pub mod short_weierstrass;
/// This module generically implements arithmetic for Twisted
/// Edwards elliptic curves by following the complete formulae described in the
/// [EFD](https://www.hyperelliptic.org/EFD/g1p/auto-twisted.html).
pub mod twisted_edwards;

View File

@@ -0,0 +1,247 @@
use ark_ec::{
bls12::{Bls12Parameters, G1Prepared, G2Prepared, TwistType},
short_weierstrass_jacobian::GroupAffine,
};
use ark_ff::{BitIteratorBE, Field, One};
use ark_relations::r1cs::{Namespace, SynthesisError};
use crate::{
fields::{fp::FpVar, fp2::Fp2Var, FieldVar},
groups::curves::short_weierstrass::*,
prelude::*,
Vec,
};
use core::{borrow::Borrow, fmt::Debug};
/// Represents a projective point in G1.
pub type G1Var<P> =
ProjectiveVar<<P as Bls12Parameters>::G1Parameters, FpVar<<P as Bls12Parameters>::Fp>>;
/// Represents an affine point on G1. Should be used only for comparison and
/// when a canonical representation of a point is required, and not for
/// arithmetic.
pub type G1AffineVar<P> =
AffineVar<<P as Bls12Parameters>::G1Parameters, FpVar<<P as Bls12Parameters>::Fp>>;
/// Represents a projective point in G2.
pub type G2Var<P> = ProjectiveVar<<P as Bls12Parameters>::G2Parameters, Fp2G<P>>;
/// Represents an affine point on G2. Should be used only for comparison and
/// when a canonical representation of a point is required, and not for
/// arithmetic.
pub type G2AffineVar<P> = AffineVar<<P as Bls12Parameters>::G2Parameters, Fp2G<P>>;
/// Represents the cached precomputation that can be performed on a G1 element
/// which enables speeding up pairing computation.
#[derive(Derivative)]
#[derivative(Clone(bound = "G1Var<P>: Clone"), Debug(bound = "G1Var<P>: Debug"))]
pub struct G1PreparedVar<P: Bls12Parameters>(pub AffineVar<P::G1Parameters, FpVar<P::Fp>>);
impl<P: Bls12Parameters> G1PreparedVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<G1Prepared<P>, SynthesisError> {
let x = self.0.x.value()?;
let y = self.0.y.value()?;
let infinity = self.0.infinity.value()?;
let g = GroupAffine::new(x, y, infinity);
Ok(g.into())
}
/// Constructs `Self` from a `G1Var`.
pub fn from_group_var(q: &G1Var<P>) -> Result<Self, SynthesisError> {
let g = q.to_affine()?;
Ok(Self(g))
}
}
impl<P: Bls12Parameters> AllocVar<G1Prepared<P>, P::Fp> for G1PreparedVar<P> {
fn new_variable<T: Borrow<G1Prepared<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g1_prep = f().map(|b| b.borrow().0);
let x = FpVar::new_variable(ark_relations::ns!(cs, "x"), || g1_prep.map(|g| g.x), mode)?;
let y = FpVar::new_variable(ark_relations::ns!(cs, "y"), || g1_prep.map(|g| g.y), mode)?;
let infinity = Boolean::new_variable(
ark_relations::ns!(cs, "inf"),
|| g1_prep.map(|g| g.infinity),
mode,
)?;
let g = AffineVar::new(x, y, infinity);
Ok(Self(g))
}
}
impl<P: Bls12Parameters> ToBytesGadget<P::Fp> for G1PreparedVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut bytes = self.0.x.to_bytes()?;
let y_bytes = self.0.y.to_bytes()?;
let inf_bytes = self.0.infinity.to_bytes()?;
bytes.extend_from_slice(&y_bytes);
bytes.extend_from_slice(&inf_bytes);
Ok(bytes)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut bytes = self.0.x.to_bytes()?;
let y_bytes = self.0.y.to_bytes()?;
let inf_bytes = self.0.infinity.to_bytes()?;
bytes.extend_from_slice(&y_bytes);
bytes.extend_from_slice(&inf_bytes);
Ok(bytes)
}
}
type Fp2G<P> = Fp2Var<<P as Bls12Parameters>::Fp2Params>;
type LCoeff<P> = (Fp2G<P>, Fp2G<P>);
/// Represents the cached precomputation that can be performed on a G2 element
/// which enables speeding up pairing computation.
#[derive(Derivative)]
#[derivative(
Clone(bound = "Fp2Var<P::Fp2Params>: Clone"),
Debug(bound = "Fp2Var<P::Fp2Params>: Debug")
)]
pub struct G2PreparedVar<P: Bls12Parameters> {
#[doc(hidden)]
pub ell_coeffs: Vec<LCoeff<P>>,
}
impl<P: Bls12Parameters> AllocVar<G2Prepared<P>, P::Fp> for G2PreparedVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f, mode))]
fn new_variable<T: Borrow<G2Prepared<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g2_prep = f().map(|b| {
let projective_coeffs = &b.borrow().ell_coeffs;
let mut z_s = projective_coeffs
.iter()
.map(|(_, _, z)| *z)
.collect::<Vec<_>>();
ark_ff::fields::batch_inversion(&mut z_s);
projective_coeffs
.iter()
.zip(z_s)
.map(|((x, y, _), z_inv)| (*x * &z_inv, *y * &z_inv))
.collect::<Vec<_>>()
});
let l = Vec::new_variable(
ark_relations::ns!(cs, "l"),
|| {
g2_prep
.clone()
.map(|c| c.iter().map(|(l, _)| *l).collect::<Vec<_>>())
},
mode,
)?;
let r = Vec::new_variable(
ark_relations::ns!(cs, "r"),
|| g2_prep.map(|c| c.iter().map(|(_, r)| *r).collect::<Vec<_>>()),
mode,
)?;
let ell_coeffs = l.into_iter().zip(r).collect();
Ok(Self { ell_coeffs })
}
}
impl<P: Bls12Parameters> ToBytesGadget<P::Fp> for G2PreparedVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut bytes = Vec::new();
for coeffs in &self.ell_coeffs {
bytes.extend_from_slice(&coeffs.0.to_bytes()?);
bytes.extend_from_slice(&coeffs.1.to_bytes()?);
}
Ok(bytes)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut bytes = Vec::new();
for coeffs in &self.ell_coeffs {
bytes.extend_from_slice(&coeffs.0.to_non_unique_bytes()?);
bytes.extend_from_slice(&coeffs.1.to_non_unique_bytes()?);
}
Ok(bytes)
}
}
impl<P: Bls12Parameters> G2PreparedVar<P> {
/// Constructs `Self` from a `G2Var`.
#[tracing::instrument(target = "r1cs")]
pub fn from_group_var(q: &G2Var<P>) -> Result<Self, SynthesisError> {
let q = q.to_affine()?;
let two_inv = P::Fp::one().double().inverse().unwrap();
// Enforce that `q` is not the point at infinity.
q.infinity.enforce_not_equal(&Boolean::Constant(true))?;
let mut ell_coeffs = vec![];
let mut r = q.clone();
for i in BitIteratorBE::new(P::X).skip(1) {
ell_coeffs.push(Self::double(&mut r, &two_inv)?);
if i {
ell_coeffs.push(Self::add(&mut r, &q)?);
}
}
Ok(Self { ell_coeffs })
}
#[tracing::instrument(target = "r1cs")]
fn double(r: &mut G2AffineVar<P>, two_inv: &P::Fp) -> Result<LCoeff<P>, SynthesisError> {
let a = r.y.inverse()?;
let mut b = r.x.square()?;
let b_tmp = b.clone();
b.mul_assign_by_base_field_constant(*two_inv);
b += &b_tmp;
let c = &a * &b;
let d = r.x.double()?;
let x3 = c.square()? - &d;
let e = &c * &r.x - &r.y;
let c_x3 = &c * &x3;
let y3 = &e - &c_x3;
let mut f = c;
f.negate_in_place()?;
r.x = x3;
r.y = y3;
match P::TWIST_TYPE {
TwistType::M => Ok((e, f)),
TwistType::D => Ok((f, e)),
}
}
#[tracing::instrument(target = "r1cs")]
fn add(r: &mut G2AffineVar<P>, q: &G2AffineVar<P>) -> Result<LCoeff<P>, SynthesisError> {
let a = (&q.x - &r.x).inverse()?;
let b = &q.y - &r.y;
let c = &a * &b;
let d = &r.x + &q.x;
let x3 = c.square()? - &d;
let e = (&r.x - &x3) * &c;
let y3 = e - &r.y;
let g = &c * &r.x - &r.y;
let mut f = c;
f.negate_in_place()?;
r.x = x3;
r.y = y3;
match P::TWIST_TYPE {
TwistType::M => Ok((g, f)),
TwistType::D => Ok((f, g)),
}
}
}
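// A hypothetical helper (not from the upstream file): the prepared forms are what a
// pairing gadget consumes, so both group variables are typically converted right
// before the Miller loop.
#[allow(dead_code)]
fn prepare_pair_sketch<P: Bls12Parameters>(
    p: &G1Var<P>,
    q: &G2Var<P>,
) -> Result<(G1PreparedVar<P>, G2PreparedVar<P>), SynthesisError> {
    let p_prep = G1PreparedVar::from_group_var(p)?;
    // `G2PreparedVar::from_group_var` also enforces in-circuit that `q` is not the
    // point at infinity before running the precomputation.
    let q_prep = G2PreparedVar::from_group_var(q)?;
    Ok((p_prep, q_prep))
}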

View File

@@ -0,0 +1,495 @@
use ark_ec::mnt4::{
g2::{AteAdditionCoefficients, AteDoubleCoefficients},
G1Prepared, G2Prepared, MNT4Parameters,
};
use ark_ff::Field;
use ark_relations::r1cs::{Namespace, SynthesisError};
use crate::{
fields::{fp::FpVar, fp2::Fp2Var, FieldVar},
groups::curves::short_weierstrass::ProjectiveVar,
pairing::mnt4::PairingVar,
prelude::*,
Vec,
};
use core::borrow::Borrow;
/// Represents a projective point in G1.
pub type G1Var<P> =
ProjectiveVar<<P as MNT4Parameters>::G1Parameters, FpVar<<P as MNT4Parameters>::Fp>>;
/// Represents a projective point in G2.
pub type G2Var<P> = ProjectiveVar<<P as MNT4Parameters>::G2Parameters, Fp2G<P>>;
/// Represents the cached precomputation that can be performed on a G1 element
/// which enables speeding up pairing computation.
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
pub struct G1PreparedVar<P: MNT4Parameters> {
#[doc(hidden)]
pub x: FpVar<P::Fp>,
#[doc(hidden)]
pub y: FpVar<P::Fp>,
#[doc(hidden)]
pub x_twist: Fp2Var<P::Fp2Params>,
#[doc(hidden)]
pub y_twist: Fp2Var<P::Fp2Params>,
}
impl<P: MNT4Parameters> AllocVar<G1Prepared<P>, P::Fp> for G1PreparedVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<G1Prepared<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g1_prep = f().map(|b| *b.borrow());
let x = FpVar::new_variable(ark_relations::ns!(cs, "x"), || g1_prep.map(|g| g.x), mode)?;
let y = FpVar::new_variable(ark_relations::ns!(cs, "y"), || g1_prep.map(|g| g.y), mode)?;
let x_twist = Fp2Var::new_variable(
ark_relations::ns!(cs, "x_twist"),
|| g1_prep.map(|g| g.x_twist),
mode,
)?;
let y_twist = Fp2Var::new_variable(
ark_relations::ns!(cs, "y_twist"),
|| g1_prep.map(|g| g.y_twist),
mode,
)?;
Ok(Self {
x,
y,
x_twist,
y_twist,
})
}
}
impl<P: MNT4Parameters> G1PreparedVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<G1Prepared<P>, SynthesisError> {
let (x, y, x_twist, y_twist) = (
self.x.value()?,
self.y.value()?,
self.x_twist.value()?,
self.y_twist.value()?,
);
Ok(G1Prepared {
x,
y,
x_twist,
y_twist,
})
}
/// Constructs `Self` from a `G1Var`.
#[tracing::instrument(target = "r1cs")]
pub fn from_group_var(q: &G1Var<P>) -> Result<Self, SynthesisError> {
let q = q.to_affine()?;
let x_twist = Fp2Var::new(&q.x * P::TWIST.c0, &q.x * P::TWIST.c1);
let y_twist = Fp2Var::new(&q.y * P::TWIST.c0, &q.y * P::TWIST.c1);
Ok(G1PreparedVar {
x: q.x.clone(),
y: q.y.clone(),
x_twist,
y_twist,
})
}
}
impl<P: MNT4Parameters> ToBytesGadget<P::Fp> for G1PreparedVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_bytes()?;
let mut y = self.y.to_bytes()?;
let mut x_twist = self.x_twist.to_bytes()?;
let mut y_twist = self.y_twist.to_bytes()?;
x.append(&mut y);
x.append(&mut x_twist);
x.append(&mut y_twist);
Ok(x)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_non_unique_bytes()?;
let mut y = self.y.to_non_unique_bytes()?;
let mut x_twist = self.x_twist.to_non_unique_bytes()?;
let mut y_twist = self.y_twist.to_non_unique_bytes()?;
x.append(&mut y);
x.append(&mut x_twist);
x.append(&mut y_twist);
Ok(x)
}
}
type Fp2G<P> = Fp2Var<<P as MNT4Parameters>::Fp2Params>;
/// Represents the cached precomputation that can be performed on a G2 element
/// which enables speeding up pairing computation.
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
pub struct G2PreparedVar<P: MNT4Parameters> {
#[doc(hidden)]
pub x: Fp2Var<P::Fp2Params>,
#[doc(hidden)]
pub y: Fp2Var<P::Fp2Params>,
#[doc(hidden)]
pub x_over_twist: Fp2Var<P::Fp2Params>,
#[doc(hidden)]
pub y_over_twist: Fp2Var<P::Fp2Params>,
#[doc(hidden)]
pub double_coefficients: Vec<AteDoubleCoefficientsVar<P>>,
#[doc(hidden)]
pub addition_coefficients: Vec<AteAdditionCoefficientsVar<P>>,
}
impl<P: MNT4Parameters> AllocVar<G2Prepared<P>, P::Fp> for G2PreparedVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<G2Prepared<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g2_prep = f().map(|b| b.borrow().clone());
let g2 = g2_prep.as_ref().map_err(|e| *e);
let x = Fp2Var::new_variable(ark_relations::ns!(cs, "x"), || g2.map(|g| g.x), mode)?;
let y = Fp2Var::new_variable(ark_relations::ns!(cs, "y"), || g2.map(|g| g.y), mode)?;
let x_over_twist = Fp2Var::new_variable(
ark_relations::ns!(cs, "x_over_twist"),
|| g2.map(|g| g.x_over_twist),
mode,
)?;
let y_over_twist = Fp2Var::new_variable(
ark_relations::ns!(cs, "y_over_twist"),
|| g2.map(|g| g.y_over_twist),
mode,
)?;
let double_coefficients = Vec::new_variable(
ark_relations::ns!(cs, "double coeffs"),
|| g2.map(|g| g.double_coefficients.clone()),
mode,
)?;
let addition_coefficients = Vec::new_variable(
ark_relations::ns!(cs, "add coeffs"),
|| g2.map(|g| g.addition_coefficients.clone()),
mode,
)?;
Ok(Self {
x,
y,
x_over_twist,
y_over_twist,
double_coefficients,
addition_coefficients,
})
}
}
impl<P: MNT4Parameters> ToBytesGadget<P::Fp> for G2PreparedVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_bytes()?;
let mut y = self.y.to_bytes()?;
let mut x_over_twist = self.x_over_twist.to_bytes()?;
let mut y_over_twist = self.y_over_twist.to_bytes()?;
x.append(&mut y);
x.append(&mut x_over_twist);
x.append(&mut y_over_twist);
for coeff in &self.double_coefficients {
x.extend_from_slice(&coeff.to_bytes()?);
}
for coeff in &self.addition_coefficients {
x.extend_from_slice(&coeff.to_bytes()?);
}
Ok(x)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_non_unique_bytes()?;
let mut y = self.y.to_non_unique_bytes()?;
let mut x_over_twist = self.x_over_twist.to_non_unique_bytes()?;
let mut y_over_twist = self.y_over_twist.to_non_unique_bytes()?;
x.append(&mut y);
x.append(&mut x_over_twist);
x.append(&mut y_over_twist);
for coeff in &self.double_coefficients {
x.extend_from_slice(&coeff.to_non_unique_bytes()?);
}
for coeff in &self.addition_coefficients {
x.extend_from_slice(&coeff.to_non_unique_bytes()?);
}
Ok(x)
}
}
impl<P: MNT4Parameters> G2PreparedVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<G2Prepared<P>, SynthesisError> {
let x = self.x.value()?;
let y = self.y.value()?;
let x_over_twist = self.x_over_twist.value()?;
let y_over_twist = self.y_over_twist.value()?;
let double_coefficients = self
.double_coefficients
.iter()
.map(|coeff| coeff.value())
.collect::<Result<Vec<AteDoubleCoefficients<P>>, _>>()?;
let addition_coefficients = self
.addition_coefficients
.iter()
.map(|coeff| coeff.value())
.collect::<Result<Vec<AteAdditionCoefficients<P>>, _>>()?;
Ok(G2Prepared {
x,
y,
x_over_twist,
y_over_twist,
double_coefficients,
addition_coefficients,
})
}
/// Constructs `Self` from a `G2Var`.
#[tracing::instrument(target = "r1cs")]
pub fn from_group_var(q: &G2Var<P>) -> Result<Self, SynthesisError> {
let twist_inv = P::TWIST.inverse().unwrap();
let q = q.to_affine()?;
let mut g2p = G2PreparedVar {
x: q.x.clone(),
y: q.y.clone(),
x_over_twist: &q.x * twist_inv,
y_over_twist: &q.y * twist_inv,
double_coefficients: vec![],
addition_coefficients: vec![],
};
let mut r = G2ProjectiveExtendedVar {
x: q.x.clone(),
y: q.y.clone(),
z: Fp2G::<P>::one(),
t: Fp2G::<P>::one(),
};
for (idx, value) in P::ATE_LOOP_COUNT.iter().rev().enumerate() {
let mut tmp = *value;
let skip_extraneous_bits = 64 - value.leading_zeros();
let mut v = Vec::with_capacity(16);
for i in 0..64 {
if idx == 0 && (i == 0 || i >= skip_extraneous_bits) {
continue;
}
v.push(tmp & 1 == 1);
tmp >>= 1;
}
for bit in v.iter().rev() {
let (r2, coeff) = PairingVar::<P>::doubling_step_for_flipped_miller_loop(&r)?;
g2p.double_coefficients.push(coeff);
r = r2;
if *bit {
let (r2, coeff) = PairingVar::<P>::mixed_addition_step_for_flipped_miller_loop(
&q.x, &q.y, &r,
)?;
g2p.addition_coefficients.push(coeff);
r = r2;
}
tmp >>= 1;
}
}
if P::ATE_IS_LOOP_COUNT_NEG {
let rz_inv = r.z.inverse()?;
let rz2_inv = rz_inv.square()?;
let rz3_inv = &rz_inv * &rz2_inv;
let minus_r_affine_x = &r.x * &rz2_inv;
let minus_r_affine_y = r.y.negate()? * &rz3_inv;
let add_result = PairingVar::<P>::mixed_addition_step_for_flipped_miller_loop(
&minus_r_affine_x,
&minus_r_affine_y,
&r,
)?;
g2p.addition_coefficients.push(add_result.1);
}
Ok(g2p)
}
}
#[doc(hidden)]
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
pub struct AteDoubleCoefficientsVar<P: MNT4Parameters> {
pub c_h: Fp2Var<P::Fp2Params>,
pub c_4c: Fp2Var<P::Fp2Params>,
pub c_j: Fp2Var<P::Fp2Params>,
pub c_l: Fp2Var<P::Fp2Params>,
}
impl<P: MNT4Parameters> AllocVar<AteDoubleCoefficients<P>, P::Fp> for AteDoubleCoefficientsVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<AteDoubleCoefficients<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let c_prep = f().map(|c| c.borrow().clone());
let c = c_prep.as_ref().map_err(|e| *e);
let c_h = Fp2Var::new_variable(ark_relations::ns!(cs, "c_h"), || c.map(|c| c.c_h), mode)?;
let c_4c =
Fp2Var::new_variable(ark_relations::ns!(cs, "c_4c"), || c.map(|c| c.c_4c), mode)?;
let c_j = Fp2Var::new_variable(ark_relations::ns!(cs, "c_j"), || c.map(|c| c.c_j), mode)?;
let c_l = Fp2Var::new_variable(ark_relations::ns!(cs, "c_l"), || c.map(|c| c.c_l), mode)?;
Ok(Self {
c_h,
c_4c,
c_j,
c_l,
})
}
}
impl<P: MNT4Parameters> ToBytesGadget<P::Fp> for AteDoubleCoefficientsVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_h = self.c_h.to_bytes()?;
let mut c_4c = self.c_4c.to_bytes()?;
let mut c_j = self.c_j.to_bytes()?;
let mut c_l = self.c_l.to_bytes()?;
c_h.append(&mut c_4c);
c_h.append(&mut c_j);
c_h.append(&mut c_l);
Ok(c_h)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_h = self.c_h.to_non_unique_bytes()?;
let mut c_4c = self.c_4c.to_non_unique_bytes()?;
let mut c_j = self.c_j.to_non_unique_bytes()?;
let mut c_l = self.c_l.to_non_unique_bytes()?;
c_h.append(&mut c_4c);
c_h.append(&mut c_j);
c_h.append(&mut c_l);
Ok(c_h)
}
}
impl<P: MNT4Parameters> AteDoubleCoefficientsVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<AteDoubleCoefficients<P>, SynthesisError> {
let (c_h, c_4c, c_j, c_l) = (
self.c_h.value()?,
self.c_4c.value()?,
self.c_j.value()?,
self.c_l.value()?,
);
Ok(AteDoubleCoefficients {
c_h,
c_4c,
c_j,
c_l,
})
}
}
#[doc(hidden)]
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
pub struct AteAdditionCoefficientsVar<P: MNT4Parameters> {
pub c_l1: Fp2Var<P::Fp2Params>,
pub c_rz: Fp2Var<P::Fp2Params>,
}
impl<P: MNT4Parameters> AllocVar<AteAdditionCoefficients<P>, P::Fp>
for AteAdditionCoefficientsVar<P>
{
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<AteAdditionCoefficients<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let c_prep = f().map(|c| c.borrow().clone());
let c = c_prep.as_ref().map_err(|e| *e);
let c_l1 =
Fp2Var::new_variable(ark_relations::ns!(cs, "c_l1"), || c.map(|c| c.c_l1), mode)?;
let c_rz =
Fp2Var::new_variable(ark_relations::ns!(cs, "c_rz"), || c.map(|c| c.c_rz), mode)?;
Ok(Self { c_l1, c_rz })
}
}
impl<P: MNT4Parameters> ToBytesGadget<P::Fp> for AteAdditionCoefficientsVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_l1 = self.c_l1.to_bytes()?;
let mut c_rz = self.c_rz.to_bytes()?;
c_l1.append(&mut c_rz);
Ok(c_l1)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_l1 = self.c_l1.to_non_unique_bytes()?;
let mut c_rz = self.c_rz.to_non_unique_bytes()?;
c_l1.append(&mut c_rz);
Ok(c_l1)
}
}
impl<P: MNT4Parameters> AteAdditionCoefficientsVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<AteAdditionCoefficients<P>, SynthesisError> {
let (c_l1, c_rz) = (self.c_l1.value()?, self.c_rz.value()?);
Ok(AteAdditionCoefficients { c_l1, c_rz })
}
}
#[doc(hidden)]
pub struct G2ProjectiveExtendedVar<P: MNT4Parameters> {
pub x: Fp2Var<P::Fp2Params>,
pub y: Fp2Var<P::Fp2Params>,
pub z: Fp2Var<P::Fp2Params>,
pub t: Fp2Var<P::Fp2Params>,
}

View File

@@ -0,0 +1,494 @@
use ark_ec::mnt6::{
g2::{AteAdditionCoefficients, AteDoubleCoefficients},
G1Prepared, G2Prepared, MNT6Parameters,
};
use ark_ff::Field;
use ark_relations::r1cs::{Namespace, SynthesisError};
use crate::{
fields::{fp::FpVar, fp3::Fp3Var, FieldVar},
groups::curves::short_weierstrass::ProjectiveVar,
pairing::mnt6::PairingVar,
prelude::*,
Vec,
};
use core::borrow::Borrow;
/// Represents a projective point in G1.
pub type G1Var<P> =
ProjectiveVar<<P as MNT6Parameters>::G1Parameters, FpVar<<P as MNT6Parameters>::Fp>>;
/// Represents a projective point in G2.
pub type G2Var<P> = ProjectiveVar<<P as MNT6Parameters>::G2Parameters, Fp3G<P>>;
/// Represents the cached precomputation that can be performed on a G1 element
/// which enables speeding up pairing computation.
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))]
pub struct G1PreparedVar<P: MNT6Parameters> {
#[doc(hidden)]
pub x: FpVar<P::Fp>,
#[doc(hidden)]
pub y: FpVar<P::Fp>,
#[doc(hidden)]
pub x_twist: Fp3Var<P::Fp3Params>,
#[doc(hidden)]
pub y_twist: Fp3Var<P::Fp3Params>,
}
impl<P: MNT6Parameters> G1PreparedVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<G1Prepared<P>, SynthesisError> {
let x = self.x.value()?;
let y = self.y.value()?;
let x_twist = self.x_twist.value()?;
let y_twist = self.y_twist.value()?;
Ok(G1Prepared {
x,
y,
x_twist,
y_twist,
})
}
/// Constructs `Self` from a `G1Var`.
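/// The affine coordinates are additionally embedded into `Fp3` and multiplied by the
/// twist, caching the twisted coordinates that the pairing computation consumes.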
#[tracing::instrument(target = "r1cs")]
pub fn from_group_var(q: &G1Var<P>) -> Result<Self, SynthesisError> {
let q = q.to_affine()?;
let zero = FpVar::<P::Fp>::zero();
let x_twist = Fp3Var::new(q.x.clone(), zero.clone(), zero.clone()) * P::TWIST;
let y_twist = Fp3Var::new(q.y.clone(), zero.clone(), zero.clone()) * P::TWIST;
let result = G1PreparedVar {
x: q.x.clone(),
y: q.y.clone(),
x_twist,
y_twist,
};
Ok(result)
}
}
impl<P: MNT6Parameters> AllocVar<G1Prepared<P>, P::Fp> for G1PreparedVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<G1Prepared<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g1_prep = f().map(|b| *b.borrow());
let x = FpVar::new_variable(ark_relations::ns!(cs, "x"), || g1_prep.map(|g| g.x), mode)?;
let y = FpVar::new_variable(ark_relations::ns!(cs, "y"), || g1_prep.map(|g| g.y), mode)?;
let x_twist = Fp3Var::new_variable(
ark_relations::ns!(cs, "x_twist"),
|| g1_prep.map(|g| g.x_twist),
mode,
)?;
let y_twist = Fp3Var::new_variable(
ark_relations::ns!(cs, "y_twist"),
|| g1_prep.map(|g| g.y_twist),
mode,
)?;
Ok(Self {
x,
y,
x_twist,
y_twist,
})
}
}
impl<P: MNT6Parameters> ToBytesGadget<P::Fp> for G1PreparedVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_bytes()?;
let mut y = self.y.to_bytes()?;
let mut x_twist = self.x_twist.to_bytes()?;
let mut y_twist = self.y_twist.to_bytes()?;
x.append(&mut y);
x.append(&mut x_twist);
x.append(&mut y_twist);
Ok(x)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_non_unique_bytes()?;
let mut y = self.y.to_non_unique_bytes()?;
let mut x_twist = self.x_twist.to_non_unique_bytes()?;
let mut y_twist = self.y_twist.to_non_unique_bytes()?;
x.append(&mut y);
x.append(&mut x_twist);
x.append(&mut y_twist);
Ok(x)
}
}
type Fp3G<P> = Fp3Var<<P as MNT6Parameters>::Fp3Params>;
/// Represents the cached precomputation that can be performed on a G2 element
/// which enables speeding up pairing computation.
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))]
pub struct G2PreparedVar<P: MNT6Parameters> {
#[doc(hidden)]
pub x: Fp3Var<P::Fp3Params>,
#[doc(hidden)]
pub y: Fp3Var<P::Fp3Params>,
#[doc(hidden)]
pub x_over_twist: Fp3Var<P::Fp3Params>,
#[doc(hidden)]
pub y_over_twist: Fp3Var<P::Fp3Params>,
#[doc(hidden)]
pub double_coefficients: Vec<AteDoubleCoefficientsVar<P>>,
#[doc(hidden)]
pub addition_coefficients: Vec<AteAdditionCoefficientsVar<P>>,
}
impl<P: MNT6Parameters> AllocVar<G2Prepared<P>, P::Fp> for G2PreparedVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<G2Prepared<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g2_prep = f().map(|b| b.borrow().clone());
let g2 = g2_prep.as_ref().map_err(|e| *e);
let x = Fp3Var::new_variable(ark_relations::ns!(cs, "x"), || g2.map(|g| g.x), mode)?;
let y = Fp3Var::new_variable(ark_relations::ns!(cs, "y"), || g2.map(|g| g.y), mode)?;
let x_over_twist = Fp3Var::new_variable(
ark_relations::ns!(cs, "x_over_twist"),
|| g2.map(|g| g.x_over_twist),
mode,
)?;
let y_over_twist = Fp3Var::new_variable(
ark_relations::ns!(cs, "y_over_twist"),
|| g2.map(|g| g.y_over_twist),
mode,
)?;
let double_coefficients = Vec::new_variable(
ark_relations::ns!(cs, "double coeffs"),
|| g2.map(|g| g.double_coefficients.clone()),
mode,
)?;
let addition_coefficients = Vec::new_variable(
ark_relations::ns!(cs, "add coeffs"),
|| g2.map(|g| g.addition_coefficients.clone()),
mode,
)?;
Ok(Self {
x,
y,
x_over_twist,
y_over_twist,
double_coefficients,
addition_coefficients,
})
}
}
impl<P: MNT6Parameters> ToBytesGadget<P::Fp> for G2PreparedVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_bytes()?;
let mut y = self.y.to_bytes()?;
let mut x_over_twist = self.x_over_twist.to_bytes()?;
let mut y_over_twist = self.y_over_twist.to_bytes()?;
x.append(&mut y);
x.append(&mut x_over_twist);
x.append(&mut y_over_twist);
for coeff in self.double_coefficients.iter() {
x.extend_from_slice(&coeff.to_bytes()?);
}
for coeff in self.addition_coefficients.iter() {
x.extend_from_slice(&coeff.to_bytes()?);
}
Ok(x)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut x = self.x.to_non_unique_bytes()?;
let mut y = self.y.to_non_unique_bytes()?;
let mut x_over_twist = self.x_over_twist.to_non_unique_bytes()?;
let mut y_over_twist = self.y_over_twist.to_non_unique_bytes()?;
x.append(&mut y);
x.append(&mut x_over_twist);
x.append(&mut y_over_twist);
for coeff in self.double_coefficients.iter() {
x.extend_from_slice(&coeff.to_non_unique_bytes()?);
}
for coeff in self.addition_coefficients.iter() {
x.extend_from_slice(&coeff.to_non_unique_bytes()?);
}
Ok(x)
}
}
impl<P: MNT6Parameters> G2PreparedVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<G2Prepared<P>, SynthesisError> {
let x = self.x.value()?;
let y = self.y.value()?;
let x_over_twist = self.x_over_twist.value()?;
let y_over_twist = self.y_over_twist.value()?;
let double_coefficients = self
.double_coefficients
.iter()
.map(|coeff| coeff.value())
.collect::<Result<Vec<_>, SynthesisError>>()?;
let addition_coefficients = self
.addition_coefficients
.iter()
.map(|coeff| coeff.value())
.collect::<Result<Vec<_>, SynthesisError>>()?;
Ok(G2Prepared {
x,
y,
x_over_twist,
y_over_twist,
double_coefficients,
addition_coefficients,
})
}
/// Constructs `Self` from a `G2Var`.
#[tracing::instrument(target = "r1cs")]
pub fn from_group_var(q: &G2Var<P>) -> Result<Self, SynthesisError> {
let q = q.to_affine()?;
let twist_inv = P::TWIST.inverse().unwrap();
let mut g2p = G2PreparedVar {
x: q.x.clone(),
y: q.y.clone(),
x_over_twist: &q.x * twist_inv,
y_over_twist: &q.y * twist_inv,
double_coefficients: vec![],
addition_coefficients: vec![],
};
let mut r = G2ProjectiveExtendedVar {
x: q.x.clone(),
y: q.y.clone(),
z: Fp3G::<P>::one(),
t: Fp3G::<P>::one(),
};
for (idx, value) in P::ATE_LOOP_COUNT.iter().rev().enumerate() {
let mut tmp = *value;
let skip_extraneous_bits = 64 - value.leading_zeros();
let mut v = Vec::with_capacity(16);
for i in 0..64 {
if idx == 0 && (i == 0 || i >= skip_extraneous_bits) {
continue;
}
v.push(tmp & 1 == 1);
tmp >>= 1;
}
for bit in v.iter().rev() {
let (r2, coeff) = PairingVar::<P>::doubling_step_for_flipped_miller_loop(&r)?;
g2p.double_coefficients.push(coeff);
r = r2;
if *bit {
let (r2, coeff) = PairingVar::<P>::mixed_addition_step_for_flipped_miller_loop(
&q.x, &q.y, &r,
)?;
g2p.addition_coefficients.push(coeff);
r = r2;
}
tmp >>= 1;
}
}
if P::ATE_IS_LOOP_COUNT_NEG {
let rz_inv = r.z.inverse()?;
let rz2_inv = rz_inv.square()?;
let rz3_inv = &rz_inv * &rz2_inv;
let minus_r_affine_x = &r.x * &rz2_inv;
let minus_r_affine_y = r.y.negate()? * &rz3_inv;
let add_result = PairingVar::<P>::mixed_addition_step_for_flipped_miller_loop(
&minus_r_affine_x,
&minus_r_affine_y,
&r,
)?;
g2p.addition_coefficients.push(add_result.1);
}
Ok(g2p)
}
}
#[doc(hidden)]
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))]
pub struct AteDoubleCoefficientsVar<P: MNT6Parameters> {
pub c_h: Fp3Var<P::Fp3Params>,
pub c_4c: Fp3Var<P::Fp3Params>,
pub c_j: Fp3Var<P::Fp3Params>,
pub c_l: Fp3Var<P::Fp3Params>,
}
impl<P: MNT6Parameters> AllocVar<AteDoubleCoefficients<P>, P::Fp> for AteDoubleCoefficientsVar<P> {
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<AteDoubleCoefficients<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let c_prep = f().map(|c| c.borrow().clone());
let c = c_prep.as_ref().map_err(|e| *e);
let c_h = Fp3Var::new_variable(ark_relations::ns!(cs, "c_h"), || c.map(|c| c.c_h), mode)?;
let c_4c =
Fp3Var::new_variable(ark_relations::ns!(cs, "c_4c"), || c.map(|c| c.c_4c), mode)?;
let c_j = Fp3Var::new_variable(ark_relations::ns!(cs, "c_j"), || c.map(|c| c.c_j), mode)?;
let c_l = Fp3Var::new_variable(ark_relations::ns!(cs, "c_l"), || c.map(|c| c.c_l), mode)?;
Ok(Self {
c_h,
c_4c,
c_j,
c_l,
})
}
}
impl<P: MNT6Parameters> ToBytesGadget<P::Fp> for AteDoubleCoefficientsVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_h = self.c_h.to_bytes()?;
let mut c_4c = self.c_4c.to_bytes()?;
let mut c_j = self.c_j.to_bytes()?;
let mut c_l = self.c_l.to_bytes()?;
c_h.append(&mut c_4c);
c_h.append(&mut c_j);
c_h.append(&mut c_l);
Ok(c_h)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_h = self.c_h.to_non_unique_bytes()?;
let mut c_4c = self.c_4c.to_non_unique_bytes()?;
let mut c_j = self.c_j.to_non_unique_bytes()?;
let mut c_l = self.c_l.to_non_unique_bytes()?;
c_h.append(&mut c_4c);
c_h.append(&mut c_j);
c_h.append(&mut c_l);
Ok(c_h)
}
}
impl<P: MNT6Parameters> AteDoubleCoefficientsVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<AteDoubleCoefficients<P>, SynthesisError> {
let c_h = self.c_h.value()?;
let c_4c = self.c_4c.value()?;
let c_j = self.c_j.value()?;
let c_l = self.c_l.value()?;
Ok(AteDoubleCoefficients {
c_h,
c_4c,
c_j,
c_l,
})
}
}
#[doc(hidden)]
#[derive(Derivative)]
#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))]
pub struct AteAdditionCoefficientsVar<P: MNT6Parameters> {
pub c_l1: Fp3Var<P::Fp3Params>,
pub c_rz: Fp3Var<P::Fp3Params>,
}
impl<P: MNT6Parameters> AllocVar<AteAdditionCoefficients<P>, P::Fp>
for AteAdditionCoefficientsVar<P>
{
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<T: Borrow<AteAdditionCoefficients<P>>>(
cs: impl Into<Namespace<P::Fp>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let c_prep = f().map(|c| c.borrow().clone());
let c = c_prep.as_ref().map_err(|e| *e);
let c_l1 =
Fp3Var::new_variable(ark_relations::ns!(cs, "c_l1"), || c.map(|c| c.c_l1), mode)?;
let c_rz =
Fp3Var::new_variable(ark_relations::ns!(cs, "c_rz"), || c.map(|c| c.c_rz), mode)?;
Ok(Self { c_l1, c_rz })
}
}
impl<P: MNT6Parameters> ToBytesGadget<P::Fp> for AteAdditionCoefficientsVar<P> {
#[inline]
#[tracing::instrument(target = "r1cs")]
fn to_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_l1 = self.c_l1.to_bytes()?;
let mut c_rz = self.c_rz.to_bytes()?;
c_l1.append(&mut c_rz);
Ok(c_l1)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(&self) -> Result<Vec<UInt8<P::Fp>>, SynthesisError> {
let mut c_l1 = self.c_l1.to_non_unique_bytes()?;
let mut c_rz = self.c_rz.to_non_unique_bytes()?;
c_l1.append(&mut c_rz);
Ok(c_l1)
}
}
impl<P: MNT6Parameters> AteAdditionCoefficientsVar<P> {
/// Returns the value assigned to `self` in the underlying constraint
/// system.
pub fn value(&self) -> Result<AteAdditionCoefficients<P>, SynthesisError> {
let c_l1 = self.c_l1.value()?;
let c_rz = self.c_rz.value()?;
Ok(AteAdditionCoefficients { c_l1, c_rz })
}
}
#[doc(hidden)]
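/// Extended projective coordinates for the running point accumulated by the doubling
/// and addition steps inside `G2PreparedVar::from_group_var`.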
pub struct G2ProjectiveExtendedVar<P: MNT6Parameters> {
pub x: Fp3Var<P::Fp3Params>,
pub y: Fp3Var<P::Fp3Params>,
pub z: Fp3Var<P::Fp3Params>,
pub t: Fp3Var<P::Fp3Params>,
}

View File

@@ -0,0 +1,707 @@
use ark_ec::{
short_weierstrass_jacobian::{GroupAffine as SWAffine, GroupProjective as SWProjective},
AffineCurve, ProjectiveCurve, SWModelParameters,
};
use ark_ff::{BigInteger, BitIteratorBE, Field, One, PrimeField, Zero};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use core::{borrow::Borrow, marker::PhantomData};
use crate::{fields::fp::FpVar, prelude::*, ToConstraintFieldGadget, Vec};
/// This module provides a generic implementation of G1 and G2 for
/// the [[BLS12]](https://eprint.iacr.org/2002/088.pdf) family of bilinear groups.
pub mod bls12;
/// This module provides a generic implementation of G1 and G2 for
/// the [[MNT4]](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.8113&rep=rep1&type=pdf)
/// family of bilinear groups.
pub mod mnt4;
/// This module provides a generic implementation of G1 and G2 for
/// the [[MNT6]](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.8113&rep=rep1&type=pdf)
/// family of bilinear groups.
pub mod mnt6;
/// An implementation of arithmetic for Short Weierstrass curves that relies on
/// the complete formulae derived in the paper of
/// [[Renes, Costello, Batina 2015]](https://eprint.iacr.org/2015/1060).
#[derive(Derivative)]
#[derivative(Debug, Clone)]
#[must_use]
pub struct ProjectiveVar<
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
> where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// The x-coordinate.
pub x: F,
/// The y-coordinate.
pub y: F,
/// The z-coordinate.
pub z: F,
#[derivative(Debug = "ignore")]
_params: PhantomData<P>,
}
/// An affine representation of a curve point.
#[derive(Derivative)]
#[derivative(Debug, Clone)]
#[must_use]
pub struct AffineVar<
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
> where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// The x-coordinate.
pub x: F,
/// The y-coordinate.
pub y: F,
/// Is `self` the point at infinity.
pub infinity: Boolean<<P::BaseField as Field>::BasePrimeField>,
#[derivative(Debug = "ignore")]
_params: PhantomData<P>,
}
impl<P, F> AffineVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
fn new(x: F, y: F, infinity: Boolean<<P::BaseField as Field>::BasePrimeField>) -> Self {
Self {
x,
y,
infinity,
_params: PhantomData,
}
}
/// Returns the value assigned to `self` in the underlying
/// constraint system.
pub fn value(&self) -> Result<SWAffine<P>, SynthesisError> {
Ok(SWAffine::new(
self.x.value()?,
self.y.value()?,
self.infinity.value()?,
))
}
}
impl<P, F> ToConstraintFieldGadget<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
F: ToConstraintFieldGadget<<P::BaseField as Field>::BasePrimeField>,
{
fn to_constraint_field(
&self,
) -> Result<Vec<FpVar<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let mut res = Vec::<FpVar<<P::BaseField as Field>::BasePrimeField>>::new();
res.extend_from_slice(&self.x.to_constraint_field()?);
res.extend_from_slice(&self.y.to_constraint_field()?);
Ok(res)
}
}
impl<P, F> R1CSVar<<P::BaseField as Field>::BasePrimeField> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
type Value = SWProjective<P>;
fn cs(&self) -> ConstraintSystemRef<<P::BaseField as Field>::BasePrimeField> {
self.x.cs().or(self.y.cs()).or(self.z.cs())
}
fn value(&self) -> Result<Self::Value, SynthesisError> {
let (x, y, z) = (self.x.value()?, self.y.value()?, self.z.value()?);
let result = if let Some(z_inv) = z.inverse() {
SWAffine::new(x * &z_inv, y * &z_inv, false)
} else {
SWAffine::zero()
};
Ok(result.into())
}
}
impl<P: SWModelParameters, F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>>
ProjectiveVar<P, F>
where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// Constructs `Self` from an `(x, y, z)` coordinate triple.
pub fn new(x: F, y: F, z: F) -> Self {
Self {
x,
y,
z,
_params: PhantomData,
}
}
/// Convert this point into affine form.
#[tracing::instrument(target = "r1cs")]
pub fn to_affine(&self) -> Result<AffineVar<P, F>, SynthesisError> {
let cs = self.cs();
let mode = if self.is_constant() {
let point = self.value()?.into_affine();
let x = F::new_constant(ConstraintSystemRef::None, point.x)?;
let y = F::new_constant(ConstraintSystemRef::None, point.y)?;
let infinity = Boolean::constant(point.infinity);
return Ok(AffineVar::new(x, y, infinity));
} else {
AllocationMode::Witness
};
let infinity = self.is_zero()?;
let zero_x = F::zero();
let zero_y = F::one();
let non_zero_x = F::new_variable(
ark_relations::ns!(cs, "non-zero x"),
|| {
let z_inv = self.z.value()?.inverse().unwrap_or(P::BaseField::zero());
Ok(self.x.value()? * &z_inv)
},
mode,
)?;
let non_zero_y = F::new_variable(
ark_relations::ns!(cs, "non-zero y"),
|| {
let z_inv = self.z.value()?.inverse().unwrap_or(P::BaseField::zero());
Ok(self.y.value()? * &z_inv)
},
mode,
)?;
let x = infinity.select(&zero_x, &non_zero_x)?;
let y = infinity.select(&zero_y, &non_zero_y)?;
Ok(AffineVar::new(x, y, infinity))
}
/// Allocates a new variable without performing an on-curve check, which is
/// useful if the variable is known to be on the curve (e.g., if the point
/// is a constant or is a public input).
#[tracing::instrument(target = "r1cs", skip(cs, f))]
pub fn new_variable_omit_on_curve_check(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<SWProjective<P>, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let (x, y, z) = match f() {
Ok(ge) => {
let ge = ge.into_affine();
if ge.is_zero() {
(
Ok(P::BaseField::zero()),
Ok(P::BaseField::one()),
Ok(P::BaseField::zero()),
)
} else {
(Ok(ge.x), Ok(ge.y), Ok(P::BaseField::one()))
}
}
_ => (
Err(SynthesisError::AssignmentMissing),
Err(SynthesisError::AssignmentMissing),
Err(SynthesisError::AssignmentMissing),
),
};
let x = F::new_variable(ark_relations::ns!(cs, "x"), || x, mode)?;
let y = F::new_variable(ark_relations::ns!(cs, "y"), || y, mode)?;
let z = F::new_variable(ark_relations::ns!(cs, "z"), || z, mode)?;
Ok(Self::new(x, y, z))
}
}
impl<P, F> CurveVar<SWProjective<P>, <P::BaseField as Field>::BasePrimeField>
for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
fn constant(g: SWProjective<P>) -> Self {
let cs = ConstraintSystemRef::None;
Self::new_variable_omit_on_curve_check(cs, || Ok(g), AllocationMode::Constant).unwrap()
}
fn zero() -> Self {
Self::new(F::zero(), F::one(), F::zero())
}
fn is_zero(&self) -> Result<Boolean<<P::BaseField as Field>::BasePrimeField>, SynthesisError> {
self.z.is_zero()
}
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable_omit_prime_order_check(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<SWProjective<P>, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
// Curve equation in projective form:
// E: Y² * Z = X³ + aX * Z² + bZ³
//
// This can be re-written as
// E: Y² * Z - bZ³ = X³ + aX * Z²
// E: Z * (Y² - bZ²) = X * (X² + aZ²)
// so, compute X², Y², Z²,
// compute temp = X * (X² + aZ²)
// check Z.mul_equals((Y² - bZ²), temp)
//
// A total of 5 multiplications
let g = Self::new_variable_omit_on_curve_check(cs, f, mode)?;
if mode != AllocationMode::Constant {
// Perform on-curve check.
let b = P::COEFF_B;
let a = P::COEFF_A;
let x2 = g.x.square()?;
let y2 = g.y.square()?;
let z2 = g.z.square()?;
let t = &g.x * (x2 + &z2 * a);
g.z.mul_equals(&(y2 - z2 * b), &t)?;
}
Ok(g)
}
/// Enforce that `self` is in the prime-order subgroup.
///
/// Does so by multiplying by the prime order, and checking that the result
/// is unchanged.
// TODO: at the moment this doesn't work, because the addition and doubling
// formulae are incomplete for even-order points.
#[tracing::instrument(target = "r1cs")]
fn enforce_prime_order(&self) -> Result<(), SynthesisError> {
let r_minus_1 = (-P::ScalarField::one()).into_repr();
let mut result = Self::zero();
for b in BitIteratorBE::without_leading_zeros(r_minus_1) {
result.double_in_place()?;
if b {
result += self;
}
}
self.negate()?.enforce_equal(&result)?;
Ok(())
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn double_in_place(&mut self) -> Result<(), SynthesisError> {
// Complete doubling formula from Renes-Costello-Batina 2015
// Algorithm 3
// (https://eprint.iacr.org/2015/1060).
//
// Adapted from code in
// https://github.com/RustCrypto/elliptic-curves/blob/master/p256/src/arithmetic.rs
let three_b = P::COEFF_B.double() + &P::COEFF_B;
let xx = self.x.square()?; // 1
let yy = self.y.square()?; // 2
let zz = self.z.square()?; // 3
let xy2 = (&self.x * &self.y).double()?; // 4, 5
let xz2 = (&self.x * &self.z).double()?; // 6, 7
let axz2 = mul_by_coeff_a::<P, F>(&xz2); // 8
let bzz3_part = &axz2 + &zz * three_b; // 9, 10
let yy_m_bzz3 = &yy - &bzz3_part; // 11
let yy_p_bzz3 = &yy + &bzz3_part; // 12
let y_frag = yy_p_bzz3 * &yy_m_bzz3; // 13
let x_frag = yy_m_bzz3 * &xy2; // 14
let bxz3 = xz2 * three_b; // 15
let azz = mul_by_coeff_a::<P, F>(&zz); // 16
let b3_xz_pairs = mul_by_coeff_a::<P, F>(&(&xx - &azz)) + &bxz3; // 15, 16, 17, 18, 19
let xx3_p_azz = (xx.double()? + &xx + &azz) * &b3_xz_pairs; // 23, 24, 25
let y = y_frag + &xx3_p_azz; // 26, 27
let yz2 = (&self.y * &self.z).double()?; // 28, 29
let x = x_frag - &(b3_xz_pairs * &yz2); // 30, 31
let z = (yz2 * &yy).double()?.double()?; // 32, 33, 34
self.x = x;
self.y = y;
self.z = z;
Ok(())
}
#[tracing::instrument(target = "r1cs")]
fn negate(&self) -> Result<Self, SynthesisError> {
Ok(Self::new(self.x.clone(), self.y.negate()?, self.z.clone()))
}
}
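// Editor's note: an illustrative sketch, not part of the original file, showing how the
// complete doubling formula above can be exercised; `P` and `F` stand for whatever
// concrete curve parameters and base-field gadget a caller instantiates.
#[cfg(test)]
#[allow(dead_code)]
fn constant_double_matches_native<P, F>(g: SWProjective<P>) -> Result<(), SynthesisError>
where
    P: SWModelParameters,
    F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
    for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
    // Constants live outside of any constraint system, so this generates no constraints.
    let mut var = ProjectiveVar::<P, F>::constant(g);
    var.double_in_place()?;
    assert_eq!(var.value()?, g.double());
    Ok(())
}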
fn mul_by_coeff_a<
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
>(
f: &F,
) -> F
where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
if !P::COEFF_A.is_zero() {
f * P::COEFF_A
} else {
F::zero()
}
}
impl_bounded_ops!(
ProjectiveVar<P, F>,
SWProjective<P>,
Add,
add,
AddAssign,
add_assign,
|this: &'a ProjectiveVar<P, F>, other: &'a ProjectiveVar<P, F>| {
// Complete addition formula from Renes-Costello-Batina 2015
// Algorithm 1
// (https://eprint.iacr.org/2015/1060).
//
// Adapted from code in
// https://github.com/RustCrypto/elliptic-curves/blob/master/p256/src/arithmetic.rs
let three_b = P::COEFF_B.double() + &P::COEFF_B;
let xx = &this.x * &other.x; // 1
let yy = &this.y * &other.y; // 2
let zz = &this.z * &other.z; // 3
let xy_pairs = ((&this.x + &this.y) * &(&other.x + &other.y)) - (&xx + &yy); // 4, 5, 6, 7, 8
let xz_pairs = ((&this.x + &this.z) * &(&other.x + &other.z)) - (&xx + &zz); // 9, 10, 11, 12, 13
let yz_pairs = ((&this.y + &this.z) * &(&other.y + &other.z)) - (&yy + &zz); // 14, 15, 16, 17, 18
let axz = mul_by_coeff_a::<P, F>(&xz_pairs); // 19
let bzz3_part = &axz + &zz * three_b; // 20, 21
let yy_m_bzz3 = &yy - &bzz3_part; // 22
let yy_p_bzz3 = &yy + &bzz3_part; // 23
let azz = mul_by_coeff_a::<P, F>(&zz);
let xx3_p_azz = xx.double().unwrap() + &xx + &azz; // 25, 26, 27, 29
let bxz3 = &xz_pairs * three_b; // 28
let b3_xz_pairs = mul_by_coeff_a::<P, F>(&(&xx - &azz)) + &bxz3; // 30, 31, 32
let x = (&yy_m_bzz3 * &xy_pairs) - &yz_pairs * &b3_xz_pairs; // 35, 39, 40
let y = (&yy_p_bzz3 * &yy_m_bzz3) + &xx3_p_azz * b3_xz_pairs; // 24, 36, 37, 38
let z = (&yy_p_bzz3 * &yz_pairs) + xy_pairs * xx3_p_azz; // 41, 42, 43
ProjectiveVar::new(x, y, z)
},
|this: &'a ProjectiveVar<P, F>, other: SWProjective<P>| {
this + ProjectiveVar::constant(other)
},
(F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>, P: SWModelParameters),
for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
);
impl_bounded_ops!(
ProjectiveVar<P, F>,
SWProjective<P>,
Sub,
sub,
SubAssign,
sub_assign,
|this: &'a ProjectiveVar<P, F>, other: &'a ProjectiveVar<P, F>| this + other.negate().unwrap(),
|this: &'a ProjectiveVar<P, F>, other: SWProjective<P>| this - ProjectiveVar::constant(other),
(F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>, P: SWModelParameters),
for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>
);
impl<'a, P, F> GroupOpsBounds<'a, SWProjective<P>, ProjectiveVar<P, F>> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
}
impl<'a, P, F> GroupOpsBounds<'a, SWProjective<P>, ProjectiveVar<P, F>> for &'a ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
}
impl<P, F> CondSelectGadget<<P::BaseField as Field>::BasePrimeField> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditionally_select(
cond: &Boolean<<P::BaseField as Field>::BasePrimeField>,
true_value: &Self,
false_value: &Self,
) -> Result<Self, SynthesisError> {
let x = cond.select(&true_value.x, &false_value.x)?;
let y = cond.select(&true_value.y, &false_value.y)?;
let z = cond.select(&true_value.z, &false_value.z)?;
Ok(Self::new(x, y, z))
}
}
impl<P, F> EqGadget<<P::BaseField as Field>::BasePrimeField> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs")]
fn is_eq(
&self,
other: &Self,
) -> Result<Boolean<<P::BaseField as Field>::BasePrimeField>, SynthesisError> {
let x_equal = (&self.x * &other.z).is_eq(&(&other.x * &self.z))?;
let y_equal = (&self.y * &other.z).is_eq(&(&other.y * &self.z))?;
let coordinates_equal = x_equal.and(&y_equal)?;
let both_are_zero = self.is_zero()?.and(&other.is_zero()?)?;
both_are_zero.or(&coordinates_equal)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<<P::BaseField as Field>::BasePrimeField>,
) -> Result<(), SynthesisError> {
let x_equal = (&self.x * &other.z).is_eq(&(&other.x * &self.z))?;
let y_equal = (&self.y * &other.z).is_eq(&(&other.y * &self.z))?;
let coordinates_equal = x_equal.and(&y_equal)?;
let both_are_zero = self.is_zero()?.and(&other.is_zero()?)?;
both_are_zero
.or(&coordinates_equal)?
.conditional_enforce_equal(&Boolean::Constant(true), condition)?;
Ok(())
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_not_equal(
&self,
other: &Self,
condition: &Boolean<<P::BaseField as Field>::BasePrimeField>,
) -> Result<(), SynthesisError> {
let is_equal = self.is_eq(other)?;
is_equal
.and(condition)?
.enforce_equal(&Boolean::Constant(false))
}
}
impl<P, F> AllocVar<SWAffine<P>, <P::BaseField as Field>::BasePrimeField> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
fn new_variable<T: Borrow<SWAffine<P>>>(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
Self::new_variable(cs, || f().map(|b| b.borrow().into_projective()), mode)
}
}
impl<P, F> AllocVar<SWProjective<P>, <P::BaseField as Field>::BasePrimeField>
for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
fn new_variable<T: Borrow<SWProjective<P>>>(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let f = || Ok(*f()?.borrow());
match mode {
AllocationMode::Constant => Self::new_variable_omit_prime_order_check(cs, f, mode),
AllocationMode::Input => Self::new_variable_omit_prime_order_check(cs, f, mode),
AllocationMode::Witness => {
// if cofactor.is_even():
// divide until you've removed all even factors
// else:
// just directly use double and add.
let mut power_of_2: u32 = 0;
let mut cofactor = P::COFACTOR.to_vec();
while cofactor[0] % 2 == 0 {
div2(&mut cofactor);
power_of_2 += 1;
}
let cofactor_weight = BitIteratorBE::new(cofactor.as_slice())
.filter(|b| *b)
.count();
let modulus_minus_1 = (-P::ScalarField::one()).into_repr(); // r - 1
let modulus_minus_1_weight =
BitIteratorBE::new(modulus_minus_1).filter(|b| *b).count();
// We pick the most efficient method of performing the prime order check:
// If the cofactor has lower hamming weight than the scalar field's modulus,
// we first multiply by the inverse of the cofactor, and then, after allocating,
// multiply by the cofactor. This ensures the resulting point lies in the prime-order subgroup.
//
// Else, we multiply by the scalar field's modulus and ensure that the result
// equals the identity.
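// (Fewer set bits in the chosen multiplier means fewer conditional additions in the
// double-and-add loop below, which is why we compare Hamming weights.)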
let (mut ge, iter) = if cofactor_weight < modulus_minus_1_weight {
let ge = Self::new_variable_omit_prime_order_check(
ark_relations::ns!(cs, "Witness without subgroup check with cofactor mul"),
|| f().map(|g| g.borrow().into_affine().mul_by_cofactor_inv().into()),
mode,
)?;
(
ge,
BitIteratorBE::without_leading_zeros(cofactor.as_slice()),
)
} else {
let ge = Self::new_variable_omit_prime_order_check(
ark_relations::ns!(cs, "Witness without subgroup check with `r` check"),
|| {
f().map(|g| {
let g = g.into_affine();
let mut power_of_two = P::ScalarField::one().into_repr();
power_of_two.muln(power_of_2);
let power_of_two_inv = P::ScalarField::from_repr(power_of_two)
.and_then(|n| n.inverse())
.unwrap();
g.mul(power_of_two_inv)
})
},
mode,
)?;
(
ge,
BitIteratorBE::without_leading_zeros(modulus_minus_1.as_ref()),
)
};
// Remove the even part of the cofactor
for _ in 0..power_of_2 {
ge.double_in_place()?;
}
let mut result = Self::zero();
for b in iter {
result.double_in_place()?;
if b {
result += &ge
}
}
if cofactor_weight < modulus_minus_1_weight {
Ok(result)
} else {
// Enforce that `r * ge = 0`, i.e. that `(r - 1) * ge = -ge`.
ge.negate()?.enforce_equal(&result)?;
Ok(ge)
}
}
}
}
}
#[inline]
fn div2(limbs: &mut [u64]) {
let mut t = 0;
for i in limbs.iter_mut().rev() {
let t2 = *i << 63;
*i >>= 1;
*i |= t;
t = t2;
}
}
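// Editor's note: a minimal sanity check for `div2`, not part of the original file,
// assuming (as the callers above do) that the limbs are little-endian 64-bit words with
// the least-significant limb first.
#[cfg(test)]
mod div2_test {
    #[test]
    fn halves_a_two_limb_value() {
        // 2^64 + 2 halved is 2^63 + 1: the low bit of the high limb is carried
        // into the top bit of the low limb.
        let mut limbs = [2u64, 1u64];
        super::div2(&mut limbs);
        assert_eq!(limbs, [(1u64 << 63) + 1, 0]);
    }
}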
impl<P, F> ToBitsGadget<<P::BaseField as Field>::BasePrimeField> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bits_le(
&self,
) -> Result<Vec<Boolean<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let g = self.to_affine()?;
let mut bits = g.x.to_bits_le()?;
let y_bits = g.y.to_bits_le()?;
bits.extend_from_slice(&y_bits);
bits.push(g.infinity);
Ok(bits)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bits_le(
&self,
) -> Result<Vec<Boolean<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let g = self.to_affine()?;
let mut bits = g.x.to_non_unique_bits_le()?;
let y_bits = g.y.to_non_unique_bits_le()?;
bits.extend_from_slice(&y_bits);
bits.push(g.infinity);
Ok(bits)
}
}
impl<P, F> ToBytesGadget<<P::BaseField as Field>::BasePrimeField> for ProjectiveVar<P, F>
where
P: SWModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bytes(
&self,
) -> Result<Vec<UInt8<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let g = self.to_affine()?;
let mut bytes = g.x.to_bytes()?;
let y_bytes = g.y.to_bytes()?;
let inf_bytes = g.infinity.to_bytes()?;
bytes.extend_from_slice(&y_bytes);
bytes.extend_from_slice(&inf_bytes);
Ok(bytes)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(
&self,
) -> Result<Vec<UInt8<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let g = self.to_affine()?;
let mut bytes = g.x.to_non_unique_bytes()?;
let y_bytes = g.y.to_non_unique_bytes()?;
let inf_bytes = g.infinity.to_non_unique_bytes()?;
bytes.extend_from_slice(&y_bytes);
bytes.extend_from_slice(&inf_bytes);
Ok(bytes)
}
}

View File

@@ -0,0 +1,921 @@
use ark_ec::{
twisted_edwards_extended::{GroupAffine as TEAffine, GroupProjective as TEProjective},
AffineCurve, MontgomeryModelParameters, ProjectiveCurve, TEModelParameters,
};
use ark_ff::{BigInteger, BitIteratorBE, Field, One, PrimeField, Zero};
use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
use crate::{prelude::*, ToConstraintFieldGadget, Vec};
use crate::fields::fp::FpVar;
use core::{borrow::Borrow, marker::PhantomData};
/// An implementation of arithmetic for Montgomery curves that relies on
/// incomplete addition formulae for the affine model, as outlined in the
/// [EFD](https://www.hyperelliptic.org/EFD/g1p/auto-montgom.html).
///
/// This is intended for use primarily for implementing efficient
/// multi-scalar-multiplication in the Bowe-Hopwood-Pedersen hash.
#[derive(Derivative)]
#[derivative(Debug, Clone)]
#[must_use]
pub struct MontgomeryAffineVar<
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
> where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// The x-coordinate.
pub x: F,
/// The y-coordinate.
pub y: F,
#[derivative(Debug = "ignore")]
_params: PhantomData<P>,
}
mod montgomery_affine_impl {
use super::*;
use ark_ec::twisted_edwards_extended::GroupAffine;
use ark_ff::Field;
use core::ops::Add;
impl<P, F> R1CSVar<<P::BaseField as Field>::BasePrimeField> for MontgomeryAffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
type Value = (P::BaseField, P::BaseField);
fn cs(&self) -> ConstraintSystemRef<<P::BaseField as Field>::BasePrimeField> {
self.x.cs().or(self.y.cs())
}
fn value(&self) -> Result<Self::Value, SynthesisError> {
let x = self.x.value()?;
let y = self.y.value()?;
Ok((x, y))
}
}
impl<
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
> MontgomeryAffineVar<P, F>
where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// Constructs `Self` from an `(x, y)` coordinate pair.
pub fn new(x: F, y: F) -> Self {
Self {
x,
y,
_params: PhantomData,
}
}
/// Converts a Twisted Edwards curve point to coordinates for the
/// corresponding affine Montgomery curve point.
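/// Concretely, this computes `u = (1 + y) / (1 - y)` and `v = u / x`, sending the
/// exceptional points `y = 1` to the identity and `x = 0` to `(0, 0)`.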
#[tracing::instrument(target = "r1cs")]
pub fn from_edwards_to_coords(
p: &TEAffine<P>,
) -> Result<(P::BaseField, P::BaseField), SynthesisError> {
let montgomery_point: GroupAffine<P> = if p.y == P::BaseField::one() {
GroupAffine::zero()
} else if p.x == P::BaseField::zero() {
GroupAffine::new(P::BaseField::zero(), P::BaseField::zero())
} else {
let u =
(P::BaseField::one() + &p.y) * &(P::BaseField::one() - &p.y).inverse().unwrap();
let v = u * &p.x.inverse().unwrap();
GroupAffine::new(u, v)
};
Ok((montgomery_point.x, montgomery_point.y))
}
/// Allocates a new witness variable containing the Montgomery representation
/// of the Twisted Edwards curve point `p`.
#[tracing::instrument(target = "r1cs")]
pub fn new_witness_from_edwards(
cs: ConstraintSystemRef<<P::BaseField as Field>::BasePrimeField>,
p: &TEAffine<P>,
) -> Result<Self, SynthesisError> {
let montgomery_coords = Self::from_edwards_to_coords(p)?;
let u = F::new_witness(ark_relations::ns!(cs, "u"), || Ok(montgomery_coords.0))?;
let v = F::new_witness(ark_relations::ns!(cs, "v"), || Ok(montgomery_coords.1))?;
Ok(Self::new(u, v))
}
/// Converts `self` into a Twisted Edwards curve point variable.
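/// Concretely, it witnesses `u = x / y` and `v = (x - 1) / (x + 1)`, enforcing each
/// relation via `mul_equals`.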
#[tracing::instrument(target = "r1cs")]
pub fn into_edwards(&self) -> Result<AffineVar<P, F>, SynthesisError> {
let cs = self.cs();
// Compute u = x / y
let u = F::new_witness(ark_relations::ns!(cs, "u"), || {
let y_inv = self
.y
.value()?
.inverse()
.ok_or(SynthesisError::DivisionByZero)?;
Ok(self.x.value()? * &y_inv)
})?;
u.mul_equals(&self.y, &self.x)?;
let v = F::new_witness(ark_relations::ns!(cs, "v"), || {
let mut t0 = self.x.value()?;
let mut t1 = t0;
t0 -= &P::BaseField::one();
t1 += &P::BaseField::one();
Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?)
})?;
let xplusone = &self.x + P::BaseField::one();
let xminusone = &self.x - P::BaseField::one();
v.mul_equals(&xplusone, &xminusone)?;
Ok(AffineVar::new(u, v))
}
}
impl<'a, P, F> Add<&'a MontgomeryAffineVar<P, F>> for MontgomeryAffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
type Output = MontgomeryAffineVar<P, F>;
#[tracing::instrument(target = "r1cs")]
fn add(self, other: &'a Self) -> Self::Output {
let cs = [&self, other].cs();
let mode = if cs.is_none() {
AllocationMode::Constant
} else {
AllocationMode::Witness
};
let coeff_b = P::MontgomeryModelParameters::COEFF_B;
let coeff_a = P::MontgomeryModelParameters::COEFF_A;
let lambda = F::new_variable(
ark_relations::ns!(cs, "lambda"),
|| {
let n = other.y.value()? - &self.y.value()?;
let d = other.x.value()? - &self.x.value()?;
Ok(n * &d.inverse().ok_or(SynthesisError::DivisionByZero)?)
},
mode,
)
.unwrap();
let lambda_n = &other.y - &self.y;
let lambda_d = &other.x - &self.x;
lambda_d.mul_equals(&lambda, &lambda_n).unwrap();
// Compute x'' = B*lambda^2 - A - x - x'
let xprime = F::new_variable(
ark_relations::ns!(cs, "xprime"),
|| {
Ok(lambda.value()?.square() * &coeff_b
- &coeff_a
- &self.x.value()?
- &other.x.value()?)
},
mode,
)
.unwrap();
let xprime_lc = &self.x + &other.x + &xprime + coeff_a;
// (B * lambda) * (lambda) = (A + x + x' + x'')
let lambda_b = &lambda * coeff_b;
lambda_b.mul_equals(&lambda, &xprime_lc).unwrap();
let yprime = F::new_variable(
ark_relations::ns!(cs, "yprime"),
|| {
Ok(-(self.y.value()?
+ &(lambda.value()? * &(xprime.value()? - &self.x.value()?))))
},
mode,
)
.unwrap();
let xres = &self.x - &xprime;
let yres = &self.y + &yprime;
lambda.mul_equals(&xres, &yres).unwrap();
MontgomeryAffineVar::new(xprime, yprime)
}
}
}
/// An implementation of arithmetic for Twisted Edwards curves that relies on
/// the complete formulae for the affine model, as outlined in the
/// [EFD](https://www.hyperelliptic.org/EFD/g1p/auto-twisted.html).
#[derive(Derivative)]
#[derivative(Debug, Clone)]
#[must_use]
pub struct AffineVar<
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
> where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// The x-coordinate.
pub x: F,
/// The y-coordinate.
pub y: F,
#[derivative(Debug = "ignore")]
_params: PhantomData<P>,
}
impl<P: TEModelParameters, F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>>
AffineVar<P, F>
where
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// Constructs `Self` from an `(x, y)` coordinate pair.
pub fn new(x: F, y: F) -> Self {
Self {
x,
y,
_params: PhantomData,
}
}
/// Allocates a new variable without performing an on-curve check, which is
/// useful if the variable is known to be on the curve (e.g., if the point
/// is a constant or is a public input).
#[tracing::instrument(target = "r1cs", skip(cs, f))]
pub fn new_variable_omit_on_curve_check<T: Into<TEAffine<P>>>(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let (x, y) = match f() {
Ok(ge) => {
let ge: TEAffine<P> = ge.into();
(Ok(ge.x), Ok(ge.y))
}
_ => (
Err(SynthesisError::AssignmentMissing),
Err(SynthesisError::AssignmentMissing),
),
};
let x = F::new_variable(ark_relations::ns!(cs, "x"), || x, mode)?;
let y = F::new_variable(ark_relations::ns!(cs, "y"), || y, mode)?;
Ok(Self::new(x, y))
}
}
impl<P: TEModelParameters, F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>>
AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>
+ ThreeBitCondNegLookupGadget<
<P::BaseField as Field>::BasePrimeField,
TableConstant = P::BaseField,
>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
/// Computes a scalar multiplication of `bases` with respect to `scalars`,
/// where the elements of `scalars` are length-three slices of bits, such
/// that the first two bits are used to select one of the bases, while the
/// third bit is used to conditionally negate that selection.
#[tracing::instrument(target = "r1cs", skip(bases, scalars))]
pub fn precomputed_base_3_bit_signed_digit_scalar_mul<J>(
bases: &[impl Borrow<[TEProjective<P>]>],
scalars: &[impl Borrow<[J]>],
) -> Result<Self, SynthesisError>
where
J: Borrow<[Boolean<<P::BaseField as Field>::BasePrimeField>]>,
{
const CHUNK_SIZE: usize = 3;
let mut ed_result: Option<AffineVar<P, F>> = None;
let mut result: Option<MontgomeryAffineVar<P, F>> = None;
let mut process_segment_result = |result: &MontgomeryAffineVar<P, F>| {
let sgmt_result = result.into_edwards()?;
ed_result = match ed_result.as_ref() {
None => Some(sgmt_result),
Some(r) => Some(sgmt_result + r),
};
Ok::<(), SynthesisError>(())
};
// Compute ∏(h_i^{m_i}) for all i.
for (segment_bits_chunks, segment_powers) in scalars.iter().zip(bases) {
for (bits, base_power) in segment_bits_chunks
.borrow()
.iter()
.zip(segment_powers.borrow())
{
let base_power = base_power.borrow();
let mut acc_power = *base_power;
let mut coords = vec![];
for _ in 0..4 {
coords.push(acc_power);
acc_power += base_power;
}
let bits = bits.borrow().to_bits_le()?;
if bits.len() != CHUNK_SIZE {
return Err(SynthesisError::Unsatisfiable);
}
let coords = coords
.iter()
.map(|p| MontgomeryAffineVar::from_edwards_to_coords(&p.into_affine()))
.collect::<Result<Vec<_>, _>>()?;
let x_coeffs = coords.iter().map(|p| p.0).collect::<Vec<_>>();
let y_coeffs = coords.iter().map(|p| p.1).collect::<Vec<_>>();
let precomp = bits[0].and(&bits[1])?;
let x = F::zero()
+ x_coeffs[0]
+ F::from(bits[0].clone()) * (x_coeffs[1] - &x_coeffs[0])
+ F::from(bits[1].clone()) * (x_coeffs[2] - &x_coeffs[0])
+ F::from(precomp.clone())
* (x_coeffs[3] - &x_coeffs[2] - &x_coeffs[1] + &x_coeffs[0]);
let y = F::three_bit_cond_neg_lookup(&bits, &precomp, &y_coeffs)?;
let tmp = MontgomeryAffineVar::new(x, y);
result = match result.as_ref() {
None => Some(tmp),
Some(r) => Some(tmp + r),
};
}
process_segment_result(&result.unwrap())?;
result = None;
}
if result.is_some() {
process_segment_result(&result.unwrap())?;
}
Ok(ed_result.unwrap())
}
}
impl<P, F> R1CSVar<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
type Value = TEProjective<P>;
fn cs(&self) -> ConstraintSystemRef<<P::BaseField as Field>::BasePrimeField> {
self.x.cs().or(self.y.cs())
}
#[inline]
fn value(&self) -> Result<TEProjective<P>, SynthesisError> {
let (x, y) = (self.x.value()?, self.y.value()?);
let result = TEAffine::new(x, y);
Ok(result.into())
}
}
impl<P, F> CurveVar<TEProjective<P>, <P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
fn constant(g: TEProjective<P>) -> Self {
let cs = ConstraintSystemRef::None;
Self::new_variable_omit_on_curve_check(cs, || Ok(g), AllocationMode::Constant).unwrap()
}
fn zero() -> Self {
Self::new(F::zero(), F::one())
}
fn is_zero(&self) -> Result<Boolean<<P::BaseField as Field>::BasePrimeField>, SynthesisError> {
// The identity of a twisted Edwards curve is the point (0, 1).
self.x.is_zero()?.and(&self.y.is_one()?)
}
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable_omit_prime_order_check(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<TEProjective<P>, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let g = Self::new_variable_omit_on_curve_check(cs, f, mode)?;
if mode != AllocationMode::Constant {
let d = P::COEFF_D;
let a = P::COEFF_A;
// Check that ax^2 + y^2 = 1 + dx^2y^2
// We do this by checking that ax^2 - 1 = y^2 * (dx^2 - 1)
let x2 = g.x.square()?;
let y2 = g.y.square()?;
let one = P::BaseField::one();
let d_x2_minus_one = &x2 * d - one;
let a_x2_minus_one = &x2 * a - one;
d_x2_minus_one.mul_equals(&y2, &a_x2_minus_one)?;
}
Ok(g)
}
/// Enforce that `self` is in the prime-order subgroup.
///
/// Does so by multiplying by the prime order, and checking that the result
/// is unchanged.
#[tracing::instrument(target = "r1cs")]
fn enforce_prime_order(&self) -> Result<(), SynthesisError> {
let r_minus_1 = (-P::ScalarField::one()).into_repr();
let mut result = Self::zero();
for b in BitIteratorBE::without_leading_zeros(r_minus_1) {
result.double_in_place()?;
if b {
result += self;
}
}
self.negate()?.enforce_equal(&result)?;
Ok(())
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn double_in_place(&mut self) -> Result<(), SynthesisError> {
if self.is_constant() {
let value = self.value()?;
*self = Self::constant(value.double());
} else {
let cs = self.cs();
let a = P::COEFF_A;
// xy
let xy = &self.x * &self.y;
let x2 = self.x.square()?;
let y2 = self.y.square()?;
let a_x2 = &x2 * a;
// Compute x3 = (2xy) / (ax^2 + y^2)
let x3 = F::new_witness(ark_relations::ns!(cs, "x3"), || {
let t0 = xy.value()?.double();
let t1 = a * &x2.value()? + &y2.value()?;
Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?)
})?;
let a_x2_plus_y2 = &a_x2 + &y2;
let two_xy = xy.double()?;
x3.mul_equals(&a_x2_plus_y2, &two_xy)?;
// Compute y3 = (y^2 - ax^2) / (2 - ax^2 - y^2)
let two = P::BaseField::one().double();
let y3 = F::new_witness(ark_relations::ns!(cs, "y3"), || {
let a_x2 = a * &x2.value()?;
let t0 = y2.value()? - &a_x2;
let t1 = two - &a_x2 - &y2.value()?;
Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?)
})?;
let y2_minus_a_x2 = &y2 - &a_x2;
let two_minus_ax2_minus_y2 = (&a_x2 + &y2).negate()? + two;
y3.mul_equals(&two_minus_ax2_minus_y2, &y2_minus_a_x2)?;
self.x = x3;
self.y = y3;
}
Ok(())
}
#[tracing::instrument(target = "r1cs")]
fn negate(&self) -> Result<Self, SynthesisError> {
Ok(Self::new(self.x.negate()?, self.y.clone()))
}
#[tracing::instrument(target = "r1cs", skip(scalar_bits_with_base_powers))]
fn precomputed_base_scalar_mul_le<'a, I, B>(
&mut self,
scalar_bits_with_base_powers: I,
) -> Result<(), SynthesisError>
where
I: Iterator<Item = (B, &'a TEProjective<P>)>,
B: Borrow<Boolean<<P::BaseField as Field>::BasePrimeField>>,
{
let scalar_bits_with_base_powers = scalar_bits_with_base_powers
.map(|(bit, base)| (bit.borrow().clone(), (*base).into()))
.collect::<Vec<(_, TEProjective<P>)>>();
let zero = TEProjective::zero();
for bits_base_powers in scalar_bits_with_base_powers.chunks(2) {
if bits_base_powers.len() == 2 {
let bits = [bits_base_powers[0].0.clone(), bits_base_powers[1].0.clone()];
let base_powers = [&bits_base_powers[0].1, &bits_base_powers[1].1];
let mut table = [
zero,
*base_powers[0],
*base_powers[1],
*base_powers[0] + base_powers[1],
];
TEProjective::batch_normalization(&mut table);
let x_s = [table[0].x, table[1].x, table[2].x, table[3].x];
let y_s = [table[0].y, table[1].y, table[2].y, table[3].y];
let x = F::two_bit_lookup(&bits, &x_s)?;
let y = F::two_bit_lookup(&bits, &y_s)?;
*self += Self::new(x, y);
} else if bits_base_powers.len() == 1 {
let bit = bits_base_powers[0].0.clone();
let base_power = bits_base_powers[0].1;
let new_encoded = &*self + base_power;
*self = bit.select(&new_encoded, &self)?;
}
}
Ok(())
}
}
impl<P, F> AllocVar<TEProjective<P>, <P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<Point: Borrow<TEProjective<P>>>(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<Point, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
let ns = cs.into();
let cs = ns.cs();
let f = || Ok(*f()?.borrow());
match mode {
AllocationMode::Constant => Self::new_variable_omit_prime_order_check(cs, f, mode),
AllocationMode::Input => Self::new_variable_omit_prime_order_check(cs, f, mode),
AllocationMode::Witness => {
// if cofactor.is_even():
// divide until you've removed all even factors
// else:
// just directly use double and add.
let mut power_of_2: u32 = 0;
let mut cofactor = P::COFACTOR.to_vec();
while cofactor[0] % 2 == 0 {
div2(&mut cofactor);
power_of_2 += 1;
}
let cofactor_weight = BitIteratorBE::new(cofactor.as_slice())
.filter(|b| *b)
.count();
let modulus_minus_1 = (-P::ScalarField::one()).into_repr(); // r - 1
let modulus_minus_1_weight =
BitIteratorBE::new(modulus_minus_1).filter(|b| *b).count();
// We pick the most efficient method of performing the prime order check:
// If the cofactor has lower hamming weight than the scalar field's modulus,
// we first multiply by the inverse of the cofactor, and then, after allocating,
// multiply by the cofactor. This ensures the resulting point lies in the prime-order subgroup.
//
// Else, we multiply by the scalar field's modulus and ensure that the result
// equals the identity.
let (mut ge, iter) = if cofactor_weight < modulus_minus_1_weight {
let ge = Self::new_variable_omit_prime_order_check(
ark_relations::ns!(cs, "Witness without subgroup check with cofactor mul"),
|| f().map(|g| g.borrow().into_affine().mul_by_cofactor_inv().into()),
mode,
)?;
(
ge,
BitIteratorBE::without_leading_zeros(cofactor.as_slice()),
)
} else {
let ge = Self::new_variable_omit_prime_order_check(
ark_relations::ns!(cs, "Witness without subgroup check with `r` check"),
|| {
f().map(|g| {
let g = g.into_affine();
let mut power_of_two = P::ScalarField::one().into_repr();
power_of_two.muln(power_of_2);
let power_of_two_inv = P::ScalarField::from_repr(power_of_two)
.and_then(|n| n.inverse())
.unwrap();
g.mul(power_of_two_inv)
})
},
mode,
)?;
(
ge,
BitIteratorBE::without_leading_zeros(modulus_minus_1.as_ref()),
)
};
// Remove the even part of the cofactor
for _ in 0..power_of_2 {
ge.double_in_place()?;
}
let mut result = Self::zero();
for b in iter {
result.double_in_place()?;
if b {
result += &ge;
}
}
if cofactor_weight < modulus_minus_1_weight {
Ok(result)
} else {
// Enforce that `r * ge = 0`, i.e. that `(r - 1) * ge = -ge`.
ge.negate()?.enforce_equal(&result)?;
Ok(ge)
}
}
}
}
}
impl<P, F> AllocVar<TEAffine<P>, <P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs", skip(cs, f))]
fn new_variable<Point: Borrow<TEAffine<P>>>(
cs: impl Into<Namespace<<P::BaseField as Field>::BasePrimeField>>,
f: impl FnOnce() -> Result<Point, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
Self::new_variable(cs, || f().map(|b| b.borrow().into_projective()), mode)
}
}
impl<P, F> ToConstraintFieldGadget<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
F: ToConstraintFieldGadget<<P::BaseField as Field>::BasePrimeField>,
{
fn to_constraint_field(
&self,
) -> Result<Vec<FpVar<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let mut res = Vec::new();
res.extend_from_slice(&self.x.to_constraint_field()?);
res.extend_from_slice(&self.y.to_constraint_field()?);
Ok(res)
}
}
#[inline]
fn div2(limbs: &mut [u64]) {
let mut t = 0;
for i in limbs.iter_mut().rev() {
let t2 = *i << 63;
*i >>= 1;
*i |= t;
t = t2;
}
}
impl_bounded_ops!(
AffineVar<P, F>,
TEProjective<P>,
Add,
add,
AddAssign,
add_assign,
|this: &'a AffineVar<P, F>, other: &'a AffineVar<P, F>| {
if [this, other].is_constant() {
assert!(this.is_constant() && other.is_constant());
AffineVar::constant(this.value().unwrap() + &other.value().unwrap())
} else {
let cs = [this, other].cs();
let a = P::COEFF_A;
let d = P::COEFF_D;
// Compute U = (-A * x1 + y1) * (x2 + y2)
let u1 = (&this.x * -a) + &this.y;
let u2 = &other.x + &other.y;
let u = u1 * &u2;
// Compute v0 = x1 * y2
let v0 = &other.y * &this.x;
// Compute v1 = x2 * y1
let v1 = &other.x * &this.y;
// Compute v2 = d * v0 * v1
let v2 = &v0 * &v1 * d;
// Compute x3 = (v0 + v1) / (1 + v2)
let x3 = F::new_witness(ark_relations::ns!(cs, "x3"), || {
let t0 = v0.value()? + &v1.value()?;
let t1 = P::BaseField::one() + &v2.value()?;
Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?)
}).unwrap();
let v2_plus_one = &v2 + P::BaseField::one();
let v0_plus_v1 = &v0 + &v1;
x3.mul_equals(&v2_plus_one, &v0_plus_v1).unwrap();
// Compute y3 = (U + a * v0 - v1) / (1 - v2)
let y3 = F::new_witness(ark_relations::ns!(cs, "y3"), || {
let t0 = u.value()? + &(a * &v0.value()?) - &v1.value()?;
let t1 = P::BaseField::one() - &v2.value()?;
Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?)
}).unwrap();
let one_minus_v2 = (&v2 - P::BaseField::one()).negate().unwrap();
let a_v0 = &v0 * a;
let u_plus_a_v0_minus_v1 = &u + &a_v0 - &v1;
y3.mul_equals(&one_minus_v2, &u_plus_a_v0_minus_v1).unwrap();
AffineVar::new(x3, y3)
}
},
|this: &'a AffineVar<P, F>, other: TEProjective<P>| this + AffineVar::constant(other),
(
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
P: TEModelParameters,
),
for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
);
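// Editor's note: an illustrative sketch, not part of the original file, checking that
// in-circuit addition of *constant* points agrees with native addition; `P` and `F`
// stand for whatever concrete curve parameters and base-field gadget a caller picks.
#[cfg(test)]
#[allow(dead_code)]
fn constant_add_matches_native<P, F>(
    a: TEProjective<P>,
    b: TEProjective<P>,
) -> Result<(), SynthesisError>
where
    P: TEModelParameters,
    F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
        + TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
    for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>,
{
    // Constants live outside of any constraint system, so this generates no constraints.
    let sum = AffineVar::<P, F>::constant(a) + AffineVar::<P, F>::constant(b);
    let mut expected = a;
    expected += &b;
    assert_eq!(sum.value()?, expected);
    Ok(())
}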
impl_bounded_ops!(
AffineVar<P, F>,
TEProjective<P>,
Sub,
sub,
SubAssign,
sub_assign,
|this: &'a AffineVar<P, F>, other: &'a AffineVar<P, F>| this + other.negate().unwrap(),
|this: &'a AffineVar<P, F>, other: TEProjective<P>| this - AffineVar::constant(other),
(
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
P: TEModelParameters,
),
for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>
);
impl<'a, P, F> GroupOpsBounds<'a, TEProjective<P>, AffineVar<P, F>> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
}
impl<'a, P, F> GroupOpsBounds<'a, TEProjective<P>, AffineVar<P, F>> for &'a AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>
+ TwoBitLookupGadget<<P::BaseField as Field>::BasePrimeField, TableConstant = P::BaseField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
}
impl<P, F> CondSelectGadget<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditionally_select(
cond: &Boolean<<P::BaseField as Field>::BasePrimeField>,
true_value: &Self,
false_value: &Self,
) -> Result<Self, SynthesisError> {
let x = cond.select(&true_value.x, &false_value.x)?;
let y = cond.select(&true_value.y, &false_value.y)?;
Ok(Self::new(x, y))
}
}
impl<P, F> EqGadget<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs")]
fn is_eq(
&self,
other: &Self,
) -> Result<Boolean<<P::BaseField as Field>::BasePrimeField>, SynthesisError> {
let x_equal = self.x.is_eq(&other.x)?;
let y_equal = self.y.is_eq(&other.y)?;
x_equal.and(&y_equal)
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_equal(
&self,
other: &Self,
condition: &Boolean<<P::BaseField as Field>::BasePrimeField>,
) -> Result<(), SynthesisError> {
self.x.conditional_enforce_equal(&other.x, condition)?;
self.y.conditional_enforce_equal(&other.y, condition)?;
Ok(())
}
#[inline]
#[tracing::instrument(target = "r1cs")]
fn conditional_enforce_not_equal(
&self,
other: &Self,
condition: &Boolean<<P::BaseField as Field>::BasePrimeField>,
) -> Result<(), SynthesisError> {
self.is_eq(other)?
.and(condition)?
.enforce_equal(&Boolean::Constant(false))
}
}
impl<P, F> ToBitsGadget<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bits_le(
&self,
) -> Result<Vec<Boolean<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let mut x_bits = self.x.to_bits_le()?;
let y_bits = self.y.to_bits_le()?;
x_bits.extend_from_slice(&y_bits);
Ok(x_bits)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bits_le(
&self,
) -> Result<Vec<Boolean<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let mut x_bits = self.x.to_non_unique_bits_le()?;
let y_bits = self.y.to_non_unique_bits_le()?;
x_bits.extend_from_slice(&y_bits);
Ok(x_bits)
}
}
impl<P, F> ToBytesGadget<<P::BaseField as Field>::BasePrimeField> for AffineVar<P, F>
where
P: TEModelParameters,
F: FieldVar<P::BaseField, <P::BaseField as Field>::BasePrimeField>,
for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>,
{
#[tracing::instrument(target = "r1cs")]
fn to_bytes(
&self,
) -> Result<Vec<UInt8<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let mut x_bytes = self.x.to_bytes()?;
let y_bytes = self.y.to_bytes()?;
x_bytes.extend_from_slice(&y_bytes);
Ok(x_bytes)
}
#[tracing::instrument(target = "r1cs")]
fn to_non_unique_bytes(
&self,
) -> Result<Vec<UInt8<<P::BaseField as Field>::BasePrimeField>>, SynthesisError> {
let mut x_bytes = self.x.to_non_unique_bytes()?;
let y_bytes = self.y.to_non_unique_bytes()?;
x_bytes.extend_from_slice(&y_bytes);
Ok(x_bytes)
}
}

148
src/groups/mod.rs Normal file

@@ -0,0 +1,148 @@
use crate::prelude::*;
use ark_ec::ProjectiveCurve;
use ark_ff::Field;
use ark_relations::r1cs::{Namespace, SynthesisError};
use core::ops::{Add, AddAssign, Sub, SubAssign};
use core::{borrow::Borrow, fmt::Debug};
/// This module contains implementations of arithmetic for various curve models.
pub mod curves;
pub use self::curves::short_weierstrass::{bls12, mnt4, mnt6};
/// A hack used to work around the lack of implied bounds.
pub trait GroupOpsBounds<'a, F, T: 'a>:
Sized
+ Add<&'a T, Output = T>
+ Sub<&'a T, Output = T>
+ Add<T, Output = T>
+ Sub<T, Output = T>
+ Add<F, Output = T>
+ Sub<F, Output = T>
{
}
/// A variable that represents a curve point for
/// the curve `C`.
pub trait CurveVar<C: ProjectiveCurve, ConstraintF: Field>:
'static
+ Sized
+ Clone
+ Debug
+ R1CSVar<ConstraintF, Value = C>
+ ToBitsGadget<ConstraintF>
+ ToBytesGadget<ConstraintF>
+ EqGadget<ConstraintF>
+ CondSelectGadget<ConstraintF>
+ AllocVar<C, ConstraintF>
+ AllocVar<C::Affine, ConstraintF>
+ for<'a> GroupOpsBounds<'a, C, Self>
+ for<'a> AddAssign<&'a Self>
+ for<'a> SubAssign<&'a Self>
+ AddAssign<C>
+ SubAssign<C>
+ AddAssign<Self>
+ SubAssign<Self>
{
/// Returns the identity (zero) element of the group.
fn zero() -> Self;
/// Returns a `Boolean` representing whether `self == Self::zero()`.
#[tracing::instrument(target = "r1cs")]
fn is_zero(&self) -> Result<Boolean<ConstraintF>, SynthesisError> {
self.is_eq(&Self::zero())
}
/// Returns a constant whose value is `other`.
///
/// This *should not* allocate any variables.
fn constant(other: C) -> Self;
/// Allocates a variable in the group without checking that its value lies in
/// the prime-order subgroup.
fn new_variable_omit_prime_order_check(
cs: impl Into<Namespace<ConstraintF>>,
f: impl FnOnce() -> Result<C, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError>;
/// Enforce that `self` is in the prime-order subgroup.
fn enforce_prime_order(&self) -> Result<(), SynthesisError>;
/// Computes `self + self`.
#[tracing::instrument(target = "r1cs")]
fn double(&self) -> Result<Self, SynthesisError> {
let mut result = self.clone();
result.double_in_place()?;
Ok(result)
}
/// Sets `self = self + self`.
fn double_in_place(&mut self) -> Result<(), SynthesisError>;
/// Computes `-self`.
fn negate(&self) -> Result<Self, SynthesisError>;
/// Computes `bits * self`, where `bits` is a little-endian
/// `Boolean` representation of a scalar.
#[tracing::instrument(target = "r1cs", skip(bits))]
fn scalar_mul_le<'a>(
&self,
bits: impl Iterator<Item = &'a Boolean<ConstraintF>>,
) -> Result<Self, SynthesisError> {
let mut res = Self::zero();
let mut multiple = self.clone();
for bit in bits {
let tmp = res.clone() + &multiple;
res = bit.select(&tmp, &res)?;
multiple.double_in_place()?;
}
Ok(res)
}
/// Computes `I * self` in place, where `I` is the *little-endian* `Boolean`
/// representation of the scalar.
///
/// The base powers are precomputed power-of-two multiples of a single
/// base.
#[tracing::instrument(target = "r1cs", skip(scalar_bits_with_base_powers))]
fn precomputed_base_scalar_mul_le<'a, I, B>(
&mut self,
scalar_bits_with_base_powers: I,
) -> Result<(), SynthesisError>
where
I: Iterator<Item = (B, &'a C)>,
B: Borrow<Boolean<ConstraintF>>,
C: 'a,
{
for (bit, base_power) in scalar_bits_with_base_powers {
let new_encoded = self.clone() + *base_power;
*self = bit.borrow().select(&new_encoded, self)?;
}
Ok(())
}
/// Computes `\sum_j I_j * B_j`, where `I_j` is the `Boolean`
/// representation of the j-th scalar.
#[tracing::instrument(target = "r1cs", skip(bases, scalars))]
fn precomputed_base_multiscalar_mul_le<'a, T, I, B>(
bases: &[B],
scalars: I,
) -> Result<Self, SynthesisError>
where
T: 'a + ToBitsGadget<ConstraintF> + ?Sized,
I: Iterator<Item = &'a T>,
B: Borrow<[C]>,
{
let mut result = Self::zero();
// Accumulate \sum_j I_j * B_j term by term.
for (bits, base_powers) in scalars.zip(bases) {
let base_powers = base_powers.borrow();
let bits = bits.to_bits_le()?;
result.precomputed_base_scalar_mul_le(bits.iter().zip(base_powers))?;
}
Ok(result)
}
}
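A minimal usage sketch of `CurveVar` (the helper and its generics are illustrative, not part of this crate; it relies only on `FpVar`, `ToBitsGadget`, and `scalar_mul_le` as declared above): multiply a point variable by the bits of a constraint-field element.
use crate::{fields::fp::FpVar, prelude::*};
use ark_ec::ProjectiveCurve;
use ark_ff::PrimeField;
use ark_relations::r1cs::SynthesisError;

// Hypothetical helper: interprets the bits of `scalar` (an element of the
// constraint field) as a little-endian scalar and runs double-and-add in-circuit.
fn scalar_mul_sketch<C: ProjectiveCurve, CF: PrimeField, GG: CurveVar<C, CF>>(
    base: &GG,
    scalar: &FpVar<CF>,
) -> Result<GG, SynthesisError> {
    let bits = scalar.to_bits_le()?; // unique little-endian Boolean decomposition
    base.scalar_mul_le(bits.iter())
}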

143
src/lib.rs Normal file

@@ -0,0 +1,143 @@
#![cfg_attr(not(feature = "std"), no_std)]
//! This crate implements common "gadgets" that make
//! programming rank-1 constraint systems easier.
#![deny(
warnings,
unused,
future_incompatible,
nonstandard_style,
rust_2018_idioms
)]
#[macro_use]
extern crate ark_std;
#[macro_use]
extern crate ark_relations;
#[doc(hidden)]
#[macro_use]
extern crate derivative;
/// Some utility macros for making downstream impls easier.
#[macro_use]
pub mod macros;
pub(crate) use ark_std::vec::Vec;
use ark_ff::Field;
/// This module implements gadgets related to bit manipulation, such as
/// `Boolean` and `UInt`s.
pub mod bits;
pub use self::bits::*;
/// This module implements gadgets related to field arithmetic.
pub mod fields;
/// This module implements gadgets related to group arithmetic, and specifically
/// elliptic curve arithmetic.
pub mod groups;
/// This module implements gadgets related to computing pairings in bilinear
/// groups.
pub mod pairing;
/// This module describes a trait for allocating new variables in a constraint
/// system.
pub mod alloc;
/// This module describes a trait for checking equality of variables.
pub mod eq;
/// This module describes traits for conditionally selecting a variable from a
/// list of variables.
pub mod select;
#[allow(missing_docs)]
pub mod prelude {
pub use crate::{
alloc::*,
bits::{boolean::Boolean, uint32::UInt32, uint8::UInt8, ToBitsGadget, ToBytesGadget},
eq::*,
fields::{FieldOpsBounds, FieldVar},
groups::{CurveVar, GroupOpsBounds},
pairing::PairingVar,
select::*,
R1CSVar,
};
}
/// This trait describes some core functionality that is common to high-level
/// variables, such as `Boolean`s, `FieldVar`s, `GroupVar`s, etc.
pub trait R1CSVar<F: Field> {
/// The type of the "native" value that `Self` represents in the constraint
/// system.
type Value: core::fmt::Debug + Eq + Clone;
/// Returns the underlying `ConstraintSystemRef`.
///
/// If `self` is a constant value, then this *must* return
/// `ark_relations::r1cs::ConstraintSystemRef::None`.
fn cs(&self) -> ark_relations::r1cs::ConstraintSystemRef<F>;
/// Returns `true` if `self` is a circuit-generation-time constant.
fn is_constant(&self) -> bool {
self.cs().is_none()
}
/// Returns the value that is assigned to `self` in the underlying
/// `ConstraintSystem`.
fn value(&self) -> Result<Self::Value, ark_relations::r1cs::SynthesisError>;
}
impl<F: Field, T: R1CSVar<F>> R1CSVar<F> for [T] {
type Value = Vec<T::Value>;
fn cs(&self) -> ark_relations::r1cs::ConstraintSystemRef<F> {
let mut result = ark_relations::r1cs::ConstraintSystemRef::None;
for var in self {
result = var.cs().or(result);
}
result
}
fn value(&self) -> Result<Self::Value, ark_relations::r1cs::SynthesisError> {
let mut result = Vec::new();
for var in self {
result.push(var.value()?);
}
Ok(result)
}
}
impl<'a, F: Field, T: 'a + R1CSVar<F>> R1CSVar<F> for &'a T {
type Value = T::Value;
fn cs(&self) -> ark_relations::r1cs::ConstraintSystemRef<F> {
(*self).cs()
}
fn value(&self) -> Result<Self::Value, ark_relations::r1cs::SynthesisError> {
(*self).value()
}
}
/// A utility trait to convert `Self` to `Result<T, SynthesisError>`.
pub trait Assignment<T> {
/// Converts `self` to `Result`.
fn get(self) -> Result<T, ark_relations::r1cs::SynthesisError>;
}
impl<T> Assignment<T> for Option<T> {
fn get(self) -> Result<T, ark_relations::r1cs::SynthesisError> {
self.ok_or(ark_relations::r1cs::SynthesisError::AssignmentMissing)
}
}
/// Specifies how to convert a variable of type `Self` to variables of
/// type `FpVar<ConstraintF>`
pub trait ToConstraintFieldGadget<ConstraintF: ark_ff::PrimeField> {
/// Converts `self` to `FpVar<ConstraintF>` variables.
fn to_constraint_field(
&self,
) -> Result<Vec<crate::fields::fp::FpVar<ConstraintF>>, ark_relations::r1cs::SynthesisError>;
}
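A brief illustrative sketch of the `AllocVar`/`R1CSVar` interplay (assuming `FpVar` and the `ark_relations` constraint system; the function itself is hypothetical): a constant carries no constraint-system handle, a witness does, and `value()` recovers the assigned native value in both cases.
use crate::{alloc::AllocVar, fields::fp::FpVar, R1CSVar};
use ark_ff::PrimeField;
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};

fn r1cs_var_basics<F: PrimeField>() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<F>::new_ref();
    // A constant allocates no variables and tracks no constraint system.
    let c = FpVar::<F>::new_constant(cs.clone(), F::from(3u8))?;
    // A witness is tied to `cs` and gets its value from the closure.
    let w = FpVar::<F>::new_witness(cs.clone(), || Ok(F::from(3u8)))?;
    assert!(c.is_constant());
    assert!(!w.is_constant());
    assert_eq!(c.value()?, w.value()?);
    Ok(())
}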

167
src/macros.rs Normal file

@@ -0,0 +1,167 @@
#[allow(unused_braces)]
/// Implements arithmetic traits (e.g., `Add`, `Sub`, `Mul`) for the given type
/// using the impl in `$impl`.
///
/// Used primarily for implementing these traits for `FieldVar`s and
/// `GroupVar`s.
#[macro_export]
macro_rules! impl_ops {
(
$type: ty,
$native: ty,
$trait: ident,
$fn: ident,
$assign_trait: ident,
$assign_fn: ident,
$impl: expr,
$constant_impl: expr,
$($args:tt)*
) => {
impl_bounded_ops!($type, $native, $trait, $fn, $assign_trait, $assign_fn, $impl, $constant_impl, ($($args)+), );
};
}
/// Implements arithmetic traits (e.g., `Add`, `Sub`, `Mul`) for the given type
/// using the impl in `$impl`.
///
/// Used primarily for implementing these traits for `FieldVar`s and
/// `GroupVar`s.
///
/// When compared to `impl_ops`, this macro allows specifying additional trait
/// bounds.
#[macro_export]
macro_rules! impl_bounded_ops {
(
$type: ty,
$native: ty,
$trait: ident,
$fn: ident,
$assign_trait: ident,
$assign_fn: ident,
$impl: expr,
$constant_impl: expr,
($($params:tt)+),
$($bounds:tt)*
) => {
impl<'a, $($params)+> core::ops::$trait<&'a $type> for &'a $type
where
$($bounds)*
{
type Output = $type;
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $fn(self, other: Self) -> Self::Output {
($impl)(self, other)
}
}
impl<'a, $($params)+> core::ops::$trait<$type> for &'a $type
where
$($bounds)*
{
type Output = $type;
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $fn(self, other: $type) -> Self::Output {
core::ops::$trait::$fn(self, &other)
}
}
impl<'a, $($params)+> core::ops::$trait<&'a $type> for $type
where
$($bounds)*
{
type Output = $type;
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $fn(self, other: &'a $type) -> Self::Output {
core::ops::$trait::$fn(&self, other)
}
}
impl<$($params)+> core::ops::$trait<$type> for $type
where
$($bounds)*
{
type Output = $type;
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $fn(self, other: $type) -> Self::Output {
core::ops::$trait::$fn(&self, &other)
}
}
impl<$($params)+> core::ops::$assign_trait<$type> for $type
where
$($bounds)*
{
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $assign_fn(&mut self, other: $type) {
let result = core::ops::$trait::$fn(&*self, &other);
*self = result
}
}
impl<'a, $($params)+> core::ops::$assign_trait<&'a $type> for $type
where
$($bounds)*
{
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $assign_fn(&mut self, other: &'a $type) {
let result = core::ops::$trait::$fn(&*self, other);
*self = result
}
}
impl<'a, $($params)+> core::ops::$trait<$native> for &'a $type
where
$($bounds)*
{
type Output = $type;
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $fn(self, other: $native) -> Self::Output {
($constant_impl)(self, other)
}
}
impl<$($params)+> core::ops::$trait<$native> for $type
where
$($bounds)*
{
type Output = $type;
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $fn(self, other: $native) -> Self::Output {
core::ops::$trait::$fn(&self, other)
}
}
impl<$($params)+> core::ops::$assign_trait<$native> for $type
where
$($bounds)*
{
#[tracing::instrument(target = "r1cs", skip(self))]
#[allow(unused_braces)]
fn $assign_fn(&mut self, other: $native) {
let result = core::ops::$trait::$fn(&*self, other);
*self = result
}
}
}
}
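To illustrate what the generated impls provide downstream (a sketch assuming `FpVar` is wired through these macros, as this crate's field gadgets are): every owned/borrowed operand combination and mixed variable/native arithmetic is available.
use crate::fields::fp::FpVar;
use ark_ff::PrimeField;

fn mixed_arithmetic<F: PrimeField>(a: &FpVar<F>, b: &FpVar<F>, k: F) -> FpVar<F> {
    let mut acc = a + b; // &FpVar + &FpVar, from the reference impls
    acc += b.clone();    // FpVar += FpVar, from the assign impls
    acc = acc + k;       // FpVar + F, from the native-constant impls
    acc
}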

167
src/pairing/bls12/mod.rs Normal file

@@ -0,0 +1,167 @@
use ark_relations::r1cs::SynthesisError;
use super::PairingVar as PG;
use crate::{
fields::{fp::FpVar, fp12::Fp12Var, fp2::Fp2Var, FieldVar},
groups::bls12::{G1AffineVar, G1PreparedVar, G1Var, G2PreparedVar, G2Var},
};
use ark_ec::bls12::{Bls12, Bls12Parameters, TwistType};
use ark_ff::fields::BitIteratorBE;
use core::marker::PhantomData;
/// Specifies the constraints for computing a pairing in a BLS12 bilinear group.
pub struct PairingVar<P: Bls12Parameters>(PhantomData<P>);
type Fp2V<P> = Fp2Var<<P as Bls12Parameters>::Fp2Params>;
impl<P: Bls12Parameters> PairingVar<P> {
// Evaluate the line function at point p.
#[tracing::instrument(target = "r1cs")]
fn ell(
f: &mut Fp12Var<P::Fp12Params>,
coeffs: &(Fp2V<P>, Fp2V<P>),
p: &G1AffineVar<P>,
) -> Result<(), SynthesisError> {
let zero = FpVar::<P::Fp>::zero();
match P::TWIST_TYPE {
TwistType::M => {
let c0 = coeffs.0.clone();
let mut c1 = coeffs.1.clone();
let c2 = Fp2V::<P>::new(p.y.clone(), zero);
c1.c0 = c1.c0 * &p.x;
c1.c1 = c1.c1 * &p.x;
*f = f.mul_by_014(&c0, &c1, &c2)?;
Ok(())
}
TwistType::D => {
let c0 = Fp2V::<P>::new(p.y.clone(), zero);
let mut c1 = coeffs.0.clone();
let c2 = coeffs.1.clone();
c1.c0 = c1.c0 * &p.x;
c1.c1 = c1.c1 * &p.x;
*f = f.mul_by_034(&c0, &c1, &c2)?;
Ok(())
}
}
}
#[tracing::instrument(target = "r1cs")]
fn exp_by_x(f: &Fp12Var<P::Fp12Params>) -> Result<Fp12Var<P::Fp12Params>, SynthesisError> {
let mut result = f.optimized_cyclotomic_exp(P::X)?;
if P::X_IS_NEGATIVE {
result = result.unitary_inverse()?;
}
Ok(result)
}
}
impl<P: Bls12Parameters> PG<Bls12<P>, P::Fp> for PairingVar<P> {
type G1Var = G1Var<P>;
type G2Var = G2Var<P>;
type G1PreparedVar = G1PreparedVar<P>;
type G2PreparedVar = G2PreparedVar<P>;
type GTVar = Fp12Var<P::Fp12Params>;
#[tracing::instrument(target = "r1cs")]
fn miller_loop(
ps: &[Self::G1PreparedVar],
qs: &[Self::G2PreparedVar],
) -> Result<Self::GTVar, SynthesisError> {
let mut pairs = vec![];
for (p, q) in ps.iter().zip(qs.iter()) {
pairs.push((p, q.ell_coeffs.iter()));
}
let mut f = Self::GTVar::one();
for i in BitIteratorBE::new(P::X).skip(1) {
f.square_in_place()?;
for &mut (p, ref mut coeffs) in pairs.iter_mut() {
Self::ell(&mut f, coeffs.next().unwrap(), &p.0)?;
}
if i {
for &mut (p, ref mut coeffs) in pairs.iter_mut() {
Self::ell(&mut f, coeffs.next().unwrap(), &p.0)?;
}
}
}
if P::X_IS_NEGATIVE {
f = f.unitary_inverse()?;
}
Ok(f)
}
#[tracing::instrument(target = "r1cs")]
fn final_exponentiation(f: &Self::GTVar) -> Result<Self::GTVar, SynthesisError> {
// Computing the final exponentiation following
// https://eprint.iacr.org/2016/130.pdf.
// We don't use their "faster" formula because it is difficult to make
// it work for curves with odd `P::X`.
// Hence we implement the slower algorithm from Table 1 below.
let f1 = f.frobenius_map(6)?;
f.inverse().and_then(|mut f2| {
// f2 = f^(-1);
// r = f^(p^6 - 1)
let mut r = f1;
r *= &f2;
// f2 = f^(p^6 - 1)
f2 = r.clone();
// r = f^((p^6 - 1)(p^2))
r.frobenius_map_in_place(2)?;
// r = f^((p^6 - 1)(p^2) + (p^6 - 1))
// r = f^((p^6 - 1)(p^2 + 1))
r *= &f2;
// Hard part of the final exponentiation is below:
// From https://eprint.iacr.org/2016/130.pdf, Table 1
let mut y0 = r.cyclotomic_square()?;
y0 = y0.unitary_inverse()?;
let mut y5 = Self::exp_by_x(&r)?;
let mut y1 = y5.cyclotomic_square()?;
let mut y3 = y0 * &y5;
y0 = Self::exp_by_x(&y3)?;
let y2 = Self::exp_by_x(&y0)?;
let mut y4 = Self::exp_by_x(&y2)?;
y4 *= &y1;
y1 = Self::exp_by_x(&y4)?;
y3 = y3.unitary_inverse()?;
y1 *= &y3;
y1 *= &r;
y3 = r.clone();
y3 = y3.unitary_inverse()?;
y0 *= &r;
y0.frobenius_map_in_place(3)?;
y4 *= &y3;
y4.frobenius_map_in_place(1)?;
y5 *= &y2;
y5.frobenius_map_in_place(2)?;
y5 *= &y0;
y5 *= &y4;
y5 *= &y1;
Ok(y5)
})
}
#[tracing::instrument(target = "r1cs")]
fn prepare_g1(p: &Self::G1Var) -> Result<Self::G1PreparedVar, SynthesisError> {
Self::G1PreparedVar::from_group_var(p)
}
#[tracing::instrument(target = "r1cs")]
fn prepare_g2(q: &Self::G2Var) -> Result<Self::G2PreparedVar, SynthesisError> {
Self::G2PreparedVar::from_group_var(q)
}
}
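For orientation, a standard BLS12 fact (not specific to this implementation): the final exponentiation raises the Miller-loop output to $(p^{12}-1)/r$, which factors as

$$\frac{p^{12}-1}{r} = \underbrace{(p^{6}-1)(p^{2}+1)}_{\text{easy part}} \cdot \underbrace{\frac{p^{4}-p^{2}+1}{r}}_{\text{hard part}}.$$

The `f1`/`f2`/`r` computations above implement the easy part with one conjugation, one inversion, and one Frobenius; the chain of `exp_by_x`, `cyclotomic_square`, `unitary_inverse`, and `frobenius_map_in_place` calls implements the hard part following Table 1 of eprint 2016/130.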

222
src/pairing/mnt4/mod.rs Normal file

@@ -0,0 +1,222 @@
use ark_relations::r1cs::SynthesisError;
use super::PairingVar as PG;
use crate::{
fields::{fp::FpVar, fp2::Fp2Var, fp4::Fp4Var, FieldVar},
groups::mnt4::{
AteAdditionCoefficientsVar, AteDoubleCoefficientsVar, G1PreparedVar, G1Var, G2PreparedVar,
G2ProjectiveExtendedVar, G2Var,
},
};
use ark_ec::mnt4::{MNT4Parameters, MNT4};
use ark_ff::BitIteratorBE;
use core::marker::PhantomData;
/// Specifies the constraints for computing a pairing in an MNT4 bilinear group.
pub struct PairingVar<P: MNT4Parameters>(PhantomData<P>);
type Fp2G<P> = Fp2Var<<P as MNT4Parameters>::Fp2Params>;
type Fp4G<P> = Fp4Var<<P as MNT4Parameters>::Fp4Params>;
/// A variable corresponding to `ark_ec::mnt4::GT`.
pub type GTVar<P> = Fp4G<P>;
impl<P: MNT4Parameters> PairingVar<P> {
#[tracing::instrument(target = "r1cs", skip(r))]
pub(crate) fn doubling_step_for_flipped_miller_loop(
r: &G2ProjectiveExtendedVar<P>,
) -> Result<(G2ProjectiveExtendedVar<P>, AteDoubleCoefficientsVar<P>), SynthesisError> {
let a = r.t.square()?;
let b = r.x.square()?;
let c = r.y.square()?;
let d = c.square()?;
let e = (&r.x + &c).square()? - &b - &d;
let f = (b.double()? + &b) + &a * P::TWIST_COEFF_A;
let g = f.square()?;
let d_eight = d.double()?.double()?.double()?;
let e2 = e.double()?;
let x = &g - &e2.double()?;
let y = &f * (&e2 - &x) - &d_eight;
let z = (&r.y + &r.z).square()? - &c - &r.z.square()?;
let t = z.square()?;
let r2 = G2ProjectiveExtendedVar { x, y, z, t };
let c_h = (&r2.z + &r.t).square()? - &r2.t - &a;
let c_4c = c.double()?.double()?;
let c_j = (&f + &r.t).square()? - &g - &a;
let c_l = (&f + &r.x).square()? - &g - &b;
let coeff = AteDoubleCoefficientsVar {
c_h,
c_4c,
c_j,
c_l,
};
Ok((r2, coeff))
}
#[tracing::instrument(target = "r1cs", skip(r))]
pub(crate) fn mixed_addition_step_for_flipped_miller_loop(
x: &Fp2G<P>,
y: &Fp2G<P>,
r: &G2ProjectiveExtendedVar<P>,
) -> Result<(G2ProjectiveExtendedVar<P>, AteAdditionCoefficientsVar<P>), SynthesisError> {
let a = y.square()?;
let b = &r.t * x;
let d = ((&r.z + y).square()? - &a - &r.t) * &r.t;
let h = &b - &r.x;
let i = h.square()?;
let e = i.double()?.double()?;
let j = &h * &e;
let v = &r.x * &e;
let ry2 = r.y.double()?;
let l1 = &d - &ry2;
let x = l1.square()? - &j - &v.double()?;
let y = &l1 * &(&v - &x) - j * &ry2;
let z = (&r.z + &h).square()? - &r.t - &i;
let t = z.square()?;
let r2 = G2ProjectiveExtendedVar {
x,
y,
z: z.clone(),
t,
};
let coeff = AteAdditionCoefficientsVar { c_l1: l1, c_rz: z };
Ok((r2, coeff))
}
#[tracing::instrument(target = "r1cs", skip(p, q))]
pub(crate) fn ate_miller_loop(
p: &G1PreparedVar<P>,
q: &G2PreparedVar<P>,
) -> Result<Fp4G<P>, SynthesisError> {
let l1_coeff = Fp2G::<P>::new(p.x.clone(), FpVar::<P::Fp>::zero()) - &q.x_over_twist;
let mut f = Fp4G::<P>::one();
let mut dbl_idx: usize = 0;
let mut add_idx: usize = 0;
// The loop below runs over the bits of `P::ATE_LOOP_COUNT` (with leading zeros
// skipped) in MSB-to-LSB order, excluding the MSB itself.
for bit in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT).skip(1) {
let dc = &q.double_coefficients[dbl_idx];
dbl_idx += 1;
let g_rr_at_p = Fp4G::<P>::new(
&dc.c_l - &dc.c_4c - &dc.c_j * &p.x_twist,
&dc.c_h * &p.y_twist,
);
f = f.square()? * &g_rr_at_p;
if bit {
let ac = &q.addition_coefficients[add_idx];
add_idx += 1;
let g_rq_at_p = Fp4G::<P>::new(
&ac.c_rz * &p.y_twist,
(&q.y_over_twist * &ac.c_rz + &l1_coeff * &ac.c_l1).negate()?,
);
f *= &g_rq_at_p;
}
}
if P::ATE_IS_LOOP_COUNT_NEG {
let ac = &q.addition_coefficients[add_idx];
let g_rnegr_at_p = Fp4G::<P>::new(
&ac.c_rz * &p.y_twist,
(&q.y_over_twist * &ac.c_rz + &l1_coeff * &ac.c_l1).negate()?,
);
f = (&f * &g_rnegr_at_p).inverse()?;
}
Ok(f)
}
#[tracing::instrument(target = "r1cs", skip(value))]
pub(crate) fn final_exponentiation(value: &Fp4G<P>) -> Result<GTVar<P>, SynthesisError> {
let value_inv = value.inverse()?;
let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv)?;
let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value)?;
Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk)
}
#[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
fn final_exponentiation_first_chunk(
elt: &Fp4G<P>,
elt_inv: &Fp4G<P>,
) -> Result<Fp4G<P>, SynthesisError> {
// (q^2-1)
// elt_q2 = elt^(q^2)
let elt_q2 = elt.unitary_inverse()?;
// elt_q2_over_elt = elt^(q^2-1)
Ok(elt_q2 * elt_inv)
}
#[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
fn final_exponentiation_last_chunk(
elt: &Fp4G<P>,
elt_inv: &Fp4G<P>,
) -> Result<Fp4G<P>, SynthesisError> {
let elt_clone = elt.clone();
let elt_inv_clone = elt_inv.clone();
let mut elt_q = elt.clone();
elt_q.frobenius_map_in_place(1)?;
let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1)?;
let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
} else {
elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
};
Ok(w1_part * &w0_part)
}
}
impl<P: MNT4Parameters> PG<MNT4<P>, P::Fp> for PairingVar<P> {
type G1Var = G1Var<P>;
type G2Var = G2Var<P>;
type G1PreparedVar = G1PreparedVar<P>;
type G2PreparedVar = G2PreparedVar<P>;
type GTVar = GTVar<P>;
#[tracing::instrument(target = "r1cs")]
fn miller_loop(
ps: &[Self::G1PreparedVar],
qs: &[Self::G2PreparedVar],
) -> Result<Self::GTVar, SynthesisError> {
let mut result = Fp4G::<P>::one();
for (p, q) in ps.iter().zip(qs) {
result *= Self::ate_miller_loop(p, q)?;
}
Ok(result)
}
#[tracing::instrument(target = "r1cs")]
fn final_exponentiation(r: &Self::GTVar) -> Result<Self::GTVar, SynthesisError> {
Self::final_exponentiation(r)
}
#[tracing::instrument(target = "r1cs")]
fn prepare_g1(p: &Self::G1Var) -> Result<Self::G1PreparedVar, SynthesisError> {
Self::G1PreparedVar::from_group_var(p)
}
#[tracing::instrument(target = "r1cs")]
fn prepare_g2(q: &Self::G2Var) -> Result<Self::G2PreparedVar, SynthesisError> {
Self::G2PreparedVar::from_group_var(q)
}
}
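For the MNT4 case, the corresponding (standard) factorization of the final exponent is

$$\frac{q^{4}-1}{r} = (q^{2}-1) \cdot \frac{q^{2}+1}{r},$$

where `final_exponentiation_first_chunk` computes $f^{q^{2}-1}$ (the unitary inverse is the $q^{2}$-power Frobenius on $\mathbb{F}_{q^{4}}$), and `final_exponentiation_last_chunk` evaluates the remaining exponent, assumed (as the parameter names suggest) to be encoded as $w_1 q + w_0$ with the sign of $w_0$ handled by `FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG`.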

217
src/pairing/mnt6/mod.rs Normal file

@@ -0,0 +1,217 @@
use ark_relations::r1cs::SynthesisError;
use super::PairingVar as PG;
use crate::{
fields::{fp::FpVar, fp3::Fp3Var, fp6_2over3::Fp6Var, FieldVar},
groups::mnt6::{
AteAdditionCoefficientsVar, AteDoubleCoefficientsVar, G1PreparedVar, G1Var, G2PreparedVar,
G2ProjectiveExtendedVar, G2Var,
},
};
use ark_ec::mnt6::{MNT6Parameters, MNT6};
use ark_ff::fields::BitIteratorBE;
use core::marker::PhantomData;
/// Specifies the constraints for computing a pairing in an MNT6 bilinear group.
pub struct PairingVar<P: MNT6Parameters>(PhantomData<P>);
type Fp3G<P> = Fp3Var<<P as MNT6Parameters>::Fp3Params>;
type Fp6G<P> = Fp6Var<<P as MNT6Parameters>::Fp6Params>;
/// A variable corresponding to `ark_ec::mnt6::GT`.
pub type GTVar<P> = Fp6G<P>;
impl<P: MNT6Parameters> PairingVar<P> {
#[tracing::instrument(target = "r1cs", skip(r))]
pub(crate) fn doubling_step_for_flipped_miller_loop(
r: &G2ProjectiveExtendedVar<P>,
) -> Result<(G2ProjectiveExtendedVar<P>, AteDoubleCoefficientsVar<P>), SynthesisError> {
let a = r.t.square()?;
let b = r.x.square()?;
let c = r.y.square()?;
let d = c.square()?;
let e = (&r.x + &c).square()? - &b - &d;
let f = b.double()? + &b + &(&a * P::TWIST_COEFF_A);
let g = f.square()?;
let d_eight = d.double()?.double()?.double()?;
let e2 = e.double()?;
let x = &g - e2.double()?;
let y = &f * (e2 - &x) - d_eight;
let z = (&r.y + &r.z).square()? - &c - &r.z.square()?;
let t = z.square()?;
let r2 = G2ProjectiveExtendedVar { x, y, z, t };
let coeff = AteDoubleCoefficientsVar {
c_h: (&r2.z + &r.t).square()? - &r2.t - &a,
c_4c: c.double()?.double()?,
c_j: (&f + &r.t).square()? - &g - &a,
c_l: (&f + &r.x).square()? - &g - &b,
};
Ok((r2, coeff))
}
#[tracing::instrument(target = "r1cs", skip(r))]
pub(crate) fn mixed_addition_step_for_flipped_miller_loop(
x: &Fp3G<P>,
y: &Fp3G<P>,
r: &G2ProjectiveExtendedVar<P>,
) -> Result<(G2ProjectiveExtendedVar<P>, AteAdditionCoefficientsVar<P>), SynthesisError> {
let a = y.square()?;
let b = &r.t * x;
let d = ((&r.z + y).square()? - &a - &r.t) * &r.t;
let h = &b - &r.x;
let i = h.square()?;
let e = i.double()?.double()?;
let j = &h * &e;
let v = &r.x * &e;
let ry2 = r.y.double()?;
let l1 = &d - &ry2;
let x = l1.square()? - &j - &v.double()?;
let y = &l1 * &(&v - &x) - &j * ry2;
let z = (&r.z + &h).square()? - &r.t - &i;
let t = z.square()?;
let r2 = G2ProjectiveExtendedVar {
x,
y,
z: z.clone(),
t,
};
let coeff = AteAdditionCoefficientsVar { c_l1: l1, c_rz: z };
Ok((r2, coeff))
}
#[tracing::instrument(target = "r1cs", skip(p, q))]
pub(crate) fn ate_miller_loop(
p: &G1PreparedVar<P>,
q: &G2PreparedVar<P>,
) -> Result<Fp6G<P>, SynthesisError> {
let zero = FpVar::<P::Fp>::zero();
let l1_coeff = Fp3Var::new(p.x.clone(), zero.clone(), zero) - &q.x_over_twist;
let mut f = Fp6G::<P>::one();
let mut dbl_idx: usize = 0;
let mut add_idx: usize = 0;
// The loop below runs over the bits of `P::ATE_LOOP_COUNT` (with leading zeros
// skipped) in MSB-to-LSB order, excluding the MSB itself.
for bit in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT).skip(1) {
let dc = &q.double_coefficients[dbl_idx];
dbl_idx += 1;
let g_rr_at_p = Fp6Var::new(
&dc.c_l - &dc.c_4c - &dc.c_j * &p.x_twist,
&dc.c_h * &p.y_twist,
);
f = f.square()? * &g_rr_at_p;
if bit {
let ac = &q.addition_coefficients[add_idx];
add_idx += 1;
let g_rq_at_p = Fp6Var::new(
&ac.c_rz * &p.y_twist,
(&q.y_over_twist * &ac.c_rz + &(&l1_coeff * &ac.c_l1)).negate()?,
);
f *= &g_rq_at_p;
}
}
if P::ATE_IS_LOOP_COUNT_NEG {
let ac = &q.addition_coefficients[add_idx];
let g_rnegr_at_p = Fp6Var::new(
&ac.c_rz * &p.y_twist,
(&q.y_over_twist * &ac.c_rz + &(l1_coeff * &ac.c_l1)).negate()?,
);
f = (f * &g_rnegr_at_p).inverse()?;
}
Ok(f)
}
#[tracing::instrument(target = "r1cs")]
pub(crate) fn final_exponentiation(value: &Fp6G<P>) -> Result<GTVar<P>, SynthesisError> {
let value_inv = value.inverse()?;
let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv)?;
let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value)?;
Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk)
}
#[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
fn final_exponentiation_first_chunk(
elt: &Fp6G<P>,
elt_inv: &Fp6G<P>,
) -> Result<Fp6G<P>, SynthesisError> {
// (q^3-1)*(q+1)
// elt_q3 = elt^(q^3)
let elt_q3 = elt.unitary_inverse()?;
// elt_q3_over_elt = elt^(q^3-1)
let elt_q3_over_elt = elt_q3 * elt_inv;
// alpha = elt^((q^3-1) * q)
let alpha = elt_q3_over_elt.frobenius_map(1)?;
// beta = elt^((q^3-1)*(q+1))
Ok(alpha * &elt_q3_over_elt)
}
#[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
fn final_exponentiation_last_chunk(
elt: &Fp6G<P>,
elt_inv: &Fp6G<P>,
) -> Result<Fp6G<P>, SynthesisError> {
let elt_q = elt.frobenius_map(1)?;
let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1)?;
let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
elt_inv.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
} else {
elt.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
};
Ok(w1_part * &w0_part)
}
}
impl<P: MNT6Parameters> PG<MNT6<P>, P::Fp> for PairingVar<P> {
type G1Var = G1Var<P>;
type G2Var = G2Var<P>;
type G1PreparedVar = G1PreparedVar<P>;
type G2PreparedVar = G2PreparedVar<P>;
type GTVar = GTVar<P>;
#[tracing::instrument(target = "r1cs")]
fn miller_loop(
ps: &[Self::G1PreparedVar],
qs: &[Self::G2PreparedVar],
) -> Result<Self::GTVar, SynthesisError> {
let mut result = Fp6G::<P>::one();
for (p, q) in ps.iter().zip(qs) {
result *= Self::ate_miller_loop(p, q)?;
}
Ok(result)
}
#[tracing::instrument(target = "r1cs")]
fn final_exponentiation(r: &Self::GTVar) -> Result<Self::GTVar, SynthesisError> {
Self::final_exponentiation(r)
}
#[tracing::instrument(target = "r1cs")]
fn prepare_g1(p: &Self::G1Var) -> Result<Self::G1PreparedVar, SynthesisError> {
Self::G1PreparedVar::from_group_var(p)
}
#[tracing::instrument(target = "r1cs")]
fn prepare_g2(q: &Self::G2Var) -> Result<Self::G2PreparedVar, SynthesisError> {
Self::G2PreparedVar::from_group_var(q)
}
}
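The MNT6 variant follows the same pattern with embedding degree 6 (again a standard fact, not specific to this code):

$$\frac{q^{6}-1}{r} = (q^{3}-1)(q+1) \cdot \frac{q^{2}-q+1}{r},$$

with the first chunk handling $(q^{3}-1)(q+1)$ (conjugation, inversion, and one Frobenius, as the comments above note), and the last chunk evaluating the remaining exponent, again assumed to be encoded as $w_1 q + w_0$.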

84
src/pairing/mod.rs Normal file

@@ -0,0 +1,84 @@
use crate::prelude::*;
use ark_ec::PairingEngine;
use ark_ff::Field;
use ark_relations::r1cs::SynthesisError;
use core::fmt::Debug;
/// This module implements pairings for BLS12 bilinear groups.
pub mod bls12;
/// This module implements pairings for MNT4 bilinear groups.
pub mod mnt4;
/// This module implements pairings for MNT6 bilinear groups.
pub mod mnt6;
/// Specifies the constraints for computing a pairing in the bilinear group
/// `E`.
pub trait PairingVar<E: PairingEngine, ConstraintF: Field = <E as PairingEngine>::Fq> {
/// A variable representing an element of `G1`.
/// This is the R1CS equivalent of `E::G1Projective`.
type G1Var: CurveVar<E::G1Projective, ConstraintF>
+ AllocVar<E::G1Projective, ConstraintF>
+ AllocVar<E::G1Affine, ConstraintF>;
/// A variable representing an element of `G2`.
/// This is the R1CS equivalent of `E::G2Projective`.
type G2Var: CurveVar<E::G2Projective, ConstraintF>
+ AllocVar<E::G2Projective, ConstraintF>
+ AllocVar<E::G2Affine, ConstraintF>;
/// A variable representing an element of `GT`.
/// This is the R1CS equivalent of `E::GT`.
type GTVar: FieldVar<E::Fqk, ConstraintF>;
/// A variable representing cached precomputation that can speed up
/// pairing computations. This is the R1CS equivalent of
/// `E::G1Prepared`.
type G1PreparedVar: ToBytesGadget<ConstraintF>
+ AllocVar<E::G1Prepared, ConstraintF>
+ Clone
+ Debug;
/// A variable representing cached precomputation that can speed up
/// pairing computations. This is the R1CS equivalent of
/// `E::G2Prepared`.
type G2PreparedVar: ToBytesGadget<ConstraintF>
+ AllocVar<E::G2Prepared, ConstraintF>
+ Clone
+ Debug;
/// Computes a multi-Miller loop over the elements
/// of `p` and `q`.
fn miller_loop(
p: &[Self::G1PreparedVar],
q: &[Self::G2PreparedVar],
) -> Result<Self::GTVar, SynthesisError>;
/// Computes a final exponentiation over `p`.
fn final_exponentiation(p: &Self::GTVar) -> Result<Self::GTVar, SynthesisError>;
/// Computes a pairing over `p` and `q`.
#[tracing::instrument(target = "r1cs")]
fn pairing(
p: Self::G1PreparedVar,
q: Self::G2PreparedVar,
) -> Result<Self::GTVar, SynthesisError> {
let tmp = Self::miller_loop(&[p], &[q])?;
Self::final_exponentiation(&tmp)
}
/// Computes a product of pairings over the elements in `p` and `q`.
#[must_use]
#[tracing::instrument(target = "r1cs")]
fn product_of_pairings(
p: &[Self::G1PreparedVar],
q: &[Self::G2PreparedVar],
) -> Result<Self::GTVar, SynthesisError> {
let miller_result = Self::miller_loop(p, q)?;
Self::final_exponentiation(&miller_result)
}
/// Performs the precomputation to generate `Self::G1PreparedVar`.
fn prepare_g1(q: &Self::G1Var) -> Result<Self::G1PreparedVar, SynthesisError>;
/// Performs the precomputation to generate `Self::G2PreparedVar`.
fn prepare_g2(q: &Self::G2Var) -> Result<Self::G2PreparedVar, SynthesisError>;
}
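A minimal usage sketch of the trait above (the helper is hypothetical and relies only on the methods declared here plus `EqGadget` and `FieldVar::one` from the prelude): enforce that a product of two pairings is the identity in `GT`.
use crate::prelude::*;
use ark_ec::PairingEngine;
use ark_ff::Field;
use ark_relations::r1cs::SynthesisError;

// Hypothetical helper: enforce e(p[0], q[0]) * e(p[1], q[1]) == 1 in-circuit.
fn enforce_pairing_product_is_one<E: PairingEngine, CF: Field, P: PairingVar<E, CF>>(
    p: &[P::G1Var; 2],
    q: &[P::G2Var; 2],
) -> Result<(), SynthesisError> {
    let ps = [P::prepare_g1(&p[0])?, P::prepare_g1(&p[1])?];
    let qs = [P::prepare_g2(&q[0])?, P::prepare_g2(&q[1])?];
    let product = P::product_of_pairings(&ps, &qs)?;
    product.enforce_equal(&P::GTVar::one())
}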

72
src/select.rs Normal file

@@ -0,0 +1,72 @@
use crate::prelude::*;
use ark_ff::Field;
use ark_relations::r1cs::SynthesisError;
/// Generates constraints for selecting one of two values.
pub trait CondSelectGadget<ConstraintF: Field>
where
Self: Sized,
{
/// If `cond == &Boolean::TRUE`, then this returns `true_value`; else,
/// returns `false_value`.
///
/// # Note
/// `Self::conditionally_select(cond, true_value, false_value)?` can be more
/// succinctly written as `cond.select(&true_value, &false_value)?`.
fn conditionally_select(
cond: &Boolean<ConstraintF>,
true_value: &Self,
false_value: &Self,
) -> Result<Self, SynthesisError>;
}
/// Performs a lookup in a 4-element table using two bits.
pub trait TwoBitLookupGadget<ConstraintF: Field>
where
Self: Sized,
{
/// The type of values being looked up.
type TableConstant;
/// Interprets the slice `bits` as a two-bit integer `b = bits[0] + (bits[1]
/// << 1)`, and then outputs `constants[b]`.
///
/// For example, if `bits == [0, 1]`, and `constants == [0, 1, 2, 3]`, this
/// method should output a variable corresponding to `2`.
///
/// # Panics
///
/// This method panics if `bits.len() != 2` or `constants.len() != 4`.
fn two_bit_lookup(
bits: &[Boolean<ConstraintF>],
constants: &[Self::TableConstant],
) -> Result<Self, SynthesisError>;
}
/// Uses three bits to perform a lookup into a table, where the last bit
/// conditionally negates the looked-up value.
pub trait ThreeBitCondNegLookupGadget<ConstraintF: Field>
where
Self: Sized,
{
/// The type of values being looked up.
type TableConstant;
/// Interprets the slice `bits` as a two-bit integer `b = bits[0] + (bits[1]
/// << 1)`, and then outputs `constants[b] * c`, where `c = if bits[2] {
/// -1 } else { 1 };`.
///
/// That is, `bits[2]` conditionally negates the looked-up value.
///
/// For example, if `bits == [1, 0, 1]`, and `constants == [0, 1, 2, 3]`,
/// this method should output a variable corresponding to `-1`.
///
/// # Panics
///
/// This method panics if `bits.len() != 3` or `constants.len() != 4`.
fn three_bit_cond_neg_lookup(
bits: &[Boolean<ConstraintF>],
b0b1: &Boolean<ConstraintF>,
constants: &[Self::TableConstant],
) -> Result<Self, SynthesisError>;
}
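A short sketch of the two-bit lookup (assuming the `FpVar` implementation of `TwoBitLookupGadget` elsewhere in this crate, with `TableConstant = F`; the test function is illustrative): `bits == [0, 1]` encodes the index `2`, so the lookup returns the third table entry.
use crate::{bits::boolean::Boolean, fields::fp::FpVar, select::TwoBitLookupGadget, R1CSVar};
use ark_ff::PrimeField;
use ark_relations::r1cs::SynthesisError;

fn two_bit_lookup_sketch<F: PrimeField>() -> Result<(), SynthesisError> {
    // b = bits[0] + (bits[1] << 1) = 0 + 2 = 2
    let bits: [Boolean<F>; 2] = [Boolean::Constant(false), Boolean::Constant(true)];
    let table = [F::from(10u8), F::from(11u8), F::from(12u8), F::from(13u8)];
    let looked_up = FpVar::<F>::two_bit_lookup(&bits, &table)?;
    assert_eq!(looked_up.value()?, F::from(12u8));
    Ok(())
}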