mirror of https://github.com/arnaucube/poulpy.git
Commit: added spqlios as submodule
rns/src/modulus/barrett.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
/// Barrett stores a value together with its precomputed quotient
/// (for the u64 instantiation, floor(value * 2^64 / q)), enabling
/// multiplication by a constant with a single high-word correction.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Barrett<O>(pub O, pub O);

impl<O> Barrett<O> {
    /// Returns the stored value.
    #[inline(always)]
    pub fn value(&self) -> &O {
        &self.0
    }

    /// Returns the precomputed quotient.
    #[inline(always)]
    pub fn quotient(&self) -> &O {
        &self.1
    }
}

/// BarrettPrecomp stores the precomputations for Barrett reduction
/// modulo q: the modulus, its small multiples, and (for the u64
/// instantiation) the two 64-bit limbs of floor(2^128 / q).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct BarrettPrecomp<O> {
    pub q: O,
    pub two_q: O,
    pub four_q: O,
    pub lo: O,
    pub hi: O,
    pub one: Barrett<O>,
}

impl<O> BarrettPrecomp<O> {
    /// Returns the high limb of floor(2^128 / q).
    #[inline(always)]
    pub fn value_hi(&self) -> &O {
        &self.hi
    }

    /// Returns the low limb of floor(2^128 / q).
    #[inline(always)]
    pub fn value_lo(&self) -> &O {
        &self.lo
    }
}
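The pair stored by Barrett is (v, floor(v * 2^64 / q)), which is what makes Shoup-style multiplication by a constant possible with one high-word multiply and one correction. A minimal standalone sketch of that invariant, independent of the crate's types (the names barrett_pair and mul_mod are illustrative, not part of the commit):

// Standalone sketch: for a constant v < q, store (v, floor(v * 2^64 / q));
// then for any x, t = floor(x * quotient / 2^64) equals floor(x*v/q) or
// floor(x*v/q) - 1, so x*v - t*q lies in [0, 2q).
fn barrett_pair(v: u64, q: u64) -> (u64, u64) {
    assert!(v < q);
    (v, (((v as u128) << 64) / q as u128) as u64)
}

fn mul_mod(x: u64, (v, quotient): (u64, u64), q: u64) -> u64 {
    let t = ((x as u128 * quotient as u128) >> 64) as u64;
    let r = x.wrapping_mul(v).wrapping_sub(t.wrapping_mul(q)); // in [0, 2q)
    if r >= q { r - q } else { r }
}

fn main() {
    let (q, v, x) = (17u64, 3u64, 10u64);
    assert_eq!(mul_mod(x, barrett_pair(v, q), q), (x * v) % q); // 30 mod 17 = 13
}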
rns/src/modulus/impl_u64/barrett.rs (new file, 78 lines)
@@ -0,0 +1,78 @@
use crate::modulus::barrett::{Barrett, BarrettPrecomp};
use crate::modulus::ReduceOnce;
use crate::modulus::{BARRETT, BARRETTLAZY, FOURTIMES, NONE, ONCE, REDUCEMOD, TWICE};

use num_bigint::BigUint;
use num_traits::cast::ToPrimitive;

impl BarrettPrecomp<u64> {
    /// Returns a new instance of BarrettPrecomp<u64>, storing the
    /// two 64-bit limbs of floor(2^128 / q).
    pub fn new(q: u64) -> BarrettPrecomp<u64> {
        let big_r: BigUint =
            (BigUint::from(1u64) << ((u64::BITS << 1) as usize)) / BigUint::from(q);
        let lo: u64 = (&big_r & BigUint::from(u64::MAX)).to_u64().unwrap();
        let hi: u64 = (big_r >> u64::BITS).to_u64().unwrap();
        let mut precomp: BarrettPrecomp<u64> = Self {
            q,
            two_q: q << 1,
            four_q: q << 2,
            lo,
            hi,
            one: Barrett(0, 0),
        };
        precomp.one = precomp.prepare(1);
        precomp
    }

    /// Returns 1 as a Barrett<u64>.
    #[inline(always)]
    pub fn one(&self) -> Barrett<u64> {
        self.one
    }

    /// Applies a modular reduction on x based on REDUCE:
    /// - NONE: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - TWICE: subtracts 2q if x >= 2q.
    /// - FOURTIMES: subtracts 4q if x >= 4q.
    /// - BARRETT: maps x to x mod q using Barrett reduction.
    /// - BARRETTLAZY: Barrett reduction with output in [0, 2q-1].
    #[inline(always)]
    pub fn reduce_assign<const REDUCE: REDUCEMOD>(&self, x: &mut u64) {
        match REDUCE {
            NONE => {}
            ONCE => x.reduce_once_assign(self.q),
            TWICE => x.reduce_once_assign(self.two_q),
            FOURTIMES => x.reduce_once_assign(self.four_q),
            BARRETT => {
                let (_, mhi) = x.widening_mul(self.hi);
                // wrapping_sub: the difference is correct mod 2^64 and
                // lands in [0, 2q), but the intermediate products wrap.
                *x = x.wrapping_sub(mhi.wrapping_mul(self.q));
                x.reduce_once_assign(self.q);
            }
            BARRETTLAZY => {
                let (_, mhi) = x.widening_mul(self.hi);
                *x = x.wrapping_sub(mhi.wrapping_mul(self.q))
            }
            _ => unreachable!("invalid REDUCE argument"),
        }
    }

    /// See [Self::reduce_assign]; out-of-place variant.
    #[inline(always)]
    pub fn reduce<const REDUCE: REDUCEMOD>(&self, x: &u64) -> u64 {
        let mut r = *x;
        self.reduce_assign::<REDUCE>(&mut r);
        r
    }

    /// Returns v (with v < q) together with its precomputed quotient
    /// floor(v * 2^64 / q) as a Barrett<u64>.
    #[inline(always)]
    pub fn prepare(&self, v: u64) -> Barrett<u64> {
        debug_assert!(v < self.q);
        let quotient: u64 = (((v as u128) << 64) / self.q as u128) as _;
        Barrett(v, quotient)
    }

    /// Returns lhs * rhs mod q (up to the selected REDUCE).
    #[inline(always)]
    pub fn mul_external<const REDUCE: REDUCEMOD>(&self, lhs: &Barrett<u64>, rhs: &u64) -> u64 {
        let mut r: u64 = *rhs;
        self.mul_external_assign::<REDUCE>(lhs, &mut r);
        r
    }

    /// Assigns lhs * rhs mod q (up to the selected REDUCE) to rhs.
    #[inline(always)]
    pub fn mul_external_assign<const REDUCE: REDUCEMOD>(&self, lhs: &Barrett<u64>, rhs: &mut u64) {
        let t: u64 = ((*lhs.quotient() as u128 * *rhs as u128) >> 64) as _;
        *rhs = (rhs.wrapping_mul(*lhs.value())).wrapping_sub(self.q.wrapping_mul(t));
        self.reduce_assign::<REDUCE>(rhs);
    }
}
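A hedged usage sketch of this API, in the style of the Montgomery tests later in this commit (the test itself is not part of the diff; the constant q is the same 61-bit modulus those tests use, and y is reduced first because prepare asserts v < q):

#[cfg(test)]
mod tests {
    use crate::modulus::barrett::BarrettPrecomp;
    use crate::modulus::{BARRETT, ONCE};

    #[test]
    fn test_barrett_mul_external() {
        let q: u64 = 0x1fffffffffe00001;
        let precomp = BarrettPrecomp::new(q);
        let x: u64 = 0x5f876e514845cc8b;
        let y: u64 = 0xad726f98f24a761a % q;
        let y_prep = precomp.prepare(y);
        // Shoup multiplication by the prepared constant, fully reduced by ONCE.
        assert_eq!(
            precomp.mul_external::<ONCE>(&y_prep, &x),
            (x as u128 * y as u128 % q as u128) as u64
        );
        // Full Barrett reduction of an arbitrary u64.
        assert_eq!(precomp.reduce::<BARRETT>(&x), x % q);
    }
}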
rns/src/modulus/impl_u64/generation.rs (new file, 143 lines)
@@ -0,0 +1,143 @@
use crate::modulus::prime;

use prime::{NTTFriendlyPrimesGenerator, Prime};
use primality_test::is_prime;

impl NTTFriendlyPrimesGenerator<u64> {
    /// Returns a new generator for primes close to 2^bit_size that are
    /// congruent to 1 mod nth_root (i.e. NTT-friendly).
    /// nth_root is assumed to be a power of two dividing 2^bit_size.
    pub fn new(bit_size: u64, nth_root: u64) -> Self {
        let mut check_next_prime: bool = true;
        let mut check_prev_prime: bool = true;
        let next_prime = (1u64 << bit_size) + 1;
        let mut prev_prime = next_prime;

        // Upstream candidates would overflow u64.
        if next_prime > nth_root.wrapping_neg() {
            check_next_prime = false;
        }

        // Downstream candidates would underflow.
        if prev_prime < nth_root {
            check_prev_prime = false;
        } else {
            prev_prime -= nth_root;
        }

        Self {
            size: bit_size as f64,
            check_next_prime,
            check_prev_prime,
            nth_root,
            next_prime,
            prev_prime,
        }
    }

    pub fn next_upstream_primes(&mut self, k: usize) -> Vec<Prime<u64>> {
        let mut primes: Vec<Prime<u64>> = Vec::with_capacity(k);
        for _ in 0..k {
            primes.push(self.next_upstream_prime())
        }
        primes
    }

    pub fn next_downstream_primes(&mut self, k: usize) -> Vec<Prime<u64>> {
        let mut primes: Vec<Prime<u64>> = Vec::with_capacity(k);
        for _ in 0..k {
            primes.push(self.next_downstream_prime())
        }
        primes
    }

    pub fn next_alternating_primes(&mut self, k: usize) -> Vec<Prime<u64>> {
        let mut primes: Vec<Prime<u64>> = Vec::with_capacity(k);
        for _ in 0..k {
            primes.push(self.next_alternating_prime())
        }
        primes
    }

    /// Returns the next NTT-friendly prime above 2^bit_size.
    pub fn next_upstream_prime(&mut self) -> Prime<u64> {
        loop {
            if !self.check_next_prime
                || (self.next_prime as f64).log2() - self.size >= 0.5
                || self.next_prime > 0xffff_ffff_ffff_ffff - self.nth_root
            {
                self.check_next_prime = false;
                panic!("prime list for upstream primes is exhausted (overlap with next bit-size or prime > 2^64)");
            }
            if is_prime(self.next_prime) {
                let prime = Prime::new_unchecked(self.next_prime, 1);
                self.next_prime += self.nth_root;
                return prime;
            }
            self.next_prime += self.nth_root;
        }
    }

    /// Returns the next NTT-friendly prime below 2^bit_size.
    pub fn next_downstream_prime(&mut self) -> Prime<u64> {
        loop {
            if !self.check_prev_prime
                || self.size - (self.prev_prime as f64).log2() >= 0.5
                || self.prev_prime < self.nth_root
            {
                self.check_prev_prime = false;
                panic!("prime list for downstream primes is exhausted (overlap with previous bit-size or prime < nth_root)");
            }
            if is_prime(self.prev_prime) {
                let prime = Prime::new_unchecked(self.prev_prime, 1);
                self.prev_prime -= self.nth_root;
                return prime;
            }
            self.prev_prime -= self.nth_root;
        }
    }

    /// Returns the next NTT-friendly prime, alternating between
    /// candidates above and below 2^bit_size.
    pub fn next_alternating_prime(&mut self) -> Prime<u64> {
        loop {
            if !(self.check_next_prime || self.check_prev_prime) {
                panic!("prime lists for upstream and downstream primes are exhausted (overlap with previous/next bit-size or nth_root > prime > 2^64)")
            }

            if self.check_next_prime {
                if (self.next_prime as f64).log2() - self.size >= 0.5
                    || self.next_prime > 0xffff_ffff_ffff_ffff - self.nth_root
                {
                    self.check_next_prime = false;
                } else {
                    if is_prime(self.next_prime) {
                        let prime = Prime::new_unchecked(self.next_prime, 1);
                        self.next_prime += self.nth_root;
                        return prime;
                    }
                    self.next_prime += self.nth_root;
                }
            }

            if self.check_prev_prime {
                if self.size - (self.prev_prime as f64).log2() >= 0.5
                    || self.prev_prime < self.nth_root
                {
                    self.check_prev_prime = false;
                } else {
                    if is_prime(self.prev_prime) {
                        let prime = Prime::new_unchecked(self.prev_prime, 1);
                        self.prev_prime -= self.nth_root;
                        return prime;
                    }
                    self.prev_prime -= self.nth_root;
                }
            }
        }
    }
}

#[cfg(test)]
mod test {
    use crate::modulus::prime::NTTFriendlyPrimesGenerator;

    #[test]
    fn prime_generation() {
        let nth_root: u64 = 1 << 16;
        let mut g: NTTFriendlyPrimesGenerator<u64> =
            NTTFriendlyPrimesGenerator::new(30, nth_root);

        let primes = g.next_alternating_primes(10);
        println!("{:?}", primes);
        for prime in primes.iter() {
            assert!(prime.q() % nth_root == 1);
        }
    }
}
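Why every candidate is NTT-friendly: assuming nth_root is a power of two not exceeding 2^bit_size, the seed 2^bit_size + 1 is congruent to 1 mod nth_root, and stepping by nth_root preserves that residue, which is exactly what the test asserts. A standalone sketch of the upstream candidate walk (the function name is ours, not the commit's):

// Standalone sketch of the upstream candidate walk: every yielded value
// is ≡ 1 (mod nth_root), so any prime among them supports an NTT of
// length nth_root.
fn upstream_candidates(bit_size: u64, nth_root: u64, count: u64) -> impl Iterator<Item = u64> {
    let seed = (1u64 << bit_size) + 1; // 2^bit_size ≡ 0, so seed ≡ 1 (mod nth_root)
    (0..count).map(move |k| seed + k * nth_root) // each step preserves the residue 1
}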
rns/src/modulus/impl_u64/mod.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
pub mod barrett;
pub mod generation;
pub mod montgomery;
pub mod operations;
pub mod prime;

use crate::modulus::ReduceOnce;

impl ReduceOnce<u64> for u64 {
    /// Subtracts q if self >= q, in constant time.
    /// Requires self < 2q.
    #[inline(always)]
    fn reduce_once_constant_time_assign(&mut self, q: u64) {
        debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
        // mask is 1 iff self >= q (given self < 2q < 2^64).
        let mask = (self.wrapping_sub(q) >> 63) ^ 1;
        *self -= mask * q;
    }

    /// Returns self - q if self >= q, else self, in constant time.
    #[inline(always)]
    fn reduce_once_constant_time(&self, q: u64) -> u64 {
        debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
        *self - (((self.wrapping_sub(q) >> 63) ^ 1) * q)
    }

    /// Subtracts q if self >= q.
    #[inline(always)]
    fn reduce_once_assign(&mut self, q: u64) {
        debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
        // If self < q, the subtraction wraps to a huge value and min keeps self.
        *self = (*self).min(self.wrapping_sub(q))
    }

    /// Returns self - q if self >= q, else self.
    #[inline(always)]
    fn reduce_once(&self, q: u64) -> u64 {
        debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
        (*self).min(self.wrapping_sub(q))
    }
}
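The branchless trick above pushes the comparison into the top bit: for q < 2^63 and x < 2q, x.wrapping_sub(q) has its top bit set exactly when x < q, so both the mask variant and the min variant select x or x - q without a branch. A standalone check of the identity (names are ours):

// Standalone check: for q < 2^63 and x < 2q,
// min(x, x.wrapping_sub(q)) == x mod q.
fn reduce_once(x: u64, q: u64) -> u64 {
    debug_assert!(q < (1u64 << 63) && x < 2 * q);
    x.min(x.wrapping_sub(q))
}

fn main() {
    let q = 97u64;
    for x in 0..2 * q {
        assert_eq!(reduce_once(x, q), x % q);
    }
}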
rns/src/modulus/impl_u64/montgomery.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
use crate::modulus::barrett::BarrettPrecomp;
use crate::modulus::montgomery::{Montgomery, MontgomeryPrecomp};
use crate::modulus::ReduceOnce;
use crate::modulus::{ONCE, REDUCEMOD};
extern crate test;

/// Methods implemented on MontgomeryPrecomp<u64>,
/// enabling Montgomery arithmetic over u64 values.
impl MontgomeryPrecomp<u64> {
    /// Returns a new instance of MontgomeryPrecomp<u64>.
    /// Panics if gcd(q, 2^64) != 1, i.e. if q is even.
    #[inline(always)]
    pub fn new(q: u64) -> MontgomeryPrecomp<u64> {
        assert!(
            q & 1 != 0,
            "Invalid argument: gcd(q={}, radix=2^64) != 1",
            q
        );
        // After the loop, q_inv = q^(2^63 - 1) = q^-1 mod 2^64,
        // since q^(2^63) = 1 for any odd q.
        let mut q_inv: u64 = 1;
        let mut q_pow = q;
        for _i in 0..63 {
            q_inv = q_inv.wrapping_mul(q_pow);
            q_pow = q_pow.wrapping_mul(q_pow);
        }
        let mut precomp: MontgomeryPrecomp<u64> = Self {
            q,
            two_q: q << 1,
            four_q: q << 2,
            barrett: BarrettPrecomp::new(q),
            q_inv,
            one: 0,
            minus_one: 0,
        };

        precomp.one = precomp.prepare::<ONCE>(1);
        precomp.minus_one = q - precomp.one;

        precomp
    }

    /// Returns 2^64 mod q as a Montgomery<u64>.
    #[inline(always)]
    pub fn one(&self) -> Montgomery<u64> {
        self.one
    }

    /// Returns (q-1) * 2^64 mod q as a Montgomery<u64>.
    #[inline(always)]
    pub fn minus_one(&self) -> Montgomery<u64> {
        self.minus_one
    }

    /// Applies a modular reduction on x based on REDUCE:
    /// - NONE: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - TWICE: subtracts 2q if x >= 2q.
    /// - FOURTIMES: subtracts 4q if x >= 4q.
    /// - BARRETT: maps x to x mod q using Barrett reduction.
    /// - BARRETTLAZY: Barrett reduction with output in [0, 2q-1].
    #[inline(always)]
    pub fn reduce<const REDUCE: REDUCEMOD>(&self, x: u64) -> u64 {
        let mut r: u64 = x;
        self.reduce_assign::<REDUCE>(&mut r);
        r
    }

    /// See [Self::reduce]; in-place variant.
    #[inline(always)]
    pub fn reduce_assign<const REDUCE: REDUCEMOD>(&self, x: &mut u64) {
        self.barrett.reduce_assign::<REDUCE>(x);
    }

    /// Returns lhs * 2^64 mod q as a Montgomery<u64>.
    #[inline(always)]
    pub fn prepare<const REDUCE: REDUCEMOD>(&self, lhs: u64) -> Montgomery<u64> {
        let mut rhs: u64 = 0;
        self.prepare_assign::<REDUCE>(lhs, &mut rhs);
        rhs
    }

    /// Assigns lhs * 2^64 mod q to rhs.
    #[inline(always)]
    pub fn prepare_assign<const REDUCE: REDUCEMOD>(&self, lhs: u64, rhs: &mut Montgomery<u64>) {
        let (_, mhi) = lhs.widening_mul(*self.barrett.value_lo());
        *rhs = (lhs.wrapping_mul(*self.barrett.value_hi()).wrapping_add(mhi))
            .wrapping_mul(self.q)
            .wrapping_neg();
        self.reduce_assign::<REDUCE>(rhs);
    }

    /// Returns lhs * (2^64)^-1 mod q as a u64.
    #[inline(always)]
    pub fn unprepare<const REDUCE: REDUCEMOD>(&self, lhs: Montgomery<u64>) -> u64 {
        let mut rhs = 0u64;
        self.unprepare_assign::<REDUCE>(lhs, &mut rhs);
        rhs
    }

    /// Assigns lhs * (2^64)^-1 mod q to rhs.
    #[inline(always)]
    pub fn unprepare_assign<const REDUCE: REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: &mut u64) {
        let (_, r) = self.q.widening_mul(lhs.wrapping_mul(self.q_inv));
        *rhs = self.reduce::<REDUCE>(self.q.wrapping_sub(r));
    }

    /// Returns lhs * rhs * (2^64)^-1 mod q.
    #[inline(always)]
    pub fn mul_external<const REDUCE: REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: u64) -> u64 {
        let mut r: u64 = rhs;
        self.mul_external_assign::<REDUCE>(lhs, &mut r);
        r
    }

    /// Assigns lhs * rhs * (2^64)^-1 mod q to rhs (Montgomery REDC).
    #[inline(always)]
    pub fn mul_external_assign<const REDUCE: REDUCEMOD>(
        &self,
        lhs: Montgomery<u64>,
        rhs: &mut u64,
    ) {
        let (mlo, mhi) = lhs.widening_mul(*rhs);
        let (_, hhi) = self.q.widening_mul(mlo.wrapping_mul(self.q_inv));
        *rhs = self.reduce::<REDUCE>(mhi.wrapping_sub(hhi).wrapping_add(self.q));
    }

    /// Returns lhs * rhs * (2^64)^-1 mod q; before the final REDUCE
    /// the intermediate result lies in [0, 2q-1].
    #[inline(always)]
    pub fn mul_internal<const REDUCE: REDUCEMOD>(
        &self,
        lhs: Montgomery<u64>,
        rhs: Montgomery<u64>,
    ) -> Montgomery<u64> {
        self.mul_external::<REDUCE>(lhs, rhs)
    }

    /// Assigns lhs * rhs * (2^64)^-1 mod q to rhs.
    #[inline(always)]
    pub fn mul_internal_assign<const REDUCE: REDUCEMOD>(
        &self,
        lhs: Montgomery<u64>,
        rhs: &mut Montgomery<u64>,
    ) {
        self.mul_external_assign::<REDUCE>(lhs, rhs);
    }

    /// Returns lhs + rhs (no reduction).
    #[inline(always)]
    pub fn add_internal(&self, lhs: Montgomery<u64>, rhs: Montgomery<u64>) -> Montgomery<u64> {
        rhs + lhs
    }

    /// Assigns lhs + rhs to rhs (no reduction).
    #[inline(always)]
    pub fn add_internal_lazy_assign(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>) {
        *rhs += lhs
    }

    /// Assigns lhs + rhs - q if (lhs + rhs) >= q, else lhs + rhs, to rhs.
    #[inline(always)]
    pub fn add_internal_reduce_once_assign<const LAZY: bool>(
        &self,
        lhs: Montgomery<u64>,
        rhs: &mut Montgomery<u64>,
    ) {
        self.add_internal_lazy_assign(lhs, rhs);
        rhs.reduce_once_assign(self.q);
    }

    /// Returns (x^exponent) * 2^64 mod q (square-and-multiply).
    #[inline(always)]
    pub fn pow(&self, x: Montgomery<u64>, exponent: u64) -> Montgomery<u64> {
        let mut y: Montgomery<u64> = self.one();
        let mut x_mut: Montgomery<u64> = x;
        let mut i: u64 = exponent;
        while i > 0 {
            if i & 1 == 1 {
                self.mul_internal_assign::<ONCE>(x_mut, &mut y);
            }
            self.mul_internal_assign::<ONCE>(x_mut, &mut x_mut);
            i >>= 1;
        }

        y.reduce_once_assign(self.q);
        y
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::modulus::montgomery;
    use test::Bencher;

    #[test]
    fn test_mul_external() {
        let q: u64 = 0x1fffffffffe00001;
        let m_precomp = montgomery::MontgomeryPrecomp::new(q);
        let x: u64 = 0x5f876e514845cc8b;
        let y: u64 = 0xad726f98f24a761a;
        let y_mont = m_precomp.prepare::<ONCE>(y);
        assert!(
            m_precomp.mul_external::<ONCE>(y_mont, x) == (x as u128 * y as u128 % q as u128) as u64
        );
    }

    #[bench]
    fn bench_mul_external(b: &mut Bencher) {
        let q: u64 = 0x1fffffffffe00001;
        let m_precomp = montgomery::MontgomeryPrecomp::new(q);
        let mut x: u64 = 0x5f876e514845cc8b;
        let y: u64 = 0xad726f98f24a761a;
        let y_mont = m_precomp.prepare::<ONCE>(y);
        b.iter(|| m_precomp.mul_external_assign::<ONCE>(y_mont, &mut x));
    }
}
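A sanity check implied by the definitions above: prepare multiplies by 2^64 mod q and unprepare multiplies by its inverse, so composing the two is the identity on [0, q). A hedged sketch in the style of the tests above (not part of the commit):

#[cfg(test)]
mod roundtrip_tests {
    use crate::modulus::montgomery::MontgomeryPrecomp;
    use crate::modulus::ONCE;

    #[test]
    fn test_prepare_unprepare_roundtrip() {
        let q: u64 = 0x1fffffffffe00001;
        let m = MontgomeryPrecomp::new(q);
        let x: u64 = 0x5f876e514845cc8b % q;
        // prepare: x -> x * 2^64 mod q; unprepare: y -> y * 2^-64 mod q.
        let x_mont = m.prepare::<ONCE>(x);
        assert_eq!(m.unprepare::<ONCE>(x_mont), x);
    }
}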
rns/src/modulus/impl_u64/operations.rs (new file, 707 lines)
@@ -0,0 +1,707 @@
use crate::modulus::barrett::Barrett;
use crate::modulus::montgomery::Montgomery;
use crate::modulus::prime::Prime;
use crate::modulus::{ScalarOperations, VectorOperations};
use crate::modulus::{NONE, REDUCEMOD};
use crate::{
    apply_ssv, apply_sv, apply_v, apply_vsssvv, apply_vssv, apply_vsv, apply_vv, apply_vvssv,
    apply_vvsv, apply_vvv,
};
use itertools::izip;

impl ScalarOperations<u64> for Prime<u64> {
    /// Applies a modular reduction on a based on REDUCE:
    /// - NONE: no modular reduction.
    /// - ONCE: subtracts q if a >= q.
    /// - TWICE: subtracts 2q if a >= 2q.
    /// - FOURTIMES: subtracts 4q if a >= 4q.
    /// - BARRETT: maps a to a mod q using Barrett reduction.
    /// - BARRETTLAZY: maps a to a mod q using Barrett reduction with values in [0, 2q-1].
    #[inline(always)]
    fn sa_reduce_into_sa<const REDUCE: REDUCEMOD>(&self, a: &mut u64) {
        self.montgomery.reduce_assign::<REDUCE>(a);
    }

    #[inline(always)]
    fn sa_add_sb_into_sc<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64) {
        *c = a.wrapping_add(*b);
        self.sa_reduce_into_sa::<REDUCE>(c);
    }

    #[inline(always)]
    fn sa_add_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
        *b = a.wrapping_add(*b);
        self.sa_reduce_into_sa::<REDUCE>(b);
    }

    #[inline(always)]
    fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &mut u64,
    ) {
        // SBRANGE declares the range of b as [0, SBRANGE*q), so adding
        // SBRANGE*q keeps the difference non-negative.
        match SBRANGE {
            1 => *c = *a + self.q - *b,
            2 => *c = *a + self.two_q - *b,
            4 => *c = *a + self.four_q - *b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(c)
    }

    #[inline(always)]
    fn sa_sub_sb_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut u64) {
        match SBRANGE {
            1 => *a = *a + self.q - *b,
            2 => *a = *a + self.two_q - *b,
            4 => *a = *a + self.four_q - *b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(a)
    }

    #[inline(always)]
    fn sa_sub_sb_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
        match SBRANGE {
            1 => *b = *a + self.q - *b,
            2 => *b = *a + self.two_q - *b,
            4 => *b = *a + self.four_q - *b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(b)
    }

    #[inline(always)]
    fn sa_neg_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut u64) {
        match SBRANGE {
            1 => *a = self.q - *a,
            2 => *a = self.two_q - *a,
            4 => *a = self.four_q - *a,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(a)
    }

    #[inline(always)]
    fn sa_neg_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
        match SBRANGE {
            1 => *b = self.q - *a,
            2 => *b = self.two_q - *a,
            4 => *b = self.four_q - *a,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(b)
    }

    #[inline(always)]
    fn sa_prepare_montgomery_into_sa<const REDUCE: REDUCEMOD>(&self, a: &mut Montgomery<u64>) {
        *a = self.montgomery.prepare::<REDUCE>(*a);
    }

    #[inline(always)]
    fn sa_prepare_montgomery_into_sb<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &mut Montgomery<u64>,
    ) {
        self.montgomery.prepare_assign::<REDUCE>(*a, b);
    }

    #[inline(always)]
    fn sa_mul_sb_montgomery_into_sc<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &Montgomery<u64>,
        c: &mut u64,
    ) {
        *c = self.montgomery.mul_external::<REDUCE>(*a, *b);
    }

    #[inline(always)]
    fn sa_mul_sb_montgomery_add_sc_into_sc<const REDUCE1: REDUCEMOD, const REDUCE2: REDUCEMOD>(
        &self,
        a: &u64,
        b: &Montgomery<u64>,
        c: &mut u64,
    ) {
        *c += self.montgomery.mul_external::<REDUCE1>(*a, *b);
        self.sa_reduce_into_sa::<REDUCE2>(c);
    }

    #[inline(always)]
    fn sa_mul_sb_montgomery_into_sa<const REDUCE: REDUCEMOD>(
        &self,
        b: &Montgomery<u64>,
        a: &mut u64,
    ) {
        self.montgomery.mul_external_assign::<REDUCE>(*b, a);
    }

    #[inline(always)]
    fn sa_mul_sb_barrett_into_sc<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &Barrett<u64>,
        c: &mut u64,
    ) {
        *c = self.barrett.mul_external::<REDUCE>(b, a);
    }

    #[inline(always)]
    fn sa_mul_sb_barrett_into_sa<const REDUCE: REDUCEMOD>(&self, b: &Barrett<u64>, a: &mut u64) {
        self.barrett.mul_external_assign::<REDUCE>(b, a);
    }

    #[inline(always)]
    fn sa_sub_sb_mul_sc_barrett_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &Barrett<u64>,
        d: &mut u64,
    ) {
        match VBRANGE {
            1 => *d = *a + self.q - *b,
            2 => *d = *a + self.two_q - *b,
            4 => *d = *a + self.four_q - *b,
            _ => unreachable!("invalid VBRANGE argument"),
        }
        self.barrett.mul_external_assign::<REDUCE>(c, d);
    }

    #[inline(always)]
    fn sa_sub_sb_mul_sc_barrett_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        c: &Barrett<u64>,
        b: &mut u64,
    ) {
        self.sa_sub_sb_into_sb::<SBRANGE, NONE>(a, b);
        self.barrett.mul_external_assign::<REDUCE>(c, b);
    }

    #[inline(always)]
    fn sa_add_sb_mul_sc_barrett_into_sd<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &Barrett<u64>,
        d: &mut u64,
    ) {
        *d = self.barrett.mul_external::<REDUCE>(c, &(*a + *b));
    }

    #[inline(always)]
    fn sa_add_sb_mul_sc_barrett_into_sa<const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        c: &Barrett<u64>,
        a: &mut u64,
    ) {
        *a = self.barrett.mul_external::<REDUCE>(c, &(*a + *b));
    }

    #[inline(always)]
    fn sb_sub_sa_add_sc_mul_sd_barrett_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &u64,
        d: &Barrett<u64>,
        e: &mut u64,
    ) {
        self.sa_sub_sb_into_sc::<SBRANGE, NONE>(&(*b + *c), a, e);
        self.barrett.mul_external_assign::<REDUCE>(d, e);
    }

    #[inline(always)]
    fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        c: &u64,
        d: &Barrett<u64>,
        a: &mut u64,
    ) {
        self.sa_sub_sb_into_sb::<SBRANGE, NONE>(&(*b + *c), a);
        self.barrett.mul_external_assign::<REDUCE>(d, a);
    }

    #[inline(always)]
    fn sa_rsh_sb_mask_sc_into_sa(&self, b: &usize, c: &u64, a: &mut u64) {
        *a = (*a >> b) & c
    }

    #[inline(always)]
    fn sa_rsh_sb_mask_sc_into_sd(&self, a: &u64, b: &usize, c: &u64, d: &mut u64) {
        *d = (*a >> b) & c
    }

    #[inline(always)]
    fn sa_rsh_sb_mask_sc_add_sd_into_sd(&self, a: &u64, b: &usize, c: &u64, d: &mut u64) {
        *d += (*a >> b) & c
    }

    #[inline(always)]
    fn sa_signed_digit_into_sb<const CARRYOVERWRITE: bool, const BALANCED: bool>(
        &self,
        a: &u64,
        base: &u64,
        shift: &usize,
        mask: &u64,
        carry: &mut u64,
        b: &mut u64,
    ) {
        if CARRYOVERWRITE {
            self.sa_rsh_sb_mask_sc_into_sd(a, shift, mask, carry);
        } else {
            self.sa_rsh_sb_mask_sc_add_sd_into_sd(a, shift, mask, carry);
        }

        // If BALANCED and the digit equals base/2, the tie is broken by
        // the parity of a; otherwise the carry-out is derived from the
        // top bits of the digit.
        let c: u64 = if BALANCED && *carry == base >> 1 {
            a & 1
        } else {
            ((*carry | (*carry << 1)) >> shift) & 1
        };

        *b = *carry + (self.q - base) * c;
        *carry = c;
    }
}

impl VectorOperations<u64> for Prime<u64> {
    /// Applies a modular reduction on each element of a based on REDUCE:
    /// - NONE: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - TWICE: subtracts 2q if x >= 2q.
    /// - FOURTIMES: subtracts 4q if x >= 4q.
    /// - BARRETT: maps x to x mod q using Barrett reduction.
    /// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1].
    #[inline(always)]
    fn va_reduce_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, a: &mut [u64]) {
        apply_v!(self, Self::sa_reduce_into_sa::<REDUCE>, a, CHUNK);
    }

    #[inline(always)]
    fn va_add_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_add_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    #[inline(always)]
    fn va_add_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_add_sb_into_sb::<REDUCE>, a, b, CHUNK);
    }

    #[inline(always)]
    fn va_add_sb_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        a: &mut [u64],
    ) {
        apply_sv!(self, Self::sa_add_sb_into_sb::<REDUCE>, b, a, CHUNK);
    }

    #[inline(always)]
    fn va_add_sb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &u64,
        c: &mut [u64],
    ) {
        apply_vsv!(self, Self::sa_add_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    #[inline(always)]
    fn va_sub_vb_into_vc<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_sub_sb_into_sc::<VBRANGE, REDUCE>, a, b, c, CHUNK);
    }

    #[inline(always)]
    fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        b: &[u64],
        a: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_sub_sb_into_sa::<VBRANGE, REDUCE>, b, a, CHUNK);
    }

    #[inline(always)]
    fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_sub_sb_into_sb::<VBRANGE, REDUCE>, a, b, CHUNK);
    }

    #[inline(always)]
    fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &mut [u64],
    ) {
        apply_v!(self, Self::sa_neg_into_sa::<VARANGE, REDUCE>, a, CHUNK);
    }

    #[inline(always)]
    fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_neg_into_sb::<VARANGE, REDUCE>, a, b, CHUNK);
    }

    #[inline(always)]
    fn va_prep_mont_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [Montgomery<u64>],
    ) {
        apply_vv!(self, Self::sa_prepare_montgomery_into_sb::<REDUCE>, a, b, CHUNK);
    }

    #[inline(always)]
    fn va_prepare_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &mut [Montgomery<u64>],
    ) {
        apply_v!(self, Self::sa_prepare_montgomery_into_sa::<REDUCE>, a, CHUNK);
    }

    #[inline(always)]
    fn va_mul_vb_montgomery_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[Montgomery<u64>],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_mul_sb_montgomery_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    #[inline(always)]
    fn va_mul_vb_montgomery_add_vc_into_vc<
        const CHUNK: usize,
        const REDUCE1: REDUCEMOD,
        const REDUCE2: REDUCEMOD,
    >(
        &self,
        a: &[Montgomery<u64>],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(
            self,
            Self::sa_mul_sb_montgomery_add_sc_into_sc::<REDUCE1, REDUCE2>,
            a,
            b,
            c,
            CHUNK
        );
    }

    #[inline(always)]
    fn va_mul_vb_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &[Montgomery<u64>],
        a: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_mul_sb_montgomery_into_sa::<REDUCE>, b, a, CHUNK);
    }

    #[inline(always)]
    fn va_mul_sb_barrett_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &Barrett<u64>,
        c: &mut [u64],
    ) {
        apply_vsv!(self, Self::sa_mul_sb_barrett_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    #[inline(always)]
    fn va_mul_sb_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &Barrett<u64>,
        a: &mut [u64],
    ) {
        apply_sv!(self, Self::sa_mul_sb_barrett_into_sa::<REDUCE>, b, a, CHUNK);
    }

    fn va_sub_vb_mul_sc_barrett_into_vd<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        a: &[u64],
        b: &[u64],
        c: &Barrett<u64>,
        d: &mut [u64],
    ) {
        apply_vvsv!(
            self,
            Self::sa_sub_sb_mul_sc_barrett_into_sd::<VBRANGE, REDUCE>,
            a,
            b,
            c,
            d,
            CHUNK
        );
    }

    fn va_sub_vb_mul_sc_barrett_into_vb<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        a: &[u64],
        b: &Barrett<u64>,
        c: &mut [u64],
    ) {
        apply_vsv!(
            self,
            Self::sa_sub_sb_mul_sc_barrett_into_sb::<VBRANGE, REDUCE>,
            a,
            b,
            c,
            CHUNK
        );
    }

    // vec(a) <- (vec(a) + scalar(b)) * scalar(c).
    fn va_add_sb_mul_sc_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        c: &Barrett<u64>,
        a: &mut [u64],
    ) {
        apply_ssv!(self, Self::sa_add_sb_mul_sc_barrett_into_sa::<REDUCE>, b, c, a, CHUNK);
    }

    // vec(d) <- (vec(a) + scalar(b)) * scalar(c).
    fn va_add_sb_mul_sc_barrett_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &u64,
        c: &Barrett<u64>,
        d: &mut [u64],
    ) {
        apply_vssv!(
            self,
            Self::sa_add_sb_mul_sc_barrett_into_sd::<REDUCE>,
            a,
            b,
            c,
            d,
            CHUNK
        );
    }

    // vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(d).
    fn vb_sub_va_add_sc_mul_sd_barrett_into_ve<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        va: &[u64],
        vb: &[u64],
        sc: &u64,
        sd: &Barrett<u64>,
        ve: &mut [u64],
    ) {
        apply_vvssv!(
            self,
            Self::sb_sub_sa_add_sc_mul_sd_barrett_into_se::<VBRANGE, REDUCE>,
            va,
            vb,
            sc,
            sd,
            ve,
            CHUNK
        );
    }

    // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(d).
    fn vb_sub_va_add_sc_mul_sd_barrett_into_va<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        vb: &[u64],
        sc: &u64,
        sd: &Barrett<u64>,
        va: &mut [u64],
    ) {
        apply_vssv!(
            self,
            Self::sb_sub_sa_add_sc_mul_sd_barrett_into_sa::<VBRANGE, REDUCE>,
            vb,
            sc,
            sd,
            va,
            CHUNK
        );
    }

    // vec(a) <- (vec(a) >> scalar(b)) & scalar(c).
    fn va_rsh_sb_mask_sc_into_va<const CHUNK: usize>(&self, sb: &usize, sc: &u64, va: &mut [u64]) {
        apply_ssv!(self, Self::sa_rsh_sb_mask_sc_into_sa, sb, sc, va, CHUNK);
    }

    // vec(d) <- (vec(a) >> scalar(b)) & scalar(c).
    fn va_rsh_sb_mask_sc_into_vd<const CHUNK: usize>(
        &self,
        va: &[u64],
        sb: &usize,
        sc: &u64,
        vd: &mut [u64],
    ) {
        apply_vssv!(self, Self::sa_rsh_sb_mask_sc_into_sd, va, sb, sc, vd, CHUNK);
    }

    // vec(d) <- vec(d) + ((vec(a) >> scalar(b)) & scalar(c)).
    fn va_rsh_sb_mask_sc_add_vd_into_vd<const CHUNK: usize>(
        &self,
        va: &[u64],
        sb: &usize,
        sc: &u64,
        vd: &mut [u64],
    ) {
        apply_vssv!(self, Self::sa_rsh_sb_mask_sc_add_sd_into_sd, va, sb, sc, vd, CHUNK);
    }

    // vec(c) <- i-th unsigned digit base 2^{sb} of vec(a).
    // vec(c) is ensured to be in the range [0, 2^{sb}) with E[vec(c)] = (2^{sb}-1)/2.
    fn va_ith_digit_unsigned_base_sb_into_vc<const CHUNK: usize>(
        &self,
        i: usize,
        va: &[u64],
        sb: &usize,
        vc: &mut [u64],
    ) {
        self.va_rsh_sb_mask_sc_into_vd::<CHUNK>(va, &(i * sb), &((1 << sb) - 1), vc);
    }

    // vec(c) <- i-th signed digit base 2^{sb} of vec(a).
    // Reads the carry of the (i-1)-th iteration and writes the carry of the
    // i-th iteration into carry; if i > 0, the carry of the (i-1)-th
    // iteration must be provided.
    // If BALANCED: vec(c) is ensured to be in [-2^{sb-1}, 2^{sb-1}) with
    // E[vec(c)] = 0, else E[vec(c)] = -0.5.
    fn va_ith_digit_signed_base_sb_into_vc<const CHUNK: usize, const BALANCED: bool>(
        &self,
        i: usize,
        va: &[u64],
        sb: &usize,
        carry: &mut [u64],
        vc: &mut [u64],
    ) {
        let base: u64 = 1 << sb;
        let mask: u64 = base - 1;
        if i == 0 {
            apply_vsssvv!(
                self,
                Self::sa_signed_digit_into_sb::<true, BALANCED>,
                va,
                &base,
                &(i * sb),
                &mask,
                carry,
                vc,
                CHUNK
            );
        } else {
            apply_vsssvv!(
                self,
                Self::sa_signed_digit_into_sb::<false, BALANCED>,
                va,
                &base,
                &(i * sb),
                &mask,
                carry,
                vc,
                CHUNK
            );
        }
    }
}
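For orientation, a hedged sketch of how this element-wise API reads at a call site; CHUNK is presumably the unrolling/chunking factor consumed by the apply_* macros (their definitions are not part of this diff), and Prime::new comes from the next file:

// Sketch, not part of the commit: c[i] = (a[i] + b[i]) mod q with an
// unrolling hint of 8, then an in-place negation of c (inputs in [0, q)).
use crate::modulus::prime::Prime;
use crate::modulus::{VectorOperations, ONCE};

fn example(a: &[u64], b: &[u64], c: &mut [u64]) {
    let p: Prime<u64> = Prime::new(0x1fffffffffe00001, 1);
    p.va_add_vb_into_vc::<8, ONCE>(a, b, c);
    p.va_neg_into_va::<8, 1, ONCE>(c);
}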
rns/src/modulus/impl_u64/prime.rs (new file, 228 lines)
@@ -0,0 +1,228 @@
use crate::modulus::barrett::BarrettPrecomp;
use crate::modulus::montgomery::{Montgomery, MontgomeryPrecomp};
use crate::modulus::prime::Prime;
use crate::modulus::ONCE;
use primality_test::is_prime;
use prime_factorization::Factorization;

impl Prime<u64> {
    /// Returns a new instance of Prime<u64>.
    /// Panics if q_base is not a prime > 2 or
    /// if q_base^q_power would overflow u64.
    pub fn new(q_base: u64, q_power: usize) -> Self {
        assert!(is_prime(q_base) && q_base > 2);
        Self::new_unchecked(q_base, q_power)
    }

    /// Returns a new instance of Prime<u64>.
    /// Does not check that q_base is a prime > 2.
    /// Panics if q_base^q_power would overflow u64.
    pub fn new_unchecked(q_base: u64, q_power: usize) -> Self {
        let mut q = q_base;
        for _i in 1..q_power {
            q *= q_base
        }

        assert!(q.next_power_of_two().ilog2() <= 61);

        // phi = Euler totient of q = q_base^q_power, i.e. (q_base-1) * q_base^(q_power-1).
        let mut phi = q_base - 1;
        for _i in 1..q_power {
            phi *= q_base
        }

        let mut prime: Prime<u64> = Self {
            q,
            two_q: q << 1,
            four_q: q << 2,
            q_base,
            q_power,
            factors: Vec::new(),
            montgomery: MontgomeryPrecomp::new(q),
            barrett: BarrettPrecomp::new(q),
            phi,
        };

        prime.check_factors();

        prime
    }

    pub fn q(&self) -> u64 {
        self.q
    }

    pub fn q_base(&self) -> u64 {
        self.q_base
    }

    pub fn q_power(&self) -> usize {
        self.q_power
    }

    /// Returns x^exponent mod q.
    #[inline(always)]
    pub fn pow(&self, x: u64, exponent: u64) -> u64 {
        let mut y_mont: Montgomery<u64> = self.montgomery.one();
        let mut x_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(x);
        let mut i: u64 = exponent;
        while i > 0 {
            if i & 1 == 1 {
                self.montgomery
                    .mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
            }

            self.montgomery
                .mul_internal_assign::<ONCE>(x_mont, &mut x_mont);

            i >>= 1;
        }

        self.montgomery.unprepare::<ONCE>(y_mont)
    }

    /// Returns x^-1 mod q, computed as x^(phi-1).
    /// The caller must ensure that x is not divisible by q_base.
    #[inline(always)]
    pub fn inv(&self, x: u64) -> u64 {
        self.pow(x, self.phi - 1)
    }
}

impl Prime<u64> {
    /// Returns the smallest primitive root modulo q_base.
    pub fn primitive_root(&self) -> u64 {
        let mut candidate: u64 = 1u64;
        let mut not_found: bool = true;

        // candidate is a primitive root iff candidate^((q_base-1)/factor) != 1
        // mod q_base for every distinct prime factor of q_base-1.
        while not_found {
            candidate += 1;

            for &factor in &self.factors {
                if pow(candidate, (self.q_base - 1) / factor, self.q_base) == 1 {
                    not_found = true;
                    break;
                }
                not_found = false;
            }
        }

        candidate
    }

    /// Returns an nth primitive root of unity modulo q = q_base^q_power.
    /// nth_root must be a power of two, and q must satisfy q = 1 mod nth_root.
    pub fn primitive_nth_root(&self, nth_root: u64) -> u64 {
        assert!(
            self.q & (nth_root - 1) == 1,
            "invalid prime: q={} mod nth_root={} is {} != 1",
            self.q,
            nth_root,
            self.q & (nth_root - 1)
        );

        let psi: u64 = self.primitive_root();

        // nth primitive root mod q_base: psi^((q_base-1)/nth_root) mod q_base.
        let psi_nth_q_base: u64 = pow(psi, (self.q_base - 1) / nth_root, self.q_base);

        // Lifts the nth primitive root mod q_base to q = q_base^q_power.
        let psi_nth_q: u64 = self.hensel_lift(psi_nth_q_base, nth_root);

        assert!(
            self.pow(psi_nth_q, nth_root) == 1,
            "invalid nth primitive root: psi^nth_root != 1 mod q"
        );
        assert!(
            self.pow(psi_nth_q, nth_root >> 1) == self.q - 1,
            "invalid nth primitive root: psi^(nth_root/2) != -1 mod q"
        );

        psi_nth_q
    }

    /// Checks whether the field self.factors is populated.
    /// If not, factorizes q_base-1 and populates self.factors with its
    /// distinct prime factors.
    /// If yes, checks that it contains exactly the distinct prime factors
    /// of q_base-1.
    pub fn check_factors(&mut self) {
        if self.factors.len() == 0 {
            let factors = Factorization::run(self.q_base - 1).prime_factor_repr();
            let mut distinct_factors: Vec<u64> = Vec::with_capacity(factors.len());
            for factor in factors.iter() {
                distinct_factors.push(factor.0)
            }
            self.factors = distinct_factors
        } else {
            let mut phi_base: u64 = self.q_base - 1;

            for &factor in &self.factors {
                if !is_prime(factor) {
                    panic!("invalid factor list: factor {} is not prime", factor)
                }

                while phi_base % factor == 0 {
                    phi_base /= factor
                }
            }

            if phi_base != 1 {
                panic!(
                    "invalid factor list: does not fully divide q_base-1: remainder = {}",
                    phi_base
                )
            }
        }
    }

    /// Given psi with psi^nth_root = 1 mod q_base, lifts it to a root of
    /// the same order mod q = q_base^q_power by Newton (Hensel) iteration:
    /// psi <- psi + (1 - psi^nth_root) / (nth_root * psi^(nth_root-1)) mod q.
    /// Panics if psi^nth_root != 1 mod q_base.
    fn hensel_lift(&self, psi: u64, nth_root: u64) -> u64 {
        assert!(
            pow(psi, nth_root, self.q_base) == 1,
            "invalid argument psi: psi^nth_root = {} != 1",
            pow(psi, nth_root, self.q_base)
        );

        let mut psi_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(psi);
        let nth_root_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(nth_root);

        for _i in 1..self.q_power {
            let psi_pow: Montgomery<u64> = self.montgomery.pow(psi_mont, nth_root - 1);

            // num = 1 - psi^nth_root mod q (shifted by +q to stay non-negative).
            let num: Montgomery<u64> = self.montgomery.one() + self.q
                - self.montgomery.mul_internal::<ONCE>(psi_pow, psi_mont);

            // den = (nth_root * psi^(nth_root-1))^-1, via den^(phi-1).
            let mut den: Montgomery<u64> =
                self.montgomery.mul_internal::<ONCE>(nth_root_mont, psi_pow);

            den = self.montgomery.pow(den, self.phi - 1);

            psi_mont = self
                .montgomery
                .add_internal(psi_mont, self.montgomery.mul_internal::<ONCE>(num, den));
        }

        self.montgomery.unprepare::<ONCE>(psi_mont)
    }
}

/// Returns x^exponent mod q.
/// Internally instantiates a fresh MontgomeryPrecomp<u64>;
/// to be used when called only a few times and when no Prime
/// instantiated with q is available.
pub fn pow(x: u64, exponent: u64, q: u64) -> u64 {
    let montgomery: MontgomeryPrecomp<u64> = MontgomeryPrecomp::<u64>::new(q);
    let mut y_mont: Montgomery<u64> = montgomery.one();
    let mut x_mont: Montgomery<u64> = montgomery.prepare::<ONCE>(x);
    let mut i: u64 = exponent;
    while i > 0 {
        if i & 1 == 1 {
            montgomery.mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
        }

        montgomery.mul_internal_assign::<ONCE>(x_mont, &mut x_mont);

        i >>= 1;
    }

    montgomery.unprepare::<ONCE>(y_mont)
}
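A quick worked check of the standalone pow (the values are ours, and the module path is assumed from the file layout): 3^5 = 243 = 14*17 + 5, so pow(3, 5, 17) should be 5.

// Sketch: square-and-multiply through Montgomery form, for odd moduli.
use crate::modulus::impl_u64::prime::pow; // module path assumed

fn main() {
    assert_eq!(pow(3, 5, 17), 5); // 243 mod 17 = 5
    assert_eq!(pow(2, 10, 1_000_003), 1024); // no reduction needed here
}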
rns/src/modulus/montgomery.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
use crate::modulus::barrett::BarrettPrecomp;

/// Montgomery is a generic type alias marking
/// an element in the Montgomery domain.
pub type Montgomery<O> = O;

/// MontgomeryPrecomp is a generic struct storing
/// the precomputations for Montgomery arithmetic.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MontgomeryPrecomp<O> {
    pub q: O,
    pub two_q: O,
    pub four_q: O,
    pub barrett: BarrettPrecomp<O>,
    pub q_inv: O,
    pub one: Montgomery<O>,
    pub minus_one: Montgomery<O>,
}
rns/src/modulus/prime.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
use crate::modulus::barrett::BarrettPrecomp;
use crate::modulus::montgomery::MontgomeryPrecomp;

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Prime<O> {
    /// q = q_base^q_power
    pub q: O,
    pub two_q: O,
    pub four_q: O,
    pub q_base: O,
    pub q_power: usize,
    /// distinct prime factors of q_base-1
    pub factors: Vec<O>,
    pub montgomery: MontgomeryPrecomp<O>,
    pub barrett: BarrettPrecomp<O>,
    /// Euler totient of q
    pub phi: O,
}

pub struct NTTFriendlyPrimesGenerator<O> {
    pub size: f64,
    pub nth_root: O,
    pub next_prime: O,
    pub prev_prime: O,
    pub check_next_prime: bool,
    pub check_prev_prime: bool,
}