Mirror of https://github.com/arnaucube/poulpy.git, synced 2026-02-10 05:06:44 +01:00
Added const for modular reduction, simplified API
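For context: the commit threads a new `REDUCEMOD` const-generic parameter (`NONE`, `ONCE`, `TWICE`, `FOURTIMES`, `BARRETT`, `BARRETTLAZY`) through the modular-arithmetic API, so the reduction policy is chosen at compile time instead of through separate `*_lazy` method variants. A minimal sketch of the resulting call-site shape, using paths from the diff below (the `example` function itself is hypothetical, not part of the commit):

```rust
use math::ring::Ring;
use math::modulus::{Operations, ONCE};

// Hypothetical caller: p1[i] += p0[i] mod q, unrolled in chunks of 8, with
// one conditional subtraction per coefficient (the ONCE policy).
fn example(r: &Ring<u64>) {
    let p0 = r.new_poly();
    let mut p1 = r.new_poly();
    r.modulus.add_vec_unary_assign::<8, ONCE>(&p0.0, &mut p1.0);
}
```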
@@ -1,6 +1,10 @@
 use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
 use math::ring::Ring;
 use math::modulus::Operations;
+use math::modulus::montgomery::Montgomery;
+use math::modulus::{NONE, ONCE};

+const CHUNK: usize= 8;
+
 fn add_vec_unary(c: &mut Criterion) {
     fn runner(r: Ring<u64>) -> Box<dyn FnMut()> {
@@ -11,9 +15,8 @@ fn add_vec_unary(c: &mut Criterion) {
             p0.0[i] = i as u64;
             p1.0[i] = i as u64;
         }
-        println!("{}", r.n());
         Box::new(move || {
-            r.modulus.add_vec_unary_assign::<8>(&p0.0, &mut p1.0);
+            r.modulus.add_vec_unary_assign::<CHUNK, ONCE>(&p0.0, &mut p1.0);
         })
     }

@@ -36,5 +39,38 @@ fn add_vec_unary(c: &mut Criterion) {
     }
 }

-criterion_group!(benches, add_vec_unary);
+fn mul_vec_montgomery_external_unary_assign(c: &mut Criterion) {
+    fn runner(r: Ring<u64>) -> Box<dyn FnMut()> {
+
+        let mut p0: math::poly::Poly<Montgomery<u64>> = r.new_poly_montgomery();
+        let mut p1: math::poly::Poly<u64> = r.new_poly();
+        for i in 0..p0.n(){
+            p0.0[i] = r.modulus.montgomery.prepare::<ONCE>(i as u64);
+            p1.0[i] = i as u64;
+        }
+        Box::new(move || {
+            r.modulus.mul_vec_montgomery_external_unary_assign::<CHUNK, NONE>(&p0.0, &mut p1.0);
+        })
+    }
+
+    let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("mul_vec_montgomery_external_unary_assign");
+    for log_n in 11..17 {
+
+        let n: usize = 1<<log_n as usize;
+        let q_base: u64 = 0x1fffffffffe00001u64;
+        let q_power: usize = 1usize;
+        let r: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
+        let runners = [
+            ("prime", {
+                runner(r)
+            }),
+        ];
+        for (name, mut runner) in runners {
+            let id = BenchmarkId::new(name, n);
+            b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
+        }
+    }
+}
+
+criterion_group!(benches, add_vec_unary, mul_vec_montgomery_external_unary_assign);
 criterion_main!(benches);
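The `CHUNK` const generic exists so the element-wise loops can be unrolled at compile time (the crate routes this through its `apply_unary`/`apply_binary`/`apply_ternary` macros). A self-contained sketch of the pattern, with a hypothetical helper in place of those macros:

```rust
// Minimal illustration of CHUNK-style unrolling via const generics. The inner
// loop has a constant trip count, so the compiler can fully unroll it.
fn add_assign_chunked<const CHUNK: usize>(a: &[u64], b: &mut [u64], q: u64) {
    assert_eq!(a.len(), b.len());
    let mut i = 0;
    while i + CHUNK <= a.len() {
        for j in 0..CHUNK {
            let t = a[i + j].wrapping_add(b[i + j]);
            b[i + j] = t.min(t.wrapping_sub(q)); // conditional subtraction (ONCE)
        }
        i += CHUNK;
    }
    for j in i..a.len() { // scalar tail
        let t = a[j].wrapping_add(b[j]);
        b[j] = t.min(t.wrapping_sub(q));
    }
}

fn main() {
    let q = 97u64;
    let a = vec![50u64; 10];
    let mut b = vec![60u64; 10];
    add_assign_chunked::<4>(&a, &mut b, q);
    assert!(b.iter().all(|&x| x == 13)); // (50 + 60) mod 97 = 13
    println!("ok");
}
```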
@@ -3,6 +3,7 @@ use crate::modulus::shoup::Shoup;
 use crate::modulus::prime::Prime;
 use crate::modulus::ReduceOnce;
 use crate::modulus::WordOps;
+use crate::modulus::ONCE;
 use crate::dft::DFT;
 use itertools::izip;

@@ -22,7 +23,7 @@ impl Table< u64> {

         let psi: u64 = prime.primitive_nth_root(nth_root);

-        let psi_mont: Montgomery<u64> = prime.montgomery.prepare(psi);
+        let psi_mont: Montgomery<u64> = prime.montgomery.prepare::<ONCE>(psi);
         let psi_inv_mont: Montgomery<u64> = prime.montgomery.pow(psi_mont, prime.phi-1);

         let mut psi_forward_rev: Vec<Shoup<u64>> = vec![Shoup(0, 0); (nth_root >> 1) as usize];
@@ -40,8 +41,8 @@ impl Table< u64> {

             let i_rev: usize = i.reverse_bits_msb(log_nth_root_half);

-            prime.montgomery.mul_external_assign(psi_mont, &mut powers_forward);
+            prime.montgomery.mul_external_assign::<ONCE>(psi_mont, &mut powers_forward);
-            prime.montgomery.mul_external_assign(psi_inv_mont, &mut powers_backward);
+            prime.montgomery.mul_external_assign::<ONCE>(psi_inv_mont, &mut powers_backward);

             psi_forward_rev[i_rev] = prime.shoup.prepare(powers_forward);
             psi_backward_rev[i_rev] = prime.shoup.prepare(powers_backward);
@@ -61,7 +62,7 @@ impl Table< u64> {

     // Returns n^-1 mod q in Montgomery.
     fn inv(&self, n:u64) -> Montgomery<u64>{
-        self.prime.montgomery.pow(self.prime.montgomery.prepare(n), self.prime.phi-1)
+        self.prime.montgomery.pow(self.prime.montgomery.prepare::<ONCE>(n), self.prime.phi-1)
     }
 }

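For reference, the table built above stores successive powers of `psi` at bit-reversed indices, which is the layout the NTT butterflies read sequentially. A stripped-down sketch without the Montgomery/Shoup precomputation (`q` and `psi` below are illustrative, and `half` is assumed to be a power of two, at least 2):

```rust
// A running power of psi is written at the bit-reversed index, mirroring the
// reverse_bits_msb + mul_external_assign loop above.
fn twiddles_bit_reversed(psi: u64, q: u64, half: usize) -> Vec<u64> {
    let log_half = half.trailing_zeros(); // half is a power of two >= 2
    let mut table = vec![0u64; half];
    let mut power: u64 = 1; // psi^0
    for i in 0..half {
        // reverse the low log_half bits of i
        let i_rev = i.reverse_bits() >> (usize::BITS - log_half);
        table[i_rev] = power;
        power = (power as u128 * psi as u128 % q as u128) as u64;
    }
    table
}

fn main() {
    // 3 is a primitive 4th root of unity mod 5 (3^2 = 4 = -1 mod 5).
    println!("{:?}", twiddles_bit_reversed(3, 5, 2)); // [1, 3]
}
```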
@@ -4,7 +4,14 @@ pub mod montgomery;
 pub mod shoup;
 pub mod impl_u64;

+pub type REDUCEMOD = u8;
+
+pub const NONE: REDUCEMOD = 0;
+pub const ONCE: REDUCEMOD = 1;
+pub const TWICE: REDUCEMOD = 2;
+pub const FOURTIMES: REDUCEMOD = 3;
+pub const BARRETT: REDUCEMOD = 4;
+pub const BARRETTLAZY: REDUCEMOD = 5;

 pub trait WordOps<O>{
     fn log2(self) -> O;
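`REDUCEMOD` is an ordinary `u8` used as a const-generic tag: each policy is a named constant, and a `match` on the parameter is resolved during monomorphization, so dead arms disappear from the generated code. A self-contained sketch of the dispatch pattern (constants mirrored from the hunk above, the `reduce` body is illustrative):

```rust
pub type REDUCEMOD = u8;
pub const NONE: REDUCEMOD = 0;
pub const ONCE: REDUCEMOD = 1;

// reduce::<NONE> compiles to a no-op, reduce::<ONCE> to a single
// conditional subtraction: the match is decided at compile time.
#[inline(always)]
fn reduce<const REDUCE: REDUCEMOD>(x: u64, q: u64) -> u64 {
    match REDUCE {
        NONE => x,
        ONCE => x.min(x.wrapping_sub(q)),
        _ => unreachable!("invalid REDUCE argument"),
    }
}

fn main() {
    assert_eq!(reduce::<NONE>(150, 97), 150);
    assert_eq!(reduce::<ONCE>(150, 97), 53);
    println!("ok");
}
```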
@@ -64,47 +71,61 @@ impl ReduceOnce<u64> for u64{
     #[inline(always)]
     fn reduce_once_assign(&mut self, q: u64){
         debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
-        *self = (*self).min(self.wrapping_sub(q))
+        *self = *self.min(&mut self.wrapping_sub(q))
     }

     #[inline(always)]
     fn reduce_once(&self, q:u64) -> u64{
         debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
-        (*self).min(self.wrapping_sub(q))
+        *self.min(&mut self.wrapping_sub(q))
     }
 }


 pub trait Operations<O>{
     // Assigns a + b to c.
-    fn add_binary_assign(&self, a: &O, b:&O, c: &mut O);
+    fn add_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b:&O, c: &mut O);

     // Assigns a + b to b.
-    fn add_unary_assign(&self, a: &O, b: &mut O);
+    fn add_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut O);

     // Assigns a[i] + b[i] to c[i]
-    fn add_vec_binary_assign<const CHUNK:usize>(&self, a: &[O], b:&[O], c: &mut [O]);
+    fn add_vec_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&[O], c: &mut [O]);

     // Assigns a[i] + b[i] to b[i]
-    fn add_vec_unary_assign<const CHUNK:usize>(&self, a: &[O], b: &mut [O]);
+    fn add_vec_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);

     // Assigns a - b to c.
-    fn sub_binary_assign(&self, a: &O, b:&O, c: &mut O);
+    fn sub_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b:&O, c: &mut O);

     // Assigns b - a to b.
-    fn sub_unary_assign(&self, a: &O, b: &mut O);
+    fn sub_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut O);

     // Assigns a[i] - b[i] to c[i]
-    fn sub_vec_binary_assign<const CHUNK:usize>(&self, a: &[O], b:&[O], c: &mut [O]);
+    fn sub_vec_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&[O], c: &mut [O]);

     // Assigns a[i] - b[i] to b[i]
-    fn sub_vec_unary_assign<const CHUNK:usize>(&self, a: &[O], b: &mut [O]);
+    fn sub_vec_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);

     // Assigns -a to a.
-    fn neg_assign(&self, a:&mut O);
+    fn neg_assign<const REDUCE:REDUCEMOD>(&self, a:&mut O);

     // Assigns -a[i] to a[i].
-    fn neg_vec_assign<const CHUNK:usize>(&self, a: &mut [O]);
+    fn neg_vec_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [O]);
+
+    // Assigns a * b to c.
+    fn mul_montgomery_external_binary_assign<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&O, c: &mut O);
+
+    // Assigns a * b to b.
+    fn mul_montgomery_external_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&mut O);
+
+    // Assigns a[i] * b[i] to c[i].
+    fn mul_vec_montgomery_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&[O], c: &mut [O]);
+
+    // Assigns a[i] * b[i] to b[i].
+    fn mul_vec_montgomery_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&mut [O]);
 }


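A note on the `min`/`wrapping_sub` idiom in `reduce_once` above: it is a branchless "subtract q once if x >= q". The `debug_assert` pins q below 2^63, so when x < q the wrapped difference lands at or above 2^63 and is never selected by `min`. A quick standalone check:

```rust
fn reduce_once(x: u64, q: u64) -> u64 {
    debug_assert!(q < 1u64 << 63); // intermediate values live in [0, 2q)
    x.min(x.wrapping_sub(q))
}

fn main() {
    let q = 97u64;
    assert_eq!(reduce_once(100, q), 3);  // 100 >= q: x - q is smaller, pick it
    assert_eq!(reduce_once(42, q), 42);  // 42 < q: 42 - 97 wraps above 2^63, keep 42
    println!("ok");
}
```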
@@ -1,7 +1,8 @@

 use crate::modulus::ReduceOnce;
 use crate::modulus::montgomery::{MontgomeryPrecomp, Montgomery};
-use crate::modulus::barrett::{BarrettPrecomp};
+use crate::modulus::barrett::BarrettPrecomp;
+use crate::modulus::{REDUCEMOD, NONE, ONCE, TWICE, FOURTIMES, BARRETT, BARRETTLAZY};
 extern crate test;

 /// MontgomeryPrecomp is a set of methods implemented for MontgomeryPrecomp<u64>
@@ -20,14 +21,16 @@ impl MontgomeryPrecomp<u64>{
             q_pow = q_pow.wrapping_mul(q_pow);
         }
         let mut precomp = Self{
             q: q,
+            two_q: q<<1,
+            four_q: q<<2,
             barrett: BarrettPrecomp::new(q),
             q_inv: q_inv,
             one: Montgomery(0),
             minus_one: Montgomery(0),
         };

-        precomp.one = precomp.prepare(1);
+        precomp.one = precomp.prepare::<ONCE>(1);
         precomp.minus_one = Montgomery(q-precomp.one.value());

         precomp
@@ -45,120 +48,91 @@ impl MontgomeryPrecomp<u64>{
         self.minus_one
     }

+    /// Applies a modular reduction on x based on REDUCE:
+    /// - LAZY: no modular reduction.
+    /// - ONCE: subtracts q if x >= q.
+    /// - FULL: maps x to x mod q using Barrett reduction.
+    #[inline(always)]
+    pub fn reduce<const REDUCE:REDUCEMOD>(&self, x: u64) -> u64{
+        let mut r: u64 = x;
+        self.reduce_assign::<REDUCE>(&mut r);
+        r
+    }
+
+    /// Applies a modular reduction on x based on REDUCE:
+    /// - LAZY: no modular reduction.
+    /// - ONCE: subtracts q if x >= q.
+    /// - FULL: maps x to x mod q using Barrett reduction.
+    #[inline(always)]
+    pub fn reduce_assign<const REDUCE:REDUCEMOD>(&self, x: &mut u64){
+        match REDUCE {
+            NONE =>{},
+            ONCE =>{x.reduce_once_assign(self.q)},
+            TWICE=>{x.reduce_once_assign(self.two_q)},
+            FOURTIMES =>{x.reduce_once_assign(self.four_q)},
+            BARRETT =>{self.barrett.reduce_assign(x)},
+            BARRETTLAZY =>{self.barrett.reduce_assign(x)},
+            _ => unreachable!("invalid REDUCE argument")
+        }
+    }
+
     /// Returns lhs * 2^64 mod q as a Montgomery<u64>.
     #[inline(always)]
-    pub fn prepare(&self, lhs: u64) -> Montgomery<u64>{
+    pub fn prepare<const REDUCE:REDUCEMOD>(&self, lhs: u64) -> Montgomery<u64>{
         let mut rhs = Montgomery(0);
-        self.prepare_assign(lhs, &mut rhs);
+        self.prepare_assign::<REDUCE>(lhs, &mut rhs);
         rhs
     }

     /// Assigns lhs * 2^64 mod q to rhs.
     #[inline(always)]
-    pub fn prepare_assign(&self, lhs: u64, rhs: &mut Montgomery<u64>){
+    pub fn prepare_assign<const REDUCE:REDUCEMOD>(&self, lhs: u64, rhs: &mut Montgomery<u64>){
-        self.prepare_lazy_assign(lhs, rhs);
-        rhs.value_mut().reduce_once_assign(self.q);
-    }
-
-    /// Returns lhs * 2^64 mod q in range [0, 2q-1] as a Montgomery<u64>.
-    #[inline(always)]
-    pub fn prepare_lazy(&self, lhs: u64) -> Montgomery<u64>{
-        let mut rhs = Montgomery(0);
-        self.prepare_lazy_assign(lhs, &mut rhs);
-        rhs
-    }
-
-    /// Assigns lhs * 2^64 mod q in range [0, 2q-1] to rhs.
-    #[inline(always)]
-    pub fn prepare_lazy_assign(&self, lhs: u64, rhs: &mut Montgomery<u64>){
         let (_, mhi) = lhs.widening_mul(*self.barrett.value_lo());
         *rhs = Montgomery((lhs.wrapping_mul(*self.barrett.value_hi()).wrapping_add(mhi)).wrapping_mul(self.q).wrapping_neg());
+        self.reduce_assign::<REDUCE>(rhs.value_mut());
     }

     /// Returns lhs * (2^64)^-1 mod q as a u64.
     #[inline(always)]
-    pub fn unprepare(&self, lhs: Montgomery<u64>) -> u64{
+    pub fn unprepare<const REDUCE:REDUCEMOD>(&self, lhs: Montgomery<u64>) -> u64{
         let mut rhs = 0u64;
-        self.unprepare_assign(lhs, &mut rhs);
+        self.unprepare_assign::<REDUCE>(lhs, &mut rhs);
         rhs
     }

     /// Assigns lhs * (2^64)^-1 mod q to rhs.
     #[inline(always)]
-    pub fn unprepare_assign(&self, lhs: Montgomery<u64>, rhs: &mut u64){
+    pub fn unprepare_assign<const REDUCE:REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: &mut u64){
-        self.unprepare_lazy_assign(lhs, rhs);
-        rhs.reduce_once_assign(self.q);
-    }
-
-    /// Returns lhs * (2^64)^-1 mod q in range [0, 2q-1].
-    #[inline(always)]
-    pub fn unprepare_lazy(&self, lhs: Montgomery<u64>) -> u64{
-        let mut rhs = 0u64;
-        self.unprepare_lazy_assign(lhs, &mut rhs);
-        rhs
-    }
-
-    /// Assigns lhs * (2^64)^-1 mod q in range [0, 2q-1] to rhs.
-    #[inline(always)]
-    pub fn unprepare_lazy_assign(&self, lhs: Montgomery<u64>, rhs: &mut u64){
         let (_, r) = self.q.widening_mul(lhs.value().wrapping_mul(self.q_inv));
-        *rhs = self.q - r
+        *rhs = self.reduce::<REDUCE>(self.q.wrapping_sub(r));
     }

     /// Returns lhs * rhs * (2^{64})^-1 mod q.
     #[inline(always)]
-    pub fn mul_external(&self, lhs: Montgomery<u64>, rhs: u64) -> u64{
+    pub fn mul_external<const REDUCE:REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: u64) -> u64{
-        let mut r = self.mul_external_lazy(lhs, rhs);
+        let mut r: u64 = rhs;
-        r.reduce_once_assign(self.q);
+        self.mul_external_assign::<REDUCE>(lhs, &mut r);
         r
     }

     /// Assigns lhs * rhs * (2^{64})^-1 mod q to rhs.
     #[inline(always)]
-    pub fn mul_external_assign(&self, lhs: Montgomery<u64>, rhs: &mut u64){
+    pub fn mul_external_assign<const REDUCE:REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: &mut u64){
-        self.mul_external_lazy_assign(lhs, rhs);
-        rhs.reduce_once_assign(self.q);
-    }
-
-    /// Returns lhs * rhs * (2^{64})^-1 mod q in range [0, 2q-1].
-    #[inline(always)]
-    pub fn mul_external_lazy(&self, lhs: Montgomery<u64>, rhs: u64) -> u64{
-        let mut result: u64 = rhs;
-        self.mul_external_lazy_assign(lhs, &mut result);
-        result
-    }
-
-    /// Assigns lhs * rhs * (2^{64})^-1 mod q in range [0, 2q-1] to rhs.
-    #[inline(always)]
-    pub fn mul_external_lazy_assign(&self, lhs: Montgomery<u64>, rhs: &mut u64){
         let (mlo, mhi) = lhs.value().widening_mul(*rhs);
         let (_, hhi) = self.q.widening_mul(mlo.wrapping_mul(self.q_inv));
-        *rhs = mhi.wrapping_add(self.q - hhi)
+        *rhs = self.reduce::<REDUCE>(mhi.wrapping_sub(hhi).wrapping_add(self.q));
     }

     /// Returns lhs * rhs * (2^{64})^-1 mod q in range [0, 2q-1].
     #[inline(always)]
-    pub fn mul_internal(&self, lhs: Montgomery<u64>, rhs: Montgomery<u64>) -> Montgomery<u64>{
+    pub fn mul_internal<const REDUCE:REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: Montgomery<u64>) -> Montgomery<u64>{
-        Montgomery(self.mul_external(lhs, *rhs.value()))
+        Montgomery(self.mul_external::<REDUCE>(lhs, *rhs.value()))
     }

     /// Assigns lhs * rhs * (2^{64})^-1 mod q to rhs.
     #[inline(always)]
-    pub fn mul_internal_assign(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>){
+    pub fn mul_internal_assign<const REDUCE:REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>){
-        self.mul_external_assign(lhs, rhs.value_mut());
+        self.mul_external_assign::<REDUCE>(lhs, rhs.value_mut());
-    }
-
-    /// Returns lhs * rhs * (2^{64})^-1 mod q in range [0, 2q-1].
-    #[inline(always)]
-    pub fn mul_internal_lazy(&self, lhs: Montgomery<u64>, rhs: Montgomery<u64>) -> Montgomery<u64>{
-        Montgomery(self.mul_external_lazy(lhs, *rhs.value()))
-    }
-
-    /// Assigns lhs * rhs * (2^{64})^-1 mod q in range [0, 2q-1] to rhs.
-    #[inline(always)]
-    pub fn mul_internal_lazy_assign(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>){
-        self.mul_external_lazy_assign(lhs, rhs.value_mut());
     }

     #[inline(always)]
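As a sanity check on the conventions used in this file (`prepare` maps x to x * 2^64 mod q, and `mul_external` maps (x * 2^64 mod q, y) to x * y mod q), here is a self-contained round trip built on u128 arithmetic rather than the crate's Barrett precomputations. The modulus is the same illustrative 61-bit prime the benchmarks use:

```rust
const Q: u64 = 0x1fffffffffe00001;

/// -q^{-1} mod 2^64 by Newton iteration (q must be odd).
fn neg_qinv(q: u64) -> u64 {
    let mut inv = q; // for odd q, q*q = 1 mod 8: correct to 3 bits
    for _ in 0..5 {
        inv = inv.wrapping_mul(2u64.wrapping_sub(q.wrapping_mul(inv)));
    }
    inv.wrapping_neg()
}

/// REDC: t * 2^{-64} mod q, valid for t < q * 2^64.
fn redc(t: u128, q: u64, nqinv: u64) -> u64 {
    let m = (t as u64).wrapping_mul(nqinv);
    let r = ((t + m as u128 * q as u128) >> 64) as u64; // exact by choice of m
    r.min(r.wrapping_sub(q)) // reduce once: r < 2q before this line
}

fn main() {
    let nqinv = neg_qinv(Q);
    let (x, y) = (0x5f876e514845cc8bu64 % Q, 0xad726f98f24a761au64 % Q);
    let y_mont = (((y as u128) << 64) % Q as u128) as u64;  // prepare(y)
    let got = redc(y_mont as u128 * x as u128, Q, nqinv);   // mul_external(y_mont, x)
    assert_eq!(got as u128, x as u128 * y as u128 % Q as u128);
    println!("ok");
}
```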
@@ -174,27 +148,11 @@ impl MontgomeryPrecomp<u64>{

     /// Assigns lhs + rhs - q if (lhs + rhs) >= q to rhs.
     #[inline(always)]
-    pub fn add_internal_reduce_once_assign(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>){
+    pub fn add_internal_reduce_once_assign<const LAZY:bool>(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>){
         self.add_internal_lazy_assign(lhs, rhs);
         rhs.value_mut().reduce_once_assign(self.q);
     }

-    #[inline(always)]
-    pub fn reduce(&self, lhs: u64) -> u64{
-        self.barrett.reduce(lhs)
-    }
-
-    /// Returns lhs mod q in range [0, 2q-1].
-    #[inline(always)]
-    pub fn reduce_lazy(&self, lhs: u64) -> u64{
-        self.barrett.reduce_lazy(lhs)
-    }
-
-    #[inline(always)]
-    pub fn reduce_assign(&self, lhs: &mut u64){
-        self.barrett.reduce_assign(lhs)
-    }
-
     /// Returns lhs mod q in range [0, 2q-1].
     #[inline(always)]
     pub fn reduce_lazy_assign(&self, lhs: &mut u64){
@@ -209,9 +167,9 @@ impl MontgomeryPrecomp<u64>{
         let mut i: u64 = exponent;
         while i > 0{
             if i & 1 == 1{
-                self.mul_internal_assign(x_mut, &mut y);
+                self.mul_internal_assign::<ONCE>(x_mut, &mut y);
             }
-            self.mul_internal_assign(x_mut, &mut x_mut);
+            self.mul_internal_assign::<ONCE>(x_mut, &mut x_mut);
             i >>= 1;
         }

@@ -227,16 +185,16 @@ impl MontgomeryPrecomp<u64>{
 fn pow(x:u64, exponent:u64, q:u64) -> u64{
     let montgomery: MontgomeryPrecomp<u64> = MontgomeryPrecomp::<u64>::new(q);
     let mut y_mont: Montgomery<u64> = montgomery.one();
-    let mut x_mont: Montgomery<u64> = montgomery.prepare(x);
+    let mut x_mont: Montgomery<u64> = montgomery.prepare::<ONCE>(x);
     while exponent > 0{
         if exponent & 1 == 1{
-            montgomery.mul_internal_assign(x_mont, &mut y_mont);
+            montgomery.mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
         }

-        montgomery.mul_internal_assign(x_mont, &mut x_mont);
+        montgomery.mul_internal_assign::<ONCE>(x_mont, &mut x_mont);
     }

-    montgomery.unprepare(y_mont)
+    montgomery.unprepare::<ONCE>(y_mont)
 }

 #[cfg(test)]
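The `pow` routines above are plain square-and-multiply kept entirely in Montgomery form. The same control flow over ordinary integers, for reference:

```rust
// Square-and-multiply, mirroring the loop structure of `pow` above but with
// u128 modular products instead of Montgomery multiplications.
fn pow_mod(mut x: u64, mut e: u64, q: u64) -> u64 {
    let mut y: u64 = 1; // plays the role of montgomery.one()
    while e > 0 {
        if e & 1 == 1 {
            y = (y as u128 * x as u128 % q as u128) as u64; // mul into y
        }
        x = (x as u128 * x as u128 % q as u128) as u64;     // square x
        e >>= 1;
    }
    y
}

fn main() {
    assert_eq!(pow_mod(3, 5, 1_000_003), 243); // 3^5 = 243
    println!("ok");
}
```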
@@ -251,8 +209,8 @@ mod tests {
         let m_precomp = montgomery::MontgomeryPrecomp::new(q);
         let x: u64 = 0x5f876e514845cc8b;
         let y: u64 = 0xad726f98f24a761a;
-        let y_mont = m_precomp.prepare(y);
+        let y_mont = m_precomp.prepare::<ONCE>(y);
-        assert!(m_precomp.mul_external(y_mont, x) == (x as u128 * y as u128 % q as u128) as u64);
+        assert!(m_precomp.mul_external::<ONCE>(y_mont, x) == (x as u128 * y as u128 % q as u128) as u64);
     }

     #[bench]
@@ -261,7 +219,7 @@ mod tests {
         let m_precomp = montgomery::MontgomeryPrecomp::new(q);
         let mut x: u64 = 0x5f876e514845cc8b;
         let y: u64 = 0xad726f98f24a761a;
-        let y_mont = m_precomp.prepare(y);
+        let y_mont = m_precomp.prepare::<ONCE>(y);
-        b.iter(|| m_precomp.mul_external_assign(y_mont, &mut x));
+        b.iter(|| m_precomp.mul_external_assign::<ONCE>(y_mont, &mut x));
     }
 }
@@ -2,59 +2,83 @@
 use crate::modulus::Operations;
 use crate::modulus::prime::Prime;
 use crate::modulus::ReduceOnce;
+use crate::modulus::montgomery::Montgomery;
+use crate::modulus::{REDUCEMOD, NONE, ONCE, BARRETT, BARRETTLAZY};
 use crate::{apply_unary, apply_binary, apply_ternary};
 use itertools::izip;

 impl Operations<u64> for Prime<u64>{

     #[inline(always)]
-    fn add_binary_assign(&self, a: &u64, b: &u64, c: &mut u64){
+    fn add_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64){
-        *c = a.wrapping_add(*b).reduce_once(self.q);
+        *c = a.wrapping_add(*b);
+        self.montgomery.reduce_assign::<REDUCE>(c);
     }

     #[inline(always)]
-    fn add_unary_assign(&self, a: &u64, b: &mut u64){
+    fn add_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
-        *b = a.wrapping_add(*b).reduce_once(self.q);
+        *b = a.wrapping_add(*b);
+        self.montgomery.reduce_assign::<REDUCE>(b);
     }

     #[inline(always)]
-    fn add_vec_binary_assign<const CHUNK:usize>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
+    fn add_vec_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
-        apply_ternary!(self, Self::add_binary_assign, a, b, c, CHUNK);
+        apply_ternary!(self, Self::add_binary_assign::<REDUCE>, a, b, c, CHUNK);
     }

     #[inline(always)]
-    fn add_vec_unary_assign<const CHUNK:usize>(&self, a: &[u64], b:&mut [u64]){
+    fn add_vec_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
-        apply_binary!(self, Self::add_unary_assign, a, b, CHUNK);
+        apply_binary!(self, Self::add_unary_assign::<REDUCE>, a, b, CHUNK);
     }

     #[inline(always)]
-    fn sub_binary_assign(&self, a: &u64, b: &u64, c: &mut u64){
+    fn sub_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64){
         *c = a.wrapping_add(self.q.wrapping_sub(*b)).reduce_once(self.q);
     }

     #[inline(always)]
-    fn sub_unary_assign(&self, a: &u64, b: &mut u64){
+    fn sub_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
         *b = a.wrapping_add(self.q.wrapping_sub(*b)).reduce_once(self.q);
     }

     #[inline(always)]
-    fn sub_vec_binary_assign<const CHUNK:usize>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
+    fn sub_vec_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
-        apply_ternary!(self, Self::sub_binary_assign, a, b, c, CHUNK);
+        apply_ternary!(self, Self::sub_binary_assign::<REDUCE>, a, b, c, CHUNK);
     }

     #[inline(always)]
-    fn sub_vec_unary_assign<const CHUNK:usize>(&self, a: &[u64], b:&mut [u64]){
+    fn sub_vec_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
-        apply_binary!(self, Self::sub_unary_assign, a, b, CHUNK);
+        apply_binary!(self, Self::sub_unary_assign::<REDUCE>, a, b, CHUNK);
     }

     #[inline(always)]
-    fn neg_assign(&self, a: &mut u64){
+    fn neg_assign<const REDUCE:REDUCEMOD>(&self, a: &mut u64){
         *a = self.q.wrapping_sub(*a);
+        self.montgomery.reduce_assign::<REDUCE>(a)
     }

     #[inline(always)]
-    fn neg_vec_assign<const CHUNK:usize>(&self, a: &mut [u64]){
+    fn neg_vec_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [u64]){
-        apply_unary!(self, Self::neg_assign, a, CHUNK);
+        apply_unary!(self, Self::neg_assign::<REDUCE>, a, CHUNK);
+    }
+
+    #[inline(always)]
+    fn mul_montgomery_external_binary_assign<const REDUCE:REDUCEMOD>(&self, a:& Montgomery<u64>, b:&u64, c: &mut u64){
+        *c = self.montgomery.mul_external::<REDUCE>(*a, *b);
+    }
+
+    #[inline(always)]
+    fn mul_montgomery_external_unary_assign<const REDUCE:REDUCEMOD>(&self, lhs:&Montgomery<u64>, rhs:&mut u64){
+        *rhs = self.montgomery.mul_external::<REDUCE>(*lhs, *rhs);
+    }
+
+    #[inline(always)]
+    fn mul_vec_montgomery_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&[u64], c: &mut [u64]){
+        apply_ternary!(self, Self::mul_montgomery_external_binary_assign::<REDUCE>, a, b, c, CHUNK);
+    }
+
+    #[inline(always)]
+    fn mul_vec_montgomery_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&[Montgomery<u64>], b:&mut [u64]){
+        apply_binary!(self, Self::mul_montgomery_external_unary_assign::<REDUCE>, a, b, CHUNK);
     }
 }

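The subtraction pattern above, `a + (q - b)` followed by `reduce_once`, works because for a, b in [0, q) the sum a + (q - b) lies in [0, 2q) and is congruent to a - b mod q, so a single conditional subtraction lands the result in [0, q). A standalone check:

```rust
fn sub_mod(a: u64, b: u64, q: u64) -> u64 {
    let t = a.wrapping_add(q.wrapping_sub(b)); // in [0, 2q) for a, b in [0, q)
    t.min(t.wrapping_sub(q))                   // reduce_once
}

fn main() {
    let q = 97u64;
    assert_eq!(sub_mod(10, 30, q), 77); // 10 - 30 = -20 = 77 mod 97
    assert_eq!(sub_mod(30, 10, q), 20);
    println!("ok");
}
```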
@@ -1,6 +1,7 @@
 use crate::modulus::prime::Prime;
 use crate::modulus::montgomery::{Montgomery, MontgomeryPrecomp};
 use crate::modulus::shoup::{ShoupPrecomp};
+use crate::modulus::ONCE;
 use primality_test::is_prime;
 use prime_factorization::Factorization;

@@ -63,20 +64,20 @@ impl Prime<u64>{
     #[inline(always)]
     pub fn pow(&self, x: u64, exponent: u64) -> u64{
         let mut y_mont: Montgomery<u64> = self.montgomery.one();
-        let mut x_mont: Montgomery<u64> = self.montgomery.prepare(x);
+        let mut x_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(x);
         let mut i: u64 = exponent;
         while i > 0{
             if i & 1 == 1{
-                self.montgomery.mul_internal_assign(x_mont, &mut y_mont);
+                self.montgomery.mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
             }

-            self.montgomery.mul_internal_assign(x_mont, &mut x_mont);
+            self.montgomery.mul_internal_assign::<ONCE>(x_mont, &mut x_mont);

             i >>= 1;

         }

-        self.montgomery.unprepare(y_mont)
+        self.montgomery.unprepare::<ONCE>(y_mont)
     }

     /// Returns x^-1 mod q.
@@ -164,9 +165,7 @@ impl Prime<u64>{
         if q_base != 1{
             panic!("invalid factor list: does not fully divide q_base: q_base % (all factors) = {}", q_base)
         }
     }
-
-
 }

 /// Returns (psi + a * q_base)^{nth_root} = 1 mod q = q_base^q_power given psi^{nth_root} = 1 mod q_base.
@@ -174,23 +173,23 @@ impl Prime<u64>{
     fn hensel_lift(&self, psi: u64, nth_root: u64) -> u64{
         assert!(Pow(psi, nth_root, self.q_base)==1, "invalid argument psi: psi^nth_root = {} != 1", Pow(psi, nth_root, self.q_base));

-        let mut psi_mont: Montgomery<u64> = self.montgomery.prepare(psi);
+        let mut psi_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(psi);
-        let nth_root_mont: Montgomery<u64> = self.montgomery.prepare(nth_root);
+        let nth_root_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(nth_root);

         for _i in 1..self.q_power{

             let psi_pow: Montgomery<u64> = self.montgomery.pow(psi_mont, nth_root-1);

-            let num: Montgomery<u64> = Montgomery(self.montgomery.one().value() + self.q - self.montgomery.mul_internal(psi_pow, psi_mont).value());
+            let num: Montgomery<u64> = Montgomery(self.montgomery.one().value() + self.q - self.montgomery.mul_internal::<ONCE>(psi_pow, psi_mont).value());

-            let mut den: Montgomery<u64> = self.montgomery.mul_internal(nth_root_mont, psi_pow);
+            let mut den: Montgomery<u64> = self.montgomery.mul_internal::<ONCE>(nth_root_mont, psi_pow);

             den = self.montgomery.pow(den, self.phi-1);

-            psi_mont = self.montgomery.add_internal(psi_mont, self.montgomery.mul_internal(num, den));
+            psi_mont = self.montgomery.add_internal(psi_mont, self.montgomery.mul_internal::<ONCE>(num, den));
         }

-        self.montgomery.unprepare(psi_mont)
+        self.montgomery.unprepare::<ONCE>(psi_mont)
     }
 }

@@ -201,17 +200,17 @@ impl Prime<u64>{
 pub fn Pow(x:u64, exponent:u64, q:u64) -> u64{
     let montgomery: MontgomeryPrecomp<u64> = MontgomeryPrecomp::<u64>::new(q);
     let mut y_mont: Montgomery<u64> = montgomery.one();
-    let mut x_mont: Montgomery<u64> = montgomery.prepare(x);
+    let mut x_mont: Montgomery<u64> = montgomery.prepare::<ONCE>(x);
     let mut i: u64 = exponent;
     while i > 0{
         if i & 1 == 1{
-            montgomery.mul_internal_assign(x_mont, &mut y_mont);
+            montgomery.mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
         }

-        montgomery.mul_internal_assign(x_mont, &mut x_mont);
+        montgomery.mul_internal_assign::<ONCE>(x_mont, &mut x_mont);

         i >>= 1;
     }

-    montgomery.unprepare(y_mont)
+    montgomery.unprepare::<ONCE>(y_mont)
 }
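The `hensel_lift` loop above is one Newton step per extra power of `q_base`, applied to f(psi) = psi^n - 1, with the division realized through the Euler inverse x^(phi-1) mod q (the `den = pow(den, phi-1)` line). In formula form:

```latex
% One Newton/Hensel step, n = nth_root, matching num/den in the code:
\psi \;\leftarrow\; \psi - \frac{f(\psi)}{f'(\psi)}
     \;=\; \psi + \frac{1 - \psi^{n}}{n\,\psi^{n-1}} \pmod{q},
\qquad \text{with } x^{-1} \equiv x^{\varphi - 1} \pmod{q}.
```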
@@ -23,11 +23,22 @@ impl<O> Montgomery<O>{
     }
 }

+/// Default instantiation.
+impl<O> Default for Montgomery<O> where O:Default {
+    fn default() -> Self {
+        Self {
+            0: O::default(),
+        }
+    }
+}
+
 /// MontgomeryPrecomp is a generic struct storing
 /// precomputations for Montgomery arithmetic.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct MontgomeryPrecomp<O>{
     pub q: O,
+    pub two_q: O,
+    pub four_q: O,
     pub barrett: BarrettPrecomp<O>,
     pub q_inv: O,
     pub one: Montgomery<O>,
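`Montgomery<O>` is a tuple struct, so the `0: O::default()` field syntax in the new `Default` impl is the positional field written by index; `#[derive(Default)]` would generate an equivalent impl. A standalone reduction:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Montgomery<O>(pub O);

// Equivalent to the impl in the hunk above: Self { 0: O::default() } and
// Montgomery(O::default()) build the same value.
impl<O: Default> Default for Montgomery<O> {
    fn default() -> Self {
        Montgomery(O::default())
    }
}

fn main() {
    // This is what lets vec![O::default(); n] work with O = Montgomery<u64>.
    let v = vec![Montgomery::<u64>::default(); 4];
    assert_eq!(v[3], Montgomery(0u64));
    println!("ok");
}
```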
src/poly.rs (12 changed lines):

@@ -3,16 +3,22 @@ pub mod poly;
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Poly<O>(pub Vec<O>);

-impl Poly<u64>{
+impl<O> Poly<O>where
+    O: Default + Clone,
+{
     pub fn new(n: usize) -> Self{
-        Self(vec![0u64;n])
+        Self(vec![O::default();n])
+    }
+
+    pub fn new_montgomery(n: usize) -> Self{
+        Self(vec![O::default();n])
     }

     pub fn buffer_size(&self) -> usize{
         return self.0.len()
     }

-    pub fn from_buffer(&mut self, n: usize, buf: &mut [u64]){
+    pub fn from_buffer(&mut self, n: usize, buf: &mut [O]){
         assert!(buf.len() >= n, "invalid buffer: buf.len()={} < n={}", buf.len(), n);
         self.0 = Vec::from(&buf[..n]);
     }
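With the impl block now generic over `O: Default + Clone`, one constructor body serves both coefficient representations; note that `new` and `new_montgomery` above have identical bodies, so the distinction is only documentation at call sites. A standalone reduction of the pattern:

```rust
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Poly<O>(pub Vec<O>);

impl<O: Default + Clone> Poly<O> {
    pub fn new(n: usize) -> Self {
        // O::default() plays the role that 0u64 played before the change.
        Self(vec![O::default(); n])
    }
}

fn main() {
    let plain: Poly<u64> = Poly::new(8);
    let pair: Poly<(u64, u64)> = Poly::new(8); // any O: Default + Clone works
    assert_eq!(plain.0.len(), pair.0.len());
    println!("ok");
}
```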
@@ -1,6 +1,7 @@
 use crate::ring::Ring;
 use crate::dft::ntt::Table;
 use crate::modulus::prime::Prime;
+use crate::modulus::montgomery::Montgomery;
 use crate::poly::Poly;

 impl Ring<u64>{
@@ -17,7 +18,15 @@ impl Ring<u64>{
         return self.n
     }

+    fn new_poly_core<O>(&self) -> Poly<O> where O: Default + Clone {
+        Poly::<O>::new(self.n())
+    }
+
     pub fn new_poly(&self) -> Poly<u64>{
-        Poly::<u64>::new(self.n())
+        self.new_poly_core::<u64>()
+    }
+
+    pub fn new_poly_montgomery(&self) -> Poly<Montgomery<u64>>{
+        self.new_poly_core::<Montgomery<u64>>()
     }
 }
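Taken together with the `src/poly.rs` change, the resulting constructor surface looks like this (paths taken from the diff; sketch only, not a standalone program):

```rust
use math::ring::Ring;
use math::modulus::montgomery::Montgomery;
use math::poly::Poly;

// Both public constructors funnel through the private new_poly_core::<O>()
// helper, so the element type is the only thing that differs.
fn allocate(r: &Ring<u64>) -> (Poly<u64>, Poly<Montgomery<u64>>) {
    (r.new_poly(), r.new_poly_montgomery())
}
```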