mirror of
https://github.com/arnaucube/poulpy.git
wip
@@ -18,3 +18,7 @@ harness = false
 [[bench]]
 name = "operations"
 harness = false
+
+[[bench]]
+name = "ring_rns"
+harness = false
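
With harness = false and the [[bench]] target registered above, the new benchmark can be invoked on its own through Cargo's standard bench selector:

    cargo bench --bench ring_rns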
math/benches/ring_rns.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use math::ring::{Ring, RingRNS};
+use math::ring::impl_u64::ring_rns::new_rings;
+use math::poly::PolyRNS;
+
+fn div_floor_by_last_modulus_ntt(c: &mut Criterion) {
+    fn runner(r: RingRNS<u64>) -> Box<dyn FnMut() + '_> {
+
+        let a: PolyRNS<u64> = r.new_polyrns();
+        let mut b: PolyRNS<u64> = r.new_polyrns();
+        let mut c: PolyRNS<u64> = r.new_polyrns();
+
+        Box::new(move || {
+            r.div_floor_by_last_modulus_ntt(&a, &mut b, &mut c)
+        })
+    }
+
+    let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("div_floor_by_last_modulus_ntt");
+    for log_n in 11..18 {
+
+        let n = 1<<log_n;
+        let moduli: Vec<u64> = vec![0x1fffffffffe00001u64, 0x1fffffffffc80001u64, 0x1fffffffffb40001, 0x1fffffffff500001];
+        let rings: Vec<Ring<u64>> = new_rings(n, moduli);
+        let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings);
+
+        let runners = [
+            (format!("prime/n={}/level={}", n, ring_rns.level()), {
+                runner(ring_rns)
+            }),
+        ];
+
+        for (name, mut runner) in runners {
+            b.bench_with_input(name, &(), |b, _| b.iter(&mut runner));
+        }
+    }
+}
+
+criterion_group!(benches, div_floor_by_last_modulus_ntt);
+criterion_main!(benches);
@@ -3,7 +3,7 @@ use crate::modulus::barrett::Barrett;
 use crate::modulus::prime::Prime;
 use crate::modulus::ReduceOnce;
 use crate::modulus::WordOps;
-use crate::modulus::ONCE;
+use crate::modulus::{NONE, ONCE, BARRETT};
 use crate::dft::DFT;
 use itertools::izip;

@@ -114,8 +114,8 @@ impl Table<u64>{
             izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| {
                 let (a, b) = a.split_at_mut(size);
                 self.dit_inplace::<true>(&mut a[0], &mut b[0], *psi);
-                self.prime.barrett.reduce_assign(&mut a[0]);
-                self.prime.barrett.reduce_assign(&mut b[0]);
+                self.prime.barrett.reduce_assign::<BARRETT>(&mut a[0]);
+                self.prime.barrett.reduce_assign::<BARRETT>(&mut b[0]);
                 debug_assert!(a[0] < self.q, "forward_inplace_core::<LAZY=false> output {} > {} (q-1)", a[0], self.q-1);
                 debug_assert!(b[0] < self.q, "forward_inplace_core::<LAZY=false> output {} > {} (q-1)", b[0], self.q-1);
             });
@@ -149,7 +149,7 @@ impl Table<u64>{
         debug_assert!(*a < self.four_q, "a:{} q:{}", a, self.four_q);
         debug_assert!(*b < self.four_q, "b:{} q:{}", b, self.four_q);
         a.reduce_once_assign(self.two_q);
-        let bt: u64 = self.prime.barrett.mul_external_lazy(t, *b);
+        let bt: u64 = self.prime.barrett.mul_external::<NONE>(t, *b);
         *b = a.wrapping_add(self.two_q-bt);
         *a = a.wrapping_add(bt);
         if !LAZY {
@@ -176,7 +176,7 @@ impl Table<u64>{
         if layer == 0 {

             let n_inv: Barrett<u64> = self.prime.barrett.prepare(self.prime.inv(n as u64));
-            let psi: Barrett<u64> = self.prime.barrett.prepare(self.prime.barrett.mul_external(n_inv, self.psi_backward_rev[1].0));
+            let psi: Barrett<u64> = self.prime.barrett.prepare(self.prime.barrett.mul_external::<ONCE>(n_inv, self.psi_backward_rev[1].0));

             izip!(a.chunks_exact_mut(2 * size)).for_each(
                 |a| {
@@ -225,7 +225,7 @@ impl Table<u64>{
     fn dif_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: Barrett<u64>) {
         debug_assert!(*a < self.two_q, "a:{} q:{}", a, self.four_q);
         debug_assert!(*b < self.two_q, "b:{} q:{}", b, self.four_q);
-        let d: u64 = self.prime.barrett.mul_external_lazy(t, *a + self.two_q - *b);
+        let d: u64 = self.prime.barrett.mul_external::<NONE>(t, *a + self.two_q - *b);
         *a = a.wrapping_add(*b);
         a.reduce_once_assign(self.two_q);
         *b = d;
@@ -239,12 +239,12 @@ impl Table<u64>{
         debug_assert!(*a < self.two_q);
         debug_assert!(*b < self.two_q);
         if LAZY{
-            let d: u64 = self.prime.barrett.mul_external_lazy(psi, *a + self.two_q - *b);
-            *a = self.prime.barrett.mul_external_lazy(n_inv, *a + *b);
+            let d: u64 = self.prime.barrett.mul_external::<NONE>(psi, *a + self.two_q - *b);
+            *a = self.prime.barrett.mul_external::<NONE>(n_inv, *a + *b);
             *b = d;
         }else{
-            let d: u64 = self.prime.barrett.mul_external(psi, *a + self.two_q - *b);
-            *a = self.prime.barrett.mul_external(n_inv, *a + *b);
+            let d: u64 = self.prime.barrett.mul_external::<ONCE>(psi, *a + self.two_q - *b);
+            *a = self.prime.barrett.mul_external::<ONCE>(n_inv, *a + *b);
             *b = d;
         }
     }
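
As context for the butterfly hunks above: dit_inplace works on a redundant representation (the debug_asserts allow inputs up to 4q), folds a into [0, 2q), and then forms a + t*b and a - t*b without a full reduction. A standalone sketch of that flow, with an exact modular product standing in for the lazy Barrett multiply (illustrative names, not the crate's API):

    // Illustrative DIT butterfly over u64, q odd and well below 2^62.
    fn dit_lazy(a: &mut u64, b: &mut u64, t: u64, q: u64, two_q: u64) {
        debug_assert!(*a < 4 * q && *b < 4 * q);
        if *a >= two_q { *a -= two_q; }                         // reduce_once_assign(2q)
        let bt = ((*b as u128 * t as u128) % q as u128) as u64; // stand-in for mul_external::<NONE>
        *b = *a + two_q - bt;                                   // a - t*b, shifted by 2q to stay non-negative
        *a += bt;                                               // both outputs stay below 4q
    }

    fn main() {
        let (q, two_q) = (97u64, 194u64);
        let (mut a, mut b, t) = (150u64, 60u64, 3u64);
        let (a0, b0) = (a, b);
        dit_lazy(&mut a, &mut b, t, q, two_q);
        assert_eq!(a % q, (a0 + t * b0) % q);
        assert_eq!(b % q, (a0 + q - (t * b0) % q) % q);
    }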
math/src/lib.rs (120 lines changed)
@@ -11,13 +11,12 @@ pub const CHUNK: usize= 8;
 pub mod macros{

     #[macro_export]
-    macro_rules! apply_unary {
-
+    macro_rules! apply_v {
         ($self:expr, $f:expr, $a:expr, $CHUNK:expr) => {

             match CHUNK{
                 8 => {

                     $a.chunks_exact_mut(8).for_each(|a| {
                         $f(&$self, &mut a[0]);
                         $f(&$self, &mut a[1]);
@@ -45,7 +44,7 @@ pub mod macros{
     }

     #[macro_export]
-    macro_rules! apply_binary {
+    macro_rules! apply_vv {

         ($self:expr, $f:expr, $a:expr, $b:expr, $CHUNK:expr) => {

@@ -82,12 +81,13 @@ pub mod macros{
     }

     #[macro_export]
-    macro_rules! apply_ternary {
+    macro_rules! apply_vvv {

         ($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $CHUNK:expr) => {

             let n: usize = $a.len();
             debug_assert!($b.len() == n, "invalid argument b: b.len() = {} != a.len() = {}", $b.len(), n);
             debug_assert!($c.len() == n, "invalid argument c: c.len() = {} != a.len() = {}", $c.len(), n);
+            debug_assert!(CHUNK&(CHUNK-1) == 0, "invalid CHUNK const: not a power of two");

             match CHUNK{
@@ -117,4 +117,116 @@ pub mod macros{
             }
         };
     }
+
+    #[macro_export]
+    macro_rules! apply_sv {
+
+        ($self:expr, $f:expr, $a:expr, $b:expr, $CHUNK:expr) => {
+
+            let n: usize = $b.len();
+
+            debug_assert!(CHUNK&(CHUNK-1) == 0, "invalid CHUNK const: not a power of two");
+
+            match CHUNK{
+                8 => {
+
+                    izip!($b.chunks_exact_mut(8)).for_each(|b| {
+                        $f(&$self, $a, &mut b[0]);
+                        $f(&$self, $a, &mut b[1]);
+                        $f(&$self, $a, &mut b[2]);
+                        $f(&$self, $a, &mut b[3]);
+                        $f(&$self, $a, &mut b[4]);
+                        $f(&$self, $a, &mut b[5]);
+                        $f(&$self, $a, &mut b[6]);
+                        $f(&$self, $a, &mut b[7]);
+                    });
+
+                    let m = n - (n&7);
+                    izip!($b[m..].iter_mut()).for_each(|b| {
+                        $f(&$self, $a, b);
+                    });
+                },
+                _=>{
+                    izip!($b.iter_mut()).for_each(|b| {
+                        $f(&$self, $a, b);
+                    });
+                }
+            }
+        };
+    }
+
+    #[macro_export]
+    macro_rules! apply_svv {
+
+        ($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $CHUNK:expr) => {
+
+            let n: usize = $b.len();
+            debug_assert!($c.len() == n, "invalid argument c: c.len() = {} != b.len() = {}", $c.len(), n);
+            debug_assert!(CHUNK&(CHUNK-1) == 0, "invalid CHUNK const: not a power of two");
+
+            match CHUNK{
+                8 => {
+
+                    izip!($b.chunks_exact(8), $c.chunks_exact_mut(8)).for_each(|(b, c)| {
+                        $f(&$self, $a, &b[0], &mut c[0]);
+                        $f(&$self, $a, &b[1], &mut c[1]);
+                        $f(&$self, $a, &b[2], &mut c[2]);
+                        $f(&$self, $a, &b[3], &mut c[3]);
+                        $f(&$self, $a, &b[4], &mut c[4]);
+                        $f(&$self, $a, &b[5], &mut c[5]);
+                        $f(&$self, $a, &b[6], &mut c[6]);
+                        $f(&$self, $a, &b[7], &mut c[7]);
+                    });
+
+                    let m = n - (n&7);
+                    izip!($b[m..].iter(), $c[m..].iter_mut()).for_each(|(b, c)| {
+                        $f(&$self, $a, b, c);
+                    });
+                },
+                _=>{
+                    izip!($b.iter(), $c.iter_mut()).for_each(|(b, c)| {
+                        $f(&$self, $a, b, c);
+                    });
+                }
+            }
+        };
+    }
+
+    #[macro_export]
+    macro_rules! apply_vvsv {
+
+        ($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $d:expr, $CHUNK:expr) => {
+
+            let n: usize = $a.len();
+            debug_assert!($b.len() == n, "invalid argument b: b.len() = {} != a.len() = {}", $b.len(), n);
+            debug_assert!($d.len() == n, "invalid argument d: d.len() = {} != a.len() = {}", $d.len(), n);
+            debug_assert!(CHUNK&(CHUNK-1) == 0, "invalid CHUNK const: not a power of two");
+
+            match CHUNK{
+                8 => {
+
+                    izip!($a.chunks_exact(8), $b.chunks_exact(8), $d.chunks_exact_mut(8)).for_each(|(a, b, d)| {
+                        $f(&$self, &a[0], &b[0], $c, &mut d[0]);
+                        $f(&$self, &a[1], &b[1], $c, &mut d[1]);
+                        $f(&$self, &a[2], &b[2], $c, &mut d[2]);
+                        $f(&$self, &a[3], &b[3], $c, &mut d[3]);
+                        $f(&$self, &a[4], &b[4], $c, &mut d[4]);
+                        $f(&$self, &a[5], &b[5], $c, &mut d[5]);
+                        $f(&$self, &a[6], &b[6], $c, &mut d[6]);
+                        $f(&$self, &a[7], &b[7], $c, &mut d[7]);
+                    });
+
+                    let m = n - (n&7);
+                    izip!($a[m..].iter(), $b[m..].iter(), $d[m..].iter_mut()).for_each(|(a, b, d)| {
+                        $f(&$self, a, b, $c, d);
+                    });
+                },
+                _=>{
+                    izip!($a.iter(), $b.iter(), $d.iter_mut()).for_each(|(a, b, d)| {
+                        $f(&$self, a, b, $c, d);
+                    });
+                }
+            }
+        };
+    }
 }
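
The three added macros extend the renamed apply_v/apply_vv/apply_vvv family with a scalar first operand (sv: scalar-vector, svv: scalar plus two vectors, vvsv: two vectors, a scalar, and an output). All of them share one traversal pattern: a hand-unrolled pass over chunks of 8 followed by an element-wise tail over the last n & 7 entries. A minimal standalone rendering of that pattern with a closure in place of the macro body (illustrative, not the crate's API):

    // Unroll-by-8 with a scalar tail, the shape every apply_* macro expands to.
    fn apply_sv_like<F: Fn(u64, &mut u64)>(a: u64, b: &mut [u64], f: F) {
        let n = b.len();
        for chunk in b.chunks_exact_mut(8) {
            // the macros spell out f(a, &mut chunk[0]) .. f(a, &mut chunk[7]) by hand
            for x in chunk.iter_mut() { f(a, x); }
        }
        let m = n - (n & 7); // first index of the remainder
        for x in b[m..].iter_mut() { f(a, x); }
    }

    fn main() {
        let mut v: Vec<u64> = (0..19).collect();
        apply_sv_like(5, &mut v, |a, x| *x += a);
        assert!(v.iter().enumerate().all(|(i, &x)| x == i as u64 + 5));
    }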
@@ -85,6 +85,15 @@ pub trait WordOperations<O>{

     // Assigns a * b to b.
     fn word_mul_montgomery_external_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&mut O);
+
+    // Assigns a * b to c.
+    fn word_mul_barrett_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &barrett::Barrett<O>, b:&O, c: &mut O);
+
+    // Assigns a * b to b.
+    fn word_mul_barrett_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&barrett::Barrett<O>, b:&mut O);
+
+    // Assigns (a + 2q - b) * c to d.
+    fn word_sum_aqqmb_prod_c_barrett_assign_d<const REDUCE:REDUCEMOD>(&self, a: &O, b: &O, c: &barrett::Barrett<O>, d: &mut O);
 }

 pub trait VecOperations<O>{
@@ -118,6 +127,15 @@ pub trait VecOperations<O>{

     // Assigns a[i] * b[i] to b[i].
     fn vec_mul_montgomery_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&mut [O]);
+
+    // Assigns a * b[i] to b[i].
+    fn vec_mul_scalar_barrett_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& barrett::Barrett<u64>, b:&mut [u64]);
+
+    // Assigns a * b[i] to c[i].
+    fn vec_mul_scalar_barrett_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& barrett::Barrett<u64>, b:&[u64], c: &mut [u64]);
+
+    // Assigns (a[i] + 2q - b[i]) * c to d[i].
+    fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_d<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &[u64], c: &barrett::Barrett<u64>, d: &mut [u64]);
 }
@@ -17,6 +17,8 @@ impl<O> Barrett<O> {
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct BarrettPrecomp<O>{
     pub q: O,
+    pub two_q:O,
+    pub four_q:O,
     pub lo:O,
     pub hi:O,
     pub one: Barrett<O>,
@@ -1,5 +1,6 @@
 use crate::modulus::barrett::{Barrett, BarrettPrecomp};
 use crate::modulus::ReduceOnce;
+use crate::modulus::{REDUCEMOD, NONE, ONCE, TWICE, FOURTIMES, BARRETT, BARRETTLAZY};

 use num_bigint::BigUint;
 use num_traits::cast::ToPrimitive;
@@ -10,7 +11,7 @@ impl BarrettPrecomp<u64>{
         let big_r: BigUint = (BigUint::from(1 as usize)<<((u64::BITS<<1) as usize)) / BigUint::from(q);
         let lo: u64 = (&big_r & BigUint::from(u64::MAX)).to_u64().unwrap();
         let hi: u64 = (big_r >> u64::BITS).to_u64().unwrap();
-        let mut precomp: BarrettPrecomp<u64> = Self{q, lo, hi, one:Barrett(0,0)};
+        let mut precomp: BarrettPrecomp<u64> = Self{q:q, two_q:q<<1, four_q:q<<2, lo:lo, hi:hi, one:Barrett(0,0)};
         precomp.one = precomp.prepare(1);
         precomp
     }
@@ -20,6 +21,30 @@ impl BarrettPrecomp<u64>{
         self.one
     }

+    /// Applies a modular reduction on x based on REDUCE:
+    /// - NONE: no modular reduction.
+    /// - ONCE/TWICE/FOURTIMES: conditionally subtracts q/2q/4q once.
+    /// - BARRETT/BARRETTLAZY: maps x to x mod q using Barrett reduction (the lazy variant leaves x in [0, 2q-1]).
+    #[inline(always)]
+    pub fn reduce_assign<const REDUCE:REDUCEMOD>(&self, x: &mut u64){
+        match REDUCE {
+            NONE =>{},
+            ONCE =>{x.reduce_once_assign(self.q)},
+            TWICE=>{x.reduce_once_assign(self.two_q)},
+            FOURTIMES =>{x.reduce_once_assign(self.four_q)},
+            BARRETT =>{
+                let (_, mhi) = x.widening_mul(self.hi);
+                *x = *x - mhi.wrapping_mul(self.q);
+                x.reduce_once_assign(self.q);
+            },
+            BARRETTLAZY =>{
+                let (_, mhi) = x.widening_mul(self.hi);
+                *x = *x - mhi.wrapping_mul(self.q)
+            },
+            _ => unreachable!("invalid REDUCE argument")
+        }
+    }
+
     #[inline(always)]
     pub fn prepare(&self, v: u64) -> Barrett<u64> {
         debug_assert!(v < self.q);
@@ -27,58 +52,17 @@ impl BarrettPrecomp<u64>{
         Barrett(v, quotient)
     }

-    /// Returns lhs mod q.
-    #[inline(always)]
-    pub fn reduce(&self, lhs: u64) -> u64{
-        let mut r: u64 = self.reduce_lazy(lhs);
-        r.reduce_once_assign(self.q);
-        r
-    }
-
-    /// Returns lhs mod q in range [0, 2q-1].
-    #[inline(always)]
-    pub fn reduce_lazy(&self, lhs: u64) -> u64{
-        let (_, mhi) = lhs.widening_mul(self.hi);
-        lhs - mhi.wrapping_mul(self.q)
-    }
-
-    /// Assigns lhs mod q to lhs.
-    #[inline(always)]
-    pub fn reduce_assign(&self, lhs: &mut u64){
-        self.reduce_lazy_assign(lhs);
-        lhs.reduce_once_assign(self.q);
-    }
-
-    /// Assigns lhs mod q in range [0, 2q-1] to lhs.
-    #[inline(always)]
-    pub fn reduce_lazy_assign(&self, lhs: &mut u64){
-        let (_, mhi) = lhs.widening_mul(self.hi);
-        *lhs = *lhs - mhi.wrapping_mul(self.q)
-    }
-
-    #[inline(always)]
-    pub fn mul_external(&self, lhs: Barrett<u64>, rhs: u64) -> u64 {
-        let mut r: u64 = self.mul_external_lazy(lhs, rhs);
-        r.reduce_once_assign(self.q);
-        r
-    }
-
-    #[inline(always)]
-    pub fn mul_external_assign(&self, lhs: Barrett<u64>, rhs: &mut u64){
-        self.mul_external_lazy_assign(lhs, rhs);
-        rhs.reduce_once_assign(self.q);
-    }
-
     #[inline(always)]
-    pub fn mul_external_lazy(&self, lhs: Barrett<u64>, rhs: u64) -> u64 {
+    pub fn mul_external<const REDUCE:REDUCEMOD>(&self, lhs: Barrett<u64>, rhs: u64) -> u64 {
         let mut r: u64 = rhs;
-        self.mul_external_lazy_assign(lhs, &mut r);
+        self.mul_external_assign::<REDUCE>(lhs, &mut r);
         r
     }

     #[inline(always)]
-    pub fn mul_external_lazy_assign(&self, lhs: Barrett<u64>, rhs: &mut u64){
+    pub fn mul_external_assign<const REDUCE:REDUCEMOD>(&self, lhs: Barrett<u64>, rhs: &mut u64){
         let t: u64 = ((*lhs.quotient() as u128 * *rhs as u128) >> 64) as _;
         *rhs = (rhs.wrapping_mul(*lhs.value())).wrapping_sub(self.q.wrapping_mul(t));
+        self.reduce_assign::<REDUCE>(rhs);
     }
 }
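
As a self-contained reference for the BARRETT arm above: the stored hi is (2^128 / q) >> 64, which works out to floor(2^64 / q), and the reduction computes x - floor(x*hi / 2^64)*q followed by one conditional subtraction. A standalone sketch under those assumptions (plain functions, not the crate's types):

    fn barrett_reduce(x: u64, q: u64, hi: u64) -> u64 {
        // mhi = floor(x*hi / 2^64) undershoots floor(x/q) by at most 1,
        // so the difference lands in [0, 2q).
        let mhi = ((x as u128 * hi as u128) >> 64) as u64;
        let mut r = x.wrapping_sub(mhi.wrapping_mul(q));
        if r >= q { r -= q; } // the reduce_once_assign step
        r
    }

    fn main() {
        let q: u64 = 0x1fffffffffe00001;
        let hi = ((1u128 << 64) / q as u128) as u64; // floor(2^64 / q)
        for x in [0u64, 1, q - 1, q, q + 1, u64::MAX] {
            assert_eq!(barrett_reduce(x, q, hi), x % q);
        }
    }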
@@ -2,7 +2,7 @@
 use crate::modulus::ReduceOnce;
 use crate::modulus::montgomery::{MontgomeryPrecomp, Montgomery};
 use crate::modulus::barrett::BarrettPrecomp;
-use crate::modulus::{REDUCEMOD, NONE, ONCE, TWICE, FOURTIMES, BARRETT, BARRETTLAZY};
+use crate::modulus::{REDUCEMOD, ONCE};
 extern crate test;

 /// MontgomeryPrecomp is a set of methods implemented for MontgomeryPrecomp<u64>
@@ -65,15 +65,7 @@ impl MontgomeryPrecomp<u64>{
     /// - FULL: maps x to x mod q using Barrett reduction.
     #[inline(always)]
     pub fn reduce_assign<const REDUCE:REDUCEMOD>(&self, x: &mut u64){
-        match REDUCE {
-            NONE =>{},
-            ONCE =>{x.reduce_once_assign(self.q)},
-            TWICE=>{x.reduce_once_assign(self.two_q)},
-            FOURTIMES =>{x.reduce_once_assign(self.four_q)},
-            BARRETT =>{self.barrett.reduce_assign(x)},
-            BARRETTLAZY =>{self.barrett.reduce_lazy_assign(x)},
-            _ => unreachable!("invalid REDUCE argument")
-        }
+        self.barrett.reduce_assign::<REDUCE>(x);
     }

     /// Returns lhs * 2^64 mod q as a Montgomery<u64>.
@@ -137,7 +129,7 @@ impl MontgomeryPrecomp<u64>{

     #[inline(always)]
     pub fn add_internal(&self, lhs: Montgomery<u64>, rhs: Montgomery<u64>) -> Montgomery<u64>{
-        self.barrett.reduce(rhs + lhs)
+        rhs + lhs
     }

     /// Assigns lhs + rhs to rhs.
@@ -153,12 +145,6 @@ impl MontgomeryPrecomp<u64>{
         rhs.reduce_once_assign(self.q);
     }

-    /// Returns lhs mod q in range [0, 2q-1].
-    #[inline(always)]
-    pub fn reduce_lazy_assign(&self, lhs: &mut u64){
-        self.barrett.reduce_lazy_assign(lhs)
-    }
-
     /// Returns (x^exponent) * 2^64 mod q.
     #[inline(always)]
     pub fn pow(&self, x: Montgomery<u64>, exponent:u64) -> Montgomery<u64>{
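
The Montgomery multiplication behind mul_external_assign is not visible in this hunk; for background only, textbook REDC over r = 2^64 looks like the following (a standalone sketch under the usual odd-q assumptions, not this crate's implementation):

    // REDC: for odd q and t < 2^64 * q, returns t * 2^-64 mod q.
    fn redc(t: u128, q: u64, neg_qinv: u64) -> u64 {
        let m = (t as u64).wrapping_mul(neg_qinv);          // m = -t * q^-1 mod 2^64
        let u = ((t + m as u128 * q as u128) >> 64) as u64; // t + m*q is divisible by 2^64
        if u >= q { u - q } else { u }
    }

    fn main() {
        let q: u64 = 0x1fffffffffe00001;
        // q^-1 mod 2^64 by Newton iteration (precision doubles each step), then negate.
        let mut inv: u64 = 1;
        for _ in 0..6 { inv = inv.wrapping_mul(2u64.wrapping_sub(q.wrapping_mul(inv))); }
        let neg_qinv = inv.wrapping_neg();
        let (a, b) = (123456789u64, 987654321u64);
        let r2 = ((1u128 << 64) % q as u128).pow(2) % q as u128; // 2^128 mod q
        let a_mont = redc(a as u128 * r2, q, neg_qinv);          // a * 2^64 mod q
        assert_eq!(redc(a_mont as u128 * b as u128, q, neg_qinv),
                   ((a as u128 * b as u128) % q as u128) as u64);
    }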
@@ -3,8 +3,9 @@ use crate::modulus::{WordOperations, VecOperations};
 use crate::modulus::prime::Prime;
 use crate::modulus::ReduceOnce;
 use crate::modulus::montgomery::Montgomery;
+use crate::modulus::barrett::Barrett;
 use crate::modulus::REDUCEMOD;
-use crate::{apply_unary, apply_binary, apply_ternary};
+use crate::{apply_v, apply_vv, apply_vvv, apply_sv, apply_svv, apply_vvsv};
 use itertools::izip;

 impl WordOperations<u64> for Prime<u64>{
@@ -69,6 +70,22 @@ impl WordOperations<u64> for Prime<u64>{
     fn word_mul_montgomery_external_unary_assign<const REDUCE:REDUCEMOD>(&self, lhs:&Montgomery<u64>, rhs:&mut u64){
         self.montgomery.mul_external_assign::<REDUCE>(*lhs, rhs);
     }
+
+    #[inline(always)]
+    fn word_mul_barrett_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &Barrett<u64>, b:&u64, c: &mut u64){
+        *c = self.barrett.mul_external::<REDUCE>(*a, *b);
+    }
+
+    #[inline(always)]
+    fn word_mul_barrett_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b:&mut u64){
+        self.barrett.mul_external_assign::<REDUCE>(*a, b);
+    }
+
+    #[inline(always)]
+    fn word_sum_aqqmb_prod_c_barrett_assign_d<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &Barrett<u64>, d: &mut u64){
+        *d = self.two_q.wrapping_sub(*b).wrapping_add(*a);
+        self.barrett.mul_external_assign::<REDUCE>(*c, d);
+    }
 }

 impl VecOperations<u64> for Prime<u64>{
@@ -82,51 +99,65 @@ impl VecOperations<u64> for Prime<u64>{
     /// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1].
     #[inline(always)]
     fn vec_reduce_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, x: &mut [u64]){
-        apply_unary!(self, Self::word_reduce_assign::<REDUCE>, x, CHUNK);
+        apply_v!(self, Self::word_reduce_assign::<REDUCE>, x, CHUNK);
     }

     #[inline(always)]
     fn vec_add_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
-        apply_ternary!(self, Self::word_add_binary_assign::<REDUCE>, a, b, c, CHUNK);
+        apply_vvv!(self, Self::word_add_binary_assign::<REDUCE>, a, b, c, CHUNK);
     }

     #[inline(always)]
     fn vec_add_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
-        apply_binary!(self, Self::word_add_unary_assign::<REDUCE>, a, b, CHUNK);
+        apply_vv!(self, Self::word_add_unary_assign::<REDUCE>, a, b, CHUNK);
     }

     #[inline(always)]
     fn vec_sub_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
-        apply_ternary!(self, Self::word_sub_binary_assign::<REDUCE>, a, b, c, CHUNK);
+        apply_vvv!(self, Self::word_sub_binary_assign::<REDUCE>, a, b, c, CHUNK);
     }

     #[inline(always)]
     fn vec_sub_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
-        apply_binary!(self, Self::word_sub_unary_assign::<REDUCE>, a, b, CHUNK);
+        apply_vv!(self, Self::word_sub_unary_assign::<REDUCE>, a, b, CHUNK);
     }

     #[inline(always)]
     fn vec_neg_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [u64]){
-        apply_unary!(self, Self::word_neg_unary_assign::<REDUCE>, a, CHUNK);
+        apply_v!(self, Self::word_neg_unary_assign::<REDUCE>, a, CHUNK);
     }

     #[inline(always)]
     fn vec_neg_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &mut [u64]){
-        apply_binary!(self, Self::word_neg_binary_assign::<REDUCE>, a, b, CHUNK);
+        apply_vv!(self, Self::word_neg_binary_assign::<REDUCE>, a, b, CHUNK);
     }

     #[inline(always)]
     fn vec_prepare_montgomery_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &mut [Montgomery<u64>]){
-        apply_binary!(self, Self::word_prepare_montgomery_assign::<REDUCE>, a, b, CHUNK);
+        apply_vv!(self, Self::word_prepare_montgomery_assign::<REDUCE>, a, b, CHUNK);
     }

     #[inline(always)]
     fn vec_mul_montgomery_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&[u64], c: &mut [u64]){
-        apply_ternary!(self, Self::word_mul_montgomery_external_binary_assign::<REDUCE>, a, b, c, CHUNK);
+        apply_vvv!(self, Self::word_mul_montgomery_external_binary_assign::<REDUCE>, a, b, c, CHUNK);
     }

     #[inline(always)]
     fn vec_mul_montgomery_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&mut [u64]){
-        apply_binary!(self, Self::word_mul_montgomery_external_unary_assign::<REDUCE>, a, b, CHUNK);
+        apply_vv!(self, Self::word_mul_montgomery_external_unary_assign::<REDUCE>, a, b, CHUNK);
     }
+
+    #[inline(always)]
+    fn vec_mul_scalar_barrett_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& Barrett<u64>, b:&[u64], c: &mut [u64]){
+        apply_svv!(self, Self::word_mul_barrett_binary_assign::<REDUCE>, a, b, c, CHUNK);
+    }
+
+    #[inline(always)]
+    fn vec_mul_scalar_barrett_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& Barrett<u64>, b:&mut [u64]){
+        apply_sv!(self, Self::word_mul_barrett_unary_assign::<REDUCE>, a, b, CHUNK);
+    }
+
+    fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_d<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &[u64], c: &Barrett<u64>, d: &mut [u64]){
+        apply_vvsv!(self, Self::word_sum_aqqmb_prod_c_barrett_assign_d::<REDUCE>, a, b, c, d, CHUNK);
+    }
 }
@@ -30,8 +30,12 @@ pub struct PolyRNS<O>(pub Vec<Poly<O>>);
 impl<O> PolyRNS<O>where
     O: Default + Clone,
 {
-    pub fn new(n: usize) -> Self{
-        Self(vec![Poly::<O>::new(n);n])
+    pub fn new(n: usize, level: usize) -> Self{
+        let mut polyrns: PolyRNS<O> = PolyRNS::<O>::default();
+        let mut buf: Vec<O> = vec![O::default();polyrns.buffer_size(n, level)];
+        polyrns.from_buffer(n, level, &mut buf[..]);
+        polyrns
     }

     pub fn n(&self) -> usize{
@@ -42,8 +46,8 @@ impl<O> PolyRNS<O>where
         self.0.len()-1
     }

-    pub fn buffer_size(&self) -> usize{
-        self.n() * (self.level()+1)
+    pub fn buffer_size(&self, n: usize, level:usize) -> usize{
+        n * (level+1)
     }

     pub fn from_buffer(&mut self, n: usize, level: usize, buf: &mut [O]){
@@ -55,4 +59,19 @@ impl<O> PolyRNS<O>where
             self.0.push(poly);
         }
     }
+
+    pub fn at(&self, level:usize) -> &Poly<O>{
+        &self.0[level]
+    }
+
+    pub fn at_mut(&mut self, level:usize) -> &mut Poly<O>{
+        &mut self.0[level]
+    }
 }
+
+impl<O> Default for PolyRNS<O>{
+    fn default() -> Self{
+        let polys:Vec<Poly<O>> = Vec::new();
+        Self{0:polys}
+    }
+}
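
The reworked constructor goes through buffer_size/from_buffer: one flat allocation of n * (level + 1) words carved into level + 1 residue polynomials of n coefficients each. A minimal sketch of just that layout with plain slices (not the crate's Poly type):

    fn main() {
        let (n, level) = (8usize, 2usize);
        let buf = vec![0u64; n * (level + 1)];          // buffer_size(n, level)
        let polys: Vec<&[u64]> = buf.chunks_exact(n).collect();
        assert_eq!(polys.len(), level + 1);              // one polynomial per RNS modulus
        assert!(polys.iter().all(|p| p.len() == n));
    }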
@@ -1,6 +1,7 @@
 pub mod impl_u64;

 use crate::modulus::prime::Prime;
+use crate::poly::{Poly, PolyRNS};
 use crate::dft::DFT;

@@ -10,5 +11,45 @@ pub struct Ring<O>{
     pub dft:Box<dyn DFT<O>>,
 }

-pub struct RingRNS<O>(pub Vec<Ring<O>>);
+impl<O> Ring<O>{
+    pub fn n(&self) -> usize{
+        return self.n
+    }
+
+    pub fn new_poly(&self) -> Poly<u64>{
+        Poly::<u64>::new(self.n())
+    }
+}
+
+//pub struct RingRNS<'a, O: Copy>(pub Vec<Box<& 'a Ring<O>>>);
+
+pub struct RingRNS<'a, O>(& 'a [Ring<O>]);
+
+impl<O: Copy> RingRNS<'_, O>{
+
+    pub fn n(&self) -> usize{
+        self.0[0].n()
+    }
+
+    pub fn new_polyrns(&self) -> PolyRNS<u64>{
+        PolyRNS::<u64>::new(self.n(), self.level())
+    }
+
+    pub fn max_level(&self) -> usize{
+        self.0.len()-1
+    }
+
+    pub fn modulus<const LEVEL:usize>(&self) -> O{
+        self.0[LEVEL].modulus.q
+    }
+
+    pub fn level(&self) -> usize{
+        self.0.len()-1
+    }
+
+    pub fn at_level(&self, level:usize) -> RingRNS<O>{
+        assert!(level < self.0.len());
+        RingRNS(&self.0[..level+1])
+    }
+}
@@ -1,3 +1,4 @@
 pub mod automorphism;
 pub mod ring;
 pub mod ring_rns;
+pub mod rescaling_rns;
math/src/ring/impl_u64/rescaling_rns.rs (new file, 56 lines)
@@ -0,0 +1,56 @@
+use crate::ring::RingRNS;
+use crate::poly::PolyRNS;
+use crate::modulus::barrett::Barrett;
+use crate::modulus::ONCE;
+extern crate test;
+
+impl RingRNS<'_, u64>{
+
+    /// Writes floor(a / q[a.level()]) to b.
+    /// Expects a and b to be in the NTT domain.
+    pub fn div_floor_by_last_modulus_ntt(&self, a: &PolyRNS<u64>, buf: &mut PolyRNS<u64>, b: &mut PolyRNS<u64>){
+        assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
+        let level = self.level();
+        self.0[level].intt::<true>(a.at(level), buf.at_mut(0));
+        let rescaling_constants: Vec<Barrett<u64>> = self.rescaling_constant();
+        let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
+        for (i, r) in self.0[0..level].iter().enumerate(){
+            r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
+            r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants[i], b.at_mut(i));
+        }
+    }
+
+    /// Writes floor(a / q[a.level()]) to b.
+    pub fn div_floor_by_last_modulus(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
+        assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
+        let level = self.level();
+        let rescaling_constants: Vec<Barrett<u64>> = self.rescaling_constant();
+        for (i, r) in self.0[0..level].iter().enumerate(){
+            r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(a.at(level), a.at(i), &rescaling_constants[i], b.at_mut(i));
+        }
+    }
+}
+
+
+#[cfg(test)]
+mod tests {
+    use crate::ring::Ring;
+    use crate::ring::impl_u64::ring_rns::new_rings;
+    use super::*;
+
+    #[test]
+    fn test_div_floor_by_last_modulus_ntt() {
+        let n = 1<<10;
+        let moduli: Vec<u64> = vec![0x1fffffffffe00001u64, 0x1fffffffffc80001u64];
+        let rings: Vec<Ring<u64>> = new_rings(n, moduli);
+        let ring_rns = RingRNS::new(&rings);
+
+        let a: PolyRNS<u64> = ring_rns.new_polyrns();
+        let mut b: PolyRNS<u64> = ring_rns.new_polyrns();
+        let mut c: PolyRNS<u64> = ring_rns.new_polyrns();
+
+        ring_rns.div_floor_by_last_modulus_ntt(&a, &mut b, &mut c);
+
+        //assert!(m_precomp.mul_external::<ONCE>(y_mont, x) == (x as u128 * y as u128 % q as u128) as u64);
+    }
+}
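
The rescaling constant prepared in ring_rns.rs below is q_i - q_last^{-1} mod q_i, i.e. -(q_last^{-1}), so the (a + 2q - b) * c kernel evaluates (a_i - a_last) * q_last^{-1} mod q_i. That is the exact CRT identity floor(x / q_last) ≡ (x mod q_i - x mod q_last) * q_last^{-1} (mod q_i). A toy standalone check with small primes (u64 arithmetic only, not the crate's API):

    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        // square-and-multiply; fine for toy moduli
        let mut r = 1u64;
        b %= m;
        while e > 0 {
            if e & 1 == 1 { r = r * b % m; }
            b = b * b % m;
            e >>= 1;
        }
        r
    }

    fn main() {
        let (q0, q1) = (97u64, 89u64);            // two toy RNS primes
        let inv_q1 = pow_mod(q1, q0 - 2, q0);     // q1^-1 mod q0 via Fermat
        for x in [0u64, 1, 88, 89, 1234, 8632] {  // x < q0*q1
            let lhs = (x / q1) % q0;
            let rhs = (x % q0 + q0 - x % q1 % q0) % q0 * inv_q1 % q0;
            assert_eq!(lhs, rhs);
        }
    }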
@@ -2,9 +2,12 @@ use crate::ring::Ring;
 use crate::dft::ntt::Table;
 use crate::modulus::prime::Prime;
 use crate::modulus::montgomery::Montgomery;
+use crate::modulus::barrett::Barrett;
 use crate::poly::Poly;
 use crate::modulus::REDUCEMOD;
 use crate::modulus::VecOperations;
+use num_bigint::BigInt;
+use num_traits::ToPrimitive;
 use crate::CHUNK;

 impl Ring<u64>{
@@ -17,12 +20,11 @@ impl Ring<u64>{
         }
     }

-    pub fn n(&self) -> usize{
-        return self.n
-    }
-
-    pub fn new_poly(&self) -> Poly<u64>{
-        Poly::<u64>::new(self.n())
+    pub fn from_bigint(&self, coeffs: &[BigInt], step:usize, a: &mut Poly<u64>){
+        assert!(step <= a.n(), "invalid step: step={} > a.n()={}", step, a.n());
+        assert!(coeffs.len() <= a.n() / step, "invalid coeffs: coeffs.len()={} > a.n()/step={}", coeffs.len(), a.n()/step);
+        let q_big: BigInt = BigInt::from(self.modulus.q);
+        a.0.iter_mut().step_by(step).enumerate().for_each(|(i, v)| *v = (&coeffs[i] % &q_big).to_u64().unwrap());
     }
 }
@@ -62,41 +64,79 @@ impl Ring<u64>{

     #[inline(always)]
     pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         self.modulus.vec_add_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }

     #[inline(always)]
     pub fn add<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
         self.modulus.vec_add_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
     }

     #[inline(always)]
     pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus.vec_sub_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }

     #[inline(always)]
     pub fn sub<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
         self.modulus.vec_sub_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
     }

     #[inline(always)]
     pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         self.modulus.vec_neg_binary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }

     #[inline(always)]
     pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         self.modulus.vec_neg_unary_assign::<CHUNK, REDUCE>(&mut a.0);
     }

     #[inline(always)]
     pub fn mul_montgomery_external<const REDUCE:REDUCEMOD>(&self, a:&Poly<Montgomery<u64>>, b:&Poly<u64>, c: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
         self.modulus.vec_mul_montgomery_external_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
     }

     #[inline(always)]
     pub fn mul_montgomery_external_inplace<const REDUCE:REDUCEMOD>(&self, a:&Poly<Montgomery<u64>>, b:&mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         self.modulus.vec_mul_montgomery_external_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }
+
+    #[inline(always)]
+    pub fn mul_scalar_barrett_inplace<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b:&mut Poly<u64>){
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        self.modulus.vec_mul_scalar_barrett_external_unary_assign::<CHUNK, REDUCE>(a, &mut b.0);
+    }
+
+    #[inline(always)]
+    pub fn mul_scalar_barrett<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b: &Poly<u64>, c:&mut Poly<u64>){
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        self.modulus.vec_mul_scalar_barrett_external_binary_assign::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
+    }
+
+    #[inline(always)]
+    pub fn sum_aqqmb_prod_c_scalar_barrett<const REDUCE:REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &Barrett<u64>, d: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
+        self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_d::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
+    }
 }
@@ -1,114 +1,110 @@
 use crate::ring::{Ring, RingRNS};
 use crate::poly::PolyRNS;
 use crate::modulus::montgomery::Montgomery;
+use crate::modulus::barrett::Barrett;
 use crate::modulus::REDUCEMOD;
+use num_bigint::BigInt;

-impl RingRNS<u64>{
-    pub fn new(n:usize, moduli: Vec<u64>) -> Self{
+pub fn new_rings(n: usize, moduli: Vec<u64>) -> Vec<Ring<u64>>{
     assert!(!moduli.is_empty(), "moduli cannot be empty");
     let rings: Vec<Ring<u64>> = moduli
         .into_iter()
-        .map(|prime: u64| Ring::new(n, prime, 1))
+        .map(|prime| Ring::new(n, prime, 1))
         .collect();
     return rings
 }

+impl<'a> RingRNS<'a, u64>{
+    pub fn new(rings:&'a [Ring<u64>]) -> Self{
+        RingRNS(rings)
+    }

-    pub fn n(&self) -> usize{
-        self.0[0].n()
+    pub fn rescaling_constant(&self) -> Vec<Barrett<u64>> {
+        let level = self.level();
+        let q_scale: u64 = self.0[level].modulus.q;
+        (0..level).map(|i| {self.0[i].modulus.barrett.prepare(self.0[i].modulus.q - self.0[i].modulus.inv(q_scale))}).collect()
     }

-    pub fn max_level(&self) -> usize{
-        self.0.len()-1
+    pub fn set_poly_from_bigint(&self, coeffs: &[BigInt], step:usize, a: &mut PolyRNS<u64>){
+        let level = self.level();
+        assert!(level <= a.level(), "invalid level: level={} > a.level()={}", level, a.level());
+        (0..level).for_each(|i|{self.0[i].from_bigint(coeffs, step, a.at_mut(i))});
+    }
 }

-impl RingRNS<u64>{
-
-}
-
-impl RingRNS<u64>{
+impl RingRNS<'_, u64>{

     #[inline(always)]
-    pub fn add<const LEVEL:usize, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(c.n() >= self.n(), "c.n()={} < n={}", c.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        debug_assert!(c.level() >= LEVEL, "c.level()={} < LEVEL={}", c.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.add::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
+    pub fn add<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        debug_assert!(c.level() >= level, "c.level()={} < level={}", c.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.add::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
     }

     #[inline(always)]
-    pub fn add_inplace<const LEVEL:usize, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.add_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
+    pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.add_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
     }

     #[inline(always)]
-    pub fn sub<const LEVEL:usize, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(c.n() >= self.n(), "c.n()={} < n={}", c.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        debug_assert!(c.level() >= LEVEL, "c.level()={} < LEVEL={}", c.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.sub::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
+    pub fn sub<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        debug_assert!(c.level() >= level, "c.level()={} < level={}", c.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.sub::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
     }

     #[inline(always)]
-    pub fn sub_inplace<const LEVEL:usize, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.sub_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
+    pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.sub_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
     }

     #[inline(always)]
-    pub fn neg<const LEVEL:usize, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.neg::<REDUCE>(&a.0[i], &mut b.0[i]));
+    pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.neg::<REDUCE>(&a.0[i], &mut b.0[i]));
     }

     #[inline(always)]
-    pub fn neg_inplace<const LEVEL:usize, const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.neg_inplace::<REDUCE>(&mut a.0[i]));
+    pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.neg_inplace::<REDUCE>(&mut a.0[i]));
     }

     #[inline(always)]
-    pub fn mul_montgomery_external<const LEVEL:usize, const REDUCE:REDUCEMOD>(&self, a:&PolyRNS<Montgomery<u64>>, b:&PolyRNS<u64>, c: &mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(c.n() >= self.n(), "c.n()={} < n={}", c.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        debug_assert!(c.level() >= LEVEL, "c.level()={} < LEVEL={}", c.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.mul_montgomery_external::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
+    pub fn mul_montgomery_external<const REDUCE:REDUCEMOD>(&self, a:&PolyRNS<Montgomery<u64>>, b:&PolyRNS<u64>, c: &mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        debug_assert!(c.level() >= level, "c.level()={} < level={}", c.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.mul_montgomery_external::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
     }

     #[inline(always)]
-    pub fn mul_montgomery_external_inplace<const LEVEL:usize, const REDUCE:REDUCEMOD>(&self, a:&PolyRNS<Montgomery<u64>>, b:&mut PolyRNS<u64>){
-        debug_assert!(self.max_level() <= LEVEL, "max_level={} < LEVEL={}", self.max_level(), LEVEL);
-        debug_assert!(a.n() >= self.n(), "a.n()={} < n={}", a.n(), self.n());
-        debug_assert!(b.n() >= self.n(), "b.n()={} < n={}", b.n(), self.n());
-        debug_assert!(a.level() >= LEVEL, "a.level()={} < LEVEL={}", a.level(), LEVEL);
-        debug_assert!(b.level() >= LEVEL, "b.level()={} < LEVEL={}", b.level(), LEVEL);
-        self.0.iter().take(LEVEL + 1).enumerate().for_each(|(i, ring)| ring.mul_montgomery_external_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
+    pub fn mul_montgomery_external_inplace<const REDUCE:REDUCEMOD>(&self, a:&PolyRNS<Montgomery<u64>>, b:&mut PolyRNS<u64>){
+        let level: usize = self.level();
+        debug_assert!(self.max_level() <= level, "max_level={} < level={}", self.max_level(), level);
+        debug_assert!(a.level() >= level, "a.level()={} < level={}", a.level(), level);
+        debug_assert!(b.level() >= level, "b.level()={} < level={}", b.level(), level);
+        self.0.iter().take(level + 1).enumerate().for_each(|(i, ring)| ring.mul_montgomery_external_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
     }
 }