Jean-Philippe Bossuat
2025-01-08 11:07:04 +01:00
parent bdd57b91ed
commit 160e7a33da
9 changed files with 383 additions and 207 deletions

View File

@@ -3,10 +3,10 @@
 pub mod dft;
 pub mod modulus;
+pub mod num_bigint;
 pub mod poly;
 pub mod ring;
 pub mod scalar;
-pub mod num_bigint;
 
 pub const CHUNK: usize = 8;
@@ -333,11 +333,7 @@ pub mod macros {
        match CHUNK {
            8 => {
-               izip!(
-                   $a.chunks_exact(8),
-                   $d.chunks_exact_mut(8)
-               )
-               .for_each(|(a, d)| {
+               izip!($a.chunks_exact(8), $d.chunks_exact_mut(8)).for_each(|(a, d)| {
                    $f(&$self, &a[0], $b, $c, &mut d[0]);
                    $f(&$self, &a[1], $b, $c, &mut d[1]);
                    $f(&$self, &a[2], $b, $c, &mut d[2]);
@@ -349,11 +345,9 @@ pub mod macros {
                });
                let m = n - (n & 7);
-               izip!($a[m..].iter(), $d[m..].iter_mut()).for_each(
-                   |(a, d)| {
-                       $f(&$self, a, $b, $c, d);
-                   },
-               );
+               izip!($a[m..].iter(), $d[m..].iter_mut()).for_each(|(a, d)| {
+                   $f(&$self, a, $b, $c, d);
+               });
            }
            _ => {
                izip!($a.iter(), $d.iter_mut()).for_each(|(a, d)| {
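
For readers following the macros: the reflow above does not change the unrolling strategy. A minimal standalone sketch of the pattern (illustrative names, not the crate's macros):

    // Process exact chunks of 8 so the compiler can keep the eight calls
    // unrolled, then handle the n & 7 leftover elements separately.
    fn apply_unrolled<F: Fn(u64) -> u64>(a: &[u64], d: &mut [u64], f: F) {
        let n = a.len();
        for (a8, d8) in a.chunks_exact(8).zip(d.chunks_exact_mut(8)) {
            for i in 0..8 {
                d8[i] = f(a8[i]); // the macro writes these eight calls out by hand
            }
        }
        let m = n - (n & 7); // index where the unrolled part stops
        for (x, y) in a[m..].iter().zip(d[m..].iter_mut()) {
            *y = f(*x);
        }
    }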

View File

@@ -74,7 +74,12 @@ pub trait ScalarOperations<O> {
     fn sa_add_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);
 
     // Assigns a - b to c.
-    fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &O, b: &O, c: &mut O);
+    fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &O,
+        b: &O,
+        c: &mut O,
+    );
 
     // Assigns a - b to b.
     fn sa_sub_sb_into_sb<const SARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);
@@ -147,7 +152,7 @@ pub trait ScalarOperations<O> {
         &self,
         b: &u64,
         c: &barrett::Barrett<u64>,
-        a: &mut u64
+        a: &mut u64,
     );
 
     // Assigns (a + b) * c to d.
@@ -156,7 +161,7 @@ pub trait ScalarOperations<O> {
         a: &u64,
         b: &u64,
         c: &barrett::Barrett<u64>,
-        d: &mut u64
+        d: &mut u64,
     );
 
     // Assigns (a - b + c) * d to e.
@@ -166,7 +171,7 @@ pub trait ScalarOperations<O> {
         b: &u64,
         c: &u64,
         d: &barrett::Barrett<u64>,
-        e: &mut u64
+        e: &mut u64,
     );
 
     fn sb_sub_sa_add_sc_mul_sd_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
@@ -174,7 +179,7 @@ pub trait ScalarOperations<O> {
         b: &u64,
         c: &u64,
         d: &barrett::Barrett<u64>,
-        a: &mut u64
+        a: &mut u64,
     );
 }
@@ -206,10 +211,18 @@ pub trait VectorOperations<O> {
     fn va_add_sb_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut [O]);
 
     // vec(b) <- vec(a) - vec(b).
-    fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &[O], b: &mut [O]);
+    fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &[O],
+        b: &mut [O],
+    );
 
     // vec(a) <- vec(a) - vec(b).
-    fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &[O], a: &mut [O]);
+    fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        b: &[O],
+        a: &mut [O],
+    );
 
     // vec(c) <- vec(a) - vec(b).
     fn va_sub_vb_into_vc<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
@@ -220,10 +233,17 @@ pub trait VectorOperations<O> {
     );
 
     // vec(a) <- -vec(a).
-    fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut [O]);
+    fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &mut [O],
+    );
 
     // vec(b) <- -vec(a).
-    fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &[O], b: &mut [O]);
+    fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &[O],
+        b: &mut [O],
+    );
 
     // vec(b) <- vec(a)
     fn va_prep_mont_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
@@ -297,7 +317,11 @@ pub trait VectorOperations<O> {
     );
 
     // vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
-    fn vb_sub_va_add_sc_mul_sd_into_ve<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+    fn vb_sub_va_add_sc_mul_sd_into_ve<
+        const CHUNK: usize,
+        const VBRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
         &self,
         va: &[u64],
         vb: &[u64],
@@ -307,7 +331,11 @@ pub trait VectorOperations<O> {
     );
 
     // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
-    fn vb_sub_va_add_sc_mul_sd_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+    fn vb_sub_va_add_sc_mul_sd_into_va<
+        const CHUNK: usize,
+        const VBRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
         &self,
         vb: &[u64],
         sc: &u64,
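
A note on the const generics reflowed above, with a hedged sketch (illustrative code, not the crate's implementation): the SBRANGE/SARANGE/VBRANGE parameters record how large an operand may be in multiples of q, so a subtraction can add q, 2q, or 4q up front and stay in unsigned arithmetic, deferring the reduction selected by REDUCEMOD:

    // Illustrative 61-bit modulus (one of the test moduli further down);
    // with q < 2^61 and a < q, a + 4*q still fits in a u64.
    const Q: u64 = 0x1fffffffffe00001;

    // c = a - b where b is only known to lie in [0, SBRANGE*q);
    // the result is left lazily reduced in [0, (SBRANGE + 1)*q).
    fn sa_sub_sb_lazy<const SBRANGE: u8>(a: u64, b: u64) -> u64 {
        let offset = match SBRANGE {
            1 => Q,
            2 => 2 * Q,
            4 => 4 * Q,
            _ => unreachable!("invalid SBRANGE argument"),
        };
        a + offset - b
    }

    fn main() {
        // (3 - 5) mod q == q - 2, recovered after one final reduction.
        assert_eq!(sa_sub_sb_lazy::<1>(3, 5) % Q, Q - 2);
    }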

View File

@@ -1,9 +1,12 @@
 use crate::modulus::barrett::Barrett;
 use crate::modulus::montgomery::Montgomery;
 use crate::modulus::prime::Prime;
-use crate::modulus::{REDUCEMOD, NONE};
 use crate::modulus::{ScalarOperations, VectorOperations};
-use crate::{apply_sv, apply_svv, apply_v, apply_vsv, apply_vv, apply_vvsv, apply_vvv, apply_ssv, apply_vssv, apply_vvssv};
+use crate::modulus::{NONE, REDUCEMOD};
+use crate::{
+    apply_ssv, apply_sv, apply_svv, apply_v, apply_vssv, apply_vsv, apply_vv, apply_vvssv,
+    apply_vvsv, apply_vvv,
+};
 use itertools::izip;
 
 impl ScalarOperations<u64> for Prime<u64> {
@@ -32,11 +35,16 @@ impl ScalarOperations<u64> for Prime<u64> {
     }
 
     #[inline(always)]
-    fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64) {
+    fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &u64,
+        b: &u64,
+        c: &mut u64,
+    ) {
         match SBRANGE {
-            1 =>{*c = *a + self.q - *b}
-            2 =>{*c = *a + self.two_q - *b}
-            4 =>{*c = *a + self.four_q - *b}
+            1 => *c = *a + self.q - *b,
+            2 => *c = *a + self.two_q - *b,
+            4 => *c = *a + self.four_q - *b,
             _ => unreachable!("invalid SBRANGE argument"),
         }
         self.sa_reduce_into_sa::<REDUCE>(c)
@@ -45,9 +53,9 @@ impl ScalarOperations<u64> for Prime<u64> {
     #[inline(always)]
     fn sa_sub_sb_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut u64) {
         match SBRANGE {
-            1 =>{*a = *a + self.q - *b}
-            2 =>{*a = *a + self.two_q - *b}
-            4 =>{*a = *a + self.four_q - *b}
+            1 => *a = *a + self.q - *b,
+            2 => *a = *a + self.two_q - *b,
+            4 => *a = *a + self.four_q - *b,
             _ => unreachable!("invalid SBRANGE argument"),
         }
         self.sa_reduce_into_sa::<REDUCE>(a)
@@ -56,9 +64,9 @@ impl ScalarOperations<u64> for Prime<u64> {
     #[inline(always)]
     fn sa_sub_sb_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
         match SBRANGE {
-            1 =>{*b = *a + self.q - *b}
-            2 =>{*b = *a + self.two_q - *b}
-            4 =>{*b = *a + self.four_q - *b}
+            1 => *b = *a + self.q - *b,
+            2 => *b = *a + self.two_q - *b,
+            4 => *b = *a + self.four_q - *b,
             _ => unreachable!("invalid SBRANGE argument"),
         }
         self.sa_reduce_into_sa::<REDUCE>(b)
@@ -67,9 +75,9 @@ impl ScalarOperations<u64> for Prime<u64> {
     #[inline(always)]
     fn sa_neg_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut u64) {
         match SBRANGE {
-            1 =>{*a = self.q - *a}
-            2 =>{*a = self.two_q - *a}
-            4 =>{*a = self.four_q - *a}
+            1 => *a = self.q - *a,
+            2 => *a = self.two_q - *a,
+            4 => *a = self.four_q - *a,
             _ => unreachable!("invalid SBRANGE argument"),
         }
         self.sa_reduce_into_sa::<REDUCE>(a)
@@ -78,9 +86,9 @@ impl ScalarOperations<u64> for Prime<u64> {
     #[inline(always)]
     fn sa_neg_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
         match SBRANGE {
-            1 =>{*b = self.q - *a}
-            2 =>{*b = self.two_q - *a}
-            4 =>{*b = self.four_q - *a}
+            1 => *b = self.q - *a,
+            2 => *b = self.two_q - *a,
+            4 => *b = self.four_q - *a,
             _ => unreachable!("invalid SBRANGE argument"),
         }
         self.sa_reduce_into_sa::<REDUCE>(b)
@@ -130,9 +138,9 @@ impl ScalarOperations<u64> for Prime<u64> {
         d: &mut u64,
     ) {
         match VBRANGE {
-            1 =>{*d = a + self.q - b}
-            2 =>{*d = a + self.two_q - b}
-            4 =>{*d = a + self.four_q - b}
+            1 => *d = a + self.q - b,
+            2 => *d = a + self.two_q - b,
+            4 => *d = a + self.four_q - b,
             _ => unreachable!("invalid SBRANGE argument"),
         }
         self.barrett.mul_external_assign::<REDUCE>(*c, d);
@@ -155,7 +163,7 @@ impl ScalarOperations<u64> for Prime<u64> {
         a: &u64,
         b: &u64,
         c: &Barrett<u64>,
-        d: &mut u64
+        d: &mut u64,
     ) {
         *d = self.barrett.mul_external::<REDUCE>(*c, *a + *b);
     }
@@ -165,7 +173,7 @@ impl ScalarOperations<u64> for Prime<u64> {
         &self,
         b: &u64,
         c: &Barrett<u64>,
-        a: &mut u64
+        a: &mut u64,
     ) {
         *a = self.barrett.mul_external::<REDUCE>(*c, *a + *b);
     }
@@ -177,7 +185,7 @@ impl ScalarOperations<u64> for Prime<u64> {
         b: &u64,
         c: &u64,
         d: &Barrett<u64>,
-        e: &mut u64
+        e: &mut u64,
     ) {
         self.sa_sub_sb_into_sc::<SBRANGE, NONE>(&(b + c), a, e);
         self.barrett.mul_external_assign::<REDUCE>(*d, e);
@@ -189,12 +197,11 @@ impl ScalarOperations<u64> for Prime<u64> {
         b: &u64,
         c: &u64,
         d: &Barrett<u64>,
-        a: &mut u64
+        a: &mut u64,
     ) {
         self.sa_sub_sb_into_sb::<SBRANGE, NONE>(&(b + c), a);
         self.barrett.mul_external_assign::<REDUCE>(*d, a);
     }
-
 }
 
 impl VectorOperations<u64> for Prime<u64> {
@@ -255,7 +262,14 @@ impl VectorOperations<u64> for Prime<u64> {
         b: &[u64],
         c: &mut [u64],
     ) {
-        apply_vvv!(self, Self::sa_sub_sb_into_sc::<VBRANGE, REDUCE>, a, b, c, CHUNK);
+        apply_vvv!(
+            self,
+            Self::sa_sub_sb_into_sc::<VBRANGE, REDUCE>,
+            a,
+            b,
+            c,
+            CHUNK
+        );
     }
 
     #[inline(always)]
@@ -264,7 +278,13 @@ impl VectorOperations<u64> for Prime<u64> {
         b: &[u64],
         a: &mut [u64],
     ) {
-        apply_vv!(self, Self::sa_sub_sb_into_sa::<VBRANGE, REDUCE>, b, a, CHUNK);
+        apply_vv!(
+            self,
+            Self::sa_sub_sb_into_sa::<VBRANGE, REDUCE>,
+            b,
+            a,
+            CHUNK
+        );
     }
 
     #[inline(always)]
@@ -273,11 +293,20 @@ impl VectorOperations<u64> for Prime<u64> {
         a: &[u64],
         b: &mut [u64],
     ) {
-        apply_vv!(self, Self::sa_sub_sb_into_sb::<VBRANGE, REDUCE>, a, b, CHUNK);
+        apply_vv!(
+            self,
+            Self::sa_sub_sb_into_sb::<VBRANGE, REDUCE>,
+            a,
+            b,
+            CHUNK
+        );
     }
 
     #[inline(always)]
-    fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut [u64]) {
+    fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &mut [u64],
+    ) {
         apply_v!(self, Self::sa_neg_into_sa::<VARANGE, REDUCE>, a, CHUNK);
     }
@@ -415,7 +444,11 @@ impl VectorOperations<u64> for Prime<u64> {
     }
 
     // vec(e) <- (vec(a) - vec(b) + scalar(c)) * scalar(e).
-    fn vb_sub_va_add_sc_mul_sd_into_ve<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+    fn vb_sub_va_add_sc_mul_sd_into_ve<
+        const CHUNK: usize,
+        const VBRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
         &self,
         va: &[u64],
         vb: &[u64],
@@ -436,14 +469,17 @@ impl VectorOperations<u64> for Prime<u64> {
     }
 
     // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
-    fn vb_sub_va_add_sc_mul_sd_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+    fn vb_sub_va_add_sc_mul_sd_into_va<
+        const CHUNK: usize,
+        const VBRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
         &self,
         vb: &[u64],
         sc: &u64,
         sd: &Barrett<u64>,
         va: &mut [u64],
     ) {
         apply_vssv!(
             self,
             Self::sb_sub_sa_add_sc_mul_sd_into_sa::<VBRANGE, REDUCE>,
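
The fused scalar operations above have straightforward reference semantics; a plain-u128 sketch (reference arithmetic only — the crate's Barrett path computes the same values without a hardware division):

    // d = ((a + b) * c) mod q, the "(a + b) * c into d" operation.
    fn add_then_mul_mod(a: u64, b: u64, c: u64, q: u64) -> u64 {
        (((a as u128 + b as u128) * c as u128) % q as u128) as u64
    }

    // e = ((b - a + c) * d) mod q, the semantics of the
    // sb_sub_sa_add_sc_mul_sd_into_* family; q is added before the
    // subtraction so the unsigned arithmetic never underflows (a < q).
    fn sub_add_then_mul_mod(a: u64, b: u64, c: u64, d: u64, q: u64) -> u64 {
        let t = (b as u128 + c as u128 + q as u128 - a as u128) % q as u128;
        ((t * d as u128) % q as u128) as u64
    }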

View File

@@ -1,7 +1,7 @@
 use num_bigint::BigInt;
 use num_bigint::Sign;
 use num_integer::Integer;
-use num_traits::{Zero, One, Signed};
+use num_traits::{One, Signed, Zero};
 
 pub trait Div {
     fn div_floor(&self, other: &Self) -> Self;
@@ -9,13 +9,12 @@ pub trait Div{
 }
 
 impl Div for BigInt {
     fn div_floor(&self, other: &Self) -> Self {
         let quo: BigInt = self / other;
         if self.sign() == Sign::Minus {
-            return quo - BigInt::one()
+            return quo - BigInt::one();
         }
-        return quo
+        return quo;
     }
 
     fn div_round(&self, other: &Self) -> Self {
@@ -23,12 +22,11 @@ impl Div for BigInt{
         rem <<= 1;
         if rem != BigInt::zero() && &rem.abs() > other {
             if self.sign() == other.sign() {
-                return quo + BigInt::one()
+                return quo + BigInt::one();
             } else {
-                return quo - BigInt::one()
+                return quo - BigInt::one();
             }
         }
-        return quo
+        return quo;
     }
 }
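
A standalone cross-check of the intended rounding semantics may be useful here. Note that BigInt's `/` truncates toward zero, which already equals the floor for exact negative quotients such as -4/2, so the unconditional -1 adjustment in div_floor above rounds those one too low; num_integer::Integer::div_floor (num_integer is already imported by this file) gives the reference behavior:

    use num_bigint::BigInt;
    use num_integer::Integer; // reference floor division for BigInt

    fn main() {
        let two = BigInt::from(2);
        // Non-exact: truncation gives -3, the floor is -4.
        assert_eq!(BigInt::from(-7).div_floor(&two), BigInt::from(-4));
        // Exact: truncation already equals the floor; no adjustment needed.
        assert_eq!(BigInt::from(-4).div_floor(&two), BigInt::from(-2));
    }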

View File

@@ -1,5 +1,5 @@
 use crate::modulus::barrett::Barrett;
-use crate::modulus::{NONE, ONCE, BARRETT};
+use crate::modulus::{BARRETT, NONE, ONCE};
 use crate::poly::PolyRNS;
 use crate::ring::Ring;
 use crate::ring::RingRNS;
@@ -33,7 +33,6 @@ impl RingRNS<u64> {
         let r_last: &Ring<u64> = &self.0[level];
 
         if ROUND {
             let q_level_half: u64 = r_last.modulus.q >> 1;
-
             let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
@@ -56,7 +55,11 @@ impl RingRNS<u64> {
                 );
             }
         } else {
-            r_last.a_add_b_scalar_into_c::<ONCE>(a.at(self.level()), &q_level_half, &mut buf_q_scaling[0]);
+            r_last.a_add_b_scalar_into_c::<ONCE>(
+                a.at(self.level()),
+                &q_level_half,
+                &mut buf_q_scaling[0],
+            );
             for (i, r) in self.0[0..level].iter().enumerate() {
                 r_last.a_add_b_scalar_into_c::<NONE>(
                     &buf_q_scaling[0],
@@ -116,7 +119,6 @@ impl RingRNS<u64> {
         let r_last: &Ring<u64> = &self.0[level];
 
         if ROUND {
             let q_level_half: u64 = r_last.modulus.q >> 1;
-
             let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
@@ -149,7 +151,6 @@ impl RingRNS<u64> {
                 }
             }
         } else {
-
             if NTT {
                 let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
                 r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
@@ -172,7 +173,6 @@ impl RingRNS<u64> {
                 }
             }
         }
-
     }
 
 /// Updates b to floor(a / prod_{level - nb_moduli}^{level} q[i])
@@ -213,7 +213,6 @@ impl RingRNS<u64> {
                 c.copy(a);
             }
         } else {
-
             if NTT {
                 self.intt::<false>(a, buf);
                 (0..nb_moduli).for_each(|i| {
@@ -225,7 +224,6 @@ impl RingRNS<u64> {
                 });
                 self.at_level(self.level() - nb_moduli).ntt::<false>(buf, c);
             } else {
-
                 println!("{} {:?}", self.level(), buf.level());
                 self.div_by_last_modulus::<ROUND, false>(a, buf, c);
@@ -235,7 +233,8 @@ impl RingRNS<u64> {
                         .div_by_last_modulus_inplace::<ROUND, false>(buf, c);
                 });
-                self.at_level(self.level()-nb_moduli+1).div_by_last_modulus_inplace::<ROUND, false>(buf, c);
+                self.at_level(self.level() - nb_moduli + 1)
+                    .div_by_last_modulus_inplace::<ROUND, false>(buf, c);
             }
         }
     }
@@ -260,14 +259,17 @@ impl RingRNS<u64> {
             a.level()
         );
         if nb_moduli == 0 {
-            return
+            return;
         }
 
         if NTT {
             self.intt::<false>(a, buf);
             (0..nb_moduli).for_each(|i| {
                 self.at_level(self.level() - i)
-                    .div_by_last_modulus_inplace::<ROUND, false>(&mut PolyRNS::<u64>::default(), buf)
+                    .div_by_last_modulus_inplace::<ROUND, false>(
+                        &mut PolyRNS::<u64>::default(),
+                        buf,
+                    )
             });
             self.at_level(self.level() - nb_moduli).ntt::<false>(buf, a);
         } else {
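
For orientation, div_by_last_modulus implements standard RNS rescaling: dropping the last modulus q_l divides the represented integer by (approximately) q_l, and the ROUND branch first recenters by floor(q_l / 2) so the floor becomes a round-to-nearest. A per-coefficient sketch under stated assumptions (a_i < q_i, and q_l_inv = q_l^{-1} mod q_i precomputed; this is not the crate's API):

    // b_i = (a_i - a_l) * q_l^{-1} (mod q_i): subtract the last-limb value,
    // then multiply by the inverse of the dropped modulus.
    fn rescale_coeff(a_i: u64, a_l: u64, q_i: u64, q_l_inv: u64) -> u64 {
        let diff = (a_i + q_i - a_l % q_i) % q_i; // a_i - a_l, kept non-negative
        ((diff as u128 * q_l_inv as u128) % q_i as u128) as u64
    }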

View File

@@ -83,7 +83,12 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
+    pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &Poly<u64>,
+        c: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
@@ -98,7 +103,12 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
+    pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &u64,
+        c: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
         self.modulus
@@ -106,13 +116,25 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, c: &Barrett<u64>, a: &mut Poly<u64>) {
+    pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
+        &self,
+        b: &u64,
+        c: &Barrett<u64>,
+        a: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
-        self.modulus.va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
+        self.modulus
+            .va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
     }
 
     #[inline(always)]
-    pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &Barrett<u64>, d: &mut Poly<u64>) {
+    pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &u64,
+        c: &Barrett<u64>,
+        d: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(d.n() == self.n(), "c.n()={} != n={}", d.n(), self.n());
         self.modulus
@@ -120,7 +142,11 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_sub_b_into_b<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
+    pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         self.modulus
@@ -128,7 +154,11 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_sub_b_into_a<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, b: &Poly<u64>, a: &mut Poly<u64>) {
+    pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        b: &Poly<u64>,
+        a: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         self.modulus
@@ -136,7 +166,12 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_sub_b_into_c<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
+    pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &Poly<u64>,
+        c: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
@@ -145,16 +180,22 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_neg_into_b<const ARANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
+    pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
+        self.modulus
+            .va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
     }
 
     #[inline(always)]
     pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
-        self.modulus.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
+        self.modulus
+            .va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
     }
 
     #[inline(always)]
@@ -184,7 +225,12 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
+    pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(
+        &self,
+        a: &Poly<u64>,
+        b: &u64,
+        c: &mut Poly<u64>,
+    ) {
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
         self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(
@@ -258,7 +304,10 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+    pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<
+        const BRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
         &self,
         a: &Poly<u64>,
         b: &Poly<u64>,
@@ -274,7 +323,10 @@ impl Ring<u64> {
     }
 
     #[inline(always)]
-    pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+    pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
+        const BRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
         &self,
         b: &Poly<u64>,
         c: &u64,
@@ -286,5 +338,4 @@ impl Ring<u64> {
         self.modulus
             .vb_sub_va_add_sc_mul_sd_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
     }
-
 }

View File

@@ -7,8 +7,6 @@ use crate::scalar::ScalarRNS;
 use num_bigint::BigInt;
 use std::sync::Arc;
 
 impl RingRNS<u64> {
-
-
     pub fn new(n: usize, moduli: Vec<u64>) -> Self {
         assert!(!moduli.is_empty(), "moduli cannot be empty");
@@ -198,14 +196,17 @@ impl RingRNS<u64> {
             c.level(),
             self.level()
         );
-        self.0
-            .iter()
-            .enumerate()
-            .for_each(|(i, ring)| ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
+        self.0.iter().enumerate().for_each(|(i, ring)| {
+            ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
+        });
     }
 
     #[inline(always)]
-    pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
+    pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &PolyRNS<u64>,
+        b: &mut PolyRNS<u64>,
+    ) {
         debug_assert!(
             a.level() >= self.level(),
             "a.level()={} < self.level()={}",
@@ -225,7 +226,11 @@ impl RingRNS<u64> {
     }
 
     #[inline(always)]
-    pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &PolyRNS<u64>, a: &mut PolyRNS<u64>) {
+    pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        b: &PolyRNS<u64>,
+        a: &mut PolyRNS<u64>,
+    ) {
         debug_assert!(
             a.level() >= self.level(),
             "a.level()={} < self.level()={}",
@@ -245,7 +250,11 @@ impl RingRNS<u64> {
     }
 
     #[inline(always)]
-    pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
+    pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
+        &self,
+        a: &PolyRNS<u64>,
+        b: &mut PolyRNS<u64>,
+    ) {
         debug_assert!(
             a.level() >= self.level(),
             "a.level()={} < self.level()={}",
@@ -326,9 +335,10 @@ impl RingRNS<u64> {
             b.level(),
             self.level()
         );
-        self.0.iter().enumerate().for_each(|(i, ring)| {
-            ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i])
-        });
+        self.0
+            .iter()
+            .enumerate()
+            .for_each(|(i, ring)| ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i]));
     }
 
     #[inline(always)]
@@ -371,7 +381,17 @@ impl RingRNS<u64> {
     }
 
     #[inline(always)]
-    pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<const BRANGE:u8, const REDUCE:REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &u64, d: &Barrett<u64>, e: &mut PolyRNS<u64>){
+    pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<
+        const BRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
+        &self,
+        a: &PolyRNS<u64>,
+        b: &PolyRNS<u64>,
+        c: &u64,
+        d: &Barrett<u64>,
+        e: &mut PolyRNS<u64>,
+    ) {
         debug_assert!(
             a.level() >= self.level(),
             "a.level()={} < self.level()={}",
@@ -390,14 +410,28 @@ impl RingRNS<u64> {
             e.level(),
             self.level()
         );
-        self.0
-            .iter()
-            .enumerate()
-            .for_each(|(i, ring)| ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(&a.0[i], &b.0[i], c, d, &mut e.0[i]));
+        self.0.iter().enumerate().for_each(|(i, ring)| {
+            ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(
+                &a.0[i],
+                &b.0[i],
+                c,
+                d,
+                &mut e.0[i],
+            )
+        });
     }
 
     #[inline(always)]
-    pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<const BRANGE:u8, const REDUCE:REDUCEMOD>(&self, b: &PolyRNS<u64>, c: &u64, d: &Barrett<u64>, a: &mut PolyRNS<u64>){
+    pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
+        const BRANGE: u8,
+        const REDUCE: REDUCEMOD,
+    >(
+        &self,
+        b: &PolyRNS<u64>,
+        c: &u64,
+        d: &Barrett<u64>,
+        a: &mut PolyRNS<u64>,
+    ) {
         debug_assert!(
             a.level() >= self.level(),
             "a.level()={} < self.level()={}",
@@ -410,9 +444,13 @@ impl RingRNS<u64> {
             b.level(),
             self.level()
         );
-        self.0
-            .iter()
-            .enumerate()
-            .for_each(|(i, ring)| ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(&b.0[i], c, d, &mut a.0[i]));
+        self.0.iter().enumerate().for_each(|(i, ring)| {
+            ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(
+                &b.0[i],
+                c,
+                d,
+                &mut a.0[i],
+            )
+        });
     }
 }
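
Several of the per-modulus loops here dispatch to Montgomery multiplication (a_mul_b_montgomery_into_a). For reference, the reduction at its core is REDC; a self-contained sketch with R = 2^64, assuming x < q·R and a precomputed neg_qinv = -q^{-1} mod 2^64 (this is not the crate's Montgomery<u64> code):

    // Returns x * R^{-1} mod q for R = 2^64.
    fn redc(x: u128, q: u64, neg_qinv: u64) -> u64 {
        // m = (x mod R) * (-q^{-1}) mod R, so x + m*q is divisible by R.
        let m = (x as u64).wrapping_mul(neg_qinv);
        let t = ((x + m as u128 * q as u128) >> 64) as u64;
        if t >= q { t - q } else { t } // t < 2q, one conditional subtraction
    }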

View File

@@ -2,7 +2,7 @@ use crate::modulus::WordOps;
 use crate::poly::{Poly, PolyRNS};
 use crate::ring::{Ring, RingRNS};
 use num::ToPrimitive;
-use rand_distr::{Normal, Distribution};
+use rand_distr::{Distribution, Normal};
 use sampling::source::Source;
 
 impl Ring<u64> {
@@ -13,11 +13,15 @@ impl Ring<u64> {
             .for_each(|a| *a = source.next_u64n(max, mask));
     }
 
-    pub fn fill_dist_f64<T: Distribution<f64>>(&self, source: &mut Source, dist: T, bound: f64, a: &mut Poly<u64>) {
+    pub fn fill_dist_f64<T: Distribution<f64>>(
+        &self,
+        source: &mut Source,
+        dist: T,
+        bound: f64,
+        a: &mut Poly<u64>,
+    ) {
         let max: u64 = self.modulus.q;
-        a.0.iter_mut()
-            .for_each(|a| {
+        a.0.iter_mut().for_each(|a| {
             let mut dist_f64: f64 = dist.sample(source);
 
             while dist_f64.abs() > bound {
@@ -40,7 +44,13 @@ impl RingRNS<u64> {
             .for_each(|(i, r)| r.fill_uniform(source, a.at_mut(i)));
     }
 
-    pub fn fill_dist_f64<T: Distribution<f64>>(&self, source: &mut Source, dist: T, bound: f64, a: &mut PolyRNS<u64>) {
+    pub fn fill_dist_f64<T: Distribution<f64>>(
+        &self,
+        source: &mut Source,
+        dist: T,
+        bound: f64,
+        a: &mut PolyRNS<u64>,
+    ) {
         (0..a.n()).for_each(|j| {
             let mut dist_f64: f64 = dist.sample(source);
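
fill_dist_f64 is a simple rejection loop: redraw until the sample lands within bound. A standalone sketch using rand/rand_distr directly in place of the crate's Source type (σ = 3.2 with a 6σ cutoff is shown only as a plausible choice):

    use rand_distr::{Distribution, Normal};

    fn sample_bounded<R: rand::Rng>(dist: &Normal<f64>, bound: f64, rng: &mut R) -> f64 {
        let mut x = dist.sample(rng);
        while x.abs() > bound {
            x = dist.sample(rng); // reject and redraw out-of-bound samples
        }
        x
    }

    fn main() {
        let normal = Normal::new(0.0, 3.2).unwrap();
        let mut rng = rand::thread_rng();
        assert!(sample_bounded(&normal, 19.2, &mut rng).abs() <= 19.2);
    }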

View File

@@ -1,30 +1,49 @@
+use itertools::izip;
+use math::num_bigint::Div;
 use math::poly::PolyRNS;
 use math::ring::RingRNS;
 use num_bigint::BigInt;
-use math::num_bigint::Div;
 use sampling::source::Source;
-use itertools::izip;
 
 #[test]
 fn rescaling_rns_u64() {
     let n = 1 << 10;
-    let moduli: Vec<u64> = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64, 0x1fffffffffb40001, 0x1fffffffff500001];
+    let moduli: Vec<u64> = vec![
+        0x1fffffffffc80001u64,
+        0x1fffffffffe00001u64,
+        0x1fffffffffb40001,
+        0x1fffffffff500001,
+    ];
     let ring_rns: RingRNS<u64> = RingRNS::new(n, moduli);
 
-    sub_test("test_div_by_last_modulus::<ROUND:false, NTT:false>", ||{test_div_by_last_modulus::<false, false>(&ring_rns)});
-    sub_test("test_div_by_last_modulus::<ROUND:false, NTT:true>", ||{test_div_by_last_modulus::<false, true>(&ring_rns)});
-    sub_test("test_div_by_last_modulus::<ROUND:true, NTT:false>", ||{test_div_by_last_modulus::<true, false>(&ring_rns)});
-    sub_test("test_div_by_last_modulus::<ROUND:true, NTT:true>", ||{test_div_by_last_modulus::<true, true>(&ring_rns)});
-    sub_test("test_div_by_last_modulus_inplace::<ROUND:false, NTT:false>", ||{test_div_by_last_modulus_inplace::<false, false>(&ring_rns)});
-    sub_test("test_div_by_last_modulus_inplace::<ROUND:false, NTT:true>", ||{test_div_by_last_modulus_inplace::<false, true>(&ring_rns)});
-    sub_test("test_div_by_last_modulus_inplace::<ROUND:true, NTT:true>", ||{test_div_by_last_modulus_inplace::<true, true>(&ring_rns)});
-    sub_test("test_div_by_last_modulus_inplace::<ROUND:true, NTT:false>", ||{test_div_by_last_modulus_inplace::<true, false>(&ring_rns)});
+    sub_test("test_div_by_last_modulus::<ROUND:false, NTT:false>", || {
+        test_div_by_last_modulus::<false, false>(&ring_rns)
+    });
+    sub_test("test_div_by_last_modulus::<ROUND:false, NTT:true>", || {
+        test_div_by_last_modulus::<false, true>(&ring_rns)
+    });
+    sub_test("test_div_by_last_modulus::<ROUND:true, NTT:false>", || {
+        test_div_by_last_modulus::<true, false>(&ring_rns)
+    });
+    sub_test("test_div_by_last_modulus::<ROUND:true, NTT:true>", || {
+        test_div_by_last_modulus::<true, true>(&ring_rns)
+    });
+    sub_test(
+        "test_div_by_last_modulus_inplace::<ROUND:false, NTT:false>",
+        || test_div_by_last_modulus_inplace::<false, false>(&ring_rns),
+    );
+    sub_test(
+        "test_div_by_last_modulus_inplace::<ROUND:false, NTT:true>",
+        || test_div_by_last_modulus_inplace::<false, true>(&ring_rns),
+    );
+    sub_test(
+        "test_div_by_last_modulus_inplace::<ROUND:true, NTT:true>",
+        || test_div_by_last_modulus_inplace::<true, true>(&ring_rns),
+    );
+    sub_test(
+        "test_div_by_last_modulus_inplace::<ROUND:true, NTT:false>",
+        || test_div_by_last_modulus_inplace::<true, false>(&ring_rns),
+    );
     //sub_test("test_div_by_last_moduli::<ROUND:false, NTT:false>", ||{test_div_by_last_moduli::<false, false>(&ring_rns)});
 }
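
One checkable property of these test moduli: each satisfies q ≡ 1 (mod 2n) for n = 2^10, the condition for a negacyclic NTT of size n to exist mod q. A quick standalone verification:

    fn main() {
        let n: u64 = 1 << 10;
        for q in [
            0x1fffffffffc80001u64,
            0x1fffffffffe00001,
            0x1fffffffffb40001,
            0x1fffffffff500001,
        ] {
            assert_eq!((q - 1) % (2 * n), 0, "q = {q:#x} is not NTT-friendly");
        }
    }
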
@@ -35,7 +54,6 @@ fn sub_test<F: FnOnce()>(name: &str, f: F) {
 }
 
 fn test_div_by_last_modulus<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
-
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
@@ -59,7 +77,6 @@ fn test_div_by_last_modulus<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS
     ring_rns.div_by_last_modulus::<ROUND, NTT>(&a, &mut b, &mut c);
-
     if NTT {
         ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
     }
@@ -84,7 +101,6 @@ fn test_div_by_last_modulus<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS
 }
 
 fn test_div_by_last_modulus_inplace<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
-
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
@@ -108,7 +124,9 @@ fn test_div_by_last_modulus_inplace<const ROUND:bool, const NTT:bool>(ring_rns:
     ring_rns.div_by_last_modulus_inplace::<ROUND, NTT>(&mut buf, &mut a);
     if NTT {
-        ring_rns.at_level(a.level()-1).intt_inplace::<false>(&mut a);
+        ring_rns
+            .at_level(a.level() - 1)
+            .intt_inplace::<false>(&mut a);
     }
 
     // Exports c to coeffs_c
@@ -130,9 +148,7 @@ fn test_div_by_last_modulus_inplace<const ROUND:bool, const NTT:bool>(ring_rns:
     izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
 }
 
-
 fn test_div_by_last_moduli<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
-
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
@@ -140,7 +156,9 @@ fn test_div_by_last_moduli<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<
     let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
     let mut buf: PolyRNS<u64> = ring_rns.new_polyrns();
-    let mut c: PolyRNS<u64> = ring_rns.at_level(ring_rns.level() - nb_moduli).new_polyrns();
+    let mut c: PolyRNS<u64> = ring_rns
+        .at_level(ring_rns.level() - nb_moduli)
+        .new_polyrns();
 
     // Allocates a random PolyRNS
     ring_rns.fill_uniform(&mut source, &mut a);
@@ -170,7 +188,8 @@ fn test_div_by_last_moduli<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<
     // Performs floor division on a
     let mut scalar_big = BigInt::from(1);
-    (0..nb_moduli).for_each(|i|{scalar_big *= BigInt::from(ring_rns.0[ring_rns.level()-i].modulus.q)});
+    (0..nb_moduli)
+        .for_each(|i| scalar_big *= BigInt::from(ring_rns.0[ring_rns.level() - i].modulus.q));
     coeffs_a.iter_mut().for_each(|a| {
         if ROUND {
             *a = a.div_round(&scalar_big);