mirror of https://github.com/arnaucube/poulpy.git
synced 2026-02-10 05:06:44 +01:00

commit: fmt
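Every hunk in this commit is consistent with a plain rustfmt pass (the commit title is simply "fmt", and running `cargo fmt` would produce changes of exactly this shape): imports and `pub mod` declarations are reordered alphabetically, trailing commas are added, `}else{` becomes `} else {`, blank lines after opening braces are trimmed, and signatures or calls wider than rustfmt's default 100-column limit are broken one item per line. A minimal before/after illustration (hypothetical snippet, not from the repository; default rustfmt settings assumed):

    // Before: fn reduce(a: u64, q: u64) -> u64{ if a >= q{ a - q }else{ a } }
    // After rustfmt:
    fn reduce(a: u64, q: u64) -> u64 {
        if a >= q { a - q } else { a }
    }

The diff below is rendered without file names; each `@@` header carries the enclosing item as context instead.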
@@ -3,10 +3,10 @@

 pub mod dft;
 pub mod modulus;
+pub mod num_bigint;
 pub mod poly;
 pub mod ring;
 pub mod scalar;
-pub mod num_bigint;

 pub const CHUNK: usize = 8;

@@ -333,11 +333,7 @@ pub mod macros {

 match CHUNK {
 8 => {
-izip!(
-$a.chunks_exact(8),
-$d.chunks_exact_mut(8)
-)
-.for_each(|(a, d)| {
+izip!($a.chunks_exact(8), $d.chunks_exact_mut(8)).for_each(|(a, d)| {
 $f(&$self, &a[0], $b, $c, &mut d[0]);
 $f(&$self, &a[1], $b, $c, &mut d[1]);
 $f(&$self, &a[2], $b, $c, &mut d[2]);
@@ -349,11 +345,9 @@ pub mod macros {
 });

 let m = n - (n & 7);
-izip!($a[m..].iter(), $d[m..].iter_mut()).for_each(
-|(a, d)| {
+izip!($a[m..].iter(), $d[m..].iter_mut()).for_each(|(a, d)| {
 $f(&$self, a, $b, $c, d);
-},
-);
+});
 }
 _ => {
 izip!($a.iter(), $d.iter_mut()).for_each(|(a, d)| {
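The two macro hunks above show how the `apply_*` helpers dispatch on the crate-level `CHUNK` constant: for `CHUNK == 8` the slices are walked in exact chunks of eight (the real macro unrolls the eight calls by hand, `&a[0]` through `&a[7]`), then the `n & 7` tail is swept element-wise; any other value falls back to a plain element-wise loop. A standalone sketch of the same pattern, assuming the `itertools` dependency the crate already uses and a closure `f` standing in for the scalar callback `$f`:

    use itertools::izip;

    fn apply_unary(a: &[u64], d: &mut [u64], f: impl Fn(u64) -> u64) {
        let n = a.len();
        // main loop: exact chunks of 8 (the macro unrolls this inner loop)
        izip!(a.chunks_exact(8), d.chunks_exact_mut(8)).for_each(|(a, d)| {
            for i in 0..8 {
                d[i] = f(a[i]);
            }
        });
        // remainder: the last n & 7 elements
        let m = n - (n & 7);
        izip!(a[m..].iter(), d[m..].iter_mut()).for_each(|(a, d)| *d = f(*a));
    }

The next hunks move to the ScalarOperations/VectorOperations trait declarations.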
@@ -74,7 +74,12 @@ pub trait ScalarOperations<O> {
 fn sa_add_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);

 // Assigns a - b to c.
-fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &O, b: &O, c: &mut O);
+fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &O,
+b: &O,
+c: &mut O,
+);

 // Assigns a - b to b.
 fn sa_sub_sb_into_sb<const SARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);
@@ -147,7 +152,7 @@ pub trait ScalarOperations<O> {
 &self,
 b: &u64,
 c: &barrett::Barrett<u64>,
-a: &mut u64
+a: &mut u64,
 );

 // Assigns (a + b) * c to d.
@@ -156,25 +161,25 @@ pub trait ScalarOperations<O> {
 a: &u64,
 b: &u64,
 c: &barrett::Barrett<u64>,
-d: &mut u64
+d: &mut u64,
 );

 // Assigns (a - b + c) * d to e.
-fn sb_sub_sa_add_sc_mul_sd_into_se<const SBRANGE: u8,const REDUCE: REDUCEMOD>(
+fn sb_sub_sa_add_sc_mul_sd_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
 &self,
 a: &u64,
 b: &u64,
 c: &u64,
 d: &barrett::Barrett<u64>,
-e: &mut u64
+e: &mut u64,
 );

-fn sb_sub_sa_add_sc_mul_sd_into_sa<const SBRANGE: u8,const REDUCE: REDUCEMOD>(
+fn sb_sub_sa_add_sc_mul_sd_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
 &self,
 b: &u64,
 c: &u64,
 d: &barrett::Barrett<u64>,
-a: &mut u64
+a: &mut u64,
 );
 }

@@ -206,10 +211,18 @@ pub trait VectorOperations<O> {
 fn va_add_sb_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut [O]);

 // vec(b) <- vec(a) - vec(b).
-fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &[O], b: &mut [O]);
+fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &[O],
+b: &mut [O],
+);

 // vec(a) <- vec(a) - vec(b).
-fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &[O], a: &mut [O]);
+fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+b: &[O],
+a: &mut [O],
+);

 // vec(c) <- vec(a) - vec(b).
 fn va_sub_vb_into_vc<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
@@ -220,10 +233,17 @@ pub trait VectorOperations<O> {
 );

 // vec(a) <- -vec(a).
-fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut [O]);
+fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &mut [O],
+);

 // vec(b) <- -vec(a).
-fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &[O], b: &mut [O]);
+fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &[O],
+b: &mut [O],
+);

 // vec(b) <- vec(a)
 fn va_prep_mont_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
@@ -297,7 +317,11 @@ pub trait VectorOperations<O> {
 );

 // vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
-fn vb_sub_va_add_sc_mul_sd_into_ve<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+fn vb_sub_va_add_sc_mul_sd_into_ve<
+const CHUNK: usize,
+const VBRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
 &self,
 va: &[u64],
 vb: &[u64],
@@ -307,7 +331,11 @@ pub trait VectorOperations<O> {
 );

 // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
-fn vb_sub_va_add_sc_mul_sd_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+fn vb_sub_va_add_sc_mul_sd_into_va<
+const CHUNK: usize,
+const VBRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
 &self,
 vb: &[u64],
 sc: &u64,
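A reading of the naming scheme, inferred from the trait comments above rather than stated anywhere in the diff: `sa`/`sb`/`sc` are scalar operands, `va`/`vb`/`vc` are vector (slice) operands, and the `_into_sx`/`_into_vx` suffix names the operand receiving the result, so `va_sub_vb_into_vb` computes vec(b) <- vec(a) - vec(b). A free-function analogue with an explicit modulus:

    // Hypothetical standalone version; the trait methods take the modulus from &self.
    fn va_sub_vb_into_vb(a: &[u64], b: &mut [u64], q: u64) {
        // assumes a[i], b[i] < q, so a[i] + q - b[i] never underflows
        for (a, b) in a.iter().zip(b.iter_mut()) {
            *b = (*a + q - *b) % q;
        }
    }

The hunks that follow reformat the Prime<u64> implementation of these traits.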
@@ -1,9 +1,12 @@
 use crate::modulus::barrett::Barrett;
 use crate::modulus::montgomery::Montgomery;
 use crate::modulus::prime::Prime;
-use crate::modulus::{REDUCEMOD, NONE};
 use crate::modulus::{ScalarOperations, VectorOperations};
-use crate::{apply_sv, apply_svv, apply_v, apply_vsv, apply_vv, apply_vvsv, apply_vvv, apply_ssv, apply_vssv, apply_vvssv};
+use crate::modulus::{NONE, REDUCEMOD};
+use crate::{
+apply_ssv, apply_sv, apply_svv, apply_v, apply_vssv, apply_vsv, apply_vv, apply_vvssv,
+apply_vvsv, apply_vvv,
+};
 use itertools::izip;

 impl ScalarOperations<u64> for Prime<u64> {
@@ -32,11 +35,16 @@ impl ScalarOperations<u64> for Prime<u64> {
 }

 #[inline(always)]
-fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64) {
-match SBRANGE{
-1 =>{*c = *a + self.q - *b}
-2 =>{*c = *a + self.two_q - *b}
-4 =>{*c = *a + self.four_q - *b}
+fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &u64,
+b: &u64,
+c: &mut u64,
+) {
+match SBRANGE {
+1 => *c = *a + self.q - *b,
+2 => *c = *a + self.two_q - *b,
+4 => *c = *a + self.four_q - *b,
 _ => unreachable!("invalid SBRANGE argument"),
 }
 self.sa_reduce_into_sa::<REDUCE>(c)
@@ -44,10 +52,10 @@ impl ScalarOperations<u64> for Prime<u64> {

 #[inline(always)]
 fn sa_sub_sb_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut u64) {
-match SBRANGE{
-1 =>{*a = *a + self.q - *b}
-2 =>{*a = *a + self.two_q - *b}
-4 =>{*a = *a + self.four_q - *b}
+match SBRANGE {
+1 => *a = *a + self.q - *b,
+2 => *a = *a + self.two_q - *b,
+4 => *a = *a + self.four_q - *b,
 _ => unreachable!("invalid SBRANGE argument"),
 }
 self.sa_reduce_into_sa::<REDUCE>(a)
@@ -55,10 +63,10 @@ impl ScalarOperations<u64> for Prime<u64> {

 #[inline(always)]
 fn sa_sub_sb_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
-match SBRANGE{
-1 =>{*b = *a + self.q - *b}
-2 =>{*b = *a + self.two_q - *b}
-4 =>{*b = *a + self.four_q - *b}
+match SBRANGE {
+1 => *b = *a + self.q - *b,
+2 => *b = *a + self.two_q - *b,
+4 => *b = *a + self.four_q - *b,
 _ => unreachable!("invalid SBRANGE argument"),
 }
 self.sa_reduce_into_sa::<REDUCE>(b)
@@ -66,10 +74,10 @@ impl ScalarOperations<u64> for Prime<u64> {

 #[inline(always)]
 fn sa_neg_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut u64) {
-match SBRANGE{
-1 =>{*a = self.q - *a}
-2 =>{*a = self.two_q - *a}
-4 =>{*a = self.four_q - *a}
+match SBRANGE {
+1 => *a = self.q - *a,
+2 => *a = self.two_q - *a,
+4 => *a = self.four_q - *a,
 _ => unreachable!("invalid SBRANGE argument"),
 }
 self.sa_reduce_into_sa::<REDUCE>(a)
@@ -77,10 +85,10 @@ impl ScalarOperations<u64> for Prime<u64> {

 #[inline(always)]
 fn sa_neg_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
-match SBRANGE{
-1 =>{*b = self.q - *a}
-2 =>{*b = self.two_q - *a}
-4 =>{*b = self.four_q - *a}
+match SBRANGE {
+1 => *b = self.q - *a,
+2 => *b = self.two_q - *a,
+4 => *b = self.four_q - *a,
 _ => unreachable!("invalid SBRANGE argument"),
 }
 self.sa_reduce_into_sa::<REDUCE>(b)
@@ -129,10 +137,10 @@ impl ScalarOperations<u64> for Prime<u64> {
 c: &Barrett<u64>,
 d: &mut u64,
 ) {
-match VBRANGE{
-1 =>{*d = a + self.q - b}
-2 =>{*d = a + self.two_q - b}
-4 =>{*d = a + self.four_q - b}
+match VBRANGE {
+1 => *d = a + self.q - b,
+2 => *d = a + self.two_q - b,
+4 => *d = a + self.four_q - b,
 _ => unreachable!("invalid SBRANGE argument"),
 }
 self.barrett.mul_external_assign::<REDUCE>(*c, d);
@@ -155,7 +163,7 @@ impl ScalarOperations<u64> for Prime<u64> {
 a: &u64,
 b: &u64,
 c: &Barrett<u64>,
-d: &mut u64
+d: &mut u64,
 ) {
 *d = self.barrett.mul_external::<REDUCE>(*c, *a + *b);
 }
@@ -165,7 +173,7 @@ impl ScalarOperations<u64> for Prime<u64> {
 &self,
 b: &u64,
 c: &Barrett<u64>,
-a: &mut u64
+a: &mut u64,
 ) {
 *a = self.barrett.mul_external::<REDUCE>(*c, *a + *b);
 }
@@ -177,7 +185,7 @@ impl ScalarOperations<u64> for Prime<u64> {
 b: &u64,
 c: &u64,
 d: &Barrett<u64>,
-e: &mut u64
+e: &mut u64,
 ) {
 self.sa_sub_sb_into_sc::<SBRANGE, NONE>(&(b + c), a, e);
 self.barrett.mul_external_assign::<REDUCE>(*d, e);
@@ -189,12 +197,11 @@ impl ScalarOperations<u64> for Prime<u64> {
 b: &u64,
 c: &u64,
 d: &Barrett<u64>,
-a: &mut u64
+a: &mut u64,
 ) {
 self.sa_sub_sb_into_sb::<SBRANGE, NONE>(&(b + c), a);
 self.barrett.mul_external_assign::<REDUCE>(*d, a);
 }
-
 }

 impl VectorOperations<u64> for Prime<u64> {
@@ -255,7 +262,14 @@ impl VectorOperations<u64> for Prime<u64> {
 b: &[u64],
 c: &mut [u64],
 ) {
-apply_vvv!(self, Self::sa_sub_sb_into_sc::<VBRANGE, REDUCE>, a, b, c, CHUNK);
+apply_vvv!(
+self,
+Self::sa_sub_sb_into_sc::<VBRANGE, REDUCE>,
+a,
+b,
+c,
+CHUNK
+);
 }

 #[inline(always)]
@@ -264,7 +278,13 @@ impl VectorOperations<u64> for Prime<u64> {
 b: &[u64],
 a: &mut [u64],
 ) {
-apply_vv!(self, Self::sa_sub_sb_into_sa::<VBRANGE, REDUCE>, b, a, CHUNK);
+apply_vv!(
+self,
+Self::sa_sub_sb_into_sa::<VBRANGE, REDUCE>,
+b,
+a,
+CHUNK
+);
 }

 #[inline(always)]
@@ -273,11 +293,20 @@ impl VectorOperations<u64> for Prime<u64> {
 a: &[u64],
 b: &mut [u64],
 ) {
-apply_vv!(self, Self::sa_sub_sb_into_sb::<VBRANGE, REDUCE>, a, b, CHUNK);
+apply_vv!(
+self,
+Self::sa_sub_sb_into_sb::<VBRANGE, REDUCE>,
+a,
+b,
+CHUNK
+);
 }

 #[inline(always)]
-fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut [u64]) {
+fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &mut [u64],
+) {
 apply_v!(self, Self::sa_neg_into_sa::<VARANGE, REDUCE>, a, CHUNK);
 }

@@ -415,14 +444,18 @@ impl VectorOperations<u64> for Prime<u64> {
 }

 // vec(e) <- (vec(a) - vec(b) + scalar(c)) * scalar(e).
-fn vb_sub_va_add_sc_mul_sd_into_ve<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+fn vb_sub_va_add_sc_mul_sd_into_ve<
+const CHUNK: usize,
+const VBRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
 &self,
 va: &[u64],
 vb: &[u64],
 sc: &u64,
 sd: &Barrett<u64>,
 ve: &mut [u64],
-){
+) {
 apply_vvssv!(
 self,
 Self::sb_sub_sa_add_sc_mul_sd_into_se::<VBRANGE, REDUCE>,
@@ -436,14 +469,17 @@ impl VectorOperations<u64> for Prime<u64> {
 }

 // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
-fn vb_sub_va_add_sc_mul_sd_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
+fn vb_sub_va_add_sc_mul_sd_into_va<
+const CHUNK: usize,
+const VBRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
 &self,
 vb: &[u64],
 sc: &u64,
 sd: &Barrett<u64>,
 va: &mut [u64],
-){
-
+) {
 apply_vssv!(
 self,
 Self::sb_sub_sa_add_sc_mul_sd_into_sa::<VBRANGE, REDUCE>,
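On the `SBRANGE`/`VBRANGE` matches reformatted throughout this impl: before a u64 subtraction, the code adds `q`, `two_q`, or `four_q` according to a const-generic bound on how unreduced the subtrahend may be (below q, 2q, or 4q). Adding the right multiple keeps the difference from underflowing without a branch, and the trailing `sa_reduce_into_sa::<REDUCE>` (or the Barrett multiply) then reduces as requested. Minimal sketch of the invariant, assuming q < 2^62 so `a + 4 * q` cannot overflow:

    fn lazy_sub(a: u64, b: u64, q: u64, k: u64) -> u64 {
        // k in {1, 2, 4} mirrors SBRANGE; requires b < k * q
        debug_assert!(b < k * q);
        a + k * q - b // still only partially reduced; reduce separately if needed
    }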
@@ -1,34 +1,32 @@
 use num_bigint::BigInt;
 use num_bigint::Sign;
 use num_integer::Integer;
-use num_traits::{Zero, One, Signed};
+use num_traits::{One, Signed, Zero};

-pub trait Div{
+pub trait Div {
 fn div_floor(&self, other: &Self) -> Self;
 fn div_round(&self, other: &Self) -> Self;
 }

-impl Div for BigInt{
-fn div_floor(&self, other:&Self) -> Self{
+impl Div for BigInt {
+fn div_floor(&self, other: &Self) -> Self {
 let quo: BigInt = self / other;
 if self.sign() == Sign::Minus {
-return quo - BigInt::one()
+return quo - BigInt::one();
 }
-return quo
+return quo;
 }

-fn div_round(&self, other:&Self) -> Self{
+fn div_round(&self, other: &Self) -> Self {
 let (quo, mut rem) = self.div_rem(other);
 rem <<= 1;
-if rem != BigInt::zero() && &rem.abs() > other{
-if self.sign() == other.sign(){
-return quo + BigInt::one()
-}else{
-return quo - BigInt::one()
+if rem != BigInt::zero() && &rem.abs() > other {
+if self.sign() == other.sign() {
+return quo + BigInt::one();
+} else {
+return quo - BigInt::one();
 }
 }
-return quo
+return quo;
 }
 }

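Worked examples for the `Div` trait above, obtained by tracing the two bodies (`div_floor` shifts a truncated negative quotient down by one; `div_round` doubles the remainder and compares its absolute value against the divisor). Hedged sketch assuming the trait is importable as `math::num_bigint::Div`:

    use math::num_bigint::Div;
    use num_bigint::BigInt;

    fn main() {
        let b = BigInt::from(3);
        assert_eq!(BigInt::from(8).div_floor(&b), BigInt::from(2)); // 2.67 floors to 2
        assert_eq!(BigInt::from(-8).div_floor(&b), BigInt::from(-3)); // -2.67 floors to -3
        assert_eq!(BigInt::from(8).div_round(&b), BigInt::from(3)); // 2.67 rounds to 3
        assert_eq!(BigInt::from(-8).div_round(&b), BigInt::from(-3)); // -2.67 rounds to -3
    }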
@@ -1,5 +1,5 @@
 use crate::modulus::barrett::Barrett;
-use crate::modulus::{NONE, ONCE, BARRETT};
+use crate::modulus::{BARRETT, NONE, ONCE};
 use crate::poly::PolyRNS;
 use crate::ring::Ring;
 use crate::ring::RingRNS;
@@ -32,8 +32,7 @@ impl RingRNS<u64> {
 let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
 let r_last: &Ring<u64> = &self.0[level];

-if ROUND{
-
+if ROUND {
 let q_level_half: u64 = r_last.modulus.q >> 1;

 let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
@@ -56,7 +55,11 @@ impl RingRNS<u64> {
 );
 }
 } else {
-r_last.a_add_b_scalar_into_c::<ONCE>(a.at(self.level()), &q_level_half, &mut buf_q_scaling[0]);
+r_last.a_add_b_scalar_into_c::<ONCE>(
+a.at(self.level()),
+&q_level_half,
+&mut buf_q_scaling[0],
+);
 for (i, r) in self.0[0..level].iter().enumerate() {
 r_last.a_add_b_scalar_into_c::<NONE>(
 &buf_q_scaling[0],
@@ -71,7 +74,7 @@ impl RingRNS<u64> {
 );
 }
 }
-}else{
+} else {
 if NTT {
 let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
 self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
@@ -115,8 +118,7 @@ impl RingRNS<u64> {
 let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
 let r_last: &Ring<u64> = &self.0[level];

-if ROUND{
-
+if ROUND {
 let q_level_half: u64 = r_last.modulus.q >> 1;
 let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);

@@ -148,8 +150,7 @@ impl RingRNS<u64> {
 );
 }
 }
-}else{
-
+} else {
 if NTT {
 let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
 r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
@@ -161,7 +162,7 @@ impl RingRNS<u64> {
 a.at_mut(i),
 );
 }
-}else{
+} else {
 let (a_i, a_level) = a.0.split_at_mut(level);
 for (i, r) in self.0[0..level].iter().enumerate() {
 r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
@@ -172,7 +173,6 @@ impl RingRNS<u64> {
 }
 }
 }
-
 }

 /// Updates b to floor(a / prod_{level - nb_moduli}^{level} q[i])
@@ -213,8 +213,7 @@ impl RingRNS<u64> {
 c.copy(a);
 }
 } else {
-if NTT{
-
+if NTT {
 self.intt::<false>(a, buf);
 (0..nb_moduli).for_each(|i| {
 self.at_level(self.level() - i)
@@ -224,24 +223,24 @@ impl RingRNS<u64> {
 )
 });
 self.at_level(self.level() - nb_moduli).ntt::<false>(buf, c);
-}else{
-
+} else {
 println!("{} {:?}", self.level(), buf.level());
 self.div_by_last_modulus::<ROUND, false>(a, buf, c);

-(1..nb_moduli-1).for_each(|i| {
+(1..nb_moduli - 1).for_each(|i| {
 println!("{} {:?}", self.level() - i, buf.level());
 self.at_level(self.level() - i)
 .div_by_last_modulus_inplace::<ROUND, false>(buf, c);
 });

-self.at_level(self.level()-nb_moduli+1).div_by_last_modulus_inplace::<ROUND, false>(buf, c);
+self.at_level(self.level() - nb_moduli + 1)
+.div_by_last_modulus_inplace::<ROUND, false>(buf, c);
 }
 }
 }

 /// Updates a to floor(a / prod_{level - nb_moduli}^{level} q[i])
-pub fn div_by_last_moduli_inplace<const ROUND:bool, const NTT: bool>(
+pub fn div_by_last_moduli_inplace<const ROUND: bool, const NTT: bool>(
 &self,
 nb_moduli: usize,
 buf: &mut PolyRNS<u64>,
@@ -259,15 +258,18 @@ impl RingRNS<u64> {
 nb_moduli,
 a.level()
 );
-if nb_moduli == 0{
-return
+if nb_moduli == 0 {
+return;
 }

 if NTT {
 self.intt::<false>(a, buf);
 (0..nb_moduli).for_each(|i| {
 self.at_level(self.level() - i)
-.div_by_last_modulus_inplace::<ROUND, false>(&mut PolyRNS::<u64>::default(), buf)
+.div_by_last_modulus_inplace::<ROUND, false>(
+&mut PolyRNS::<u64>::default(),
+buf,
+)
 });
 self.at_level(self.level() - nb_moduli).ntt::<false>(buf, a);
 } else {
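For context on the `div_by_last_modulus` family reformatted above: dropping the last RNS modulus is the standard rescaling step from the RNS-BGV/CKKS literature (a hedged reading of this code, not something the diff states). With residues a_i = a mod q_i for i = 0..=L, floor(a / q_L) satisfies, for each remaining modulus,

    floor(a / q_L) mod q_i = (a_i - a_L) * q_L^{-1} mod q_i

and rounded division is obtained by first adding floor(q_L / 2), which matches the `q_level_half` used in the ROUND branch; the per-modulus constants q_L^{-1} mod q_i would be what `rescaling_constant()` precomputes in Barrett form. Toy scalar version (i64 for readability; the crate works in u64 with Barrett reduction, and this assumes q_i < 2^31 so the product fits):

    fn rescale_residue(a_i: i64, a_l: i64, q_i: i64, q_l_inv: i64) -> i64 {
        ((a_i - a_l).rem_euclid(q_i) * q_l_inv).rem_euclid(q_i)
    }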
@@ -83,7 +83,12 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
+pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &Poly<u64>,
+c: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
 debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
@@ -98,7 +103,12 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
+pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &u64,
+c: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
 self.modulus
@@ -106,13 +116,25 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, c: &Barrett<u64>, a: &mut Poly<u64>) {
+pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
+&self,
+b: &u64,
+c: &Barrett<u64>,
+a: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
-self.modulus.va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
+self.modulus
+.va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
 }

 #[inline(always)]
-pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &Barrett<u64>, d: &mut Poly<u64>) {
+pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &u64,
+c: &Barrett<u64>,
+d: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(d.n() == self.n(), "c.n()={} != n={}", d.n(), self.n());
 self.modulus
@@ -120,7 +142,11 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_sub_b_into_b<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
+pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
 self.modulus
@@ -128,7 +154,11 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_sub_b_into_a<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, b: &Poly<u64>, a: &mut Poly<u64>) {
+pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+b: &Poly<u64>,
+a: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
 self.modulus
@@ -136,7 +166,12 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_sub_b_into_c<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
+pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &Poly<u64>,
+c: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
 debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
@@ -145,16 +180,22 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_neg_into_b<const ARANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
+pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-self.modulus.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
+self.modulus
+.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
 }

 #[inline(always)]
-pub fn a_neg_into_a<const ARANGE:u8,const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
+pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
-self.modulus.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
+self.modulus
+.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
 }

 #[inline(always)]
@@ -184,7 +225,12 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
+pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(
+&self,
+a: &Poly<u64>,
+b: &u64,
+c: &mut Poly<u64>,
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
 self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(
@@ -258,14 +304,17 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<
+const BRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
 &self,
 a: &Poly<u64>,
 b: &Poly<u64>,
 c: &u64,
 d: &Barrett<u64>,
 e: &mut Poly<u64>,
-){
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
 debug_assert!(e.n() == self.n(), "e.n()={} != n={}", e.n(), self.n());
@@ -274,17 +323,19 @@ impl Ring<u64> {
 }

 #[inline(always)]
-pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
+const BRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
 &self,
 b: &Poly<u64>,
 c: &u64,
 d: &Barrett<u64>,
 a: &mut Poly<u64>,
-){
+) {
 debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
 debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
 self.modulus
 .vb_sub_va_add_sc_mul_sd_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
 }

 }
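Every `Ring<u64>` method touched in this file follows one shape: `debug_assert` that each operand's length matches the ring degree `n`, then forward to the matching `VectorOperations` routine on `self.modulus`, instantiated with the crate-level `CHUNK`. A stripped-down analogue of that pattern (hypothetical standalone types, not the crate's):

    struct Ring {
        n: usize,
        q: u64,
    }

    impl Ring {
        fn a_add_b_into_c(&self, a: &[u64], b: &[u64], c: &mut [u64]) {
            debug_assert!(a.len() == self.n, "a.len()={} != n={}", a.len(), self.n);
            debug_assert!(b.len() == self.n, "b.len()={} != n={}", b.len(), self.n);
            debug_assert!(c.len() == self.n, "c.len()={} != n={}", c.len(), self.n);
            // stand-in for self.modulus.va_add_vb_into_vc::<CHUNK, REDUCE>(..);
            // assumes a[i], b[i] < q < 2^63 so the sum cannot overflow
            for ((a, b), c) in a.iter().zip(b).zip(c.iter_mut()) {
                *c = (a + b) % self.q;
            }
        }
    }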
@@ -7,8 +7,6 @@ use crate::scalar::ScalarRNS;
 use num_bigint::BigInt;
 use std::sync::Arc;

-
-
 impl RingRNS<u64> {
 pub fn new(n: usize, moduli: Vec<u64>) -> Self {
 assert!(!moduli.is_empty(), "moduli cannot be empty");
@@ -198,14 +196,17 @@ impl RingRNS<u64> {
 c.level(),
 self.level()
 );
-self.0
-.iter()
-.enumerate()
-.for_each(|(i, ring)| ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
+self.0.iter().enumerate().for_each(|(i, ring)| {
+ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
+});
 }

 #[inline(always)]
-pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
+pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &PolyRNS<u64>,
+b: &mut PolyRNS<u64>,
+) {
 debug_assert!(
 a.level() >= self.level(),
 "a.level()={} < self.level()={}",
@@ -225,7 +226,11 @@ impl RingRNS<u64> {
 }

 #[inline(always)]
-pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &PolyRNS<u64>, a: &mut PolyRNS<u64>) {
+pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+b: &PolyRNS<u64>,
+a: &mut PolyRNS<u64>,
+) {
 debug_assert!(
 a.level() >= self.level(),
 "a.level()={} < self.level()={}",
@@ -245,7 +250,11 @@ impl RingRNS<u64> {
 }

 #[inline(always)]
-pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
+pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
+&self,
+a: &PolyRNS<u64>,
+b: &mut PolyRNS<u64>,
+) {
 debug_assert!(
 a.level() >= self.level(),
 "a.level()={} < self.level()={}",
@@ -326,9 +335,10 @@ impl RingRNS<u64> {
 b.level(),
 self.level()
 );
-self.0.iter().enumerate().for_each(|(i, ring)| {
-ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i])
-});
+self.0
+.iter()
+.enumerate()
+.for_each(|(i, ring)| ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i]));
 }

 #[inline(always)]
@@ -371,7 +381,17 @@ impl RingRNS<u64> {
 }

 #[inline(always)]
-pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<const BRANGE:u8, const REDUCE:REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &u64, d: &Barrett<u64>, e: &mut PolyRNS<u64>){
+pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<
+const BRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
+&self,
+a: &PolyRNS<u64>,
+b: &PolyRNS<u64>,
+c: &u64,
+d: &Barrett<u64>,
+e: &mut PolyRNS<u64>,
+) {
 debug_assert!(
 a.level() >= self.level(),
 "a.level()={} < self.level()={}",
@@ -390,14 +410,28 @@ impl RingRNS<u64> {
 e.level(),
 self.level()
 );
-self.0
-.iter()
-.enumerate()
-.for_each(|(i, ring)| ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(&a.0[i], &b.0[i], c, d, &mut e.0[i]));
+self.0.iter().enumerate().for_each(|(i, ring)| {
+ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(
+&a.0[i],
+&b.0[i],
+c,
+d,
+&mut e.0[i],
+)
+});
 }

 #[inline(always)]
-pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<const BRANGE:u8, const REDUCE:REDUCEMOD>(&self, b: &PolyRNS<u64>, c: &u64, d: &Barrett<u64>, a: &mut PolyRNS<u64>){
+pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
+const BRANGE: u8,
+const REDUCE: REDUCEMOD,
+>(
+&self,
+b: &PolyRNS<u64>,
+c: &u64,
+d: &Barrett<u64>,
+a: &mut PolyRNS<u64>,
+) {
 debug_assert!(
 a.level() >= self.level(),
 "a.level()={} < self.level()={}",
@@ -410,9 +444,13 @@ impl RingRNS<u64> {
 b.level(),
 self.level()
 );
-self.0
-.iter()
-.enumerate()
-.for_each(|(i, ring)| ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(&b.0[i], c, d, &mut a.0[i]));
+self.0.iter().enumerate().for_each(|(i, ring)| {
+ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(
+&b.0[i],
+c,
+d,
+&mut a.0[i],
+)
+});
 }
 }
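One detail visible in this file's hunks: the same `self.0.iter().enumerate().for_each(...)` chain is collapsed onto a single line in one method and split into a braced closure in another. That is rustfmt's width rule (default `max_width` of 100 assumed): a closure body that fits inline stays on one line, while a longer nested call forces the block form. Illustration:

    fn demo(v: &mut [u64]) {
        // fits within the width limit: rustfmt keeps it on one line
        v.iter_mut().enumerate().for_each(|(i, x)| *x = i as u64);
        // multi-statement body: rustfmt keeps the braced block form
        v.iter_mut().enumerate().for_each(|(i, x)| {
            let shifted = (i as u64) << 1;
            *x = shifted;
        });
    }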
@@ -2,7 +2,7 @@ use crate::modulus::WordOps;
 use crate::poly::{Poly, PolyRNS};
 use crate::ring::{Ring, RingRNS};
 use num::ToPrimitive;
-use rand_distr::{Normal, Distribution};
+use rand_distr::{Distribution, Normal};
 use sampling::source::Source;

 impl Ring<u64> {
@@ -13,21 +13,25 @@ impl Ring<u64> {
 .for_each(|a| *a = source.next_u64n(max, mask));
 }

-pub fn fill_dist_f64<T: Distribution<f64>>(&self, source: &mut Source, dist: T, bound: f64, a: &mut Poly<u64>) {
+pub fn fill_dist_f64<T: Distribution<f64>>(
+&self,
+source: &mut Source,
+dist: T,
+bound: f64,
+a: &mut Poly<u64>,
+) {
 let max: u64 = self.modulus.q;
-a.0.iter_mut()
-.for_each(|a| {
-
+a.0.iter_mut().for_each(|a| {
 let mut dist_f64: f64 = dist.sample(source);

-while dist_f64.abs() > bound{
+while dist_f64.abs() > bound {
 dist_f64 = dist.sample(source)
 }

-let dist_u64: u64 = (dist_f64+0.5).abs().to_u64().unwrap();
-let sign: u64 = dist_f64.to_bits()>>63;
+let dist_u64: u64 = (dist_f64 + 0.5).abs().to_u64().unwrap();
+let sign: u64 = dist_f64.to_bits() >> 63;

-*a = (dist_u64 * sign) | (max-dist_u64)*(sign^1)
+*a = (dist_u64 * sign) | (max - dist_u64) * (sign ^ 1)
 });
 }
 }
@@ -40,19 +44,25 @@ impl RingRNS<u64> {
 .for_each(|(i, r)| r.fill_uniform(source, a.at_mut(i)));
 }

-pub fn fill_dist_f64<T: Distribution<f64>>(&self, source: &mut Source, dist: T, bound: f64, a: &mut PolyRNS<u64>) {
-(0..a.n()).for_each(|j|{
+pub fn fill_dist_f64<T: Distribution<f64>>(
+&self,
+source: &mut Source,
+dist: T,
+bound: f64,
+a: &mut PolyRNS<u64>,
+) {
+(0..a.n()).for_each(|j| {
 let mut dist_f64: f64 = dist.sample(source);

-while dist_f64.abs() > bound{
+while dist_f64.abs() > bound {
 dist_f64 = dist.sample(source)
 }

-let dist_u64: u64 = (dist_f64+0.5).abs().to_u64().unwrap();
-let sign: u64 = dist_f64.to_bits()>>63;
+let dist_u64: u64 = (dist_f64 + 0.5).abs().to_u64().unwrap();
+let sign: u64 = dist_f64.to_bits() >> 63;

-self.0.iter().enumerate().for_each(|(i, r)|{
-a.at_mut(i).0[j] = (dist_u64 * sign) | (r.modulus.q-dist_u64)*(sign^1);
+self.0.iter().enumerate().for_each(|(i, r)| {
+a.at_mut(i).0[j] = (dist_u64 * sign) | (r.modulus.q - dist_u64) * (sign ^ 1);
 })
 })
 }
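The `fill_dist_f64` bodies map a bounded, rounded f64 draw onto Z_q without branching: `sign` is the IEEE-754 sign bit (`to_bits() >> 63`, i.e. 1 for negative draws), and multiplying the two candidates by `sign` and `sign ^ 1` zeroes one side of the `|`, turning it into a select between `dist_u64` and `q - dist_u64`, exactly as written in the hunks above. Standalone sketch of the select:

    // sign must be 0 or 1; one operand of `|` is always zero, so this picks
    // dist_u64 when sign == 1 and q - dist_u64 when sign == 0.
    fn branchless_select(dist_u64: u64, sign: u64, q: u64) -> u64 {
        (dist_u64 * sign) | (q - dist_u64) * (sign ^ 1)
    }

The remaining hunks reformat the rescaling test file.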
@@ -1,30 +1,49 @@
+use itertools::izip;
+use math::num_bigint::Div;
 use math::poly::PolyRNS;
 use math::ring::RingRNS;
 use num_bigint::BigInt;
-use math::num_bigint::Div;
 use sampling::source::Source;
-use itertools::izip;

 #[test]
 fn rescaling_rns_u64() {
 let n = 1 << 10;
-let moduli: Vec<u64> = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64, 0x1fffffffffb40001, 0x1fffffffff500001];
+let moduli: Vec<u64> = vec![
+0x1fffffffffc80001u64,
+0x1fffffffffe00001u64,
+0x1fffffffffb40001,
+0x1fffffffff500001,
+];
 let ring_rns: RingRNS<u64> = RingRNS::new(n, moduli);

-sub_test("test_div_by_last_modulus::<ROUND:false, NTT:false>", ||{test_div_by_last_modulus::<false, false>(&ring_rns)});
-sub_test("test_div_by_last_modulus::<ROUND:false, NTT:true>", ||{test_div_by_last_modulus::<false, true>(&ring_rns)});
-sub_test("test_div_by_last_modulus::<ROUND:true, NTT:false>", ||{test_div_by_last_modulus::<true, false>(&ring_rns)});
-sub_test("test_div_by_last_modulus::<ROUND:true, NTT:true>", ||{test_div_by_last_modulus::<true, true>(&ring_rns)});
-sub_test("test_div_by_last_modulus_inplace::<ROUND:false, NTT:false>", ||{test_div_by_last_modulus_inplace::<false, false>(&ring_rns)});
-sub_test("test_div_by_last_modulus_inplace::<ROUND:false, NTT:true>", ||{test_div_by_last_modulus_inplace::<false, true>(&ring_rns)});
-sub_test("test_div_by_last_modulus_inplace::<ROUND:true, NTT:true>", ||{test_div_by_last_modulus_inplace::<true, true>(&ring_rns)});
-sub_test("test_div_by_last_modulus_inplace::<ROUND:true, NTT:false>", ||{test_div_by_last_modulus_inplace::<true, false>(&ring_rns)});
+sub_test("test_div_by_last_modulus::<ROUND:false, NTT:false>", || {
+test_div_by_last_modulus::<false, false>(&ring_rns)
+});
+sub_test("test_div_by_last_modulus::<ROUND:false, NTT:true>", || {
+test_div_by_last_modulus::<false, true>(&ring_rns)
+});
+sub_test("test_div_by_last_modulus::<ROUND:true, NTT:false>", || {
+test_div_by_last_modulus::<true, false>(&ring_rns)
+});
+sub_test("test_div_by_last_modulus::<ROUND:true, NTT:true>", || {
+test_div_by_last_modulus::<true, true>(&ring_rns)
+});
+sub_test(
+"test_div_by_last_modulus_inplace::<ROUND:false, NTT:false>",
+|| test_div_by_last_modulus_inplace::<false, false>(&ring_rns),
+);
+sub_test(
+"test_div_by_last_modulus_inplace::<ROUND:false, NTT:true>",
+|| test_div_by_last_modulus_inplace::<false, true>(&ring_rns),
+);
+sub_test(
+"test_div_by_last_modulus_inplace::<ROUND:true, NTT:true>",
+|| test_div_by_last_modulus_inplace::<true, true>(&ring_rns),
+);
+sub_test(
+"test_div_by_last_modulus_inplace::<ROUND:true, NTT:false>",
+|| test_div_by_last_modulus_inplace::<true, false>(&ring_rns),
+);

 //sub_test("test_div_by_last_moduli::<ROUND:false, NTT:false>", ||{test_div_by_last_moduli::<false, false>(&ring_rns)});
 }
@@ -34,8 +53,7 @@ fn sub_test<F: FnOnce()>(name: &str, f: F) {
 f();
 }

-fn test_div_by_last_modulus<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<u64>){
-
+fn test_div_by_last_modulus<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
 let seed: [u8; 32] = [0; 32];
 let mut source: Source = Source::new(seed);

@@ -57,8 +75,7 @@ fn test_div_by_last_modulus<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS
 ring_rns.ntt_inplace::<false>(&mut a);
 }

-ring_rns.div_by_last_modulus::<ROUND,NTT>(&a, &mut b, &mut c);
-
+ring_rns.div_by_last_modulus::<ROUND, NTT>(&a, &mut b, &mut c);

 if NTT {
 ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
@@ -73,9 +90,9 @@ fn test_div_by_last_modulus<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS
 // Performs floor division on a
 let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
 coeffs_a.iter_mut().for_each(|a| {
-if ROUND{
+if ROUND {
 *a = a.div_round(&scalar_big);
-}else{
+} else {
 *a = a.div_floor(&scalar_big);
 }
 });
@@ -83,8 +100,7 @@ fn test_div_by_last_modulus<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS
 izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
 }

-fn test_div_by_last_modulus_inplace<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<u64>) {
-
+fn test_div_by_last_modulus_inplace<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
 let seed: [u8; 32] = [0; 32];
 let mut source: Source = Source::new(seed);

@@ -105,24 +121,26 @@ fn test_div_by_last_modulus_inplace<const ROUND:bool, const NTT:bool>(ring_rns:
 ring_rns.ntt_inplace::<false>(&mut a);
 }

-ring_rns.div_by_last_modulus_inplace::<ROUND,NTT>(&mut buf, &mut a);
+ring_rns.div_by_last_modulus_inplace::<ROUND, NTT>(&mut buf, &mut a);

 if NTT {
-ring_rns.at_level(a.level()-1).intt_inplace::<false>(&mut a);
+ring_rns
+.at_level(a.level() - 1)
+.intt_inplace::<false>(&mut a);
 }

 // Exports c to coeffs_c
 let mut coeffs_c = vec![BigInt::from(0); a.n()];
 ring_rns
-.at_level(a.level()-1)
+.at_level(a.level() - 1)
 .to_bigint_inplace(&a, 1, &mut coeffs_c);

 // Performs floor division on a
 let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
 coeffs_a.iter_mut().for_each(|a| {
-if ROUND{
+if ROUND {
 *a = a.div_round(&scalar_big);
-}else{
+} else {
 *a = a.div_floor(&scalar_big);
 }
 });
@@ -130,9 +148,7 @@ fn test_div_by_last_modulus_inplace<const ROUND:bool, const NTT:bool>(ring_rns:
 izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
 }

-fn test_div_by_last_moduli<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<u64>){
-
-
+fn test_div_by_last_moduli<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
 let seed: [u8; 32] = [0; 32];
 let mut source: Source = Source::new(seed);

@@ -140,7 +156,9 @@ fn test_div_by_last_moduli<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<

 let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
 let mut buf: PolyRNS<u64> = ring_rns.new_polyrns();
-let mut c: PolyRNS<u64> = ring_rns.at_level(ring_rns.level() - nb_moduli).new_polyrns();
+let mut c: PolyRNS<u64> = ring_rns
+.at_level(ring_rns.level() - nb_moduli)
+.new_polyrns();

 // Allocates a random PolyRNS
 ring_rns.fill_uniform(&mut source, &mut a);
@@ -156,7 +174,7 @@ fn test_div_by_last_moduli<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<
 ring_rns.ntt_inplace::<false>(&mut a);
 }

-ring_rns.div_by_last_moduli::<ROUND,NTT>(nb_moduli, &a, &mut buf, &mut c);
+ring_rns.div_by_last_moduli::<ROUND, NTT>(nb_moduli, &a, &mut buf, &mut c);

 if NTT {
 ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
@@ -170,11 +188,12 @@ fn test_div_by_last_moduli<const ROUND:bool, const NTT:bool>(ring_rns: &RingRNS<

 // Performs floor division on a
 let mut scalar_big = BigInt::from(1);
-(0..nb_moduli).for_each(|i|{scalar_big *= BigInt::from(ring_rns.0[ring_rns.level()-i].modulus.q)});
+(0..nb_moduli)
+.for_each(|i| scalar_big *= BigInt::from(ring_rns.0[ring_rns.level() - i].modulus.q));
 coeffs_a.iter_mut().for_each(|a| {
-if ROUND{
+if ROUND {
 *a = a.div_round(&scalar_big);
-}else{
+} else {
 *a = a.div_floor(&scalar_big);
 }
 });
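The body of `sub_test` lies outside this diff; from its call sites and the `f();` context line in the second hunk above, it is a thin named-subtest wrapper, presumably along these lines (the `println!` is an assumption):

    fn sub_test<F: FnOnce()>(name: &str, f: F) {
        println!("running {}", name);
        f();
    }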