Jean-Philippe Bossuat
2025-01-06 13:00:34 +01:00
parent 4b8427c6b3
commit 681268c28e
9 changed files with 335 additions and 220 deletions

View File

@@ -1,11 +1,11 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use math::ring::Ring;
use math::modulus::VecOperations;
use math::modulus::VectorOperations;
use math::modulus::montgomery::Montgomery;
use math::modulus::ONCE;
use math::CHUNK;
fn vec_add_unary(c: &mut Criterion) {
fn va_add_vb_into_vb(c: &mut Criterion) {
fn runner(r: Ring<u64>) -> Box<dyn FnMut()> {
let mut p0: math::poly::Poly<u64> = r.new_poly();
@@ -15,11 +15,11 @@ fn vec_add_unary(c: &mut Criterion) {
p1.0[i] = i as u64;
}
Box::new(move || {
r.modulus.vec_add_unary_assign::<CHUNK, ONCE>(&p0.0, &mut p1.0);
r.modulus.va_add_vb_into_vb::<CHUNK, ONCE>(&p0.0, &mut p1.0);
})
}
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("add_vec_unary");
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("va_add_vb_into_vb");
for log_n in 11..17 {
let n: usize = 1<<log_n as usize;
@@ -38,7 +38,7 @@ fn vec_add_unary(c: &mut Criterion) {
}
}
fn vec_mul_montgomery_external_unary_assign(c: &mut Criterion) {
fn va_mont_mul_vb_into_vb(c: &mut Criterion) {
fn runner(r: Ring<u64>) -> Box<dyn FnMut()> {
let mut p0: math::poly::Poly<Montgomery<u64>> = r.new_poly();
@@ -48,11 +48,11 @@ fn vec_mul_montgomery_external_unary_assign(c: &mut Criterion) {
p1.0[i] = i as u64;
}
Box::new(move || {
r.modulus.vec_mul_montgomery_external_unary_assign::<CHUNK, ONCE>(&p0.0, &mut p1.0);
r.modulus.va_mont_mul_vb_into_vb::<CHUNK, ONCE>(&p0.0, &mut p1.0);
})
}
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("mul_vec_montgomery_external_unary_assign");
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("va_mont_mul_vb_into_vb");
for log_n in 11..17 {
let n: usize = 1<<log_n as usize;
@@ -71,7 +71,7 @@ fn vec_mul_montgomery_external_unary_assign(c: &mut Criterion) {
}
}
fn vec_mul_montgomery_external_binary_assign(c: &mut Criterion) {
fn va_mont_mul_vb_into_vc(c: &mut Criterion) {
fn runner(r: Ring<u64>) -> Box<dyn FnMut()> {
let mut p0: math::poly::Poly<Montgomery<u64>> = r.new_poly();
@@ -82,11 +82,11 @@ fn vec_mul_montgomery_external_binary_assign(c: &mut Criterion) {
p1.0[i] = i as u64;
}
Box::new(move || {
r.modulus.vec_mul_montgomery_external_binary_assign::<CHUNK,ONCE>(&p0.0, & p1.0, &mut p2.0);
r.modulus.va_mont_mul_vb_into_vc::<CHUNK,ONCE>(&p0.0, & p1.0, &mut p2.0);
})
}
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("mul_vec_montgomery_external_binary_assign");
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("va_mont_mul_vb_into_vc");
for log_n in 11..17 {
let n: usize = 1<<log_n as usize;
@@ -105,5 +105,5 @@ fn vec_mul_montgomery_external_binary_assign(c: &mut Criterion) {
}
}
criterion_group!(benches, vec_add_unary, vec_mul_montgomery_external_unary_assign, vec_mul_montgomery_external_binary_assign);
criterion_group!(benches, va_add_vb_into_vb, va_mont_mul_vb_into_vb, va_mont_mul_vb_into_vc);
criterion_main!(benches);

View File

@@ -3,7 +3,7 @@ use math::ring::{Ring, RingRNS};
use math::ring::impl_u64::ring_rns::new_rings;
use math::poly::PolyRNS;
fn div_floor_by_last_modulus_ntt(c: &mut Criterion) {
fn div_floor_by_last_modulus_ntt_true(c: &mut Criterion) {
fn runner(r: RingRNS<u64>) -> Box<dyn FnMut() + '_> {
let a: PolyRNS<u64> = r.new_polyrns();
@@ -11,11 +11,11 @@ fn div_floor_by_last_modulus_ntt(c: &mut Criterion) {
let mut c: PolyRNS<u64> = r.new_polyrns();
Box::new(move || {
r.div_floor_by_last_modulus_ntt(&a, &mut b, &mut c)
r.div_floor_by_last_modulus::<true>(&a, &mut b, &mut c)
})
}
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("div_floor_by_last_modulus_ntt");
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("div_floor_by_last_modulus_ntt_true");
for log_n in 11..18 {
let n = 1<<log_n;
@@ -35,5 +35,5 @@ fn div_floor_by_last_modulus_ntt(c: &mut Criterion) {
}
}
criterion_group!(benches, div_floor_by_last_modulus_ntt);
criterion_group!(benches, div_floor_by_last_modulus_ntt_true);
criterion_main!(benches);

View File

@@ -63,94 +63,106 @@ pub trait ReduceOnce<O>{
fn reduce_once(&self, q:O) -> O;
}
pub trait WordOperations<O>{
pub trait ScalarOperations<O>{
// Applies a parameterized modular reduction.
fn word_reduce_assign<const REDUCE:REDUCEMOD>(&self, x: &mut O);
fn sa_reduce_into_sa<const REDUCE:REDUCEMOD>(&self, x: &mut O);
// Assigns a + b to c.
fn word_add_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b:&O, c: &mut O);
fn sa_add_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &O, b:&O, c: &mut O);
// Assigns a + b to b.
fn word_add_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut O);
fn sa_add_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut O);
// Assigns a - b to c.
fn word_sub_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b:&O, c: &mut O);
fn sa_sub_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &O, b:&O, c: &mut O);
// Assigns b - a to b.
fn word_sub_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut O);
fn sa_sub_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut O);
// Assigns -a to a.
fn word_neg_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&mut O);
fn sa_neg_into_sa<const REDUCE:REDUCEMOD>(&self, a:&mut O);
// Assigns -a to b.
fn word_neg_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b:&mut O);
fn sa_neg_into_sb<const REDUCE:REDUCEMOD>(&self, a: &O, b:&mut O);
// Assigns a * 2^64 to b.
fn word_prepare_montgomery_assign<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut montgomery::Montgomery<O>);
fn sa_prep_mont_into_sb<const REDUCE:REDUCEMOD>(&self, a: &O, b: &mut montgomery::Montgomery<O>);
// Assigns a * b to c.
fn word_mul_montgomery_external_binary_assign<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&O, c: &mut O);
fn sa_mont_mul_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&O, c: &mut O);
// Assigns a * b to b.
fn word_mul_montgomery_external_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&mut O);
fn sa_mont_mul_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a:&montgomery::Montgomery<O>, b:&mut O);
// Assigns a * b to c.
fn word_mul_barrett_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &barrett::Barrett<O>, b:&O, c: &mut O);
fn sa_barrett_mul_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &barrett::Barrett<O>, b:&O, c: &mut O);
// Assigns a * b to b.
fn word_mul_barrett_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&barrett::Barrett<O>, b:&mut O);
fn sa_barrett_mul_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a:&barrett::Barrett<O>, b:&mut O);
// Assigns (a + 2q - b) * c to d.
fn word_sum_aqqmb_prod_c_barrett_assign_d<const REDUCE:REDUCEMOD>(&self, a: &O, b: &O, c: &barrett::Barrett<O>, d: &mut O);
fn sa_sub_sb_mul_sc_into_sd<const REDUCE:REDUCEMOD>(&self, a: &O, b: &O, c: &barrett::Barrett<O>, d: &mut O);
// Assigns (a + 2q - b) * c to b.
fn word_sum_aqqmb_prod_c_barrett_assign_b<const REDUCE:REDUCEMOD>(&self, a: &u64, c: &barrett::Barrett<u64>, b: &mut u64);
fn sa_sub_sb_mul_sc_into_sb<const REDUCE:REDUCEMOD>(&self, a: &u64, c: &barrett::Barrett<u64>, b: &mut u64);
}
pub trait VecOperations<O>{
pub trait VectorOperations<O>{
// Applies a parameterized modular reduction.
fn vec_reduce_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, x: &mut [O]);
fn va_reduce_into_va<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, x: &mut [O]);
// ADD
// Assigns a[i] + b[i] to c[i]
fn vec_add_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&[O], c: &mut [O]);
fn va_add_vb_into_vc<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&[O], c: &mut [O]);
// Assigns a[i] + b[i] to b[i]
fn vec_add_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);
fn va_add_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);
// Assigns a[i] + b to c[i]
fn va_add_sb_into_vc<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&O, c:&mut [O]);
// Assigns b[i] + a to b[i]
fn sa_add_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&O, b:&mut [O]);
// SUB
// Assigns a[i] - b[i] to b[i]
fn va_sub_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);
// Assigns a[i] - b[i] to c[i]
fn vec_sub_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&[O], c: &mut [O]);
fn va_sub_vb_into_vc<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b:&[O], c: &mut [O]);
// Assigns a[i] - b[i] to b[i]
fn vec_sub_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);
// NEG
// Assigns -a[i] to a[i].
fn va_neg_into_va<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [O]);
// Assigns -a[i] to a[i].
fn vec_neg_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [O]);
// Assigns -a[i] to b[i].
fn vec_neg_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);
fn va_neg_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [O]);
// MUL MONTGOMERY
// Assigns a[i] * 2^64 to b[i].
fn vec_prepare_montgomery_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [montgomery::Montgomery<O>]);
fn va_prep_mont_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[O], b: &mut [montgomery::Montgomery<O>]);
// Assigns a[i] * b[i] to c[i].
fn vec_mul_montgomery_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&[O], c: &mut [O]);
fn va_mont_mul_vb_into_vc<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&[O], c: &mut [O]);
// Assigns a[i] * b[i] to b[i].
fn vec_mul_montgomery_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&mut [O]);
fn va_mont_mul_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&[montgomery::Montgomery<O>], b:&mut [O]);
// MUL BARRETT
// Assigns a * b[i] to b[i].
fn vec_mul_scalar_barrett_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& barrett::Barrett<u64>, b:&mut [u64]);
fn sa_barrett_mul_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& barrett::Barrett<u64>, b:&mut [u64]);
// Assigns a * b[i] to c[i].
fn vec_mul_scalar_barrett_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& barrett::Barrett<u64>, b:&[u64], c: &mut [u64]);
fn sa_barrett_mul_vb_into_vc<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& barrett::Barrett<u64>, b:&[u64], c: &mut [u64]);
// OTHERS
// Assigns (a[i] + 2q - b[i]) * c to d[i].
fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_d<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &[u64], c: &barrett::Barrett<u64>, d: &mut [u64]);
fn va_sub_vb_mul_sc_into_vd<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &[u64], c: &barrett::Barrett<u64>, d: &mut [u64]);
// Assigns (a[i] + 2q - b[i]) * c to b[i].
fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_b<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], c: &barrett::Barrett<u64>, b: &mut [u64]);
fn va_sub_vb_mul_sc_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], c: &barrett::Barrett<u64>, b: &mut [u64]);
}
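The renaming scheme is systematic: the sa_/va_ prefixes mark scalar and vector operands, and the into_sx/into_vx suffix names the operand that receives the result. As an illustration only (not part of the crate), here is a plain-Rust sketch of what va_add_vb_into_vc computes for a prime modulus q, assuming ONCE denotes a single conditional subtraction and ignoring the CHUNK blocking, which is a performance knob:

// Reference semantics only; the real trait method is generic over CHUNK and REDUCE.
fn va_add_vb_into_vc_reference(q: u64, a: &[u64], b: &[u64], c: &mut [u64]) {
    for ((ai, bi), ci) in a.iter().zip(b).zip(c.iter_mut()) {
        let t = ai.wrapping_add(*bi);
        *ci = if t >= q { t - q } else { t }; // ONCE: one conditional reduction into [0, q)
    }
}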

View File

@@ -1,5 +1,5 @@
use crate::modulus::{WordOperations, VecOperations};
use crate::modulus::{ScalarOperations, VectorOperations};
use crate::modulus::prime::Prime;
use crate::modulus::ReduceOnce;
use crate::modulus::montgomery::Montgomery;
@@ -8,7 +8,7 @@ use crate::modulus::REDUCEMOD;
use crate::{apply_v, apply_vv, apply_vvv, apply_sv, apply_svv, apply_vvsv, apply_vsv};
use itertools::izip;
impl WordOperations<u64> for Prime<u64>{
impl ScalarOperations<u64> for Prime<u64>{
/// Applies a modular reduction on x based on REDUCE:
/// - LAZY: no modular reduction.
@@ -18,83 +18,83 @@ impl WordOperations<u64> for Prime<u64>{
/// - BARRETT: maps x to x mod q using Barrett reduction.
/// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1].
#[inline(always)]
fn word_reduce_assign<const REDUCE:REDUCEMOD>(&self, x: &mut u64){
self.montgomery.reduce_assign::<REDUCE>(x);
fn sa_reduce_into_sa<const REDUCE:REDUCEMOD>(&self, a: &mut u64){
self.montgomery.reduce_assign::<REDUCE>(a);
}
#[inline(always)]
fn word_add_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64){
fn sa_add_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64){
*c = a.wrapping_add(*b);
self.word_reduce_assign::<REDUCE>(c);
self.sa_reduce_into_sa::<REDUCE>(c);
}
#[inline(always)]
fn word_add_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
fn sa_add_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
*b = a.wrapping_add(*b);
self.word_reduce_assign::<REDUCE>(b);
self.sa_reduce_into_sa::<REDUCE>(b);
}
#[inline(always)]
fn word_sub_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64){
fn sa_sub_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64){
*c = a.wrapping_add(self.q.wrapping_sub(*b)).reduce_once(self.q);
}
#[inline(always)]
fn word_sub_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
fn sa_sub_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
*b = a.wrapping_add(self.q.wrapping_sub(*b)).reduce_once(self.q);
}
#[inline(always)]
fn word_neg_unary_assign<const REDUCE:REDUCEMOD>(&self, a: &mut u64){
fn sa_neg_into_sa<const REDUCE:REDUCEMOD>(&self, a: &mut u64){
*a = self.q.wrapping_sub(*a);
self.word_reduce_assign::<REDUCE>(a)
self.sa_reduce_into_sa::<REDUCE>(a)
}
#[inline(always)]
fn word_neg_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
fn sa_neg_into_sb<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut u64){
*b = self.q.wrapping_sub(*a);
self.word_reduce_assign::<REDUCE>(b)
self.sa_reduce_into_sa::<REDUCE>(b)
}
#[inline(always)]
fn word_prepare_montgomery_assign<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut Montgomery<u64>){
fn sa_prep_mont_into_sb<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &mut Montgomery<u64>){
self.montgomery.prepare_assign::<REDUCE>(*a, b);
}
#[inline(always)]
fn word_mul_montgomery_external_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &Montgomery<u64>, b:&u64, c: &mut u64){
fn sa_mont_mul_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &Montgomery<u64>, b:&u64, c: &mut u64){
*c = self.montgomery.mul_external::<REDUCE>(*a, *b);
}
#[inline(always)]
fn word_mul_montgomery_external_unary_assign<const REDUCE:REDUCEMOD>(&self, lhs:&Montgomery<u64>, rhs:&mut u64){
self.montgomery.mul_external_assign::<REDUCE>(*lhs, rhs);
fn sa_mont_mul_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a:&Montgomery<u64>, b:&mut u64){
self.montgomery.mul_external_assign::<REDUCE>(*a, b);
}
#[inline(always)]
fn word_mul_barrett_binary_assign<const REDUCE:REDUCEMOD>(&self, a: &Barrett<u64>, b:&u64, c: &mut u64){
fn sa_barrett_mul_sb_into_sc<const REDUCE:REDUCEMOD>(&self, a: &Barrett<u64>, b:&u64, c: &mut u64){
*c = self.barrett.mul_external::<REDUCE>(*a, *b);
}
#[inline(always)]
fn word_mul_barrett_unary_assign<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b:&mut u64){
fn sa_barrett_mul_sb_into_sb<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b:&mut u64){
self.barrett.mul_external_assign::<REDUCE>(*a, b);
}
#[inline(always)]
fn word_sum_aqqmb_prod_c_barrett_assign_d<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &Barrett<u64>, d: &mut u64){
fn sa_sub_sb_mul_sc_into_sd<const REDUCE:REDUCEMOD>(&self, a: &u64, b: &u64, c: &Barrett<u64>, d: &mut u64){
*d = self.two_q.wrapping_sub(*b).wrapping_add(*a);
self.barrett.mul_external_assign::<REDUCE>(*c, d);
}
#[inline(always)]
fn word_sum_aqqmb_prod_c_barrett_assign_b<const REDUCE:REDUCEMOD>(&self, a: &u64, c: &Barrett<u64>, b: &mut u64){
fn sa_sub_sb_mul_sc_into_sb<const REDUCE:REDUCEMOD>(&self, a: &u64, c: &Barrett<u64>, b: &mut u64){
*b = self.two_q.wrapping_sub(*b).wrapping_add(*a);
self.barrett.mul_external_assign::<REDUCE>(*c, b);
}
}
impl VecOperations<u64> for Prime<u64>{
impl VectorOperations<u64> for Prime<u64>{
/// Applies a modular reduction on x based on REDUCE:
/// - LAZY: no modular reduction.
@@ -104,70 +104,80 @@ impl VecOperations<u64> for Prime<u64>{
/// - BARRETT: maps x to x mod q using Barrett reduction.
/// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1].
#[inline(always)]
fn vec_reduce_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, x: &mut [u64]){
apply_v!(self, Self::word_reduce_assign::<REDUCE>, x, CHUNK);
fn va_reduce_into_va<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [u64]){
apply_v!(self, Self::sa_reduce_into_sa::<REDUCE>, a, CHUNK);
}
#[inline(always)]
fn vec_add_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
apply_vvv!(self, Self::word_add_binary_assign::<REDUCE>, a, b, c, CHUNK);
fn va_add_vb_into_vc<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
apply_vvv!(self, Self::sa_add_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
}
#[inline(always)]
fn vec_add_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
apply_vv!(self, Self::word_add_unary_assign::<REDUCE>, a, b, CHUNK);
fn va_add_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
apply_vv!(self, Self::sa_add_sb_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn vec_sub_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
apply_vvv!(self, Self::word_sub_binary_assign::<REDUCE>, a, b, c, CHUNK);
fn va_add_sb_into_vc<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&u64, c:&mut [u64]){
apply_vsv!(self, Self::sa_add_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
}
#[inline(always)]
fn vec_sub_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
apply_vv!(self, Self::word_sub_unary_assign::<REDUCE>, a, b, CHUNK);
fn sa_add_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:&u64, b:&mut [u64]){
apply_sv!(self, Self::sa_add_sb_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn vec_neg_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [u64]){
apply_v!(self, Self::word_neg_unary_assign::<REDUCE>, a, CHUNK);
fn va_sub_vb_into_vc<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&[u64], c:&mut [u64]){
apply_vvv!(self, Self::sa_sub_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
}
#[inline(always)]
fn va_sub_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b:&mut [u64]){
apply_vv!(self, Self::sa_sub_sb_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn vec_neg_binary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &mut [u64]){
apply_vv!(self, Self::word_neg_binary_assign::<REDUCE>, a, b, CHUNK);
fn va_neg_into_va<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &mut [u64]){
apply_v!(self, Self::sa_neg_into_sa::<REDUCE>, a, CHUNK);
}
#[inline(always)]
fn vec_prepare_montgomery_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &mut [Montgomery<u64>]){
apply_vv!(self, Self::word_prepare_montgomery_assign::<REDUCE>, a, b, CHUNK);
fn va_neg_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &mut [u64]){
apply_vv!(self, Self::sa_neg_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn vec_mul_montgomery_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&[u64], c: &mut [u64]){
apply_vvv!(self, Self::word_mul_montgomery_external_binary_assign::<REDUCE>, a, b, c, CHUNK);
fn va_prep_mont_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &mut [Montgomery<u64>]){
apply_vv!(self, Self::sa_prep_mont_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn vec_mul_montgomery_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&mut [u64]){
apply_vv!(self, Self::word_mul_montgomery_external_unary_assign::<REDUCE>, a, b, CHUNK);
fn va_mont_mul_vb_into_vc<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&[u64], c: &mut [u64]){
apply_vvv!(self, Self::sa_mont_mul_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
}
#[inline(always)]
fn vec_mul_scalar_barrett_external_binary_assign<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& Barrett<u64>, b:&[u64], c: &mut [u64]){
apply_svv!(self, Self::word_mul_barrett_binary_assign::<REDUCE>, a, b, c, CHUNK);
fn va_mont_mul_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& [Montgomery<u64>], b:&mut [u64]){
apply_vv!(self, Self::sa_mont_mul_sb_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn vec_mul_scalar_barrett_external_unary_assign<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& Barrett<u64>, b:&mut [u64]){
apply_sv!(self, Self::word_mul_barrett_unary_assign::<REDUCE>, a, b, CHUNK);
fn sa_barrett_mul_vb_into_vc<const CHUNK:usize,const REDUCE:REDUCEMOD>(&self, a:& Barrett<u64>, b:&[u64], c: &mut [u64]){
apply_svv!(self, Self::sa_barrett_mul_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
}
fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_d<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &[u64], c: &Barrett<u64>, d: &mut [u64]){
apply_vvsv!(self, Self::word_sum_aqqmb_prod_c_barrett_assign_d::<REDUCE>, a, b, c, d, CHUNK);
#[inline(always)]
fn sa_barrett_mul_vb_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a:& Barrett<u64>, b:&mut [u64]){
apply_sv!(self, Self::sa_barrett_mul_sb_into_sb::<REDUCE>, a, b, CHUNK);
}
fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_b<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], c: &Barrett<u64>, b: &mut [u64]){
apply_vsv!(self, Self::word_sum_aqqmb_prod_c_barrett_assign_b::<REDUCE>, a, c, b, CHUNK);
fn va_sub_vb_mul_sc_into_vd<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &[u64], c: &Barrett<u64>, d: &mut [u64]){
apply_vvsv!(self, Self::sa_sub_sb_mul_sc_into_sd::<REDUCE>, a, b, c, d, CHUNK);
}
fn va_sub_vb_mul_sc_into_vb<const CHUNK:usize, const REDUCE:REDUCEMOD>(&self, a: &[u64], b: &Barrett<u64>, c: &mut [u64]){
apply_vsv!(self, Self::sa_sub_sb_mul_sc_into_sb::<REDUCE>, a, b, c, CHUNK);
}
}
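The apply_v/apply_vv/apply_vvv/apply_sv/... macros are not shown in this diff; presumably they lift the scalar sa_* operations over slices in blocks of CHUNK elements. A rough, hypothetical sketch of the shape such an expansion could take (remainder handling omitted; this is an assumption, not the crate's actual macro):

// Hypothetical sketch of an apply_vvv-style chunked application.
fn apply_vvv_sketch<const CHUNK: usize>(
    f: impl Fn(&u64, &u64, &mut u64),
    a: &[u64],
    b: &[u64],
    c: &mut [u64],
) {
    // Process the slices CHUNK elements at a time so the compiler can unroll/vectorize.
    for ((ca, cb), cc) in a
        .chunks_exact(CHUNK)
        .zip(b.chunks_exact(CHUNK))
        .zip(c.chunks_exact_mut(CHUNK))
    {
        for ((x, y), z) in ca.iter().zip(cb).zip(cc.iter_mut()) {
            f(x, y, z);
        }
    }
}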

View File

@@ -55,6 +55,12 @@ impl<O: PartialEq> PartialEq for Poly<O> {
}
}
impl<O> Default for Poly<O> {
fn default() -> Self {
Poly(Vec::new())
}
}
#[derive(Clone, Debug, Eq)]
pub struct PolyRNS<O>(pub Vec<Poly<O>>);
@@ -143,7 +149,6 @@ impl<O: PartialEq> PartialEq for PolyRNS<O> {
impl<O> Default for PolyRNS<O>{
fn default() -> Self{
let polys:Vec<Poly<O>> = Vec::new();
Self{0:polys}
Self{0:Vec::new()}
}
}
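The new Default impls provide cheap, empty placeholders; the rescaling code further down uses PolyRNS::<u64>::default() as a dummy scratch buffer when the non-NTT in-place path needs none. A minimal usage sketch (assuming the tuple fields are pub, as the benches and tests rely on):

use math::poly::{Poly, PolyRNS};

fn default_poly_demo() {
    let p: Poly<u64> = Poly::default();
    let prns: PolyRNS<u64> = PolyRNS::default();
    assert!(p.0.is_empty());    // no coefficients allocated
    assert!(prns.0.is_empty()); // no limbs allocated
}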

View File

@@ -22,7 +22,7 @@ impl<O: Unsigned> Ring<O>{
}
}
pub struct RingRNS<'a, O: Unsigned>(& 'a [Ring<O>]);
pub struct RingRNS<'a, O: Unsigned>(pub & 'a [Ring<O>]);
impl<O: Unsigned> RingRNS<'_, O> {
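Making the RingRNS tuple field pub lets callers reach the underlying rings directly; the new integration test uses this to read the last modulus. A sketch mirroring that test:

use math::ring::{Ring, RingRNS};
use math::ring::impl_u64::ring_rns::new_rings;

fn last_modulus_demo() -> u64 {
    let n = 1 << 10;
    let moduli: Vec<u64> = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64];
    let rings: Vec<Ring<u64>> = new_rings(n, moduli);
    let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings);
    // The pub field exposes the per-modulus rings, e.g. the last prime q.
    ring_rns.0[ring_rns.level()].modulus.q
}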

View File

@@ -1,3 +1,4 @@
use crate::ring::Ring;
use crate::ring::RingRNS;
use crate::poly::PolyRNS;
use crate::modulus::barrett::Barrett;
@@ -8,121 +9,134 @@ extern crate test;
impl RingRNS<'_, u64>{
/// Updates b to floor(a / q[b.level()]).
/// Expects a and b to be in the NTT domain.
pub fn div_floor_by_last_modulus_ntt(&self, a: &PolyRNS<u64>, buf: &mut PolyRNS<u64>, b: &mut PolyRNS<u64>){
assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
let level = self.level();
self.0[level].intt::<false>(a.at(level), buf.at_mut(0));
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
for (i, r) in self.0[0..level].iter().enumerate(){
r.ntt::<false>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants.0[i], b.at_mut(i));
}
}
pub fn div_floor_by_last_modulus<const NTT:bool>(&self, a: &PolyRNS<u64>, buf: &mut PolyRNS<u64>, b: &mut PolyRNS<u64>){
debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
debug_assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
/// Updates b to floor(b / q[b.level()]).
/// Expects b to be in the NTT domain.
pub fn div_floor_by_last_modulus_ntt_inplace(&self, buf: &mut PolyRNS<u64>, b: &mut PolyRNS<u64>){
let level = self.level();
self.0[level].intt::<true>(b.at(level), buf.at_mut(0));
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
for (i, r) in self.0[0..level].iter().enumerate(){
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(&buf_ntt_qi_scaling[0], &rescaling_constants.0[i], b.at_mut(i));
}
}
/// Updates b to floor(a / q[b.level()]).
pub fn div_floor_by_last_modulus(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
let level = self.level();
let rescaling_constants:ScalarRNS<Barrett<u64>> = self.rescaling_constant();
for (i, r) in self.0[0..level].iter().enumerate(){
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(a.at(level), a.at(i), &rescaling_constants.0[i], b.at_mut(i));
}
}
/// Updates a to floor(b / q[b.level()]).
pub fn div_floor_by_last_modulus_inplace(&self, a: &mut PolyRNS<u64>){
let level = self.level();
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (a_i, a_level) = a.split_at_mut(level);
for (i, r) in self.0[0..level].iter().enumerate(){
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(&a_level[0], &rescaling_constants.0[i], &mut a_i[i]);
}
}
pub fn div_floor_by_last_moduli(&self, nb_moduli:usize, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>){
if nb_moduli == 0{
if a != b{
b.copy(a);
if NTT{
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate(){
r.ntt::<false>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants.0[i], b.at_mut(i));
}
}else{
self.div_floor_by_last_modulus(a, b);
(1..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace(b)});
for (i, r) in self.0[0..level].iter().enumerate(){
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(a.at(level), a.at(i), &rescaling_constants.0[i], b.at_mut(i));
}
}
}
pub fn div_floor_by_last_moduli_inplace(&self, nb_moduli:usize, a: &mut PolyRNS<u64>){
(0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace(a)});
/// Updates a to floor(a / q[self.level()]).
/// Expects a to be in the NTT domain when NTT is true.
pub fn div_floor_by_last_modulus_inplace<const NTT:bool>(&self, buf: &mut PolyRNS<u64>, a: &mut PolyRNS<u64>){
debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
let level = self.level();
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
if NTT{
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<true>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate(){
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(&buf_ntt_qi_scaling[0], &rescaling_constants.0[i], a.at_mut(i));
}
}else{
let (a_i, a_level) = a.0.split_at_mut(level);
for (i, r) in self.0[0..level].iter().enumerate(){
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(&a_level[0], &rescaling_constants.0[i], &mut a_i[i]);
}
}
}
pub fn div_round_by_last_modulus_ntt(&self, a: &PolyRNS<u64>, buf: &mut PolyRNS<u64>, b: &mut PolyRNS<u64>){
/// Updates c to floor(a / prod_{i = level-nb_moduli+1}^{level} q[i]), where level = self.level().
pub fn div_floor_by_last_moduli<const NTT:bool>(&self, nb_moduli:usize, a: &PolyRNS<u64>, buf: &mut PolyRNS<u64>, c: &mut PolyRNS<u64>){
debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
debug_assert!(c.level() >= a.level()-1, "invalid input c: c.level()={} < a.level()-1={}", c.level(), a.level()-1);
debug_assert!(nb_moduli <= a.level(), "invalid input nb_moduli: nb_moduli={} > a.level()={}", nb_moduli, a.level());
if nb_moduli == 0{
if a != c{
c.copy(a);
}
}else{
if NTT{
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::<false>(&mut PolyRNS::<u64>::default(), buf)});
self.at_level(self.level()-nb_moduli).ntt::<false>(buf, c);
}else{
self.div_floor_by_last_modulus::<false>(a, buf, c);
(1..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::<false>(buf, c)});
}
}
}
/// Updates a to floor(a / prod_{i = level-nb_moduli+1}^{level} q[i]), where level = self.level().
pub fn div_floor_by_last_moduli_inplace<const NTT:bool>(&self, nb_moduli:usize, buf: &mut PolyRNS<u64>, a: &mut PolyRNS<u64>){
debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
debug_assert!(nb_moduli <= a.level(), "invalid input nb_moduli: nb_moduli={} > a.level()={}", nb_moduli, a.level());
if NTT{
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::<false>(&mut PolyRNS::<u64>::default(), buf)});
self.at_level(self.level()-nb_moduli).ntt::<false>(buf, a);
}else{
(0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::<false>(buf, a)});
}
}
/// Updates b to round(a / q[self.level()]).
/// Expects a and b to be in the NTT domain when NTT is true.
pub fn div_round_by_last_modulus<const NTT:bool>(&self, a: &PolyRNS<u64>, buf: &mut PolyRNS<u64>, b: &mut PolyRNS<u64>){
debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
debug_assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
let level: usize = self.level();
let r_last: &Ring<u64> = &self.0[level];
let q_level_half: u64 = r_last.modulus.q>>1;
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
if NTT{
r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate(){
r_last.add_scalar::<ONCE>(&buf_ntt_q_scaling[0], &q_level_half, &mut buf_ntt_qi_scaling[0]);
r.ntt_inplace::<false>(&mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants.0[i], b.at_mut(i));
}
}else{
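// The non-NTT path is left unimplemented in this commit.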
}
}
/// Updates a to round(a / q[self.level()]).
/// Expects a to be in the NTT domain when NTT is true.
pub fn div_round_by_last_modulus_inplace<const NTT:bool>(&self, buf: &mut PolyRNS<u64>, a: &mut PolyRNS<u64>){
debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
let level = self.level();
let r_last: &Ring<u64> = &self.0[level];
let q_level_half: u64 = r_last.modulus.q>>1;
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
if NTT{
r_last.intt::<true>(a.at(level), &mut buf_ntt_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate(){
r_last.add_scalar::<ONCE>(&buf_ntt_q_scaling[0], &q_level_half, &mut buf_ntt_qi_scaling[0]);
r.ntt_inplace::<true>(&mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(&buf_ntt_qi_scaling[0], &rescaling_constants.0[i], a.at_mut(i));
}
}
}
}
#[cfg(test)]
mod tests {
use num_bigint::BigInt;
use num_bigint::Sign;
use crate::ring::Ring;
use crate::ring::impl_u64::ring_rns::new_rings;
use sampling::source::Source;
use super::*;
#[test]
fn test_div_floor_by_last_modulus_ntt() {
let n = 1<<10;
let moduli: Vec<u64> = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64];
let rings: Vec<Ring<u64>> = new_rings(n, moduli);
let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings);
let seed: [u8; 32] = [0;32];
let mut source: Source = Source::new(seed);
let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
let mut b: PolyRNS<u64> = ring_rns.new_polyrns();
let mut c: PolyRNS<u64> = ring_rns.at_level(ring_rns.level()-1).new_polyrns();
// Allocates a random PolyRNS
ring_rns.fill_uniform(&mut source, &mut a);
// Maps PolyRNS to [BigInt]
let mut coeffs_a: Vec<BigInt> = (0..n).map(|i|{BigInt::from(i)}).collect();
ring_rns.at_level(a.level()).to_bigint_inplace(&a, 1, &mut coeffs_a);
// Performs c = intt(ntt(a) / q_level)
ring_rns.ntt_inplace::<false>(&mut a);
ring_rns.div_floor_by_last_modulus_ntt(&a, &mut b, &mut c);
ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
// Exports c to coeffs_c
let mut coeffs_c = vec![BigInt::from(0);c.n()];
ring_rns.at_level(c.level()).to_bigint_inplace(&c, 1, &mut coeffs_c);
// Performs floor division on a
let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
coeffs_a.iter_mut().for_each(|a|{
// Emulates floor division in [0, q-1] and maps to [-(q-1)/2, (q-1)/2-1]
*a /= &scalar_big;
if a.sign() == Sign::Minus {
*a -= 1;
}
});
assert!(coeffs_a == coeffs_c);
}
}
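For context on what these routines compute: dropping the last modulus q_L of the RNS basis uses the standard identity, for each remaining modulus q_i,

floor(a / q_L) = (a_i - a_L) * q_L^{-1}  (mod q_i)

where a_i = a mod q_i and a_L = a mod q_L. The (a + 2q - b) * c primitive evaluates exactly such a difference-times-constant expression, with c holding the precomputed rescaling constant (an inverse of q_L modulo q_i, up to sign, in Barrett form); in the NTT path the last limb is first brought back to the coefficient domain, re-NTT'd per q_i, and then combined. The interpretation of the constant is inferred from the code above, not stated in the diff.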

View File

@@ -5,7 +5,7 @@ use crate::modulus::montgomery::Montgomery;
use crate::modulus::barrett::Barrett;
use crate::poly::Poly;
use crate::modulus::{REDUCEMOD, BARRETT};
use crate::modulus::VecOperations;
use crate::modulus::VectorOperations;
use num_bigint::BigInt;
use num_traits::ToPrimitive;
use crate::CHUNK;
@@ -66,7 +66,7 @@ impl Ring<u64>{
pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_add_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
self.modulus.va_add_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
@@ -74,14 +74,27 @@ impl Ring<u64>{
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.vec_add_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
self.modulus.va_add_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn add_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut Poly<u64>){
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.sa_add_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
}
#[inline(always)]
pub fn add_scalar<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.va_add_sb_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
}
#[inline(always)]
pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_sub_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
self.modulus.va_sub_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
@@ -89,20 +102,20 @@ impl Ring<u64>{
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.vec_sub_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
self.modulus.va_sub_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_neg_binary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
self.modulus.va_neg_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.vec_neg_unary_assign::<CHUNK, REDUCE>(&mut a.0);
self.modulus.va_neg_into_va::<CHUNK, REDUCE>(&mut a.0);
}
#[inline(always)]
@@ -110,39 +123,39 @@ impl Ring<u64>{
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.vec_mul_montgomery_external_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
self.modulus.va_mont_mul_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn mul_montgomery_external_inplace<const REDUCE:REDUCEMOD>(&self, a:&Poly<Montgomery<u64>>, b:&mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_mul_montgomery_external_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
self.modulus.va_mont_mul_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn mul_scalar<const REDUCE:REDUCEMOD>(&self, a:&Poly<u64>, b: &u64, c:&mut Poly<u64>){
debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.vec_mul_scalar_barrett_external_binary_assign::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(*b), &a.0, &mut c.0);
self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(*b), &a.0, &mut c.0);
}
#[inline(always)]
pub fn mul_scalar_inplace<const REDUCE:REDUCEMOD>(&self, a:&u64, b:&mut Poly<u64>){
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_mul_scalar_barrett_external_unary_assign::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(self.modulus.barrett.reduce::<BARRETT>(a)), &mut b.0);
self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(self.modulus.barrett.reduce::<BARRETT>(a)), &mut b.0);
}
#[inline(always)]
pub fn mul_scalar_barrett_inplace<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b:&mut Poly<u64>){
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_mul_scalar_barrett_external_unary_assign::<CHUNK, REDUCE>(a, &mut b.0);
self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
}
#[inline(always)]
pub fn mul_scalar_barrett<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b: &Poly<u64>, c:&mut Poly<u64>){
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_mul_scalar_barrett_external_binary_assign::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
}
#[inline(always)]
@@ -150,13 +163,13 @@ impl Ring<u64>{
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_d::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
self.modulus.va_sub_vb_mul_sc_into_vd::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
}
#[inline(always)]
pub fn sum_aqqmb_prod_c_scalar_barrett_inplace<const REDUCE:REDUCEMOD>(&self, a: &Poly<u64>, c: &Barrett<u64>, b: &mut Poly<u64>){
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_b::<CHUNK, REDUCE>(&a.0, c, &mut b.0);
self.modulus.va_sub_vb_mul_sc_into_vb::<CHUNK, REDUCE>(&a.0, c, &mut b.0);
}
}
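A short sketch exercising the scalar-add helpers added in this commit. It assumes new_rings and new_poly behave as in the crate's benches and tests, with new_poly returning a zero-initialized polynomial:

use math::ring::Ring;
use math::ring::impl_u64::ring_rns::new_rings;
use math::modulus::ONCE;

fn add_scalar_demo() {
    let rings: Vec<Ring<u64>> = new_rings(1 << 10, vec![0x1fffffffffc80001u64]);
    let r: &Ring<u64> = &rings[0];
    let a = r.new_poly();
    let mut c = r.new_poly();
    r.add_scalar::<ONCE>(&a, &5u64, &mut c);     // c[i] = a[i] + 5 mod q
    r.add_scalar_inplace::<ONCE>(&5u64, &mut c); // c[i] = c[i] + 5 mod q
}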

View File

@@ -0,0 +1,61 @@
use num_bigint::BigInt;
use num_bigint::Sign;
use math::ring::{Ring, RingRNS};
use math::poly::PolyRNS;
use math::ring::impl_u64::ring_rns::new_rings;
use sampling::source::Source;
#[test]
fn rescaling_rns_u64(){
let n = 1<<10;
let moduli: Vec<u64> = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64];
let rings: Vec<Ring<u64>> = new_rings(n, moduli);
let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings);
test_div_floor_by_last_modulus::<false>(&ring_rns);
test_div_floor_by_last_modulus::<true>(&ring_rns);
}
fn test_div_floor_by_last_modulus<const NTT:bool>(ring_rns: &RingRNS<u64>) {
let seed: [u8; 32] = [0;32];
let mut source: Source = Source::new(seed);
let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
let mut b: PolyRNS<u64> = ring_rns.new_polyrns();
let mut c: PolyRNS<u64> = ring_rns.at_level(ring_rns.level()-1).new_polyrns();
// Fills a with uniform random coefficients
ring_rns.fill_uniform(&mut source, &mut a);
// Maps PolyRNS to [BigInt]
let mut coeffs_a: Vec<BigInt> = (0..a.n()).map(|i|{BigInt::from(i)}).collect();
ring_rns.at_level(a.level()).to_bigint_inplace(&a, 1, &mut coeffs_a);
// Computes c = floor(a / q_level), going through the NTT domain when NTT is true
if NTT{
ring_rns.ntt_inplace::<false>(&mut a);
}
ring_rns.div_floor_by_last_modulus::<NTT>(&a, &mut b, &mut c);
if NTT{
ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
}
// Exports c to coeffs_c
let mut coeffs_c = vec![BigInt::from(0);c.n()];
ring_rns.at_level(c.level()).to_bigint_inplace(&c, 1, &mut coeffs_c);
// Performs floor division on a
let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
coeffs_a.iter_mut().for_each(|a|{
// BigInt division truncates toward zero; subtract 1 from negative quotients to emulate floor division on the centered coefficients
*a /= &scalar_big;
if a.sign() == Sign::Minus {
*a -= 1;
}
});
assert!(coeffs_a == coeffs_c);
}
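Illustration only (not part of the commit): the reference computation above needs the sign fix-up because BigInt division truncates toward zero, whereas the rescaling is a floor division.

#[test]
fn floor_division_fixup_demo() {
    use num_bigint::{BigInt, Sign};
    let q = BigInt::from(7);
    let mut a = BigInt::from(-10);
    a /= &q; // truncates toward zero: a == -1
    if a.sign() == Sign::Minus {
        a -= 1; // floor(-10 / 7) == -2
    }
    assert_eq!(a, BigInt::from(-2));
}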