added spqlios as submodule

Jean-Philippe Bossuat
2025-01-27 14:10:59 +01:00
parent 250d1a4942
commit c30f598776
244 changed files with 51 additions and 29899 deletions

View File

@@ -0,0 +1,208 @@
use crate::automorphism::AutoPerm;
use crate::modulus::{ScalarOperations, ONCE};
use crate::modulus::{WordOps, REDUCEMOD};
use crate::poly::Poly;
use crate::ring::Ring;
impl Ring<u64> {
// b <- auto(a)
pub fn a_apply_automorphism_native_into_b<const NTT: bool>(
&self,
a: &Poly<u64>,
gal_el: usize,
nth_root: usize,
b: &mut Poly<u64>,
) {
self.apply_automorphism_native_core::<0, ONCE, NTT>(a, gal_el, nth_root, b)
}
// b <- REDUCEMOD(b + auto(a))
pub fn a_apply_automorphism_native_add_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
&self,
a: &Poly<u64>,
gal_el: usize,
nth_root: usize,
b: &mut Poly<u64>,
) {
self.apply_automorphism_native_core::<1, REDUCE, NTT>(a, gal_el, nth_root, b)
}
// b <- REDUCEMOD(b - auto(a))
pub fn a_apply_automorphism_native_sub_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
&self,
a: &Poly<u64>,
gal_el: usize,
nth_root: usize,
b: &mut Poly<u64>,
) {
self.apply_automorphism_native_core::<2, REDUCE, NTT>(a, gal_el, nth_root, b)
}
fn apply_automorphism_native_core<const MOD: u8, const REDUCE: REDUCEMOD, const NTT: bool>(
&self,
a: &Poly<u64>,
gal_el: usize,
nth_root: usize,
b: &mut Poly<u64>,
) {
debug_assert!(
a.n() == b.n(),
"invalid inputs: a.n() = {} != b.n() = {}",
a.n(),
b.n()
);
assert!(
gal_el & 1 == 1,
"invalid gal_el={}: not coprime with nth_root={}",
gal_el,
nth_root
);
assert!(
nth_root & (nth_root - 1) == 0,
"invalid nth_root={}: not a power-of-two",
nth_root
);
let b_vec: &mut Vec<u64> = &mut b.0;
let a_vec: &Vec<u64> = &a.0;
if NTT {
let mask: usize = nth_root - 1;
let log_nth_root_half: u32 = nth_root.log2() as u32 - 1;
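// In the NTT domain the automorphism is a permutation of the evaluation points:
// index i (in bit-reversed order) corresponds to the odd exponent i_rev, which is
// mapped to gal_el * i_rev mod nth_root and then converted back to a bit-reversed index.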
a_vec.iter().enumerate().for_each(|(i, ai)| {
let i_rev: usize = 2 * i.reverse_bits_msb(log_nth_root_half) + 1;
let gal_el_i: usize = (((gal_el * i_rev) & mask) - 1) >> 1;
let idx: usize = gal_el_i.reverse_bits_msb(log_nth_root_half);
match MOD {
0 => b_vec[idx] = *ai,
1 => self
.modulus
.sa_add_sb_into_sb::<REDUCE>(ai, &mut b_vec[idx]),
2 => self
.modulus
.sa_sub_sb_into_sa::<1, REDUCE>(ai, &mut b_vec[idx]),
_ => {
panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
}
}
});
} else {
let n: usize = a.n();
let mask: usize = n - 1;
let log_n: usize = n.log2();
let q: u64 = self.modulus.q();
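// Coefficient domain: the coefficient of X^i moves to X^{i*gal_el mod n}; the parity
// of floor(i*gal_el / n) tells whether it wrapped around X^n = -1 an odd number of
// times, in which case the coefficient is negated (branch-free select below).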
a_vec.iter().enumerate().for_each(|(i, ai)| {
let gal_el_i: usize = i * gal_el;
let sign: u64 = ((gal_el_i >> log_n) & 1) as u64;
let i_out: usize = gal_el_i & mask;
let v: u64 = ai * (sign ^ 1) | (q - ai) * sign;
match MOD {
0 => b_vec[i_out] = v,
1 => self
.modulus
.sa_add_sb_into_sb::<REDUCE>(&v, &mut b_vec[i_out]),
2 => self
.modulus
.sa_sub_sb_into_sa::<1, REDUCE>(&v, &mut b_vec[i_out]),
_ => {
panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
}
}
});
}
}
// b <- auto(a)
pub fn a_apply_automorphism_from_perm_into_b<const NTT: bool>(
&self,
a: &Poly<u64>,
auto_perm: &AutoPerm,
b: &mut Poly<u64>,
) {
self.automorphism_from_perm_core::<0, ONCE, NTT>(a, auto_perm, b)
}
// b <- REDUCEMOD(b + auto(a))
pub fn a_apply_automorphism_from_perm_add_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
&self,
a: &Poly<u64>,
auto_perm: &AutoPerm,
b: &mut Poly<u64>,
) {
self.automorphism_from_perm_core::<1, REDUCE, NTT>(a, auto_perm, b)
}
// b <- REDUCEMOD(b - auto(a))
pub fn a_apply_automorphism_from_perm_sub_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
&self,
a: &Poly<u64>,
auto_perm: &AutoPerm,
b: &mut Poly<u64>,
) {
self.automorphism_from_perm_core::<2, REDUCE, NTT>(a, auto_perm, b)
}
// b <- auto(a) if OVERWRITE else b <- REDUCEMOD(b + auto(a))
fn automorphism_from_perm_core<const MOD: u8, const REDUCE: REDUCEMOD, const NTT: bool>(
&self,
a: &Poly<u64>,
auto_perm: &AutoPerm,
b: &mut Poly<u64>,
) {
debug_assert!(
a.n() == b.n(),
"invalid inputs: a.n() = {} != b.n() = {}",
a.n(),
b.n()
);
assert!(
NTT == auto_perm.ntt,
"missmatch between AutoPerm NTT flag={} and method NTT flag={}",
auto_perm.ntt,
NTT
);
let b_vec: &mut Vec<u64> = &mut b.0;
let a_vec: &Vec<u64> = &a.0;
let idx: &Vec<usize> = &auto_perm.permutation;
if NTT {
a_vec.iter().enumerate().for_each(|(i, ai)| match MOD {
0 => b_vec[idx[i]] = *ai,
1 => self
.modulus
.sa_add_sb_into_sb::<REDUCE>(ai, &mut b_vec[idx[i]]),
2 => self
.modulus
.sa_sub_sb_into_sa::<1, REDUCE>(ai, &mut b_vec[idx[i]]),
_ => {
panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
}
});
} else {
let n: usize = a.n();
let mask: usize = n - 1;
let q: u64 = self.modulus.q();
a_vec.iter().enumerate().for_each(|(i, ai)| {
let sign: u64 = (idx[i] >> (usize::BITS - 1)) as u64;
let v: u64 = ai * (sign ^ 1) | (q - ai) * sign;
match MOD {
0 => b_vec[idx[i] & mask] = v,
1 => self
.modulus
.sa_add_sb_into_sb::<REDUCE>(&v, &mut b_vec[idx[i] & mask]),
2 => self
.modulus
.sa_sub_sb_into_sa::<1, REDUCE>(&v, &mut b_vec[idx[i] & mask]),
_ => {
panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
}
}
});
}
}
}
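
A minimal usage sketch for the native automorphism above (not part of the commit): the modulus below is a placeholder assumed to be an NTT-friendly prime, and `new_poly()` is assumed to return a zero-initialized polynomial, as suggested by its use in `gen_x_pow_2` further down.

// Sketch only: apply X -> X^5 to a = X in the coefficient domain (NTT = false).
let ring = Ring::<u64>::new(1 << 10, 0x1fffffffffe00001, 1);
let mut a = ring.new_poly();
a.0[1] = 1; // a = X
let mut b = ring.new_poly();
ring.a_apply_automorphism_native_into_b::<false>(&a, 5, ring.cyclotomic_order(), &mut b);
// b now represents X^5: a 1 at index 5, and values congruent to 0 mod q elsewhere.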

View File

@@ -0,0 +1,7 @@
pub mod automorphism;
pub mod rescaling_rns;
pub mod ring;
pub mod ring_rns;
pub mod ring_switch;
pub mod sampling;
pub mod utils;

View File

@@ -0,0 +1,285 @@
use crate::modulus::barrett::Barrett;
use crate::modulus::{BARRETT, NONE, ONCE};
use crate::poly::{Poly, PolyRNS};
use crate::ring::Ring;
use crate::ring::RingRNS;
use crate::scalar::ScalarRNS;
extern crate test;
impl RingRNS<u64> {
/// Updates b to floor(a / q[self.level()]).
/// buf is unused when ROUND=false and NTT=false.
pub fn div_by_last_modulus<const ROUND: bool, const NTT: bool>(
&self,
a: &PolyRNS<u64>,
buf: &mut [Poly<u64>; 2],
b: &mut PolyRNS<u64>,
) {
debug_assert!(self.level() != 0, "invalid call: self.level()=0");
debug_assert!(
a.level() >= self.level(),
"invalid input a: a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level() - 1,
"invalid input b: b.level()={} < self.level()-1={}",
b.level(),
self.level() - 1
);
let level = self.level();
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let r_last: &Ring<u64> = &self.0[level];
if ROUND {
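// Rounded division: q_last/2 is added to the (copied) last component, and
// (q_last/2 mod q_i) is subtracted back before the difference with a_i is taken,
// so the exact division by q_last below computes floor((a + q_last/2) / q_last).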
let q_level_half: u64 = r_last.modulus.q >> 1;
let (buf_q_scaling, buf_qi_scaling) = buf.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.a_add_b_scalar_into_c::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<true>(&mut buf_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
} else {
r_last.a_add_b_scalar_into_c::<ONCE>(
a.at(self.level()),
&q_level_half,
&mut buf_q_scaling[0],
);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.a_add_b_scalar_into_c::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
}
} else {
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.split_at_mut(1);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
&buf_ntt_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
} else {
for (i, r) in self.0[0..level].iter().enumerate() {
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
a.at(level),
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
}
}
}
/// Updates a to floor(a / q[self.level()]).
/// Expects a to be in the NTT domain when NTT=true.
pub fn div_by_last_modulus_inplace<const ROUND: bool, const NTT: bool>(
&self,
buf: &mut [Poly<u64>; 2],
a: &mut PolyRNS<u64>,
) {
debug_assert!(
self.level() <= a.level(),
"invalid input a: a.level()={} < self.level()={}",
a.level(),
self.level()
);
let level = self.level();
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let r_last: &Ring<u64> = &self.0[level];
if ROUND {
let q_level_half: u64 = r_last.modulus.q >> 1;
let (buf_q_scaling, buf_qi_scaling) = buf.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.a_add_b_scalar_into_c::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<false>(&mut buf_qi_scaling[0]);
r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
&buf_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
let (a_qi, a_q_last) = a.0.split_at_mut(self.level());
r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut a_q_last[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<1, ONCE>(
&a_q_last[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&rescaling_constants.0[i],
&mut a_qi[i],
);
}
}
} else {
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.split_at_mut(1);
r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
&buf_ntt_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
let (a_i, a_level) = a.0.split_at_mut(level);
for (i, r) in self.0[0..level].iter().enumerate() {
r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
&a_level[0],
&rescaling_constants.0[i],
&mut a_i[i],
);
}
}
}
}
/// Updates c to floor(a / prod_{i = level - nb_moduli_dropped + 1}^{level} q[i]).
pub fn div_by_last_moduli<const ROUND: bool, const NTT: bool>(
&self,
nb_moduli_dropped: usize,
a: &PolyRNS<u64>,
buf0: &mut [Poly<u64>; 2],
buf1: &mut PolyRNS<u64>,
c: &mut PolyRNS<u64>,
) {
debug_assert!(
nb_moduli_dropped <= self.level(),
"invalid input nb_moduli_dropped: nb_moduli_dropped={} > self.level()={}",
nb_moduli_dropped,
self.level()
);
debug_assert!(
a.level() >= self.level(),
"invalid input a: a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
buf1.level() >= self.level(),
"invalid input buf: buf.level()={} < self.level()={}",
buf1.level(),
self.level()
);
debug_assert!(
c.level() >= self.level() - nb_moduli_dropped,
"invalid input c: c.level()={} < self.level()-nb_moduli_dropped={}",
c.level(),
self.level() - nb_moduli_dropped
);
if nb_moduli_dropped == 0 {
if a != c {
c.copy(a);
}
} else {
if NTT {
self.intt::<false>(a, buf1);
(0..nb_moduli_dropped).for_each(|i| {
self.at_level(self.level() - i)
.div_by_last_modulus_inplace::<ROUND, false>(buf0, buf1)
});
self.at_level(self.level() - nb_moduli_dropped)
.ntt::<false>(buf1, c);
} else if nb_moduli_dropped == 1 {
// A single modulus is dropped: one division of a by q[level] directly into c.
self.div_by_last_modulus::<ROUND, false>(a, buf0, c);
} else {
self.div_by_last_modulus::<ROUND, false>(a, buf0, buf1);
(1..nb_moduli_dropped - 1).for_each(|i| {
self.at_level(self.level() - i)
.div_by_last_modulus_inplace::<ROUND, false>(buf0, buf1);
});
self.at_level(self.level() - nb_moduli_dropped + 1)
.div_by_last_modulus::<ROUND, false>(buf1, buf0, c);
}
}
}
/// Updates a to floor(a / prod_{i = level - nb_moduli_dropped + 1}^{level} q[i]).
pub fn div_by_last_moduli_inplace<const ROUND: bool, const NTT: bool>(
&self,
nb_moduli_dropped: usize,
buf0: &mut [Poly<u64>; 2],
buf1: &mut PolyRNS<u64>,
a: &mut PolyRNS<u64>,
) {
debug_assert!(
nb_moduli_dropped <= self.level(),
"invalid input nb_moduli_dropped: nb_moduli_dropped={} > self.level()={}",
nb_moduli_dropped,
self.level()
);
debug_assert!(
a.level() >= self.level(),
"invalid input a: a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
buf1.level() >= self.level(),
"invalid input buf: buf.level()={} < self.level()={}",
buf1.level(),
self.level()
);
if nb_moduli_dropped == 0 {
return;
}
if NTT {
self.intt::<false>(a, buf1);
(0..nb_moduli_dropped).for_each(|i| {
self.at_level(self.level() - i)
.div_by_last_modulus_inplace::<ROUND, false>(buf0, buf1)
});
self.at_level(self.level() - nb_moduli_dropped)
.ntt::<false>(buf1, a);
} else {
(0..nb_moduli_dropped).for_each(|i| {
self.at_level(self.level() - i)
.div_by_last_modulus_inplace::<ROUND, false>(buf0, a)
});
}
}
}
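
A hedged sketch of how a caller might drop the last RNS modulus (not part of the commit); allocation of the PolyRNS operands is left to the caller, since no PolyRNS constructor is shown in this file.

// Sketch only: drops the last modulus of `a` with rounding, NTT-domain inputs and outputs.
fn rescale_once(rns: &RingRNS<u64>, a: &PolyRNS<u64>, buf: &mut [Poly<u64>; 2], b: &mut PolyRNS<u64>) {
// ROUND = true: compute round(a / q_last) instead of floor(a / q_last).
// NTT = true: `a` is in the NTT domain and `b` is produced in the NTT domain.
rns.div_by_last_modulus::<true, true>(a, buf, b);
}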

View File

@@ -0,0 +1,451 @@
use crate::dft::ntt::Table;
use crate::modulus::barrett::Barrett;
use crate::modulus::montgomery::Montgomery;
use crate::modulus::prime::Prime;
use crate::modulus::{VectorOperations, ONCE};
use crate::modulus::{BARRETT, REDUCEMOD};
use crate::poly::Poly;
use crate::ring::Ring;
use crate::CHUNK;
use num_bigint::BigInt;
use num_traits::ToPrimitive;
impl Ring<u64> {
pub fn new(n: usize, q_base: u64, q_power: usize) -> Self {
let prime: Prime<u64> = Prime::<u64>::new(q_base, q_power);
Self {
n: n,
modulus: prime.clone(),
cyclotomic_order: n << 1,
dft: Box::new(Table::<u64>::new(prime, n << 1)),
}
}
pub fn from_bigint(&self, coeffs: &[BigInt], step: usize, a: &mut Poly<u64>) {
assert!(
step <= a.n(),
"invalid step: step={} > a.n()={}",
step,
a.n()
);
assert!(
coeffs.len() <= a.n() / step,
"invalid coeffs: coeffs.len()={} > a.n()/step={}",
coeffs.len(),
a.n() / step
);
let q_big: BigInt = BigInt::from(self.modulus.q);
a.0.iter_mut()
.step_by(step)
.enumerate()
.for_each(|(i, v)| *v = (&coeffs[i] % &q_big).to_u64().unwrap());
}
}
impl Ring<u64> {
pub fn ntt_inplace<const LAZY: bool>(&self, poly: &mut Poly<u64>) {
match LAZY {
true => self.dft.forward_inplace_lazy(&mut poly.0),
false => self.dft.forward_inplace(&mut poly.0),
}
}
pub fn intt_inplace<const LAZY: bool>(&self, poly: &mut Poly<u64>) {
match LAZY {
true => self.dft.backward_inplace_lazy(&mut poly.0),
false => self.dft.backward_inplace(&mut poly.0),
}
}
pub fn ntt<const LAZY: bool>(&self, poly_in: &Poly<u64>, poly_out: &mut Poly<u64>) {
poly_out.0.copy_from_slice(&poly_in.0);
match LAZY {
true => self.dft.forward_inplace_lazy(&mut poly_out.0),
false => self.dft.forward_inplace(&mut poly_out.0),
}
}
pub fn intt<const LAZY: bool>(&self, poly_in: &Poly<u64>, poly_out: &mut Poly<u64>) {
poly_out.0.copy_from_slice(&poly_in.0);
match LAZY {
true => self.dft.backward_inplace_lazy(&mut poly_out.0),
false => self.dft.backward_inplace(&mut poly_out.0),
}
}
}
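
A quick round-trip sketch for the NTT helpers above (not part of the commit): the modulus is a placeholder assumed to be an NTT-friendly prime for n = 2^12, and `new_poly()` is assumed to return a zero-initialized polynomial.

let ring = Ring::<u64>::new(1 << 12, 0x1fffffffffe00001, 1);
let mut a = ring.new_poly();
a.0[1] = 3; // a = 3*X
let mut a_ntt = ring.new_poly();
ring.ntt::<false>(&a, &mut a_ntt); // forward NTT into a separate buffer
let mut back = ring.new_poly();
ring.intt::<false>(&a_ntt, &mut back); // non-lazy inverse NTT undoes the transform
assert_eq!(a.0, back.0);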
impl Ring<u64> {
#[inline(always)]
pub fn a_add_b_into_b<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_add_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_add_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn a_add_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.va_add_sb_into_va::<CHUNK, REDUCE>(b, &mut a.0);
}
#[inline(always)]
pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &u64,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_add_sb_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
}
#[inline(always)]
pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
&self,
b: &u64,
c: &Barrett<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
self.modulus
.va_add_sb_mul_sc_barrett_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
}
#[inline(always)]
pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &u64,
c: &Barrett<u64>,
d: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(d.n() == self.n(), "c.n()={} != n={}", d.n(), self.n());
self.modulus
.va_add_sb_mul_sc_barrett_into_vd::<CHUNK, REDUCE>(&a.0, b, c, &mut d.0);
}
#[inline(always)]
pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_into_vb::<CHUNK, BRANGE, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
b: &Poly<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, &mut a.0);
}
#[inline(always)]
pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_sub_vb_into_vc::<CHUNK, BRANGE, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
}
#[inline(always)]
pub fn a_prepare_montgomery_into_a<const REDUCE: REDUCEMOD>(
&self,
a: &mut Poly<Montgomery<u64>>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.va_prepare_montgomery_into_va::<CHUNK, REDUCE>(&mut a.0);
}
#[inline(always)]
pub fn a_mul_b_montgomery_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<Montgomery<u64>>,
b: &Poly<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_mul_vb_montgomery_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn a_mul_b_montgomery_add_c_into_c<const REDUCE1: REDUCEMOD, const REDUCE2: REDUCEMOD>(
&self,
a: &Poly<Montgomery<u64>>,
b: &Poly<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_mul_vb_montgomery_add_vc_into_vc::<CHUNK, REDUCE1, REDUCE2>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn a_mul_b_montgomery_into_a<const REDUCE: REDUCEMOD>(
&self,
b: &Poly<Montgomery<u64>>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_mul_vb_montgomery_into_va::<CHUNK, REDUCE>(&b.0, &mut a.0);
}
#[inline(always)]
pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &u64,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.va_mul_sb_barrett_into_vc::<CHUNK, REDUCE>(
&a.0,
&self.modulus.barrett.prepare(*b),
&mut c.0,
);
}
#[inline(always)]
pub fn a_mul_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.va_mul_sb_barrett_into_va::<CHUNK, REDUCE>(
&self
.modulus
.barrett
.prepare(self.modulus.barrett.reduce::<BARRETT>(b)),
&mut a.0,
);
}
#[inline(always)]
pub fn a_mul_b_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
&self,
b: &Barrett<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.va_mul_sb_barrett_into_va::<CHUNK, REDUCE>(b, &mut a.0);
}
#[inline(always)]
pub fn a_mul_b_scalar_barrett_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Barrett<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.va_mul_sb_barrett_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
}
#[inline(always)]
pub fn a_sub_b_mul_c_scalar_barrett_into_d<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
c: &Barrett<u64>,
d: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_barrett_into_vd::<CHUNK, VBRANGE, REDUCE>(&a.0, &b.0, c, &mut d.0);
}
#[inline(always)]
pub fn b_sub_a_mul_c_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
b: &Poly<u64>,
c: &Barrett<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_barrett_into_vb::<CHUNK, BRANGE, REDUCE>(&b.0, c, &mut a.0);
}
#[inline(always)]
pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<
const BRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
c: &u64,
d: &Barrett<u64>,
e: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(e.n() == self.n(), "e.n()={} != n={}", e.n(), self.n());
self.modulus
.vb_sub_va_add_sc_mul_sd_barrett_into_ve::<CHUNK, BRANGE, REDUCE>(
&a.0, &b.0, c, d, &mut e.0,
);
}
#[inline(always)]
pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
const BRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
b: &Poly<u64>,
c: &u64,
d: &Barrett<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.vb_sub_va_add_sc_mul_sd_barrett_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
}
pub fn a_rsh_scalar_b_mask_scalar_c_into_d(
&self,
a: &Poly<u64>,
b: &usize,
c: &u64,
d: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus
.va_rsh_sb_mask_sc_into_vd::<CHUNK>(&a.0, b, c, &mut d.0);
}
pub fn a_rsh_scalar_b_mask_scalar_c_add_d_into_d(
&self,
a: &Poly<u64>,
b: &usize,
c: &u64,
d: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus
.va_rsh_sb_mask_sc_add_vd_into_vd::<CHUNK>(&a.0, b, c, &mut d.0);
}
pub fn a_ith_digit_unsigned_base_scalar_b_into_c(
&self,
i: usize,
a: &Poly<u64>,
b: &usize,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_ith_digit_unsigned_base_sb_into_vc::<CHUNK>(i, &a.0, b, &mut c.0);
}
pub fn a_ith_digit_signed_base_scalar_b_into_c<const BALANCED: bool>(
&self,
i: usize,
a: &Poly<u64>,
b: &usize,
carry: &mut Poly<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(
carry.n() == self.n(),
"carry.n()={} != n={}",
carry.n(),
self.n()
);
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_ith_digit_signed_base_sb_into_vc::<CHUNK, BALANCED>(
i,
&a.0,
b,
&mut carry.0,
&mut c.0,
);
}
pub fn a_mul_by_x_pow_b_into_a(&self, b: i32, a: &mut Poly<u64>) {
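// Multiplication by X^b in Z_q[X]/(X^n + 1) is a rotation of the coefficients by
// b mod n, with a sign flip on the part that wrapped around X^n = -1; b mod 2n
// (computed below as b_0) decides which of the two segments gets negated.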
let n: usize = self.n();
let cyclotomic_order: usize = self.cyclotomic_order();
let b_0: usize = (b as usize).wrapping_add(cyclotomic_order) & (cyclotomic_order - 1);
let b_1: usize = b as usize & (n - 1);
a.0.rotate_right(b_1);
if b_0 > b_1 {
self.modulus
.va_neg_into_va::<CHUNK, 1, ONCE>(&mut a.0[b_1..])
} else {
self.modulus
.va_neg_into_va::<CHUNK, 1, ONCE>(&mut a.0[..b_1])
}
}
}

View File

@@ -0,0 +1,456 @@
use crate::modulus::barrett::Barrett;
use crate::modulus::montgomery::Montgomery;
use crate::modulus::REDUCEMOD;
use crate::poly::PolyRNS;
use crate::ring::{Ring, RingRNS};
use crate::scalar::ScalarRNS;
use num_bigint::BigInt;
use std::rc::Rc;
impl RingRNS<u64> {
pub fn new(n: usize, moduli: Vec<u64>) -> Self {
assert!(!moduli.is_empty(), "moduli cannot be empty");
let rings: Vec<Rc<Ring<u64>>> = moduli
.into_iter()
.map(|prime| Rc::new(Ring::new(n, prime, 1)))
.collect();
RingRNS(rings)
}
pub fn modulus(&self) -> BigInt {
let mut modulus = BigInt::from(1);
self.0
.iter()
.for_each(|r| modulus *= BigInt::from(r.modulus.q));
modulus
}
pub fn rescaling_constant(&self) -> ScalarRNS<Barrett<u64>> {
let level = self.level();
let q_scale: u64 = self.0[level].modulus.q;
ScalarRNS(
(0..level)
.map(|i| {
self.0[i]
.modulus
.barrett
.prepare(self.0[i].modulus.q - self.0[i].modulus.inv(q_scale))
})
.collect(),
)
}
pub fn from_bigint_inplace(&self, coeffs: &[BigInt], step: usize, a: &mut PolyRNS<u64>) {
let level = self.level();
assert!(
level <= a.level(),
"invalid level: level={} > a.level()={}",
level,
a.level()
);
(0..level).for_each(|i| self.0[i].from_bigint(coeffs, step, a.at_mut(i)));
}
pub fn to_bigint_inplace(&self, a: &PolyRNS<u64>, step: usize, coeffs: &mut [BigInt]) {
assert!(
step <= a.n(),
"invalid step: step={} > a.n()={}",
step,
a.n()
);
assert!(
coeffs.len() <= a.n() / step,
"invalid coeffs: coeffs.len()={} > a.n()/step={}",
coeffs.len(),
a.n() / step
);
let mut inv_crt: Vec<BigInt> = vec![BigInt::default(); self.level() + 1];
let q_big: BigInt = self.modulus();
let q_big_half: BigInt = &q_big >> 1;
inv_crt.iter_mut().enumerate().for_each(|(i, a)| {
let qi_big = BigInt::from(self.0[i].modulus.q);
*a = &q_big / &qi_big;
*a *= a.modinv(&qi_big).unwrap();
});
(0..self.n()).step_by(step).enumerate().for_each(|(i, j)| {
coeffs[j] = BigInt::from(a.at(0).0[i]) * &inv_crt[0];
(1..self.level() + 1).for_each(|k| {
coeffs[j] += BigInt::from(a.at(k).0[i]) * &inv_crt[k];
});
coeffs[j] %= &q_big;
if &coeffs[j] >= &q_big_half {
coeffs[j] -= &q_big;
}
});
}
}
impl RingRNS<u64> {
pub fn ntt_inplace<const LAZY: bool>(&self, a: &mut PolyRNS<u64>) {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.ntt_inplace::<LAZY>(&mut a.0[i]));
}
pub fn intt_inplace<const LAZY: bool>(&self, a: &mut PolyRNS<u64>) {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.intt_inplace::<LAZY>(&mut a.0[i]));
}
pub fn ntt<const LAZY: bool>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.ntt::<LAZY>(&a.0[i], &mut b.0[i]));
}
pub fn intt<const LAZY: bool>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.intt::<LAZY>(&a.0[i], &mut b.0[i]));
}
}
impl RingRNS<u64> {
#[inline(always)]
pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &PolyRNS<u64>,
c: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
debug_assert!(
c.level() >= self.level(),
"c.level()={} < self.level()={}",
c.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_add_b_into_c::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
}
#[inline(always)]
pub fn a_add_b_into_b<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_add_b_into_b::<REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &PolyRNS<u64>,
c: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
debug_assert!(
c.level() >= self.level(),
"c.level()={} < self.level()={}",
c.level(),
self.level()
);
self.0.iter().enumerate().for_each(|(i, ring)| {
ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
});
}
#[inline(always)]
pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_sub_b_into_b::<BRANGE, REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
b: &PolyRNS<u64>,
a: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_sub_b_into_a::<BRANGE, REDUCE>(&b.0[i], &mut a.0[i]));
}
#[inline(always)]
pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_neg_into_b::<ARANGE, REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_neg_into_a::<ARANGE, REDUCE>(&mut a.0[i]));
}
#[inline(always)]
pub fn mul_montgomery_external<const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<Montgomery<u64>>,
b: &PolyRNS<u64>,
c: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
debug_assert!(
c.level() >= self.level(),
"c.level()={} < self.level()={}",
c.level(),
self.level()
);
self.0.iter().enumerate().for_each(|(i, ring)| {
ring.a_mul_b_montgomery_into_c::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
});
}
#[inline(always)]
pub fn mul_montgomery_external_inplace<const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<Montgomery<u64>>,
b: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn mul_scalar<const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &u64,
c: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
c.level() >= self.level(),
"b.level()={} < self.level()={}",
c.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_mul_b_scalar_into_c::<REDUCE>(&a.0[i], b, &mut c.0[i]));
}
#[inline(always)]
pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"b.level()={} < self.level()={}",
a.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_mul_b_scalar_into_a::<REDUCE>(b, &mut a.0[i]));
}
#[inline(always)]
pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<
const BRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
a: &PolyRNS<u64>,
b: &PolyRNS<u64>,
c: &u64,
d: &Barrett<u64>,
e: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
debug_assert!(
e.level() >= self.level(),
"e.level()={} < self.level()={}",
e.level(),
self.level()
);
self.0.iter().enumerate().for_each(|(i, ring)| {
ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(
&a.0[i],
&b.0[i],
c,
d,
&mut e.0[i],
)
});
}
#[inline(always)]
pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
const BRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
b: &PolyRNS<u64>,
c: &u64,
d: &Barrett<u64>,
a: &mut PolyRNS<u64>,
) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0.iter().enumerate().for_each(|(i, ring)| {
ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(
&b.0[i],
c,
d,
&mut a.0[i],
)
});
}
}
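
A small sketch of the CRT modulus reconstruction (not part of the commit): the two moduli are placeholders assumed to be distinct NTT-friendly primes.

let moduli: Vec<u64> = vec![0x1fffffffffe00001, 0x1fffffffffc80001];
let rns = RingRNS::<u64>::new(1 << 12, moduli.clone());
// modulus() returns the product of all RNS moduli as a BigInt.
assert_eq!(rns.modulus(), BigInt::from(moduli[0]) * BigInt::from(moduli[1]));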

View File

@@ -0,0 +1,43 @@
use crate::poly::Poly;
use crate::ring::Ring;
impl Ring<u64> {
pub fn switch_degree<const NTT: bool>(
&self,
a: &Poly<u64>,
buf: &mut Poly<u64>,
b: &mut Poly<u64>,
) {
let (n_in, n_out) = (a.n(), b.n());
if n_in > n_out {
let (gap_in, gap_out) = (1, n_in / n_out);
if NTT {
self.intt::<false>(&a, buf);
b.0.iter_mut()
.step_by(gap_in)
.zip(buf.0.iter().step_by(gap_out))
.for_each(|(x_out, x_in)| *x_out = *x_in);
self.ntt_inplace::<false>(b);
} else {
b.0.iter_mut()
.step_by(gap_in)
.zip(a.0.iter().step_by(gap_out))
.for_each(|(x_out, x_in)| *x_out = *x_in);
}
} else {
let gap: usize = n_out / n_in;
if NTT {
a.0.iter()
.enumerate()
.for_each(|(i, &c)| (0..gap).for_each(|j| b.0[i * gap + j] = c));
} else {
b.0.iter_mut()
.step_by(gap)
.zip(a.0.iter())
.for_each(|(x_out, x_in)| *x_out = *x_in);
}
}
}
}
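
A hedged sketch for switch_degree (not part of the commit): the modulus is a placeholder assumed to be an NTT-friendly prime, and `new_poly()` is assumed to return a zero-initialized polynomial.

// Sketch only: maps a degree-2^11 polynomial into a degree-2^10 ring in the
// coefficient domain (NTT = false), which keeps every second coefficient.
let big = Ring::<u64>::new(1 << 11, 0x1fffffffffe00001, 1);
let small = Ring::<u64>::new(1 << 10, 0x1fffffffffe00001, 1);
let a = big.new_poly();
let mut buf = big.new_poly();
let mut b = small.new_poly();
big.switch_degree::<false>(&a, &mut buf, &mut b);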

View File

@@ -0,0 +1,73 @@
use crate::modulus::WordOps;
use crate::poly::{Poly, PolyRNS};
use crate::ring::{Ring, RingRNS};
use num::ToPrimitive;
use rand_distr::{Distribution, Normal};
use sampling::source::Source;
impl Ring<u64> {
pub fn fill_uniform(&self, source: &mut Source, a: &mut Poly<u64>) {
let max: u64 = self.modulus.q;
let mask: u64 = max.mask();
a.0.iter_mut()
.for_each(|a| *a = source.next_u64n(max, mask));
}
pub fn fill_dist_f64<T: Distribution<f64>>(
&self,
source: &mut Source,
dist: T,
bound: f64,
a: &mut Poly<u64>,
) {
let max: u64 = self.modulus.q;
a.0.iter_mut().for_each(|a| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source)
}
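// Branch-free map of the bounded sample to Z_q: the IEEE sign bit of dist_f64
// selects either the rounded magnitude dist_u64 or (max - dist_u64).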
let dist_u64: u64 = (dist_f64 + 0.5).abs().to_u64().unwrap();
let sign: u64 = dist_f64.to_bits() >> 63;
*a = (dist_u64 * sign) | (max - dist_u64) * (sign ^ 1)
});
}
pub fn fill_normal(&self, source: &mut Source, sigma: f64, bound: f64, a: &mut Poly<u64>) {
self.fill_dist_f64(source, Normal::new(0.0, sigma).unwrap(), bound, a);
}
}
impl RingRNS<u64> {
pub fn fill_uniform(&self, source: &mut Source, a: &mut PolyRNS<u64>) {
self.0
.iter()
.enumerate()
.for_each(|(i, r)| r.fill_uniform(source, a.at_mut(i)));
}
pub fn fill_dist_f64<T: Distribution<f64>>(
&self,
source: &mut Source,
dist: T,
bound: f64,
a: &mut PolyRNS<u64>,
) {
(0..a.n()).for_each(|j| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source)
}
let dist_u64: u64 = (dist_f64 + 0.5).abs().to_u64().unwrap();
let sign: u64 = dist_f64.to_bits() >> 63;
self.0.iter().enumerate().for_each(|(i, r)| {
a.at_mut(i).0[j] = (dist_u64 * sign) | (r.modulus.q - dist_u64) * (sign ^ 1);
})
})
}
}
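
A hedged sketch of the sampling helpers (not part of the commit): the randomness source is taken as a parameter since its construction is not shown here, `new_poly()` is assumed to return a zero-initialized polynomial, and sigma / rejection bound are illustrative values only.

fn sample_demo(ring: &Ring<u64>, source: &mut Source) -> (Poly<u64>, Poly<u64>) {
let mut uniform = ring.new_poly();
ring.fill_uniform(source, &mut uniform);
let mut gaussian = ring.new_poly();
ring.fill_normal(source, 3.2, 19.2, &mut gaussian);
(uniform, gaussian)
}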

View File

@@ -0,0 +1,38 @@
use crate::modulus::ONCE;
use crate::poly::Poly;
use crate::ring::Ring;
impl Ring<u64> {
// Generates a vector storing {X^{2^0}, X^{2^1}, ..., X^{2^{log_n - 1}}} (or their inverses if INV).
pub fn gen_x_pow_2<const NTT: bool, const INV: bool>(&self, log_n: usize) -> Vec<Poly<u64>> {
let mut x_pow: Vec<Poly<u64>> = Vec::<Poly<u64>>::with_capacity(log_n);
(0..log_n).for_each(|i| {
let mut idx: usize = 1 << i;
if INV {
idx = self.n() - idx;
}
x_pow.push(self.new_poly());
if i == 0 {
x_pow[i].0[idx] = self.modulus.montgomery.one();
self.ntt_inplace::<false>(&mut x_pow[i]);
} else {
let (left, right) = x_pow.split_at_mut(i);
self.a_mul_b_montgomery_into_c::<ONCE>(&left[i - 1], &left[i - 1], &mut right[0]);
}
});
if INV {
self.a_neg_into_a::<1, ONCE>(&mut x_pow[0]);
}
if !NTT {
x_pow.iter_mut().for_each(|x| self.intt_inplace::<false>(x));
}
x_pow
}
}
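
A short sketch for gen_x_pow_2 (not part of the commit): the modulus is a placeholder assumed to be an NTT-friendly prime.

// Sketch only: precomputes X^{2^0}, X^{2^1}, ..., X^{2^9} for n = 2^10,
// kept in the NTT (and Montgomery) domain since NTT = true and INV = false.
let ring = Ring::<u64>::new(1 << 10, 0x1fffffffffe00001, 1);
let x_pow = ring.gen_x_pow_2::<true, false>(10);
assert_eq!(x_pow.len(), 10);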