Jean-Philippe Bossuat
2025-01-06 14:10:28 +01:00
parent 681268c28e
commit a074886b3e
29 changed files with 1650 additions and 928 deletions

View File

@@ -1,43 +1,57 @@
use crate::modulus::WordOps;
use crate::poly::Poly;
use crate::ring::Ring;

/// Returns a lookup table for the automorphism X^{i} -> X^{i * k mod nth_root}.
/// Method will panic if n or nth_root are not power-of-two.
/// Method will panic if gal_el is not coprime with nth_root.
pub fn automorphism_index_ntt(n: usize, nth_root: u64, gal_el: u64) -> Vec<u64> {
    assert!(n & (n - 1) == 0, "invalid n={}: not a power-of-two", n);
    assert!(
        nth_root & (nth_root - 1) == 0,
        "invalid nth_root={}: not a power-of-two",
        nth_root
    );
    assert!(
        gal_el & 1 == 1,
        "invalid gal_el={}: not coprime with nth_root={}",
        gal_el,
        nth_root
    );

    let mask = nth_root - 1;
    let log_nth_root: u32 = nth_root.log2() as u32;
    let mut index: Vec<u64> = Vec::with_capacity(n);
    for i in 0..n {
        let i_rev: usize = 2 * i.reverse_bits_msb(log_nth_root) + 1;
        let gal_el_i: u64 = (gal_el * (i_rev as u64) & mask) >> 1;
        index.push(gal_el_i.reverse_bits_msb(log_nth_root));
    }
    index
}

impl Ring<u64> {
    pub fn automorphism(&self, a: Poly<u64>, gal_el: u64, b: &mut Poly<u64>) {
        debug_assert!(
            a.n() == b.n(),
            "invalid inputs: a.n() = {} != b.n() = {}",
            a.n(),
            b.n()
        );
        debug_assert!(gal_el & 1 == 1, "invalid gal_el = {}: not odd", gal_el);

        let n: usize = a.n();
        let mask: u64 = (n - 1) as u64;
        let log_n: usize = n.log2();
        let q: u64 = self.modulus.q();
        let b_vec: &mut _ = &mut b.0;
        let a_vec: &_ = &a.0;
        a_vec.iter().enumerate().for_each(|(i, ai)| {
            let gal_el_i: u64 = i as u64 * gal_el;
            // X^n = -1: an odd number of wraps around n flips the sign.
            let sign: u64 = (gal_el_i >> log_n) & 1;
            let i_out: u64 = gal_el_i & mask;
            b_vec[i_out as usize] = ai * (sign ^ 1) | (q - ai) * sign
        });
    }
}
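
Note: the branch-free select in `automorphism` writes b[i*g mod n] = ±a[i], the sign being the parity of floor(i*g / n), since X^n = -1 in the negacyclic ring. A standalone sketch of the same map, with illustrative parameters rather than this crate's API:

// Apply a(X) -> a(X^g) in Z_q[X]/(X^n + 1), for n a power of two and g odd.
fn automorphism_naive(a: &[u64], g: u64, q: u64) -> Vec<u64> {
    let n = a.len();
    let (mask, log_n) = ((n - 1) as u64, n.trailing_zeros());
    let mut b = vec![0u64; n];
    for (i, &ai) in a.iter().enumerate() {
        let j = i as u64 * g;
        // X^(i*g) = (-1)^floor(i*g/n) * X^(i*g mod n) because X^n = -1.
        let sign = (j >> log_n) & 1;
        b[(j & mask) as usize] = if sign == 1 { (q - ai) % q } else { ai };
    }
    b
}

#[test]
fn x_to_x3() {
    // In Z_17[X]/(X^4 + 1): X^2 -> X^6 = -X^2 and X^3 -> X^9 = X.
    assert_eq!(automorphism_naive(&[1, 1, 1, 1], 3, 17), vec![1, 1, 16, 1]);
}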

View File

@@ -1,5 +1,5 @@
pub mod automorphism;
pub mod rescaling_rns;
pub mod ring;
pub mod ring_rns;
pub mod sampling;

View File

@@ -1,142 +1,269 @@
use crate::modulus::barrett::Barrett;
use crate::modulus::ONCE;
use crate::poly::PolyRNS;
use crate::ring::Ring;
use crate::ring::RingRNS;
use crate::scalar::ScalarRNS;

extern crate test;

impl RingRNS<'_, u64> {
    /// Updates b to floor(a / q[self.level()]).
    pub fn div_floor_by_last_modulus<const NTT: bool>(
        &self,
        a: &PolyRNS<u64>,
        buf: &mut PolyRNS<u64>,
        b: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: self.level()={} > a.level()={}",
            self.level(),
            a.level()
        );
        debug_assert!(
            b.level() >= a.level() - 1,
            "invalid input b: b.level()={} < a.level()-1={}",
            b.level(),
            a.level() - 1
        );

        let level = self.level();
        let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();

        if NTT {
            let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
            self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
            for (i, r) in self.0[0..level].iter().enumerate() {
                r.ntt::<false>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
                r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(
                    &buf_ntt_qi_scaling[0],
                    a.at(i),
                    &rescaling_constants.0[i],
                    b.at_mut(i),
                );
            }
        } else {
            for (i, r) in self.0[0..level].iter().enumerate() {
                r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(
                    a.at(level),
                    a.at(i),
                    &rescaling_constants.0[i],
                    b.at_mut(i),
                );
            }
        }
    }

    /// Updates a to floor(a / q[self.level()]).
    /// When NTT is true, expects a to be in the NTT domain.
    pub fn div_floor_by_last_modulus_inplace<const NTT: bool>(
        &self,
        buf: &mut PolyRNS<u64>,
        a: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: self.level()={} > a.level()={}",
            self.level(),
            a.level()
        );

        let level = self.level();
        let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();

        if NTT {
            let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
            self.0[level].intt::<true>(a.at(level), &mut buf_ntt_q_scaling[0]);
            for (i, r) in self.0[0..level].iter().enumerate() {
                r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
                r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(
                    &buf_ntt_qi_scaling[0],
                    &rescaling_constants.0[i],
                    a.at_mut(i),
                );
            }
        } else {
            // Split a itself: limbs [0, level) are updated in place from limb `level`.
            let (a_i, a_level) = a.0.split_at_mut(level);
            for (i, r) in self.0[0..level].iter().enumerate() {
                r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(
                    &a_level[0],
                    &rescaling_constants.0[i],
                    &mut a_i[i],
                );
            }
        }
    }

    /// Updates c to floor(a / prod_{i = level - nb_moduli + 1}^{level} q[i]).
    pub fn div_floor_by_last_moduli<const NTT: bool>(
        &self,
        nb_moduli: usize,
        a: &PolyRNS<u64>,
        buf: &mut PolyRNS<u64>,
        c: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: self.level()={} > a.level()={}",
            self.level(),
            a.level()
        );
        debug_assert!(
            c.level() >= a.level() - 1,
            "invalid input c: c.level()={} < a.level()-1={}",
            c.level(),
            a.level() - 1
        );
        debug_assert!(
            nb_moduli <= a.level(),
            "invalid input nb_moduli: nb_moduli={} > a.level()={}",
            nb_moduli,
            a.level()
        );

        if nb_moduli == 0 {
            if a != c {
                c.copy(a);
            }
        } else {
            if NTT {
                self.intt::<false>(a, buf);
                (0..nb_moduli).for_each(|i| {
                    self.at_level(self.level() - i)
                        .div_floor_by_last_modulus_inplace::<false>(
                            &mut PolyRNS::<u64>::default(),
                            buf,
                        )
                });
                self.at_level(self.level() - nb_moduli).ntt::<false>(buf, c);
            } else {
                self.div_floor_by_last_modulus::<false>(a, buf, c);
                (1..nb_moduli).for_each(|i| {
                    self.at_level(self.level() - i)
                        .div_floor_by_last_modulus_inplace::<false>(buf, c)
                });
            }
        }
    }

    /// Updates a to floor(a / prod_{i = level - nb_moduli + 1}^{level} q[i]).
    pub fn div_floor_by_last_moduli_inplace<const NTT: bool>(
        &self,
        nb_moduli: usize,
        buf: &mut PolyRNS<u64>,
        a: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: self.level()={} > a.level()={}",
            self.level(),
            a.level()
        );
        debug_assert!(
            nb_moduli <= a.level(),
            "invalid input nb_moduli: nb_moduli={} > a.level()={}",
            nb_moduli,
            a.level()
        );

        if NTT {
            self.intt::<false>(a, buf);
            (0..nb_moduli).for_each(|i| {
                self.at_level(self.level() - i)
                    .div_floor_by_last_modulus_inplace::<false>(&mut PolyRNS::<u64>::default(), buf)
            });
            self.at_level(self.level() - nb_moduli).ntt::<false>(buf, a);
        } else {
            (0..nb_moduli).for_each(|i| {
                self.at_level(self.level() - i)
                    .div_floor_by_last_modulus_inplace::<false>(buf, a)
            });
        }
    }

    /// Updates b to round(a / q[self.level()]).
    /// When NTT is true, expects a and b to be in the NTT domain.
    pub fn div_round_by_last_modulus<const NTT: bool>(
        &self,
        a: &PolyRNS<u64>,
        buf: &mut PolyRNS<u64>,
        b: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: self.level()={} > a.level()={}",
            self.level(),
            a.level()
        );
        debug_assert!(
            b.level() >= a.level() - 1,
            "invalid input b: b.level()={} < a.level()-1={}",
            b.level(),
            a.level() - 1
        );

        let level: usize = self.level();
        let r_last: &Ring<u64> = &self.0[level];
        let q_level_half: u64 = r_last.modulus.q >> 1;
        let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
        let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);

        if NTT {
            r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
            r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_ntt_q_scaling[0]);
            for (i, r) in self.0[0..level].iter().enumerate() {
                r_last.add_scalar::<ONCE>(
                    &buf_ntt_q_scaling[0],
                    &q_level_half,
                    &mut buf_ntt_qi_scaling[0],
                );
                r.ntt_inplace::<false>(&mut buf_ntt_qi_scaling[0]);
                r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(
                    &buf_ntt_qi_scaling[0],
                    a.at(i),
                    &rescaling_constants.0[i],
                    b.at_mut(i),
                );
            }
        } else {
            // Non-NTT path not implemented yet.
        }
    }

    /// Updates a to round(a / q[self.level()]).
    /// Expects a to be in the NTT domain.
    pub fn div_round_by_last_modulus_inplace<const NTT: bool>(
        &self,
        buf: &mut PolyRNS<u64>,
        a: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: self.level()={} > a.level()={}",
            self.level(),
            a.level()
        );

        let level = self.level();
        let r_last: &Ring<u64> = &self.0[level];
        let q_level_half: u64 = r_last.modulus.q >> 1;
        let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
        let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);

        if NTT {
            r_last.intt::<true>(a.at(level), &mut buf_ntt_q_scaling[0]);
            r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_ntt_q_scaling[0]);
            for (i, r) in self.0[0..level].iter().enumerate() {
                r_last.add_scalar::<ONCE>(
                    &buf_ntt_q_scaling[0],
                    &q_level_half,
                    &mut buf_ntt_qi_scaling[0],
                );
                r.ntt_inplace::<true>(&mut buf_ntt_qi_scaling[0]);
                r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(
                    &buf_ntt_qi_scaling[0],
                    &rescaling_constants.0[i],
                    a.at_mut(i),
                );
            }
        }
    }
}
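
Note: the floor variants rely on the identity floor(a / q_L) ≡ (a_i − a_L) · q_L⁻¹ (mod q_i), with a_L = a mod q_L, which `sum_aqqmb_prod_c_scalar_barrett` applies limb-wise using the constants from `rescaling_constant`; the round variants reduce to the floor ones by first adding q_L/2, since round(a / q) = floor((a + floor(q/2)) / q). A scalar sanity check of both facts, with small illustrative primes (not this crate's API):

// Exact floor division of a by q_l, carried out limb-wise mod q_i.
fn floor_div_mod_qi(a: u64, q_i: u64, q_l: u64) -> u64 {
    let (a_i, a_l) = (a % q_i, a % q_l);
    let inv_ql = pow_mod(q_l % q_i, q_i - 2, q_i); // q_l^-1 mod q_i (q_i prime)
    (a_i + q_i - a_l % q_i) % q_i * inv_ql % q_i
}

// Modular exponentiation by squaring (small moduli, so u64 cannot overflow).
fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
    let mut r = 1;
    b %= m;
    while e > 0 {
        if e & 1 == 1 {
            r = r * b % m;
        }
        b = b * b % m;
        e >>= 1;
    }
    r
}

#[test]
fn rescaling_identities() {
    let (q_i, q_l) = (97u64, 89u64);
    for a in 0..10_000u64 {
        // Floor: the limb-wise formula agrees with the true quotient mod q_i.
        assert_eq!(floor_div_mod_qi(a, q_i, q_l), (a / q_l) % q_i);
        // Round: adding floor(q_l/2) turns floor division into round-to-nearest.
        assert_eq!((a + q_l / 2) / q_l, (2 * a + q_l) / (2 * q_l));
    }
}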

View File

@@ -1,17 +1,17 @@
use crate::dft::ntt::Table;
use crate::modulus::barrett::Barrett;
use crate::modulus::montgomery::Montgomery;
use crate::modulus::prime::Prime;
use crate::modulus::VectorOperations;
use crate::modulus::{BARRETT, REDUCEMOD};
use crate::poly::Poly;
use crate::ring::Ring;
use crate::CHUNK;
use num_bigint::BigInt;
use num_traits::ToPrimitive;

impl Ring<u64> {
    pub fn new(n: usize, q_base: u64, q_power: usize) -> Self {
        let prime: Prime<u64> = Prime::<u64>::new(q_base, q_power);
        Self {
            n: n,
@@ -20,156 +20,218 @@ impl Ring<u64>{
        }
    }

    pub fn from_bigint(&self, coeffs: &[BigInt], step: usize, a: &mut Poly<u64>) {
        assert!(
            step <= a.n(),
            "invalid step: step={} > a.n()={}",
            step,
            a.n()
        );
        assert!(
            coeffs.len() <= a.n() / step,
            "invalid coeffs: coeffs.len()={} > a.n()/step={}",
            coeffs.len(),
            a.n() / step
        );
        let q_big: BigInt = BigInt::from(self.modulus.q);
        // Note: assumes non-negative coefficients; BigInt's % keeps the sign of
        // its left operand, and to_u64() fails on a negative remainder.
        a.0.iter_mut()
            .step_by(step)
            .enumerate()
            .for_each(|(i, v)| *v = (&coeffs[i] % &q_big).to_u64().unwrap());
    }
}

impl Ring<u64> {
    pub fn ntt_inplace<const LAZY: bool>(&self, poly: &mut Poly<u64>) {
        match LAZY {
            true => self.dft.forward_inplace_lazy(&mut poly.0),
            false => self.dft.forward_inplace(&mut poly.0),
        }
    }

    pub fn intt_inplace<const LAZY: bool>(&self, poly: &mut Poly<u64>) {
        match LAZY {
            true => self.dft.backward_inplace_lazy(&mut poly.0),
            false => self.dft.backward_inplace(&mut poly.0),
        }
    }

    pub fn ntt<const LAZY: bool>(&self, poly_in: &Poly<u64>, poly_out: &mut Poly<u64>) {
        poly_out.0.copy_from_slice(&poly_in.0);
        match LAZY {
            true => self.dft.forward_inplace_lazy(&mut poly_out.0),
            false => self.dft.forward_inplace(&mut poly_out.0),
        }
    }

    pub fn intt<const LAZY: bool>(&self, poly_in: &Poly<u64>, poly_out: &mut Poly<u64>) {
        poly_out.0.copy_from_slice(&poly_in.0);
        match LAZY {
            true => self.dft.backward_inplace_lazy(&mut poly_out.0),
            false => self.dft.backward_inplace(&mut poly_out.0),
        }
    }
}

impl Ring<u64> {
    #[inline(always)]
    pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus
            .va_add_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
    }

    #[inline(always)]
    pub fn add<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
        self.modulus
            .va_add_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
    }

    #[inline(always)]
    pub fn add_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut Poly<u64>) {
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus.sa_add_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
    }

    #[inline(always)]
    pub fn add_scalar<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
        self.modulus
            .va_add_sb_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
    }

    #[inline(always)]
    pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus
            .va_sub_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
    }

    #[inline(always)]
    pub fn sub<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
        self.modulus
            .va_sub_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
    }

    #[inline(always)]
    pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus.va_neg_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
    }

    #[inline(always)]
    pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        self.modulus.va_neg_into_va::<CHUNK, REDUCE>(&mut a.0);
    }

    #[inline(always)]
    pub fn mul_montgomery_external<const REDUCE: REDUCEMOD>(
        &self,
        a: &Poly<Montgomery<u64>>,
        b: &Poly<u64>,
        c: &mut Poly<u64>,
    ) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
        self.modulus
            .va_mont_mul_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
    }

    #[inline(always)]
    pub fn mul_montgomery_external_inplace<const REDUCE: REDUCEMOD>(
        &self,
        a: &Poly<Montgomery<u64>>,
        b: &mut Poly<u64>,
    ) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus
            .va_mont_mul_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
    }

    #[inline(always)]
    pub fn mul_scalar<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
        self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(
            &self.modulus.barrett.prepare(*b),
            &a.0,
            &mut c.0,
        );
    }

    #[inline(always)]
    pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut Poly<u64>) {
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(
            &self
                .modulus
                .barrett
                .prepare(self.modulus.barrett.reduce::<BARRETT>(a)),
            &mut b.0,
        );
    }

    #[inline(always)]
    pub fn mul_scalar_barrett_inplace<const REDUCE: REDUCEMOD>(
        &self,
        a: &Barrett<u64>,
        b: &mut Poly<u64>,
    ) {
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus
            .sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
    }

    #[inline(always)]
    pub fn mul_scalar_barrett<const REDUCE: REDUCEMOD>(
        &self,
        a: &Barrett<u64>,
        b: &Poly<u64>,
        c: &mut Poly<u64>,
    ) {
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
        self.modulus
            .sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
    }

    #[inline(always)]
    pub fn sum_aqqmb_prod_c_scalar_barrett<const REDUCE: REDUCEMOD>(
        &self,
        a: &Poly<u64>,
        b: &Poly<u64>,
        c: &Barrett<u64>,
        d: &mut Poly<u64>,
    ) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
        self.modulus
            .va_sub_vb_mul_sc_into_vd::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
    }

    #[inline(always)]
    pub fn sum_aqqmb_prod_c_scalar_barrett_inplace<const REDUCE: REDUCEMOD>(
        &self,
        a: &Poly<u64>,
        c: &Barrett<u64>,
        b: &mut Poly<u64>,
    ) {
        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
        self.modulus
            .va_sub_vb_mul_sc_into_vb::<CHUNK, REDUCE>(&a.0, c, &mut b.0);
    }
}
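
Note: `mul_scalar` follows a "prepare once, multiply many" pattern: `barrett.prepare` precomputes a per-scalar constant so that each coefficient product needs only two multiplications and one conditional subtraction. A self-contained sketch of one such scheme (a Shoup-style precomputation; the actual layout of Barrett<u64> may differ):

// For fixed b < q, precompute b_hat = floor(b * 2^64 / q); then for any a,
// floor(a * b_hat / 2^64) equals floor(a * b / q) or one less, so a single
// conditional subtraction finishes the reduction (requires q < 2^63).
#[derive(Clone, Copy)]
struct PreparedScalar {
    b: u64,
    b_hat: u64,
    q: u64,
}

impl PreparedScalar {
    fn new(b: u64, q: u64) -> Self {
        debug_assert!(b < q && q < 1 << 63);
        let b_hat = (((b as u128) << 64) / q as u128) as u64;
        Self { b, b_hat, q }
    }

    fn mul(&self, a: u64) -> u64 {
        let quot = ((a as u128 * self.b_hat as u128) >> 64) as u64;
        // a*b - quot*q fits in [0, 2q), so wrapping arithmetic is exact here.
        let r = a
            .wrapping_mul(self.b)
            .wrapping_sub(quot.wrapping_mul(self.q));
        if r >= self.q {
            r - self.q
        } else {
            r
        }
    }
}

#[test]
fn prepared_scalar_mul() {
    let p = PreparedScalar::new(3, 97);
    for a in 0..u16::MAX as u64 {
        assert_eq!(p.mul(a), a * 3 % 97);
    }
}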

View File

@@ -1,158 +1,353 @@
use crate::modulus::barrett::Barrett;
use crate::modulus::montgomery::Montgomery;
use crate::modulus::REDUCEMOD;
use crate::poly::PolyRNS;
use crate::ring::{Ring, RingRNS};
use crate::scalar::ScalarRNS;
use num_bigint::BigInt;

pub fn new_rings(n: usize, moduli: Vec<u64>) -> Vec<Ring<u64>> {
    assert!(!moduli.is_empty(), "moduli cannot be empty");
    let rings: Vec<Ring<u64>> = moduli
        .into_iter()
        .map(|prime| Ring::new(n, prime, 1))
        .collect();
    return rings;
}

impl<'a> RingRNS<'a, u64> {
    pub fn new(rings: &'a [Ring<u64>]) -> Self {
        RingRNS(rings)
    }

    pub fn modulus(&self) -> BigInt {
        let mut modulus = BigInt::from(1);
        self.0
            .iter()
            .for_each(|r| modulus *= BigInt::from(r.modulus.q));
        modulus
    }

    pub fn rescaling_constant(&self) -> ScalarRNS<Barrett<u64>> {
        let level = self.level();
        let q_scale: u64 = self.0[level].modulus.q;
        ScalarRNS(
            (0..level)
                .map(|i| {
                    self.0[i]
                        .modulus
                        .barrett
                        .prepare(self.0[i].modulus.q - self.0[i].modulus.inv(q_scale))
                })
                .collect(),
        )
    }

    pub fn from_bigint_inplace(&self, coeffs: &[BigInt], step: usize, a: &mut PolyRNS<u64>) {
        let level = self.level();
        assert!(
            level <= a.level(),
            "invalid level: level={} > a.level()={}",
            level,
            a.level()
        );
        // Fill all level + 1 limbs, matching to_bigint_inplace below.
        (0..=level).for_each(|i| self.0[i].from_bigint(coeffs, step, a.at_mut(i)));
    }

    pub fn to_bigint_inplace(&self, a: &PolyRNS<u64>, step: usize, coeffs: &mut [BigInt]) {
        assert!(
            step <= a.n(),
            "invalid step: step={} > a.n()={}",
            step,
            a.n()
        );
        assert!(
            coeffs.len() <= a.n() / step,
            "invalid coeffs: coeffs.len()={} > a.n()/step={}",
            coeffs.len(),
            a.n() / step
        );

        let mut inv_crt: Vec<BigInt> = vec![BigInt::default(); self.level() + 1];
        let q_big: BigInt = self.modulus();
        let q_big_half: BigInt = &q_big >> 1;

        inv_crt.iter_mut().enumerate().for_each(|(i, a)| {
            let qi_big = BigInt::from(self.0[i].modulus.q);
            *a = &q_big / &qi_big;
            *a *= a.modinv(&qi_big).unwrap();
        });

        // i indexes the compact coeffs slice, j the strided polynomial positions.
        (0..self.n()).step_by(step).enumerate().for_each(|(i, j)| {
            coeffs[i] = BigInt::from(a.at(0).0[j]) * &inv_crt[0];
            (1..self.level() + 1).for_each(|k| {
                coeffs[i] += BigInt::from(a.at(k).0[j]) * &inv_crt[k];
            });
            coeffs[i] %= &q_big;
            if &coeffs[i] >= &q_big_half {
                coeffs[i] -= &q_big;
            }
        });
    }
}

impl RingRNS<'_, u64> {
    pub fn ntt_inplace<const LAZY: bool>(&self, a: &mut PolyRNS<u64>) {
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.ntt_inplace::<LAZY>(&mut a.0[i]));
    }

    pub fn intt_inplace<const LAZY: bool>(&self, a: &mut PolyRNS<u64>) {
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.intt_inplace::<LAZY>(&mut a.0[i]));
    }

    pub fn ntt<const LAZY: bool>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.ntt::<LAZY>(&a.0[i], &mut b.0[i]));
    }

    pub fn intt<const LAZY: bool>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.intt::<LAZY>(&a.0[i], &mut b.0[i]));
    }
}

impl RingRNS<'_, u64> {
    #[inline(always)]
    pub fn add<const REDUCE: REDUCEMOD>(
        &self,
        a: &PolyRNS<u64>,
        b: &PolyRNS<u64>,
        c: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        debug_assert!(
            c.level() >= self.level(),
            "c.level()={} < self.level()={}",
            c.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.add::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
    }

    #[inline(always)]
    pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.add_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
    }

    #[inline(always)]
    pub fn sub<const REDUCE: REDUCEMOD>(
        &self,
        a: &PolyRNS<u64>,
        b: &PolyRNS<u64>,
        c: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        debug_assert!(
            c.level() >= self.level(),
            "c.level()={} < self.level()={}",
            c.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.sub::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
    }

    #[inline(always)]
    pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.sub_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
    }

    #[inline(always)]
    pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.neg::<REDUCE>(&a.0[i], &mut b.0[i]));
    }

    #[inline(always)]
    pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.neg_inplace::<REDUCE>(&mut a.0[i]));
    }

    #[inline(always)]
    pub fn mul_montgomery_external<const REDUCE: REDUCEMOD>(
        &self,
        a: &PolyRNS<Montgomery<u64>>,
        b: &PolyRNS<u64>,
        c: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        debug_assert!(
            c.level() >= self.level(),
            "c.level()={} < self.level()={}",
            c.level(),
            self.level()
        );
        self.0.iter().enumerate().for_each(|(i, ring)| {
            ring.mul_montgomery_external::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
        });
    }

    #[inline(always)]
    pub fn mul_montgomery_external_inplace<const REDUCE: REDUCEMOD>(
        &self,
        a: &PolyRNS<Montgomery<u64>>,
        b: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        self.0.iter().enumerate().for_each(|(i, ring)| {
            ring.mul_montgomery_external_inplace::<REDUCE>(&a.0[i], &mut b.0[i])
        });
    }

    #[inline(always)]
    pub fn mul_scalar<const REDUCE: REDUCEMOD>(
        &self,
        a: &PolyRNS<u64>,
        b: &u64,
        c: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            a.level() >= self.level(),
            "a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            c.level() >= self.level(),
            "c.level()={} < self.level()={}",
            c.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.mul_scalar::<REDUCE>(&a.0[i], b, &mut c.0[i]));
    }

    #[inline(always)]
    pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut PolyRNS<u64>) {
        debug_assert!(
            b.level() >= self.level(),
            "b.level()={} < self.level()={}",
            b.level(),
            self.level()
        );
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, ring)| ring.mul_scalar_inplace::<REDUCE>(a, &mut b.0[i]));
    }
}
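
Note: `to_bigint_inplace` is a textbook CRT reconstruction: x = sum_i x_i * (Q/q_i) * ((Q/q_i)^-1 mod q_i) mod Q, followed by centering around zero. A single-coefficient demo with two illustrative moduli:

use num_bigint::BigInt;

#[test]
fn crt_single_coefficient() {
    let (q0, q1) = (97u64, 89u64);
    let x = 1234u64; // any value below q0 * q1
    let residues = [(x % q0, q0), (x % q1, q1)];

    let q_big = BigInt::from(q0) * BigInt::from(q1);
    let mut acc = BigInt::from(0);
    for (xi, qi) in residues {
        let qi_big = BigInt::from(qi);
        let crt = &q_big / &qi_big; // Q / q_i
        let inv = crt.modinv(&qi_big).unwrap(); // (Q/q_i)^-1 mod q_i
        acc += BigInt::from(xi) * crt * inv;
    }
    acc %= &q_big;
    assert_eq!(acc, BigInt::from(x));
}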

View File

@@ -1,18 +1,22 @@
use crate::modulus::WordOps;
use crate::poly::{Poly, PolyRNS};
use crate::ring::{Ring, RingRNS};
use sampling::source::Source;

impl Ring<u64> {
    pub fn fill_uniform(&self, source: &mut Source, a: &mut Poly<u64>) {
        let max: u64 = self.modulus.q;
        let mask: u64 = max.mask();
        a.0.iter_mut()
            .for_each(|a| *a = source.next_u64n(max, mask));
    }
}

impl RingRNS<'_, u64> {
    pub fn fill_uniform(&self, source: &mut Source, a: &mut PolyRNS<u64>) {
        self.0
            .iter()
            .enumerate()
            .for_each(|(i, r)| r.fill_uniform(source, a.at_mut(i)));
    }
}
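
Note: `fill_uniform` draws each coefficient uniformly below q via `next_u64n(max, mask)`, which presumably masks random bits down to the smallest power-of-two range covering max and rejects overshoots (so fewer than two draws are needed on average). A standalone sketch of that masked rejection scheme with a stand-in generator, not the `sampling` crate's implementation:

// Toy xorshift source; any u64 generator works here. Seed must be nonzero.
struct XorShift64(u64);

impl XorShift64 {
    fn next_u64(&mut self) -> u64 {
        self.0 ^= self.0 << 13;
        self.0 ^= self.0 >> 7;
        self.0 ^= self.0 << 17;
        self.0
    }

    // Uniform in [0, max): mask down to [0, mask], retry on overshoot.
    fn next_u64n(&mut self, max: u64, mask: u64) -> u64 {
        loop {
            let x = self.next_u64() & mask;
            if x < max {
                return x;
            }
        }
    }
}

#[test]
fn masked_rejection_stays_in_range() {
    let max = 0x003A_BCDEu64;
    let mask = max.next_power_of_two() - 1; // smallest all-ones cover of [0, max)
    let mut src = XorShift64(0x9E37_79B9_7F4A_7C15);
    for _ in 0..1_000 {
        assert!(src.next_u64n(max, mask) < max);
    }
}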