This commit is contained in:
Jean-Philippe Bossuat
2025-01-06 18:05:32 +01:00
parent c69bd6985a
commit 7e4ca491c7
7 changed files with 617 additions and 133 deletions

View File

@@ -1,5 +1,5 @@
use crate::modulus::barrett::Barrett;
use crate::modulus::ONCE;
use crate::modulus::{NONE, ONCE, BARRETT};
use crate::poly::PolyRNS;
use crate::ring::Ring;
use crate::ring::RingRNS;
@@ -34,8 +34,8 @@ impl RingRNS<u64> {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<false>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
&buf_ntt_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
@@ -44,7 +44,7 @@ impl RingRNS<u64> {
}
} else {
for (i, r) in self.0[0..level].iter().enumerate() {
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
a.at(level),
a.at(i),
&rescaling_constants.0[i],
@@ -73,19 +73,19 @@ impl RingRNS<u64> {
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<true>(a.at(level), &mut buf_ntt_q_scaling[0]);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&buf_ntt_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
let (a_i, a_level) = buf.0.split_at_mut(level);
let (a_i, a_level) = a.0.split_at_mut(level);
for (i, r) in self.0[0..level].iter().enumerate() {
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&a_level[0],
&rescaling_constants.0[i],
&mut a_i[i],
@@ -102,6 +102,9 @@ impl RingRNS<u64> {
buf: &mut PolyRNS<u64>,
c: &mut PolyRNS<u64>,
) {
println!("{:?}", buf);
debug_assert!(
self.level() <= a.level(),
"invalid input a: self.level()={} > a.level()={}",
@@ -205,26 +208,40 @@ impl RingRNS<u64> {
let r_last: &Ring<u64> = &self.0[level];
let q_level_half: u64 = r_last.modulus.q >> 1;
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_ntt_q_scaling[0]);
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<ONCE>(
&buf_ntt_q_scaling[0],
&q_level_half,
&mut buf_ntt_qi_scaling[0],
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<false>(&mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett::<ONCE>(
&buf_ntt_qi_scaling[0],
r.ntt_inplace::<true>(&mut buf_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
} else {
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
}
}
@@ -246,24 +263,124 @@ impl RingRNS<u64> {
let r_last: &Ring<u64> = &self.0[level];
let q_level_half: u64 = r_last.modulus.q >> 1;
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
if NTT {
r_last.intt::<true>(a.at(level), &mut buf_ntt_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_ntt_q_scaling[0]);
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<ONCE>(
&buf_ntt_q_scaling[0],
&q_level_half,
&mut buf_ntt_qi_scaling[0],
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.sum_aqqmb_prod_c_scalar_barrett_inplace::<ONCE>(
&buf_ntt_qi_scaling[0],
r.ntt_inplace::<false>(&mut buf_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&buf_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&buf_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
}
}
/// Writes into `c` the value round(a / prod_{i = level - nb_moduli + 1}^{level} q[i]),
/// i.e. rescales `a` by dropping the last `nb_moduli` RNS moduli one at a time.
///
/// * `NTT`: when true, `a` is in the NTT domain; the rescaling is performed in
///   coefficient representation and the result is transformed back at the
///   reduced level.
/// * `buf`: scratch polynomial used for the INTT/NTT round trip (NTT path) or
///   for the first rescaling step (coefficient path).
/// * `c`: receives the rescaled result; must support level `a.level() - 1`.
pub fn div_round_by_last_moduli<const NTT: bool>(
    &self,
    nb_moduli: usize,
    a: &PolyRNS<u64>,
    buf: &mut PolyRNS<u64>,
    c: &mut PolyRNS<u64>,
) {
    // The ring must not sit above the operand's level.
    debug_assert!(
        self.level() <= a.level(),
        "invalid input a: self.level()={} > a.level()={}",
        self.level(),
        a.level()
    );
    // The output must hold the result at the reduced level.
    // (Message fixed: it previously referred to a non-existent input `b`.)
    debug_assert!(
        c.level() >= a.level() - 1,
        "invalid input c: c.level()={} < a.level()-1={}",
        c.level(),
        a.level() - 1
    );
    // Cannot drop more moduli than the operand carries.
    debug_assert!(
        nb_moduli <= a.level(),
        "invalid input nb_moduli: nb_moduli={} > a.level()={}",
        nb_moduli,
        a.level()
    );
    if nb_moduli == 0 {
        // Nothing to rescale: plain copy (skipped when a and c are equal).
        if a != c {
            c.copy(a);
        }
    } else if NTT {
        // Leave the NTT domain once, perform every rescaling step in
        // coefficient representation, then re-enter NTT at the target level.
        self.intt::<false>(a, buf);
        (0..nb_moduli).for_each(|i| {
            // NOTE(review): a default-constructed PolyRNS is passed as the
            // scratch buffer; confirm div_round_by_last_modulus_inplace::<false>
            // never reads it (a zero-sized buffer would otherwise panic).
            self.at_level(self.level() - i)
                .div_round_by_last_modulus_inplace::<false>(
                    &mut PolyRNS::<u64>::default(),
                    buf,
                )
        });
        self.at_level(self.level() - nb_moduli).ntt::<false>(buf, c);
    } else {
        // First step writes a -> c (via buf); the remaining steps rescale c
        // in place at successively lower levels.
        self.div_round_by_last_modulus::<false>(a, buf, c);
        (1..nb_moduli).for_each(|i| {
            self.at_level(self.level() - i)
                .div_round_by_last_modulus_inplace::<false>(buf, c)
        });
    }
}
/// Updates `a` in place to round(a / prod_{i = level - nb_moduli + 1}^{level} q[i]),
/// dropping the last `nb_moduli` RNS moduli one at a time.
///
/// * `NTT`: when true, `a` is in the NTT domain; it is brought out of NTT,
///   rescaled in coefficient representation, then transformed back at the
///   reduced level.
/// * `buf`: scratch polynomial used for the INTT/NTT round trip.
pub fn div_round_by_last_moduli_inplace<const NTT: bool>(
&self,
nb_moduli: usize,
buf: &mut PolyRNS<u64>,
a: &mut PolyRNS<u64>,
) {
// The ring must not sit above the operand's level.
debug_assert!(
self.level() <= a.level(),
"invalid input a: self.level()={} > a.level()={}",
self.level(),
a.level()
);
// Cannot drop more moduli than the operand carries.
debug_assert!(
nb_moduli <= a.level(),
"invalid input nb_moduli: nb_moduli={} > a.level()={}",
nb_moduli,
a.level()
);
if NTT {
// Leave the NTT domain once, rescale every step in coefficient
// representation, then re-enter NTT at the target level.
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i| {
// NOTE(review): a default-constructed PolyRNS is passed as the scratch
// buffer; confirm div_round_by_last_modulus_inplace::<false> never
// reads it (a zero-sized default would otherwise be invalid scratch).
self.at_level(self.level() - i)
.div_round_by_last_modulus_inplace::<false>(&mut PolyRNS::<u64>::default(), buf)
});
// Transform back at the level reached after dropping nb_moduli moduli.
self.at_level(self.level() - nb_moduli).ntt::<false>(buf, a);
} else {
// Already in coefficient representation: rescale a in place directly,
// one modulus per iteration.
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_round_by_last_modulus_inplace::<false>(buf, a)
});
}
}
}

View File

@@ -92,9 +92,9 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn add_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut Poly<u64>) {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.sa_add_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
pub fn add_scalar_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.va_add_sb_into_va::<CHUNK, REDUCE>(b, &mut a.0);
}
#[inline(always)]
@@ -106,33 +106,47 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
pub fn add_scalar_then_mul_scalar_barrett_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, c: &Barrett<u64>, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
self.modulus.va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
}
#[inline(always)]
pub fn sub<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &Barrett<u64>, d: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(d.n() == self.n(), "c.n()={} != n={}", d.n(), self.n());
self.modulus
.va_add_sb_mul_sc_into_vd::<CHUNK, REDUCE>(&a.0, b, c, &mut d.0);
}
#[inline(always)]
pub fn sub_inplace<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_into_vb::<CHUNK, BRANGE, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn sub<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_sub_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
.va_sub_vb_into_vc::<CHUNK, BRANGE, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
pub fn neg<const ARANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.va_neg_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
self.modulus.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
pub fn neg_inplace<const ARANGE:u8,const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.va_neg_into_va::<CHUNK, REDUCE>(&mut a.0);
self.modulus.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
}
#[inline(always)]
@@ -208,7 +222,7 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn sum_aqqmb_prod_c_scalar_barrett<const REDUCE: REDUCEMOD>(
pub fn a_sub_b_mul_c_scalar_barrett<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
@@ -219,11 +233,11 @@ impl Ring<u64> {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_into_vd::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
.va_sub_vb_mul_sc_into_vd::<CHUNK, VBRANGE, REDUCE>(&a.0, &b.0, c, &mut d.0);
}
#[inline(always)]
pub fn sum_aqqmb_prod_c_scalar_barrett_inplace<const REDUCE: REDUCEMOD>(
pub fn a_sub_b_mul_c_scalar_barrett_inplace<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
c: &Barrett<u64>,
@@ -232,6 +246,6 @@ impl Ring<u64> {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_into_vb::<CHUNK, REDUCE>(&a.0, c, &mut b.0);
.va_sub_vb_mul_sc_into_vb::<CHUNK, BRANGE, REDUCE>(&a.0, c, &mut b.0);
}
}

View File

@@ -172,7 +172,7 @@ impl RingRNS<u64> {
}
#[inline(always)]
pub fn sub<const REDUCE: REDUCEMOD>(
pub fn sub<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &PolyRNS<u64>,
@@ -199,11 +199,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.sub::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
.for_each(|(i, ring)| ring.sub::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
}
#[inline(always)]
pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
pub fn sub_inplace<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -219,11 +219,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.sub_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
.for_each(|(i, ring)| ring.sub_inplace::<BRANGE, REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
pub fn neg<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -239,11 +239,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.neg::<REDUCE>(&a.0[i], &mut b.0[i]));
.for_each(|(i, ring)| ring.neg::<ARANGE, REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
pub fn neg_inplace<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -253,7 +253,7 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.neg_inplace::<REDUCE>(&mut a.0[i]));
.for_each(|(i, ring)| ring.neg_inplace::<ARANGE, REDUCE>(&mut a.0[i]));
}
#[inline(always)]