fixed rounding rescaling

Jean-Philippe Bossuat
2025-01-08 11:06:56 +01:00
parent 3db800f4ce
commit bdd57b91ed
13 changed files with 649 additions and 362 deletions

View File

@@ -8,7 +8,7 @@ extern crate test;
impl RingRNS<u64> {
/// Updates b to a / q[self.level()], flooring when ROUND is false and rounding to nearest when ROUND is true.
pub fn div_floor_by_last_modulus<const NTT: bool>(
pub fn div_by_last_modulus<const ROUND: bool, const NTT: bool>(
&self,
a: &PolyRNS<u64>,
buf: &mut PolyRNS<u64>,
@@ -30,34 +30,76 @@ impl RingRNS<u64> {
let level = self.level();
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let r_last: &Ring<u64> = &self.0[level];
if ROUND {
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
&buf_ntt_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
let q_level_half: u64 = r_last.modulus.q >> 1;
let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.a_add_b_scalar_into_c::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<true>(&mut buf_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
} else {
r_last.a_add_b_scalar_into_c::<ONCE>(a.at(self.level()), &q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.a_add_b_scalar_into_c::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
}
} else {
for (i, r) in self.0[0..level].iter().enumerate() {
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
a.at(level),
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
} else {
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
&buf_ntt_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
} else {
for (i, r) in self.0[0..level].iter().enumerate() {
r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
a.at(level),
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
}
}
}
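The ROUND branch above realizes rounded rescaling through the flooring identity round(x / q_last) = floor((x + q_last/2) / q_last): q_last/2 is added to the last residue, and the result s is corrected by -q_last/2 in every remaining residue before the multiplication by the per-modulus rescaling constant. A standalone sketch checking this identity with plain integers follows; the toy moduli and the Fermat inverse are assumptions for the demo only, none of it is library code.

fn inv_mod(a: u64, m: u64) -> u64 {
    // Fermat inverse; assumes m is prime.
    let (mut r, mut b, mut e) = (1u64, a % m, m - 2);
    while e > 0 {
        if e & 1 == 1 {
            r = (r as u128 * b as u128 % m as u128) as u64;
        }
        b = (b as u128 * b as u128 % m as u128) as u64;
        e >>= 1;
    }
    r
}

fn main() {
    let (q0, q1): (u64, u64) = (97, 193); // toy primes, for the arithmetic only
    let h = q1 >> 1;
    let q1_inv = inv_mod(q1 % q0, q0);
    for x in 0..q0 * q1 {
        let expected = ((x + h) / q1) % q0; // round(x / q1) mod q0
        let s = (x % q1 + h) % q1; // the corrected last residue
        // (x mod q0 + h - s) * q1^{-1} mod q0, the per-residue update.
        let got = (x % q0 + h % q0 + q0 - s % q0) % q0 * q1_inv % q0;
        assert_eq!(got, expected);
    }
}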
/// Updates a to a / q[self.level()], flooring when ROUND is false and rounding when ROUND is true.
/// Expects a to be in the NTT domain when NTT is true.
pub fn div_floor_by_last_modulus_inplace<const NTT: bool>(
pub fn div_by_last_modulus_inplace<const ROUND: bool, const NTT: bool>(
&self,
buf: &mut PolyRNS<u64>,
a: &mut PolyRNS<u64>,
@@ -71,32 +113,70 @@ impl RingRNS<u64> {
let level = self.level();
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let r_last: &Ring<u64> = &self.0[level];
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&buf_ntt_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
if ROUND {
let q_level_half: u64 = r_last.modulus.q >> 1;
let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.a_add_b_scalar_into_c::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<false>(&mut buf_qi_scaling[0]);
r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
&buf_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
let (a_qi, a_q_last) = a.0.split_at_mut(self.level());
r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut a_q_last[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<1, ONCE>(
&a_q_last[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&rescaling_constants.0[i],
&mut a_qi[i],
);
}
}
} else {
let (a_i, a_level) = a.0.split_at_mut(level);
for (i, r) in self.0[0..level].iter().enumerate() {
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&a_level[0],
&rescaling_constants.0[i],
&mut a_i[i],
);
} else {
if NTT {
let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
&buf_ntt_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
let (a_i, a_level) = a.0.split_at_mut(level);
for (i, r) in self.0[0..level].iter().enumerate() {
r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
&a_level[0],
&rescaling_constants.0[i],
&mut a_i[i],
);
}
}
}
}
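In the non-NTT ROUND path above, the scalar correction and the final multiplication are fused into b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a, so no scratch polynomial is needed. A plain-integer sketch of the per-coefficient update this suggests; the operand order is inferred from the inner helper name vb_sub_va_add_sc_mul_sd_into_va and is an assumption, not taken from library documentation.

// a[i] <- ((b[i] - a[i] + c) * d) mod q; Barrett preparation of d only
// changes how the product is reduced, not its value. Assumes q < 2^62
// so the intermediate sum below cannot overflow a u64.
fn fused_sub_add_mul(b: &[u64], c: u64, d: u64, q: u64, a: &mut [u64]) {
    for (ai, &bi) in a.iter_mut().zip(b) {
        let t = (bi % q + q - *ai % q + c % q) % q; // b - a + c, kept in [0, q)
        *ai = (t as u128 * d as u128 % q as u128) as u64;
    }
}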
/// Updates b to a / prod_{i = level - nb_moduli + 1}^{level} q[i], flooring when ROUND is false and rounding when ROUND is true.
pub fn div_floor_by_last_moduli<const NTT: bool>(
pub fn div_by_last_moduli<const ROUND: bool, const NTT: bool>(
&self,
nb_moduli: usize,
a: &PolyRNS<u64>,
@@ -133,38 +213,35 @@ impl RingRNS<u64> {
c.copy(a);
}
} else {
if NTT {
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_floor_by_last_modulus_inplace::<false>(
.div_by_last_modulus_inplace::<ROUND, false>(
&mut PolyRNS::<u64>::default(),
buf,
)
});
self.at_level(self.level() - nb_moduli).ntt::<false>(buf, c);
} else {
let empty_buf: &mut PolyRNS<u64> = &mut PolyRNS::<u64>::default();
if nb_moduli == 1{
self.div_floor_by_last_modulus::<false>(a, empty_buf, c);
}else{
self.div_floor_by_last_modulus::<false>(a, empty_buf, buf);
}
} else {
(1..nb_moduli-1).for_each(|i| {
self.at_level(self.level() - i)
.div_floor_by_last_modulus_inplace::<false>(empty_buf, buf);
});
self.div_by_last_modulus::<ROUND, false>(a, buf, c);
self.at_level(self.level()-nb_moduli+1).div_floor_by_last_modulus::<false>(buf, empty_buf, c);
(1..nb_moduli - 1).for_each(|i| {
self.at_level(self.level() - i)
.div_by_last_modulus_inplace::<ROUND, false>(buf, c);
});
self.at_level(self.level() - nb_moduli + 1).div_by_last_modulus_inplace::<ROUND, false>(buf, c);
}
}
}
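Dropping several moduli one at a time is exact for flooring, since floor(floor(x / q1) / q2) = floor(x / (q1 * q2)); with ROUND = true the chained result can differ from a single rounded division by at most one, the usual trade-off for staying in RNS. A quick standalone check of the flooring identity (toy moduli assumed for the demo):

fn main() {
    let (q1, q2): (u64, u64) = (193, 257);
    for x in (0..q1 * q2 * 3).step_by(7) {
        // Chained single-modulus floors match one floor by the product.
        assert_eq!(x / q1 / q2, x / (q1 * q2));
    }
}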
/// Updates a to a / prod_{i = level - nb_moduli + 1}^{level} q[i], flooring when ROUND is false and rounding when ROUND is true.
pub fn div_floor_by_last_moduli_inplace<const NTT: bool>(
pub fn div_by_last_moduli_inplace<const ROUND: bool, const NTT: bool>(
&self,
nb_moduli: usize,
buf: &mut PolyRNS<u64>,
@@ -185,218 +262,18 @@ impl RingRNS<u64> {
if nb_moduli == 0 {
return;
}
if NTT {
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_floor_by_last_modulus_inplace::<false>(&mut PolyRNS::<u64>::default(), buf)
});
self.at_level(self.level() - nb_moduli+1).ntt::<false>(buf, a);
} else {
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_floor_by_last_modulus_inplace::<false>(buf, a);
});
}
}
/// Updates b to round(a / q[b.level()]).
/// Expects b to be in the NTT domain.
pub fn div_round_by_last_modulus<const NTT: bool>(
&self,
a: &PolyRNS<u64>,
buf: &mut PolyRNS<u64>,
b: &mut PolyRNS<u64>,
) {
debug_assert!(
self.level() <= a.level(),
"invalid input a: self.level()={} > a.level()={}",
self.level(),
a.level()
);
debug_assert!(
b.level() >= a.level() - 1,
"invalid input b: b.level()={} < a.level()-1={}",
b.level(),
a.level() - 1
);
let level: usize = self.level();
let r_last: &Ring<u64> = &self.0[level];
let q_level_half: u64 = r_last.modulus.q >> 1;
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<true>(&mut buf_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
} else {
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.a_sub_b_mul_c_scalar_barrett::<2, ONCE>(
&buf_qi_scaling[0],
a.at(i),
&rescaling_constants.0[i],
b.at_mut(i),
);
}
}
}
/// Updates a to round(a / q[b.level()]).
/// Expects a to be in the NTT domain.
pub fn div_round_by_last_modulus_inplace<const NTT: bool>(
&self,
buf: &mut PolyRNS<u64>,
a: &mut PolyRNS<u64>,
) {
debug_assert!(
self.level() <= a.level(),
"invalid input a: self.level()={} > a.level()={}",
self.level(),
a.level()
);
let level = self.level();
let r_last: &Ring<u64> = &self.0[level];
let q_level_half: u64 = r_last.modulus.q >> 1;
let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
let (buf_q_scaling, buf_qi_scaling) = buf.0.split_at_mut(1);
if NTT {
r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.ntt_inplace::<false>(&mut buf_qi_scaling[0]);
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&buf_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
} else {
r_last.add_scalar_inplace::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
for (i, r) in self.0[0..level].iter().enumerate() {
r_last.add_scalar::<NONE>(
&buf_q_scaling[0],
&(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
&mut buf_qi_scaling[0],
);
r.a_sub_b_mul_c_scalar_barrett_inplace::<2, ONCE>(
&buf_qi_scaling[0],
&rescaling_constants.0[i],
a.at_mut(i),
);
}
}
}
/// Updates b to round(a / prod_{level - nb_moduli}^{level} q[i])
pub fn div_round_by_last_moduli<const NTT: bool>(
&self,
nb_moduli: usize,
a: &PolyRNS<u64>,
buf: &mut PolyRNS<u64>,
c: &mut PolyRNS<u64>,
) {
debug_assert!(
self.level() <= a.level(),
"invalid input a: self.level()={} > a.level()={}",
self.level(),
a.level()
);
debug_assert!(
c.level() >= a.level() - 1,
"invalid input b: b.level()={} < a.level()-1={}",
c.level(),
a.level() - 1
);
debug_assert!(
nb_moduli <= a.level(),
"invalid input nb_moduli: nb_moduli={} > a.level()={}",
nb_moduli,
a.level()
);
if nb_moduli == 0 {
if a != c {
c.copy(a);
}
} else {
if NTT {
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_round_by_last_modulus_inplace::<false>(
&mut PolyRNS::<u64>::default(),
buf,
)
});
self.at_level(self.level() - nb_moduli).ntt::<false>(buf, c);
} else {
self.div_round_by_last_modulus::<false>(a, buf, c);
(1..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_round_by_last_modulus_inplace::<false>(buf, c)
});
}
}
}
/// Updates a to round(a / prod_{level - nb_moduli}^{level} q[i])
pub fn div_round_by_last_moduli_inplace<const NTT: bool>(
&self,
nb_moduli: usize,
buf: &mut PolyRNS<u64>,
a: &mut PolyRNS<u64>,
) {
debug_assert!(
self.level() <= a.level(),
"invalid input a: self.level()={} > a.level()={}",
self.level(),
a.level()
);
debug_assert!(
nb_moduli <= a.level(),
"invalid input nb_moduli: nb_moduli={} > a.level()={}",
nb_moduli,
a.level()
);
if NTT {
self.intt::<false>(a, buf);
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_round_by_last_modulus_inplace::<false>(&mut PolyRNS::<u64>::default(), buf)
.div_by_last_modulus_inplace::<ROUND, false>(&mut PolyRNS::<u64>::default(), buf)
});
self.at_level(self.level() - nb_moduli).ntt::<false>(buf, a);
} else {
(0..nb_moduli).for_each(|i| {
self.at_level(self.level() - i)
.div_round_by_last_modulus_inplace::<false>(buf, a)
.div_by_last_modulus_inplace::<ROUND, false>(buf, a)
});
}
}
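Taken together, the changes in this file collapse the former floor/round entry points into one family selected by the ROUND const generic. The mapping, read directly off the signature changes above:

// div_floor_by_last_modulus::<NTT>           -> div_by_last_modulus::<false, NTT>
// div_round_by_last_modulus::<NTT>           -> div_by_last_modulus::<true, NTT>
// div_floor_by_last_modulus_inplace::<NTT>   -> div_by_last_modulus_inplace::<false, NTT>
// div_round_by_last_modulus_inplace::<NTT>   -> div_by_last_modulus_inplace::<true, NTT>
// div_floor_by_last_moduli::<NTT>            -> div_by_last_moduli::<false, NTT>
// div_round_by_last_moduli::<NTT>            -> div_by_last_moduli::<true, NTT>
// div_floor_by_last_moduli_inplace::<NTT>    -> div_by_last_moduli_inplace::<false, NTT>
// div_round_by_last_moduli_inplace::<NTT>    -> div_by_last_moduli_inplace::<true, NTT>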

View File

@@ -75,7 +75,7 @@ impl Ring<u64> {
impl Ring<u64> {
#[inline(always)]
pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
pub fn a_add_b_into_b<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
@@ -83,7 +83,7 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn add<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
@@ -92,13 +92,13 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn add_scalar_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
pub fn a_add_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.va_add_sb_into_va::<CHUNK, REDUCE>(b, &mut a.0);
}
#[inline(always)]
pub fn add_scalar<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
@@ -106,7 +106,7 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn add_scalar_then_mul_scalar_barrett_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, c: &Barrett<u64>, a: &mut Poly<u64>) {
pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, c: &Barrett<u64>, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
self.modulus.va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
}
@@ -120,7 +120,7 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn sub_inplace<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
pub fn a_sub_b_into_b<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
@@ -128,7 +128,15 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn sub<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
pub fn a_sub_b_into_a<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, b: &Poly<u64>, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, &mut a.0);
}
#[inline(always)]
pub fn a_sub_b_into_c<const BRANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &Poly<u64>, c: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
@@ -137,20 +145,20 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn neg<const ARANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
pub fn a_neg_into_b<const ARANGE:u8, const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
}
#[inline(always)]
pub fn neg_inplace<const ARANGE:u8,const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
pub fn a_neg_into_a<const ARANGE:u8,const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
}
#[inline(always)]
pub fn mul_montgomery_external<const REDUCE: REDUCEMOD>(
pub fn a_mul_b_montgomery_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<Montgomery<u64>>,
b: &Poly<u64>,
@@ -164,20 +172,20 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn mul_montgomery_external_inplace<const REDUCE: REDUCEMOD>(
pub fn a_mul_b_montgomery_into_a<const REDUCE: REDUCEMOD>(
&self,
a: &Poly<Montgomery<u64>>,
b: &mut Poly<u64>,
b: &Poly<Montgomery<u64>>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_mont_mul_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
.va_mont_mul_vb_into_vb::<CHUNK, REDUCE>(&b.0, &mut a.0);
}
#[inline(always)]
pub fn mul_scalar<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(
&self.modulus.barrett.prepare(*b),
@@ -187,30 +195,30 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut Poly<u64>) {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
pub fn a_mul_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(
&self
.modulus
.barrett
.prepare(self.modulus.barrett.reduce::<BARRETT>(a)),
&mut b.0,
.prepare(self.modulus.barrett.reduce::<BARRETT>(b)),
&mut a.0,
);
}
#[inline(always)]
pub fn mul_scalar_barrett_inplace<const REDUCE: REDUCEMOD>(
pub fn a_mul_b_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
&self,
a: &Barrett<u64>,
b: &mut Poly<u64>,
b: &Barrett<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(b, &mut a.0);
}
#[inline(always)]
pub fn mul_scalar_barrett<const REDUCE: REDUCEMOD>(
pub fn a_mul_b_scalar_barrett_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Barrett<u64>,
b: &Poly<u64>,
@@ -222,7 +230,7 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn a_sub_b_mul_c_scalar_barrett<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
pub fn a_sub_b_mul_c_scalar_barrett_into_d<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
@@ -237,15 +245,46 @@ impl Ring<u64> {
}
#[inline(always)]
pub fn a_sub_b_mul_c_scalar_barrett_inplace<const BRANGE: u8, const REDUCE: REDUCEMOD>(
pub fn b_sub_a_mul_c_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
c: &Barrett<u64>,
b: &mut Poly<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_into_vb::<CHUNK, BRANGE, REDUCE>(&a.0, c, &mut b.0);
.va_sub_vb_mul_sc_into_vb::<CHUNK, BRANGE, REDUCE>(&b.0, c, &mut a.0);
}
#[inline(always)]
pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &Poly<u64>,
b: &Poly<u64>,
c: &u64,
d: &Barrett<u64>,
e: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(e.n() == self.n(), "e.n()={} != n={}", e.n(), self.n());
self.modulus
.vb_sub_va_add_sc_mul_sd_into_ve::<CHUNK, BRANGE, REDUCE>(&a.0, &b.0, c, d, &mut e.0);
}
#[inline(always)]
pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
b: &Poly<u64>,
c: &u64,
d: &Barrett<u64>,
a: &mut Poly<u64>,
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.vb_sub_va_add_sc_mul_sd_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
}
}
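The renames in this file standardize on spelling out operands and destination (a_add_b_into_c, a_sub_b_mul_c_scalar_barrett_into_d, ...). As a reference point, here is a plain-integer sketch of the per-coefficient behavior the core rescaling primitive's name suggests; the semantics are an assumption inferred from the naming convention, not from documentation, and VBRANGE bounds the subtrahend's range in multiples of q.

// d[i] = ((a[i] + VBRANGE*q - b[i]) * c) mod q, i.e. (a - b) * c mod q,
// with b[i] allowed in [0, VBRANGE*q) so the subtraction never underflows.
// Assumes q < 2^62 so a[i] + VBRANGE*q fits in a u64.
fn a_sub_b_mul_c_scalar_into_d(a: &[u64], b: &[u64], c: u64, q: u64, d: &mut [u64]) {
    const VBRANGE: u64 = 2;
    for ((&ai, &bi), di) in a.iter().zip(b).zip(d.iter_mut()) {
        let t = ai + VBRANGE * q - bi;
        *di = (t as u128 * c as u128 % q as u128) as u64;
    }
}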

View File

@@ -7,6 +7,8 @@ use crate::scalar::ScalarRNS;
use num_bigint::BigInt;
use std::sync::Arc;
impl RingRNS<u64> {
pub fn new(n: usize, moduli: Vec<u64>) -> Self {
assert!(!moduli.is_empty(), "moduli cannot be empty");
@@ -121,7 +123,7 @@ impl RingRNS<u64> {
impl RingRNS<u64> {
#[inline(always)]
pub fn add<const REDUCE: REDUCEMOD>(
pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &PolyRNS<u64>,
@@ -148,11 +150,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.add::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
.for_each(|(i, ring)| ring.a_add_b_into_c::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
}
#[inline(always)]
pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
pub fn a_add_b_into_b<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -168,11 +170,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.add_inplace::<REDUCE>(&a.0[i], &mut b.0[i]));
.for_each(|(i, ring)| ring.a_add_b_into_b::<REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn sub<const BRANGE: u8, const REDUCE: REDUCEMOD>(
pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &PolyRNS<u64>,
b: &PolyRNS<u64>,
@@ -199,11 +201,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.sub::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
.for_each(|(i, ring)| ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
}
#[inline(always)]
pub fn sub_inplace<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -219,11 +221,11 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.sub_inplace::<BRANGE, REDUCE>(&a.0[i], &mut b.0[i]));
.for_each(|(i, ring)| ring.a_sub_b_into_b::<BRANGE, REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn neg<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &PolyRNS<u64>, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -239,11 +241,31 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.neg::<ARANGE, REDUCE>(&a.0[i], &mut b.0[i]));
.for_each(|(i, ring)| ring.a_sub_b_into_a::<BRANGE, REDUCE>(&b.0[i], &mut a.0[i]));
}
#[inline(always)]
pub fn neg_inplace<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_neg_into_b::<ARANGE, REDUCE>(&a.0[i], &mut b.0[i]));
}
#[inline(always)]
pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
@@ -253,7 +275,7 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.neg_inplace::<ARANGE, REDUCE>(&mut a.0[i]));
.for_each(|(i, ring)| ring.a_neg_into_a::<ARANGE, REDUCE>(&mut a.0[i]));
}
#[inline(always)]
@@ -282,7 +304,7 @@ impl RingRNS<u64> {
self.level()
);
self.0.iter().enumerate().for_each(|(i, ring)| {
ring.mul_montgomery_external::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
ring.a_mul_b_montgomery_into_c::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
});
}
@@ -305,7 +327,7 @@ impl RingRNS<u64> {
self.level()
);
self.0.iter().enumerate().for_each(|(i, ring)| {
ring.mul_montgomery_external_inplace::<REDUCE>(&a.0[i], &mut b.0[i])
ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i])
});
}
@@ -331,11 +353,57 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.mul_scalar::<REDUCE>(&a.0[i], b, &mut c.0[i]));
.for_each(|(i, ring)| ring.a_mul_b_scalar_into_c::<REDUCE>(&a.0[i], b, &mut c.0[i]));
}
#[inline(always)]
pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut PolyRNS<u64>) {
pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"b.level()={} < self.level()={}",
a.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_mul_b_scalar_into_a::<REDUCE>(b, &mut a.0[i]));
}
#[inline(always)]
pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &PolyRNS<u64>, c: &u64, d: &Barrett<u64>, e: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
b.level(),
self.level()
);
debug_assert!(
e.level() >= self.level(),
"e.level()={} < self.level()={}",
e.level(),
self.level()
);
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(&a.0[i], &b.0[i], c, d, &mut e.0[i]));
}
#[inline(always)]
pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &PolyRNS<u64>, c: &u64, d: &Barrett<u64>, a: &mut PolyRNS<u64>) {
debug_assert!(
a.level() >= self.level(),
"a.level()={} < self.level()={}",
a.level(),
self.level()
);
debug_assert!(
b.level() >= self.level(),
"b.level()={} < self.level()={}",
@@ -345,6 +413,6 @@ impl RingRNS<u64> {
self.0
.iter()
.enumerate()
.for_each(|(i, ring)| ring.mul_scalar_inplace::<REDUCE>(a, &mut b.0[i]));
.for_each(|(i, ring)| ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(&b.0[i], c, d, &mut a.0[i]));
}
}

View File

@@ -1,6 +1,8 @@
use crate::modulus::WordOps;
use crate::poly::{Poly, PolyRNS};
use crate::ring::{Ring, RingRNS};
use num::ToPrimitive;
use rand_distr::{Normal, Distribution};
use sampling::source::Source;
impl Ring<u64> {
@@ -10,6 +12,24 @@ impl Ring<u64> {
a.0.iter_mut()
.for_each(|a| *a = source.next_u64n(max, mask));
}
pub fn fill_dist_f64<T: Distribution<f64>>(&self, source: &mut Source, dist: T, bound: f64, a: &mut Poly<u64>) {
let max: u64 = self.modulus.q;
a.0.iter_mut()
.for_each(|a| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source);
}
// Round the magnitude to nearest and map into [0, q): x >= 0 -> x,
// x < 0 -> q - |x| (a zero magnitude maps to 0, never to q).
let dist_u64: u64 = (dist_f64.abs() + 0.5).to_u64().unwrap();
let sign: u64 = dist_f64.to_bits() >> 63;
*a = if sign == 1 && dist_u64 != 0 { max - dist_u64 } else { dist_u64 };
});
}
}
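A standalone check of the centered mapping used above; it assumes negative samples are meant to be stored as q - |x|, the usual balanced representation, with a zero magnitude always mapping to 0:

fn to_zq(x: f64, q: u64) -> u64 {
    let mag = (x.abs() + 0.5) as u64; // round magnitude to nearest integer
    if x < 0.0 && mag != 0 { q - mag } else { mag }
}

fn main() {
    let q = 97u64;
    assert_eq!(to_zq(1.4, q), 1);
    assert_eq!(to_zq(1.6, q), 2);
    assert_eq!(to_zq(-1.6, q), 95); // q - 2
    assert_eq!(to_zq(-0.4, q), 0);  // never q itself
}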
impl RingRNS<u64> {
@@ -19,4 +39,21 @@ impl RingRNS<u64> {
.enumerate()
.for_each(|(i, r)| r.fill_uniform(source, a.at_mut(i)));
}
pub fn fill_dist_f64<T: Distribution<f64>>(&self, source: &mut Source, dist: T, bound: f64, a: &mut PolyRNS<u64>) {
(0..a.n()).for_each(|j| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source);
}
let dist_u64: u64 = (dist_f64.abs() + 0.5).to_u64().unwrap();
let sign: u64 = dist_f64.to_bits() >> 63;
// Store the same centered value in every residue: q_i - |x| when x < 0,
// and never q_i itself when the magnitude rounds to 0.
self.0.iter().enumerate().for_each(|(i, r)| {
a.at_mut(i).0[j] = if sign == 1 && dist_u64 != 0 { r.modulus.q - dist_u64 } else { dist_u64 };
})
})
}
}
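The RNS variant samples one value per coefficient and writes its reduction into every residue, which keeps the residues CRT-consistent. A brute-force sketch (toy moduli assumed) of why storing q_i - |x| in each residue represents the same negative integer modulo the full product:

fn main() {
    let (q0, q1): (u64, u64) = (97, 193);
    let mag = 5u64;
    let big = q0 * q1;
    // Find the unique value in [0, q0*q1) with residues (q0 - mag, q1 - mag).
    let x = (0..big)
        .find(|x| x % q0 == q0 - mag && x % q1 == q1 - mag)
        .unwrap();
    assert_eq!(x, big - mag); // i.e. -mag mod q0*q1
}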