use crate::cast_mut_u8_to_mut_i64_slice;
use crate::ffi::znx::{
    znx_automorphism_i64, znx_automorphism_inplace_i64, znx_normalize, znx_zero_i64_ref,
};
use crate::module::Module;
use itertools::izip;
use rand_distr::{Distribution, Normal};
use sampling::source::Source;
use std::cmp::min;

impl Module {
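    /// Allocates a [VecZnx] with the degree of this module, with `limbs`
    /// limbs in base `2^log_base2k`.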
    pub fn new_vec_znx(&self, log_base2k: usize, limbs: usize) -> VecZnx {
        VecZnx::new(self.n(), log_base2k, limbs)
    }
}

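/// A vector of polynomials of Z[X]/(X^n + 1) with coefficients decomposed in
/// base `2^log_base2k`: limb `i` (the slice `data[i*n..(i+1)*n]`) holds the
/// i-th most significant digit of each coefficient.
///
/// A round-trip sketch, mirroring the tests below (parameters illustrative):
///
/// ```ignore
/// let mut a = VecZnx::new(8, 17, 5); // n=8, base 2^17, 5 limbs
/// let log_k = 5 * 17 - 5;            // encoding precision in bits
/// a.from_i64(&values, 10, log_k);    // `values`: up to 8 small i64s
/// a.to_i64(&mut out, log_k);         // recovers `values` into `out`
/// ```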
#[derive(Clone)]
pub struct VecZnx {
    pub n: usize,
    pub log_base2k: usize,
    pub data: Vec<i64>,
}

impl VecZnx {
    pub fn new(n: usize, log_base2k: usize, limbs: usize) -> Self {
        Self {
            n,
            log_base2k,
            data: vec![i64::default(); Self::buffer_size(n, limbs)],
        }
    }

    pub fn buffer_size(n: usize, limbs: usize) -> usize {
        n * limbs
    }

    pub fn from_buffer(&mut self, n: usize, log_base2k: usize, limbs: usize, buf: &[i64]) {
        let size = Self::buffer_size(n, limbs);
        assert!(
            buf.len() >= size,
            "invalid buffer: buf.len()={} < self.buffer_size(n={}, limbs={})={}",
            buf.len(),
            n,
            limbs,
            size
        );
        self.n = n;
        self.log_base2k = log_base2k;
        self.data = buf[..size].to_vec();
    }

    pub fn log_n(&self) -> u64 {
        // usize::BITS (rather than u64::BITS) keeps this correct on 32-bit targets.
        (usize::BITS - (self.n - 1).leading_zeros()) as _
    }

    pub fn n(&self) -> usize {
        self.n
    }

    pub fn limbs(&self) -> usize {
        self.data.len() / self.n
    }

    pub fn copy_from(&mut self, a: &VecZnx) {
        let size = min(self.data.len(), a.data.len());
        self.data[..size].copy_from_slice(&a.data[..size])
    }

    pub fn as_ptr(&self) -> *const i64 {
        self.data.as_ptr()
    }

    pub fn as_mut_ptr(&mut self) -> *mut i64 {
        self.data.as_mut_ptr()
    }

    pub fn at(&self, i: usize) -> &[i64] {
        &self.data[i * self.n..(i + 1) * self.n]
    }

    pub fn at_ptr(&self, i: usize) -> *const i64 {
        &self.data[i * self.n] as *const i64
    }

    pub fn at_mut_ptr(&mut self, i: usize) -> *mut i64 {
        &mut self.data[i * self.n] as *mut i64
    }

    pub fn at_mut(&mut self, i: usize) -> &mut [i64] {
        &mut self.data[i * self.n..(i + 1) * self.n]
    }

    pub fn zero(&mut self) {
        unsafe { znx_zero_i64_ref(self.data.len() as u64, self.data.as_mut_ptr()) }
    }

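    /// Encodes `data` at precision `log_k`, decomposing each value in base
    /// `2^log_base2k` over the least significant limbs. `log_max` is assumed
    /// (not checked) to bound the magnitude of the inputs; only the first
    /// `min(data.len(), self.n())` coefficients are written.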
    pub fn from_i64(&mut self, data: &[i64], log_max: usize, log_k: usize) {
        let limbs: usize = (log_k + self.log_base2k - 1) / self.log_base2k;

        assert!(
            limbs <= self.limbs(),
            "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}",
            limbs,
            self.limbs()
        );

        let size: usize = min(data.len(), self.n());
        let log_k_rem: usize = self.log_base2k - (log_k % self.log_base2k);

        // Mirror `from_i64_single`: operate on the full limb count so that no
        // stale limbs are left between the zeroed and the written ranges.
        let limbs: usize = self.limbs();

        // If 2^{log_max} * 2^{log_k_rem} < 2^{63}, then we can simply copy
        // the values onto the last limb.
        // Else we decompose the values in base 2^{log_base2k}.
        if log_max + log_k_rem < 63 || log_k_rem == self.log_base2k {
            (0..limbs - 1).for_each(|i| unsafe {
                znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
            });
            self.at_mut(limbs - 1)[..size].copy_from_slice(&data[..size]);
        } else {
            let mask: i64 = (1 << self.log_base2k) - 1;
            let steps: usize = min(limbs, (log_max + self.log_base2k - 1) / self.log_base2k);

            // Zero the limbs that the decomposition below does not overwrite.
            (0..limbs - steps).for_each(|i| unsafe {
                znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
            });

            (limbs - steps..limbs)
                .rev()
                .enumerate()
                .for_each(|(i, i_rev)| {
                    let shift: usize = i * self.log_base2k;
                    izip!(self.at_mut(i_rev)[..size].iter_mut(), data[..size].iter())
                        .for_each(|(y, x)| *y = (x >> shift) & mask);
                })
        }

        // Case where log_k is not a multiple of log_base2k: scale the written
        // limbs so that the encoding is aligned on a limb boundary.
        if log_k_rem != self.log_base2k {
            let steps: usize = min(limbs, (log_max + self.log_base2k - 1) / self.log_base2k);
            (limbs - steps..limbs).rev().for_each(|i| {
                self.at_mut(i)[..size]
                    .iter_mut()
                    .for_each(|x| *x <<= log_k_rem);
            })
        }
    }

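    /// Single-coefficient variant of [Self::from_i64]: encodes `value` into
    /// coefficient `i` at precision `log_k`.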
    pub fn from_i64_single(&mut self, i: usize, value: i64, log_max: usize, log_k: usize) {
        assert!(i < self.n());
        let limbs: usize = (log_k + self.log_base2k - 1) / self.log_base2k;
        assert!(
            limbs <= self.limbs(),
            "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}",
            limbs,
            self.limbs()
        );
        let log_k_rem: usize = self.log_base2k - (log_k % self.log_base2k);
        let limbs: usize = self.limbs();

        // If 2^{log_max} * 2^{log_k_rem} < 2^{63}, then we can simply copy
        // the value onto the last limb.
        // Else we decompose the value in base 2^{log_base2k}.
        if log_max + log_k_rem < 63 || log_k_rem == self.log_base2k {
            (0..limbs - 1).for_each(|j| self.at_mut(j)[i] = 0);

            self.at_mut(limbs - 1)[i] = value;
        } else {
            let mask: i64 = (1 << self.log_base2k) - 1;
            let steps: usize = min(limbs, (log_max + self.log_base2k - 1) / self.log_base2k);

            (0..limbs - steps).for_each(|j| self.at_mut(j)[i] = 0);

            (limbs - steps..limbs)
                .rev()
                .enumerate()
                .for_each(|(j, j_rev)| {
                    self.at_mut(j_rev)[i] = (value >> (j * self.log_base2k)) & mask;
                })
        }

        // Case where log_k is not a multiple of log_base2k: align the
        // encoding on a limb boundary.
        if log_k_rem != self.log_base2k {
            let steps: usize = min(limbs, (log_max + self.log_base2k - 1) / self.log_base2k);
            (limbs - steps..limbs).rev().for_each(|j| {
                self.at_mut(j)[i] <<= log_k_rem;
            })
        }
    }

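    /// Normalizes every digit to the centered representation of base
    /// `2^log_base2k`, propagating carries from the least significant limb
    /// (the last one) upwards. `carry` is scratch space of at least `8 * n`
    /// bytes, reinterpreted as `n` i64 values.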
    pub fn normalize(&mut self, carry: &mut [u8]) {
        assert!(
            carry.len() >= self.n * 8,
            "invalid carry: carry.len()={} < 8*self.n()={}",
            carry.len(),
            self.n() * 8
        );

        let carry_i64: &mut [i64] = cast_mut_u8_to_mut_i64_slice(carry);

        unsafe {
            znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
            // Propagate carries from the least significant limb (the last
            // one) up to the most significant limb (the first one).
            (0..self.limbs()).rev().for_each(|i| {
                znx_normalize(
                    self.n as u64,
                    self.log_base2k as u64,
                    self.at_mut_ptr(i),
                    carry_i64.as_mut_ptr(),
                    self.at_mut_ptr(i),
                    carry_i64.as_mut_ptr(),
                )
            });
        }
    }

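    /// Decodes the coefficients at precision `log_k` into `data`, recomposing
    /// the first `ceil(log_k / log_base2k)` limbs from the most significant
    /// digit down.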
    pub fn to_i64(&self, data: &mut [i64], log_k: usize) {
        let limbs: usize = (log_k + self.log_base2k - 1) / self.log_base2k;
        assert!(
            data.len() >= self.n,
            "invalid data: data.len()={} < self.n()={}",
            data.len(),
            self.n
        );
        // `copy_from_slice` requires equal lengths: restrict `data` to the
        // first n entries, since the assert only guarantees data.len() >= n.
        data[..self.n].copy_from_slice(self.at(0));
        let rem: usize = self.log_base2k - (log_k % self.log_base2k);
        (1..limbs).for_each(|i| {
            if i == limbs - 1 && rem != self.log_base2k {
                let k_rem: usize = self.log_base2k - rem;
                izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
                    *y = (*y << k_rem) + (x >> rem);
                });
            } else {
                izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
                    *y = (*y << self.log_base2k) + x;
                });
            }
        })
    }

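    /// Single-coefficient variant of [Self::to_i64]: decodes coefficient `i`
    /// at precision `log_k`.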
    pub fn to_i64_single(&self, i: usize, log_k: usize) -> i64 {
        let limbs: usize = (log_k + self.log_base2k - 1) / self.log_base2k;
        assert!(i < self.n());
        let mut res: i64 = self.at(0)[i];
        let rem: usize = self.log_base2k - (log_k % self.log_base2k);
        // Accumulate coefficient `i` of limb `j`, from the most significant
        // limb down (a distinct loop variable avoids shadowing `i`).
        (1..limbs).for_each(|j| {
            let x = self.at(j)[i];
            if j == limbs - 1 && rem != self.log_base2k {
                let k_rem: usize = self.log_base2k - rem;
                res = (res << k_rem) + (x >> rem);
            } else {
                res = (res << self.log_base2k) + x;
            }
        });
        res
    }

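    /// Applies the Galois automorphism X -> X^gal_el to each limb in place.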
    pub fn automorphism_inplace(&mut self, gal_el: i64) {
        unsafe {
            (0..self.limbs()).for_each(|i| {
                znx_automorphism_inplace_i64(self.n as u64, gal_el, self.at_mut_ptr(i))
            })
        }
    }

    /// Applies the Galois automorphism X -> X^gal_el limb-wise, writing the
    /// result into `a` (which must have the same degree).
    pub fn automorphism(&self, gal_el: i64, a: &mut VecZnx) {
        debug_assert_eq!(a.n(), self.n());
        unsafe {
            (0..self.limbs()).for_each(|i| {
                znx_automorphism_i64(self.n as u64, gal_el, a.at_mut_ptr(i), self.at_ptr(i))
            })
        }
    }

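    /// Fills all limbs with uniformly random digits from `source` at
    /// precision `log_k`; when `log_k` is not a multiple of `log_base2k`,
    /// only the most significant bits of the last limb are sampled.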
    pub fn fill_uniform(&mut self, source: &mut Source, log_k: usize) {
        let mut base2k: u64 = 1 << self.log_base2k;
        let mut mask: u64 = base2k - 1;
        let mut base2k_half: i64 = (base2k >> 1) as i64;

        let size: usize = self.n() * (self.limbs() - 1);

        // All limbs except the last: uniform digits in
        // [-2^{log_base2k-1}, 2^{log_base2k-1}).
        self.data[..size]
            .iter_mut()
            .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);

        let log_base2k_rem: usize = log_k % self.log_base2k;

        // Last limb: when log_k is not a multiple of log_base2k, sample only
        // the top log_base2k_rem bits of the limb.
        if log_base2k_rem != 0 {
            base2k = 1 << log_base2k_rem;
            mask = (base2k - 1) << (self.log_base2k - log_base2k_rem);
            base2k_half = ((mask >> 1) + 1) as i64;
        }

        self.data[size..]
            .iter_mut()
            .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
    }

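    /// Adds noise drawn from `dist` to the last limb, rejection-sampling each
    /// draw until it falls within `bound`, and scaling it by
    /// `2^(log_k % log_base2k)` to align with the encoding at precision
    /// `log_k`.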
    pub fn add_dist_f64<T: Distribution<f64>>(
        &mut self,
        source: &mut Source,
        dist: T,
        bound: f64,
        log_k: usize,
    ) {
        // When log_k is a multiple of log_base2k this shift is zero, so a
        // single loop covers both cases.
        let log_base2k_rem: usize = log_k % self.log_base2k;

        self.at_mut(self.limbs() - 1).iter_mut().for_each(|a| {
            // Rejection-sample until the draw falls within the bound.
            let mut dist_f64: f64 = dist.sample(source);
            while dist_f64.abs() > bound {
                dist_f64 = dist.sample(source)
            }
            *a += (dist_f64.round() as i64) << log_base2k_rem
        });
    }

    pub fn add_normal(&mut self, source: &mut Source, sigma: f64, bound: f64, log_k: usize) {
        self.add_dist_f64(source, Normal::new(0.0, sigma).unwrap(), bound, log_k);
    }

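    /// Truncates `k` bits of precision: drops `k / log_base2k` whole limbs
    /// and masks the low `k % log_base2k` bits of the new last limb.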
    pub fn trunc_pow2(&mut self, k: usize) {
        if k == 0 {
            return;
        }

        self.data
            .truncate((self.limbs() - k / self.log_base2k) * self.n());

        let k_rem: usize = k % self.log_base2k;

        if k_rem != 0 {
            let mask: i64 = ((1 << (self.log_base2k - k_rem - 1)) - 1) << k_rem;
            self.at_mut(self.limbs() - 1)
                .iter_mut()
                .for_each(|x: &mut i64| *x &= mask)
        }
    }

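    /// Right-shifts the vector by `k` bits (a division by `2^k` of the
    /// represented values), using `carry` (at least `8 * n` bytes) as scratch
    /// space for the cross-limb remainders.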
    pub fn rsh(&mut self, k: usize, carry: &mut [u8]) {
        assert!(
            carry.len() >> 3 >= self.n(),
            "invalid carry: carry.len()/8={} < self.n()={}",
            carry.len() >> 3,
            self.n()
        );

        let limbs: usize = self.limbs();
        let limbs_steps: usize = k / self.log_base2k;

        // Whole-limb shift: move every limb toward the least significant end
        // and zero the vacated most significant limbs.
        self.data.rotate_right(self.n * limbs_steps);
        unsafe {
            znx_zero_i64_ref((self.n * limbs_steps) as u64, self.data.as_mut_ptr());
        }

        let k_rem = k % self.log_base2k;

        if k_rem != 0 {
            let carry_i64: &mut [i64] = cast_mut_u8_to_mut_i64_slice(carry);

            unsafe {
                znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
            }

            let mask: i64 = (1 << k_rem) - 1;
            let log_base2k: usize = self.log_base2k;

            (limbs_steps..limbs).for_each(|i| {
                izip!(carry_i64.iter_mut(), self.at_mut(i).iter_mut()).for_each(|(ci, xi)| {
                    *xi += *ci << log_base2k;
                    *ci = *xi & mask;
                    // Arithmetic shift (floor) keeps (*xi << k_rem) + *ci
                    // equal to the pre-shift value, including for negative
                    // digits, which division toward zero would not.
                    *xi >>= k_rem;
                });
            })
        }
    }

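    /// Maps this vector onto `a` of (possibly) different degree: coefficients
    /// are subsampled when `self.n() > a.n()`, or spread out with zero gaps
    /// when `a.n() > self.n()`.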
    pub fn switch_degree(&self, a: &mut VecZnx) {
        let (n_in, n_out) = (self.n(), a.n());

        let (gap_in, gap_out) = if n_in > n_out {
            (n_in / n_out, 1)
        } else {
            // Going up in degree leaves gaps between mapped coefficients, so
            // clear the destination first.
            a.zero();
            (1, n_out / n_in)
        };

        let limbs = min(self.limbs(), a.limbs());

        (0..limbs).for_each(|i| {
            izip!(
                self.at(i).iter().step_by(gap_in),
                a.at_mut(i).iter_mut().step_by(gap_out)
            )
            .for_each(|(x_in, x_out)| *x_out = *x_in);
        });
    }

    pub fn print_limbs(&self, limbs: usize, n: usize) {
        (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
    }
}

#[cfg(test)]
mod tests {
    use crate::VecZnx;
    use itertools::izip;
    use sampling::source::Source;

    #[test]
    fn test_set_get_i64_lo_norm() {
        let n: usize = 8;
        let log_base2k: usize = 17;
        let limbs: usize = 5;
        let log_k: usize = limbs * log_base2k - 5;
        let mut a: VecZnx = VecZnx::new(n, log_base2k, limbs);
        let mut have: Vec<i64> = vec![i64::default(); n];
        have.iter_mut()
            .enumerate()
            .for_each(|(i, x)| *x = (i as i64) - (n as i64) / 2);
        a.from_i64(&have, 10, log_k);
        let mut want = vec![i64::default(); n];
        a.to_i64(&mut want, log_k);
        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b));
    }

    #[test]
    fn test_set_get_i64_hi_norm() {
        let n: usize = 8;
        let log_base2k: usize = 17;
        let limbs: usize = 5;
        let log_k: usize = limbs * log_base2k - 5;
        let mut a: VecZnx = VecZnx::new(n, log_base2k, limbs);
        let mut have: Vec<i64> = vec![i64::default(); n];
        let mut source = Source::new([1; 32]);
        have.iter_mut().for_each(|x| {
            *x = source
                .next_u64n(u64::MAX, u64::MAX)
                .wrapping_sub(u64::MAX / 2 + 1) as i64;
        });
        a.from_i64(&have, 63, log_k);
        let mut want = vec![i64::default(); n];
        a.to_i64(&mut want, log_k);
        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
    }

    #[test]
    fn test_normalize() {
        let n: usize = 8;
        let log_base2k: usize = 17;
        let limbs: usize = 5;
        let log_k: usize = limbs * log_base2k - 5;
        let mut a: VecZnx = VecZnx::new(n, log_base2k, limbs);
        let mut have: Vec<i64> = vec![i64::default(); n];
        let mut source = Source::new([1; 32]);
        have.iter_mut().for_each(|x| {
            *x = source
                .next_u64n(u64::MAX, u64::MAX)
                .wrapping_sub(u64::MAX / 2 + 1) as i64;
        });
        a.from_i64(&have, 63, log_k);
        let mut carry: Vec<u8> = vec![u8::default(); n * 8];
        a.normalize(&mut carry);

        let base_half = 1 << (log_base2k - 1);
        a.data.iter().for_each(|x| {
            assert!(
                x.abs() <= base_half,
                "|x|={} > 2^(log_base2k-1)={}",
                x,
                base_half
            )
        });
        let mut want = vec![i64::default(); n];
        a.to_i64(&mut want, log_k);
        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
    }
}