added spqlios as submodule

This commit is contained in:
Jean-Philippe Bossuat
2025-01-27 14:10:59 +01:00
parent 250d1a4942
commit c30f598776
244 changed files with 51 additions and 29899 deletions

35
base2k/src/lib.rs Normal file
View File

@@ -0,0 +1,35 @@
pub mod module;
pub mod scalar;
pub mod vector;

// Raw FFI bindings included from OUT_DIR — presumably generated by bindgen in
// the build script; confirm against build.rs. The lints below are silenced
// because the generated C identifiers do not follow Rust naming conventions.
#[allow(
    non_camel_case_types,
    non_snake_case,
    non_upper_case_globals,
    dead_code,
    improper_ctypes
)]
pub mod bindings {
    include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}

// Each arithmetic module adds methods to `Module` via `impl` blocks and is
// re-exported flat at the crate root.
pub mod vec_znx_arithmetic;
pub use vec_znx_arithmetic::*;
pub mod vec_znx_big_arithmetic;
pub use vec_znx_big_arithmetic::*;
pub mod vec_znx_dft;
pub use vec_znx_dft::*;
pub mod scalar_vector_product;
pub use scalar_vector_product::*;
/// Reinterprets a mutable `u64` slice as a mutable byte slice over the same
/// memory (native endianness); the result is `8 * data.len()` bytes long.
fn cast_mut_u64_to_mut_u8_slice(data: &mut [u64]) -> &mut [u8] {
    let byte_len: usize = std::mem::size_of_val(data);
    // SAFETY: the pointer comes from a valid `&mut [u64]`; `u8` has alignment
    // 1, and `byte_len` covers exactly the same allocation, so the resulting
    // slice stays in bounds and uniquely borrowed for the same lifetime.
    unsafe { std::slice::from_raw_parts_mut(data.as_mut_ptr().cast::<u8>(), byte_len) }
}
/// Reinterprets a mutable byte slice as a mutable `i64` slice over the same
/// memory (native endianness). Trailing bytes that do not fill a whole `i64`
/// are excluded from the resulting slice.
///
/// # Panics
/// Panics if `data` is not 8-byte aligned: the previous version silently
/// produced a misaligned slice, which is undefined behavior.
fn cast_mut_u8_to_mut_i64_slice(data: &mut [u8]) -> &mut [i64] {
    let ptr: *mut u8 = data.as_mut_ptr();
    assert!(
        (ptr as usize) % std::mem::align_of::<i64>() == 0,
        "cast_mut_u8_to_mut_i64_slice: buffer is not 8-byte aligned"
    );
    let len: usize = data.len() / std::mem::size_of::<i64>();
    // SAFETY: the pointer originates from a valid `&mut [u8]`, alignment was
    // checked above, and `len * 8 <= data.len()` keeps the slice in bounds.
    unsafe { std::slice::from_raw_parts_mut(ptr as *mut i64, len) }
}

47
base2k/src/module.rs Normal file
View File

@@ -0,0 +1,47 @@
use crate::bindings::{
module_info_t, new_module_info, svp_ppol_t, vec_znx_bigcoeff_t, vec_znx_dft_t, MODULE,
};
/// Discriminant type for the backend selection passed to `Module::new`
/// (forwarded to the C library as a `u32`).
pub type MODULETYPE = u8;
/// Selects the 64-bit floating-point FFT backend.
pub const FFT64: u8 = 0;
/// Selects the 120-bit NTT backend.
pub const NTT120: u8 = 1;
pub struct Module(pub *mut MODULE);
impl Module {
// Instantiates a new module.
pub fn new<const MODULETYPE: MODULETYPE>(n: usize) -> Self {
unsafe {
let m: *mut module_info_t = new_module_info(n as u64, MODULETYPE as u32);
if m.is_null() {
panic!("Failed to create module.");
}
Self(m)
}
}
}
/// Owning handle to an FFI scalar-vector-product prepared polynomial,
/// allocated via `Module::svp_new_ppol` and filled via `Module::svp_prepare`.
pub struct SVPPOL(pub *mut svp_ppol_t);
/// Owning handle to an FFI vector of Z[X]/(X^N+1) polynomials holding
/// non-normalized ("big") coefficients, paired with its limb count.
pub struct VECZNXBIG(pub *mut vec_znx_bigcoeff_t, pub usize);

impl VECZNXBIG {
    /// Reinterprets this buffer as a DFT-domain vector. This is a pointer
    /// cast only — no transform is performed; the caller is responsible for
    /// the data actually being in the DFT layout.
    pub fn as_vec_znx_dft(&mut self) -> VECZNXDFT {
        VECZNXDFT(self.0 as *mut vec_znx_dft_t, self.1)
    }

    /// Number of limbs this vector was allocated with.
    pub fn limbs(&self) -> usize {
        self.1
    }
}
/// Owning handle to an FFI vector of Z[X]/(X^N+1) polynomials stored in the
/// DFT domain, paired with its limb count.
pub struct VECZNXDFT(pub *mut vec_znx_dft_t, pub usize);

impl VECZNXDFT {
    /// Reinterprets this buffer as a big-coefficient vector. This is a
    /// pointer cast only — no inverse transform is performed (see
    /// `Module::vec_znx_idft` for the actual IDFT).
    pub fn as_vec_znx_big(&mut self) -> VECZNXBIG {
        VECZNXBIG(self.0 as *mut vec_znx_bigcoeff_t, self.1)
    }

    /// Number of limbs this vector was allocated with.
    pub fn limbs(&self) -> usize {
        self.1
    }
}

48
base2k/src/scalar.rs Normal file
View File

@@ -0,0 +1,48 @@
use rand::distributions::{Distribution, WeightedIndex};
use rand::seq::SliceRandom;
use rand_core::RngCore;
use sampling::source::Source;
/// A scalar polynomial: a single limb of `n` signed coefficients.
pub struct Scalar(pub Vec<i64>);

impl Scalar {
    /// Allocates a zeroed scalar of degree `n`.
    pub fn new(n: usize) -> Self {
        Self(vec![i64::default(); Self::buffer_size(n)])
    }

    /// Number of `i64` values backing a scalar of degree `n`.
    pub fn buffer_size(n: usize) -> usize {
        n
    }

    /// Replaces the coefficients with the first `buffer_size(n)` values of `buf`.
    ///
    /// # Panics
    /// Panics if `buf` holds fewer than `buffer_size(n)` values.
    pub fn from_buffer(&mut self, n: usize, buf: &[i64]) {
        let size = Self::buffer_size(n);
        assert!(
            buf.len() >= size,
            "invalid buffer: buf.len()={} < self.buffer_size(n={})={}",
            buf.len(),
            n,
            size
        );
        self.0 = Vec::from(&buf[..size])
    }

    /// Raw const pointer to the coefficient buffer (for FFI calls).
    pub fn as_ptr(&self) -> *const i64 {
        self.0.as_ptr()
    }

    /// Fills the coefficients with ternary values: each coefficient is -1 or
    /// +1 with probability `prob / 2` each, and 0 with probability `1 - prob`.
    pub fn fill_ternary_prob(&mut self, prob: f64, source: &mut Source) {
        let choices: [i64; 3] = [-1, 0, 1];
        let weights: [f64; 3] = [prob / 2.0, 1.0 - prob, prob / 2.0];
        let dist: WeightedIndex<f64> = WeightedIndex::new(&weights).unwrap();
        self.0
            .iter_mut()
            .for_each(|x: &mut i64| *x = choices[dist.sample(source)]);
    }

    /// Fills the coefficients with a ternary vector of exact Hamming weight
    /// `hw`: exactly `hw` coefficients are ±1 (uniform sign) and all others
    /// are 0, at uniformly shuffled positions.
    ///
    /// # Panics
    /// Panics if `hw` exceeds the number of coefficients.
    pub fn fill_ternary_hw(&mut self, hw: usize, source: &mut Source) {
        assert!(
            hw <= self.0.len(),
            "invalid hw: hw={} > buffer len={}",
            hw,
            self.0.len()
        );
        self.0[..hw]
            .iter_mut()
            .for_each(|x: &mut i64| *x = (((source.next_u32() & 1) as i64) << 1) - 1);
        // Fixed: zero the tail so the result has exactly `hw` non-zero
        // coefficients even when the buffer held earlier non-zero data.
        self.0[hw..].iter_mut().for_each(|x: &mut i64| *x = 0);
        self.0.shuffle(source);
    }
}

View File

@@ -0,0 +1,29 @@
use crate::bindings::{new_svp_ppol, svp_apply_dft, svp_prepare};
use crate::module::{Module, SVPPOL, VECZNXDFT};
use crate::scalar::Scalar;
use crate::vector::Vector;
impl Module {
    /// Prepares the scalar polynomial `a` into `svp_ppol` for later
    /// scalar x vector products.
    ///
    /// NOTE(review): the original comment claimed this panics when `a` has
    /// more than one limb, but no such check is visible on the Rust side —
    /// presumably enforced (or assumed) by the C call; confirm.
    pub fn svp_prepare(&self, svp_ppol: &mut SVPPOL, a: &Scalar) {
        unsafe { svp_prepare(self.0, svp_ppol.0, a.as_ptr()) }
    }

    /// Allocates a scalar-vector-product prepared polynomial (SVPPOL).
    pub fn svp_new_ppol(&self) -> SVPPOL {
        unsafe { SVPPOL(new_svp_ppol(self.0)) }
    }

    /// Applies a scalar x vector product: `c <- a (ppol) x b`.
    ///
    /// # Panics
    /// Panics if `c` has fewer limbs than `b`.
    pub fn svp_apply_dft(&self, c: &mut VECZNXDFT, a: &SVPPOL, b: &Vector) {
        let limbs: u64 = b.limbs() as u64;
        assert!(
            c.limbs() as u64 >= limbs,
            "invalid c_vector: c_vector.limbs()={} < b.limbs()={}",
            c.limbs(),
            limbs
        );
        // Both the output and input limb counts passed to the FFI are b's
        // limb count; any extra limbs of `c` are presumably left untouched —
        // confirm against the C documentation.
        unsafe { svp_apply_dft(self.0, c.0, limbs, a.0, b.as_ptr(), limbs, b.n() as u64) }
    }
}

View File

@@ -0,0 +1,35 @@
use crate::bindings::vec_znx_automorphism;
use crate::module::Module;
use crate::vector::Vector;
impl Module {
    /// Applies the Galois automorphism X -> X^gal_el: `b <- automorphism(a)`.
    /// Limb and degree counts are taken from each vector independently.
    pub fn vec_znx_automorphism(&self, gal_el: i64, b: &mut Vector, a: &Vector) {
        unsafe {
            vec_znx_automorphism(
                self.0,
                gal_el,
                b.as_mut_ptr(),
                b.limbs() as u64,
                b.n() as u64,
                a.as_ptr(),
                a.limbs() as u64,
                a.n() as u64,
            );
        }
    }

    /// In-place variant: `a <- automorphism(a)`. The same buffer is passed as
    /// both destination and source — presumably the C routine supports
    /// aliased operands; confirm against its documentation.
    pub fn vec_znx_automorphism_inplace(&self, gal_el: i64, a: &mut Vector) {
        unsafe {
            vec_znx_automorphism(
                self.0,
                gal_el,
                a.as_mut_ptr(),
                a.limbs() as u64,
                a.n() as u64,
                a.as_ptr(),
                a.limbs() as u64,
                a.n() as u64,
            );
        }
    }
}

View File

@@ -0,0 +1,162 @@
use crate::bindings::{
new_vec_znx_big, vec_znx_big_add_small, vec_znx_big_automorphism, vec_znx_big_normalize_base2k,
vec_znx_big_normalize_base2k_tmp_bytes, vec_znx_big_sub_small_a,
};
use crate::module::{Module, VECZNXBIG};
use crate::vector::Vector;
impl Module {
    /// Allocates a vector of Z[X]/(X^N+1) polynomials storing non-normalized
    /// ("big") coefficients, with `limbs` limbs.
    pub fn new_vec_znx_big(&self, limbs: usize) -> VECZNXBIG {
        unsafe { VECZNXBIG(new_vec_znx_big(self.0, limbs as u64), limbs) }
    }

    /// b <- b - a (in place; `a` is the small/normalized operand).
    ///
    /// NOTE(review): per the FFI name `sub_small_a`, `a` is the small operand;
    /// whether the result is `b - a` or `a - b` should be confirmed against
    /// the C library's documentation.
    ///
    /// # Panics
    /// Panics if `b.limbs() < a.limbs()`.
    pub fn vec_znx_big_sub_small_a_inplace(&self, b: &mut VECZNXBIG, a: &Vector) {
        let limbs: usize = a.limbs();
        assert!(
            b.limbs() >= limbs,
            "invalid c_vector: b.limbs()={} < a.limbs()={}",
            b.limbs(),
            limbs
        );
        unsafe {
            vec_znx_big_sub_small_a(
                self.0,
                b.0,
                b.limbs() as u64,
                a.as_ptr(),
                limbs as u64,
                a.n() as u64,
                b.0,
                b.limbs() as u64,
            )
        }
    }

    /// c <- b - a (see operand-order NOTE on the in-place variant above).
    ///
    /// # Panics
    /// Panics if `b` or `c` has fewer limbs than `a`.
    pub fn big_sub_small_a(&self, c: &mut VECZNXBIG, a: &Vector, b: &VECZNXBIG) {
        let limbs: usize = a.limbs();
        assert!(
            b.limbs() >= limbs,
            "invalid c: b.limbs()={} < a.limbs()={}",
            b.limbs(),
            limbs
        );
        assert!(
            c.limbs() >= limbs,
            "invalid c: c.limbs()={} < a.limbs()={}",
            c.limbs(),
            limbs
        );
        unsafe {
            vec_znx_big_sub_small_a(
                self.0,
                c.0,
                c.limbs() as u64,
                a.as_ptr(),
                limbs as u64,
                a.n() as u64,
                b.0,
                b.limbs() as u64,
            )
        }
    }

    /// c <- b + a.
    ///
    /// # Panics
    /// Panics if `b` or `c` has fewer limbs than `a`.
    pub fn vec_znx_big_add_small(&self, c: &mut VECZNXBIG, a: &Vector, b: &VECZNXBIG) {
        let limbs: usize = a.limbs();
        assert!(
            b.limbs() >= limbs,
            "invalid c: b.limbs()={} < a.limbs()={}",
            b.limbs(),
            limbs
        );
        assert!(
            c.limbs() >= limbs,
            "invalid c: c.limbs()={} < a.limbs()={}",
            c.limbs(),
            limbs
        );
        // All three limb counts passed to the FFI are a's limb count — unlike
        // the sub variants, which pass c.limbs()/b.limbs(); presumably
        // intentional for addition, confirm.
        unsafe {
            vec_znx_big_add_small(
                self.0,
                c.0,
                limbs as u64,
                b.0,
                limbs as u64,
                a.as_ptr(),
                limbs as u64,
                a.n() as u64,
            )
        }
    }

    /// b <- b + a (in place).
    ///
    /// # Panics
    /// Panics if `b.limbs() < a.limbs()`.
    pub fn vec_znx_big_add_small_inplace(&self, b: &mut VECZNXBIG, a: &Vector) {
        let limbs: usize = a.limbs();
        assert!(
            b.limbs() >= limbs,
            "invalid c_vector: b.limbs()={} < a.limbs()={}",
            b.limbs(),
            limbs
        );
        unsafe {
            vec_znx_big_add_small(
                self.0,
                b.0,
                limbs as u64,
                b.0,
                limbs as u64,
                a.as_ptr(),
                limbs as u64,
                a.n() as u64,
            )
        }
    }

    /// Returns the scratch-space size required by [`Self::vec_znx_big_normalize`].
    pub fn vec_znx_big_normalize_tmp_bytes(&self) -> usize {
        unsafe { vec_znx_big_normalize_base2k_tmp_bytes(self.0) as usize }
    }

    /// b <- normalize(a): carries the big coefficients of `a` into the
    /// base-2^k representation of `b`.
    ///
    /// # Panics
    /// Panics if `a` has fewer limbs than `b` (the FFI reads `b.limbs()`
    /// limbs from `a`), or if `tmp_bytes` is smaller than
    /// [`Self::vec_znx_big_normalize_tmp_bytes`].
    pub fn vec_znx_big_normalize(&self, b: &mut Vector, a: &VECZNXBIG, tmp_bytes: &mut [u8]) {
        let limbs: usize = b.limbs();
        // Fixed: the original assert compared b.limbs() with itself (always
        // true); the FFI reads `limbs` limbs from `a`, so `a` must provide at
        // least that many.
        assert!(
            a.limbs() >= limbs,
            "invalid a: a.limbs()={} < b.limbs()={}",
            a.limbs(),
            limbs
        );
        // Fixed: the original condition was inverted (`<=`), accepting
        // undersized scratch buffers and rejecting valid larger ones.
        assert!(
            tmp_bytes.len() >= self.vec_znx_big_normalize_tmp_bytes(),
            "invalid tmp_bytes: tmp_bytes.len()={} < self.vec_znx_big_normalize_tmp_bytes()={}",
            tmp_bytes.len(),
            self.vec_znx_big_normalize_tmp_bytes()
        );
        unsafe {
            vec_znx_big_normalize_base2k(
                self.0,
                b.log_base2k as u64,
                b.as_mut_ptr(),
                limbs as u64,
                b.n() as u64,
                a.0,
                limbs as u64,
                tmp_bytes.as_mut_ptr(),
            )
        }
    }

    /// b <- automorphism(a) on big-coefficient vectors.
    pub fn big_automorphism(&self, gal_el: i64, b: &mut VECZNXBIG, a: &VECZNXBIG) {
        unsafe {
            vec_znx_big_automorphism(self.0, gal_el, b.0, b.limbs() as u64, a.0, a.limbs() as u64);
        }
    }

    /// a <- automorphism(a) in place (aliased operands; presumably supported
    /// by the C routine — confirm).
    pub fn big_automorphism_inplace(&self, gal_el: i64, a: &mut VECZNXBIG) {
        unsafe {
            vec_znx_big_automorphism(self.0, gal_el, a.0, a.limbs() as u64, a.0, a.limbs() as u64);
        }
    }
}

63
base2k/src/vec_znx_dft.rs Normal file
View File

@@ -0,0 +1,63 @@
use crate::bindings::{new_vec_znx_dft, vec_znx_idft, vec_znx_idft_tmp_a, vec_znx_idft_tmp_bytes};
use crate::module::{Module, VECZNXBIG, VECZNXDFT};
impl Module {
    /// Allocates a DFT-domain vector of Z[X]/(X^N+1) polynomials with
    /// `limbs` limbs.
    pub fn new_vec_znx_dft(&self, limbs: usize) -> VECZNXDFT {
        unsafe { VECZNXDFT(new_vec_znx_dft(self.0, limbs as u64), limbs) }
    }

    /// b <- IDFT(a), using `a` itself as scratch space (a's contents are
    /// clobbered).
    ///
    /// # Panics
    /// Panics if `b` has fewer limbs than `a_limbs`.
    pub fn vec_znx_idft_tmp_a(&self, b: &mut VECZNXBIG, a: &mut VECZNXDFT, a_limbs: usize) {
        assert!(
            b.limbs() >= a_limbs,
            "invalid c_vector: b_vector.limbs()={} < a_limbs={}",
            b.limbs(),
            a_limbs
        );
        unsafe { vec_znx_idft_tmp_a(self.0, b.0, a_limbs as u64, a.0, a_limbs as u64) }
    }

    /// Returns the scratch-space size required by [`Self::vec_znx_idft`].
    pub fn vec_znx_idft_tmp_bytes(&self) -> usize {
        unsafe { vec_znx_idft_tmp_bytes(self.0) as usize }
    }

    /// b <- IDFT(a); `tmp_bytes` must be at least
    /// [`Self::vec_znx_idft_tmp_bytes`] bytes.
    ///
    /// # Panics
    /// Panics if either vector has fewer limbs than `a_limbs`, or if the
    /// scratch buffer is too small.
    pub fn vec_znx_idft(
        &self,
        b_vector: &mut VECZNXBIG,
        a_vector: &mut VECZNXDFT,
        a_limbs: usize,
        tmp_bytes: &mut [u8],
    ) {
        // Fixed: messages previously referred to a non-existent `c_vector`.
        assert!(
            b_vector.limbs() >= a_limbs,
            "invalid b_vector: b_vector.limbs()={} < a_limbs={}",
            b_vector.limbs(),
            a_limbs
        );
        assert!(
            a_vector.limbs() >= a_limbs,
            "invalid a_vector: a_vector.limbs()={} < a_limbs={}",
            a_vector.limbs(),
            a_limbs
        );
        // Fixed: the original condition was inverted (`<=`), accepting
        // undersized scratch buffers and rejecting valid larger ones.
        assert!(
            tmp_bytes.len() >= self.vec_znx_idft_tmp_bytes(),
            "invalid tmp_bytes: tmp_bytes.len()={} < self.vec_znx_idft_tmp_bytes()={}",
            tmp_bytes.len(),
            self.vec_znx_idft_tmp_bytes()
        );
        unsafe {
            vec_znx_idft(
                self.0,
                b_vector.0,
                a_limbs as u64,
                a_vector.0,
                a_limbs as u64,
                tmp_bytes.as_mut_ptr(),
            )
        }
    }
}

328
base2k/src/vector.rs Normal file
View File

@@ -0,0 +1,328 @@
use crate::bindings::{
znx_automorphism_i64, znx_automorphism_inplace_i64, znx_normalize, znx_zero_i64_ref,
};
use crate::cast_mut_u8_to_mut_i64_slice;
use itertools::izip;
use rand_distr::{Distribution, Normal};
use sampling::source::Source;
use std::cmp::min;
/// A polynomial of Z[X]/(X^N+1) stored as a base-2^k limb decomposition:
/// `data` holds `limbs()` contiguous rows of `n` coefficients each, with row
/// 0 the most-significant limb (see `get_i64`).
pub struct Vector {
    /// Ring degree N (number of coefficients per limb).
    pub n: usize,
    /// Bits per limb (the base-2^k digit size).
    pub log_base2k: usize,
    /// Total precision in bits represented by the limbs.
    pub prec: usize,
    /// Limb-major coefficient storage; length = n * limbs().
    pub data: Vec<i64>,
}
impl Vector {
    /// Allocates a zeroed vector of ring degree `n`, base-2^`log_base2k`
    /// limbs, and `prec` bits of total precision.
    pub fn new(n: usize, log_base2k: usize, prec: usize) -> Self {
        Self {
            n: n,
            log_base2k: log_base2k,
            prec: prec,
            data: vec![i64::default(); Self::buffer_size(n, log_base2k, prec)],
        }
    }

    /// Number of i64 values needed: `n * ceil(prec / log_base2k)`.
    pub fn buffer_size(n: usize, log_base2k: usize, prec: usize) -> usize {
        n * ((prec + log_base2k - 1) / log_base2k)
    }

    /// Reinitializes this vector (dimensions and contents) from the first
    /// `buffer_size(n, log_base2k, prec)` values of `buf`.
    ///
    /// # Panics
    /// Panics if `buf` is shorter than the required buffer size.
    pub fn from_buffer(&mut self, n: usize, log_base2k: usize, prec: usize, buf: &[i64]) {
        let size = Self::buffer_size(n, log_base2k, prec);
        assert!(
            buf.len() >= size,
            "invalid buffer: buf.len()={} < self.buffer_size(n={}, k={}, prec={})={}",
            buf.len(),
            n,
            log_base2k,
            prec,
            size
        );
        self.n = n;
        self.log_base2k = log_base2k;
        self.prec = prec;
        self.data = Vec::from(&buf[..size])
    }

    /// ceil(log2(n)).
    ///
    /// NOTE(review): mixes `u64::BITS` (64) with `usize::leading_zeros`
    /// (counts within usize's width), so this is only correct on 64-bit
    /// targets; it also underflows for n == 0. Confirm intended targets.
    pub fn log_n(&self) -> u64 {
        (u64::BITS - (self.n - 1).leading_zeros()) as _
    }

    /// Ring degree N (coefficients per limb).
    pub fn n(&self) -> usize {
        self.n
    }

    /// Total precision in bits.
    pub fn prec(&self) -> usize {
        self.prec
    }

    /// Number of limbs, derived from the storage length.
    pub fn limbs(&self) -> usize {
        self.data.len() / self.n
    }

    /// Raw const pointer to the coefficient storage (for FFI calls).
    pub fn as_ptr(&self) -> *const i64 {
        self.data.as_ptr()
    }

    /// Raw mutable pointer to the coefficient storage (for FFI calls).
    pub fn as_mut_ptr(&mut self) -> *mut i64 {
        self.data.as_mut_ptr()
    }

    /// The `i`-th limb as a slice of `n` coefficients (limb 0 is the most
    /// significant; see `get_i64`).
    pub fn at(&self, i: usize) -> &[i64] {
        &self.data[i * self.n..(i + 1) * self.n]
    }

    /// Const pointer to the start of the `i`-th limb.
    pub fn at_ptr(&self, i: usize) -> *const i64 {
        &self.data[i * self.n] as *const i64
    }

    /// Mutable pointer to the start of the `i`-th limb.
    pub fn at_mut_ptr(&mut self, i: usize) -> *mut i64 {
        &mut self.data[i * self.n] as *mut i64
    }

    /// The `i`-th limb as a mutable slice of `n` coefficients.
    pub fn at_mut(&mut self, i: usize) -> &mut [i64] {
        &mut self.data[i * self.n..(i + 1) * self.n]
    }

    /// Encodes `data` (coefficients of magnitude at most 2^`log_max`) into
    /// the base-2^k limb decomposition, writing the least-significant digits
    /// into the last limbs.
    ///
    /// NOTE(review): when `data.len() < self.n()`, the fast path calls
    /// `copy_from_slice` with mismatched lengths and panics, while the
    /// decomposition path supports partial slices — confirm that callers
    /// always pass full-length data.
    pub fn set_i64(&mut self, data: &[i64], log_max: usize) {
        let size: usize = min(data.len(), self.n());
        // Unused low bits of the final (least-significant) limb when prec is
        // not a multiple of log_base2k.
        let k_rem: usize = self.log_base2k - (self.prec % self.log_base2k);
        // If 2^{log_base2k} * 2^{k_rem} < 2^{63}-1, then we can simply copy
        // values on the last limb.
        // Else we decompose values base2k.
        if log_max + k_rem < 63 || k_rem == self.log_base2k {
            self.at_mut(self.limbs() - 1).copy_from_slice(&data[..size]);
        } else {
            let mask: i64 = (1 << self.log_base2k) - 1;
            let limbs = self.limbs();
            // Only as many limbs as log_max bits actually require.
            let steps: usize = min(limbs, (log_max + self.log_base2k - 1) / self.log_base2k);
            // i counts digits from the least-significant end; i_rev is the
            // corresponding limb index (descending from limbs-1).
            (limbs - steps..limbs)
                .rev()
                .enumerate()
                .for_each(|(i, i_rev)| {
                    let shift: usize = i * self.log_base2k;
                    izip!(self.at_mut(i_rev)[..size].iter_mut(), data[..size].iter())
                        .for_each(|(y, x)| *y = (x >> shift) & mask);
                })
        }
        // Case where self.prec % self.k != 0: pre-shift the digits into the
        // high bits of each touched limb.
        if k_rem != self.log_base2k {
            let limbs = self.limbs();
            let steps: usize = min(limbs, (log_max + self.log_base2k - 1) / self.log_base2k);
            (limbs - steps..limbs).rev().for_each(|i| {
                self.at_mut(i)[..size].iter_mut().for_each(|x| *x <<= k_rem);
            })
        }
    }

    /// Normalizes all limbs in place, propagating carries from the
    /// least-significant limb (index limbs()-1) upward via the FFI
    /// `znx_normalize` (post-condition per the crate's tests: |coeff| is at
    /// most 2^(log_base2k-1)).
    ///
    /// `carry` is scratch space of at least `n * 8` bytes, reinterpreted as
    /// `n` i64 values — it must be 8-byte aligned (see
    /// `cast_mut_u8_to_mut_i64_slice`).
    ///
    /// # Panics
    /// Panics if `carry` is smaller than `n * 8` bytes.
    pub fn normalize(&mut self, carry: &mut [u8]) {
        assert!(
            carry.len() >= self.n * 8,
            "invalid carry: carry.len()={} < self.n()={}",
            carry.len(),
            self.n()
        );
        let carry_i64: &mut [i64] = cast_mut_u8_to_mut_i64_slice(carry);
        unsafe {
            znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
            (0..self.limbs()).rev().for_each(|i| {
                // The limb and carry pointers are each passed twice —
                // presumably (out, out_carry, in, in_carry); confirm against
                // the FFI signature.
                znx_normalize(
                    self.n as u64,
                    self.log_base2k as u64,
                    self.at_mut_ptr(i),
                    carry_i64.as_mut_ptr(),
                    self.at_mut_ptr(i),
                    carry_i64.as_mut_ptr(),
                )
            });
        }
    }

    /// Decodes the limb decomposition back into plain i64 coefficients,
    /// accumulating from the most-significant limb (index 0) downward, with
    /// special handling of the final partial limb when `prec` is not a
    /// multiple of `log_base2k`.
    ///
    /// # Panics
    /// Panics if `data.len() < n`. NOTE(review): `copy_from_slice` also
    /// panics when `data.len() > n` despite the `>=` assert — confirm callers
    /// pass exactly `n` values.
    pub fn get_i64(&self, data: &mut [i64]) {
        assert!(
            data.len() >= self.n,
            "invalid data: data.len()={} < self.n()={}",
            data.len(),
            self.n
        );
        data.copy_from_slice(self.at(0));
        // Unused low bits of the final limb.
        let rem: usize = self.log_base2k - (self.prec % self.log_base2k);
        (1..self.limbs()).for_each(|i| {
            if i == self.limbs() - 1 && rem != self.log_base2k {
                // Partial final limb: only prec % log_base2k bits are valid,
                // stored pre-shifted into the high bits (see set_i64).
                let k_rem: usize = self.log_base2k - rem;
                izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
                    *y = (*y << k_rem) + (x >> rem);
                });
            } else {
                izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
                    *y = (*y << self.log_base2k) + x;
                });
            }
        })
    }

    /// Applies the Galois automorphism X -> X^gal_el to every limb in place.
    pub fn automorphism_inplace(&mut self, gal_el: i64) {
        unsafe {
            (0..self.limbs()).for_each(|i| {
                znx_automorphism_inplace_i64(self.n as u64, gal_el, self.at_mut_ptr(i))
            })
        }
    }

    /// Writes the automorphism of `self` into `a`: a <- automorphism(self).
    ///
    /// NOTE(review): `self` is only read here, so `&self` would suffice;
    /// also the destination/source roles are the reverse of what the
    /// receiver/argument names suggest — confirm call sites.
    pub fn automorphism(&mut self, gal_el: i64, a: &mut Vector) {
        unsafe {
            (0..self.limbs()).for_each(|i| {
                znx_automorphism_i64(self.n as u64, gal_el, a.at_mut_ptr(i), self.at_ptr(i))
            })
        }
    }

    /// Fills all limbs with uniform values centered at zero; when `prec` is
    /// not a multiple of `log_base2k`, the final limb is drawn over its
    /// reduced bit-width, pre-shifted into the high bits.
    ///
    /// NOTE(review): relies on `Source::next_u64n(max, mask)` semantics from
    /// the sampling crate — confirm the (base2k, mask) arguments used for the
    /// partial-limb branch.
    pub fn fill_uniform(&mut self, source: &mut Source) {
        let mut base2k: u64 = 1 << self.log_base2k;
        let mut mask: u64 = base2k - 1;
        let mut base2k_half: i64 = (base2k >> 1) as i64;
        // All limbs except the last one are full-width.
        let size: usize = self.n() * (self.limbs() - 1);
        self.data[..size]
            .iter_mut()
            .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
        let log_base2k_rem: usize = self.prec % self.log_base2k;
        if log_base2k_rem != 0 {
            base2k = 1 << log_base2k_rem;
            mask = (base2k - 1) << (self.log_base2k - log_base2k_rem);
            base2k_half = ((mask >> 1) + 1) as i64;
        }
        self.data[size..]
            .iter_mut()
            .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
    }

    /// Adds bounded noise to the least-significant limb: samples from `dist`
    /// with rejection until |x| <= bound, rounds to the nearest integer, and
    /// — when `prec` is not a multiple of `log_base2k` — shifts the noise up
    /// past the unused low bits of the final limb.
    pub fn add_dist_f64<T: Distribution<f64>>(&mut self, source: &mut Source, dist: T, bound: f64) {
        let log_base2k_rem: usize = self.prec % self.log_base2k;
        if log_base2k_rem != 0 {
            self.at_mut(self.limbs() - 1).iter_mut().for_each(|a| {
                let mut dist_f64: f64 = dist.sample(source);
                while dist_f64.abs() > bound {
                    dist_f64 = dist.sample(source)
                }
                *a += (dist_f64.round() as i64) << log_base2k_rem
            });
        } else {
            self.at_mut(self.limbs() - 1).iter_mut().for_each(|a| {
                let mut dist_f64: f64 = dist.sample(source);
                while dist_f64.abs() > bound {
                    dist_f64 = dist.sample(source)
                }
                *a += dist_f64.round() as i64
            });
        }
    }

    /// Adds rounded, bound-truncated Gaussian noise with standard deviation
    /// `sigma` to the least-significant limb.
    pub fn add_normal(&mut self, source: &mut Source, sigma: f64, bound: f64) {
        self.add_dist_f64(source, Normal::new(0.0, sigma).unwrap(), bound);
    }

    /// Truncates the precision by `k` bits: drops whole low limbs and masks
    /// off the remaining low bits of the new final limb.
    ///
    /// NOTE(review): the mask keeps `log_base2k - k_rem - 1` bits, which
    /// looks like one fewer high bit than a plain
    /// `((1 << (log_base2k - k_rem)) - 1) << k_rem` mask — confirm intended.
    ///
    /// # Panics
    /// Panics if `k > self.prec`.
    pub fn trunc_pow2(&mut self, k: usize) {
        if k == 0 {
            return;
        }
        assert!(
            k <= self.prec,
            "invalid argument k: k={} > self.prec()={}",
            k,
            self.prec()
        );
        self.prec -= k;
        // limbs() is still based on the old data length here; whole limbs
        // are dropped first, then the partial remainder is masked.
        self.data
            .truncate((self.limbs() - k / self.log_base2k) * self.n());
        let k_rem: usize = k % self.log_base2k;
        if k_rem != 0 {
            let mask: i64 = ((1 << (self.log_base2k - k_rem - 1)) - 1) << k_rem;
            self.at_mut(self.limbs() - 1)
                .iter_mut()
                .for_each(|x: &mut i64| *x &= mask)
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::vector::Vector;
    use itertools::izip;
    use sampling::source::Source;

    /// Round-trips small coefficients through set_i64/get_i64 (exercises the
    /// fast copy path, since log_max=10 fits in the last limb).
    #[test]
    fn test_set_get_i64_lo_norm() {
        let n: usize = 32;
        let k: usize = 19;
        let prec: usize = 128;
        let mut a: Vector = Vector::new(n, k, prec);
        let mut have: Vec<i64> = vec![i64::default(); n];
        have.iter_mut()
            .enumerate()
            .for_each(|(i, x)| *x = (i as i64) - (n as i64) / 2);
        a.set_i64(&have, 10);
        let mut want = vec![i64::default(); n];
        a.get_i64(&mut want);
        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b));
    }

    /// Round-trips full-range 63-bit coefficients (exercises the base-2^k
    /// decomposition path, with prec not a multiple of k).
    #[test]
    fn test_set_get_i64_hi_norm() {
        let n: usize = 8;
        let k: usize = 17;
        let prec: usize = 84;
        let mut a: Vector = Vector::new(n, k, prec);
        let mut have: Vec<i64> = vec![i64::default(); n];
        let mut source = Source::new([1; 32]);
        have.iter_mut().for_each(|x| {
            *x = source
                .next_u64n(u64::MAX, u64::MAX)
                .wrapping_sub(u64::MAX / 2 + 1) as i64;
        });
        a.set_i64(&have, 63);
        //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
        let mut want = vec![i64::default(); n];
        //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
        a.get_i64(&mut want);
        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
    }

    /// Checks that normalize() bounds every stored coefficient by 2^(k-1)
    /// while preserving the encoded value.
    #[test]
    fn test_normalize() {
        let n: usize = 8;
        let k: usize = 17;
        let prec: usize = 84;
        let mut a: Vector = Vector::new(n, k, prec);
        let mut have: Vec<i64> = vec![i64::default(); n];
        let mut source = Source::new([1; 32]);
        have.iter_mut().for_each(|x| {
            *x = source
                .next_u64n(u64::MAX, u64::MAX)
                .wrapping_sub(u64::MAX / 2 + 1) as i64;
        });
        a.set_i64(&have, 63);
        // Carry scratch: n i64 values as raw bytes (8-byte aligned via Vec<u8>
        // in practice; see cast_mut_u8_to_mut_i64_slice).
        let mut carry: Vec<u8> = vec![u8::default(); n * 8];
        a.normalize(&mut carry);
        let base_half = 1 << (k - 1);
        a.data
            .iter()
            .for_each(|x| assert!(x.abs() <= base_half, "|x|={} > 2^(k-1)={}", x, base_half));
        let mut want = vec![i64::default(); n];
        a.get_i64(&mut want);
        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
    }
}