wip: major refactoring (compiles; all tests and examples passing)

Jean-Philippe Bossuat
2025-04-30 13:43:18 +02:00
parent 2cc51eee18
commit 6f7b93c7ca
18 changed files with 662 additions and 870 deletions


@@ -1,5 +1,6 @@
-use crate::ffi::vec_znx;
-use crate::{Backend, FFT64, Module, VecZnx, VecZnxBig, VecZnxOps, ZnxBase, ZnxInfos, ZnxLayout, assert_alignement};
+use crate::ffi::vec_znx_big::{self, vec_znx_big_t};
+use crate::znx_base::{ZnxAlloc, ZnxInfos, ZnxLayout, ZnxSliceSize};
+use crate::{Backend, FFT64, Module, VecZnx, VecZnxBig, VecZnxOps, assert_alignement};
 
 pub trait VecZnxBigOps<B: Backend> {
     /// Allocates a vector over Z[X]/(X^N+1) that stores non-normalized values.
@@ -17,7 +18,7 @@ pub trait VecZnxBigOps<B: Backend> {
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_big].
-    fn new_vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: &mut [u8]) -> VecZnxBig<B>;
+    fn new_vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBig<B>;
 
     /// Returns a new [VecZnxBig] with the provided bytes array as the backing array.
     ///
@@ -41,74 +42,74 @@ pub trait VecZnxBigOps<B: Backend> {
     fn vec_znx_big_add(
         &self,
         res: &mut VecZnxBig<B>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<B>,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnxBig<B>,
-        col_b: usize,
+        b_col: usize,
     );
 
     /// Adds `a` to `res` and stores the result in `res`.
-    fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
+    fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnxBig<B>, a_col: usize);
 
     /// Adds `a` and `b` and stores the result in `res`.
     fn vec_znx_big_add_small(
         &self,
         res: &mut VecZnxBig<B>,
-        col_res: usize,
-        a: &VecZnx,
-        col_a: usize,
-        b: &VecZnxBig<B>,
-        col_b: usize,
+        res_col: usize,
+        a: &VecZnxBig<B>,
+        a_col: usize,
+        b: &VecZnx,
+        b_col: usize,
     );
 
     /// Adds `a` to `res` and stores the result in `res`.
-    fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
+    fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnx, a_col: usize);
 
     /// Subtracts `b` from `a` and stores the result in `res`.
     fn vec_znx_big_sub(
         &self,
         res: &mut VecZnxBig<B>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<B>,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnxBig<B>,
-        col_b: usize,
+        b_col: usize,
     );
 
     /// Subtracts `res` from `a` and stores the result in `res`.
-    fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
+    fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnxBig<B>, a_col: usize);
 
     /// Subtracts `a` from `res` and stores the result in `res`.
-    fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
+    fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnxBig<B>, a_col: usize);
 
     /// Subtracts `b` from `a` and stores the result in `res`.
     fn vec_znx_big_sub_small_a(
         &self,
         res: &mut VecZnxBig<B>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnx,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnxBig<B>,
-        col_b: usize,
+        b_col: usize,
     );
 
     /// Subtracts `res` from `a` and stores the result in `res`.
-    fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
+    fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnx, a_col: usize);
 
     /// Subtracts `b` from `a` and stores the result in `res`.
     fn vec_znx_big_sub_small_b(
         &self,
         res: &mut VecZnxBig<B>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<B>,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnx,
-        col_b: usize,
+        b_col: usize,
     );
 
     /// Subtracts `a` from `res` and stores the result in `res`.
-    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
+    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnx, a_col: usize);
 
     /// Returns the minimum number of bytes to apply [VecZnxBigOps::vec_znx_big_normalize].
     fn vec_znx_big_normalize_tmp_bytes(&self) -> usize;
@@ -123,44 +124,44 @@ pub trait VecZnxBigOps<B: Backend> {
         &self,
         log_base2k: usize,
         res: &mut VecZnx,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<B>,
-        col_a: usize,
+        a_col: usize,
         tmp_bytes: &mut [u8],
     );
 
     /// Applies the automorphism X^i -> X^ik on `a` and stores the result in `res`.
-    fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
+    fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<B>, res_col: usize, a: &VecZnxBig<B>, a_col: usize);
 
     /// Applies the automorphism X^i -> X^ik on `a` and stores the result in `a`.
-    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<B>, col_a: usize);
+    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<B>, a_col: usize);
 }
 
 impl VecZnxBigOps<FFT64> for Module<FFT64> {
     fn new_vec_znx_big(&self, cols: usize, size: usize) -> VecZnxBig<FFT64> {
-        VecZnxBig::new(self, cols, size)
+        VecZnxBig::new(self, 1, cols, size)
     }
 
-    fn new_vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: &mut [u8]) -> VecZnxBig<FFT64> {
-        VecZnxBig::from_bytes(self, cols, size, bytes)
+    fn new_vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBig<FFT64> {
+        VecZnxBig::from_bytes(self, 1, cols, size, bytes)
     }
 
     fn new_vec_znx_big_from_bytes_borrow(&self, cols: usize, size: usize, tmp_bytes: &mut [u8]) -> VecZnxBig<FFT64> {
-        VecZnxBig::from_bytes_borrow(self, cols, size, tmp_bytes)
+        VecZnxBig::from_bytes_borrow(self, 1, cols, size, tmp_bytes)
     }
 
     fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize {
-        VecZnxBig::bytes_of(self, cols, size)
+        VecZnxBig::bytes_of(self, 1, cols, size)
     }
 
     fn vec_znx_big_add(
         &self,
         res: &mut VecZnxBig<FFT64>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<FFT64>,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnxBig<FFT64>,
-        col_b: usize,
+        b_col: usize,
     ) {
         #[cfg(debug_assertions)]
         {
@@ -170,36 +171,33 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
             assert_ne!(a.as_ptr(), b.as_ptr());
         }
 
         unsafe {
-            vec_znx::vec_znx_add(
+            vec_znx_big::vec_znx_big_add(
                 self.ptr,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col * res.size(), 0) as *mut vec_znx_big_t,
                 res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col * res.size(), 0) as *const vec_znx_big_t,
                 a.size() as u64,
-                a.sl() as u64,
-                b.at_ptr(col_b, 0),
+                b.at_ptr(b_col * res.size(), 0) as *const vec_znx_big_t,
                 b.size() as u64,
-                b.sl() as u64,
             )
         }
     }
 
-    fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+    fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnxBig<FFT64>, a_col: usize) {
         unsafe {
             let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_add(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
+            Self::vec_znx_big_add(self, &mut *res_ptr, res_col, a, a_col, &*res_ptr, res_col);
         }
     }
 
     fn vec_znx_big_sub(
         &self,
         res: &mut VecZnxBig<FFT64>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<FFT64>,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnxBig<FFT64>,
-        col_b: usize,
+        b_col: usize,
     ) {
         #[cfg(debug_assertions)]
         {
@@ -209,43 +207,40 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
             assert_ne!(a.as_ptr(), b.as_ptr());
         }
 
         unsafe {
-            vec_znx::vec_znx_sub(
+            vec_znx_big::vec_znx_big_sub(
                 self.ptr,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col * res.size(), 0) as *mut vec_znx_big_t,
                 res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col * res.size(), 0) as *const vec_znx_big_t,
                 a.size() as u64,
-                a.sl() as u64,
-                b.at_ptr(col_b, 0),
+                b.at_ptr(b_col * res.size(), 0) as *const vec_znx_big_t,
                 b.size() as u64,
-                b.sl() as u64,
             )
         }
     }
 
-    fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+    fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnxBig<FFT64>, a_col: usize) {
         unsafe {
             let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
+            Self::vec_znx_big_sub(self, &mut *res_ptr, res_col, a, a_col, &*res_ptr, res_col);
         }
     }
 
-    fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+    fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnxBig<FFT64>, a_col: usize) {
         unsafe {
             let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub(self, &mut *res_ptr, col_res, &*res_ptr, col_res, a, col_a);
+            Self::vec_znx_big_sub(self, &mut *res_ptr, res_col, &*res_ptr, res_col, a, a_col);
         }
     }
 
     fn vec_znx_big_sub_small_b(
         &self,
         res: &mut VecZnxBig<FFT64>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<FFT64>,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnx,
-        col_b: usize,
+        b_col: usize,
     ) {
         #[cfg(debug_assertions)]
         {
@@ -255,36 +250,34 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
             assert_ne!(a.as_ptr(), b.as_ptr());
         }
 
         unsafe {
-            vec_znx::vec_znx_sub(
+            vec_znx_big::vec_znx_big_sub_small_b(
                 self.ptr,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col * res.size(), 0) as *mut vec_znx_big_t,
                 res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col * a.size(), 0) as *const vec_znx_big_t,
                 a.size() as u64,
-                a.sl() as u64,
-                b.at_ptr(col_b, 0),
+                b.at_ptr(b_col, 0),
                 b.size() as u64,
                 b.sl() as u64,
             )
         }
     }
 
-    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
+    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnx, a_col: usize) {
         unsafe {
             let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub_small_b(self, &mut *res_ptr, col_res, &*res_ptr, col_res, a, col_a);
+            Self::vec_znx_big_sub_small_b(self, &mut *res_ptr, res_col, &*res_ptr, res_col, a, a_col);
         }
     }
 
     fn vec_znx_big_sub_small_a(
         &self,
         res: &mut VecZnxBig<FFT64>,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnx,
-        col_a: usize,
+        a_col: usize,
         b: &VecZnxBig<FFT64>,
-        col_b: usize,
+        b_col: usize,
     ) {
         #[cfg(debug_assertions)]
         {
@@ -294,36 +287,34 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
             assert_ne!(a.as_ptr(), b.as_ptr());
         }
 
         unsafe {
-            vec_znx::vec_znx_sub(
+            vec_znx_big::vec_znx_big_sub_small_a(
                 self.ptr,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col * res.size(), 0) as *mut vec_znx_big_t,
                 res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col, 0),
                 a.size() as u64,
                 a.sl() as u64,
-                b.at_ptr(col_b, 0),
+                b.at_ptr(b_col * b.size(), 0) as *const vec_znx_big_t,
                 b.size() as u64,
-                b.sl() as u64,
             )
         }
     }
 
-    fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
+    fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnx, a_col: usize) {
         unsafe {
             let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub_small_a(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
+            Self::vec_znx_big_sub_small_a(self, &mut *res_ptr, res_col, a, a_col, &*res_ptr, res_col);
         }
     }
 
     fn vec_znx_big_add_small(
         &self,
         res: &mut VecZnxBig<FFT64>,
-        col_res: usize,
-        a: &VecZnx,
-        col_a: usize,
-        b: &VecZnxBig<FFT64>,
-        col_b: usize,
+        res_col: usize,
+        a: &VecZnxBig<FFT64>,
+        a_col: usize,
+        b: &VecZnx,
+        b_col: usize,
     ) {
         #[cfg(debug_assertions)]
         {
@@ -333,25 +324,23 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
             assert_ne!(a.as_ptr(), b.as_ptr());
         }
 
         unsafe {
-            vec_znx::vec_znx_add(
+            vec_znx_big::vec_znx_big_add_small(
                 self.ptr,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col * res.size(), 0) as *mut vec_znx_big_t,
                 res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col * a.size(), 0) as *const vec_znx_big_t,
                 a.size() as u64,
-                a.sl() as u64,
-                b.at_ptr(col_b, 0),
+                b.at_ptr(b_col, 0),
                 b.size() as u64,
                 b.sl() as u64,
             )
         }
     }
 
-    fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, a_col: usize) {
+    fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnx, a_col: usize) {
         unsafe {
             let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_add_small(self, &mut *res_ptr, col_res, a, a_col, &*res_ptr, col_res);
+            Self::vec_znx_big_add_small(self, &mut *res_ptr, res_col, &*res_ptr, res_col, a, a_col);
         }
     }
@@ -363,9 +352,9 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
         &self,
         log_base2k: usize,
         res: &mut VecZnx,
-        col_res: usize,
+        res_col: usize,
         a: &VecZnxBig<FFT64>,
-        col_a: usize,
+        a_col: usize,
         tmp_bytes: &mut [u8],
     ) {
         #[cfg(debug_assertions)]
@@ -376,44 +365,41 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
             assert_alignement(tmp_bytes.as_ptr());
         }
 
         unsafe {
-            vec_znx::vec_znx_normalize_base2k(
+            vec_znx_big::vec_znx_big_normalize_base2k(
                 self.ptr,
                 log_base2k as u64,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col, 0),
                 res.size() as u64,
                 res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col * a.size(), 0) as *const vec_znx_big_t,
                 a.size() as u64,
-                a.sl() as u64,
                 tmp_bytes.as_mut_ptr(),
             );
         }
     }
 
-    fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+    fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<FFT64>, res_col: usize, a: &VecZnxBig<FFT64>, a_col: usize) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(a.n(), self.n());
             assert_eq!(res.n(), self.n());
         }
 
         unsafe {
-            vec_znx::vec_znx_automorphism(
+            vec_znx_big::vec_znx_big_automorphism(
                 self.ptr,
                 k,
-                res.at_mut_ptr(col_res, 0),
+                res.at_mut_ptr(res_col * res.size(), 0) as *mut vec_znx_big_t,
                 res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(col_a, 0),
+                a.at_ptr(a_col * a.size(), 0) as *const vec_znx_big_t,
                 a.size() as u64,
-                a.sl() as u64,
             )
         }
     }
 
-    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<FFT64>, col_a: usize) {
+    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<FFT64>, a_col: usize) {
         unsafe {
             let a_ptr: *mut VecZnxBig<FFT64> = a as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_automorphism(self, k, &mut *a_ptr, col_a, &*a_ptr, col_a);
+            Self::vec_znx_big_automorphism(self, k, &mut *a_ptr, a_col, &*a_ptr, a_col);
         }
     }
 }
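
A minimal usage sketch of the refactored `VecZnxBigOps` API, assuming a ready-made `module: &Module<FFT64>` and a `new_vec_znx` helper on `VecZnxOps` (neither is shown in this diff); the method names, the `res_col`/`a_col`/`b_col` argument order, and the owned `Vec<u8>` buffer in `new_vec_znx_big_from_bytes` are taken from the diff itself:

// Sketch only: `module`, `cols`, and `size` are assumed inputs, and
// `new_vec_znx` is an assumed helper on the `VecZnxOps` trait.
use crate::{FFT64, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxOps};

fn add_columns(module: &Module<FFT64>, cols: usize, size: usize) {
    // Allocation keeps the old trait-level signature; the extra leading `1`
    // is threaded into VecZnxBig::new internally by the implementation.
    let mut res: VecZnxBig<FFT64> = module.new_vec_znx_big(cols, size);
    let a: VecZnxBig<FFT64> = module.new_vec_znx_big(cols, size);
    let b: VecZnxBig<FFT64> = module.new_vec_znx_big(cols, size);

    // Renamed column arguments: (res, res_col, a, a_col, b, b_col).
    module.vec_znx_big_add(&mut res, 0, &a, 0, &b, 0);

    // add_small now takes the big operand first and the small one second.
    let small: VecZnx = module.new_vec_znx(cols, size); // assumed VecZnxOps helper
    module.vec_znx_big_add_small(&mut res, 0, &a, 0, &small, 0);

    // from_bytes now takes ownership of its backing buffer (Vec<u8>, not &mut [u8]).
    let bytes: Vec<u8> = vec![0u8; module.bytes_of_vec_znx_big(cols, size)];
    let _owned: VecZnxBig<FFT64> = module.new_vec_znx_big_from_bytes(cols, size, bytes);
}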