Files
poulpy/poulpy-backend/src/cpu_spqlios/fft64/vec_znx_big.rs
Jean-Philippe Bossuat 37e13b965c Add cross-basek normalization (#90)
* added cross_basek_normalization

* updated method signatures to take layouts

* fixed cross-base normalization

fix #91
fix #93
2025-09-30 14:40:10 +02:00

622 lines
19 KiB
Rust

use crate::cpu_spqlios::{FFT64Spqlios, ffi::vec_znx};
use poulpy_hal::{
api::{TakeSlice, VecZnxBigNormalizeTmpBytes},
layouts::{
Backend, Module, Scratch, VecZnx, VecZnxBig, VecZnxBigOwned, VecZnxBigToMut, VecZnxBigToRef, VecZnxToMut, VecZnxToRef,
ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut,
},
oep::{
TakeSliceImpl, VecZnxBigAddImpl, VecZnxBigAddInplaceImpl, VecZnxBigAddNormalImpl, VecZnxBigAddSmallImpl,
VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
VecZnxBigAutomorphismInplaceImpl, VecZnxBigAutomorphismInplaceTmpBytesImpl, VecZnxBigFromBytesImpl,
VecZnxBigFromSmallImpl, VecZnxBigNegateImpl, VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl,
VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubImpl, VecZnxBigSubInplaceImpl, VecZnxBigSubNegateInplaceImpl,
VecZnxBigSubSmallAImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallInplaceImpl, VecZnxBigSubSmallNegateInplaceImpl,
},
reference::{
fft64::vec_znx_big::vec_znx_big_normalize,
vec_znx::{vec_znx_add_normal_ref, vec_znx_normalize_tmp_bytes},
znx::{znx_copy_ref, znx_zero_ref},
},
source::Source,
};
unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Spqlios {
    /// Returns the number of bytes needed to back a big vector of `cols`
    /// columns with `size` limbs over a ring of degree `n`: one `f64` word
    /// per coefficient, scaled by the backend's big-layout word count.
    fn vec_znx_big_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize {
        let words_per_coeff: usize = Self::layout_big_word_count();
        let coeff_count: usize = n * cols * size;
        words_per_coeff * coeff_count * size_of::<f64>()
    }
}
unsafe impl VecZnxBigAllocImpl<Self> for FFT64Spqlios {
    /// Allocates an owned big vector with `cols` columns of `size` limbs
    /// over a ring of degree `n`, delegating to [`VecZnxBig::alloc`].
    fn vec_znx_big_alloc_impl(n: usize, cols: usize, size: usize) -> VecZnxBigOwned<Self> {
        VecZnxBig::alloc(n, cols, size)
    }
}
unsafe impl VecZnxBigFromBytesImpl<Self> for FFT64Spqlios {
    /// Constructs an owned big vector over a caller-provided byte buffer,
    /// delegating to [`VecZnxBig::from_bytes`].
    fn vec_znx_big_from_bytes_impl(n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<Self> {
        VecZnxBig::from_bytes(n, cols, size, bytes)
    }
}
unsafe impl VecZnxBigFromSmallImpl<Self> for FFT64Spqlios {
    /// Copies column `a_col` of the small vector `a` into column `res_col`
    /// of the big vector `res`, zero-filling any extra limbs of `res`.
    fn vec_znx_big_from_small_impl<R, A>(res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxToRef,
    {
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        let a: VecZnx<&[u8]> = a.to_ref();

        #[cfg(debug_assertions)]
        {
            // Both vectors must live over the same ring degree.
            assert_eq!(res.n(), a.n());
        }

        // Limbs present in both vectors are copied; limbs only present in
        // `res` are cleared.
        let shared: usize = res.size().min(a.size());
        for j in 0..res.size() {
            if j < shared {
                znx_copy_ref(res.at_mut(res_col, j), a.at(a_col, j));
            } else {
                znx_zero_ref(res.at_mut(res_col, j));
            }
        }
    }
}
unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Spqlios {
    /// Adds noise sampled from `source` to column `res_col` of `res`; the
    /// sampling parameters (`base2k`, `k`, `sigma`, `bound`) are forwarded
    /// to the reference routine `vec_znx_add_normal_ref`, which defines
    /// their semantics.
    fn add_normal_impl<R: VecZnxBigToMut<Self>>(
        _module: &Module<Self>,
        base2k: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) {
        let res: VecZnxBig<&mut [u8], FFT64Spqlios> = res.to_mut();
        // Re-wrap the big vector's backing buffer and metadata as a plain
        // `VecZnx` view so the reference sampling routine can be reused.
        // NOTE(review): this assumes the FFT64 big layout is bit-compatible
        // with the small `VecZnx` layout — TODO confirm against the layouts
        // module.
        let mut res_znx: VecZnx<&mut [u8]> = VecZnx {
            data: res.data,
            n: res.n,
            cols: res.cols,
            size: res.size,
            max_size: res.max_size,
        };
        vec_znx_add_normal_ref(base2k, &mut res_znx, res_col, k, sigma, bound, source);
    }
}
unsafe impl VecZnxBigAddImpl<Self> for FFT64Spqlios {
    /// Adds column `a_col` of `a` to column `b_col` of `b` and stores the
    /// result in column `res_col` of `res`.
    fn vec_znx_big_add_impl<R, A, B>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
        B: VecZnxBigToRef<Self>,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let b: VecZnxBig<&[u8], Self> = b.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree; the two inputs must not
            // alias each other.
            assert_eq!(a.n(), res.n());
            assert_eq!(b.n(), res.n());
            assert_ne!(a.as_ptr(), b.as_ptr());
        }
        // SAFETY: all pointers are derived from live borrows of the vector
        // buffers, and the size/stride arguments are taken from the same
        // layouts, so the FFI call stays within those buffers.
        unsafe {
            vec_znx::vec_znx_add(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                b.at_ptr(b_col, 0),
                b.size() as u64,
                b.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigAddInplaceImpl<Self> for FFT64Spqlios {
    /// Adds column `a_col` of `a` to column `res_col` of `res` in place
    /// (`res += a`); `res` is passed to the FFI as both an input operand
    /// and the output.
    fn vec_znx_big_add_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts; `res` intentionally aliases itself as
        // output and second input for the in-place add.
        unsafe {
            vec_znx::vec_znx_add(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                res.at_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigAddSmallImpl<Self> for FFT64Spqlios {
    /// Adds column `b_col` of the small vector `b` to column `a_col` of the
    /// big vector `a` and stores the result in column `res_col` of `res`.
    fn vec_znx_big_add_small_impl<R, A, B>(
        module: &Module<Self>,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        b: &B,
        b_col: usize,
    ) where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
        B: VecZnxToRef,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let b: VecZnx<&[u8]> = b.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree; the two inputs must not
            // alias each other.
            assert_eq!(a.n(), res.n());
            assert_eq!(b.n(), res.n());
            assert_ne!(a.as_ptr(), b.as_ptr());
        }
        // SAFETY: all pointers are derived from live borrows of the vector
        // buffers, and the size/stride arguments are taken from the same
        // layouts, so the FFI call stays within those buffers.
        unsafe {
            vec_znx::vec_znx_add(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                b.at_ptr(b_col, 0),
                b.size() as u64,
                b.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigAddSmallInplaceImpl<Self> for FFT64Spqlios {
    /// Adds column `a_col` of the small vector `a` to column `res_col` of
    /// the big vector `res` in place (`res += a`).
    fn vec_znx_big_add_small_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxToRef,
    {
        let a: VecZnx<&[u8]> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts; `res` intentionally aliases itself as
        // output and first input for the in-place add.
        unsafe {
            vec_znx::vec_znx_add(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                res.at_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubImpl<Self> for FFT64Spqlios {
    /// Subtracts column `b_col` of `b` from column `a_col` of `a` and stores
    /// the result in column `res_col` of `res` (`res = a - b`).
    fn vec_znx_big_sub_impl<R, A, B>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
        B: VecZnxBigToRef<Self>,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let b: VecZnxBig<&[u8], Self> = b.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree; the two inputs must not
            // alias each other.
            assert_eq!(a.n(), res.n());
            assert_eq!(b.n(), res.n());
            assert_ne!(a.as_ptr(), b.as_ptr());
        }
        // SAFETY: all pointers are derived from live borrows of the vector
        // buffers, and the size/stride arguments are taken from the same
        // layouts, so the FFI call stays within those buffers.
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                b.at_ptr(b_col, 0),
                b.size() as u64,
                b.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubInplaceImpl<Self> for FFT64Spqlios {
    /// Subtracts column `a_col` of `a` from column `res_col` of `res` in
    /// place (`res -= a`).
    fn vec_znx_big_sub_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts; `res` intentionally aliases itself as
        // output and minuend (first input) for the in-place subtraction.
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                res.at_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubNegateInplaceImpl<Self> for FFT64Spqlios {
    /// Subtracts column `res_col` of `res` from column `a_col` of `a`,
    /// storing the result back into `res` (`res = a - res`).
    fn vec_znx_big_sub_negate_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts; `res` intentionally aliases itself as
        // output and subtrahend (second input).
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                res.at_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubSmallAImpl<Self> for FFT64Spqlios {
    /// Subtracts column `b_col` of the big vector `b` from column `a_col`
    /// of the small vector `a` and stores the result in column `res_col`
    /// of `res` (`res = a - b`).
    fn vec_znx_big_sub_small_a_impl<R, A, B>(
        module: &Module<Self>,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        b: &B,
        b_col: usize,
    ) where
        R: VecZnxBigToMut<Self>,
        A: VecZnxToRef,
        B: VecZnxBigToRef<Self>,
    {
        let a: VecZnx<&[u8]> = a.to_ref();
        let b: VecZnxBig<&[u8], Self> = b.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree; the two inputs must not
            // alias each other.
            assert_eq!(a.n(), res.n());
            assert_eq!(b.n(), res.n());
            assert_ne!(a.as_ptr(), b.as_ptr());
        }
        // SAFETY: all pointers are derived from live borrows of the vector
        // buffers, and the size/stride arguments are taken from the same
        // layouts, so the FFI call stays within those buffers.
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                b.at_ptr(b_col, 0),
                b.size() as u64,
                b.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubSmallInplaceImpl<Self> for FFT64Spqlios {
    /// Subtracts column `a_col` of the small vector `a` from column
    /// `res_col` of the big vector `res` in place (`res -= a`).
    fn vec_znx_big_sub_small_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxToRef,
    {
        let a: VecZnx<&[u8]> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts; `res` intentionally aliases itself as
        // output and minuend (first input).
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                res.at_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubSmallBImpl<Self> for FFT64Spqlios {
    /// Subtracts column `b_col` of the small vector `b` from column `a_col`
    /// of the big vector `a` and stores the result in column `res_col` of
    /// `res` (`res = a - b`).
    fn vec_znx_big_sub_small_b_impl<R, A, B>(
        module: &Module<Self>,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        b: &B,
        b_col: usize,
    ) where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
        B: VecZnxToRef,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let b: VecZnx<&[u8]> = b.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree; the two inputs must not
            // alias each other.
            assert_eq!(a.n(), res.n());
            assert_eq!(b.n(), res.n());
            assert_ne!(a.as_ptr(), b.as_ptr());
        }
        // SAFETY: all pointers are derived from live borrows of the vector
        // buffers, and the size/stride arguments are taken from the same
        // layouts, so the FFI call stays within those buffers.
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                b.at_ptr(b_col, 0),
                b.size() as u64,
                b.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigSubSmallNegateInplaceImpl<Self> for FFT64Spqlios {
    /// Subtracts column `res_col` of `res` from column `a_col` of the small
    /// vector `a`, storing the result back into `res` (`res = a - res`).
    fn vec_znx_big_sub_small_negate_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxToRef,
    {
        let a: VecZnx<&[u8]> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts; `res` intentionally aliases itself as
        // output and subtrahend (second input).
        unsafe {
            vec_znx::vec_znx_sub(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                res.at_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigNegateImpl<Self> for FFT64Spqlios {
    /// Negates column `a_col` of `a` and stores the result in column
    /// `res_col` of `res`.
    fn vec_znx_big_negate_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
    {
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        #[cfg(debug_assertions)]
        {
            // Ring degrees must match — consistent with every other
            // out-of-place op in this backend, which all carry this check.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts, so the FFI call stays within those
        // buffers.
        unsafe {
            vec_znx::vec_znx_negate(
                module.ptr(),
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigNegateInplaceImpl<Self> for FFT64Spqlios {
    /// Negates column `a_col` of `a` in place (`a = -a`).
    fn vec_znx_big_negate_inplace_impl<A>(module: &Module<Self>, a: &mut A, a_col: usize)
    where
        A: VecZnxBigToMut<Self>,
    {
        let mut a: VecZnxBig<&mut [u8], Self> = a.to_mut();
        // SAFETY: pointers and size/stride arguments come from a live
        // mutable borrow of `a`; it intentionally aliases itself as input
        // and output for the in-place negation.
        unsafe {
            vec_znx::vec_znx_negate(
                module.ptr(),
                a.at_mut_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigNormalizeTmpBytesImpl<Self> for FFT64Spqlios {
    /// Returns the scratch-space size (in bytes) required by big-vector
    /// normalization over a ring of degree `module.n()`, delegating to the
    /// reference `vec_znx_normalize_tmp_bytes`.
    fn vec_znx_big_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
        vec_znx_normalize_tmp_bytes(module.n())
    }
}
unsafe impl VecZnxBigNormalizeImpl<Self> for FFT64Spqlios
where
    Self: TakeSliceImpl<Self>,
{
    /// Normalizes column `a_col` of the big vector `a` (limbs in base
    /// `2^a_basek`) into column `res_col` of the small vector `res` (limbs
    /// in base `2^res_basek`), using `scratch` for the carry buffer.
    ///
    /// Delegates to the portable reference routine `vec_znx_big_normalize`
    /// instead of the spqlios `vec_znx_normalize_base2k` FFI, since the
    /// reference path handles differing input/output bases (the FFI takes a
    /// single `base2k`).
    fn vec_znx_big_normalize_impl<R, A>(
        module: &Module<Self>,
        res_basek: usize,
        res: &mut R,
        res_col: usize,
        a_basek: usize,
        a: &A,
        a_col: usize,
        scratch: &mut Scratch<Self>,
    ) where
        R: VecZnxToMut,
        A: VecZnxBigToRef<Self>,
    {
        // Carve the carry buffer out of scratch space, sized as i64 words.
        let (carry, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes() / size_of::<i64>());
        vec_znx_big_normalize(res_basek, res, res_col, a_basek, a, a_col, carry);
    }
}
unsafe impl VecZnxBigAutomorphismImpl<Self> for FFT64Spqlios {
    /// Applies the automorphism X^i -> X^(i*k) to column `a_col` of `a` and
    /// stores the result in column `res_col` of `res`.
    fn vec_znx_big_automorphism_impl<R, A>(module: &Module<Self>, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<Self>,
        A: VecZnxBigToRef<Self>,
    {
        let a: VecZnxBig<&[u8], Self> = a.to_ref();
        let mut res: VecZnxBig<&mut [u8], Self> = res.to_mut();
        #[cfg(debug_assertions)]
        {
            // Operands must share the ring degree.
            assert_eq!(a.n(), res.n());
        }
        // SAFETY: pointers and size/stride arguments come from live borrows
        // of the two vectors' layouts, so the FFI call stays within those
        // buffers.
        unsafe {
            vec_znx::vec_znx_automorphism(
                module.ptr(),
                k,
                res.at_mut_ptr(res_col, 0),
                res.size() as u64,
                res.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}
unsafe impl VecZnxBigAutomorphismInplaceTmpBytesImpl<Self> for FFT64Spqlios {
    /// Returns 0: the in-place automorphism in this backend calls the FFI
    /// directly with the operand as both input and output, so no scratch
    /// space is required.
    fn vec_znx_big_automorphism_inplace_tmp_bytes_impl(_module: &Module<Self>) -> usize {
        0
    }
}
unsafe impl VecZnxBigAutomorphismInplaceImpl<Self> for FFT64Spqlios {
    /// Applies the automorphism X^i -> X^(i*k) to column `a_col` of `a` in
    /// place. `_scratch` is unused: the FFI is called with `a` as both
    /// input and output (hence the 0 returned by the tmp-bytes impl above).
    fn vec_znx_big_automorphism_inplace_impl<A>(
        module: &Module<Self>,
        k: i64,
        a: &mut A,
        a_col: usize,
        _scratch: &mut Scratch<Self>,
    ) where
        A: VecZnxBigToMut<Self>,
    {
        let mut a: VecZnxBig<&mut [u8], Self> = a.to_mut();
        // SAFETY: pointers and size/stride arguments come from a live
        // mutable borrow of `a`; it intentionally aliases itself as input
        // and output for the in-place automorphism.
        unsafe {
            vec_znx::vec_znx_automorphism(
                module.ptr(),
                k,
                a.at_mut_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
                a.at_ptr(a_col, 0),
                a.size() as u64,
                a.sl() as u64,
            )
        }
    }
}