Refactor of GGSW key-switch to enable easier implementation of GGSW automorphism

Jean-Philippe Bossuat
2025-05-20 17:42:43 +02:00
parent a803127424
commit 640ff9ea61
9 changed files with 404 additions and 288 deletions

View File

@@ -78,10 +78,7 @@ impl<B: Backend> Module<B> {
         if gal_el == 0 {
             panic!("cannot invert 0")
         }
-        ((mod_exp_u64(
-            gal_el.abs() as u64,
-            (self.cyclotomic_order() - 1) as usize,
-        ) & (self.cyclotomic_order() - 1)) as i64)
+        ((mod_exp_u64(gal_el.abs() as u64, (self.cyclotomic_order() - 1) as usize) & (self.cyclotomic_order() - 1)) as i64)
             * gal_el.signum()
     }
 }
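Note: the reflowed expression computes the inverse of a Galois element modulo the cyclotomic order m = 2N, a power of two. The unit group of Z/2^k has exponent dividing 2^(k-2), which divides m, so m - 1 is congruent to -1 modulo the order of any odd g, and g^(m-1) = g^(-1); `mod_exp_u64` works modulo 2^64 and the `& (cyclotomic_order - 1)` mask then reduces modulo m, while abs/signum carry the sign through. A standalone sketch of the same computation (`galois_inv` is illustrative, not the crate's `mod_exp_u64`):

// Standalone sketch: inverting an odd Galois element g modulo a
// power-of-two cyclotomic order m = 2N, via g^(m-1) mod m.
fn galois_inv(g: u64, m: u64) -> u64 {
    assert!(m.is_power_of_two() && g & 1 == 1);
    let (mut acc, mut base, mut e) = (1u64, g, m - 1);
    while e > 0 {
        if e & 1 == 1 {
            acc = acc.wrapping_mul(base); // multiply mod 2^64
        }
        base = base.wrapping_mul(base);
        e >>= 1;
    }
    acc & (m - 1) // reduce mod m: valid because m divides 2^64
}

fn main() {
    let (m, g) = (2048u64, 5u64); // cyclotomic order 2N = 2048
    assert_eq!((g * galois_inv(g, m)) & (m - 1), 1);
    println!("inverse of {} mod {} = {}", g, m, galois_inv(g, m));
}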

View File

@@ -2,7 +2,10 @@ use std::marker::PhantomData;
 use crate::ffi::svp;
 use crate::znx_base::ZnxInfos;
-use crate::{alloc_aligned, Backend, DataView, DataViewMut, Module, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxSliceSize, ZnxView, FFT64};
+use crate::{
+    Backend, DataView, DataViewMut, FFT64, Module, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxSliceSize, ZnxView,
+    alloc_aligned,
+};
 pub struct ScalarZnxDft<D, B: Backend> {
     data: D,
@@ -93,8 +96,8 @@ impl<D, B: Backend> ScalarZnxDft<D, B> {
         }
     }
-    pub fn as_vec_znx_dft(self) -> VecZnxDft<D, B>{
-        VecZnxDft{
+    pub fn as_vec_znx_dft(self) -> VecZnxDft<D, B> {
+        VecZnxDft {
             data: self.data,
             n: self.n,
             cols: self.cols,
@@ -227,4 +230,4 @@ impl<B: Backend> VecZnxDftToRef<B> for ScalarZnxDft<&[u8], B> {
             _phantom: PhantomData,
         }
     }
-}
+}
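Note: `as_vec_znx_dft` (like `to_scalar_znx` and `to_vec_znx_small` in the files below) is a consuming, zero-copy conversion: it takes `self` by value and moves the backing buffer into the new wrapper, so nothing is copied and no aliasing handle survives. A minimal sketch of the pattern, with illustrative types rather than the crate's:

// Illustrative types only; the crate's ScalarZnxDft/VecZnxDft carry more metadata.
struct Scalar<D> { data: D, n: usize }
struct Vector<D> { data: D, n: usize, size: usize }

impl<D> Scalar<D> {
    // Consumes self: `data` is moved, not cloned, so the conversion is free.
    fn into_vector(self) -> Vector<D> {
        Vector { data: self.data, n: self.n, size: 1 }
    }
}

fn main() {
    let s = Scalar { data: vec![0i64; 16], n: 16 };
    let v = s.into_vector(); // `s` is gone; `v` owns the same allocation
    assert_eq!(v.size, 1);
}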

View File

@@ -130,9 +130,13 @@ impl<D> VecZnx<D> {
         }
     }
-    pub fn to_scalar_znx(self) -> ScalarZnx<D>{
-        debug_assert_eq!(self.size, 1, "cannot convert VecZnx to ScalarZnx if cols: {} != 1", self.cols);
-        ScalarZnx{
+    pub fn to_scalar_znx(self) -> ScalarZnx<D> {
+        debug_assert_eq!(
+            self.size, 1,
+            "cannot convert VecZnx to ScalarZnx if cols: {} != 1",
+            self.cols
+        );
+        ScalarZnx {
             data: self.data,
             n: self.n,
             cols: self.cols,
@@ -198,9 +202,9 @@ where
     VecZnx<D>: VecZnxToMut + ZnxInfos,
 {
     /// Extracts the `a_col`-th column of `a` and stores it in the `self_col`-th column of [Self].
-    pub fn extract_column<C>(&mut self, self_col: usize, a: &VecZnx<C>, a_col: usize)
+    pub fn extract_column<R>(&mut self, self_col: usize, a: &R, a_col: usize)
     where
-        VecZnx<C>: VecZnxToRef + ZnxInfos,
+        R: VecZnxToRef + ZnxInfos,
     {
         #[cfg(debug_assertions)]
         {
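Note: this signature change is the core of the refactor: `extract_column` now accepts any `R: VecZnxToRef + ZnxInfos` instead of a concrete `VecZnx<C>`, so anything that can hand out a read-only view (owned, borrowed, or freshly converted containers) can be passed directly. A minimal sketch of the borrow-trait pattern, with illustrative types rather than the crate's:

// A read-only view over coefficients, analogous to VecZnx<&[u8]>.
struct View<'a> {
    coeffs: &'a [i64],
}

// Analogous to VecZnxToRef: any implementor can produce a View.
trait ToView {
    fn to_view(&self) -> View<'_>;
}

struct Owned {
    coeffs: Vec<i64>,
}

impl ToView for Owned {
    fn to_view(&self) -> View<'_> {
        View { coeffs: &self.coeffs }
    }
}

// Like the new extract_column: generic over the borrow trait, so callers
// never need to convert into one concrete container first.
fn extract<R: ToView>(dst: &mut [i64], a: &R) {
    let v = a.to_view();
    dst.copy_from_slice(&v.coeffs[..dst.len()]);
}

fn main() {
    let a = Owned { coeffs: (0..8).collect() };
    let mut dst = [0i64; 4];
    extract(&mut dst, &a);
    assert_eq!(dst, [0, 1, 2, 3]);
}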

View File

@@ -1,6 +1,6 @@
 use crate::ffi::vec_znx_big;
 use crate::znx_base::{ZnxInfos, ZnxView};
-use crate::{alloc_aligned, Backend, DataView, DataViewMut, Module, VecZnx, ZnxSliceSize, ZnxViewMut, ZnxZero, FFT64};
+use crate::{Backend, DataView, DataViewMut, FFT64, Module, VecZnx, ZnxSliceSize, ZnxViewMut, ZnxZero, alloc_aligned};
 use std::fmt;
 use std::marker::PhantomData;
@@ -97,11 +97,11 @@ impl<D, B: Backend> VecZnxBig<D, B> {
 impl<D> VecZnxBig<D, FFT64>
 where
     VecZnxBig<D, FFT64>: VecZnxBigToMut<FFT64> + ZnxInfos,
-{
-    // Consumes the VecZnxBig to return a VecZnx.
+{
+    // Consumes the VecZnxBig to return a VecZnx.
     // Useful when no normalization is needed.
-    pub fn to_vec_znx_small(self) -> VecZnx<D>{
-        VecZnx{
+    pub fn to_vec_znx_small(self) -> VecZnx<D> {
+        VecZnx {
             data: self.data,
             n: self.n,
             cols: self.cols,
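Note: `to_vec_znx_small` reinterprets the big-coefficient buffer as a plain `VecZnx` and skips the normalization pass, which is safe when the caller normalizes later anyway. Assuming the crate's limbs are balanced base-2^K digits (an assumption about conventions this diff does not show), normalization is a carry-propagation pass; a hypothetical sketch over one coefficient's limbs, most significant first:

// Hypothetical normalization (carry propagation), NOT the crate's routine:
// digits are brought back into the balanced range [-2^(K-1), 2^(K-1)).
fn normalize(limbs: &mut [i64], log_base: u32) {
    let base = 1i64 << log_base;
    let half = base >> 1;
    let mut carry = 0i64;
    for x in limbs.iter_mut().rev() {
        // least-significant limb is visited first
        let v = *x + carry;
        let mut r = v.rem_euclid(base);
        if r >= half {
            r -= base; // balanced digit
        }
        carry = (v - r) >> log_base; // exact: v - r is a multiple of base
        *x = r;
    }
    // any final carry overflows past the most-significant limb and is dropped
}

fn main() {
    let mut limbs = vec![0i64, 3 << 10]; // K = 10, second limb overflows the digit range
    normalize(&mut limbs, 10);
    assert_eq!(limbs, vec![3, 0]); // carry propagated into the more significant limb
}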

View File

@@ -147,7 +147,6 @@ pub trait VecZnxBigOps<BACKEND: Backend> {
     fn vec_znx_big_automorphism_inplace<A>(&self, k: i64, a: &mut A, a_col: usize)
     where
         A: VecZnxBigToMut<BACKEND>;
 }
 pub trait VecZnxBigScratch {
@@ -170,7 +169,6 @@ impl<B: Backend> VecZnxBigAlloc<B> for Module<B> {
 }
 impl VecZnxBigOps<FFT64> for Module<FFT64> {
     fn vec_znx_big_add<R, A, B>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize)
     where
         R: VecZnxBigToMut<FFT64>,
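Note: `vec_znx_big_automorphism_inplace` applies the Galois automorphism X -> X^k column-wise. On the negacyclic ring Z[X]/(X^N + 1), coefficient i lands at index i*k mod 2N and is negated when it wraps past N, since X^N = -1. A plain reference sketch (not the FFI-backed implementation):

// Reference sketch of the negacyclic automorphism X -> X^k for odd k > 0.
fn automorphism(a: &[i64], k: usize) -> Vec<i64> {
    let n = a.len();
    let two_n = 2 * n;
    debug_assert!(k % 2 == 1, "galois element must be odd");
    let mut res = vec![0i64; n];
    for (i, &c) in a.iter().enumerate() {
        let j = (i * k) % two_n;
        if j < n {
            res[j] = c; // X^(ik) stays in the first half
        } else {
            res[j - n] = -c; // X^(ik) = -X^(ik - N) because X^N = -1
        }
    }
    res
}

fn main() {
    // X -> X^3 on Z[X]/(X^4 + 1): 1 + X becomes 1 + X^3
    assert_eq!(automorphism(&[1, 1, 0, 0], 3), vec![1, 0, 0, 1]);
}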

View File

@@ -42,6 +42,17 @@ pub trait VecZnxDftOps<B: Backend> {
     /// a new [VecZnxDft] through [VecZnxDft::from_bytes].
     fn vec_znx_idft_tmp_bytes(&self) -> usize;
+    fn vec_znx_dft_add<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize)
+    where
+        R: VecZnxDftToMut<B>,
+        A: VecZnxDftToRef<B>,
+        D: VecZnxDftToRef<B>;
+    fn vec_znx_dft_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    where
+        R: VecZnxDftToMut<B>,
+        A: VecZnxDftToRef<B>;
     fn vec_znx_dft_copy<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<B>,
@@ -84,6 +95,64 @@ impl<B: Backend> VecZnxDftAlloc<B> for Module<B> {
 }
 impl VecZnxDftOps<FFT64> for Module<FFT64> {
+    fn vec_znx_dft_add<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize)
+    where
+        R: VecZnxDftToMut<FFT64>,
+        A: VecZnxDftToRef<FFT64>,
+        D: VecZnxDftToRef<FFT64>,
+    {
+        let mut res_mut: VecZnxDft<&mut [u8], FFT64> = res.to_mut();
+        let a_ref: VecZnxDft<&[u8], FFT64> = a.to_ref();
+        let b_ref: VecZnxDft<&[u8], FFT64> = b.to_ref();
+        let min_size: usize = res_mut.size().min(a_ref.size()).min(b_ref.size());
+        unsafe {
+            (0..min_size).for_each(|j| {
+                vec_znx_dft::vec_dft_add(
+                    self.ptr,
+                    res_mut.at_mut_ptr(res_col, j) as *mut vec_znx_dft::vec_znx_dft_t,
+                    1,
+                    a_ref.at_ptr(a_col, j) as *const vec_znx_dft::vec_znx_dft_t,
+                    1,
+                    b_ref.at_ptr(b_col, j) as *const vec_znx_dft::vec_znx_dft_t,
+                    1,
+                );
+            });
+        }
+        (min_size..res_mut.size()).for_each(|j| {
+            res_mut.zero_at(res_col, j);
+        })
+    }
+    fn vec_znx_dft_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    where
+        R: VecZnxDftToMut<FFT64>,
+        A: VecZnxDftToRef<FFT64>,
+    {
+        let mut res_mut: VecZnxDft<&mut [u8], FFT64> = res.to_mut();
+        let a_ref: VecZnxDft<&[u8], FFT64> = a.to_ref();
+        let min_size: usize = res_mut.size().min(a_ref.size());
+        unsafe {
+            (0..min_size).for_each(|j| {
+                vec_znx_dft::vec_dft_add(
+                    self.ptr,
+                    res_mut.at_mut_ptr(res_col, j) as *mut vec_znx_dft::vec_znx_dft_t,
+                    1,
+                    res_mut.at_ptr(res_col, j) as *const vec_znx_dft::vec_znx_dft_t,
+                    1,
+                    a_ref.at_ptr(a_col, j) as *const vec_znx_dft::vec_znx_dft_t,
+                    1,
+                );
+            });
+        }
+        (min_size..res_mut.size()).for_each(|j| {
+            res_mut.zero_at(res_col, j);
+        })
+    }
     fn vec_znx_dft_copy<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<FFT64>,
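Note: both new implementations share one size convention: limbs are added pointwise only up to the shortest operand (`min_size`), and every remaining limb of the result column is explicitly zeroed so no stale data survives in `res`. A plain-Rust sketch of that convention, modeling each limb as a `Vec<f64>` of DFT coefficients:

// Sketch of the min-size-then-zero convention used by vec_znx_dft_add above.
fn dft_add(res: &mut [Vec<f64>], a: &[Vec<f64>], b: &[Vec<f64>]) {
    let min_size = res.len().min(a.len()).min(b.len());
    for j in 0..min_size {
        // pointwise addition of limb j: the DFT-domain analogue of polynomial addition
        for (r, (&x, &y)) in res[j].iter_mut().zip(a[j].iter().zip(&b[j])) {
            *r = x + y;
        }
    }
    for limb in &mut res[min_size..] {
        limb.iter_mut().for_each(|r| *r = 0.0); // clear stale limbs
    }
}

fn main() {
    let mut res = vec![vec![9.0; 4]; 3]; // 3 limbs with stale contents
    let (a, b) = (vec![vec![1.0; 4]; 2], vec![vec![2.0; 4]; 2]);
    dft_add(&mut res, &a, &b);
    assert_eq!(res[0], vec![3.0; 4]);
    assert_eq!(res[2], vec![0.0; 4]); // limb beyond min_size was zeroed
}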

View File

@@ -574,7 +574,11 @@ impl<BACKEND: Backend> VecZnxOps for Module<BACKEND> {
         #[cfg(debug_assertions)]
         {
             assert_eq!(a.n(), self.n());
-            assert!(k & 1 != 0, "invalid galois element: must be odd but is {}", k);
+            assert!(
+                k & 1 != 0,
+                "invalid galois element: must be odd but is {}",
+                k
+            );
         }
         unsafe {
             vec_znx::vec_znx_automorphism(
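Note: the expanded assertion encodes that the automorphisms of Z[X]/(X^N + 1) are exactly X -> X^k for k a unit modulo 2N, i.e. k odd: for even k the index map i -> i*k mod 2N is not injective, so no automorphism exists. A quick check of that index map:

// Verify whether i -> i*k mod 2N permutes the exponent indices.
fn is_index_permutation(k: usize, n: usize) -> bool {
    let mut seen = vec![false; 2 * n];
    (0..2 * n).all(|i| {
        let j = (i * k) % (2 * n);
        !std::mem::replace(&mut seen[j], true) // true only on first visit
    })
}

fn main() {
    assert!(is_index_permutation(5, 8)); // odd k: bijective
    assert!(!is_index_permutation(4, 8)); // even k: collides, so it is rejected
}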