Mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 05:06:44 +01:00)
factorized out vmp ops into a common trait & implementation
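The diff below introduces a crate-private trait (MatZnxDftProducts) whose default methods carry the shared vmp product operations, while GRLWECt and RGSWCt keep thin public wrappers that forward to it with fully-qualified calls. The following sketch is not poulpy code; it only illustrates that delegation pattern with placeholder types (Products, Gadget are hypothetical names), so the wrapper/trait split in the diff is easier to follow.

// Minimal, self-contained sketch of the refactoring pattern (placeholder
// types, not the poulpy/base2k API): shared ops live as default methods on
// a crate-private trait, and the concrete type keeps thin public wrappers
// that delegate with fully-qualified syntax.
trait Products {
    fn scale(&self) -> i64;

    // Shared default implementation, written once for every implementor.
    fn mul(&self, x: i64) -> i64 {
        x * self.scale()
    }

    // The in-place variant reuses the shared implementation above.
    fn mul_inplace(&self, x: &mut i64) {
        *x = self.mul(*x);
    }
}

// Stand-in for a ciphertext type such as GRLWECt or RGSWCt.
struct Gadget {
    scale: i64,
}

impl Products for Gadget {
    fn scale(&self) -> i64 {
        self.scale
    }
}

impl Gadget {
    // Public wrapper: the trait stays crate-private, only this surface is exported.
    pub fn mul(&self, x: i64) -> i64 {
        Products::mul(self, x)
    }
}

fn main() {
    let g = Gadget { scale: 3 };
    let mut v = 7;
    g.mul_inplace(&mut v); // default trait method, shared across implementors
    assert_eq!(v, 21);
    assert_eq!(g.mul(2), 6); // public wrapper delegating to the trait
}

The same shape appears in the diff: GRLWECt::mul_rlwe simply calls MatZnxDftProducts::mul_rlwe(self, module, res, a, scratch).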
170 rlwe/src/elem.rs
@@ -1,6 +1,13 @@
use base2k::ZnxInfos;
use base2k::{
    Backend, FFT64, MatZnxDft, MatZnxDftToMut, MatZnxDftToRef, Module, Scratch, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxDftToMut,
    VecZnxDftToRef, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxZero,
};

use crate::utils::derive_size;
use crate::{
    elem_grlwe::GRLWECt,
    elem_rlwe::{RLWECt, RLWECtDft},
    utils::derive_size,
};

pub trait Infos {
    type Inner: ZnxInfos;
@@ -45,3 +52,162 @@ pub trait Infos {
    /// Returns the bit precision of the ciphertext.
    fn log_k(&self) -> usize;
}

pub trait GetRow<B: Backend> {
    fn get_row<R>(&self, module: &Module<B>, row_i: usize, col_j: usize, res: &mut RLWECtDft<R, B>)
    where
        VecZnxDft<R, B>: VecZnxDftToMut<B>;
}

pub trait SetRow<B: Backend> {
    fn set_row<A>(&mut self, module: &Module<B>, row_i: usize, col_j: usize, a: &RLWECtDft<A, B>)
    where
        VecZnxDft<A, B>: VecZnxDftToRef<B>;
}

pub(crate) trait MatZnxDftProducts<D, C>: Infos
where
    MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
{
    fn mul_rlwe<R, A>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, a: &RLWECt<A>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
        VecZnx<R>: VecZnxToMut,
        VecZnx<A>: VecZnxToRef;

    fn mul_rlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnx<R>: VecZnxToMut + VecZnxToRef,
    {
        unsafe {
            let res_ptr: *mut RLWECt<R> = res as *mut RLWECt<R>; // This is ok because [Self::mul_rlwe] only updates res at the end.
            self.mul_rlwe(&module, &mut *res_ptr, &*res_ptr, scratch);
        }
    }

    fn mul_rlwe_dft<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut RLWECtDft<R, FFT64>,
        a: &RLWECtDft<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<A, FFT64>: VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        let log_base2k: usize = self.log_base2k();

        #[cfg(debug_assertions)]
        {
            assert_eq!(res.log_base2k(), log_base2k);
            assert_eq!(self.n(), module.n());
            assert_eq!(res.n(), module.n());
        }

        let (a_data, scratch_1) = scratch.tmp_vec_znx(module, 2, a.size());

        let mut a_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: a_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        a.idft(module, &mut a_idft, scratch_1);

        let (res_data, scratch_2) = scratch_1.tmp_vec_znx(module, 2, res.size());

        let mut res_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: res_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        self.mul_rlwe(module, &mut res_idft, &a_idft, scratch_2);

        module.vec_znx_dft(res, 0, &res_idft, 0);
        module.vec_znx_dft(res, 1, &res_idft, 1);
    }

    fn mul_rlwe_dft_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECtDft<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToRef<FFT64> + VecZnxDftToMut<FFT64>,
    {
        let log_base2k: usize = self.log_base2k();

        #[cfg(debug_assertions)]
        {
            assert_eq!(res.log_base2k(), log_base2k);
            assert_eq!(self.n(), module.n());
            assert_eq!(res.n(), module.n());
        }

        let (res_data, scratch_1) = scratch.tmp_vec_znx(module, 2, res.size());

        let mut res_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: res_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        res.idft(module, &mut res_idft, scratch_1);

        self.mul_rlwe_inplace(module, &mut res_idft, scratch_1);

        module.vec_znx_dft(res, 0, &res_idft, 0);
        module.vec_znx_dft(res, 1, &res_idft, 1);
    }

    fn mul_grlwe<R, A>(&self, module: &Module<FFT64>, res: &mut GRLWECt<R, FFT64>, a: &GRLWECt<A, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<A, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, a.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        let min_rows: usize = res.rows().min(a.rows());

        (0..min_rows).for_each(|row_i| {
            a.get_row(module, row_i, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, &tmp_row);
        });

        tmp_row.data.zero();

        (min_rows..res.rows()).for_each(|row_i| {
            res.set_row(module, row_i, &tmp_row);
        })
    }

    fn mul_grlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut R, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        R: GetRow<FFT64> + SetRow<FFT64> + Infos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, res.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        (0..self.cols()).for_each(|col_j| {
            (0..res.rows()).for_each(|row_i| {
                res.get_row(module, row_i, col_j, &mut tmp_row);
                self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
                res.set_row(module, row_i, col_j, &tmp_row);
            });
        })
    }
}
@@ -7,7 +7,7 @@ use base2k::{
use sampling::source::Source;

use crate::{
    elem::Infos,
    elem::{GetRow, Infos, MatZnxDftProducts, SetRow},
    elem_rlwe::{RLWECt, RLWECtDft, RLWEPt},
    keys::SecretKeyDft,
    utils::derive_size,
@@ -211,8 +211,106 @@ impl<C> GRLWECt<C, FFT64> {
    }

    pub fn mul_rlwe<R, A>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, a: &RLWECt<A>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
        VecZnx<R>: VecZnxToMut,
        VecZnx<A>: VecZnxToRef,
    {
        MatZnxDftProducts::mul_rlwe(self, module, res, a, scratch);
    }

    pub fn mul_rlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnx<R>: VecZnxToMut + VecZnxToRef,
    {
        MatZnxDftProducts::mul_rlwe_inplace(self, module, res, scratch);
    }

    pub fn mul_rlwe_dft<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut RLWECtDft<R, FFT64>,
        a: &RLWECtDft<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<A, FFT64>: VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        MatZnxDftProducts::mul_rlwe_dft(self, module, res, a, scratch);
    }

    pub fn mul_rlwe_dft_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECtDft<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToRef<FFT64> + VecZnxDftToMut<FFT64>,
    {
        MatZnxDftProducts::mul_rlwe_dft_inplace(self, module, res, scratch);
    }

    pub fn mul_grlwe<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut GRLWECt<R, FFT64>,
        a: &GRLWECt<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<A, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        MatZnxDftProducts::mul_grlwe(self, module, res, a, scratch);
    }

    pub fn mul_grlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut R, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        R: GetRow<FFT64> + SetRow<FFT64> + Infos,
    {
        MatZnxDftProducts::mul_grlwe_inplace(self, module, res, scratch);
    }
}

impl<C> GetRow<FFT64> for GRLWECt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
{
    fn get_row<R>(&self, module: &Module<FFT64>, row_i: usize, col_j: usize, res: &mut RLWECtDft<R, FFT64>)
    where
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64>,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(col_j, 0);
        }
        module.vmp_extract_row(res, self, row_i, col_j);
    }
}

impl<C> SetRow<FFT64> for GRLWECt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToMut<FFT64>,
{
    fn set_row<R>(&mut self, module: &Module<FFT64>, row_i: usize, col_j: usize, a: &RLWECtDft<R, FFT64>)
    where
        VecZnxDft<R, FFT64>: VecZnxDftToRef<FFT64>,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(col_j, 0);
        }
        module.vmp_prepare_row(self, row_i, col_j, a);
    }
}

impl<C> MatZnxDftProducts<GRLWECt<C, FFT64>, C> for GRLWECt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
{
    fn mul_rlwe<R, A>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, a: &RLWECt<A>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
        VecZnx<R>: VecZnxToMut,
        VecZnx<A>: VecZnxToRef,
    {
@@ -242,143 +340,4 @@ impl<C> GRLWECt<C, FFT64> {
        module.vec_znx_big_normalize(log_base2k, res, 0, &res_big, 0, scratch1);
        module.vec_znx_big_normalize(log_base2k, res, 1, &res_big, 1, scratch1);
    }

    pub fn mul_rlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnx<R>: VecZnxToMut + VecZnxToRef,
    {
        unsafe {
            let res_ptr: *mut RLWECt<R> = res as *mut RLWECt<R>; // This is ok because [Self::mul_rlwe] only updates res at the end.
            self.mul_rlwe(&module, &mut *res_ptr, &*res_ptr, scratch);
        }
    }

    pub fn mul_rlwe_dft<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut RLWECtDft<R, FFT64>,
        a: &RLWECtDft<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<A, FFT64>: VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        let log_base2k: usize = self.log_base2k();

        #[cfg(debug_assertions)]
        {
            assert_eq!(res.log_base2k(), log_base2k);
            assert_eq!(self.n(), module.n());
            assert_eq!(res.n(), module.n());
        }

        let (a_data, scratch_1) = scratch.tmp_vec_znx(module, 2, a.size());

        let mut a_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: a_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        a.idft(module, &mut a_idft, scratch_1);

        let (res_data, scratch_2) = scratch_1.tmp_vec_znx(module, 2, res.size());

        let mut res_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: res_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        self.mul_rlwe(module, &mut res_idft, &a_idft, scratch_2);

        module.vec_znx_dft(res, 0, &res_idft, 0);
        module.vec_znx_dft(res, 1, &res_idft, 1);
    }

    pub fn mul_rlwe_dft_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECtDft<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        let log_base2k: usize = self.log_base2k();

        #[cfg(debug_assertions)]
        {
            assert_eq!(res.log_base2k(), log_base2k);
            assert_eq!(self.n(), module.n());
            assert_eq!(res.n(), module.n());
        }

        let (res_data, scratch_1) = scratch.tmp_vec_znx(module, 2, res.size());

        let mut res_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: res_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        res.idft(module, &mut res_idft, scratch_1);

        self.mul_rlwe_inplace(module, &mut res_idft, scratch_1);

        module.vec_znx_dft(res, 0, &res_idft, 0);
        module.vec_znx_dft(res, 1, &res_idft, 1);
    }

    pub fn mul_grlwe<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut GRLWECt<R, FFT64>,
        a: &GRLWECt<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<A, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, a.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        let min_rows: usize = res.rows().min(a.rows());

        (0..min_rows).for_each(|row_i| {
            a.get_row(module, row_i, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, &tmp_row);
        });

        tmp_row.data.zero();

        (min_rows..res.rows()).for_each(|row_i| {
            res.set_row(module, row_i, &tmp_row);
        })
    }

    pub fn mul_grlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut GRLWECt<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, res.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        (0..res.rows()).for_each(|row_i| {
            res.get_row(module, row_i, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, &tmp_row);
        });
    }
}
@@ -7,7 +7,7 @@ use base2k::{
use sampling::source::Source;

use crate::{
    elem::Infos,
    elem::{GetRow, Infos, MatZnxDftProducts, SetRow},
    elem_grlwe::GRLWECt,
    elem_rlwe::{RLWECt, RLWECtDft, RLWEPt, encrypt_rlwe_sk},
    keys::SecretKeyDft,
@@ -110,30 +110,6 @@ impl RGSWCt<Vec<u8>, FFT64> {
    }
}

impl<C> RGSWCt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
{
    pub fn get_row<R>(&self, module: &Module<FFT64>, row_i: usize, col_j: usize, res: &mut RLWECtDft<R, FFT64>)
    where
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64>,
    {
        module.vmp_extract_row(res, self, row_i, col_j);
    }
}

impl<C> RGSWCt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToMut<FFT64>,
{
    pub fn set_row<R>(&mut self, module: &Module<FFT64>, row_i: usize, col_j: usize, a: &RLWECtDft<R, FFT64>)
    where
        VecZnxDft<R, FFT64>: VecZnxDftToRef<FFT64>,
    {
        module.vmp_prepare_row(self, row_i, col_j, a);
    }
}

pub fn encrypt_rgsw_sk<C, P, S>(
    module: &Module<FFT64>,
    ct: &mut RGSWCt<C, FFT64>,
@@ -221,6 +197,96 @@ impl<C> RGSWCt<C, FFT64> {
    }

    pub fn mul_rlwe<R, A>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, a: &RLWECt<A>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
        VecZnx<R>: VecZnxToMut,
        VecZnx<A>: VecZnxToRef,
    {
        MatZnxDftProducts::mul_rlwe(self, module, res, a, scratch);
    }

    pub fn mul_rlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnx<R>: VecZnxToMut + VecZnxToRef,
    {
        MatZnxDftProducts::mul_rlwe_inplace(self, module, res, scratch);
    }

    pub fn mul_rlwe_dft<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut RLWECtDft<R, FFT64>,
        a: &RLWECtDft<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<A, FFT64>: VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        MatZnxDftProducts::mul_rlwe_dft(self, module, res, a, scratch);
    }

    pub fn mul_rlwe_dft_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECtDft<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToRef<FFT64> + VecZnxDftToMut<FFT64>,
    {
        MatZnxDftProducts::mul_rlwe_dft_inplace(self, module, res, scratch);
    }

    pub fn mul_grlwe<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut GRLWECt<R, FFT64>,
        a: &GRLWECt<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<A, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        MatZnxDftProducts::mul_grlwe(self, module, res, a, scratch);
    }

    pub fn mul_grlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut R, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        R: GetRow<FFT64> + SetRow<FFT64> + Infos,
    {
        MatZnxDftProducts::mul_grlwe_inplace(self, module, res, scratch);
    }
}

impl<C> GetRow<FFT64> for RGSWCt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
{
    fn get_row<R>(&self, module: &Module<FFT64>, row_i: usize, col_j: usize, res: &mut RLWECtDft<R, FFT64>)
    where
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64>,
    {
        module.vmp_extract_row(res, self, row_i, col_j);
    }
}

impl<C> SetRow<FFT64> for RGSWCt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToMut<FFT64>,
{
    fn set_row<R>(&mut self, module: &Module<FFT64>, row_i: usize, col_j: usize, a: &RLWECtDft<R, FFT64>)
    where
        VecZnxDft<R, FFT64>: VecZnxDftToRef<FFT64>,
    {
        module.vmp_prepare_row(self, row_i, col_j, a);
    }
}

impl<C> MatZnxDftProducts<RGSWCt<C, FFT64>, C> for RGSWCt<C, FFT64>
where
    MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
{
    fn mul_rlwe<R, A>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, a: &RLWECt<A>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64>,
        VecZnx<R>: VecZnxToMut,
@@ -251,205 +317,4 @@ impl<C> RGSWCt<C, FFT64> {
        module.vec_znx_big_normalize(log_base2k, res, 0, &res_big, 0, scratch1);
        module.vec_znx_big_normalize(log_base2k, res, 1, &res_big, 1, scratch1);
    }

    pub fn mul_rlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECt<R>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnx<R>: VecZnxToMut + VecZnxToRef,
    {
        unsafe {
            let res_ptr: *mut RLWECt<R> = res as *mut RLWECt<R>; // This is ok because [Self::mul_rlwe] only updates res at the end.
            self.mul_rlwe(&module, &mut *res_ptr, &*res_ptr, scratch);
        }
    }

    pub fn mul_rlwe_dft<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut RLWECtDft<R, FFT64>,
        a: &RLWECtDft<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<A, FFT64>: VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        let log_base2k: usize = self.log_base2k();

        #[cfg(debug_assertions)]
        {
            assert_eq!(res.log_base2k(), log_base2k);
            assert_eq!(self.n(), module.n());
            assert_eq!(res.n(), module.n());
        }

        let (a_data, scratch_1) = scratch.tmp_vec_znx(module, 2, a.size());

        let mut a_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: a_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        a.idft(module, &mut a_idft, scratch_1);

        let (res_data, scratch_2) = scratch_1.tmp_vec_znx(module, 2, res.size());

        let mut res_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: res_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        self.mul_rlwe(module, &mut res_idft, &a_idft, scratch_2);

        module.vec_znx_dft(res, 0, &res_idft, 0);
        module.vec_znx_dft(res, 1, &res_idft, 1);
    }

    pub fn mul_rlwe_dft_inplace<R>(&self, module: &Module<FFT64>, res: &mut RLWECtDft<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        VecZnxDft<R, FFT64>: VecZnxDftToMut<FFT64> + VecZnxDftToRef<FFT64> + ZnxInfos,
    {
        let log_base2k: usize = self.log_base2k();

        #[cfg(debug_assertions)]
        {
            assert_eq!(res.log_base2k(), log_base2k);
            assert_eq!(self.n(), module.n());
            assert_eq!(res.n(), module.n());
        }

        let (res_data, scratch_1) = scratch.tmp_vec_znx(module, 2, res.size());

        let mut res_idft: RLWECt<&mut [u8]> = RLWECt::<&mut [u8]> {
            data: res_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        res.idft(module, &mut res_idft, scratch_1);

        self.mul_rlwe_inplace(module, &mut res_idft, scratch_1);

        module.vec_znx_dft(res, 0, &res_idft, 0);
        module.vec_znx_dft(res, 1, &res_idft, 1);
    }

    pub fn mul_grlwe<R, A>(
        &self,
        module: &Module<FFT64>,
        res: &mut GRLWECt<R, FFT64>,
        a: &GRLWECt<A, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<A, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, a.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        let min_rows: usize = res.rows().min(a.rows());

        (0..min_rows).for_each(|row_i| {
            a.get_row(module, row_i, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, &tmp_row);
        });

        tmp_row.data.zero();

        (min_rows..res.rows()).for_each(|row_i| {
            res.set_row(module, row_i, &tmp_row);
        })
    }

    pub fn mul_grlwe_inplace<R>(&self, module: &Module<FFT64>, res: &mut GRLWECt<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, res.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        (0..res.rows()).for_each(|row_i| {
            res.get_row(module, row_i, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, &tmp_row);
        });
    }

    pub fn mul_rgsw<R, A>(&self, module: &Module<FFT64>, res: &mut RGSWCt<R, FFT64>, a: &RGSWCt<A, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<A, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, a.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: a.log_base2k(),
            log_k: a.log_k(),
        };

        let min_rows: usize = res.rows().min(a.rows());

        (0..min_rows).for_each(|row_i| {
            a.get_row(module, row_i, 0, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, 0, &tmp_row);
        });

        (0..min_rows).for_each(|row_i| {
            a.get_row(module, row_i, 1, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, 1, &tmp_row);
        });

        tmp_row.data.zero();

        (min_rows..res.rows()).for_each(|row_i| {
            res.set_row(module, row_i, 0, &tmp_row);
            res.set_row(module, row_i, 1, &tmp_row);
        })
    }

    pub fn mul_rgsw_inplace<R>(&self, module: &Module<FFT64>, res: &mut RGSWCt<R, FFT64>, scratch: &mut Scratch)
    where
        MatZnxDft<C, FFT64>: MatZnxDftToRef<FFT64> + ZnxInfos,
        MatZnxDft<R, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64> + ZnxInfos,
    {
        let (tmp_row_data, scratch1) = scratch.tmp_vec_znx_dft(module, 2, res.size());

        let mut tmp_row: RLWECtDft<&mut [u8], FFT64> = RLWECtDft::<&mut [u8], FFT64> {
            data: tmp_row_data,
            log_base2k: res.log_base2k(),
            log_k: res.log_k(),
        };

        (0..res.rows()).for_each(|row_i| {
            res.get_row(module, row_i, 0, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, 0, &tmp_row);
        });

        (0..res.rows()).for_each(|row_i| {
            res.get_row(module, row_i, 1, &mut tmp_row);
            self.mul_rlwe_dft_inplace(module, &mut tmp_row, scratch1);
            res.set_row(module, row_i, 1, &tmp_row);
        });
    }
}
@@ -7,7 +7,7 @@ mod tests {
    use sampling::source::Source;

    use crate::{
        elem::Infos,
        elem::{GetRow, Infos},
        elem_rgsw::RGSWCt,
        elem_rlwe::{RLWECt, RLWECtDft, RLWEPt},
        keys::{SecretKey, SecretKeyDft},
@@ -117,9 +117,9 @@ mod tests {

        pt_want.to_mut().at_mut(0, 0)[1] = 1;

        let r: usize = 1;
        let k: usize = 1;

        pt_rgsw.raw_mut()[r] = 1; // X^{r}
        pt_rgsw.raw_mut()[k] = 1; // X^{k}

        let mut scratch: ScratchOwned = ScratchOwned::new(
            RGSWCt::encrypt_sk_scratch_space(&module, ct_rgsw.size())
@@ -165,7 +165,7 @@ mod tests {

        ct_rlwe_out.decrypt(&module, &mut pt_have, &sk_dft, scratch.borrow());

        module.vec_znx_rotate_inplace(r as i64, &mut pt_want, 0);
        module.vec_znx_rotate_inplace(k as i64, &mut pt_want, 0);

        module.vec_znx_sub_ab_inplace(&mut pt_have, 0, &pt_want, 0);