wip
@@ -3,13 +3,8 @@ use std::marker::PhantomData;
 use poulpy_hal::{
     DEFAULTALIGN, alloc_aligned,
     api::ScratchFromBytes,
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
-        TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
-        TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
-        VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
-    },
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };

 use crate::cpu_fft64_avx::FFT64Avx;
@@ -64,178 +59,6 @@ where
     }
 }
-
-unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::bytes_of(n, cols));
-        (
-            ScalarZnx::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Avx
-where
-    B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_bytes_of_impl(n, cols));
-        (
-            SvpPPol::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::bytes_of(n, cols, size));
-        (
-            VecZnx::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Avx
-where
-    B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_big_bytes_of_impl(n, cols, size),
-        );
-        (
-            VecZnxBig::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Avx
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_dft_bytes_of_impl(n, cols, size),
-        );
-
-        (
-            VecZnxDft::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Avx
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Avx
-where
-    B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vmp_pmat_bytes_of_impl(n, rows, cols_in, cols_out, size),
-        );
-        (
-            VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            MatZnx::bytes_of(n, rows, cols_in, cols_out, size),
-        );
-        (
-            MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
-    let ptr: *mut u8 = data.as_mut_ptr();
-    let self_len: usize = data.len();
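
Note: every Take*Impl removed above follows the same pattern: carve an aligned, correctly-sized prefix off the scratch buffer, wrap it in a typed view, and hand back the remainder as a fresh Scratch. A minimal stand-alone sketch of that pattern (hypothetical names; ALIGN stands in for DEFAULTALIGN, and none of this is poulpy_hal's actual API):

// Sketch of the scratch "take" pattern used by the removed impls.
const ALIGN: usize = 64; // assumed alignment, analogous to DEFAULTALIGN

/// Split `data` into an ALIGN-aligned `take_len`-byte prefix and the rest.
fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
    // Skip bytes until the start pointer is ALIGN-aligned.
    let offset = data.as_ptr().align_offset(ALIGN);
    assert!(offset + take_len <= data.len(), "scratch exhausted");
    let (_, rest) = data.split_at_mut(offset);
    rest.split_at_mut(take_len)
}

fn main() {
    let mut scratch = vec![0u8; 1024];
    // "Take" a 128-byte region; `rem` stays available for further takes.
    let (buf, rem) = take_slice_aligned(&mut scratch, 128);
    assert_eq!(buf.len(), 128);
    assert_eq!(buf.as_ptr() as usize % ALIGN, 0);
    assert!(rem.len() >= 1024 - 128 - ALIGN);
}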
@@ -3,13 +3,8 @@ use std::marker::PhantomData;
 use poulpy_hal::{
     DEFAULTALIGN, alloc_aligned,
     api::ScratchFromBytes,
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
-        TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
-        TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
-        VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
-    },
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };

 use crate::cpu_fft64_ref::FFT64Ref;
@@ -64,178 +59,6 @@ where
     }
 }
-
-unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::bytes_of(n, cols));
-        (
-            ScalarZnx::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Ref
-where
-    B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_bytes_of_impl(n, cols));
-        (
-            SvpPPol::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::bytes_of(n, cols, size));
-        (
-            VecZnx::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Ref
-where
-    B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_big_bytes_of_impl(n, cols, size),
-        );
-        (
-            VecZnxBig::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Ref
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_dft_bytes_of_impl(n, cols, size),
-        );
-
-        (
-            VecZnxDft::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Ref
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Ref
-where
-    B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vmp_pmat_bytes_of_impl(n, rows, cols_in, cols_out, size),
-        );
-        (
-            VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            MatZnx::bytes_of(n, rows, cols_in, cols_out, size),
-        );
-        (
-            MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
-    let ptr: *mut u8 = data.as_mut_ptr();
-    let self_len: usize = data.len();
@@ -3,13 +3,8 @@ use std::marker::PhantomData;
 use poulpy_hal::{
     DEFAULTALIGN, alloc_aligned,
     api::ScratchFromBytes,
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
-        TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
-        TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
-        VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
-    },
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };

 use crate::cpu_spqlios::FFT64Spqlios;
@@ -64,178 +59,6 @@ where
     }
 }
-
-unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::bytes_of(n, cols));
-        (
-            ScalarZnx::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Spqlios
-where
-    B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_bytes_of_impl(n, cols));
-        (
-            SvpPPol::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::bytes_of(n, cols, size));
-        (
-            VecZnx::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Spqlios
-where
-    B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_big_bytes_of_impl(n, cols, size),
-        );
-        (
-            VecZnxBig::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Spqlios
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_dft_bytes_of_impl(n, cols, size),
-        );
-
-        (
-            VecZnxDft::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Spqlios
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Spqlios
-where
-    B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vmp_pmat_bytes_of_impl(n, rows, cols_in, cols_out, size),
-        );
-        (
-            VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            MatZnx::bytes_of(n, rows, cols_in, cols_out, size),
-        );
-        (
-            MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
-    let ptr: *mut u8 = data.as_mut_ptr();
-    let self_len: usize = data.len();
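
Note: the same 170-odd lines of Take*Impl code are deleted verbatim from FFT64Avx, FFT64Ref, and FFT64Spqlios, and each file's oep import shrinks to little more than TakeSliceImpl. That suggests the per-backend impls are being folded into one generic path; a speculative sketch of the idea (hypothetical traits and types, not necessarily the shape poulpy_hal ends up with):

// One generic "take" over any scratch that can hand out byte slices,
// instead of repeating the impl for every FFT64* backend.
trait TakeSlice {
    fn take_slice(&mut self, len: usize) -> &mut [u8];
}

struct Scratch {
    buf: Vec<u8>,
    pos: usize,
}

impl TakeSlice for Scratch {
    fn take_slice(&mut self, len: usize) -> &mut [u8] {
        let start = self.pos;
        self.pos += len;
        &mut self.buf[start..start + len]
    }
}

struct ScalarZnx<'a> {
    data: &'a mut [u8],
    n: usize,
    cols: usize,
}

impl<'a> ScalarZnx<'a> {
    fn bytes_of(n: usize, cols: usize) -> usize {
        n * cols * 8 // one i64 coefficient per slot (illustrative layout)
    }
}

// Written once, works for every backend whose scratch implements TakeSlice.
fn take_scalar_znx(scratch: &mut impl TakeSlice, n: usize, cols: usize) -> ScalarZnx<'_> {
    let data = scratch.take_slice(ScalarZnx::bytes_of(n, cols));
    ScalarZnx { data, n, cols }
}

fn main() {
    let mut scratch = Scratch { buf: vec![0u8; 1 << 12], pos: 0 };
    let view = take_scalar_znx(&mut scratch, 8, 2);
    assert_eq!(view.data.len(), 8 * 2 * 8);
    assert_eq!((view.n, view.cols), (8, 2));
}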
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
+        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume,
         VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
@@ -20,7 +20,7 @@ impl AutomorphismKey<Vec<u8>> {
         OUT: GGLWEInfos,
         IN: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::keyswitch_scratch_space(
             module,
@@ -34,7 +34,7 @@ impl AutomorphismKey<Vec<u8>> {
     where
         OUT: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         AutomorphismKey::automorphism_scratch_space(module, out_infos, out_infos, key_infos)
     }
@@ -48,7 +48,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -136,7 +136,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
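
Note: from this file onward the commit is a mechanical rename at call sites: the capability traits move from the *AllocBytes naming to *BytesOf (VecZnxDftAllocBytes -> VecZnxDftBytesOf, VecZnxBigAllocBytes -> VecZnxBigBytesOf, SvpPPolAllocBytes -> SvpPPolBytesOf), and the query methods from vec_znx_dft_bytes_of(...) to bytes_of_vec_znx_dft(...). A stand-alone illustration of the pattern after the rename (local stand-in types; only the trait and method names are taken from the + lines of this diff):

// Capability trait as named on the + lines; the body is illustrative.
trait VecZnxDftBytesOf {
    fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize;
}

struct Module {
    n: usize, // ring degree
}

impl VecZnxDftBytesOf for Module {
    fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize {
        // assumed layout: one f64 per coefficient, per column, per limb
        self.n * cols * size * core::mem::size_of::<f64>()
    }
}

// Generic code now bounds on the *BytesOf trait, exactly as the + lines do.
fn keyswitch_scratch<M: VecZnxDftBytesOf>(module: &M, cols: usize, size: usize) -> usize {
    module.bytes_of_vec_znx_dft(cols, size)
}

fn main() {
    let module = Module { n: 1 << 10 };
    assert_eq!(keyswitch_scratch(&module, 2, 3), 1024 * 2 * 3 * 8);
}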
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes,
-        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
@@ -26,14 +26,11 @@ impl GGSW<Vec<u8>> {
         IN: GGSWInfos,
         KEY: GGLWEInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
+        Module<B>:
+            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         let out_size: usize = out_infos.size();
-        let ci_dft: usize = module.vec_znx_dft_bytes_of((key_infos.rank_out() + 1).into(), out_size);
+        let ci_dft: usize = module.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
         let ks_internal: usize = GLWE::keyswitch_scratch_space(
             module,
             &out_infos.glwe_layout(),
@@ -54,11 +51,8 @@ impl GGSW<Vec<u8>> {
         OUT: GGSWInfos,
         KEY: GGLWEInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
+        Module<B>:
+            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         GGSW::automorphism_scratch_space(module, out_infos, out_infos, key_infos, tsk_infos)
     }
@@ -73,7 +67,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tensor_key: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -83,7 +77,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxAutomorphismInplace<B>
-           + VecZnxBigAllocBytes
+           + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
@@ -141,7 +135,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tensor_key: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -151,7 +145,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
           + VecZnxBigAddSmallInplace<B>
           + VecZnxBigNormalize<B>
           + VecZnxAutomorphismInplace<B>
-          + VecZnxBigAllocBytes
+          + VecZnxBigBytesOf
          + VecZnxNormalizeTmpBytes
          + VecZnxDftCopy<B>
          + VecZnxDftAddInplace<B>
@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
         VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace,
-        VecZnxBigSubSmallNegateInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxBigSubSmallNegateInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
         VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig},
@@ -21,7 +21,7 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         Self::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
     }
@@ -30,7 +30,7 @@ impl GLWE<Vec<u8>> {
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         Self::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
     }
@@ -44,7 +44,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -70,7 +70,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -97,7 +97,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -138,7 +138,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -180,7 +180,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -222,7 +222,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -265,7 +265,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -307,7 +307,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -1,14 +1,14 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 };

 use crate::{
-    TakeGLWECt,
+    TakeGLWE,
     layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared},
 };

@@ -23,7 +23,7 @@ impl LWE<Vec<u8>> {
         OUT: LWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         let glwe_layout: GLWELayout = GLWELayout {
             n: module.n().into(),
@@ -69,7 +69,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
     ) where
         DGlwe: DataRef,
         DKs: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -80,7 +80,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
            + VecZnxBigNormalize<B>
            + VecZnxNormalize<B>
            + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx,
+        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWE + TakeVecZnx,
     {
         #[cfg(debug_assertions)]
         {
@@ -1,14 +1,14 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
 };

 use crate::{
-    TakeGLWECt,
+    TakeGLWE,
     layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared},
 };

@@ -23,7 +23,7 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: LWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         let ct: usize = GLWE::bytes_of(
             module.n().into(),
@@ -51,7 +51,7 @@ impl<D: DataMut> GLWE<D> {
     ) where
         DLwe: DataRef,
         DKsk: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -62,7 +62,7 @@ impl<D: DataMut> GLWE<D> {
            + VecZnxBigNormalize<B>
            + VecZnxNormalize<B>
            + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx,
+        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWE + TakeVecZnx,
     {
         #[cfg(debug_assertions)]
         {
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
         SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch},
 };
@@ -12,10 +12,10 @@ impl GLWE<Vec<u8>> {
     pub fn decrypt_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size: usize = infos.size();
-        (module.vec_znx_normalize_tmp_bytes() | module.vec_znx_dft_bytes_of(1, size)) + module.vec_znx_dft_bytes_of(1, size)
+        (module.vec_znx_normalize_tmp_bytes() | module.bytes_of_vec_znx_dft(1, size)) + module.bytes_of_vec_znx_dft(1, size)
     }
 }

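
Note on the formula above: the `|` between byte counts is plain usize bit-or, not a typo. For any a and b, a | b is at least max(a, b) and at most a + b, so it is a cheap upper bound for two scratch regions that are never live at the same time, while `+` is reserved for regions that coexist. (This reading is inferred from the formula's shape, not stated in the hunk.) A quick check:

fn main() {
    let (a, b): (usize, usize) = (1000, 768);
    // Every bit set in a or in b is set in a | b, hence (a | b) >= max(a, b).
    assert!((a | b) >= a.max(b));
    // And bit-or never exceeds the sum: (a | b) <= a + b.
    assert!((a | b) <= a + b);
    println!("max = {}, a|b = {}, a+b = {}", a.max(b), a | b, a + b);
}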
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{ScratchAvailable, SvpPPolAllocBytes, VecZnxAutomorphism, VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes},
+    api::{ScratchAvailable, SvpPPolBytesOf, VecZnxAutomorphism, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
 };
@@ -17,7 +17,7 @@ impl AutomorphismKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
         assert_eq!(module.n() as u32, infos.n());
         GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
@@ -40,8 +40,7 @@ pub trait GGLWEAutomorphismKeyCompressedEncryptSk<B: Backend> {

 impl<B: Backend> GGLWEAutomorphismKeyCompressedEncryptSk<B> for Module<B>
 where
-    Module<B>:
-        GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxAutomorphism,
+    Module<B>: GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxAutomorphism,
     Scratch<B>: TakeGLWESecret + ScratchAvailable,
 {
     fn gglwe_automorphism_key_compressed_encrypt_sk<R, S>(
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
+        ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
         ZnNormalizeInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
@@ -8,7 +8,7 @@ use poulpy_hal::{
 };

 use crate::{
-    TakeGLWEPt,
+    TakeGLWEPlaintext,
     encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
     layouts::{
         GGLWE, GGLWEInfos, LWEInfos,
@@ -38,7 +38,7 @@ impl GGLWECompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GGLWE::encrypt_sk_scratch_space(module, infos)
     }
@@ -64,10 +64,10 @@ where
     Module<B>: GLWEEncryptSkInternal<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxNormalizeTmpBytes
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxAddScalarInplace
         + ZnNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPt<B> + ScratchAvailable,
+    Scratch<B>: TakeGLWEPlaintext<B> + ScratchAvailable,
 {
     fn gglwe_compressed_encrypt_sk<R, P, S>(
         &self,
@@ -1,7 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes,
-        VecZnxSwitchRing,
+        ScratchAvailable, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,
@@ -21,7 +20,7 @@ impl GLWESwitchingKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
         (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
@@ -64,9 +63,9 @@ pub trait GGLWEKeyCompressedEncryptSk<B: Backend> {
 impl<B: Backend> GGLWEKeyCompressedEncryptSk<B> for Module<B>
 where
     Module<B>: GGLWECompressedEncryptSk<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxNormalizeTmpBytes
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxSwitchRing
         + SvpPrepare<B>,
     Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeGLWESecretPrepared<B>,
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
-        SvpApplyDftToDft, SvpPPolAllocBytes, SvpPrepare, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAllocBytes, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
+        SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigBytesOf, VecZnxBigNormalize,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
@@ -20,8 +20,7 @@ impl TensorKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>:
-            SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
     {
         TensorKey::encrypt_sk_scratch_space(module, infos)
     }
@@ -1,11 +1,11 @@
 use poulpy_hal::{
-    api::{VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
+    api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
     source::Source,
 };

 use crate::{
-    TakeGLWEPt,
+    TakeGLWEPlaintext,
     encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
     layouts::{
         GGSW, GGSWInfos, GLWEInfos, LWEInfos,
@@ -18,7 +18,7 @@ impl GGSWCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         GGSW::encrypt_sk_scratch_space(module, infos)
     }
@@ -42,7 +42,7 @@ pub trait GGSWCompressedEncryptSk<B: Backend> {
 impl<B: Backend> GGSWCompressedEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPt<B>,
+    Scratch<B>: TakeGLWEPlaintext<B>,
 {
     fn ggsw_compressed_encrypt_sk<R, P, S>(
         &self,
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes},
+    api::{VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
 };
@@ -17,7 +17,7 @@ impl GLWECompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         GLWE::encrypt_sk_scratch_space(module, infos)
     }
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, Module, Scratch},
@@ -20,7 +20,7 @@ impl AutomorphismKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<BE>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<BE>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         assert_eq!(
             infos.rank_in(),
@@ -80,7 +80,7 @@ where
 impl<BE: Backend> GGLWEAutomorphismKeyEncryptSk<BE> for Module<BE>
 where
     Module<BE>: VecZnxAddScalarInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<BE>
         + VecZnxDftApply<BE>
         + SvpApplyDftToDftInplace<BE>
@@ -95,7 +95,7 @@ where
         + VecZnxSub
         + SvpPrepare<BE>
         + VecZnxSwitchRing
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxAutomorphism,
     Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<BE>,
 {
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
@@ -8,7 +8,7 @@ use poulpy_hal::{
 };

 use crate::{
-    TakeGLWEPt,
+    TakeGLWEPlaintext,
     encryption::glwe_ct::GLWEEncryptSk,
     layouts::{
         GGLWE, GGLWEInfos, GGLWEToMut, GLWE, GLWEPlaintext, LWEInfos,
@@ -20,7 +20,7 @@ impl GGLWE<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
             + (GLWEPlaintext::bytes_of(&infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
@@ -51,8 +51,7 @@ pub trait GGLWEEncryptSk<B: Backend> {

 impl<B: Backend> GGLWEEncryptSk<B> for Module<B>
 where
-    Module<B>:
-        GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
+    Module<B>: GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
     Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     fn gglwe_encrypt_sk<R, P, S>(
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
         VecZnxSubInplace, VecZnxSwitchRing,
     },
@@ -18,7 +18,7 @@ impl GLWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
@@ -45,7 +45,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         scratch: &mut Scratch<B>,
     ) where
         Module<B>: VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
            + VecZnxBigNormalize<B>
            + VecZnxDftApply<B>
            + SvpApplyDftToDftInplace<B>
@@ -60,7 +60,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
            + VecZnxSub
            + SvpPrepare<B>
            + VecZnxSwitchRing
-           + SvpPPolAllocBytes,
+           + SvpPPolBytesOf,
         Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
     {
         #[cfg(debug_assertions)]
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx,
-        TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes,
-        VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx,
+        TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigBytesOf,
+        VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
@@ -20,13 +20,12 @@ impl TensorKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>:
-            SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
     {
         GLWESecretPrepared::bytes_of(module, infos.rank_out())
-            + module.vec_znx_dft_bytes_of(infos.rank_out().into(), 1)
-            + module.vec_znx_big_bytes_of(1, 1)
-            + module.vec_znx_dft_bytes_of(1, 1)
+            + module.bytes_of_vec_znx_dft(infos.rank_out().into(), 1)
+            + module.bytes_of_vec_znx_big(1, 1)
+            + module.bytes_of_vec_znx_dft(1, 1)
             + GLWESecret::bytes_of(Degree(module.n() as u32), Rank(1))
             + GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
     }
@@ -44,7 +43,7 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
         Module<B>: SvpApplyDftToDft<B>
             + VecZnxIdftApplyTmpA<B>
             + VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
            + VecZnxBigNormalize<B>
            + VecZnxDftApply<B>
            + SvpApplyDftToDftInplace<B>
@@ -59,7 +58,7 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
            + VecZnxSub
            + SvpPrepare<B>
            + VecZnxSwitchRing
-           + SvpPPolAllocBytes,
+           + SvpPPolBytesOf,
         Scratch<B>:
             TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B> + TakeVecZnxBig<B>,
     {
@@ -1,11 +1,11 @@
 use poulpy_hal::{
-    api::{VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
+    api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, ZnxZero},
     source::Source,
 };

 use crate::{
-    SIGMA, TakeGLWEPt,
+    SIGMA, TakeGLWEPlaintext,
     encryption::glwe_ct::GLWEEncryptSkInternal,
     layouts::{
         GGSW, GGSWInfos, GGSWToMut, GLWE, GLWEInfos, LWEInfos,
@@ -17,13 +17,13 @@ impl GGSW<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size = infos.size();
         GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
             + VecZnx::bytes_of(module.n(), (infos.rank() + 1).into(), size)
             + VecZnx::bytes_of(module.n(), 1, size)
-            + module.vec_znx_dft_bytes_of((infos.rank() + 1).into(), size)
+            + module.bytes_of_vec_znx_dft((infos.rank() + 1).into(), size)
     }
 }
@@ -45,7 +45,7 @@ pub trait GGSWEncryptSk<B: Backend> {
 impl<B: Backend> GGSWEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPt<B>,
+    Scratch<B>: TakeGLWEPlaintext<B>,
 {
     fn ggsw_encrypt_sk<R, P, S>(
         &self,
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSvpPPol,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeSvpPPol,
         TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, VecZnxToMut, ZnxInfos, ZnxZero},
@@ -22,21 +22,21 @@ impl GLWE<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size: usize = infos.size();
         assert_eq!(module.n() as u32, infos.n());
-        module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.vec_znx_dft_bytes_of(1, size)
+        module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.bytes_of_vec_znx_dft(1, size)
     }

     pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
     {
         let size: usize = infos.size();
         assert_eq!(module.n() as u32, infos.n());
-        ((module.vec_znx_dft_bytes_of(1, size) + module.vec_znx_big_bytes_of(1, size)) | ScalarZnx::bytes_of(module.n(), 1))
-            + module.svp_ppol_bytes_of(1)
+        ((module.bytes_of_vec_znx_dft(1, size) + module.bytes_of_vec_znx_big(1, size)) | ScalarZnx::bytes_of(module.n(), 1))
+            + module.bytes_of_svp_ppol(1)
             + module.vec_znx_normalize_tmp_bytes()
     }
 }
@@ -120,7 +120,7 @@ pub trait GLWEEncryptSk<B: Backend> {

 impl<B: Backend> GLWEEncryptSk<B> for Module<B>
 where
-    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     Scratch<B>: ScratchAvailable,
 {
     fn glwe_encrypt_sk<R, P, S>(
@@ -186,7 +186,7 @@ pub trait GLWEEncryptZeroSk<B: Backend> {

 impl<B: Backend> GLWEEncryptZeroSk<B> for Module<B>
 where
-    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     Scratch<B>: ScratchAvailable,
 {
     fn glwe_encrypt_zero_sk<R, S>(
@@ -440,7 +440,7 @@ pub(crate) trait GLWEEncryptSkInternal<B: Backend> {

 impl<B: Backend> GLWEEncryptSkInternal<B> for Module<B>
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
        + VecZnxBigNormalize<B>
        + VecZnxDftApply<B>
        + SvpApplyDftToDftInplace<B>
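
Note: the scratch-space expressions in this file add the footprints of temporaries that are live simultaneously. Plugging assumed numbers into the encrypt_sk formula above (n = 1024, size = 2 limbs, 8-byte words, and a DFT buffer with the same footprint as a VecZnx column -- all assumptions for illustration, not the crate's real accounting):

fn main() {
    let n = 1024; // ring degree (assumed)
    let size = 2; // number of limbs (assumed)
    let word = 8; // bytes per coefficient (assumed)

    let normalize_tmp = n * word; // stands in for vec_znx_normalize_tmp_bytes()
    let vec_znx = n * size * word; // stands in for VecZnx::bytes_of(n, 1, size)
    let vec_znx_dft = n * size * word; // stands in for bytes_of_vec_znx_dft(1, size)

    // Mirrors the + line: normalize_tmp + 2 * VecZnx + one DFT buffer.
    let total = normalize_tmp + 2 * vec_znx + vec_znx_dft;
    assert_eq!(total, 8192 + 2 * 16384 + 16384);
    println!("encrypt_sk scratch: {total} bytes");
}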
@@ -1,5 +1,5 @@
use poulpy_hal::{
    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes},
    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
    layouts::{Backend, DataMut, DataRef, Module, ScratchOwned},
    source::Source,
};
@@ -21,7 +21,7 @@ pub trait GLWEPublicKeyGenerate<B: Backend> {

impl<B: Backend> GLWEPublicKeyGenerate<B> for Module<B>
where
    Module<B>: GLWEEncryptZeroSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
    Module<B>: GLWEEncryptZeroSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
    ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
{
    fn glwe_public_key_generate<R, S>(&self, res: &mut R, sk: &S, source_xa: &mut Source, source_xe: &mut Source)

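// Illustrative sketch (not part of the diff): driving the trait above.
// `pk`, `sk`, `source_xa`, and `source_xe` are hypothetical values; the
// method name and argument order come from the signature in the hunk.
//
// module.glwe_public_key_generate(&mut pk, &sk, &mut source_xa, &mut source_xe);
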
@@ -1,8 +1,8 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
@@ -20,7 +20,7 @@ impl GLWEToLWESwitchingKey<Vec<u8>> {
    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
    where
        A: GGLWEInfos,
        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
    {
        GLWESecretPrepared::bytes_of(module, infos.rank_in())
            + (GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
@@ -42,7 +42,7 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
        DGlwe: DataRef,
        Module<B>: VecZnxAutomorphismInplace<B>
            + VecZnxAddScalarInplace
            + VecZnxDftAllocBytes
            + VecZnxDftBytesOf
            + VecZnxBigNormalize<B>
            + VecZnxDftApply<B>
            + SvpApplyDftToDftInplace<B>
@@ -57,7 +57,7 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
            + VecZnxSub
            + SvpPrepare<B>
            + VecZnxSwitchRing
            + SvpPPolAllocBytes,
            + SvpPPolBytesOf,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
    {
        #[cfg(debug_assertions)]

@@ -1,8 +1,8 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
@@ -21,7 +21,7 @@ impl LWESwitchingKey<Vec<u8>> {
    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
    where
        A: GGLWEInfos,
        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
    {
        debug_assert_eq!(
            infos.dsize().0,
@@ -59,7 +59,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
        DOut: DataRef,
        Module<B>: VecZnxAutomorphismInplace<B>
            + VecZnxAddScalarInplace
            + VecZnxDftAllocBytes
            + VecZnxDftBytesOf
            + VecZnxBigNormalize<B>
            + VecZnxDftApply<B>
            + SvpApplyDftToDftInplace<B>
@@ -74,7 +74,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
            + VecZnxSub
            + SvpPrepare<B>
            + VecZnxSwitchRing
            + SvpPPolAllocBytes,
            + SvpPPolBytesOf,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
    {
        #[cfg(debug_assertions)]

@@ -1,8 +1,8 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
@@ -18,7 +18,7 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
    where
        A: GGLWEInfos,
        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
    {
        debug_assert_eq!(
            infos.rank_in(),
@@ -45,7 +45,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
        DGlwe: DataRef,
        Module<B>: VecZnxAutomorphismInplace<B>
            + VecZnxAddScalarInplace
            + VecZnxDftAllocBytes
            + VecZnxDftBytesOf
            + VecZnxBigNormalize<B>
            + VecZnxDftApply<B>
            + SvpApplyDftToDftInplace<B>
@@ -60,7 +60,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
            + VecZnxSub
            + SvpPrepare<B>
            + VecZnxSwitchRing
            + SvpPPolAllocBytes,
            + SvpPPolBytesOf,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
    {
        #[cfg(debug_assertions)]

@@ -1,6 +1,6 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
        VmpApplyDftToDftTmpBytes,
    },
@@ -20,7 +20,7 @@ impl AutomorphismKey<Vec<u8>> {
        OUT: GGLWEInfos,
        IN: GGLWEInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWESwitchingKey::external_product_scratch_space(module, out_infos, in_infos, ggsw_infos)
    }
@@ -33,7 +33,7 @@ impl AutomorphismKey<Vec<u8>> {
    where
        OUT: GGLWEInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWESwitchingKey::external_product_inplace_scratch_space(module, out_infos, ggsw_infos)
    }
@@ -47,7 +47,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>
@@ -67,7 +67,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>

@@ -1,6 +1,6 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
        VmpApplyDftToDftTmpBytes,
    },
@@ -20,7 +20,7 @@ impl GLWESwitchingKey<Vec<u8>> {
        OUT: GGLWEInfos,
        IN: GGLWEInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWE::external_product_scratch_space(
            module,
@@ -38,7 +38,7 @@ impl GLWESwitchingKey<Vec<u8>> {
    where
        OUT: GGLWEInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWE::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), ggsw_infos)
    }
@@ -52,7 +52,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>
@@ -110,7 +110,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>

@@ -1,6 +1,6 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
        VmpApplyDftToDftTmpBytes,
    },
@@ -21,7 +21,7 @@ impl GGSW<Vec<u8>> {
        OUT: GGSWInfos,
        IN: GGSWInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWE::external_product_scratch_space(
            module,
@@ -39,7 +39,7 @@ impl GGSW<Vec<u8>> {
    where
        OUT: GGSWInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWE::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), apply_infos)
    }
@@ -53,7 +53,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>
@@ -108,7 +108,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>

@@ -1,21 +1,22 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
        VmpApplyDftToDftTmpBytes,
        ScratchAvailable, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig},
};

use crate::layouts::{
    GGSWInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
    prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
use crate::{
    ScratchTakeCore,
    layouts::{
        GGSWInfos, GGSWToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetDegree, LWEInfos,
        prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
    },
};

impl GLWE<Vec<u8>> {
    #[allow(clippy::too_many_arguments)]
    pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
        module: &Module<B>,
impl<DataSelf: DataMut> GLWE<DataSelf> {
    pub fn external_product_scratch_space<OUT, IN, GGSW, B: Backend>(
        module: Module<B>,
        out_infos: &OUT,
        in_infos: &IN,
        apply_infos: &GGSW,
@@ -24,76 +25,35 @@ impl GLWE<Vec<u8>> {
        OUT: GLWEInfos,
        IN: GLWEInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: GLWEExternalProduct<B>,
    {
        let in_size: usize = in_infos
            .k()
            .div_ceil(apply_infos.base2k())
            .div_ceil(apply_infos.dsize().into()) as usize;
        let out_size: usize = out_infos.size();
        let ggsw_size: usize = apply_infos.size();
        let res_dft: usize = module.vec_znx_dft_bytes_of((apply_infos.rank() + 1).into(), ggsw_size);
        let a_dft: usize = module.vec_znx_dft_bytes_of((apply_infos.rank() + 1).into(), in_size);
        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
            out_size,
            in_size,
            in_size, // rows
            (apply_infos.rank() + 1).into(), // cols in
            (apply_infos.rank() + 1).into(), // cols out
            ggsw_size,
        );
        let normalize_big: usize = module.vec_znx_normalize_tmp_bytes();

        if in_infos.base2k() == apply_infos.base2k() {
            res_dft + a_dft + (vmp | normalize_big)
        } else {
            let normalize_conv: usize = VecZnx::bytes_of(module.n(), (apply_infos.rank() + 1).into(), in_size);
            res_dft + ((a_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
        }
        module.glwe_external_product_scratch_space(out_infos, in_infos, apply_infos)
    }

    pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
        module: &Module<B>,
        out_infos: &OUT,
        apply_infos: &GGSW,
    ) -> usize
    pub fn external_product<L, R, B: Backend>(&mut self, module: &Module<B>, lhs: &L, rhs: &R, scratch: &mut Scratch<B>)
    where
        OUT: GLWEInfos,
        GGSW: GGSWInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
    {
        Self::external_product_scratch_space(module, out_infos, out_infos, apply_infos)
    }
}

impl<DataSelf: DataMut> GLWE<DataSelf> {
    pub fn external_product<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        lhs: &GLWE<DataLhs>,
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        L: GLWEToRef,
        R: GGSWToRef,
        Module<B>: GLWEExternalProduct<B>,
        Scratch<B>: ScratchTakeCore<B>,
    {
        module.external_product(self, lhs, rhs, scratch);
        module.glwe_external_product(self, lhs, rhs, scratch);
    }

    pub fn external_product_inplace<DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        rhs: &GGSWPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
    pub fn external_product_inplace<R, B: Backend>(&mut self, module: &Module<B>, rhs: &R, scratch: &mut Scratch<B>)
    where
        R: GGSWToRef,
        Module<B>: GLWEExternalProduct<B>,
        Scratch<B>: ScratchTakeCore<B>,
    {
        module.external_product_inplace(self, rhs, scratch);
        module.glwe_external_product_inplace(self, rhs, scratch);
    }
}

pub trait GLWEExternalProduct<BE: Backend>
where
    Self: VecZnxDftAllocBytes
    Self: GetDegree
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxNormalizeTmpBytes
        + VecZnxDftApply<BE>
@@ -101,13 +61,49 @@ where
        + VmpApplyDftToDftAdd<BE>
        + VecZnxIdftApplyConsume<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxNormalize<BE>,
    Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx,
        + VecZnxNormalize<BE>
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxNormalizeTmpBytes,
{
    #[allow(clippy::too_many_arguments)]
    fn glwe_external_product_scratch_space<OUT, IN, GGSW>(&self, out_infos: &OUT, in_infos: &IN, apply_infos: &GGSW) -> usize
    where
        OUT: GLWEInfos,
        IN: GLWEInfos,
        GGSW: GGSWInfos,
    {
        let in_size: usize = in_infos
            .k()
            .div_ceil(apply_infos.base2k())
            .div_ceil(apply_infos.dsize().into()) as usize;
        let out_size: usize = out_infos.size();
        let ggsw_size: usize = apply_infos.size();
        let res_dft: usize = self.bytes_of_vec_znx_dft((apply_infos.rank() + 1).into(), ggsw_size);
        let a_dft: usize = self.bytes_of_vec_znx_dft((apply_infos.rank() + 1).into(), in_size);
        let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
            out_size,
            in_size,
            in_size, // rows
            (apply_infos.rank() + 1).into(), // cols in
            (apply_infos.rank() + 1).into(), // cols out
            ggsw_size,
        );
        let normalize_big: usize = self.vec_znx_normalize_tmp_bytes();

        if in_infos.base2k() == apply_infos.base2k() {
            res_dft + a_dft + (vmp | normalize_big)
        } else {
            let normalize_conv: usize = VecZnx::bytes_of(self.n().into(), (apply_infos.rank() + 1).into(), in_size);
            res_dft + ((a_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
        }
    }

    fn glwe_external_product_inplace<R, D>(&self, res: &mut R, ggsw: &D, scratch: &mut Scratch<BE>)
    where
        R: GLWEToMut,
        D: GGSWCiphertextPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
        let rhs: &GGSWPrepared<&[u8], BE> = &ggsw.to_ref();
@@ -121,7 +117,7 @@ where

        assert_eq!(rhs.rank(), res.rank());
        assert_eq!(rhs.n(), res.n());
        assert!(scratch.available() >= GLWE::external_product_inplace_scratch_space(self, res, rhs));
        assert!(scratch.available() >= self.glwe_external_product_scratch_space(res, res, rhs));
        }

        let cols: usize = (rhs.rank() + 1).into();
@@ -157,7 +153,7 @@ where
                }
            }
        } else {
            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n(), cols, a_size);
            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n().into(), cols, a_size);

            for j in 0..cols {
                self.vec_znx_normalize(
@@ -216,6 +212,7 @@ where
        R: GLWEToMut,
        A: GLWEToRef,
        D: GGSWCiphertextPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
        let lhs: &GLWE<&[u8]> = &lhs.to_ref();
@@ -234,7 +231,7 @@ where
        assert_eq!(rhs.rank(), res.rank());
        assert_eq!(rhs.n(), res.n());
        assert_eq!(lhs.n(), res.n());
        assert!(scratch.available() >= GLWE::external_product_scratch_space(self, res, lhs, rhs));
        assert!(scratch.available() >= self.glwe_external_product_scratch_space(res, lhs, rhs));
        }

        let cols: usize = (rhs.rank() + 1).into();
@@ -242,8 +239,8 @@ where

        let a_size: usize = (lhs.size() * basek_in).div_ceil(basek_ggsw);

        let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), cols, rhs.size()); // Todo optimise
        let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), cols, a_size.div_ceil(dsize));
        let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), cols, rhs.size()); // Todo optimise
        let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), cols, a_size.div_ceil(dsize));
        a_dft.data_mut().fill(0);

        if basek_in == basek_ggsw {
@@ -271,7 +268,7 @@ where
                }
            }
        } else {
            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n(), cols, a_size);
            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n().into(), cols, a_size);

            for j in 0..cols {
                self.vec_znx_normalize(
@@ -326,9 +323,9 @@ where
    }
}

impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE>
where
    Self: VecZnxDftAllocBytes
impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE> where
    Self: GetDegree
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxNormalizeTmpBytes
        + VecZnxDftApply<BE>
@@ -336,7 +333,9 @@ where
        + VmpApplyDftToDftAdd<BE>
        + VecZnxIdftApplyConsume<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxNormalize<BE>,
    Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx,
        + VecZnxNormalize<BE>
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxNormalizeTmpBytes
{
}

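// Illustrative sketch (not part of the diff): the refactored entry points.
// `ct`, `lhs`, `ggsw`, `module`, and `scratch` are hypothetical values; the
// method names and generic bounds (GLWEToRef, GGSWToRef) come from the hunks
// above.
//
// ct.external_product(&module, &lhs, &ggsw, scratch);   // out-of-place
// ct.external_product_inplace(&module, &ggsw, scratch); // in-place
//
// Both now forward to the GLWEExternalProduct trait on Module<BE>, so the
// long per-call trait-bound lists on GLWE shrink to a single bound.
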
@@ -1,8 +1,6 @@
use poulpy_hal::layouts::{Backend, Scratch};

use crate::layouts::{GLWEToMut, GLWEToRef, prepared::GGSWCiphertextPreparedToRef};

mod gglwe_atk;
mod gglwe_ksk;
mod ggsw_ct;
mod glwe_ct;

pub use glwe_ct::*;

@@ -4,7 +4,7 @@ use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
        VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace,
        VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
@@ -12,7 +12,7 @@ use poulpy_hal::{
};

use crate::{
    GLWEOperations, TakeGLWECt,
    GLWEOperations, TakeGLWE,
    layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared},
};

@@ -94,7 +94,7 @@ impl GLWEPacker {
    where
        OUT: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        pack_core_scratch_space(module, out_infos, key_infos)
    }
@@ -119,7 +119,7 @@ impl GLWEPacker {
        auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -181,7 +181,7 @@ fn pack_core_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos:
where
    OUT: GLWEInfos,
    KEY: GGLWEInfos,
    Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
    combine_scratch_space(module, out_infos, key_infos)
}
@@ -194,7 +194,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
    auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
    scratch: &mut Scratch<B>,
) where
    Module<B>: VecZnxDftAllocBytes
    Module<B>: VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>
@@ -272,7 +272,7 @@ fn combine_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &O
where
    OUT: GLWEInfos,
    KEY: GGLWEInfos,
    Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
    GLWE::bytes_of(out_infos)
        + (GLWE::rsh_scratch_space(module.n()) | GLWE::automorphism_inplace_scratch_space(module, out_infos, key_infos))
@@ -287,7 +287,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
    auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
    scratch: &mut Scratch<B>,
) where
    Module<B>: VecZnxDftAllocBytes
    Module<B>: VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>
@@ -310,7 +310,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
        + VecZnxBigAutomorphismInplace<B>
        + VecZnxNormalize<B>
        + VecZnxNormalizeTmpBytes,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWECt,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWE,
{
    let log_n: usize = acc.data.n().log2();
    let a: &mut GLWE<Vec<u8>> = &mut acc.data;
@@ -413,7 +413,7 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
        + VecZnxNegateInplace
        + VecZnxCopy
        + VecZnxSubInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>
@@ -480,7 +480,7 @@ fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
        + VecZnxNegateInplace
        + VecZnxCopy
        + VecZnxSubInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>

@@ -3,14 +3,14 @@ use std::collections::HashMap;
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize,
        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
        VecZnxNormalizeTmpBytes, VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx},
};

use crate::{
    TakeGLWECt,
    TakeGLWE,
    layouts::{Base2K, GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWEInfos, prepared::AutomorphismKeyPrepared},
    operations::GLWEOperations,
};
@@ -38,7 +38,7 @@ impl GLWE<Vec<u8>> {
        OUT: GLWEInfos,
        IN: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        let trace: usize = Self::automorphism_inplace_scratch_space(module, out_infos, key_infos);
        if in_infos.base2k() != key_infos.base2k() {
@@ -57,7 +57,7 @@ impl GLWE<Vec<u8>> {
    where
        OUT: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        Self::trace_scratch_space(module, out_infos, out_infos, key_infos)
    }
@@ -73,7 +73,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
        auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -101,7 +101,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
        auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>

@@ -1,7 +1,7 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
@@ -23,7 +23,7 @@ impl AutomorphismKey<Vec<u8>> {
        OUT: GGLWEInfos,
        IN: GGLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
    }
@@ -32,7 +32,7 @@ impl AutomorphismKey<Vec<u8>> {
    where
        OUT: GGLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
    }
@@ -46,7 +46,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -68,7 +68,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
        rhs: &AutomorphismKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -96,7 +96,7 @@ impl GLWESwitchingKey<Vec<u8>> {
        OUT: GGLWEInfos,
        IN: GGLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWE::keyswitch_scratch_space(module, out_infos, in_infos, key_apply)
    }
@@ -105,7 +105,7 @@ impl GLWESwitchingKey<Vec<u8>> {
    where
        OUT: GGLWEInfos + GLWEInfos,
        KEY: GGLWEInfos + GLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        GLWE::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
    }
@@ -119,7 +119,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -190,7 +190,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>

@@ -1,7 +1,7 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigBytesOf,
        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
        VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
@@ -21,7 +21,7 @@ impl GGSW<Vec<u8>> {
    where
        OUT: GGSWInfos,
        TSK: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
    {
        let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
        let size_in: usize = out_infos
@@ -29,8 +29,8 @@ impl GGSW<Vec<u8>> {
            .div_ceil(tsk_infos.base2k())
            .div_ceil(tsk_infos.dsize().into()) as usize;

        let tmp_dft_i: usize = module.vec_znx_dft_bytes_of((tsk_infos.rank_out() + 1).into(), tsk_size);
        let tmp_a: usize = module.vec_znx_dft_bytes_of(1, size_in);
        let tmp_dft_i: usize = module.bytes_of_vec_znx_dft((tsk_infos.rank_out() + 1).into(), tsk_size);
        let tmp_a: usize = module.bytes_of_vec_znx_dft(1, size_in);
        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
            tsk_size,
            size_in,
@@ -39,7 +39,7 @@ impl GGSW<Vec<u8>> {
            (tsk_infos.rank_out()).into(), // Verify if rank+1
            tsk_size,
        );
        let tmp_idft: usize = module.vec_znx_big_bytes_of(1, tsk_size);
        let tmp_idft: usize = module.bytes_of_vec_znx_big(1, tsk_size);
        let norm: usize = module.vec_znx_normalize_tmp_bytes();

        tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
@@ -58,11 +58,8 @@ impl GGSW<Vec<u8>> {
        IN: GGSWInfos,
        KEY: GGLWEInfos,
        TSK: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxBigNormalizeTmpBytes,
        Module<B>:
            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        #[cfg(debug_assertions)]
        {
@@ -75,10 +72,10 @@ impl GGSW<Vec<u8>> {

        let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize;
        let res_znx: usize = VecZnx::bytes_of(module.n(), rank + 1, size_out);
        let ci_dft: usize = module.vec_znx_dft_bytes_of(rank + 1, size_out);
        let ci_dft: usize = module.bytes_of_vec_znx_dft(rank + 1, size_out);
        let ks: usize = GLWE::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos);
        let expand_rows: usize = GGSW::expand_row_scratch_space(module, out_infos, tsk_infos);
        let res_dft: usize = module.vec_znx_dft_bytes_of(rank + 1, size_out);
        let res_dft: usize = module.bytes_of_vec_znx_dft(rank + 1, size_out);

        if in_infos.base2k() == tsk_infos.base2k() {
            res_znx + ci_dft + (ks | expand_rows | res_dft)
@@ -103,11 +100,8 @@ impl GGSW<Vec<u8>> {
        OUT: GGSWInfos,
        KEY: GGLWEInfos,
        TSK: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxBigNormalizeTmpBytes,
        Module<B>:
            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GGSW::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos)
    }
@@ -124,9 +118,9 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
        DataA: DataRef,
        DataTsk: DataRef,
        Module<B>: VecZnxCopy
            + VecZnxDftAllocBytes
            + VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>
            + VecZnxDftCopy<B>
@@ -162,7 +156,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
        tsk: &TensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -171,8 +165,8 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxDftAllocBytes
            + VecZnxBigAllocBytes
            + VecZnxDftBytesOf
            + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
@@ -196,7 +190,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
        tsk: &TensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -205,8 +199,8 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxDftAllocBytes
            + VecZnxBigAllocBytes
            + VecZnxDftBytesOf
            + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
@@ -229,9 +223,9 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
        tsk: &TensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>
            + VecZnxDftCopy<B>

@@ -1,7 +1,7 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
@@ -20,7 +20,7 @@ impl GLWE<Vec<u8>> {
        OUT: GLWEInfos,
        IN: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        let in_size: usize = in_infos
            .k()
@@ -28,8 +28,8 @@ impl GLWE<Vec<u8>> {
            .div_ceil(key_apply.dsize().into()) as usize;
        let out_size: usize = out_infos.size();
        let ksk_size: usize = key_apply.size();
        let res_dft: usize = module.vec_znx_dft_bytes_of((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
        let ai_dft: usize = module.vec_znx_dft_bytes_of((key_apply.rank_in()).into(), in_size);
        let res_dft: usize = module.bytes_of_vec_znx_dft((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
        let ai_dft: usize = module.bytes_of_vec_znx_dft((key_apply.rank_in()).into(), in_size);
        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
            out_size,
            in_size,
@@ -37,7 +37,7 @@ impl GLWE<Vec<u8>> {
            (key_apply.rank_in()).into(),
            (key_apply.rank_out() + 1).into(),
            ksk_size,
        ) + module.vec_znx_dft_bytes_of((key_apply.rank_in()).into(), in_size);
        ) + module.bytes_of_vec_znx_dft((key_apply.rank_in()).into(), in_size);
        let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes();
        if in_infos.base2k() == key_apply.base2k() {
            res_dft + ((ai_dft + vmp) | normalize_big)
@@ -56,7 +56,7 @@ impl GLWE<Vec<u8>> {
    where
        OUT: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply)
    }
@@ -73,7 +73,7 @@ impl<DataSelf: DataRef> GLWE<DataSelf> {
    ) where
        DataLhs: DataRef,
        DataRhs: DataRef,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        assert_eq!(
@@ -121,7 +121,7 @@ impl<DataSelf: DataRef> GLWE<DataSelf> {
        scratch: &Scratch<B>,
    ) where
        DataRhs: DataRef,
        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        assert_eq!(
@@ -152,7 +152,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -194,7 +194,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDftTmpBytes
@@ -243,7 +243,7 @@ impl<D: DataRef> GLWE<D> {
    where
        DataRes: DataMut,
        DataKey: DataRef,
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDftTmpBytes
@@ -294,7 +294,7 @@ where
        DataRes: DataMut,
        DataIn: DataRef,
        DataVmp: DataRef,
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VecZnxDftApply<B>
            + VmpApplyDftToDft<B>
            + VecZnxIdftApplyConsume<B>
@@ -340,7 +340,7 @@ where
        DataRes: DataMut,
        DataIn: DataRef,
        DataVmp: DataRef,
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VecZnxDftApply<B>
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>

@@ -1,14 +1,14 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
        VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
};

use crate::{
    TakeGLWECt,
    TakeGLWE,
    layouts::{GGLWEInfos, GLWE, GLWELayout, LWE, LWEInfos, Rank, TorusPrecision, prepared::LWESwitchingKeyPrepared},
};

@@ -23,7 +23,7 @@ impl LWE<Vec<u8>> {
        OUT: LWEInfos,
        IN: LWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDftTmpBytes
@@ -69,7 +69,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
    ) where
        A: DataRef,
        DKs: DataRef,
        Module<B>: VecZnxDftAllocBytes
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>

@@ -210,7 +210,7 @@ impl<D: DataMut> AutomorphismKey<D>
where
    Self: SetAutomorphismGaloisElement,
{
    pub fn decompressed<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
    pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
    where
        O: AutomorphismKeyCompressedToRef + GetAutomorphismGaloisElement,
        Module<B>: AutomorphismKeyDecompress,

@@ -86,7 +86,7 @@ pub trait TensorKeyCompressedAlloc
where
    Self: GLWESwitchingKeyCompressedAlloc,
{
    fn tensor_key_compressed_alloc(
    fn alloc_tensor_key_compressed(
        &self,
        base2k: Base2K,
        k: TorusPrecision,
@@ -102,7 +102,7 @@ where
        }
    }

    fn tensor_key_compressed_alloc_from_infos<A>(&self, infos: &A) -> TensorKeyCompressed<Vec<u8>>
    fn alloc_tensor_key_compressed_from_infos<A>(&self, infos: &A) -> TensorKeyCompressed<Vec<u8>>
    where
        A: GGLWEInfos,
    {
@@ -111,7 +111,7 @@ where
            infos.rank_out(),
            "rank_in != rank_out is not supported for GGLWETensorKeyCompressed"
        );
        self.tensor_key_compressed_alloc(
        self.alloc_tensor_key_compressed(
            infos.base2k(),
            infos.k(),
            infos.rank(),
@@ -120,16 +120,16 @@ where
        )
    }

    fn tensor_key_compressed_bytes_of(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
    fn bytes_of_tensor_key_compressed(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
        let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
        pairs * self.bytes_of_glwe_switching_key_compressed(base2k, k, Rank(1), dnum, dsize)
    }

    fn tensor_key_compressed_bytes_of_from_infos<A>(&self, infos: &A) -> usize
    fn bytes_of_tensor_key_compressed_from_infos<A>(&self, infos: &A) -> usize
    where
        A: GGLWEInfos,
    {
        self.tensor_key_compressed_bytes_of(
        self.bytes_of_tensor_key_compressed(
            infos.base2k(),
            infos.k(),
            infos.rank(),
@@ -145,14 +145,14 @@ impl TensorKeyCompressed<Vec<u8>> {
        A: GGLWEInfos,
        Module<B>: TensorKeyCompressedAlloc,
    {
        module.tensor_key_compressed_alloc_from_infos(infos)
        module.alloc_tensor_key_compressed_from_infos(infos)
    }

    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
    where
        Module<B>: TensorKeyCompressedAlloc,
    {
        module.tensor_key_compressed_alloc(base2k, k, rank, dnum, dsize)
        module.alloc_tensor_key_compressed(base2k, k, rank, dnum, dsize)
    }

    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
@@ -160,7 +160,7 @@ impl TensorKeyCompressed<Vec<u8>> {
        A: GGLWEInfos,
        Module<B>: TensorKeyCompressedAlloc,
    {
        module.tensor_key_compressed_bytes_of_from_infos(infos)
        module.bytes_of_tensor_key_compressed_from_infos(infos)
    }

    pub fn bytes_of<B: Backend>(
@@ -174,7 +174,7 @@ impl TensorKeyCompressed<Vec<u8>> {
    where
        Module<B>: TensorKeyCompressedAlloc,
    {
        module.tensor_key_compressed_bytes_of(base2k, k, rank, dnum, dsize)
        module.bytes_of_tensor_key_compressed(base2k, k, rank, dnum, dsize)
    }
}


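// Illustrative sketch (not part of the diff): the renaming pattern applied
// throughout this commit. Verb-first names replace the older noun-first
// ones; `module`, `infos`, and the argument values are hypothetical.
//
// old: module.tensor_key_compressed_alloc_from_infos(&infos);
// new: module.alloc_tensor_key_compressed_from_infos(&infos);
//
// old: module.tensor_key_compressed_bytes_of(base2k, k, rank, dnum, dsize);
// new: module.bytes_of_tensor_key_compressed(base2k, k, rank, dnum, dsize);
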
@@ -224,6 +224,8 @@ where
    }
}

impl<B: Backend> GLWESwitchingKeyAlloc for Module<B> where Self: GGLWEAlloc {}

impl GLWESwitchingKey<Vec<u8>> {
    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
    where

@@ -176,7 +176,7 @@ impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
    }
}

pub trait AutomorphismKeyPrepare<B: Backend>
pub trait PrepareAutomorphismKey<B: Backend>
where
    Self: GLWESwitchingKeyPrepare<B>,
{
@@ -197,7 +197,7 @@ where
    }
}

impl<B: Backend> AutomorphismKeyPrepare<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}
impl<B: Backend> PrepareAutomorphismKey<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}

impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
    pub fn prepare_tmp_bytes(&self, module: &Module<B>) -> usize
@@ -212,7 +212,7 @@ impl<D: DataMut, B: Backend> AutomorphismKeyPrepared<D, B> {
    pub fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
    where
        O: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
        Module<B>: AutomorphismKeyPrepare<B>,
        Module<B>: PrepareAutomorphismKey<B>,
    {
        module.prepare_automorphism_key(self, other, scratch);
    }

@@ -1,5 +1,5 @@
use poulpy_hal::{
    api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes},
    api::{VmpPMatAlloc, VmpPMatBytesOf, VmpPrepare, VmpPrepareTmpBytes},
    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
};

@@ -59,7 +59,7 @@ impl<D: Data, B: Backend> GGLWEInfos for GGLWEPrepared<D, B> {

pub trait GGLWEPreparedAlloc<B: Backend>
where
    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes,
    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf,
{
    fn alloc_gglwe_prepared(
        &self,
@@ -130,7 +130,7 @@ where
            dsize.0,
        );

        self.vmp_pmat_bytes_of(dnum.into(), rank_in.into(), (rank_out + 1).into(), size)
        self.bytes_of_vmp_pmat(dnum.into(), rank_in.into(), (rank_out + 1).into(), size)
    }

    fn bytes_of_gglwe_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -149,7 +149,7 @@ where
    }
}

impl<B: Backend> GGLWEPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes {}
impl<B: Backend> GGLWEPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}

impl<B: Backend> GGLWEPrepared<Vec<u8>, B>
where

@@ -95,7 +95,7 @@ where
    }
}

    fn glwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> GLWESwitchingKeyPrepared<Vec<u8>, B>
    fn alloc_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWESwitchingKeyPrepared<Vec<u8>, B>
    where
        A: GGLWEInfos,
    {
@@ -121,7 +121,7 @@ where
        self.bytes_of_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
    }

    fn glwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
    fn bytes_of_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
    where
        A: GGLWEInfos,
    {
@@ -146,7 +146,7 @@ where
    where
        A: GGLWEInfos,
    {
        module.glwe_switching_key_prepared_alloc_from_infos(infos)
        module.alloc_glwe_switching_key_prepared_from_infos(infos)
    }

    pub fn alloc(
@@ -165,7 +165,7 @@ where
    where
        A: GGLWEInfos,
    {
        module.glwe_switching_key_prepared_bytes_of_from_infos(infos)
        module.bytes_of_glwe_switching_key_prepared_from_infos(infos)
    }

    pub fn bytes_of(

@@ -1,5 +1,5 @@
use poulpy_hal::{
    api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes},
    api::{VmpPMatAlloc, VmpPMatBytesOf, VmpPrepare, VmpPrepareTmpBytes},
    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
};

@@ -51,7 +51,7 @@ impl<D: Data, B: Backend> GGSWInfos for GGSWPrepared<D, B> {

pub trait GGSWPreparedAlloc<B: Backend>
where
    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes,
    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf,
{
    fn alloc_ggsw_prepared(
        &self,
@@ -117,7 +117,7 @@ where
            dsize.0,
        );

        self.vmp_pmat_bytes_of(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
        self.bytes_of_vmp_pmat(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
    }

    fn bytes_of_ggsw_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -135,7 +135,7 @@ where
    }
}

impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes {}
impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}

impl<B: Backend> GGSWPrepared<Vec<u8>, B>
where

@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply},
+    api::{VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf},
     layouts::{Backend, Data, DataMut, DataRef, Module, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos},
 };

@@ -52,7 +52,7 @@ impl<D: Data, B: Backend> GLWEInfos for GLWEPublicKeyPrepared<D, B> {

 pub trait GLWEPublicKeyPreparedAlloc<B: Backend>
 where
-    Self: GetDegree + VecZnxDftAlloc<B> + VecZnxDftAllocBytes,
+    Self: GetDegree + VecZnxDftAlloc<B> + VecZnxDftBytesOf,
 {
     fn alloc_glwe_public_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> GLWEPublicKeyPrepared<Vec<u8>, B> {
         GLWEPublicKeyPrepared {
@@ -71,7 +71,7 @@ where
     }

     fn bytes_of_glwe_public_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
-        self.vec_znx_dft_bytes_of((rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
+        self.bytes_of_vec_znx_dft((rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
     }

     fn bytes_of_glwe_public_key_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -82,7 +82,7 @@ where
     }
 }

-impl<B: Backend> GLWEPublicKeyPreparedAlloc<B> for Module<B> where Self: VecZnxDftAlloc<B> + VecZnxDftAllocBytes {}
+impl<B: Backend> GLWEPublicKeyPreparedAlloc<B> for Module<B> where Self: VecZnxDftAlloc<B> + VecZnxDftBytesOf {}

 impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B>
 where
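The size query in the hunk above counts limbs as `k.div_ceil(base2k)`: the number of base-2^base2k digits needed to cover `k` bits of torus precision, times `rank + 1` polynomial columns. A minimal, runnable check of the arithmetic with invented parameter values (not taken from this diff):

```rust
fn main() {
    // Invented parameters: 54 bits of torus precision, base-2^17 digits.
    let (k, base2k): (usize, usize) = (54, 17);
    let size = k.div_ceil(base2k);
    assert_eq!(size, 4); // ceil(54 / 17) = 4 limbs per polynomial
}
```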
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare},
+    api::{SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare},
     layouts::{Backend, Data, DataMut, DataRef, Module, SvpPPol, SvpPPolToMut, SvpPPolToRef, ZnxInfos},
 };

@@ -47,7 +47,7 @@ impl<D: Data, B: Backend> GLWEInfos for GLWESecretPrepared<D, B> {

 pub trait GLWESecretPreparedAlloc<B: Backend>
 where
-    Self: GetDegree + SvpPPolAllocBytes + SvpPPolAlloc<B>,
+    Self: GetDegree + SvpPPolBytesOf + SvpPPolAlloc<B>,
 {
     fn alloc_glwe_secret_prepared(&self, rank: Rank) -> GLWESecretPrepared<Vec<u8>, B> {
         GLWESecretPrepared {
@@ -64,7 +64,7 @@ where
     }

     fn bytes_of_glwe_secret(&self, rank: Rank) -> usize {
-        self.svp_ppol_bytes_of(rank.into())
+        self.bytes_of_svp_ppol(rank.into())
     }
     fn bytes_of_glwe_secret_from_infos<A>(&self, infos: &A) -> usize
     where
@@ -75,7 +75,7 @@ where
     }
 }

-impl<B: Backend> GLWESecretPreparedAlloc<B> for Module<B> where Self: GetDegree + SvpPPolAllocBytes + SvpPPolAlloc<B> {}
+impl<B: Backend> GLWESecretPreparedAlloc<B> for Module<B> where Self: GetDegree + SvpPPolBytesOf + SvpPPolAlloc<B> {}

 impl<B: Backend> GLWESecretPrepared<Vec<u8>, B>
 where
@@ -57,7 +57,7 @@ pub trait GLWEToLWESwitchingKeyPreparedAlloc<B: Backend>
 where
     Self: GLWESwitchingKeyPreparedAlloc<B>,
 {
-    fn glwe_to_lwe_switching_key_prepared_alloc(
+    fn alloc_glwe_to_lwe_switching_key_prepared(
         &self,
         base2k: Base2K,
         k: TorusPrecision,
@@ -66,7 +66,7 @@ where
     ) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
         GLWEToLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
     }
-    fn glwe_to_lwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -80,14 +80,14 @@ where
             1,
             "dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
         );
-        self.glwe_to_lwe_switching_key_prepared_alloc(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
+        self.alloc_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
     }

-    fn glwe_to_lwe_switching_key_prepared_bytes_of(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
+    fn bytes_of_glwe_to_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
         self.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1))
     }

-    fn glwe_to_lwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -101,7 +101,7 @@ where
             1,
             "dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
         );
-        self.glwe_to_lwe_switching_key_prepared_bytes_of(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
+        self.bytes_of_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
     }
 }

@@ -115,22 +115,22 @@ where
     where
         A: GGLWEInfos,
     {
-        module.glwe_to_lwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_glwe_to_lwe_switching_key_prepared_from_infos(infos)
     }

     pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self {
-        module.glwe_to_lwe_switching_key_prepared_alloc(base2k, k, rank_in, dnum)
+        module.alloc_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
     }

     pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        module.glwe_to_lwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_glwe_to_lwe_switching_key_prepared_from_infos(infos)
     }

     pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
-        module.glwe_to_lwe_switching_key_prepared_bytes_of(base2k, k, rank_in, dnum)
+        module.bytes_of_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
     }
 }

@@ -138,14 +138,14 @@ pub trait GLWEToLWESwitchingKeyPrepare<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
-    fn glwe_to_lwe_switching_key_prepare_tmp_bytes<A>(&self, infos: &A)
+    fn prepare_glwe_to_lwe_switching_key_tmp_bytes<A>(&self, infos: &A)
     where
         A: GGLWEInfos,
     {
         self.prepare_glwe_switching_key_tmp_bytes(infos);
     }

-    fn glwe_to_lwe_switching_key_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
+    fn prepare_glwe_to_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
     where
         R: GLWEToLWESwitchingKeyPreparedToMut<B>,
         O: GLWEToLWESwitchingKeyToRef,
@@ -162,7 +162,7 @@ impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
         A: GGLWEInfos,
         Module<B>: GLWEToLWESwitchingKeyPrepare<B>,
     {
-        module.glwe_to_lwe_switching_key_prepare_tmp_bytes(infos);
+        module.prepare_glwe_to_lwe_switching_key_tmp_bytes(infos);
     }
 }

@@ -172,7 +172,7 @@ impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
         O: GLWEToLWESwitchingKeyToRef,
         Module<B>: GLWEToLWESwitchingKeyPrepare<B>,
     {
-        module.glwe_to_lwe_switching_key_prepare(self, other, scratch);
+        module.prepare_glwe_to_lwe_switching_key(self, other, scratch);
     }
 }
@@ -65,7 +65,7 @@ where
         LWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1)))
     }

-    fn lwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> LWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -87,11 +87,11 @@ where
         self.alloc_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.dnum())
     }

-    fn lwe_switching_key_prepared_bytes_of(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
+    fn bytes_of_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
         self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1))
     }

-    fn lwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -110,7 +110,7 @@ where
             1,
             "rank_out > 1 is not supported for LWESwitchingKey"
         );
-        self.lwe_switching_key_prepared_bytes_of(infos.base2k(), infos.k(), infos.dnum())
+        self.bytes_of_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.dnum())
     }
 }

@@ -124,7 +124,7 @@ where
     where
         A: GGLWEInfos,
     {
-        module.lwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_lwe_switching_key_prepared_from_infos(infos)
     }

     pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self {
@@ -135,11 +135,11 @@ where
     where
         A: GGLWEInfos,
     {
-        module.lwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_lwe_switching_key_prepared_from_infos(infos)
     }

     pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
-        module.lwe_switching_key_prepared_bytes_of(base2k, k, dnum)
+        module.bytes_of_lwe_switching_key_prepared(base2k, k, dnum)
     }
 }

@@ -147,13 +147,13 @@ pub trait LWESwitchingKeyPrepare<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
-    fn lwe_switching_key_prepare_tmp_bytes<A>(&self, infos: &A)
+    fn prepare_lwe_switching_key_tmp_bytes<A>(&self, infos: &A)
     where
         A: GGLWEInfos,
     {
         self.prepare_glwe_switching_key_tmp_bytes(infos);
     }
-    fn lwe_switching_key_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
+    fn prepare_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
     where
         R: LWESwitchingKeyPreparedToMut<B>,
         O: LWESwitchingKeyToRef,
@@ -170,7 +170,7 @@ impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
         A: GGLWEInfos,
         Module<B>: LWESwitchingKeyPrepare<B>,
     {
-        module.lwe_switching_key_prepare_tmp_bytes(infos);
+        module.prepare_lwe_switching_key_tmp_bytes(infos);
     }
 }

@@ -180,7 +180,7 @@ impl<D: DataMut, B: Backend> LWESwitchingKeyPrepared<D, B> {
         O: LWESwitchingKeyToRef,
         Module<B>: LWESwitchingKeyPrepare<B>,
     {
-        module.lwe_switching_key_prepare(self, other, scratch);
+        module.prepare_lwe_switching_key(self, other, scratch);
     }
 }
@@ -58,7 +58,7 @@ pub trait LWEToGLWESwitchingKeyPreparedAlloc<B: Backend>
 where
     Self: GLWESwitchingKeyPreparedAlloc<B>,
 {
-    fn lwe_to_glwe_switching_key_prepared_alloc(
+    fn alloc_lwe_to_glwe_switching_key_prepared(
         &self,
         base2k: Base2K,
         k: TorusPrecision,
@@ -67,7 +67,7 @@ where
     ) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
         LWEToGLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
     }
-    fn lwe_to_glwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -81,10 +81,10 @@ where
             1,
             "dsize > 1 is not supported for LWEToGLWESwitchingKey"
         );
-        self.lwe_to_glwe_switching_key_prepared_alloc(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
+        self.alloc_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
     }

-    fn lwe_to_glwe_switching_key_prepared_bytes_of(
+    fn bytes_of_lwe_to_glwe_switching_key_prepared(
         &self,
         base2k: Base2K,
         k: TorusPrecision,
@@ -94,7 +94,7 @@ where
         self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1))
     }

-    fn lwe_to_glwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -108,7 +108,7 @@ where
             1,
             "dsize > 1 is not supported for LWEToGLWESwitchingKey"
         );
-        self.lwe_to_glwe_switching_key_prepared_bytes_of(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
+        self.bytes_of_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
     }
 }

@@ -122,22 +122,22 @@ where
     where
         A: GGLWEInfos,
     {
-        module.lwe_to_glwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_lwe_to_glwe_switching_key_prepared_from_infos(infos)
     }

     pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self {
-        module.lwe_to_glwe_switching_key_prepared_alloc(base2k, k, rank_out, dnum)
+        module.alloc_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
     }

     pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        module.lwe_to_glwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_lwe_to_glwe_switching_key_prepared_from_infos(infos)
     }

     pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize {
-        module.lwe_to_glwe_switching_key_prepared_bytes_of(base2k, k, rank_out, dnum)
+        module.bytes_of_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
     }
 }

@@ -145,14 +145,14 @@ pub trait LWEToGLWESwitchingKeyPrepare<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
-    fn lwe_to_glwe_switching_key_prepare_tmp_bytes<A>(&self, infos: &A)
+    fn prepare_lwe_to_glwe_switching_key_tmp_bytes<A>(&self, infos: &A)
     where
         A: GGLWEInfos,
     {
         self.prepare_glwe_switching_key_tmp_bytes(infos);
     }

-    fn lwe_to_glwe_switching_key_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
+    fn prepare_lwe_to_glwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
     where
         R: LWEToGLWESwitchingKeyPreparedToMut<B>,
         O: LWEToGLWESwitchingKeyToRef,
@@ -169,7 +169,7 @@ impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
         A: GGLWEInfos,
         Module<B>: LWEToGLWESwitchingKeyPrepare<B>,
     {
-        module.lwe_to_glwe_switching_key_prepare_tmp_bytes(infos);
+        module.prepare_lwe_to_glwe_switching_key_tmp_bytes(infos);
     }
 }

@@ -179,7 +179,7 @@ impl<D: DataMut, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
         O: LWEToGLWESwitchingKeyToRef,
         Module<B>: LWEToGLWESwitchingKeyPrepare<B>,
     {
-        module.lwe_to_glwe_switching_key_prepare(self, other, scratch);
+        module.prepare_lwe_to_glwe_switching_key(self, other, scratch);
     }
 }
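Taken together, the `*PreparedAlloc` and `*Prepare` traits in the hunks above imply a two-step flow: allocate a prepared key from the infos of its standard form, then fill it in against a scratch buffer. A sketch under assumptions (imports elided; the standard key type is assumed to implement `GGLWEInfos`, and this only compiles inside the crate):

```rust
// Sketch, not a verbatim excerpt: wires together the renamed methods
// shown above for one key flavour.
fn prepare<B: Backend, O>(
    module: &Module<B>,
    standard: &O, // e.g. an LWE-to-GLWE switching key in standard form
    scratch: &mut Scratch<B>,
) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
where
    O: LWEToGLWESwitchingKeyToRef + GGLWEInfos,
    Module<B>: LWEToGLWESwitchingKeyPreparedAlloc<B> + LWEToGLWESwitchingKeyPrepare<B>,
{
    // Dimensions (base2k, k, rank_out, dnum) are read off `standard`.
    let mut prepared = module.alloc_lwe_to_glwe_switching_key_prepared_from_infos(standard);
    module.prepare_lwe_to_glwe_switching_key(&mut prepared, standard, scratch);
    prepared
}
```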
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
-        VecZnxNormalizeTmpBytes, VecZnxSubScalarInplace,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
+        VecZnxSubScalarInplace,
     },
     layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, ZnxZero},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
@@ -20,8 +20,8 @@ impl<D: DataRef> GGLWE<D> {
     ) where
         DataSk: DataRef,
         DataWant: DataRef,
-        Module<B>: VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+        Module<B>: VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxAddScalarInplace, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
-        VecZnxNormalizeTmpBytes, VecZnxSubInplace,
+        VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
+        VecZnxSubInplace,
     },
     layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, VecZnxBig, VecZnxDft, ZnxZero},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
@@ -21,8 +21,8 @@ impl<D: DataRef> GGSW<D> {
     ) where
         DataSk: DataRef,
         DataScalar: DataRef,
-        Module<B>: VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+        Module<B>: VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>
@@ -94,8 +94,8 @@ impl<D: DataRef> GGSW<D> {
     ) where
         DataSk: DataRef,
         DataScalar: DataRef,
-        Module<B>: VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+        Module<B>: VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxIdftApplyConsume, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSubInplace,
+        VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSubInplace,
     },
     layouts::{Backend, DataRef, Module, Scratch, ScratchOwned},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
@@ -48,8 +48,8 @@ impl<D: DataRef> GLWE<D> {
     ) where
         DataSk: DataRef,
         DataPt: DataRef,
-        Module<B>: VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+        Module<B>: VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>
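A pairing that recurs in these encryption/decryption files: the `*BytesOf` capabilities size temporary buffers before any scratch allocation. A small sketch of the query, mirroring the call shape seen in the public-key hunk earlier (the column and limb counts here are invented):

```rust
// Sketch: query the DFT-domain buffer size the way the code above does.
fn tmp_bytes<B: Backend>(module: &Module<B>) -> usize
where
    Module<B>: VecZnxDftBytesOf,
{
    // Invented shape: 2 polynomial columns of 4 limbs each.
    module.bytes_of_vec_znx_dft(2, 4)
}
```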
@@ -1,13 +1,13 @@
 use poulpy_hal::{
-    api::{TakeMatZnx, TakeScalarZnx, TakeSvpPPol, TakeVecZnx, TakeVecZnxDft, TakeVmpPMat},
-    layouts::{Backend, Scratch},
+    api::{ScratchAvailable, ScratchTakeBasic},
+    layouts::{Backend, Module, Scratch},
 };

 use crate::{
     dist::Distribution,
     layouts::{
         AutomorphismKey, Degree, GGLWE, GGLWEInfos, GGSW, GGSWInfos, GLWE, GLWEInfos, GLWEPlaintext, GLWEPublicKey, GLWESecret,
-        GLWESwitchingKey, Rank, TensorKey,
+        GLWESwitchingKey, GetDegree, Rank, TensorKey,
         prepared::{
             AutomorphismKeyPrepared, GGLWEPrepared, GGSWPrepared, GLWEPublicKeyPrepared, GLWESecretPrepared,
             GLWESwitchingKeyPrepared, TensorKeyPrepared,
@@ -15,119 +15,15 @@ use crate::{
     },
 };

-pub trait TakeGLWECt {
-    fn take_glwe_ct<A>(&mut self, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWECtSlice {
-    fn take_glwe_ct_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GLWE<&mut [u8]>>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWEPt<B: Backend> {
-    fn take_glwe_pt<A>(&mut self, infos: &A) -> (GLWEPlaintext<&mut [u8]>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGGLWE {
-    fn take_gglwe<A>(&mut self, infos: &A) -> (GGLWE<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWEPrepared<B: Backend> {
-    fn take_gglwe_prepared<A>(&mut self, infos: &A) -> (GGLWEPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGSW {
-    fn take_ggsw<A>(&mut self, infos: &A) -> (GGSW<&mut [u8]>, &mut Self)
-    where
-        A: GGSWInfos;
-}
-
-pub trait TakeGGSWPrepared<B: Backend> {
-    fn take_ggsw_prepared<A>(&mut self, infos: &A) -> (GGSWPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGSWInfos;
-}
-
-pub trait TakeGGSWPreparedSlice<B: Backend> {
-    fn take_ggsw_prepared_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GGSWPrepared<&mut [u8], B>>, &mut Self)
-    where
-        A: GGSWInfos;
-}
-
-pub trait TakeGLWESecret {
-    fn take_glwe_secret(&mut self, n: Degree, rank: Rank) -> (GLWESecret<&mut [u8]>, &mut Self);
-}
-
-pub trait TakeGLWESecretPrepared<B: Backend> {
-    fn take_glwe_secret_prepared(&mut self, n: Degree, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self);
-}
-
-pub trait TakeGLWEPk {
-    fn take_glwe_pk<A>(&mut self, infos: &A) -> (GLWEPublicKey<&mut [u8]>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWEPkPrepared<B: Backend> {
-    fn take_glwe_pk_prepared<A>(&mut self, infos: &A) -> (GLWEPublicKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWESwitchingKey {
-    fn take_glwe_switching_key<A>(&mut self, infos: &A) -> (GLWESwitchingKey<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWESwitchingKeyPrepared<B: Backend> {
-    fn take_gglwe_switching_key_prepared<A>(&mut self, infos: &A) -> (GLWESwitchingKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeTensorKey {
-    fn take_tensor_key<A>(&mut self, infos: &A) -> (TensorKey<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWETensorKeyPrepared<B: Backend> {
-    fn take_gglwe_tensor_key_prepared<A>(&mut self, infos: &A) -> (TensorKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWEAutomorphismKey {
-    fn take_gglwe_automorphism_key<A>(&mut self, infos: &A) -> (AutomorphismKey<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWEAutomorphismKeyPrepared<B: Backend> {
-    fn take_gglwe_automorphism_key_prepared<A>(&mut self, infos: &A) -> (AutomorphismKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-impl<B: Backend> TakeGLWECt for Scratch<B>
+pub trait ScratchTakeCore<B: Backend>
 where
-    Scratch<B>: TakeVecZnx,
+    Self: ScratchTakeBasic<B> + ScratchAvailable,
 {
-    fn take_glwe_ct<A>(&mut self, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
+    fn take_glwe_ct<A>(&mut self, module: &Module<B>, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
     {
-        let (data, scratch) = self.take_vec_znx(infos.n().into(), (infos.rank() + 1).into(), infos.size());
+        let (data, scratch) = self.take_vec_znx(module, (infos.rank() + 1).into(), infos.size());
         (
             GLWE {
                 k: infos.k(),
@@ -137,12 +33,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWECtSlice for Scratch<B>
-where
-    Scratch<B>: TakeVecZnx,
-{
     fn take_glwe_ct_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GLWE<&mut [u8]>>, &mut Self)
     where
         A: GLWEInfos,
@@ -156,12 +47,7 @@ where
         }
         (cts, scratch)
     }
-}
-
-impl<B: Backend> TakeGLWEPt<B> for Scratch<B>
-where
-    Scratch<B>: TakeVecZnx,
-{
     fn take_glwe_pt<A>(&mut self, infos: &A) -> (GLWEPlaintext<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
@@ -176,12 +62,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWE for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_gglwe<A>(&mut self, infos: &A) -> (GGLWE<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -203,12 +84,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWEPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVmpPMat<B>,
-{
     fn take_gglwe_prepared<A>(&mut self, infos: &A) -> (GGLWEPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -230,12 +106,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGSW for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_ggsw<A>(&mut self, infos: &A) -> (GGSW<&mut [u8]>, &mut Self)
     where
         A: GGSWInfos,
@@ -257,12 +128,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGSWPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVmpPMat<B>,
-{
     fn take_ggsw_prepared<A>(&mut self, infos: &A) -> (GGSWPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGSWInfos,
@@ -284,12 +150,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGSWPreparedSlice<B> for Scratch<B>
-where
-    Scratch<B>: TakeGGSWPrepared<B>,
-{
     fn take_ggsw_prepared_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GGSWPrepared<&mut [u8], B>>, &mut Self)
     where
         A: GGSWInfos,
@@ -303,12 +164,7 @@ where
         }
         (cts, scratch)
     }
-}
-
-impl<B: Backend> TakeGLWEPk for Scratch<B>
-where
-    Scratch<B>: TakeVecZnx,
-{
     fn take_glwe_pk<A>(&mut self, infos: &A) -> (GLWEPublicKey<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
@@ -324,12 +180,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWEPkPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVecZnxDft<B>,
-{
     fn take_glwe_pk_prepared<A>(&mut self, infos: &A) -> (GLWEPublicKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GLWEInfos,
@@ -345,12 +196,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWESecret for Scratch<B>
-where
-    Scratch<B>: TakeScalarZnx,
-{
     fn take_glwe_secret(&mut self, n: Degree, rank: Rank) -> (GLWESecret<&mut [u8]>, &mut Self) {
         let (data, scratch) = self.take_scalar_znx(n.into(), rank.into());
         (
@@ -361,12 +207,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWESecretPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeSvpPPol<B>,
-{
     fn take_glwe_secret_prepared(&mut self, n: Degree, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self) {
         let (data, scratch) = self.take_svp_ppol(n.into(), rank.into());
         (
@@ -377,12 +218,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWESwitchingKey for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_glwe_switching_key<A>(&mut self, infos: &A) -> (GLWESwitchingKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -397,12 +233,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWESwitchingKeyPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeGGLWEPrepared<B>,
-{
     fn take_gglwe_switching_key_prepared<A>(&mut self, infos: &A) -> (GLWESwitchingKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -417,12 +248,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWEAutomorphismKey for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_gglwe_automorphism_key<A>(&mut self, infos: &A) -> (AutomorphismKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -430,12 +256,7 @@ where
         let (data, scratch) = self.take_glwe_switching_key(infos);
         (AutomorphismKey { key: data, p: 0 }, scratch)
     }
-}
-
-impl<B: Backend> TakeGGLWEAutomorphismKeyPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeGGLWESwitchingKeyPrepared<B>,
-{
     fn take_gglwe_automorphism_key_prepared<A>(&mut self, infos: &A) -> (AutomorphismKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -443,12 +264,7 @@ where
         let (data, scratch) = self.take_gglwe_switching_key_prepared(infos);
         (AutomorphismKeyPrepared { key: data, p: 0 }, scratch)
     }
-}
-
-impl<B: Backend> TakeTensorKey for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_tensor_key<A>(&mut self, infos: &A) -> (TensorKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -478,12 +294,7 @@ where
         }
         (TensorKey { keys }, scratch)
     }
-}
-
-impl<B: Backend> TakeGGLWETensorKeyPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVmpPMat<B>,
-{
     fn take_gglwe_tensor_key_prepared<A>(&mut self, infos: &A) -> (TensorKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -515,3 +326,5 @@ where
         (TensorKeyPrepared { keys }, scratch)
     }
 }
+
+impl<B: Backend> ScratchTakeCore<B> for Scratch<B> where Self: ScratchTakeBasic<B> + ScratchAvailable {}
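The file above is the structural core of the commit: the many per-layout `Take*` traits (one trait plus one `impl` block each) collapse into a single `ScratchTakeCore<B>` trait with a blanket impl, and `take_glwe_ct` now reads the ring degree from an explicit `Module<B>` argument instead of `infos.n()`. A usage sketch under assumptions (imports elided; compiles only against the crate):

```rust
// Sketch: carve a temporary GLWE ciphertext out of scratch space via the
// consolidated trait, then keep working with the remaining scratch.
fn with_tmp_glwe<B: Backend, A: GLWEInfos>(module: &Module<B>, infos: &A, scratch: &mut Scratch<B>)
where
    Scratch<B>: ScratchTakeCore<B>,
{
    let (mut tmp, rest) = scratch.take_glwe_ct(module, infos);
    // ... fill `tmp`, and hand `rest` to callees that need more scratch ...
    let _ = (&mut tmp, rest);
}
```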
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
         VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -27,7 +27,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_automorphism_key_automorphism<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -38,8 +38,8 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxAutomorphism
         + VecZnxAutomorphismInplace<B>
-        + SvpPPolAllocBytes
-        + VecZnxDftAllocBytes
+        + SvpPPolBytesOf
+        + VecZnxDftBytesOf
         + VecZnxNormalizeTmpBytes
         + VmpPMatAlloc<B>
         + VmpPrepare<B>
@@ -224,7 +224,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_automorphism_key_automorphism_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -238,9 +238,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDftTmpBytes
@@ -255,8 +255,8 @@ where
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
         + VecZnxAutomorphismInplace<B>
-        + VecZnxDftAllocBytes
-        + VecZnxBigAllocBytes
+        + VecZnxDftBytesOf
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
@@ -27,8 +27,8 @@ use crate::{

 pub fn test_ggsw_automorphism<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
-        + VecZnxBigAllocBytes
+    Module<B>: VecZnxDftBytesOf
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -41,7 +41,7 @@ where
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpA<B>
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxAddScalarInplace
         + VecZnxCopy
@@ -219,8 +219,8 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_ggsw_automorphism_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
-        + VecZnxBigAllocBytes
+    Module<B>: VecZnxDftBytesOf
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -233,7 +233,7 @@ where
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpA<B>
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxAddScalarInplace
         + VecZnxCopy
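From here on, every remaining hunk touches the same spot in each backend-generic test: the long `where` clause that enumerates every HAL capability the test exercises, with `VecZnxDftAllocBytes`/`VecZnxBigAllocBytes`/`SvpPPolAllocBytes` becoming `VecZnxDftBytesOf`/`VecZnxBigBytesOf`/`SvpPPolBytesOf`. The shape, reduced to a toy (the real bound lists are far longer):

```rust
// Toy reduction of the test signatures below; only the renamed bounds
// are shown, the real tests require many more capabilities.
pub fn test_something<B: Backend>(module: &Module<B>)
where
    Module<B>: VecZnxDftBytesOf + VecZnxBigBytesOf,
{
    let _ = module; // body elided
}
```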
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -26,7 +26,7 @@ use crate::{

 pub fn test_glwe_automorphism<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -40,9 +40,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VmpApplyDftToDftTmpBytes
@@ -169,7 +169,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_glwe_automorphism_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -183,9 +183,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VmpApplyDftToDftTmpBytes

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace,
     },
@@ -23,7 +23,7 @@ use crate::layouts::{

 pub fn test_lwe_to_glwe<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -36,9 +36,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -142,7 +142,7 @@ where

 pub fn test_glwe_to_lwe<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -155,9 +155,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
|
||||
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
|
||||
VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
|
||||
VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
|
||||
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing,
|
||||
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
|
||||
},
|
||||
@@ -26,7 +26,7 @@ use crate::{
|
||||
|
||||
pub fn test_gglwe_automorphisk_key_encrypt_sk<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -40,7 +40,7 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
@@ -51,7 +51,7 @@ where
|
||||
+ VecZnxSwitchRing
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxAutomorphismInplace<B>
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxSubScalarInplace
|
||||
+ VecZnxCopy
|
||||
@@ -129,7 +129,7 @@ where
|
||||
|
||||
pub fn test_gglwe_automorphisk_key_compressed_encrypt_sk<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -143,7 +143,7 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
@@ -154,7 +154,7 @@ where
|
||||
+ VecZnxSwitchRing
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxAutomorphismInplace<B>
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxSubScalarInplace
|
||||
+ VecZnxCopy
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
|
||||
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
|
||||
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
|
||||
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
|
||||
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
|
||||
VecZnxSubScalarInplace, VecZnxSwitchRing, VmpPMatAlloc, VmpPrepare,
|
||||
},
|
||||
@@ -25,7 +25,7 @@ use crate::{
|
||||
|
||||
pub fn test_gglwe_switching_key_encrypt_sk<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -39,12 +39,12 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxSwitchRing
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxSubScalarInplace
|
||||
+ VecZnxCopy
|
||||
@@ -117,7 +117,7 @@ where
|
||||
|
||||
pub fn test_gglwe_switching_key_compressed_encrypt_sk<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -131,12 +131,12 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxSwitchRing
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxSubScalarInplace
|
||||
+ VecZnxCopy
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
|
||||
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
|
||||
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes,
|
||||
VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
|
||||
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
|
||||
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply,
|
||||
VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
|
||||
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
|
||||
},
|
||||
layouts::{Backend, Module, ScalarZnx, ScratchOwned},
|
||||
oep::{
|
||||
@@ -79,7 +79,7 @@ where
|
||||
|
||||
pub fn test_ggsw_compressed_encrypt_sk<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -93,11 +93,11 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxCopy
|
||||
+ VmpPMatAlloc<B>
|
||||
|
||||
@@ -1,10 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubInplace,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, Module, ScratchOwned},
     oep::{
@@ -26,8 +25,8 @@ use crate::{

 pub fn test_glwe_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
-        + VecZnxBigAllocBytes
+    Module<B>: VecZnxDftBytesOf
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -36,7 +35,7 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddNormal<B>
@@ -117,8 +116,8 @@ where

 pub fn test_glwe_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
-        + VecZnxBigAllocBytes
+    Module<B>: VecZnxDftBytesOf
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -127,7 +126,7 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddNormal<B>
@@ -219,8 +218,8 @@ where

 pub fn test_glwe_encrypt_zero_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
-        + VecZnxBigAllocBytes
+    Module<B>: VecZnxDftBytesOf
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -229,7 +228,7 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddNormal<B>
@@ -294,7 +293,7 @@ where

 pub fn test_glwe_encrypt_pk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -308,10 +307,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VecZnxDftAlloc<B>

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing,
     },
@@ -25,7 +25,7 @@ use crate::{

 pub fn test_gglwe_tensor_key_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -39,10 +39,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VecZnxDftAlloc<B>
@@ -145,7 +145,7 @@ where

 pub fn test_gglwe_tensor_key_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -159,10 +159,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VecZnxDftAlloc<B>
@@ -1,8 +1,8 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
|
||||
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
|
||||
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
|
||||
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
|
||||
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
|
||||
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub,
|
||||
VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
||||
VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
|
||||
@@ -27,7 +27,7 @@ use crate::{
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn test_gglwe_switching_key_external_product<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -41,9 +41,9 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxSwitchRing
|
||||
@@ -209,7 +209,7 @@ where
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn test_gglwe_switching_key_external_product_inplace<B>(module: &Module<B>)
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -223,9 +223,9 @@ where
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<B>
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigAddInplace<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxSwitchRing

@@ -1,11 +1,11 @@
use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes,
VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub, VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply,
VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub, VecZnxSubInplace, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
},
layouts::{Backend, Module, ScalarZnx, ScalarZnxToMut, ScratchOwned, ZnxViewMut},
oep::{
@@ -27,7 +27,7 @@ use crate::{
#[allow(clippy::too_many_arguments)]
pub fn test_ggsw_external_product<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -41,9 +41,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxAddScalarInplace
@@ -192,7 +192,7 @@ where
#[allow(clippy::too_many_arguments)]
pub fn test_ggsw_external_product_inplace<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -206,9 +206,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxAddScalarInplace

@@ -1,8 +1,8 @@
use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub, VecZnxSubInplace,
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
},
@@ -26,7 +26,7 @@ use crate::{
#[allow(clippy::too_many_arguments)]
pub fn test_glwe_external_product<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -39,9 +39,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes
@@ -178,7 +178,7 @@ where
#[allow(clippy::too_many_arguments)]
pub fn test_glwe_external_product_inplace<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -191,9 +191,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -1,11 +1,11 @@
use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxSubScalarInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
VmpPrepare,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
@@ -26,7 +26,7 @@ use crate::{

pub fn test_gglwe_switching_key_keyswitch<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -39,9 +39,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes
@@ -196,7 +196,7 @@ where
#[allow(clippy::too_many_arguments)]
pub fn test_gglwe_switching_key_keyswitch_inplace<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -209,9 +209,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -1,9 +1,9 @@
use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAlloc,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAlloc,
VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
},
@@ -27,7 +27,7 @@ use crate::{
#[allow(clippy::too_many_arguments)]
pub fn test_ggsw_keyswitch<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -40,9 +40,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes
@@ -216,7 +216,7 @@ where
#[allow(clippy::too_many_arguments)]
pub fn test_ggsw_keyswitch_inplace<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -229,9 +229,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -1,11 +1,10 @@
use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
VmpPrepare,
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
@@ -27,7 +26,7 @@ use crate::{
#[allow(clippy::too_many_arguments)]
pub fn test_glwe_keyswitch<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -40,9 +39,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes
@@ -169,7 +168,7 @@ where

pub fn test_glwe_keyswitch_inplace<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -182,9 +181,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -1,9 +1,9 @@
use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace,
VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftApply,
VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace,
},
@@ -22,7 +22,7 @@ use crate::layouts::{

pub fn test_lwe_keyswitch<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
@@ -35,9 +35,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -2,10 +2,10 @@ use std::collections::HashMap;

use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace,
VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
@@ -28,7 +28,7 @@ use crate::{

pub fn test_glwe_packing<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxAutomorphism
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxBigSubSmallNegateInplace<B>
@@ -48,9 +48,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -2,13 +2,13 @@ use std::collections::HashMap;

use poulpy_hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigAddInplace,
VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
VmpPrepare,
},
layouts::{Backend, Module, ScratchOwned, ZnxView, ZnxViewMut},
oep::{
@@ -29,7 +29,7 @@ use crate::{

pub fn test_glwe_trace_inplace<B>(module: &Module<B>)
where
Module<B>: VecZnxDftAllocBytes
Module<B>: VecZnxDftBytesOf
+ VecZnxAutomorphism
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxBigSubSmallNegateInplace<B>
@@ -47,9 +47,9 @@ where
+ VecZnxNormalize<B>
+ VecZnxSub
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>
+ VecZnxBigAllocBytes
+ VecZnxBigBytesOf
+ VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalizeTmpBytes

@@ -1,4 +1,7 @@
use crate::layouts::{Backend, MatZnx, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat};
use crate::{
api::{SvpPPolBytesOf, VecZnxBigBytesOf, VecZnxDftBytesOf, VmpPMatBytesOf},
layouts::{Backend, MatZnx, Module, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
};

/// Allocates a new [crate::layouts::ScratchOwned] of `size` aligned bytes.
pub trait ScratchOwnedAlloc<B: Backend> {
@@ -25,76 +28,124 @@ pub trait TakeSlice {
fn take_slice<T>(&mut self, len: usize) -> (&mut [T], &mut Self);
}

/// Take a slice of bytes from a [Scratch], wraps it into a [ScalarZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeScalarZnx {
fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self);
}
pub trait ScratchTakeBasic<B: Backend>
where
Self: TakeSlice,
{
fn take_scalar_znx(&mut self, module: &Module<B>, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
let (take_slice, rem_slice) = self.take_slice(ScalarZnx::bytes_of(module.n(), cols));
(
ScalarZnx::from_data(take_slice, module.n(), cols),
rem_slice,
)
}

/// Take a slice of bytes from a [Scratch], wraps it into a [SvpPPol] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeSvpPPol<B: Backend> {
fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self);
}
fn take_svp_ppol(&mut self, module: &Module<B>, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self)
where
Module<B>: SvpPPolBytesOf,
{
let (take_slice, rem_slice) = self.take_slice(module.bytes_of_svp_ppol(cols));
(SvpPPol::from_data(take_slice, module.n(), cols), rem_slice)
}

/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnx {
fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self);
}
fn take_vec_znx(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
let (take_slice, rem_slice) = self.take_slice(VecZnx::bytes_of(module.n(), cols, size));
(
VecZnx::from_data(take_slice, module.n(), cols, size),
rem_slice,
)
}

/// Take a slice of bytes from a [Scratch], slices it into a vector of [VecZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxSlice {
fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self);
}
fn take_vec_znx_big(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self)
where
Module<B>: VecZnxBigBytesOf,
{
let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vec_znx_big(cols, size));
(
VecZnxBig::from_data(take_slice, module.n(), cols, size),
rem_slice,
)
}

/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnxBig] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxBig<B: Backend> {
fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self);
}
fn take_vec_znx_dft(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self)
where
Module<B>: VecZnxDftBytesOf,
{
let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vec_znx_dft(cols, size));

/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnxDft] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxDft<B: Backend> {
fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self);
}
(
VecZnxDft::from_data(take_slice, module.n(), cols, size),
rem_slice,
)
}

/// Take a slice of bytes from a [Scratch], slices it into a vector of [VecZnxDft] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxDftSlice<B: Backend> {
fn take_vec_znx_dft_slice(
&mut self,
module: &Module<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self);
}
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self)
where
Module<B>: VecZnxDftBytesOf,
{
let mut scratch: &mut Self = self;
let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = scratch.take_vec_znx_dft(module, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}

fn take_vec_znx_slice(
&mut self,
module: &Module<B>,
len: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
let mut scratch: &mut Self = self;
let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = scratch.take_vec_znx(module, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}

/// Take a slice of bytes from a [Scratch], wraps it into a [VmpPMat] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVmpPMat<B: Backend> {
fn take_vmp_pmat(
&mut self,
n: usize,
module: &Module<B>,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Self);
}
) -> (VmpPMat<&mut [u8], B>, &mut Self)
where
Module<B>: VmpPMatBytesOf,
{
let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vmp_pmat(rows, cols_in, cols_out, size));
(
VmpPMat::from_data(take_slice, module.n(), rows, cols_in, cols_out, size),
rem_slice,
)
}

/// Take a slice of bytes from a [Scratch], wraps it into a [MatZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeMatZnx {
fn take_mat_znx(
&mut self,
n: usize,
module: &Module<B>,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Self);
) -> (MatZnx<&mut [u8]>, &mut Self) {
let (take_slice, rem_slice) = self.take_slice(MatZnx::bytes_of(module.n(), rows, cols_in, cols_out, size));
(
MatZnx::from_data(take_slice, module.n(), rows, cols_in, cols_out, size),
rem_slice,
)
}
}
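Read as a whole, ScratchTakeBasic turns the scratch into a bump allocator with typed views: each take_* carves bytes off the front and hands back the remainder, so successive borrows chain rather than overlap. A rough usage sketch under the signatures above (construction of the module and scratch is assumed):

// Hedged sketch, not part of the diff: chaining the default take_*
// methods of ScratchTakeBasic. Each call returns the taken view plus
// the remaining scratch, which shadows the previous binding.
fn carve<B: Backend>(module: &Module<B>, scratch: &mut Scratch<B>)
where
    Scratch<B>: ScratchTakeBasic<B>,
    Module<B>: SvpPPolBytesOf + VecZnxDftBytesOf,
{
    let (poly, scratch) = scratch.take_vec_znx(module, 2, 3);
    let (dft, scratch) = scratch.take_vec_znx_dft(module, 2, 3);
    let (ppol, _rest) = scratch.take_svp_ppol(module, 2);
    // poly, dft and ppol view disjoint regions of the same buffer.
    let _ = (poly, dft, ppol);
}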

@@ -8,8 +8,8 @@ pub trait SvpPPolAlloc<B: Backend> {
}

/// Returns the size in bytes to allocate a [crate::layouts::SvpPPol].
pub trait SvpPPolAllocBytes {
fn svp_ppol_bytes_of(&self, cols: usize) -> usize;
pub trait SvpPPolBytesOf {
fn bytes_of_svp_ppol(&self, cols: usize) -> usize;
}

/// Consume a vector of bytes into a [crate::layouts::MatZnx].

@@ -16,8 +16,8 @@ pub trait VecZnxBigAlloc<B: Backend> {
}

/// Returns the size in bytes to allocate a [crate::layouts::VecZnxBig].
pub trait VecZnxBigAllocBytes {
fn vec_znx_big_bytes_of(&self, cols: usize, size: usize) -> usize;
pub trait VecZnxBigBytesOf {
fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize;
}

/// Consume a vector of bytes into a [crate::layouts::VecZnxBig].

@@ -10,8 +10,8 @@ pub trait VecZnxDftFromBytes<B: Backend> {
fn vec_znx_dft_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B>;
}

pub trait VecZnxDftAllocBytes {
fn vec_znx_dft_bytes_of(&self, cols: usize, size: usize) -> usize;
pub trait VecZnxDftBytesOf {
fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize;
}

pub trait VecZnxDftApply<B: Backend> {

@@ -6,8 +6,8 @@ pub trait VmpPMatAlloc<B: Backend> {
fn vmp_pmat_alloc(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B>;
}

pub trait VmpPMatAllocBytes {
fn vmp_pmat_bytes_of(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
pub trait VmpPMatBytesOf {
fn bytes_of_vmp_pmat(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
}
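The rename from the *AllocBytes traits to *BytesOf is mechanical (trait and method swap their word order), but it makes scratch sizing read uniformly; a small sketch composing the renamed accessors to size a scratch region (the signatures are from the traits above, the composition itself is illustrative):

// Hedged sketch, not part of the diff: summing the renamed bytes_of_*
// accessors to bound the scratch space needed by a sequence of takes.
fn scratch_bytes<B: Backend>(module: &Module<B>, cols: usize, size: usize) -> usize
where
    Module<B>: VecZnxDftBytesOf + VecZnxBigBytesOf + SvpPPolBytesOf,
{
    module.bytes_of_vec_znx_dft(cols, size)
        + module.bytes_of_vec_znx_big(cols, size)
        + module.bytes_of_svp_ppol(cols)
}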

pub trait VmpPMatFromBytes<B: Backend> {

@@ -1,14 +1,7 @@
use crate::{
api::{
ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeMatZnx, TakeScalarZnx, TakeSlice,
TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice, TakeVmpPMat,
},
layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
oep::{
ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeMatZnxImpl,
TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl,
TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl,
},
api::{ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeSlice},
layouts::{Backend, Scratch, ScratchOwned},
oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
};

impl<B> ScratchOwnedAlloc<B> for ScratchOwned<B>
@@ -55,104 +48,3 @@ where
B::take_slice_impl(self, len)
}
}

impl<B> TakeScalarZnx for Scratch<B>
where
B: Backend + TakeScalarZnxImpl<B>,
{
fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
B::take_scalar_znx_impl(self, n, cols)
}
}

impl<B> TakeSvpPPol<B> for Scratch<B>
where
B: Backend + TakeSvpPPolImpl<B>,
{
fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self) {
B::take_svp_ppol_impl(self, n, cols)
}
}

impl<B> TakeVecZnx for Scratch<B>
where
B: Backend + TakeVecZnxImpl<B>,
{
fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
B::take_vec_znx_impl(self, n, cols, size)
}
}

impl<B> TakeVecZnxSlice for Scratch<B>
where
B: Backend + TakeVecZnxSliceImpl<B>,
{
fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
B::take_vec_znx_slice_impl(self, len, n, cols, size)
}
}

impl<B> TakeVecZnxBig<B> for Scratch<B>
where
B: Backend + TakeVecZnxBigImpl<B>,
{
fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self) {
B::take_vec_znx_big_impl(self, n, cols, size)
}
}

impl<B> TakeVecZnxDft<B> for Scratch<B>
where
B: Backend + TakeVecZnxDftImpl<B>,
{
fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self) {
B::take_vec_znx_dft_impl(self, n, cols, size)
}
}

impl<B> TakeVecZnxDftSlice<B> for Scratch<B>
where
B: Backend + TakeVecZnxDftSliceImpl<B>,
{
fn take_vec_znx_dft_slice(
&mut self,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self) {
B::take_vec_znx_dft_slice_impl(self, len, n, cols, size)
}
}

impl<B> TakeVmpPMat<B> for Scratch<B>
where
B: Backend + TakeVmpPMatImpl<B>,
{
fn take_vmp_pmat(
&mut self,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Self) {
B::take_vmp_pmat_impl(self, n, rows, cols_in, cols_out, size)
}
}

impl<B> TakeMatZnx for Scratch<B>
where
B: Backend + TakeMatZnxImpl<B>,
{
fn take_mat_znx(
&mut self,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Self) {
B::take_mat_znx_impl(self, n, rows, cols_in, cols_out, size)
}
}

@@ -1,6 +1,6 @@
use crate::{
api::{
SvpApplyDft, SvpApplyDftToDft, SvpApplyDftToDftAdd, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
SvpApplyDft, SvpApplyDftToDft, SvpApplyDftToDftAdd, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
SvpPPolFromBytes, SvpPrepare,
},
layouts::{
@@ -30,11 +30,11 @@ where
}
}

impl<B> SvpPPolAllocBytes for Module<B>
impl<B> SvpPPolBytesOf for Module<B>
where
B: Backend + SvpPPolAllocBytesImpl<B>,
{
fn svp_ppol_bytes_of(&self, cols: usize) -> usize {
fn bytes_of_svp_ppol(&self, cols: usize) -> usize {
B::svp_ppol_bytes_of_impl(self.n(), cols)
}
}
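Every Module-side impl in this family follows the same delegation shape: the public bytes_of_* method prepends the ring degree self.n() and defers to the backend's *_bytes_of_impl. A sketch of the pattern with a hypothetical layout type Foo (both traits below are illustrative stand-ins, not poulpy API):

// Hedged sketch, not part of the diff: the Module -> backend delegation
// pattern shared by the BytesOf impls in this commit, for a made-up
// layout type `Foo`. `FooBytesOf`/`FooBytesOfImpl` are placeholders.
pub trait FooBytesOf {
    fn bytes_of_foo(&self, cols: usize) -> usize;
}

pub unsafe trait FooBytesOfImpl<B: Backend> {
    fn foo_bytes_of_impl(n: usize, cols: usize) -> usize;
}

impl<B> FooBytesOf for Module<B>
where
    B: Backend + FooBytesOfImpl<B>,
{
    fn bytes_of_foo(&self, cols: usize) -> usize {
        B::foo_bytes_of_impl(self.n(), cols)
    }
}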

@@ -1,7 +1,7 @@
use crate::{
api::{
VecZnxBigAdd, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes,
VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes, VecZnxBigBytesOf,
VecZnxBigFromBytes, VecZnxBigFromSmall, VecZnxBigNegate, VecZnxBigNegateInplace, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxBigSub, VecZnxBigSubInplace, VecZnxBigSubNegateInplace, VecZnxBigSubSmallA,
VecZnxBigSubSmallB, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace,
@@ -49,11 +49,11 @@ where
}
}

impl<B> VecZnxBigAllocBytes for Module<B>
impl<B> VecZnxBigBytesOf for Module<B>
where
B: Backend + VecZnxBigAllocBytesImpl<B>,
{
fn vec_znx_big_bytes_of(&self, cols: usize, size: usize) -> usize {
fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize {
B::vec_znx_big_bytes_of_impl(self.n(), cols, size)
}
}

@@ -1,8 +1,8 @@
use crate::{
api::{
VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy,
VecZnxDftFromBytes, VecZnxDftSub, VecZnxDftSubInplace, VecZnxDftSubNegateInplace, VecZnxDftZero, VecZnxIdftApply,
VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxDftFromBytes,
VecZnxDftSub, VecZnxDftSubInplace, VecZnxDftSubNegateInplace, VecZnxDftZero, VecZnxIdftApply, VecZnxIdftApplyConsume,
VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
},
layouts::{
Backend, Data, Module, Scratch, VecZnxBig, VecZnxBigToMut, VecZnxDft, VecZnxDftOwned, VecZnxDftToMut, VecZnxDftToRef,
@@ -24,11 +24,11 @@ where
}
}

impl<B> VecZnxDftAllocBytes for Module<B>
impl<B> VecZnxDftBytesOf for Module<B>
where
B: Backend + VecZnxDftAllocBytesImpl<B>,
{
fn vec_znx_dft_bytes_of(&self, cols: usize, size: usize) -> usize {
fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize {
B::vec_znx_dft_bytes_of_impl(self.n(), cols, size)
}
}

@@ -1,7 +1,7 @@
use crate::{
api::{
VmpApplyDft, VmpApplyDftTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftAddTmpBytes,
VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatFromBytes, VmpPrepare, VmpPrepareTmpBytes,
VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPMatBytesOf, VmpPMatFromBytes, VmpPrepare, VmpPrepareTmpBytes,
},
layouts::{
Backend, MatZnxToRef, Module, Scratch, VecZnxDftToMut, VecZnxDftToRef, VecZnxToRef, VmpPMatOwned, VmpPMatToMut,
@@ -23,11 +23,11 @@ where
}
}

impl<B> VmpPMatAllocBytes for Module<B>
impl<B> VmpPMatBytesOf for Module<B>
where
B: Backend + VmpPMatAllocBytesImpl<B>,
{
fn vmp_pmat_bytes_of(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
fn bytes_of_vmp_pmat(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
B::vmp_pmat_bytes_of_impl(self.n(), rows, cols_in, cols_out, size)
}
}

@@ -1,4 +1,4 @@
use crate::layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat};
use crate::layouts::{Backend, Scratch, ScratchOwned};

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
@@ -39,111 +39,3 @@ pub unsafe trait ScratchAvailableImpl<B: Backend> {
pub unsafe trait TakeSliceImpl<B: Backend> {
fn take_slice_impl<T>(scratch: &mut Scratch<B>, len: usize) -> (&mut [T], &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeScalarZnx] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeScalarZnxImpl<B: Backend> {
fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeSvpPPol] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeSvpPPolImpl<B: Backend> {
fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeVecZnx] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeVecZnxImpl<B: Backend> {
fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeVecZnxSlice] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeVecZnxSliceImpl<B: Backend> {
fn take_vec_znx_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeVecZnxBig] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeVecZnxBigImpl<B: Backend> {
fn take_vec_znx_big_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeVecZnxDft] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeVecZnxDftImpl<B: Backend> {
fn take_vec_znx_dft_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeVecZnxDftSlice] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeVecZnxDftSliceImpl<B: Backend> {
fn take_vec_znx_dft_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeVmpPMat] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeVmpPMatImpl<B: Backend> {
fn take_vmp_pmat_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>);
}

/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
/// * See [crate::api::TakeMatZnx] for corresponding public API.
/// # Safety [crate::doc::backend_safety] for safety contract.
pub unsafe trait TakeMatZnxImpl<B: Backend> {
fn take_mat_znx_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Scratch<B>);
}
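With all of these per-type extension points deleted, the only scratch OEP left for a backend is TakeSliceImpl (plus the Scratch* traits kept above); everything typed now flows through ScratchTakeBasic's defaults. A hedged sketch of the obligation that remains for a backend (`MyBackend` is illustrative; the aligned-split logic is backend-specific and elided):

// Hedged sketch, not part of the diff: the single scratch extension
// point a backend still has to provide after this refactor.
unsafe impl<B: Backend> TakeSliceImpl<B> for MyBackend
where
    B: ScratchFromBytesImpl<B>,
{
    fn take_slice_impl<T>(scratch: &mut Scratch<B>, len: usize) -> (&mut [T], &mut Scratch<B>) {
        // Carve `len * size_of::<T>()` aligned bytes off the front of
        // `scratch.data`, cast them to &mut [T], and rewrap the rest
        // via Scratch::from_bytes (compare the FFT64Avx impl in this diff).
        unimplemented!("backend-specific aligned split")
    }
}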
|
||||
|
||||
@@ -9,10 +9,10 @@ use poulpy_core::layouts::{
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc,
|
||||
SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
|
||||
VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes,
|
||||
VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
|
||||
VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform,
|
||||
SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
|
||||
VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace,
|
||||
VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
|
||||
VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform,
|
||||
VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace,
|
||||
VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub,
|
||||
VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
|
||||
@@ -42,7 +42,7 @@ where
|
||||
+ VecZnxFillUniform
|
||||
+ VecZnxAddNormal
|
||||
+ VecZnxNormalizeInplace<B>
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -55,7 +55,7 @@ where
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxAutomorphism
|
||||
+ VecZnxSwitchRing
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ SvpApplyDftToDft<B>
|
||||
+ VecZnxBigAddInplace<B>
|
||||
@@ -70,7 +70,7 @@ where
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ VecZnxRotateInplace<B>
|
||||
+ VecZnxBigAutomorphismInplace<B>
|
||||
+ VecZnxRshInplace<B>
|
||||
@@ -80,7 +80,7 @@ where
|
||||
+ VecZnxAutomorphismInplace<B>
|
||||
+ VecZnxBigSubSmallNegateInplace<B>
|
||||
+ VecZnxRotateInplaceTmpBytes
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxRotate
|
||||
+ ZnFillUniform
|
||||
@@ -124,7 +124,7 @@ where
|
||||
+ VecZnxFillUniform
|
||||
+ VecZnxAddNormal
|
||||
+ VecZnxNormalizeInplace<B>
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ SvpApplyDftToDftInplace<B>
|
||||
@@ -137,7 +137,7 @@ where
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxAutomorphism
|
||||
+ VecZnxSwitchRing
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ SvpApplyDftToDft<B>
|
||||
+ VecZnxBigAddInplace<B>
|
||||
@@ -152,7 +152,7 @@ where
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ VecZnxRotateInplace<B>
|
||||
+ VecZnxBigAutomorphismInplace<B>
|
||||
+ VecZnxRshInplace<B>
|
||||
@@ -162,7 +162,7 @@ where
|
||||
+ VecZnxAutomorphismInplace<B>
|
||||
+ VecZnxBigSubSmallNegateInplace<B>
|
||||
+ VecZnxRotateInplaceTmpBytes
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxRotate
|
||||
+ ZnFillUniform
|
||||
|
||||
@@ -2,8 +2,8 @@ use std::marker::PhantomData;
|
||||
|
||||
use poulpy_core::layouts::{Base2K, GLWE, GLWEInfos, GLWEPlaintextLayout, LWEInfos, Rank, TorusPrecision};
|
||||
|
||||
use poulpy_core::{TakeGLWEPt, layouts::prepared::GLWESecretPrepared};
|
||||
use poulpy_hal::api::VecZnxBigAllocBytes;
|
||||
use poulpy_core::{TakeGLWEPlaintext, layouts::prepared::GLWESecretPrepared};
|
||||
use poulpy_hal::api::VecZnxBigBytesOf;
|
||||
#[cfg(test)]
|
||||
use poulpy_hal::api::{
|
||||
ScratchAvailable, TakeVecZnx, VecZnxAddInplace, VecZnxAddNormal, VecZnxFillUniform, VecZnxNormalize, VecZnxSub,
|
||||
@@ -12,8 +12,8 @@ use poulpy_hal::api::{
|
||||
use poulpy_hal::source::Source;
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftAllocBytes,
|
||||
VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
|
||||
TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftApply,
|
||||
VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
|
||||
},
|
||||
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
|
||||
};
|
||||
@@ -83,7 +83,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintBlocks<D, T> {
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
S: DataRef,
|
||||
Module<BE>: VecZnxDftAllocBytes
|
||||
Module<BE>: VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDftInplace<BE>
|
||||
@@ -96,7 +96,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintBlocks<D, T> {
|
||||
+ VecZnxAddNormal
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxSub,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPt<BE>,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPlaintext<BE>,
|
||||
{
|
||||
use poulpy_core::layouts::GLWEPlaintextLayout;
|
||||
|
||||
@@ -136,7 +136,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
|
||||
+ VecZnxBigAddInplace<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
@@ -175,8 +175,8 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
|
||||
scratch: &mut Scratch<BE>,
|
||||
) -> Vec<f64>
|
||||
where
|
||||
Module<BE>: VecZnxDftAllocBytes
|
||||
+ VecZnxBigAllocBytes
|
||||
Module<BE>: VecZnxDftBytesOf
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDftInplace<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
@@ -186,7 +186,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxSubInplace
|
||||
+ VecZnxNormalizeInplace<BE>,
|
||||
Scratch<BE>: TakeGLWEPt<BE> + TakeVecZnxDft<BE> + TakeVecZnxBig<BE>,
|
||||
Scratch<BE>: TakeGLWEPlaintext<BE> + TakeVecZnxDft<BE> + TakeVecZnxBig<BE>,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
|
||||
@@ -14,8 +14,8 @@ use poulpy_hal::{
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, SvpApplyDftToDftInplace, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
|
||||
VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes,
|
||||
VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
|
||||
VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf,
|
||||
VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
|
||||
VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
|
||||
VecZnxSubInplace, VmpPrepare,
|
||||
},
|
||||
@@ -123,7 +123,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits, BE: Backend> FheUintBlocksPrep<D,
|
||||
) where
|
||||
S: DataRef,
|
||||
Module<BE>: VecZnxAddScalarInplace
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDftInplace<BE>
|
||||
@@ -190,8 +190,8 @@ impl<D: DataRef, T: UnsignedInteger + ToBits> FheUintBlocksPrepDebug<D, T> {
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn noise<S: DataRef, BE: Backend>(&self, module: &Module<BE>, sk: &GLWESecretPrepared<S, BE>, want: T)
|
||||
where
|
||||
Module<BE>: VecZnxDftAllocBytes
|
||||
+ VecZnxBigAllocBytes
|
||||
Module<BE>: VecZnxDftBytesOf
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDftInplace<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use itertools::Itertools;
|
||||
use poulpy_core::{
|
||||
GLWEOperations, TakeGLWECtSlice, TakeGLWEPt, glwe_packing,
|
||||
GLWEOperations, TakeGLWEPlaintext, TakeGLWESlice, glwe_packing,
|
||||
layouts::{
|
||||
GLWE, GLWEInfos, GLWEPlaintextLayout, LWEInfos, TorusPrecision,
|
||||
prepared::{AutomorphismKeyPrepared, GLWESecretPrepared},
|
||||
@@ -11,7 +11,7 @@ use poulpy_hal::{
|
||||
ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
|
||||
VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
|
||||
VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
|
||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
|
||||
VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
|
||||
VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
||||
VmpApplyDftToDftTmpBytes,
|
||||
@@ -39,7 +39,7 @@ impl<D: DataMut, T: UnsignedInteger> FheUintWord<D, T> {
|
||||
Module<BE>: VecZnxSub
|
||||
+ VecZnxCopy
|
||||
+ VecZnxNegateInplace
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxAddInplace
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
@@ -62,7 +62,7 @@ impl<D: DataMut, T: UnsignedInteger> FheUintWord<D, T> {
|
||||
+ VecZnxAutomorphismInplace<BE>
|
||||
+ VecZnxBigSubSmallNegateInplace<BE>
|
||||
+ VecZnxRotate,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWECtSlice,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWESlice,
|
||||
{
|
||||
// Repacks the GLWE ciphertexts bits
|
||||
let gap: usize = module.n() / T::WORD_SIZE;
|
||||
@@ -109,7 +109,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintWord<D, T> {
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
Module<BE>: VecZnxAddScalarInplace
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDftInplace<BE>
|
||||
@@ -122,7 +122,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintWord<D, T> {
|
||||
+ VecZnxAddNormal
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxSub,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPt<BE>,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPlaintext<BE>,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
@@ -167,7 +167,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits> FheUintWord<D, T> {
|
||||
+ VecZnxBigAddInplace<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
|
||||
Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use itertools::Itertools;
|
||||
use poulpy_core::{
|
||||
GLWEExternalProductInplace, GLWEOperations, TakeGLWECtSlice,
|
||||
GLWEExternalProductInplace, GLWEOperations, TakeGLWESlice,
|
||||
layouts::{
|
||||
GLWE, GLWEToMut, LWEInfos,
|
||||
prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
|
||||
@@ -49,7 +49,7 @@ impl<C: BitCircuitInfo, const N: usize, T: UnsignedInteger, BE: Backend> Circuit
|
||||
where
|
||||
Self: GetBitCircuitInfo<T>,
|
||||
Module<BE>: Cmux<BE> + VecZnxCopy,
|
||||
Scratch<BE>: TakeGLWECtSlice,
|
||||
Scratch<BE>: TakeGLWESlice,
|
||||
{
|
||||
fn execute<O>(
|
||||
&self,
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::tfhe::{
|
||||
},
|
||||
};
|
||||
use poulpy_core::{
|
||||
TakeGGSW, TakeGLWECt,
|
||||
TakeGGSW, TakeGLWE,
|
||||
layouts::{
|
||||
GLWESecret, GLWEToLWEKeyLayout, GLWEToLWESwitchingKey, LWE, LWESecret,
|
||||
prepared::{GLWEToLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
|
||||
@@ -17,10 +17,10 @@ use poulpy_core::{
|
||||
};
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
|
||||
ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx,
|
||||
TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
|
||||
VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
|
||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
|
||||
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
|
||||
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPrepare,
|
||||
},
|
||||
@@ -77,7 +77,7 @@ impl<BRA: BlindRotationAlgo> BDDKey<Vec<u8>, Vec<u8>, BRA> {
|
||||
Module<BE>: SvpApplyDftToDft<BE>
|
||||
+ VecZnxIdftApplyTmpA<BE>
|
||||
+ VecZnxAddScalarInplace
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDftInplace<BE>
|
||||
@@ -92,7 +92,7 @@ impl<BRA: BlindRotationAlgo> BDDKey<Vec<u8>, Vec<u8>, BRA> {
|
||||
+ VecZnxSub
|
||||
+ SvpPrepare<BE>
|
||||
+ VecZnxSwitchRing
|
||||
+ SvpPPolAllocBytes
|
||||
+ SvpPPolBytesOf
|
||||
+ SvpPPolAlloc<BE>
|
||||
+ VecZnxAutomorphism
|
||||
+ VecZnxAutomorphismInplace<BE>,
|
||||
@@ -157,7 +157,7 @@ where
|
||||
BE: Backend,
|
||||
Module<BE>: VmpPrepare<BE>
|
||||
+ VecZnxRotate
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
@@ -168,7 +168,7 @@ where
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWECt + TakeVecZnx + TakeGGSW,
|
||||
Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWE + TakeVecZnx + TakeGGSW,
|
||||
CircuitBootstrappingKeyPrepared<CBT, BRA, BE>: CirtuitBootstrappingExecute<BE>,
|
||||
{
|
||||
fn prepare(
|
||||
@@ -206,7 +206,7 @@ where
|
||||
BE: Backend,
|
||||
Module<BE>: VmpPrepare<BE>
|
||||
+ VecZnxRotate
|
||||
+ VecZnxDftAllocBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
@@ -217,7 +217,7 @@ where
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWECt + TakeVecZnx + TakeGGSW,
|
||||
Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWE + TakeVecZnx + TakeGGSW,
|
||||
CircuitBootstrappingKeyPrepared<CBT, BRA, BE>: CirtuitBootstrappingExecute<BE>,
|
||||
{
|
||||
fn prepare(
|
||||
|
||||
@@ -2,7 +2,7 @@ use std::time::Instant;

use poulpy_backend::FFT64Ref;
use poulpy_core::{
    TakeGGSW, TakeGLWEPt,
    TakeGGSW, TakeGLWEPlaintext,
    layouts::{
        GGSWCiphertextLayout, GLWELayout, GLWESecret, LWEInfos, LWESecret,
        prepared::{GLWESecretPrepared, PrepareAlloc},
@@ -11,11 +11,11 @@ use poulpy_core::{
use poulpy_hal::{
    api::{
        ModuleNew, ScratchAvailable, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace,
        SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
        SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
        VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
@@ -51,7 +51,7 @@ where
    Module<BE>: ModuleNew<BE> + SvpPPolAlloc<BE> + SvpPrepare<BE> + VmpPMatAlloc<BE>,
    ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
    Module<BE>: VecZnxAddScalarInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VecZnxBigNormalize<BE>
        + VecZnxDftApply<BE>
        + SvpApplyDftToDftInplace<BE>
@@ -68,16 +68,16 @@ where
    Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGGSW + TakeScalarZnx + TakeSlice,
    Module<BE>: VecZnxCopy + VecZnxNegateInplace + VmpApplyDftToDftTmpBytes + VmpApplyDftToDft<BE> + VmpApplyDftToDftAdd<BE>,
    Module<BE>: VecZnxBigAddInplace<BE> + VecZnxBigAddSmallInplace<BE> + VecZnxBigNormalize<BE>,
    Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
    Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
    Module<BE>: VecZnxAutomorphism
        + VecZnxSwitchRing
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxIdftApplyTmpA<BE>
        + SvpApplyDftToDft<BE>
        + VecZnxBigAlloc<BE>
        + VecZnxDftAlloc<BE>
        + VecZnxBigNormalizeTmpBytes
        + SvpPPolAllocBytes
        + SvpPPolBytesOf
        + VecZnxRotateInplace<BE>
        + VecZnxBigAutomorphismInplace<BE>
        + VecZnxRshInplace<BE>
@@ -85,7 +85,7 @@ where
        + VecZnxAutomorphismInplace<BE>
        + VecZnxBigSubSmallNegateInplace<BE>
        + VecZnxRotateInplaceTmpBytes
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxDftAddInplace<BE>
        + VecZnxRotate
        + ZnFillUniform

@@ -1,9 +1,9 @@
use itertools::izip;
use poulpy_hal::{
    api::{
        ScratchAvailable, SvpApplyDftToDft, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
        TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
        ScratchAvailable, SvpApplyDftToDft, SvpPPolBytesOf, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
        TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
        VecZnxDftSubInplace, VecZnxDftZero, VecZnxIdftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes,
        VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
        VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
@@ -12,7 +12,7 @@ use poulpy_hal::{
};

use poulpy_core::{
    Distribution, GLWEOperations, TakeGLWECt,
    Distribution, GLWEOperations, TakeGLWE,
    layouts::{GGSWInfos, GLWE, GLWEInfos, GLWEToMut, LWE, LWECiphertextToRef, LWEInfos},
};

@@ -31,10 +31,10 @@ pub fn cggi_blind_rotate_scratch_space<B: Backend, OUT, GGSW>(
where
    OUT: GLWEInfos,
    GGSW: GGSWInfos,
    Module<B>: VecZnxDftAllocBytes
    Module<B>: VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxNormalizeTmpBytes
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxIdftApplyTmpBytes
        + VecZnxBigNormalizeTmpBytes,
{
@@ -43,10 +43,10 @@ where
    if block_size > 1 {
        let cols: usize = (brk_infos.rank() + 1).into();
        let dnum: usize = brk_infos.dnum().into();
        let acc_dft: usize = module.vec_znx_dft_bytes_of(cols, dnum) * extension_factor;
        let acc_big: usize = module.vec_znx_big_bytes_of(1, brk_size);
        let vmp_res: usize = module.vec_znx_dft_bytes_of(cols, brk_size) * extension_factor;
        let vmp_xai: usize = module.vec_znx_dft_bytes_of(1, brk_size);
        let acc_dft: usize = module.bytes_of_vec_znx_dft(cols, dnum) * extension_factor;
        let acc_big: usize = module.bytes_of_vec_znx_big(1, brk_size);
        let vmp_res: usize = module.bytes_of_vec_znx_dft(cols, brk_size) * extension_factor;
        let vmp_xai: usize = module.bytes_of_vec_znx_dft(1, brk_size);
        let acc_dft_add: usize = vmp_res;
        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(brk_size, dnum, dnum, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
        let acc: usize = if extension_factor > 1 {
@@ -67,9 +67,9 @@ where

impl<D: DataRef, B: Backend> BlincRotationExecute<B> for BlindRotationKeyPrepared<D, CGGI, B>
where
    Module<B>: VecZnxBigAllocBytes
        + VecZnxDftAllocBytes
        + SvpPPolAllocBytes
    Module<B>: VecZnxBigBytesOf
        + VecZnxDftBytesOf
        + SvpPPolBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxIdftApplyTmpBytes
@@ -129,9 +129,9 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
    DataRes: DataMut,
    DataIn: DataRef,
    DataBrk: DataRef,
    Module<B>: VecZnxBigAllocBytes
        + VecZnxDftAllocBytes
        + SvpPPolAllocBytes
    Module<B>: VecZnxBigBytesOf
        + VecZnxDftBytesOf
        + SvpPPolBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxIdftApplyTmpBytes
@@ -296,9 +296,9 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
    DataRes: DataMut,
    DataIn: DataRef,
    DataBrk: DataRef,
    Module<B>: VecZnxBigAllocBytes
        + VecZnxDftAllocBytes
        + SvpPPolAllocBytes
    Module<B>: VecZnxBigBytesOf
        + VecZnxDftBytesOf
        + SvpPPolBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxIdftApplyTmpBytes
@@ -418,9 +418,9 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
    DataRes: DataMut,
    DataIn: DataRef,
    DataBrk: DataRef,
    Module<B>: VecZnxBigAllocBytes
        + VecZnxDftAllocBytes
        + SvpPPolAllocBytes
    Module<B>: VecZnxBigBytesOf
        + VecZnxDftBytesOf
        + SvpPPolBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxIdftApplyTmpBytes

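For orientation, the scratch-space hunk above only renames the size helpers; the byte accounting itself is unchanged. Below is a minimal standalone sketch of that accounting's shape. The buffer names, `cols = rank + 1`, `dnum`, `brk_size`, and `extension_factor` come from the hunk; the ring degree, the per-element size formulas standing in for `module.bytes_of_vec_znx_dft` / `module.bytes_of_vec_znx_big`, and the final sum are illustrative assumptions (the diff cuts off before the buffers are combined):

// Sketch only: mirrors the shape of cggi_blind_rotate_scratch_space's
// accounting. Sizes and the final combination are assumptions, not the
// backend's real formulas.
fn blind_rotate_scratch_sketch(rank: usize, dnum: usize, brk_size: usize, extension_factor: usize) -> usize {
    let n: usize = 1 << 10; // hypothetical ring degree
    // Hypothetical stand-ins for Module::bytes_of_vec_znx_dft / bytes_of_vec_znx_big,
    // assuming 8-byte limbs per (column, size) slot:
    let dft_bytes = |cols: usize, size: usize| n * cols * size * 8;
    let big_bytes = |cols: usize, size: usize| n * cols * size * 8;
    let cols = rank + 1; // one column per GLWE component
    let acc_dft = dft_bytes(cols, dnum) * extension_factor;
    let acc_big = big_bytes(1, brk_size);
    let vmp_res = dft_bytes(cols, brk_size) * extension_factor;
    let vmp_xai = dft_bytes(1, brk_size);
    let acc_dft_add = vmp_res; // same layout as the VMP result buffer
    // Summing the live buffers here is purely to make the sketch total;
    // the real function also accounts for VMP temporaries and the accumulator.
    acc_dft + acc_big + vmp_res + vmp_xai + acc_dft_add
}
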
@@ -1,9 +1,8 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
        VmpPMatAlloc, VmpPrepare,
        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
    },
    layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxView, ZnxViewMut},
    source::Source,
@@ -47,7 +46,7 @@ impl BlindRotationKey<Vec<u8>, CGGI> {
    pub fn generate_from_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
    where
        A: GGSWInfos,
        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
    {
        GGSW::encrypt_sk_scratch_space(module, infos)
    }
@@ -56,7 +55,7 @@ impl BlindRotationKey<Vec<u8>, CGGI> {
impl<D: DataMut, B: Backend> BlindRotationKeyEncryptSk<B> for BlindRotationKey<D, CGGI>
where
    Module<B>: VecZnxAddScalarInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VecZnxBigNormalize<B>
        + VecZnxDftApply<B>
        + SvpApplyDftToDftInplace<B>
@@ -149,7 +148,7 @@ impl BlindRotationKeyCompressed<Vec<u8>, CGGI> {
    pub fn generate_from_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
    where
        A: GGSWInfos,
        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
    {
        GGSWCompressed::encrypt_sk_scratch_space(module, infos)
    }
@@ -169,7 +168,7 @@ impl<D: DataMut> BlindRotationKeyCompressed<D, CGGI> {
    DataSkGLWE: DataRef,
    DataSkLWE: DataRef,
    Module<B>: VecZnxAddScalarInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VecZnxBigNormalize<B>
        + VecZnxDftApply<B>
        + SvpApplyDftToDftInplace<B>

@@ -1,9 +1,9 @@
use poulpy_hal::{
    api::{
        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
        SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftSubInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftSubInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
        VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
        VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxSub, VecZnxSubInplace,
        VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal,
@@ -29,9 +29,9 @@ use poulpy_core::layouts::{

pub fn test_blind_rotation<B>(module: &Module<B>, n_lwe: usize, block_size: usize, extension_factor: usize)
where
    Module<B>: VecZnxBigAllocBytes
        + VecZnxDftAllocBytes
        + SvpPPolAllocBytes
    Module<B>: VecZnxBigBytesOf
        + VecZnxDftBytesOf
        + SvpPPolBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxIdftApplyTmpBytes

@@ -3,9 +3,9 @@ use std::collections::HashMap;
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeMatZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice,
        VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
        VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
        VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft,
        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
@@ -15,7 +15,7 @@ use poulpy_hal::{
};

use poulpy_core::{
    GLWEOperations, TakeGGLWE, TakeGLWECt,
    GLWEOperations, TakeGGLWE, TakeGLWE,
    layouts::{Dsize, GGLWECiphertextLayout, GGSWInfos, GLWEInfos, LWEInfos},
};

@@ -44,7 +44,7 @@ where
        + VecZnxNegateInplace
        + VecZnxCopy
        + VecZnxSubInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>
@@ -56,7 +56,7 @@ where
        + VecZnxAutomorphismInplace<B>
        + VecZnxBigSubSmallNegateInplace<B>
        + VecZnxRotateInplaceTmpBytes
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxDftAddInplace<B>
        + VecZnxRotate
        + VecZnxNormalize<B>,
@@ -145,7 +145,7 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
        + VecZnxNegateInplace
        + VecZnxCopy
        + VecZnxSubInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>
@@ -156,7 +156,7 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
        + VecZnxBigNormalize<B>
        + VecZnxAutomorphismInplace<B>
        + VecZnxBigSubSmallNegateInplace<B>
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxDftAddInplace<B>
        + VecZnxRotateInplaceTmpBytes
        + VecZnxRotate
@@ -286,7 +286,7 @@ fn post_process<DataRes, DataA, B: Backend>(
        + VecZnxNegateInplace
        + VecZnxCopy
        + VecZnxSubInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<B>

@@ -7,9 +7,9 @@ use std::collections::HashMap;

use poulpy_hal::{
    api::{
        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx,
        TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
        VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
        VecZnxSwitchRing, VmpPMatAlloc, VmpPrepare,
    },
@@ -78,7 +78,7 @@ where
    Module<B>: SvpApplyDftToDft<B>
        + VecZnxIdftApplyTmpA<B>
        + VecZnxAddScalarInplace
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VecZnxBigNormalize<B>
        + VecZnxDftApply<B>
        + SvpApplyDftToDftInplace<B>
@@ -93,7 +93,7 @@ where
        + VecZnxSub
        + SvpPrepare<B>
        + VecZnxSwitchRing
        + SvpPPolAllocBytes
        + SvpPPolBytesOf
        + SvpPPolAlloc<B>
        + VecZnxAutomorphism,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeSvpPPol<B> + TakeVecZnxBig<B>,

@@ -2,11 +2,11 @@ use std::time::Instant;

use poulpy_hal::{
    api::{
        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
        SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
        VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
        VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
@@ -45,7 +45,7 @@ where
    Module<B>: VecZnxFillUniform
        + VecZnxAddNormal
        + VecZnxNormalizeInplace<B>
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VecZnxBigNormalize<B>
        + VecZnxDftApply<B>
        + SvpApplyDftToDftInplace<B>
@@ -58,7 +58,7 @@ where
        + VecZnxAddScalarInplace
        + VecZnxAutomorphism
        + VecZnxSwitchRing
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxIdftApplyTmpA<B>
        + SvpApplyDftToDft<B>
        + VecZnxBigAddInplace<B>
@@ -73,7 +73,7 @@ where
        + VmpApplyDftToDftTmpBytes
        + VmpApplyDftToDft<B>
        + VmpApplyDftToDftAdd<B>
        + SvpPPolAllocBytes
        + SvpPPolBytesOf
        + VecZnxRotateInplace<B>
        + VecZnxBigAutomorphismInplace<B>
        + VecZnxRshInplace<B>
@@ -83,7 +83,7 @@ where
        + VecZnxAutomorphismInplace<B>
        + VecZnxBigSubSmallNegateInplace<B>
        + VecZnxRotateInplaceTmpBytes
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxDftAddInplace<B>
        + VecZnxRotate
        + ZnFillUniform
@@ -267,7 +267,7 @@ where
    Module<B>: VecZnxFillUniform
        + VecZnxAddNormal
        + VecZnxNormalizeInplace<B>
        + VecZnxDftAllocBytes
        + VecZnxDftBytesOf
        + VecZnxBigNormalize<B>
        + VecZnxDftApply<B>
        + SvpApplyDftToDftInplace<B>
@@ -280,7 +280,7 @@ where
        + VecZnxAddScalarInplace
        + VecZnxAutomorphism
        + VecZnxSwitchRing
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxIdftApplyTmpA<B>
        + SvpApplyDftToDft<B>
        + VecZnxBigAddInplace<B>
@@ -295,7 +295,7 @@ where
        + VmpApplyDftToDftTmpBytes
        + VmpApplyDftToDft<B>
        + VmpApplyDftToDftAdd<B>
        + SvpPPolAllocBytes
        + SvpPPolBytesOf
        + VecZnxRotateInplace<B>
        + VecZnxBigAutomorphismInplace<B>
        + VecZnxRotateInplaceTmpBytes
@@ -305,7 +305,7 @@ where
        + VecZnxCopy
        + VecZnxAutomorphismInplace<B>
        + VecZnxBigSubSmallNegateInplace<B>
        + VecZnxBigAllocBytes
        + VecZnxBigBytesOf
        + VecZnxDftAddInplace<B>
        + VecZnxRotate
        + ZnFillUniform