Commit: wip (poulpy, https://github.com/arnaucube/poulpy.git)
@@ -3,13 +3,8 @@ use std::marker::PhantomData;
 use poulpy_hal::{
     DEFAULTALIGN, alloc_aligned,
     api::ScratchFromBytes,
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
-        TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
-        TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
-        VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
-    },
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };
 
 use crate::cpu_fft64_avx::FFT64Avx;
@@ -64,178 +59,6 @@ where
     }
 }
 
-unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::bytes_of(n, cols));
-        (
-            ScalarZnx::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Avx
-where
-    B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_bytes_of_impl(n, cols));
-        (
-            SvpPPol::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::bytes_of(n, cols, size));
-        (
-            VecZnx::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Avx
-where
-    B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_big_bytes_of_impl(n, cols, size),
-        );
-        (
-            VecZnxBig::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Avx
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_dft_bytes_of_impl(n, cols, size),
-        );
-
-        (
-            VecZnxDft::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Avx
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Avx
-where
-    B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vmp_pmat_bytes_of_impl(n, rows, cols_in, cols_out, size),
-        );
-        (
-            VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Avx
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            MatZnx::bytes_of(n, rows, cols_in, cols_out, size),
-        );
-        (
-            MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
 fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
     let ptr: *mut u8 = data.as_mut_ptr();
     let self_len: usize = data.len();
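Every take_*_impl deleted above is a thin wrapper around the take_slice_aligned helper that survives at the bottom of the hunk: compute the byte size of the requested layout, carve that many aligned bytes off the front of the scratch arena, wrap them in the layout type, and return the remainder as a fresh Scratch. A minimal self-contained sketch of that split, assuming the helper rounds the cut up to DEFAULTALIGN so the remainder stays aligned (the helper's body is cut off in this capture, so the rounding detail and the names ALIGN/split_aligned are assumptions, not poulpy's exact code):

const ALIGN: usize = 64; // assumed DEFAULTALIGN-like constant

// Illustrative stand-in for take_slice_aligned: (taken, remainder).
fn split_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
    // Round the cut upward so the remainder starts on an ALIGN boundary.
    let cut = take_len.next_multiple_of(ALIGN);
    assert!(cut <= data.len(), "scratch arena too small");
    data.split_at_mut(cut)
}

Each impl then only differs in which bytes_of function sizes the take and which from_data constructor reinterprets the carved slice.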
@@ -3,13 +3,8 @@ use std::marker::PhantomData;
 use poulpy_hal::{
     DEFAULTALIGN, alloc_aligned,
     api::ScratchFromBytes,
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
-        TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
-        TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
-        VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
-    },
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };
 
 use crate::cpu_fft64_ref::FFT64Ref;
@@ -64,178 +59,6 @@ where
     }
 }
 
-unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::bytes_of(n, cols));
-        (
-            ScalarZnx::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Ref
-where
-    B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_bytes_of_impl(n, cols));
-        (
-            SvpPPol::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::bytes_of(n, cols, size));
-        (
-            VecZnx::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Ref
-where
-    B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_big_bytes_of_impl(n, cols, size),
-        );
-        (
-            VecZnxBig::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Ref
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_dft_bytes_of_impl(n, cols, size),
-        );
-
-        (
-            VecZnxDft::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Ref
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Ref
-where
-    B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vmp_pmat_bytes_of_impl(n, rows, cols_in, cols_out, size),
-        );
-        (
-            VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Ref
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            MatZnx::bytes_of(n, rows, cols_in, cols_out, size),
-        );
-        (
-            MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
 fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
     let ptr: *mut u8 = data.as_mut_ptr();
     let self_len: usize = data.len();
@@ -3,13 +3,8 @@ use std::marker::PhantomData;
 use poulpy_hal::{
     DEFAULTALIGN, alloc_aligned,
     api::ScratchFromBytes,
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
-        TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
-        TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
-        VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
-    },
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };
 
 use crate::cpu_spqlios::FFT64Spqlios;
@@ -64,178 +59,6 @@ where
     }
 }
 
-unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::bytes_of(n, cols));
-        (
-            ScalarZnx::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Spqlios
-where
-    B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_bytes_of_impl(n, cols));
-        (
-            SvpPPol::from_data(take_slice, n, cols),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::bytes_of(n, cols, size));
-        (
-            VecZnx::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Spqlios
-where
-    B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_big_bytes_of_impl(n, cols, size),
-        );
-        (
-            VecZnxBig::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Spqlios
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vec_znx_dft_bytes_of_impl(n, cols, size),
-        );
-
-        (
-            VecZnxDft::from_data(take_slice, n, cols, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Spqlios
-where
-    B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
-        let mut scratch: &mut Scratch<B> = scratch;
-        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
-        for _ in 0..len {
-            let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
-            scratch = new_scratch;
-            slice.push(znx);
-        }
-        (slice, scratch)
-    }
-}
-
-unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Spqlios
-where
-    B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
-{
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            B::vmp_pmat_bytes_of_impl(n, rows, cols_in, cols_out, size),
-        );
-        (
-            VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
-unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Spqlios
-where
-    B: ScratchFromBytesImpl<B>,
-{
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
-        let (take_slice, rem_slice) = take_slice_aligned(
-            &mut scratch.data,
-            MatZnx::bytes_of(n, rows, cols_in, cols_out, size),
-        );
-        (
-            MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
-            Scratch::from_bytes(rem_slice),
-        )
-    }
-}
-
 fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
     let ptr: *mut u8 = data.as_mut_ptr();
     let self_len: usize = data.len();
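The three hunks above delete the same 172 lines from each backend (FFT64Avx, FFT64Ref, FFT64Spqlios); only take_slice_aligned stays per backend. Where the shared impls move to is not visible in this capture. The one non-obvious shape in the deleted code is how take_vec_znx_dft_slice_impl threads the scratch through repeated takes: each take splits the front off and returns the tail, which is rebound for the next iteration. A self-contained sketch of that borrow-threading pattern over a plain byte slice (take, take_many, and item_bytes are illustrative names, not poulpy API):

fn take<'a>(data: &'a mut [u8], n: usize) -> (&'a mut [u8], &'a mut [u8]) {
    data.split_at_mut(n) // panics if the buffer is too small, like a sketch should
}

// Carve `len` equal regions off the front, returning them plus the leftover.
fn take_many<'a>(
    mut scratch: &'a mut [u8],
    len: usize,
    item_bytes: usize,
) -> (Vec<&'a mut [u8]>, &'a mut [u8]) {
    let mut items: Vec<&'a mut [u8]> = Vec::with_capacity(len);
    for _ in 0..len {
        let (head, tail) = take(scratch, item_bytes); // moves `scratch` in
        items.push(head);
        scratch = tail; // rebind the remainder for the next take
    }
    (items, scratch)
}

Moving the slice by value into the helper lets the returned halves keep the full lifetime 'a, mirroring how the deleted impls pass &mut Scratch<B> in and get &mut Scratch<B> back.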
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
+        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume,
         VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
@@ -20,7 +20,7 @@ impl AutomorphismKey<Vec<u8>> {
         OUT: GGLWEInfos,
         IN: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::keyswitch_scratch_space(
             module,
@@ -34,7 +34,7 @@ impl AutomorphismKey<Vec<u8>> {
     where
         OUT: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         AutomorphismKey::automorphism_scratch_space(module, out_infos, out_infos, key_infos)
     }
@@ -48,7 +48,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -136,7 +136,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes,
-        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
@@ -26,14 +26,11 @@ impl GGSW<Vec<u8>> {
         IN: GGSWInfos,
         KEY: GGLWEInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
+        Module<B>:
+            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         let out_size: usize = out_infos.size();
-        let ci_dft: usize = module.vec_znx_dft_bytes_of((key_infos.rank_out() + 1).into(), out_size);
+        let ci_dft: usize = module.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
         let ks_internal: usize = GLWE::keyswitch_scratch_space(
             module,
             &out_infos.glwe_layout(),
@@ -54,11 +51,8 @@ impl GGSW<Vec<u8>> {
         OUT: GGSWInfos,
         KEY: GGLWEInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
+        Module<B>:
+            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         GGSW::automorphism_scratch_space(module, out_infos, out_infos, key_infos, tsk_infos)
     }
@@ -73,7 +67,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tensor_key: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -83,7 +77,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             + VecZnxBigAddSmallInplace<B>
             + VecZnxBigNormalize<B>
             + VecZnxAutomorphismInplace<B>
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxNormalizeTmpBytes
             + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
@@ -141,7 +135,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tensor_key: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -151,7 +145,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             + VecZnxBigAddSmallInplace<B>
             + VecZnxBigNormalize<B>
             + VecZnxAutomorphismInplace<B>
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxNormalizeTmpBytes
             + VecZnxDftCopy<B>
             + VecZnxDftAddInplace<B>
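From the AutomorphismKey hunks above through the rest of the capture, the commit is a mechanical rename of the byte-sizing vocabulary: the trait bounds VecZnxDftAllocBytes, VecZnxBigAllocBytes, and SvpPPolAllocBytes become VecZnxDftBytesOf, VecZnxBigBytesOf, and SvpPPolBytesOf; the method vec_znx_dft_bytes_of becomes bytes_of_vec_znx_dft; and the scratch-take traits TakeGLWECt and TakeGLWEPt become TakeGLWE and TakeGLWEPlaintext. A caller-side sketch of the post-rename shape, pieced together from the GGSW hunk above (dft_scratch and its parameter names are made up for illustration; the bound and method names are the ones this diff introduces):

use poulpy_hal::{
    api::VecZnxDftBytesOf,
    layouts::{Backend, Module},
};

// Hypothetical helper: size the scratch for a (cols, size) DFT buffer
// via the renamed trait bound and method.
fn dft_scratch<B: Backend>(module: &Module<B>, cols: usize, size: usize) -> usize
where
    Module<B>: VecZnxDftBytesOf,
{
    module.bytes_of_vec_znx_dft(cols, size)
}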
@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
         VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace,
-        VecZnxBigSubSmallNegateInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxBigSubSmallNegateInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
         VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig},
@@ -21,7 +21,7 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         Self::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
     }
@@ -30,7 +30,7 @@ impl GLWE<Vec<u8>> {
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         Self::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
     }
@@ -44,7 +44,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -70,7 +70,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -97,7 +97,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -138,7 +138,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -180,7 +180,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -222,7 +222,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -265,7 +265,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -307,7 +307,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -1,14 +1,14 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 };
 
 use crate::{
-    TakeGLWECt,
+    TakeGLWE,
     layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared},
 };
 
@@ -23,7 +23,7 @@ impl LWE<Vec<u8>> {
         OUT: LWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         let glwe_layout: GLWELayout = GLWELayout {
             n: module.n().into(),
@@ -69,7 +69,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
     ) where
         DGlwe: DataRef,
         DKs: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -80,7 +80,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx,
+        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWE + TakeVecZnx,
     {
         #[cfg(debug_assertions)]
         {
@@ -1,14 +1,14 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
 };
 
 use crate::{
-    TakeGLWECt,
+    TakeGLWE,
     layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared},
 };
 
@@ -23,7 +23,7 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: LWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         let ct: usize = GLWE::bytes_of(
             module.n().into(),
@@ -51,7 +51,7 @@ impl<D: DataMut> GLWE<D> {
     ) where
         DLwe: DataRef,
         DKsk: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -62,7 +62,7 @@ impl<D: DataMut> GLWE<D> {
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx,
+        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWE + TakeVecZnx,
     {
         #[cfg(debug_assertions)]
         {
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
         SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch},
 };
@@ -12,10 +12,10 @@ impl GLWE<Vec<u8>> {
     pub fn decrypt_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size: usize = infos.size();
-        (module.vec_znx_normalize_tmp_bytes() | module.vec_znx_dft_bytes_of(1, size)) + module.vec_znx_dft_bytes_of(1, size)
+        (module.vec_znx_normalize_tmp_bytes() | module.bytes_of_vec_znx_dft(1, size)) + module.bytes_of_vec_znx_dft(1, size)
     }
 }
 
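The decrypt_scratch_space expression above mixes two combinators: + and |. As I read the convention (an inference from this hunk, not something the diff documents), x + y sizes two regions that must be live at the same time, while x | y, a bitwise OR on usize, sizes one slot reused for two purposes sequentially: the OR is always at least max(x, y), so it is a cheap, safe over-approximation of the larger requirement. A worked reading with made-up numbers:

// decrypt scratch = (normalize_tmp | dft) + dft
// e.g. normalize_tmp = 1024, dft = 4096:
//   1024 | 4096 = 5120   (>= max(1024, 4096); the bits are disjoint here,
//                          so the OR equals the sum -- a safe over-allocation)
//   total = 5120 + 4096 = 9216 bytes
fn decrypt_scratch_bytes(normalize_tmp: usize, dft: usize) -> usize {
    (normalize_tmp | dft) + dft
}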
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{ScratchAvailable, SvpPPolAllocBytes, VecZnxAutomorphism, VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes},
+    api::{ScratchAvailable, SvpPPolBytesOf, VecZnxAutomorphism, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
 };
@@ -17,7 +17,7 @@ impl AutomorphismKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
         assert_eq!(module.n() as u32, infos.n());
         GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
@@ -40,8 +40,7 @@ pub trait GGLWEAutomorphismKeyCompressedEncryptSk<B: Backend> {
 
 impl<B: Backend> GGLWEAutomorphismKeyCompressedEncryptSk<B> for Module<B>
 where
-    Module<B>:
-        GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxAutomorphism,
+    Module<B>: GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxAutomorphism,
     Scratch<B>: TakeGLWESecret + ScratchAvailable,
 {
     fn gglwe_automorphism_key_compressed_encrypt_sk<R, S>(
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
+        ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
         ZnNormalizeInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
@@ -8,7 +8,7 @@ use poulpy_hal::{
 };
 
 use crate::{
-    TakeGLWEPt,
+    TakeGLWEPlaintext,
     encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
     layouts::{
         GGLWE, GGLWEInfos, LWEInfos,
@@ -38,7 +38,7 @@ impl GGLWECompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GGLWE::encrypt_sk_scratch_space(module, infos)
     }
@@ -64,10 +64,10 @@ where
     Module<B>: GLWEEncryptSkInternal<B>
         + VecZnxNormalizeInplace<B>
        + VecZnxNormalizeTmpBytes
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxAddScalarInplace
         + ZnNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPt<B> + ScratchAvailable,
+    Scratch<B>: TakeGLWEPlaintext<B> + ScratchAvailable,
 {
     fn gglwe_compressed_encrypt_sk<R, P, S>(
         &self,
@@ -1,7 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes,
-        VecZnxSwitchRing,
+        ScratchAvailable, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,
@@ -21,7 +20,7 @@ impl GLWESwitchingKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
         (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
@@ -64,9 +63,9 @@ pub trait GGLWEKeyCompressedEncryptSk<B: Backend> {
 impl<B: Backend> GGLWEKeyCompressedEncryptSk<B> for Module<B>
 where
     Module<B>: GGLWECompressedEncryptSk<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxNormalizeTmpBytes
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxSwitchRing
         + SvpPrepare<B>,
     Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeGLWESecretPrepared<B>,
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
-        SvpApplyDftToDft, SvpPPolAllocBytes, SvpPrepare, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAllocBytes, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
+        SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigBytesOf, VecZnxBigNormalize,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
@@ -20,8 +20,7 @@ impl TensorKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>:
-            SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
     {
         TensorKey::encrypt_sk_scratch_space(module, infos)
     }
@@ -1,11 +1,11 @@
 use poulpy_hal::{
-    api::{VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
+    api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
     source::Source,
 };
 
 use crate::{
-    TakeGLWEPt,
+    TakeGLWEPlaintext,
     encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
     layouts::{
         GGSW, GGSWInfos, GLWEInfos, LWEInfos,
@@ -18,7 +18,7 @@ impl GGSWCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         GGSW::encrypt_sk_scratch_space(module, infos)
     }
@@ -42,7 +42,7 @@ pub trait GGSWCompressedEncryptSk<B: Backend> {
 impl<B: Backend> GGSWCompressedEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPt<B>,
+    Scratch<B>: TakeGLWEPlaintext<B>,
 {
     fn ggsw_compressed_encrypt_sk<R, P, S>(
         &self,
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes},
+    api::{VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
 };
@@ -17,7 +17,7 @@ impl GLWECompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         GLWE::encrypt_sk_scratch_space(module, infos)
     }
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, Module, Scratch},
@@ -20,7 +20,7 @@ impl AutomorphismKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<BE>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<BE>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         assert_eq!(
             infos.rank_in(),
@@ -80,7 +80,7 @@ where
 impl<BE: Backend> GGLWEAutomorphismKeyEncryptSk<BE> for Module<BE>
 where
     Module<BE>: VecZnxAddScalarInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<BE>
         + VecZnxDftApply<BE>
         + SvpApplyDftToDftInplace<BE>
@@ -95,7 +95,7 @@ where
         + VecZnxSub
         + SvpPrepare<BE>
         + VecZnxSwitchRing
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxAutomorphism,
     Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<BE>,
 {
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
@@ -8,7 +8,7 @@ use poulpy_hal::{
 };
 
 use crate::{
-    TakeGLWEPt,
+    TakeGLWEPlaintext,
     encryption::glwe_ct::GLWEEncryptSk,
     layouts::{
         GGLWE, GGLWEInfos, GGLWEToMut, GLWE, GLWEPlaintext, LWEInfos,
@@ -20,7 +20,7 @@ impl GGLWE<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
             + (GLWEPlaintext::bytes_of(&infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
@@ -51,8 +51,7 @@ pub trait GGLWEEncryptSk<B: Backend> {
 
 impl<B: Backend> GGLWEEncryptSk<B> for Module<B>
 where
-    Module<B>:
-        GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
+    Module<B>: GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
     Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     fn gglwe_encrypt_sk<R, P, S>(
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
         VecZnxSubInplace, VecZnxSwitchRing,
     },
@@ -18,7 +18,7 @@ impl GLWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
@@ -45,7 +45,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         scratch: &mut Scratch<B>,
     ) where
         Module<B>: VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<B>
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
@@ -60,7 +60,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
             + VecZnxSub
             + SvpPrepare<B>
             + VecZnxSwitchRing
-            + SvpPPolAllocBytes,
+            + SvpPPolBytesOf,
         Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
     {
         #[cfg(debug_assertions)]
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx,
-        TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes,
-        VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx,
+        TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigBytesOf,
+        VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
@@ -20,13 +20,12 @@ impl TensorKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>:
-            SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
     {
         GLWESecretPrepared::bytes_of(module, infos.rank_out())
-            + module.vec_znx_dft_bytes_of(infos.rank_out().into(), 1)
-            + module.vec_znx_big_bytes_of(1, 1)
-            + module.vec_znx_dft_bytes_of(1, 1)
+            + module.bytes_of_vec_znx_dft(infos.rank_out().into(), 1)
+            + module.bytes_of_vec_znx_big(1, 1)
+            + module.bytes_of_vec_znx_dft(1, 1)
             + GLWESecret::bytes_of(Degree(module.n() as u32), Rank(1))
             + GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
     }
@@ -44,7 +43,7 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
         Module<B>: SvpApplyDftToDft<B>
             + VecZnxIdftApplyTmpA<B>
             + VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<B>
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
@@ -59,7 +58,7 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
             + VecZnxSub
             + SvpPrepare<B>
             + VecZnxSwitchRing
-            + SvpPPolAllocBytes,
+            + SvpPPolBytesOf,
         Scratch<B>:
             TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B> + TakeVecZnxBig<B>,
     {
@@ -1,11 +1,11 @@
 use poulpy_hal::{
-    api::{VecZnxAddScalarInplace, VecZnxDftAllocBytes, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
+    api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, ZnxZero},
     source::Source,
 };
 
 use crate::{
-    SIGMA, TakeGLWEPt,
+    SIGMA, TakeGLWEPlaintext,
     encryption::glwe_ct::GLWEEncryptSkInternal,
     layouts::{
         GGSW, GGSWInfos, GGSWToMut, GLWE, GLWEInfos, LWEInfos,
@@ -17,13 +17,13 @@ impl GGSW<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size = infos.size();
         GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
             + VecZnx::bytes_of(module.n(), (infos.rank() + 1).into(), size)
             + VecZnx::bytes_of(module.n(), 1, size)
-            + module.vec_znx_dft_bytes_of((infos.rank() + 1).into(), size)
+            + module.bytes_of_vec_znx_dft((infos.rank() + 1).into(), size)
     }
 }
 
@@ -45,7 +45,7 @@ pub trait GGSWEncryptSk<B: Backend> {
 impl<B: Backend> GGSWEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPt<B>,
+    Scratch<B>: TakeGLWEPlaintext<B>,
 {
     fn ggsw_encrypt_sk<R, P, S>(
         &self,
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSvpPPol,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeSvpPPol,
         TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, VecZnxToMut, ZnxInfos, ZnxZero},
@@ -22,21 +22,21 @@ impl GLWE<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size: usize = infos.size();
         assert_eq!(module.n() as u32, infos.n());
-        module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.vec_znx_dft_bytes_of(1, size)
+        module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.bytes_of_vec_znx_dft(1, size)
     }
     pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
     {
         let size: usize = infos.size();
         assert_eq!(module.n() as u32, infos.n());
-        ((module.vec_znx_dft_bytes_of(1, size) + module.vec_znx_big_bytes_of(1, size)) | ScalarZnx::bytes_of(module.n(), 1))
-            + module.svp_ppol_bytes_of(1)
+        ((module.bytes_of_vec_znx_dft(1, size) + module.bytes_of_vec_znx_big(1, size)) | ScalarZnx::bytes_of(module.n(), 1))
+            + module.bytes_of_svp_ppol(1)
             + module.vec_znx_normalize_tmp_bytes()
     }
 }
@@ -120,7 +120,7 @@ pub trait GLWEEncryptSk<B: Backend> {
 
 impl<B: Backend> GLWEEncryptSk<B> for Module<B>
 where
-    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     Scratch<B>: ScratchAvailable,
 {
     fn glwe_encrypt_sk<R, P, S>(
@@ -186,7 +186,7 @@ pub trait GLWEEncryptZeroSk<B: Backend> {
 
 impl<B: Backend> GLWEEncryptZeroSk<B> for Module<B>
 where
-    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+    Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     Scratch<B>: ScratchAvailable,
 {
     fn glwe_encrypt_zero_sk<R, S>(
@@ -440,7 +440,7 @@ pub(crate) trait GLWEEncryptSkInternal<B: Backend> {
 
 impl<B: Backend> GLWEEncryptSkInternal<B> for Module<B>
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
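A reading aid for the size formulas in the hunks above: `+` combines scratch regions that are live at the same time, while `|` appears between regions that can reuse the same bytes. Assuming the operands are plain usize values, `|` is bitwise OR, a cheap and sound over-approximation of the maximum: max(a, b) <= a | b <= a + b. A self-contained check of that inequality (not part of the diff):

    // Bitwise OR as an upper bound for max(a, b) on unsigned sizes.
    fn main() {
        let (a, b): (usize, usize) = (640, 768);
        assert!(a.max(b) <= (a | b)); // never smaller than either region
        assert!((a | b) <= a + b);    // never worse than keeping both regions
    }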
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxDftAllocBytes, VecZnxNormalizeTmpBytes},
+    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, ScratchOwned},
     source::Source,
 };
@@ -21,7 +21,7 @@ pub trait GLWEPublicKeyGenerate<B: Backend> {
 
 impl<B: Backend> GLWEPublicKeyGenerate<B> for Module<B>
 where
-    Module<B>: GLWEEncryptZeroSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+    Module<B>: GLWEEncryptZeroSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
 {
     fn glwe_public_key_generate<R, S>(&self, res: &mut R, sk: &S, source_xa: &mut Source, source_xe: &mut Source)
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
@@ -20,7 +20,7 @@ impl GLWEToLWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GLWESecretPrepared::bytes_of(module, infos.rank_in())
             + (GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
@@ -42,7 +42,7 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
         DGlwe: DataRef,
         Module<B>: VecZnxAutomorphismInplace<B>
             + VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<B>
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
@@ -57,7 +57,7 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
             + VecZnxSub
             + SvpPrepare<B>
             + VecZnxSwitchRing
-            + SvpPPolAllocBytes,
+            + SvpPPolBytesOf,
         Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
     {
         #[cfg(debug_assertions)]
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
@@ -21,7 +21,7 @@ impl LWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         debug_assert_eq!(
             infos.dsize().0,
@@ -59,7 +59,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
         DOut: DataRef,
         Module<B>: VecZnxAutomorphismInplace<B>
             + VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<B>
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
@@ -74,7 +74,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
             + VecZnxSub
             + SvpPrepare<B>
             + VecZnxSwitchRing
-            + SvpPPolAllocBytes,
+            + SvpPPolBytesOf,
         Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
     {
         #[cfg(debug_assertions)]
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
+        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
@@ -18,7 +18,7 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         debug_assert_eq!(
             infos.rank_in(),
@@ -45,7 +45,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
         DGlwe: DataRef,
         Module<B>: VecZnxAutomorphismInplace<B>
             + VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<B>
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
@@ -60,7 +60,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
             + VecZnxSub
             + SvpPrepare<B>
             + VecZnxSwitchRing
-            + SvpPPolAllocBytes,
+            + SvpPPolBytesOf,
         Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
     {
         #[cfg(debug_assertions)]
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes,
     },
@@ -20,7 +20,7 @@ impl AutomorphismKey<Vec<u8>> {
         OUT: GGLWEInfos,
         IN: GGLWEInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWESwitchingKey::external_product_scratch_space(module, out_infos, in_infos, ggsw_infos)
     }
@@ -33,7 +33,7 @@ impl AutomorphismKey<Vec<u8>> {
     where
         OUT: GGLWEInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWESwitchingKey::external_product_inplace_scratch_space(module, out_infos, ggsw_infos)
     }
@@ -47,7 +47,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
@@ -67,7 +67,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes,
     },
@@ -20,7 +20,7 @@ impl GLWESwitchingKey<Vec<u8>> {
         OUT: GGLWEInfos,
         IN: GGLWEInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::external_product_scratch_space(
             module,
@@ -38,7 +38,7 @@ impl GLWESwitchingKey<Vec<u8>> {
     where
         OUT: GGLWEInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), ggsw_infos)
     }
@@ -52,7 +52,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
@@ -110,7 +110,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
@@ -1,6 +1,6 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes,
     },
@@ -21,7 +21,7 @@ impl GGSW<Vec<u8>> {
         OUT: GGSWInfos,
         IN: GGSWInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::external_product_scratch_space(
             module,
@@ -39,7 +39,7 @@ impl GGSW<Vec<u8>> {
     where
         OUT: GGSWInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), apply_infos)
     }
@@ -53,7 +53,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
@@ -108,7 +108,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
@@ -1,21 +1,22 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig},
 };
 
-use crate::layouts::{
-    GGSWInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
-    prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
+use crate::{
+    ScratchTakeCore,
+    layouts::{
+        GGSWInfos, GGSWToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetDegree, LWEInfos,
+        prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
+    },
 };
 
-impl GLWE<Vec<u8>> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
-        module: &Module<B>,
+impl<DataSelf: DataMut> GLWE<DataSelf> {
+    pub fn external_product_scratch_space<OUT, IN, GGSW, B: Backend>(
+        module: Module<B>,
         out_infos: &OUT,
         in_infos: &IN,
         apply_infos: &GGSW,
@@ -24,76 +25,35 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: GLWEInfos,
         GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: GLWEExternalProduct<B>,
     {
-        let in_size: usize = in_infos
-            .k()
-            .div_ceil(apply_infos.base2k())
-            .div_ceil(apply_infos.dsize().into()) as usize;
-        let out_size: usize = out_infos.size();
-        let ggsw_size: usize = apply_infos.size();
-        let res_dft: usize = module.vec_znx_dft_bytes_of((apply_infos.rank() + 1).into(), ggsw_size);
-        let a_dft: usize = module.vec_znx_dft_bytes_of((apply_infos.rank() + 1).into(), in_size);
-        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
-            out_size,
-            in_size,
-            in_size,                         // rows
-            (apply_infos.rank() + 1).into(), // cols in
-            (apply_infos.rank() + 1).into(), // cols out
-            ggsw_size,
-        );
-        let normalize_big: usize = module.vec_znx_normalize_tmp_bytes();
-
-        if in_infos.base2k() == apply_infos.base2k() {
-            res_dft + a_dft + (vmp | normalize_big)
-        } else {
-            let normalize_conv: usize = VecZnx::bytes_of(module.n(), (apply_infos.rank() + 1).into(), in_size);
-            res_dft + ((a_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
-        }
+        module.glwe_external_product_scratch_space(out_infos, in_infos, apply_infos)
     }
 
-    pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        apply_infos: &GGSW,
-    ) -> usize
+    pub fn external_product<L, R, B: Backend>(&mut self, module: &Module<B>, lhs: &L, rhs: &R, scratch: &mut Scratch<B>)
     where
-        OUT: GLWEInfos,
-        GGSW: GGSWInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
-    {
-        Self::external_product_scratch_space(module, out_infos, out_infos, apply_infos)
-    }
-}
-
-impl<DataSelf: DataMut> GLWE<DataSelf> {
-    pub fn external_product<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        lhs: &GLWE<DataLhs>,
-        rhs: &GGSWPrepared<DataRhs, B>,
-        scratch: &mut Scratch<B>,
-    ) where
+        L: GLWEToRef,
+        R: GGSWToRef,
         Module<B>: GLWEExternalProduct<B>,
+        Scratch<B>: ScratchTakeCore<B>,
     {
-        module.external_product(self, lhs, rhs, scratch);
+        module.glwe_external_product(self, lhs, rhs, scratch);
    }
 
-    pub fn external_product_inplace<DataRhs: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        rhs: &GGSWPrepared<DataRhs, B>,
-        scratch: &mut Scratch<B>,
-    ) where
+    pub fn external_product_inplace<R, B: Backend>(&mut self, module: &Module<B>, rhs: &R, scratch: &mut Scratch<B>)
+    where
+        R: GGSWToRef,
         Module<B>: GLWEExternalProduct<B>,
+        Scratch<B>: ScratchTakeCore<B>,
     {
-        module.external_product_inplace(self, rhs, scratch);
+        module.glwe_external_product_inplace(self, rhs, scratch);
     }
 }
 
 pub trait GLWEExternalProduct<BE: Backend>
 where
-    Self: VecZnxDftAllocBytes
+    Self: GetDegree
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxNormalizeTmpBytes
         + VecZnxDftApply<BE>
@@ -101,13 +61,49 @@ where
         + VmpApplyDftToDftAdd<BE>
         + VecZnxIdftApplyConsume<BE>
         + VecZnxBigNormalize<BE>
-        + VecZnxNormalize<BE>,
-    Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx,
+        + VecZnxNormalize<BE>
+        + VecZnxDftBytesOf
+        + VmpApplyDftToDftTmpBytes
+        + VecZnxNormalizeTmpBytes,
 {
+    #[allow(clippy::too_many_arguments)]
+    fn glwe_external_product_scratch_space<OUT, IN, GGSW>(&self, out_infos: &OUT, in_infos: &IN, apply_infos: &GGSW) -> usize
+    where
+        OUT: GLWEInfos,
+        IN: GLWEInfos,
+        GGSW: GGSWInfos,
+    {
+        let in_size: usize = in_infos
+            .k()
+            .div_ceil(apply_infos.base2k())
+            .div_ceil(apply_infos.dsize().into()) as usize;
+        let out_size: usize = out_infos.size();
+        let ggsw_size: usize = apply_infos.size();
+        let res_dft: usize = self.bytes_of_vec_znx_dft((apply_infos.rank() + 1).into(), ggsw_size);
+        let a_dft: usize = self.bytes_of_vec_znx_dft((apply_infos.rank() + 1).into(), in_size);
+        let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
+            out_size,
+            in_size,
+            in_size,                         // rows
+            (apply_infos.rank() + 1).into(), // cols in
+            (apply_infos.rank() + 1).into(), // cols out
+            ggsw_size,
+        );
+        let normalize_big: usize = self.vec_znx_normalize_tmp_bytes();
+
+        if in_infos.base2k() == apply_infos.base2k() {
+            res_dft + a_dft + (vmp | normalize_big)
+        } else {
+            let normalize_conv: usize = VecZnx::bytes_of(self.n().into(), (apply_infos.rank() + 1).into(), in_size);
+            res_dft + ((a_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
+        }
+    }
+
     fn glwe_external_product_inplace<R, D>(&self, res: &mut R, ggsw: &D, scratch: &mut Scratch<BE>)
     where
         R: GLWEToMut,
         D: GGSWCiphertextPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
     {
         let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
         let rhs: &GGSWPrepared<&[u8], BE> = &ggsw.to_ref();
@@ -121,7 +117,7 @@ where
 
             assert_eq!(rhs.rank(), res.rank());
             assert_eq!(rhs.n(), res.n());
-            assert!(scratch.available() >= GLWE::external_product_inplace_scratch_space(self, res, rhs));
+            assert!(scratch.available() >= self.glwe_external_product_scratch_space(res, res, rhs));
         }
 
         let cols: usize = (rhs.rank() + 1).into();
@@ -157,7 +153,7 @@ where
                 }
             }
         } else {
-            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n(), cols, a_size);
+            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n().into(), cols, a_size);
 
             for j in 0..cols {
                 self.vec_znx_normalize(
@@ -216,6 +212,7 @@ where
         R: GLWEToMut,
         A: GLWEToRef,
         D: GGSWCiphertextPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
     {
         let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
         let lhs: &GLWE<&[u8]> = &lhs.to_ref();
@@ -234,7 +231,7 @@ where
             assert_eq!(rhs.rank(), res.rank());
             assert_eq!(rhs.n(), res.n());
             assert_eq!(lhs.n(), res.n());
-            assert!(scratch.available() >= GLWE::external_product_scratch_space(self, res, lhs, rhs));
+            assert!(scratch.available() >= self.glwe_external_product_scratch_space(res, lhs, rhs));
        }
 
         let cols: usize = (rhs.rank() + 1).into();
@@ -242,8 +239,8 @@ where
 
         let a_size: usize = (lhs.size() * basek_in).div_ceil(basek_ggsw);
 
-        let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), cols, rhs.size()); // Todo optimise
-        let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), cols, a_size.div_ceil(dsize));
+        let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), cols, rhs.size()); // Todo optimise
+        let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), cols, a_size.div_ceil(dsize));
         a_dft.data_mut().fill(0);
 
         if basek_in == basek_ggsw {
@@ -271,7 +268,7 @@ where
                 }
             }
         } else {
-            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n(), cols, a_size);
+            let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n().into(), cols, a_size);
 
             for j in 0..cols {
                 self.vec_znx_normalize(
@@ -326,9 +323,9 @@ where
     }
 }
 
-impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE>
-where
-    Self: VecZnxDftAllocBytes
+impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE> where
+    Self: GetDegree
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxNormalizeTmpBytes
         + VecZnxDftApply<BE>
@@ -336,7 +333,9 @@ where
         + VmpApplyDftToDftAdd<BE>
         + VecZnxIdftApplyConsume<BE>
         + VecZnxBigNormalize<BE>
-        + VecZnxNormalize<BE>,
-    Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx,
+        + VecZnxNormalize<BE>
+        + VecZnxDftBytesOf
+        + VmpApplyDftToDftTmpBytes
+        + VecZnxNormalizeTmpBytes
 {
 }
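The refactor above moves the scratch-space formula and both external-product entry points off the inherent impl and onto the GLWEExternalProduct trait implemented by Module<BE>; the methods left on GLWE<DataSelf> are thin wrappers that delegate to it. A hedged sketch of the resulting call shape; the function name `apply` and the ScratchOwned-based setup are assumptions, not taken from the diff:

    // Sketch only: wires together the names introduced in the hunks above.
    fn apply<B: Backend, D: DataMut>(module: &Module<B>, ct: &mut GLWE<D>, ggsw: &GGSWPrepared<Vec<u8>, B>)
    where
        Module<B>: GLWEExternalProduct<B>,
        Scratch<B>: ScratchTakeCore<B>,
        ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
        GLWE<D>: GLWEToMut + GLWEInfos,
        GGSWPrepared<Vec<u8>, B>: GGSWCiphertextPreparedToRef<B> + GGSWInfos,
    {
        // Size the scratch from the trait's own formula (out == in for the
        // in-place case), then run the in-place external product.
        let bytes = module.glwe_external_product_scratch_space(ct, ct, ggsw);
        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(bytes);
        module.glwe_external_product_inplace(ct, ggsw, scratch.borrow());
    }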
@@ -1,8 +1,6 @@
-use poulpy_hal::layouts::{Backend, Scratch};
-
-use crate::layouts::{GLWEToMut, GLWEToRef, prepared::GGSWCiphertextPreparedToRef};
-
 mod gglwe_atk;
 mod gglwe_ksk;
 mod ggsw_ct;
 mod glwe_ct;
+
+pub use glwe_ct::*;
@@ -4,7 +4,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
         VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace,
         VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
@@ -12,7 +12,7 @@ use poulpy_hal::{
 };
 
 use crate::{
-    GLWEOperations, TakeGLWECt,
+    GLWEOperations, TakeGLWE,
     layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared},
 };
 
@@ -94,7 +94,7 @@ impl GLWEPacker {
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         pack_core_scratch_space(module, out_infos, key_infos)
     }
@@ -119,7 +119,7 @@ impl GLWEPacker {
         auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -181,7 +181,7 @@ fn pack_core_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos:
 where
     OUT: GLWEInfos,
     KEY: GGLWEInfos,
-    Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+    Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
 {
     combine_scratch_space(module, out_infos, key_infos)
 }
@@ -194,7 +194,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
     auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
     scratch: &mut Scratch<B>,
 ) where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -272,7 +272,7 @@ fn combine_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &O
 where
     OUT: GLWEInfos,
     KEY: GGLWEInfos,
-    Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+    Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
 {
     GLWE::bytes_of(out_infos)
         + (GLWE::rsh_scratch_space(module.n()) | GLWE::automorphism_inplace_scratch_space(module, out_infos, key_infos))
@@ -287,7 +287,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
     auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
     scratch: &mut Scratch<B>,
 ) where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -310,7 +310,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
         + VecZnxBigAutomorphismInplace<B>
         + VecZnxNormalize<B>
         + VecZnxNormalizeTmpBytes,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWECt,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWE,
 {
     let log_n: usize = acc.data.n().log2();
     let a: &mut GLWE<Vec<u8>> = &mut acc.data;
@@ -413,7 +413,7 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
         + VecZnxNegateInplace
         + VecZnxCopy
         + VecZnxSubInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -480,7 +480,7 @@ fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
         + VecZnxNegateInplace
         + VecZnxCopy
         + VecZnxSubInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -3,14 +3,14 @@ use std::collections::HashMap;
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
         VecZnxNormalizeTmpBytes, VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx},
 };
 
 use crate::{
-    TakeGLWECt,
+    TakeGLWE,
     layouts::{Base2K, GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWEInfos, prepared::AutomorphismKeyPrepared},
     operations::GLWEOperations,
 };
@@ -38,7 +38,7 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         let trace: usize = Self::automorphism_inplace_scratch_space(module, out_infos, key_infos);
         if in_infos.base2k() != key_infos.base2k() {
@@ -57,7 +57,7 @@ impl GLWE<Vec<u8>> {
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         Self::trace_scratch_space(module, out_infos, out_infos, key_infos)
     }
@@ -73,7 +73,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -101,7 +101,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>

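Note how trace_inplace_scratch_space sizes the input slot with the output layout: for an inplace trace the same ciphertext plays both roles. A caller-side sketch (the wrapper is hypothetical; signatures follow this diff):

    fn trace_scratch<OUT, KEY, B: Backend>(module: &Module<B>, out: &OUT, key: &KEY) -> usize
    where
        OUT: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
        // inplace variant: the output layout stands in for the input as well
        GLWE::<Vec<u8>>::trace_scratch_space(module, out, out, key)
    }
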
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
@@ -23,7 +23,7 @@ impl AutomorphismKey<Vec<u8>> {
         OUT: GGLWEInfos,
         IN: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
     }
@@ -32,7 +32,7 @@ impl AutomorphismKey<Vec<u8>> {
     where
         OUT: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
     }
@@ -46,7 +46,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -68,7 +68,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
         rhs: &AutomorphismKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -96,7 +96,7 @@ impl GLWESwitchingKey<Vec<u8>> {
         OUT: GGLWEInfos,
         IN: GGLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::keyswitch_scratch_space(module, out_infos, in_infos, key_apply)
     }
@@ -105,7 +105,7 @@ impl GLWESwitchingKey<Vec<u8>> {
     where
         OUT: GGLWEInfos + GLWEInfos,
         KEY: GGLWEInfos + GLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         GLWE::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
     }
@@ -119,7 +119,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -190,7 +190,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>

@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigBytesOf,
+        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
@@ -21,7 +21,7 @@ impl GGSW<Vec<u8>> {
     where
         OUT: GGSWInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
     {
         let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
         let size_in: usize = out_infos
@@ -29,8 +29,8 @@ impl GGSW<Vec<u8>> {
             .div_ceil(tsk_infos.base2k())
             .div_ceil(tsk_infos.dsize().into()) as usize;
 
-        let tmp_dft_i: usize = module.vec_znx_dft_bytes_of((tsk_infos.rank_out() + 1).into(), tsk_size);
-        let tmp_a: usize = module.vec_znx_dft_bytes_of(1, size_in);
+        let tmp_dft_i: usize = module.bytes_of_vec_znx_dft((tsk_infos.rank_out() + 1).into(), tsk_size);
+        let tmp_a: usize = module.bytes_of_vec_znx_dft(1, size_in);
         let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
             tsk_size,
             size_in,
@@ -39,7 +39,7 @@ impl GGSW<Vec<u8>> {
             (tsk_infos.rank_out()).into(), // Verify if rank+1
             tsk_size,
         );
-        let tmp_idft: usize = module.vec_znx_big_bytes_of(1, tsk_size);
+        let tmp_idft: usize = module.bytes_of_vec_znx_big(1, tsk_size);
         let norm: usize = module.vec_znx_normalize_tmp_bytes();
 
         tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
@@ -58,11 +58,8 @@ impl GGSW<Vec<u8>> {
         IN: GGSWInfos,
         KEY: GGLWEInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
+        Module<B>:
+            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         #[cfg(debug_assertions)]
         {
@@ -75,10 +72,10 @@ impl GGSW<Vec<u8>> {
 
         let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize;
         let res_znx: usize = VecZnx::bytes_of(module.n(), rank + 1, size_out);
-        let ci_dft: usize = module.vec_znx_dft_bytes_of(rank + 1, size_out);
+        let ci_dft: usize = module.bytes_of_vec_znx_dft(rank + 1, size_out);
         let ks: usize = GLWE::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos);
         let expand_rows: usize = GGSW::expand_row_scratch_space(module, out_infos, tsk_infos);
-        let res_dft: usize = module.vec_znx_dft_bytes_of(rank + 1, size_out);
+        let res_dft: usize = module.bytes_of_vec_znx_dft(rank + 1, size_out);
 
         if in_infos.base2k() == tsk_infos.base2k() {
             res_znx + ci_dft + (ks | expand_rows | res_dft)
@@ -103,11 +100,8 @@ impl GGSW<Vec<u8>> {
         OUT: GGSWInfos,
         KEY: GGLWEInfos,
         TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
+        Module<B>:
+            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         GGSW::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos)
     }
@@ -124,9 +118,9 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         DataA: DataRef,
         DataTsk: DataRef,
         Module<B>: VecZnxCopy
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
             + VecZnxDftCopy<B>
@@ -162,7 +156,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tsk: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -171,8 +165,8 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxBigAddSmallInplace<B>
             + VecZnxBigNormalize<B>
-            + VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+            + VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxNormalizeTmpBytes
             + VecZnxDftCopy<B>
             + VecZnxDftAddInplace<B>
@@ -196,7 +190,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tsk: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -205,8 +199,8 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxBigAddSmallInplace<B>
             + VecZnxBigNormalize<B>
-            + VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+            + VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxNormalizeTmpBytes
             + VecZnxDftCopy<B>
             + VecZnxDftAddInplace<B>
@@ -229,9 +223,9 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         tsk: &TensorKeyPrepared<DataTsk, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxNormalizeTmpBytes
             + VecZnxDftApply<B>
             + VecZnxDftCopy<B>

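The scratch formulas above combine sizes with two operators: `+` for buffers that are alive at the same time, and bitwise `|` for phases that can reuse the same region. For unsigned integers, a | b sets every bit of a and of b, so it is always at least max(a, b) and at most a + b: a cheap upper bound on the larger of two alternatives. A self-contained check of that arithmetic:

    fn main() {
        let (a, b): (usize, usize) = (1536, 2048);
        assert!((a | b) >= a.max(b)); // 1536 | 2048 = 3584 >= 2048
        assert!((a | b) <= a + b);    // never worse than keeping both buffers
        println!("{}", a | b);
    }
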
@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
@@ -20,7 +20,7 @@ impl GLWE<Vec<u8>> {
         OUT: GLWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         let in_size: usize = in_infos
             .k()
@@ -28,8 +28,8 @@ impl GLWE<Vec<u8>> {
             .div_ceil(key_apply.dsize().into()) as usize;
         let out_size: usize = out_infos.size();
         let ksk_size: usize = key_apply.size();
-        let res_dft: usize = module.vec_znx_dft_bytes_of((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
-        let ai_dft: usize = module.vec_znx_dft_bytes_of((key_apply.rank_in()).into(), in_size);
+        let res_dft: usize = module.bytes_of_vec_znx_dft((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
+        let ai_dft: usize = module.bytes_of_vec_znx_dft((key_apply.rank_in()).into(), in_size);
         let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
             out_size,
             in_size,
@@ -37,7 +37,7 @@ impl GLWE<Vec<u8>> {
             (key_apply.rank_in()).into(),
             (key_apply.rank_out() + 1).into(),
             ksk_size,
-        ) + module.vec_znx_dft_bytes_of((key_apply.rank_in()).into(), in_size);
+        ) + module.bytes_of_vec_znx_dft((key_apply.rank_in()).into(), in_size);
         let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes();
         if in_infos.base2k() == key_apply.base2k() {
             res_dft + ((ai_dft + vmp) | normalize_big)
@@ -56,7 +56,7 @@ impl GLWE<Vec<u8>> {
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply)
     }
@@ -73,7 +73,7 @@ impl<DataSelf: DataRef> GLWE<DataSelf> {
     ) where
         DataLhs: DataRef,
         DataRhs: DataRef,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
         Scratch<B>: ScratchAvailable,
     {
         assert_eq!(
@@ -121,7 +121,7 @@ impl<DataSelf: DataRef> GLWE<DataSelf> {
         scratch: &Scratch<B>,
     ) where
         DataRhs: DataRef,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
         Scratch<B>: ScratchAvailable,
     {
         assert_eq!(
@@ -152,7 +152,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>
@@ -194,7 +194,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
         rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
     ) where
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDftTmpBytes
@@ -243,7 +243,7 @@ impl<D: DataRef> GLWE<D> {
     where
         DataRes: DataMut,
         DataKey: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDftTmpBytes
@@ -294,7 +294,7 @@ where
     DataRes: DataMut,
     DataIn: DataRef,
    DataVmp: DataRef,
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxDftApply<B>
        + VmpApplyDftToDft<B>
         + VecZnxIdftApplyConsume<B>
@@ -340,7 +340,7 @@ where
     DataRes: DataMut,
     DataIn: DataRef,
     DataVmp: DataRef,
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxDftApply<B>
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>

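The in_size computation above is limb arithmetic: a precision of k torus bits at base 2^base2k occupies ceil(k / base2k) limbs, and a key with digit size dsize packs those limbs into ceil(size / dsize) rows for the vector-matrix product. A worked example with illustrative numbers:

    fn main() {
        let (k, base2k, dsize): (usize, usize, usize) = (54, 16, 2);
        let size = k.div_ceil(base2k);   // ceil(54 / 16) = 4 limbs
        let rows = size.div_ceil(dsize); // ceil(4 / 2) = 2 vmp rows
        assert_eq!((size, rows), (4, 2));
    }
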
@@ -1,14 +1,14 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
+        VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 };
 
 use crate::{
-    TakeGLWECt,
+    TakeGLWE,
     layouts::{GGLWEInfos, GLWE, GLWELayout, LWE, LWEInfos, Rank, TorusPrecision, prepared::LWESwitchingKeyPrepared},
 };
 
@@ -23,7 +23,7 @@ impl LWE<Vec<u8>> {
         OUT: LWEInfos,
         IN: LWEInfos,
         KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDftTmpBytes
@@ -69,7 +69,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
     ) where
         A: DataRef,
         DKs: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
             + VmpApplyDftToDftTmpBytes
             + VecZnxBigNormalizeTmpBytes
             + VmpApplyDftToDft<B>

@@ -210,7 +210,7 @@ impl<D: DataMut> AutomorphismKey<D>
 where
     Self: SetAutomorphismGaloisElement,
 {
-    pub fn decompressed<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
+    pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
     where
         O: AutomorphismKeyCompressedToRef + GetAutomorphismGaloisElement,
         Module<B>: AutomorphismKeyDecompress,

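The rename from decompressed to decompress matches the receiver: the method mutates self in place rather than returning a decompressed copy. Call-shape sketch (the wrapper function is hypothetical; the bounds are copied from this diff):

    fn expand_key<D, O, B>(key: &mut AutomorphismKey<D>, module: &Module<B>, compressed: &O)
    where
        D: DataMut,
        B: Backend,
        AutomorphismKey<D>: SetAutomorphismGaloisElement,
        O: AutomorphismKeyCompressedToRef + GetAutomorphismGaloisElement,
        Module<B>: AutomorphismKeyDecompress,
    {
        key.decompress(module, compressed);
    }
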
@@ -86,7 +86,7 @@ pub trait TensorKeyCompressedAlloc
 where
     Self: GLWESwitchingKeyCompressedAlloc,
 {
-    fn tensor_key_compressed_alloc(
+    fn alloc_tensor_key_compressed(
         &self,
         base2k: Base2K,
         k: TorusPrecision,
@@ -102,7 +102,7 @@ where
         }
     }
 
-    fn tensor_key_compressed_alloc_from_infos<A>(&self, infos: &A) -> TensorKeyCompressed<Vec<u8>>
+    fn alloc_tensor_key_compressed_from_infos<A>(&self, infos: &A) -> TensorKeyCompressed<Vec<u8>>
     where
         A: GGLWEInfos,
     {
@@ -111,7 +111,7 @@ where
             infos.rank_out(),
             "rank_in != rank_out is not supported for GGLWETensorKeyCompressed"
         );
-        self.tensor_key_compressed_alloc(
+        self.alloc_tensor_key_compressed(
             infos.base2k(),
             infos.k(),
             infos.rank(),
@@ -120,16 +120,16 @@ where
         )
     }
 
-    fn tensor_key_compressed_bytes_of(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
+    fn bytes_of_tensor_key_compressed(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
         let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
         pairs * self.bytes_of_glwe_switching_key_compressed(base2k, k, Rank(1), dnum, dsize)
     }
 
-    fn tensor_key_compressed_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_tensor_key_compressed_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        self.tensor_key_compressed_bytes_of(
+        self.bytes_of_tensor_key_compressed(
             infos.base2k(),
             infos.k(),
             infos.rank(),
@@ -145,14 +145,14 @@ impl TensorKeyCompressed<Vec<u8>> {
         A: GGLWEInfos,
         Module<B>: TensorKeyCompressedAlloc,
     {
-        module.tensor_key_compressed_alloc_from_infos(infos)
+        module.alloc_tensor_key_compressed_from_infos(infos)
     }
 
     pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
     where
         Module<B>: TensorKeyCompressedAlloc,
     {
-        module.tensor_key_compressed_alloc(base2k, k, rank, dnum, dsize)
+        module.alloc_tensor_key_compressed(base2k, k, rank, dnum, dsize)
     }
 
     pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
@@ -160,7 +160,7 @@ impl TensorKeyCompressed<Vec<u8>> {
         A: GGLWEInfos,
         Module<B>: TensorKeyCompressedAlloc,
     {
-        module.tensor_key_compressed_bytes_of_from_infos(infos)
+        module.bytes_of_tensor_key_compressed_from_infos(infos)
     }
 
     pub fn bytes_of<B: Backend>(
@@ -174,7 +174,7 @@ impl TensorKeyCompressed<Vec<u8>> {
     where
         Module<B>: TensorKeyCompressedAlloc,
     {
-        module.tensor_key_compressed_bytes_of(base2k, k, rank, dnum, dsize)
+        module.bytes_of_tensor_key_compressed(base2k, k, rank, dnum, dsize)
     }
 }
 

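The pair count in bytes_of_tensor_key_compressed, rank * (rank + 1) / 2 floored at one, is presumably one switching key per unordered product s_i * s_j of the rank secret components. The arithmetic, checked in isolation:

    fn main() {
        let pairs = |rank: u64| (((rank + 1) * rank) >> 1).max(1);
        assert_eq!(pairs(1), 1); // s_0 * s_0
        assert_eq!(pairs(2), 3); // (0,0), (0,1), (1,1)
        assert_eq!(pairs(3), 6);
    }
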
@@ -224,6 +224,8 @@ where
     }
 }
 
+impl<B: Backend> GLWESwitchingKeyAlloc for Module<B> where Self: GGLWEAlloc {}
+
 impl GLWESwitchingKey<Vec<u8>> {
     pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
     where

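The added line is a blanket implementation: any Module<B> that can already allocate a GGLWE provides GLWESwitchingKeyAlloc automatically, so no backend has to implement the key-specific trait by hand. The pattern in miniature (these trait names are illustrative stand-ins, not part of the crate):

    trait GGLWEAllocLike {}
    trait SwitchingKeyAllocLike {}
    // every implementor of the base capability gets the derived one for free
    impl<T> SwitchingKeyAllocLike for T where T: GGLWEAllocLike {}
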
@@ -176,7 +176,7 @@ impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
     }
 }
 
-pub trait AutomorphismKeyPrepare<B: Backend>
+pub trait PrepareAutomorphismKey<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
@@ -197,7 +197,7 @@ where
     }
 }
 
-impl<B: Backend> AutomorphismKeyPrepare<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}
+impl<B: Backend> PrepareAutomorphismKey<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}
 
 impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
     pub fn prepare_tmp_bytes(&self, module: &Module<B>) -> usize
@@ -212,7 +212,7 @@ impl<D: DataMut, B: Backend> AutomorphismKeyPrepared<D, B> {
     pub fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
     where
         O: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
-        Module<B>: AutomorphismKeyPrepare<B>,
+        Module<B>: PrepareAutomorphismKey<B>,
     {
         module.prepare_automorphism_key(self, other, scratch);
     }

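The verb-first trait name (PrepareAutomorphismKey instead of AutomorphismKeyPrepare) does not change call sites, which go through the inherent prepare method. A usage sketch with the bounds exactly as in this diff (the wrapper function itself is hypothetical):

    fn prepare_key<D, O, B>(
        module: &Module<B>,
        key: &mut AutomorphismKeyPrepared<D, B>,
        other: &O,
        scratch: &mut Scratch<B>,
    ) where
        D: DataMut,
        B: Backend,
        O: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
        Module<B>: PrepareAutomorphismKey<B>,
    {
        key.prepare(module, other, scratch);
    }
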
@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes},
+    api::{VmpPMatAlloc, VmpPMatBytesOf, VmpPrepare, VmpPrepareTmpBytes},
     layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
 };
 
@@ -59,7 +59,7 @@ impl<D: Data, B: Backend> GGLWEInfos for GGLWEPrepared<D, B> {
 
 pub trait GGLWEPreparedAlloc<B: Backend>
 where
-    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes,
+    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf,
 {
     fn alloc_gglwe_prepared(
         &self,
@@ -130,7 +130,7 @@ where
             dsize.0,
         );
 
-        self.vmp_pmat_bytes_of(dnum.into(), rank_in.into(), (rank_out + 1).into(), size)
+        self.bytes_of_vmp_pmat(dnum.into(), rank_in.into(), (rank_out + 1).into(), size)
     }
 
     fn bytes_of_gglwe_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -149,7 +149,7 @@ where
     }
 }
 
-impl<B: Backend> GGLWEPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes {}
+impl<B: Backend> GGLWEPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}
 
 impl<B: Backend> GGLWEPrepared<Vec<u8>, B>
 where

@@ -95,7 +95,7 @@ where
         }
     }
 
-    fn glwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> GLWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -121,7 +121,7 @@ where
         self.bytes_of_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
     }
 
-    fn glwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -146,7 +146,7 @@ where
     where
         A: GGLWEInfos,
     {
-        module.glwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_glwe_switching_key_prepared_from_infos(infos)
     }
 
     pub fn alloc(
@@ -165,7 +165,7 @@ where
     where
         A: GGLWEInfos,
    {
-        module.glwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_glwe_switching_key_prepared_from_infos(infos)
     }
 
     pub fn bytes_of(

@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes},
+    api::{VmpPMatAlloc, VmpPMatBytesOf, VmpPrepare, VmpPrepareTmpBytes},
     layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
 };
 
@@ -51,7 +51,7 @@ impl<D: Data, B: Backend> GGSWInfos for GGSWPrepared<D, B> {
 
 pub trait GGSWPreparedAlloc<B: Backend>
 where
-    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes,
+    Self: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf,
 {
     fn alloc_ggsw_prepared(
         &self,
@@ -117,7 +117,7 @@ where
             dsize.0,
         );
 
-        self.vmp_pmat_bytes_of(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
+        self.bytes_of_vmp_pmat(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
     }
 
     fn bytes_of_ggsw_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -135,7 +135,7 @@ where
     }
 }
 
-impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes {}
+impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}
 
 impl<B: Backend> GGSWPrepared<Vec<u8>, B>
 where

@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply},
+    api::{VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf},
     layouts::{Backend, Data, DataMut, DataRef, Module, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos},
 };
 
@@ -52,7 +52,7 @@ impl<D: Data, B: Backend> GLWEInfos for GLWEPublicKeyPrepared<D, B> {
 
 pub trait GLWEPublicKeyPreparedAlloc<B: Backend>
 where
-    Self: GetDegree + VecZnxDftAlloc<B> + VecZnxDftAllocBytes,
+    Self: GetDegree + VecZnxDftAlloc<B> + VecZnxDftBytesOf,
 {
     fn alloc_glwe_public_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> GLWEPublicKeyPrepared<Vec<u8>, B> {
         GLWEPublicKeyPrepared {
@@ -71,7 +71,7 @@ where
     }
 
     fn bytes_of_glwe_public_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
-        self.vec_znx_dft_bytes_of((rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
+        self.bytes_of_vec_znx_dft((rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
     }
 
     fn bytes_of_glwe_public_key_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -82,7 +82,7 @@ where
     }
 }
 
-impl<B: Backend> GLWEPublicKeyPreparedAlloc<B> for Module<B> where Self: VecZnxDftAlloc<B> + VecZnxDftAllocBytes {}
+impl<B: Backend> GLWEPublicKeyPreparedAlloc<B> for Module<B> where Self: VecZnxDftAlloc<B> + VecZnxDftBytesOf {}
 
 impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B>
 where

@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare},
+    api::{SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare},
     layouts::{Backend, Data, DataMut, DataRef, Module, SvpPPol, SvpPPolToMut, SvpPPolToRef, ZnxInfos},
 };
 
@@ -47,7 +47,7 @@ impl<D: Data, B: Backend> GLWEInfos for GLWESecretPrepared<D, B> {
 
 pub trait GLWESecretPreparedAlloc<B: Backend>
 where
-    Self: GetDegree + SvpPPolAllocBytes + SvpPPolAlloc<B>,
+    Self: GetDegree + SvpPPolBytesOf + SvpPPolAlloc<B>,
 {
     fn alloc_glwe_secret_prepared(&self, rank: Rank) -> GLWESecretPrepared<Vec<u8>, B> {
         GLWESecretPrepared {
@@ -64,7 +64,7 @@ where
     }
 
     fn bytes_of_glwe_secret(&self, rank: Rank) -> usize {
-        self.svp_ppol_bytes_of(rank.into())
+        self.bytes_of_svp_ppol(rank.into())
     }
     fn bytes_of_glwe_secret_from_infos<A>(&self, infos: &A) -> usize
     where
@@ -75,7 +75,7 @@ where
     }
 }
 
-impl<B: Backend> GLWESecretPreparedAlloc<B> for Module<B> where Self: GetDegree + SvpPPolAllocBytes + SvpPPolAlloc<B> {}
+impl<B: Backend> GLWESecretPreparedAlloc<B> for Module<B> where Self: GetDegree + SvpPPolBytesOf + SvpPPolAlloc<B> {}
 
 impl<B: Backend> GLWESecretPrepared<Vec<u8>, B>
 where

@@ -57,7 +57,7 @@ pub trait GLWEToLWESwitchingKeyPreparedAlloc<B: Backend>
 where
     Self: GLWESwitchingKeyPreparedAlloc<B>,
 {
-    fn glwe_to_lwe_switching_key_prepared_alloc(
+    fn alloc_glwe_to_lwe_switching_key_prepared(
         &self,
         base2k: Base2K,
         k: TorusPrecision,
@@ -66,7 +66,7 @@ where
     ) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
         GLWEToLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
     }
-    fn glwe_to_lwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -80,14 +80,14 @@ where
             1,
             "dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
         );
-        self.glwe_to_lwe_switching_key_prepared_alloc(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
+        self.alloc_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
     }
 
-    fn glwe_to_lwe_switching_key_prepared_bytes_of(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
+    fn bytes_of_glwe_to_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
         self.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1))
     }
 
-    fn glwe_to_lwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -101,7 +101,7 @@ where
             1,
             "dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
         );
-        self.glwe_to_lwe_switching_key_prepared_bytes_of(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
+        self.bytes_of_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
     }
 }
 
@@ -115,22 +115,22 @@ where
     where
         A: GGLWEInfos,
     {
-        module.glwe_to_lwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_glwe_to_lwe_switching_key_prepared_from_infos(infos)
     }
 
     pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self {
-        module.glwe_to_lwe_switching_key_prepared_alloc(base2k, k, rank_in, dnum)
+        module.alloc_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
     }
 
     pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        module.glwe_to_lwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_glwe_to_lwe_switching_key_prepared_from_infos(infos)
     }
 
     pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
-        module.glwe_to_lwe_switching_key_prepared_bytes_of(base2k, k, rank_in, dnum)
+        module.bytes_of_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
     }
 }
 
@@ -138,14 +138,14 @@ pub trait GLWEToLWESwitchingKeyPrepare<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
-    fn glwe_to_lwe_switching_key_prepare_tmp_bytes<A>(&self, infos: &A)
+    fn prepare_glwe_to_lwe_switching_key_tmp_bytes<A>(&self, infos: &A)
     where
         A: GGLWEInfos,
     {
         self.prepare_glwe_switching_key_tmp_bytes(infos);
     }
 
-    fn glwe_to_lwe_switching_key_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
+    fn prepare_glwe_to_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
     where
         R: GLWEToLWESwitchingKeyPreparedToMut<B>,
         O: GLWEToLWESwitchingKeyToRef,
@@ -162,7 +162,7 @@ impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
         A: GGLWEInfos,
         Module<B>: GLWEToLWESwitchingKeyPrepare<B>,
     {
-        module.glwe_to_lwe_switching_key_prepare_tmp_bytes(infos);
+        module.prepare_glwe_to_lwe_switching_key_tmp_bytes(infos);
     }
 }
 
@@ -172,7 +172,7 @@ impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
         O: GLWEToLWESwitchingKeyToRef,
         Module<B>: GLWEToLWESwitchingKeyPrepare<B>,
     {
-        module.glwe_to_lwe_switching_key_prepare(self, other, scratch);
+        module.prepare_glwe_to_lwe_switching_key(self, other, scratch);
     }
 }
 

@@ -65,7 +65,7 @@ where
         LWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1)))
     }
 
-    fn lwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> LWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -87,11 +87,11 @@ where
         self.alloc_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.dnum())
     }
 
-    fn lwe_switching_key_prepared_bytes_of(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
+    fn bytes_of_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
         self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1))
     }
 
-    fn lwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -110,7 +110,7 @@ where
             1,
             "rank_out > 1 is not supported for LWESwitchingKey"
         );
-        self.lwe_switching_key_prepared_bytes_of(infos.base2k(), infos.k(), infos.dnum())
+        self.bytes_of_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.dnum())
     }
 }
 
@@ -124,7 +124,7 @@ where
     where
         A: GGLWEInfos,
     {
-        module.lwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_lwe_switching_key_prepared_from_infos(infos)
     }
 
     pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self {
@@ -135,11 +135,11 @@ where
     where
         A: GGLWEInfos,
     {
-        module.lwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_lwe_switching_key_prepared_from_infos(infos)
     }
 
     pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
-        module.lwe_switching_key_prepared_bytes_of(base2k, k, dnum)
+        module.bytes_of_lwe_switching_key_prepared(base2k, k, dnum)
     }
 }
 
@@ -147,13 +147,13 @@ pub trait LWESwitchingKeyPrepare<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
-    fn lwe_switching_key_prepare_tmp_bytes<A>(&self, infos: &A)
+    fn prepare_lwe_switching_key_tmp_bytes<A>(&self, infos: &A)
     where
         A: GGLWEInfos,
     {
         self.prepare_glwe_switching_key_tmp_bytes(infos);
     }
-    fn lwe_switching_key_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
+    fn prepare_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
     where
         R: LWESwitchingKeyPreparedToMut<B>,
         O: LWESwitchingKeyToRef,
@@ -170,7 +170,7 @@ impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
         A: GGLWEInfos,
         Module<B>: LWESwitchingKeyPrepare<B>,
     {
-        module.lwe_switching_key_prepare_tmp_bytes(infos);
+        module.prepare_lwe_switching_key_tmp_bytes(infos);
     }
 }
 
@@ -180,7 +180,7 @@ impl<D: DataMut, B: Backend> LWESwitchingKeyPrepared<D, B> {
         O: LWESwitchingKeyToRef,
         Module<B>: LWESwitchingKeyPrepare<B>,
     {
-        module.lwe_switching_key_prepare(self, other, scratch);
+        module.prepare_lwe_switching_key(self, other, scratch);
     }
 }
 

@@ -58,7 +58,7 @@ pub trait LWEToGLWESwitchingKeyPreparedAlloc<B: Backend>
|
|||||||
where
|
where
|
||||||
Self: GLWESwitchingKeyPreparedAlloc<B>,
|
Self: GLWESwitchingKeyPreparedAlloc<B>,
|
||||||
{
|
{
|
||||||
fn lwe_to_glwe_switching_key_prepared_alloc(
|
fn alloc_lwe_to_glwe_switching_key_prepared(
|
||||||
&self,
|
&self,
|
||||||
         base2k: Base2K,
         k: TorusPrecision,
@@ -67,7 +67,7 @@ where
     ) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
         LWEToGLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
     }
-    fn lwe_to_glwe_switching_key_prepared_alloc_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
+    fn alloc_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
     where
         A: GGLWEInfos,
     {
@@ -81,10 +81,10 @@ where
             1,
             "dsize > 1 is not supported for LWEToGLWESwitchingKey"
         );
-        self.lwe_to_glwe_switching_key_prepared_alloc(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
+        self.alloc_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
     }

-    fn lwe_to_glwe_switching_key_prepared_bytes_of(
+    fn bytes_of_lwe_to_glwe_switching_key_prepared(
         &self,
         base2k: Base2K,
         k: TorusPrecision,
@@ -94,7 +94,7 @@ where
         self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1))
     }

-    fn lwe_to_glwe_switching_key_prepared_bytes_of_from_infos<A>(&self, infos: &A) -> usize
+    fn bytes_of_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -108,7 +108,7 @@ where
             1,
             "dsize > 1 is not supported for LWEToGLWESwitchingKey"
         );
-        self.lwe_to_glwe_switching_key_prepared_bytes_of(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
+        self.bytes_of_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
     }
 }

@@ -122,22 +122,22 @@ where
     where
         A: GGLWEInfos,
     {
-        module.lwe_to_glwe_switching_key_prepared_alloc_from_infos(infos)
+        module.alloc_lwe_to_glwe_switching_key_prepared_from_infos(infos)
     }

     pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self {
-        module.lwe_to_glwe_switching_key_prepared_alloc(base2k, k, rank_out, dnum)
+        module.alloc_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
     }

     pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        module.lwe_to_glwe_switching_key_prepared_bytes_of_from_infos(infos)
+        module.bytes_of_lwe_to_glwe_switching_key_prepared_from_infos(infos)
     }

     pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize {
-        module.lwe_to_glwe_switching_key_prepared_bytes_of(base2k, k, rank_out, dnum)
+        module.bytes_of_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
     }
 }

@@ -145,14 +145,14 @@ pub trait LWEToGLWESwitchingKeyPrepare<B: Backend>
 where
     Self: GLWESwitchingKeyPrepare<B>,
 {
-    fn lwe_to_glwe_switching_key_prepare_tmp_bytes<A>(&self, infos: &A)
+    fn prepare_lwe_to_glwe_switching_key_tmp_bytes<A>(&self, infos: &A)
     where
         A: GGLWEInfos,
     {
         self.prepare_glwe_switching_key_tmp_bytes(infos);
     }

-    fn lwe_to_glwe_switching_key_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
+    fn prepare_lwe_to_glwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
     where
         R: LWEToGLWESwitchingKeyPreparedToMut<B>,
         O: LWEToGLWESwitchingKeyToRef,
@@ -169,7 +169,7 @@ impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
         A: GGLWEInfos,
         Module<B>: LWEToGLWESwitchingKeyPrepare<B>,
     {
-        module.lwe_to_glwe_switching_key_prepare_tmp_bytes(infos);
+        module.prepare_lwe_to_glwe_switching_key_tmp_bytes(infos);
     }
 }

@@ -179,7 +179,7 @@ impl<D: DataMut, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
         O: LWEToGLWESwitchingKeyToRef,
         Module<B>: LWEToGLWESwitchingKeyPrepare<B>,
     {
-        module.lwe_to_glwe_switching_key_prepare(self, other, scratch);
+        module.prepare_lwe_to_glwe_switching_key(self, other, scratch);
     }
 }
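
The hunks above are one mechanical rename: allocation, size, and preparation helpers move from noun-first names (`lwe_to_glwe_switching_key_prepared_alloc`, `..._bytes_of`, `..._prepare`) to verb-first ones (`alloc_...`, `bytes_of_...`, `prepare_...`). The commit renames in place; the sketch below shows one way such a rename could instead be staged without breaking callers. It uses hypothetical names (`Key`, `KeyAlloc`, `Module`), not the actual poulpy traits.

/// Hypothetical stand-in for a poulpy-style allocation trait.
pub struct Key {
    bytes: usize,
}

pub trait KeyAlloc {
    /// New verb-first name.
    fn alloc_key(&self, bytes: usize) -> Key;

    /// Old noun-first name, kept as a deprecated forwarding alias
    /// so downstream code can migrate one call site at a time.
    #[deprecated(note = "renamed to `alloc_key`")]
    fn key_alloc(&self, bytes: usize) -> Key {
        self.alloc_key(bytes)
    }
}

pub struct Module;

impl KeyAlloc for Module {
    fn alloc_key(&self, bytes: usize) -> Key {
        Key { bytes }
    }
}

fn main() {
    let m = Module;
    let k = m.alloc_key(64);
    println!("allocated {} bytes", k.bytes);
}

A forwarding default method keeps old call sites compiling while `#[deprecated]` points them at the new spelling; a hard in-place rename, as done in this wip commit, trades that compatibility for a smaller API surface.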

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
-        VecZnxNormalizeTmpBytes, VecZnxSubScalarInplace,
+        VecZnxSubScalarInplace,
     },
     layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, ZnxZero},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
@@ -20,8 +20,8 @@ impl<D: DataRef> GGLWE<D> {
     ) where
         DataSk: DataRef,
         DataWant: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxAddScalarInplace, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+        VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
-        VecZnxNormalizeTmpBytes, VecZnxSubInplace,
+        VecZnxSubInplace,
     },
     layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, VecZnxBig, VecZnxDft, ZnxZero},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
@@ -21,8 +21,8 @@ impl<D: DataRef> GGSW<D> {
     ) where
         DataSk: DataRef,
         DataScalar: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>
@@ -94,8 +94,8 @@ impl<D: DataRef> GGSW<D> {
     ) where
         DataSk: DataRef,
         DataScalar: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume,
-        VecZnxIdftApplyConsume, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSubInplace,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSubInplace,
     },
     layouts::{Backend, DataRef, Module, Scratch, ScratchOwned},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
@@ -48,8 +48,8 @@ impl<D: DataRef> GLWE<D> {
     ) where
         DataSk: DataRef,
         DataPt: DataRef,
-        Module<B>: VecZnxDftAllocBytes
+        Module<B>: VecZnxDftBytesOf
-            + VecZnxBigAllocBytes
+            + VecZnxBigBytesOf
             + VecZnxDftApply<B>
             + SvpApplyDftToDftInplace<B>
             + VecZnxIdftApplyConsume<B>

@@ -1,13 +1,13 @@
 use poulpy_hal::{
-    api::{TakeMatZnx, TakeScalarZnx, TakeSvpPPol, TakeVecZnx, TakeVecZnxDft, TakeVmpPMat},
+    api::{ScratchAvailable, ScratchTakeBasic},
-    layouts::{Backend, Scratch},
+    layouts::{Backend, Module, Scratch},
 };

 use crate::{
     dist::Distribution,
     layouts::{
         AutomorphismKey, Degree, GGLWE, GGLWEInfos, GGSW, GGSWInfos, GLWE, GLWEInfos, GLWEPlaintext, GLWEPublicKey, GLWESecret,
-        GLWESwitchingKey, Rank, TensorKey,
+        GLWESwitchingKey, GetDegree, Rank, TensorKey,
         prepared::{
             AutomorphismKeyPrepared, GGLWEPrepared, GGSWPrepared, GLWEPublicKeyPrepared, GLWESecretPrepared,
             GLWESwitchingKeyPrepared, TensorKeyPrepared,
@@ -15,119 +15,15 @@ use crate::{
         },
     },
 };

-pub trait TakeGLWECt {
+pub trait ScratchTakeCore<B: Backend>
-    fn take_glwe_ct<A>(&mut self, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWECtSlice {
-    fn take_glwe_ct_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GLWE<&mut [u8]>>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWEPt<B: Backend> {
-    fn take_glwe_pt<A>(&mut self, infos: &A) -> (GLWEPlaintext<&mut [u8]>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGGLWE {
-    fn take_gglwe<A>(&mut self, infos: &A) -> (GGLWE<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWEPrepared<B: Backend> {
-    fn take_gglwe_prepared<A>(&mut self, infos: &A) -> (GGLWEPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGSW {
-    fn take_ggsw<A>(&mut self, infos: &A) -> (GGSW<&mut [u8]>, &mut Self)
-    where
-        A: GGSWInfos;
-}
-
-pub trait TakeGGSWPrepared<B: Backend> {
-    fn take_ggsw_prepared<A>(&mut self, infos: &A) -> (GGSWPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGSWInfos;
-}
-
-pub trait TakeGGSWPreparedSlice<B: Backend> {
-    fn take_ggsw_prepared_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GGSWPrepared<&mut [u8], B>>, &mut Self)
-    where
-        A: GGSWInfos;
-}
-
-pub trait TakeGLWESecret {
-    fn take_glwe_secret(&mut self, n: Degree, rank: Rank) -> (GLWESecret<&mut [u8]>, &mut Self);
-}
-
-pub trait TakeGLWESecretPrepared<B: Backend> {
-    fn take_glwe_secret_prepared(&mut self, n: Degree, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self);
-}
-
-pub trait TakeGLWEPk {
-    fn take_glwe_pk<A>(&mut self, infos: &A) -> (GLWEPublicKey<&mut [u8]>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWEPkPrepared<B: Backend> {
-    fn take_glwe_pk_prepared<A>(&mut self, infos: &A) -> (GLWEPublicKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GLWEInfos;
-}
-
-pub trait TakeGLWESwitchingKey {
-    fn take_glwe_switching_key<A>(&mut self, infos: &A) -> (GLWESwitchingKey<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWESwitchingKeyPrepared<B: Backend> {
-    fn take_gglwe_switching_key_prepared<A>(&mut self, infos: &A) -> (GLWESwitchingKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeTensorKey {
-    fn take_tensor_key<A>(&mut self, infos: &A) -> (TensorKey<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWETensorKeyPrepared<B: Backend> {
-    fn take_gglwe_tensor_key_prepared<A>(&mut self, infos: &A) -> (TensorKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWEAutomorphismKey {
-    fn take_gglwe_automorphism_key<A>(&mut self, infos: &A) -> (AutomorphismKey<&mut [u8]>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-pub trait TakeGGLWEAutomorphismKeyPrepared<B: Backend> {
-    fn take_gglwe_automorphism_key_prepared<A>(&mut self, infos: &A) -> (AutomorphismKeyPrepared<&mut [u8], B>, &mut Self)
-    where
-        A: GGLWEInfos;
-}
-
-impl<B: Backend> TakeGLWECt for Scratch<B>
 where
-    Scratch<B>: TakeVecZnx,
+    Self: ScratchTakeBasic<B> + ScratchAvailable,
 {
-    fn take_glwe_ct<A>(&mut self, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
+    fn take_glwe_ct<A>(&mut self, module: &Module<B>, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
     {
-        let (data, scratch) = self.take_vec_znx(infos.n().into(), (infos.rank() + 1).into(), infos.size());
+        let (data, scratch) = self.take_vec_znx(module, (infos.rank() + 1).into(), infos.size());
         (
             GLWE {
                 k: infos.k(),
@@ -137,12 +33,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWECtSlice for Scratch<B>
-where
-    Scratch<B>: TakeVecZnx,
-{
     fn take_glwe_ct_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GLWE<&mut [u8]>>, &mut Self)
     where
         A: GLWEInfos,
@@ -156,12 +47,7 @@ where
         }
         (cts, scratch)
     }
-}
-
-impl<B: Backend> TakeGLWEPt<B> for Scratch<B>
-where
-    Scratch<B>: TakeVecZnx,
-{
     fn take_glwe_pt<A>(&mut self, infos: &A) -> (GLWEPlaintext<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
@@ -176,12 +62,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWE for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_gglwe<A>(&mut self, infos: &A) -> (GGLWE<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -203,12 +84,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWEPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVmpPMat<B>,
-{
     fn take_gglwe_prepared<A>(&mut self, infos: &A) -> (GGLWEPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -230,12 +106,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGSW for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_ggsw<A>(&mut self, infos: &A) -> (GGSW<&mut [u8]>, &mut Self)
     where
         A: GGSWInfos,
@@ -257,12 +128,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGSWPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVmpPMat<B>,
-{
     fn take_ggsw_prepared<A>(&mut self, infos: &A) -> (GGSWPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGSWInfos,
@@ -284,12 +150,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGSWPreparedSlice<B> for Scratch<B>
-where
-    Scratch<B>: TakeGGSWPrepared<B>,
-{
     fn take_ggsw_prepared_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GGSWPrepared<&mut [u8], B>>, &mut Self)
     where
         A: GGSWInfos,
@@ -303,12 +164,7 @@ where
         }
         (cts, scratch)
     }
-}
-
-impl<B: Backend> TakeGLWEPk for Scratch<B>
-where
-    Scratch<B>: TakeVecZnx,
-{
     fn take_glwe_pk<A>(&mut self, infos: &A) -> (GLWEPublicKey<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
@@ -324,12 +180,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWEPkPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVecZnxDft<B>,
-{
     fn take_glwe_pk_prepared<A>(&mut self, infos: &A) -> (GLWEPublicKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GLWEInfos,
@@ -345,12 +196,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWESecret for Scratch<B>
-where
-    Scratch<B>: TakeScalarZnx,
-{
     fn take_glwe_secret(&mut self, n: Degree, rank: Rank) -> (GLWESecret<&mut [u8]>, &mut Self) {
         let (data, scratch) = self.take_scalar_znx(n.into(), rank.into());
         (
@@ -361,12 +207,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWESecretPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeSvpPPol<B>,
-{
     fn take_glwe_secret_prepared(&mut self, n: Degree, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self) {
         let (data, scratch) = self.take_svp_ppol(n.into(), rank.into());
         (
@@ -377,12 +218,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGLWESwitchingKey for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_glwe_switching_key<A>(&mut self, infos: &A) -> (GLWESwitchingKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -397,12 +233,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWESwitchingKeyPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeGGLWEPrepared<B>,
-{
     fn take_gglwe_switching_key_prepared<A>(&mut self, infos: &A) -> (GLWESwitchingKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -417,12 +248,7 @@ where
             scratch,
         )
     }
-}
-
-impl<B: Backend> TakeGGLWEAutomorphismKey for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_gglwe_automorphism_key<A>(&mut self, infos: &A) -> (AutomorphismKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -430,12 +256,7 @@ where
         let (data, scratch) = self.take_glwe_switching_key(infos);
         (AutomorphismKey { key: data, p: 0 }, scratch)
     }
-}
-
-impl<B: Backend> TakeGGLWEAutomorphismKeyPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeGGLWESwitchingKeyPrepared<B>,
-{
     fn take_gglwe_automorphism_key_prepared<A>(&mut self, infos: &A) -> (AutomorphismKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -443,12 +264,7 @@ where
         let (data, scratch) = self.take_gglwe_switching_key_prepared(infos);
         (AutomorphismKeyPrepared { key: data, p: 0 }, scratch)
     }
-}
-
-impl<B: Backend> TakeTensorKey for Scratch<B>
-where
-    Scratch<B>: TakeMatZnx,
-{
     fn take_tensor_key<A>(&mut self, infos: &A) -> (TensorKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
@@ -478,12 +294,7 @@ where
         }
         (TensorKey { keys }, scratch)
     }
-}
-
-impl<B: Backend> TakeGGLWETensorKeyPrepared<B> for Scratch<B>
-where
-    Scratch<B>: TakeVmpPMat<B>,
-{
     fn take_gglwe_tensor_key_prepared<A>(&mut self, infos: &A) -> (TensorKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
@@ -515,3 +326,5 @@ where
         (TensorKeyPrepared { keys }, scratch)
     }
 }
+
+impl<B: Backend> ScratchTakeCore<B> for Scratch<B> where Self: ScratchTakeBasic<B> + ScratchAvailable {}
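
This file collapses a dozen single-purpose `Take*` traits into one `ScratchTakeCore` trait, provided to every `Scratch<B>` through a single blanket impl over `ScratchTakeBasic<B> + ScratchAvailable`, and the take helpers now receive `&Module<B>` so the ring degree comes from the module rather than an explicit `n` argument (compare the old `take_vec_znx(infos.n().into(), ...)` with the new `take_vec_znx(module, ...)`). A minimal sketch of that shape, using stand-in types (`Module`, `Scratch`, `take_poly`) that are not the real poulpy API:

use std::marker::PhantomData;

/// Stand-in module: owns the ring degree so callers need not pass it.
pub struct Module {
    degree: usize,
}

/// Stand-in scratch space: a bump allocator over a byte buffer.
pub struct Scratch<B> {
    data: Vec<u8>,
    used: usize,
    _backend: PhantomData<B>,
}

pub trait ScratchTakeBasic {
    /// Carve `n` bytes off the front of the scratch buffer.
    fn take_bytes(&mut self, n: usize) -> &mut [u8];
}

impl<B> ScratchTakeBasic for Scratch<B> {
    fn take_bytes(&mut self, n: usize) -> &mut [u8] {
        let start = self.used;
        self.used += n;
        &mut self.data[start..start + n]
    }
}

/// Higher-level helpers are default methods over the basic primitive and
/// read the degree from the module, mirroring the new `take_*` signatures.
pub trait ScratchTakeCore: ScratchTakeBasic {
    fn take_poly(&mut self, module: &Module, cols: usize) -> &mut [u8] {
        // one i64 coefficient per slot, as an illustrative layout
        self.take_bytes(module.degree * cols * std::mem::size_of::<i64>())
    }
}

/// Blanket impl: any scratch with the byte-level primitive gets the core API.
impl<T: ScratchTakeBasic> ScratchTakeCore for T {}

fn main() {
    let module = Module { degree: 8 };
    let mut s: Scratch<()> = Scratch { data: vec![0; 1 << 10], used: 0, _backend: PhantomData };
    let poly = s.take_poly(&module, 2);
    println!("took {} bytes", poly.len());
}

The blanket impl is what lets the diff delete every individual `impl ... for Scratch<B>` block: one byte-level primitive now induces the whole typed take API.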

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
         VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -27,7 +27,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_automorphism_key_automorphism<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -38,8 +38,8 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxAutomorphism
         + VecZnxAutomorphismInplace<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxNormalizeTmpBytes
         + VmpPMatAlloc<B>
         + VmpPrepare<B>
@@ -224,7 +224,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_automorphism_key_automorphism_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -238,9 +238,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDftTmpBytes
@@ -255,8 +255,8 @@ where
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
         + VecZnxAutomorphismInplace<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
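
The remaining test files change only in the names that appear inside their `where` clauses: `VecZnxDftAllocBytes` becomes `VecZnxDftBytesOf`, `SvpPPolAllocBytes` becomes `SvpPPolBytesOf`, and so on. Because a trait bound is just a path, such a rename can also be cushioned with a re-export alias instead of touching every bound at once; a small self-contained sketch under hypothetical names (`VecBytesOf`, `scratch_bytes`), not the real poulpy items:

mod api {
    /// Renamed trait, `*BytesOf` style: computes the byte size of a layout.
    pub trait VecBytesOf {
        fn bytes_of(&self, n: usize, cols: usize) -> usize;
    }

    /// Path alias so bounds still written against the old `*AllocBytes`
    /// spelling keep compiling during the migration.
    pub use self::VecBytesOf as VecAllocBytes;
}

use api::VecBytesOf;

struct Module {
    word: usize,
}

impl VecBytesOf for Module {
    fn bytes_of(&self, n: usize, cols: usize) -> usize {
        n * cols * self.word
    }
}

/// Generic helper bounded the way the test functions above are: the bound
/// names the trait by path, so a rename only ever touches `where` clauses.
fn scratch_bytes<M: VecBytesOf>(module: &M, n: usize, cols: usize) -> usize {
    module.bytes_of(n, cols)
}

fn main() {
    let m = Module { word: 8 };
    println!("{}", scratch_bytes(&m, 1024, 2));
}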

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
@@ -27,8 +27,8 @@ use crate::{

 pub fn test_ggsw_automorphism<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -41,7 +41,7 @@ where
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpA<B>
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxAddScalarInplace
         + VecZnxCopy
@@ -219,8 +219,8 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_ggsw_automorphism_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -233,7 +233,7 @@ where
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpA<B>
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxAddScalarInplace
         + VecZnxCopy

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -26,7 +26,7 @@ use crate::{

 pub fn test_glwe_automorphism<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -40,9 +40,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VmpApplyDftToDftTmpBytes
@@ -169,7 +169,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_glwe_automorphism_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -183,9 +183,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VmpApplyDftToDftTmpBytes

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes,
+        VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
-        VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace,
     },
@@ -23,7 +23,7 @@ use crate::layouts::{

 pub fn test_lwe_to_glwe<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -36,9 +36,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -142,7 +142,7 @@ where

 pub fn test_glwe_to_lwe<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -155,9 +155,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
         VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -26,7 +26,7 @@ use crate::{

 pub fn test_gglwe_automorphisk_key_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -40,7 +40,7 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
@@ -51,7 +51,7 @@ where
         + VecZnxSwitchRing
         + VecZnxAddScalarInplace
         + VecZnxAutomorphismInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxSubScalarInplace
         + VecZnxCopy
@@ -129,7 +129,7 @@ where

 pub fn test_gglwe_automorphisk_key_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -143,7 +143,7 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
@@ -154,7 +154,7 @@ where
         + VecZnxSwitchRing
         + VecZnxAddScalarInplace
         + VecZnxAutomorphismInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxSubScalarInplace
         + VecZnxCopy

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
         VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
         VecZnxSubScalarInplace, VecZnxSwitchRing, VmpPMatAlloc, VmpPrepare,
     },
@@ -25,7 +25,7 @@ use crate::{

 pub fn test_gglwe_switching_key_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -39,12 +39,12 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxSwitchRing
         + VecZnxAddScalarInplace
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxSubScalarInplace
         + VecZnxCopy
@@ -117,7 +117,7 @@ where

 pub fn test_gglwe_switching_key_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -131,12 +131,12 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxSwitchRing
         + VecZnxAddScalarInplace
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxSubScalarInplace
         + VecZnxCopy

@@ -1,10 +1,10 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply,
-        VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
     },
     layouts::{Backend, Module, ScalarZnx, ScratchOwned},
     oep::{
@@ -79,7 +79,7 @@ where

 pub fn test_ggsw_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -93,11 +93,11 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxAddScalarInplace
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VmpPMatAlloc<B>

@@ -1,10 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
-        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
-        VecZnxSubInplace,
     },
     layouts::{Backend, Module, ScratchOwned},
     oep::{
@@ -26,8 +25,8 @@ use crate::{

 pub fn test_glwe_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -36,7 +35,7 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddNormal<B>
@@ -117,8 +116,8 @@ where

 pub fn test_glwe_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -127,7 +126,7 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddNormal<B>
@@ -219,8 +218,8 @@ where

 pub fn test_glwe_encrypt_zero_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
         + VecZnxIdftApplyConsume<B>
@@ -229,7 +228,7 @@ where
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddNormal<B>
@@ -294,7 +293,7 @@ where

 pub fn test_glwe_encrypt_pk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -308,10 +307,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VecZnxDftAlloc<B>

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing,
     },
@@ -25,7 +25,7 @@ use crate::{

 pub fn test_gglwe_tensor_key_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -39,10 +39,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VecZnxDftAlloc<B>
@@ -145,7 +145,7 @@ where

 pub fn test_gglwe_tensor_key_compressed_encrypt_sk<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -159,10 +159,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxCopy
         + VecZnxDftAlloc<B>

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
         VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub,
         VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
@@ -27,7 +27,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_switching_key_external_product<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -41,9 +41,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxSwitchRing
@@ -209,7 +209,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_switching_key_external_product_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -223,9 +223,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxSwitchRing
|
|||||||
@@ -1,11 +1,11 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub, VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub, VecZnxSubInplace, VmpApplyDftToDft,
+        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
     layouts::{Backend, Module, ScalarZnx, ScalarZnxToMut, ScratchOwned, ZnxViewMut},
     oep::{
@@ -27,7 +27,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_ggsw_external_product<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -41,9 +41,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxAddScalarInplace
@@ -192,7 +192,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_ggsw_external_product_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -206,9 +206,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxAddScalarInplace
@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSub, VecZnxSubInplace,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -26,7 +26,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_glwe_external_product<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -39,9 +39,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -178,7 +178,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_glwe_external_product_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -191,9 +191,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -1,11 +1,11 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubInplace, VecZnxSubScalarInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
+        VecZnxSubScalarInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
+        VmpPrepare,
     },
     layouts::{Backend, Module, ScratchOwned},
     oep::{
@@ -26,7 +26,7 @@ use crate::{
 
 pub fn test_gglwe_switching_key_keyswitch<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -39,9 +39,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -196,7 +196,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_gglwe_switching_key_keyswitch_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -209,9 +209,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAlloc,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+        VecZnxBigAlloc, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAlloc,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
@@ -27,7 +27,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_ggsw_keyswitch<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -40,9 +40,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -216,7 +216,7 @@ where
 #[allow(clippy::too_many_arguments)]
 pub fn test_ggsw_keyswitch_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -229,9 +229,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -1,11 +1,10 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
-        VmpPrepare,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
+        VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
     },
     layouts::{Backend, Module, ScratchOwned},
     oep::{
@@ -27,7 +26,7 @@ use crate::{
 #[allow(clippy::too_many_arguments)]
 pub fn test_glwe_keyswitch<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -40,9 +39,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -169,7 +168,7 @@ where
 
 pub fn test_glwe_keyswitch_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -182,9 +181,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace,
     },
@@ -22,7 +22,7 @@ use crate::layouts::{
 
 pub fn test_lwe_keyswitch<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -35,9 +35,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -2,10 +2,10 @@ use std::collections::HashMap;
 
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace,
         VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
@@ -28,7 +28,7 @@ use crate::{
 
 pub fn test_glwe_packing<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxAutomorphism
         + VecZnxBigAutomorphismInplace<B>
         + VecZnxBigSubSmallNegateInplace<B>
@@ -48,9 +48,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -2,13 +2,13 @@ use std::collections::HashMap;
 
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
-        VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft,
-        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare,
+        VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
+        VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
+        VmpPrepare,
     },
     layouts::{Backend, Module, ScratchOwned, ZnxView, ZnxViewMut},
     oep::{
@@ -29,7 +29,7 @@ use crate::{
 
 pub fn test_glwe_trace_inplace<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VecZnxAutomorphism
         + VecZnxBigAutomorphismInplace<B>
         + VecZnxBigSubSmallNegateInplace<B>
@@ -47,9 +47,9 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxNormalizeTmpBytes
@@ -1,4 +1,7 @@
-use crate::layouts::{Backend, MatZnx, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat};
+use crate::{
+    api::{SvpPPolBytesOf, VecZnxBigBytesOf, VecZnxDftBytesOf, VmpPMatBytesOf},
+    layouts::{Backend, MatZnx, Module, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
+};
 
 /// Allocates a new [crate::layouts::ScratchOwned] of `size` aligned bytes.
 pub trait ScratchOwnedAlloc<B: Backend> {
@@ -25,76 +28,124 @@ pub trait TakeSlice {
     fn take_slice<T>(&mut self, len: usize) -> (&mut [T], &mut Self);
 }
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [ScalarZnx] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeScalarZnx {
-    fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self);
-}
+pub trait ScratchTakeBasic<B: Backend>
+where
+    Self: TakeSlice,
+{
+    fn take_scalar_znx(&mut self, module: &Module<B>, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
+        let (take_slice, rem_slice) = self.take_slice(ScalarZnx::bytes_of(module.n(), cols));
+        (
+            ScalarZnx::from_data(take_slice, module.n(), cols),
+            rem_slice,
+        )
+    }
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [SvpPPol] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeSvpPPol<B: Backend> {
-    fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self);
-}
+    fn take_svp_ppol(&mut self, module: &Module<B>, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self)
+    where
+        Module<B>: SvpPPolBytesOf,
+    {
+        let (take_slice, rem_slice) = self.take_slice(module.bytes_of_svp_ppol(cols));
+        (SvpPPol::from_data(take_slice, module.n(), cols), rem_slice)
+    }
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnx] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeVecZnx {
-    fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self);
-}
+    fn take_vec_znx(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
+        let (take_slice, rem_slice) = self.take_slice(VecZnx::bytes_of(module.n(), cols, size));
+        (
+            VecZnx::from_data(take_slice, module.n(), cols, size),
+            rem_slice,
+        )
+    }
 
-/// Take a slice of bytes from a [Scratch], slices it into a vector of [VecZnx] aand returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeVecZnxSlice {
-    fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self);
-}
+    fn take_vec_znx_big(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self)
+    where
+        Module<B>: VecZnxBigBytesOf,
+    {
+        let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vec_znx_big(cols, size));
+        (
+            VecZnxBig::from_data(take_slice, module.n(), cols, size),
+            rem_slice,
+        )
+    }
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnxBig] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeVecZnxBig<B: Backend> {
-    fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self);
-}
+    fn take_vec_znx_dft(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self)
+    where
+        Module<B>: VecZnxDftBytesOf,
+    {
+        let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vec_znx_dft(cols, size));
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnxDft] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeVecZnxDft<B: Backend> {
-    fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self);
-}
+        (
+            VecZnxDft::from_data(take_slice, module.n(), cols, size),
+            rem_slice,
+        )
+    }
 
-/// Take a slice of bytes from a [Scratch], slices it into a vector of [VecZnxDft] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeVecZnxDftSlice<B: Backend> {
     fn take_vec_znx_dft_slice(
         &mut self,
+        module: &Module<B>,
         len: usize,
-        n: usize,
         cols: usize,
         size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self);
-}
+    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self)
+    where
+        Module<B>: VecZnxDftBytesOf,
+    {
+        let mut scratch: &mut Self = self;
+        let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
+        for _ in 0..len {
+            let (znx, new_scratch) = scratch.take_vec_znx_dft(module, cols, size);
+            scratch = new_scratch;
+            slice.push(znx);
+        }
+        (slice, scratch)
+    }
+
+    fn take_vec_znx_slice(
+        &mut self,
+        module: &Module<B>,
+        len: usize,
+        cols: usize,
+        size: usize,
+    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
+        let mut scratch: &mut Self = self;
+        let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
+        for _ in 0..len {
+            let (znx, new_scratch) = scratch.take_vec_znx(module, cols, size);
+            scratch = new_scratch;
+            slice.push(znx);
+        }
+        (slice, scratch)
+    }
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [VmpPMat] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeVmpPMat<B: Backend> {
     fn take_vmp_pmat(
         &mut self,
-        n: usize,
+        module: &Module<B>,
         rows: usize,
         cols_in: usize,
         cols_out: usize,
         size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Self);
-}
+    ) -> (VmpPMat<&mut [u8], B>, &mut Self)
+    where
+        Module<B>: VmpPMatBytesOf,
+    {
+        let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vmp_pmat(rows, cols_in, cols_out, size));
+        (
+            VmpPMat::from_data(take_slice, module.n(), rows, cols_in, cols_out, size),
+            rem_slice,
+        )
+    }
 
-/// Take a slice of bytes from a [Scratch], wraps it into a [MatZnx] and returns it
-/// as well as a new [Scratch] minus the taken array of bytes.
-pub trait TakeMatZnx {
     fn take_mat_znx(
         &mut self,
-        n: usize,
+        module: &Module<B>,
         rows: usize,
         cols_in: usize,
         cols_out: usize,
         size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Self);
+    ) -> (MatZnx<&mut [u8]>, &mut Self) {
+        let (take_slice, rem_slice) = self.take_slice(MatZnx::bytes_of(module.n(), rows, cols_in, cols_out, size));
+        (
+            MatZnx::from_data(take_slice, module.n(), rows, cols_in, cols_out, size),
+            rem_slice,
+        )
+    }
 }
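This hunk collapses the per-type Take* traits into a single ScratchTakeBasic trait whose methods have default bodies built on take_slice, and which take a &Module<B> instead of a raw ring degree n. A usage sketch under the new API; it assumes Scratch<B> implements ScratchTakeBasic<B> and that ScratchOwned keeps an alloc(size)/borrow() shape, neither of which is shown in this diff, and the byte budget is illustrative:

    use poulpy_hal::{
        api::{ScratchOwnedAlloc, ScratchOwnedBorrow, ScratchTakeBasic, VecZnxDftBytesOf},
        layouts::{Backend, Module, Scratch, ScratchOwned},
    };

    fn with_temporaries<B: Backend>(module: &Module<B>, cols: usize, size: usize)
    where
        Module<B>: VecZnxDftBytesOf,
        Scratch<B>: ScratchTakeBasic<B>,
        ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
    {
        let mut owned: ScratchOwned<B> = ScratchOwned::alloc(1 << 16); // assumed constructor shape
        let scratch: &mut Scratch<B> = owned.borrow();
        // Each take_* call returns the typed view plus the remaining scratch.
        let (tmp_znx, scratch) = scratch.take_vec_znx(module, cols, size);
        let (tmp_dft, _rest) = scratch.take_vec_znx_dft(module, cols, size);
        let _ = (tmp_znx, tmp_dft);
    }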
@@ -8,8 +8,8 @@ pub trait SvpPPolAlloc<B: Backend> {
 }
 
 /// Returns the size in bytes to allocate a [crate::layouts::SvpPPol].
-pub trait SvpPPolAllocBytes {
-    fn svp_ppol_bytes_of(&self, cols: usize) -> usize;
+pub trait SvpPPolBytesOf {
+    fn bytes_of_svp_ppol(&self, cols: usize) -> usize;
 }
 
 /// Consume a vector of bytes into a [crate::layouts::MatZnx].
@@ -16,8 +16,8 @@ pub trait VecZnxBigAlloc<B: Backend> {
 }
 
 /// Returns the size in bytes to allocate a [crate::layouts::VecZnxBig].
-pub trait VecZnxBigAllocBytes {
-    fn vec_znx_big_bytes_of(&self, cols: usize, size: usize) -> usize;
+pub trait VecZnxBigBytesOf {
+    fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize;
 }
 
 /// Consume a vector of bytes into a [crate::layouts::VecZnxBig].
@@ -10,8 +10,8 @@ pub trait VecZnxDftFromBytes<B: Backend> {
     fn vec_znx_dft_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B>;
 }
 
-pub trait VecZnxDftAllocBytes {
-    fn vec_znx_dft_bytes_of(&self, cols: usize, size: usize) -> usize;
+pub trait VecZnxDftBytesOf {
+    fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize;
 }
 
 pub trait VecZnxDftApply<B: Backend> {
@@ -6,8 +6,8 @@ pub trait VmpPMatAlloc<B: Backend> {
     fn vmp_pmat_alloc(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B>;
 }
 
-pub trait VmpPMatAllocBytes {
-    fn vmp_pmat_bytes_of(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
+pub trait VmpPMatBytesOf {
+    fn bytes_of_vmp_pmat(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
 }
 
 pub trait VmpPMatFromBytes<B: Backend> {
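Across these four api files the convention flips from XxxAllocBytes { fn xxx_bytes_of } to XxxBytesOf { fn bytes_of_xxx }. For reference, a sketch of the VMP variant, the only query with a four-parameter geometry; the wrapper function is illustrative, not part of the crate:

    use poulpy_hal::{api::VmpPMatBytesOf, layouts::{Backend, Module}};

    fn vmp_pmat_bytes<B: Backend>(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize
    where
        Module<B>: VmpPMatBytesOf,
    {
        // Renamed from `module.vmp_pmat_bytes_of(...)`.
        module.bytes_of_vmp_pmat(rows, cols_in, cols_out, size)
    }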
@@ -1,14 +1,7 @@
 use crate::{
-    api::{
-        ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeMatZnx, TakeScalarZnx, TakeSlice,
-        TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice, TakeVmpPMat,
-    },
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
-    oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeMatZnxImpl,
-        TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl,
-        TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl,
-    },
+    api::{ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeSlice},
+    layouts::{Backend, Scratch, ScratchOwned},
+    oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
 };
 
 impl<B> ScratchOwnedAlloc<B> for ScratchOwned<B>
@@ -55,104 +48,3 @@ where
         B::take_slice_impl(self, len)
     }
 }
-
-impl<B> TakeScalarZnx for Scratch<B>
-where
-    B: Backend + TakeScalarZnxImpl<B>,
-{
-    fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
-        B::take_scalar_znx_impl(self, n, cols)
-    }
-}
-
-impl<B> TakeSvpPPol<B> for Scratch<B>
-where
-    B: Backend + TakeSvpPPolImpl<B>,
-{
-    fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self) {
-        B::take_svp_ppol_impl(self, n, cols)
-    }
-}
-
-impl<B> TakeVecZnx for Scratch<B>
-where
-    B: Backend + TakeVecZnxImpl<B>,
-{
-    fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
-        B::take_vec_znx_impl(self, n, cols, size)
-    }
-}
-
-impl<B> TakeVecZnxSlice for Scratch<B>
-where
-    B: Backend + TakeVecZnxSliceImpl<B>,
-{
-    fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
-        B::take_vec_znx_slice_impl(self, len, n, cols, size)
-    }
-}
-
-impl<B> TakeVecZnxBig<B> for Scratch<B>
-where
-    B: Backend + TakeVecZnxBigImpl<B>,
-{
-    fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self) {
-        B::take_vec_znx_big_impl(self, n, cols, size)
-    }
-}
-
-impl<B> TakeVecZnxDft<B> for Scratch<B>
-where
-    B: Backend + TakeVecZnxDftImpl<B>,
-{
-    fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self) {
-        B::take_vec_znx_dft_impl(self, n, cols, size)
-    }
-}
-
-impl<B> TakeVecZnxDftSlice<B> for Scratch<B>
-where
-    B: Backend + TakeVecZnxDftSliceImpl<B>,
-{
-    fn take_vec_znx_dft_slice(
-        &mut self,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self) {
-        B::take_vec_znx_dft_slice_impl(self, len, n, cols, size)
-    }
-}
-
-impl<B> TakeVmpPMat<B> for Scratch<B>
-where
-    B: Backend + TakeVmpPMatImpl<B>,
-{
-    fn take_vmp_pmat(
-        &mut self,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Self) {
-        B::take_vmp_pmat_impl(self, n, rows, cols_in, cols_out, size)
-    }
-}
-
-impl<B> TakeMatZnx for Scratch<B>
-where
-    B: Backend + TakeMatZnxImpl<B>,
-{
-    fn take_mat_znx(
-        &mut self,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Self) {
-        B::take_mat_znx_impl(self, n, rows, cols_in, cols_out, size)
-    }
-}
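With the blanket Take* impls on Scratch<B> deleted, the only scratch-splitting hook a backend still provides is take_slice_impl; the typed views are recovered through the ScratchTakeBasic defaults shown earlier. A sketch of the surviving slice-level entry point from the caller's side; the element type and length are illustrative:

    use poulpy_hal::{
        api::TakeSlice,
        layouts::{Backend, Scratch},
    };

    fn split_scratch<B: Backend>(scratch: &mut Scratch<B>) -> (&mut [i64], &mut Scratch<B>)
    where
        Scratch<B>: TakeSlice,
    {
        // Forwards to B::take_slice_impl, the one remaining OEP hook for scratch.
        scratch.take_slice::<i64>(1024)
    }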
@@ -1,6 +1,6 @@
 use crate::{
     api::{
-        SvpApplyDft, SvpApplyDftToDft, SvpApplyDftToDftAdd, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        SvpApplyDft, SvpApplyDftToDft, SvpApplyDftToDftAdd, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPPolFromBytes, SvpPrepare,
     },
     layouts::{
@@ -30,11 +30,11 @@ where
     }
 }
 
-impl<B> SvpPPolAllocBytes for Module<B>
+impl<B> SvpPPolBytesOf for Module<B>
 where
     B: Backend + SvpPPolAllocBytesImpl<B>,
 {
-    fn svp_ppol_bytes_of(&self, cols: usize) -> usize {
+    fn bytes_of_svp_ppol(&self, cols: usize) -> usize {
         B::svp_ppol_bytes_of_impl(self.n(), cols)
     }
 }
@@ -1,7 +1,7 @@
 use crate::{
     api::{
         VecZnxBigAdd, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
-        VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes,
+        VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes, VecZnxBigBytesOf,
         VecZnxBigFromBytes, VecZnxBigFromSmall, VecZnxBigNegate, VecZnxBigNegateInplace, VecZnxBigNormalize,
         VecZnxBigNormalizeTmpBytes, VecZnxBigSub, VecZnxBigSubInplace, VecZnxBigSubNegateInplace, VecZnxBigSubSmallA,
         VecZnxBigSubSmallB, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace,
@@ -49,11 +49,11 @@ where
     }
 }
 
-impl<B> VecZnxBigAllocBytes for Module<B>
+impl<B> VecZnxBigBytesOf for Module<B>
 where
     B: Backend + VecZnxBigAllocBytesImpl<B>,
 {
-    fn vec_znx_big_bytes_of(&self, cols: usize, size: usize) -> usize {
+    fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize {
         B::vec_znx_big_bytes_of_impl(self.n(), cols, size)
     }
 }
@@ -1,8 +1,8 @@
 use crate::{
     api::{
-        VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy,
-        VecZnxDftFromBytes, VecZnxDftSub, VecZnxDftSubInplace, VecZnxDftSubNegateInplace, VecZnxDftZero, VecZnxIdftApply,
-        VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
+        VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxDftFromBytes,
+        VecZnxDftSub, VecZnxDftSubInplace, VecZnxDftSubNegateInplace, VecZnxDftZero, VecZnxIdftApply, VecZnxIdftApplyConsume,
+        VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
     },
     layouts::{
         Backend, Data, Module, Scratch, VecZnxBig, VecZnxBigToMut, VecZnxDft, VecZnxDftOwned, VecZnxDftToMut, VecZnxDftToRef,
@@ -24,11 +24,11 @@ where
     }
 }
 
-impl<B> VecZnxDftAllocBytes for Module<B>
+impl<B> VecZnxDftBytesOf for Module<B>
 where
     B: Backend + VecZnxDftAllocBytesImpl<B>,
 {
-    fn vec_znx_dft_bytes_of(&self, cols: usize, size: usize) -> usize {
+    fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize {
         B::vec_znx_dft_bytes_of_impl(self.n(), cols, size)
     }
 }
@@ -1,7 +1,7 @@
 use crate::{
     api::{
         VmpApplyDft, VmpApplyDftTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftAddTmpBytes,
-        VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatFromBytes, VmpPrepare, VmpPrepareTmpBytes,
+        VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPMatBytesOf, VmpPMatFromBytes, VmpPrepare, VmpPrepareTmpBytes,
     },
     layouts::{
         Backend, MatZnxToRef, Module, Scratch, VecZnxDftToMut, VecZnxDftToRef, VecZnxToRef, VmpPMatOwned, VmpPMatToMut,
@@ -23,11 +23,11 @@ where
     }
 }
 
-impl<B> VmpPMatAllocBytes for Module<B>
+impl<B> VmpPMatBytesOf for Module<B>
 where
     B: Backend + VmpPMatAllocBytesImpl<B>,
 {
-    fn vmp_pmat_bytes_of(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
+    fn bytes_of_vmp_pmat(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
         B::vmp_pmat_bytes_of_impl(self.n(), rows, cols_in, cols_out, size)
     }
 }
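Note the asymmetry these impl hunks preserve: the public traits are renamed to *BytesOf, but the backend extension points they forward to keep their *AllocBytesImpl names and *_bytes_of_impl methods. The blanket impls above are the single bridge between the two namings; a sketch of the resulting call chain, with the wrapper function being illustrative:

    use poulpy_hal::{api::VecZnxDftBytesOf, layouts::{Backend, Module}};

    // Resolves through the blanket impl shown above:
    //   Module::<B>::bytes_of_vec_znx_dft(cols, size)
    //     -> B::vec_znx_dft_bytes_of_impl(self.n(), cols, size)  // OEP name unchanged
    fn dft_bytes<B: Backend>(module: &Module<B>, cols: usize, size: usize) -> usize
    where
        Module<B>: VecZnxDftBytesOf,
    {
        module.bytes_of_vec_znx_dft(cols, size)
    }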
@@ -1,4 +1,4 @@
-use crate::layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat};
+use crate::layouts::{Backend, Scratch, ScratchOwned};
 
 /// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
 /// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
@@ -39,111 +39,3 @@ pub unsafe trait ScratchAvailableImpl<B: Backend> {
 pub unsafe trait TakeSliceImpl<B: Backend> {
     fn take_slice_impl<T>(scratch: &mut Scratch<B>, len: usize) -> (&mut [T], &mut Scratch<B>);
 }
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeScalarZnx] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeScalarZnxImpl<B: Backend> {
-    fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeSvpPPol] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeSvpPPolImpl<B: Backend> {
-    fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeVecZnx] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeVecZnxImpl<B: Backend> {
-    fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeVecZnxSlice] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeVecZnxSliceImpl<B: Backend> {
-    fn take_vec_znx_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeVecZnxBig] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeVecZnxBigImpl<B: Backend> {
-    fn take_vec_znx_big_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeVecZnxDft] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeVecZnxDftImpl<B: Backend> {
-    fn take_vec_znx_dft_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeVecZnxDftSlice] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeVecZnxDftSliceImpl<B: Backend> {
-    fn take_vec_znx_dft_slice_impl(
-        scratch: &mut Scratch<B>,
-        len: usize,
-        n: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeVmpPMat] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeVmpPMatImpl<B: Backend> {
-    fn take_vmp_pmat_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>);
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See the [poulpy-backend/src/cpu_fft64_ref/scratch.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/scratch.rs) reference implementation.
-/// * See [crate::api::TakeMatZnx] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait TakeMatZnxImpl<B: Backend> {
-    fn take_mat_znx_impl(
-        scratch: &mut Scratch<B>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Scratch<B>);
-}
@@ -9,10 +9,10 @@ use poulpy_core::layouts::{
 use poulpy_hal::{
     api::{
         ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc,
-        SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
-        VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes,
-        VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
-        VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform,
+        SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
+        VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
+        VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform,
         VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub,
         VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc,
@@ -42,7 +42,7 @@ where
         + VecZnxFillUniform
         + VecZnxAddNormal
        + VecZnxNormalizeInplace<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -55,7 +55,7 @@ where
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
         + VecZnxSwitchRing
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxIdftApplyTmpA<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddInplace<B>
@@ -70,7 +70,7 @@ where
         + VmpApplyDftToDftTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxRotateInplace<B>
         + VecZnxBigAutomorphismInplace<B>
         + VecZnxRshInplace<B>
@@ -80,7 +80,7 @@ where
         + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallNegateInplace<B>
         + VecZnxRotateInplaceTmpBytes
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftAddInplace<B>
         + VecZnxRotate
         + ZnFillUniform
@@ -124,7 +124,7 @@ where
         + VecZnxFillUniform
         + VecZnxAddNormal
         + VecZnxNormalizeInplace<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
|
||||||
+ SvpApplyDftToDftInplace<B>
|
+ SvpApplyDftToDftInplace<B>
|
||||||
@@ -137,7 +137,7 @@ where
|
|||||||
+ VecZnxAddScalarInplace
|
+ VecZnxAddScalarInplace
|
||||||
+ VecZnxAutomorphism
|
+ VecZnxAutomorphism
|
||||||
+ VecZnxSwitchRing
|
+ VecZnxSwitchRing
|
||||||
+ VecZnxBigAllocBytes
|
+ VecZnxBigBytesOf
|
||||||
+ VecZnxIdftApplyTmpA<B>
|
+ VecZnxIdftApplyTmpA<B>
|
||||||
+ SvpApplyDftToDft<B>
|
+ SvpApplyDftToDft<B>
|
||||||
+ VecZnxBigAddInplace<B>
|
+ VecZnxBigAddInplace<B>
|
||||||
@@ -152,7 +152,7 @@ where
|
|||||||
+ VmpApplyDftToDftTmpBytes
|
+ VmpApplyDftToDftTmpBytes
|
||||||
+ VmpApplyDftToDft<B>
|
+ VmpApplyDftToDft<B>
|
||||||
+ VmpApplyDftToDftAdd<B>
|
+ VmpApplyDftToDftAdd<B>
|
||||||
+ SvpPPolAllocBytes
|
+ SvpPPolBytesOf
|
||||||
+ VecZnxRotateInplace<B>
|
+ VecZnxRotateInplace<B>
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
+ VecZnxBigAutomorphismInplace<B>
|
||||||
+ VecZnxRshInplace<B>
|
+ VecZnxRshInplace<B>
|
||||||
@@ -162,7 +162,7 @@ where
|
|||||||
+ VecZnxAutomorphismInplace<B>
|
+ VecZnxAutomorphismInplace<B>
|
||||||
+ VecZnxBigSubSmallNegateInplace<B>
|
+ VecZnxBigSubSmallNegateInplace<B>
|
||||||
+ VecZnxRotateInplaceTmpBytes
|
+ VecZnxRotateInplaceTmpBytes
|
||||||
+ VecZnxBigAllocBytes
|
+ VecZnxBigBytesOf
|
||||||
+ VecZnxDftAddInplace<B>
|
+ VecZnxDftAddInplace<B>
|
||||||
+ VecZnxRotate
|
+ VecZnxRotate
|
||||||
+ ZnFillUniform
|
+ ZnFillUniform
|
||||||
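Read together, the hunks above (and those that follow) are largely one mechanical refactor: the byte-sizing capability traits SvpPPolAllocBytes, VecZnxBigAllocBytes, and VecZnxDftAllocBytes become SvpPPolBytesOf, VecZnxBigBytesOf, and VecZnxDftBytesOf, with the matching method rename visible later in cggi_blind_rotate_scratch_space. Below is a hypothetical minimal sketch of the naming pattern only; neither the trait nor the method body is the real poulpy API.

// Hypothetical sketch of the `*BytesOf` capability-trait naming pattern.
trait VecZnxDftBytesOfLike {
    // Bytes needed to back a DFT-domain vector with `cols` columns and `size` limbs.
    fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize;
}

struct ToyModule {
    n: usize, // ring degree
}

impl VecZnxDftBytesOfLike for ToyModule {
    fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize {
        // assume one f64 per coefficient in the DFT domain
        self.n * cols * size * std::mem::size_of::<f64>()
    }
}

fn main() {
    let module = ToyModule { n: 1 << 10 };
    assert_eq!(module.bytes_of_vec_znx_dft(2, 3), 1024 * 2 * 3 * 8);
}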
@@ -2,8 +2,8 @@ use std::marker::PhantomData;
 
 use poulpy_core::layouts::{Base2K, GLWE, GLWEInfos, GLWEPlaintextLayout, LWEInfos, Rank, TorusPrecision};
 
-use poulpy_core::{TakeGLWEPt, layouts::prepared::GLWESecretPrepared};
-use poulpy_hal::api::VecZnxBigAllocBytes;
+use poulpy_core::{TakeGLWEPlaintext, layouts::prepared::GLWESecretPrepared};
+use poulpy_hal::api::VecZnxBigBytesOf;
 #[cfg(test)]
 use poulpy_hal::api::{
     ScratchAvailable, TakeVecZnx, VecZnxAddInplace, VecZnxAddNormal, VecZnxFillUniform, VecZnxNormalize, VecZnxSub,
@@ -12,8 +12,8 @@ use poulpy_hal::api::{
 use poulpy_hal::source::Source;
 use poulpy_hal::{
     api::{
-        TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftAllocBytes,
-        VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
+        TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
 };
@@ -83,7 +83,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintBlocks<D, T> {
         scratch: &mut Scratch<BE>,
     ) where
         S: DataRef,
-        Module<BE>: VecZnxDftAllocBytes
+        Module<BE>: VecZnxDftBytesOf
             + VecZnxBigNormalize<BE>
             + VecZnxDftApply<BE>
             + SvpApplyDftToDftInplace<BE>
@@ -96,7 +96,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintBlocks<D, T> {
             + VecZnxAddNormal
             + VecZnxNormalize<BE>
             + VecZnxSub,
-        Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPt<BE>,
+        Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPlaintext<BE>,
     {
         use poulpy_core::layouts::GLWEPlaintextLayout;
 
@@ -136,7 +136,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
             + VecZnxBigAddInplace<BE>
             + VecZnxBigAddSmallInplace<BE>
             + VecZnxBigNormalize<BE>,
-        Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
+        Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
     {
         #[cfg(debug_assertions)]
         {
@@ -175,8 +175,8 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
         scratch: &mut Scratch<BE>,
     ) -> Vec<f64>
     where
-        Module<BE>: VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+        Module<BE>: VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxDftApply<BE>
             + SvpApplyDftToDftInplace<BE>
             + VecZnxIdftApplyConsume<BE>
@@ -186,7 +186,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
             + VecZnxNormalizeTmpBytes
             + VecZnxSubInplace
             + VecZnxNormalizeInplace<BE>,
-        Scratch<BE>: TakeGLWEPt<BE> + TakeVecZnxDft<BE> + TakeVecZnxBig<BE>,
+        Scratch<BE>: TakeGLWEPlaintext<BE> + TakeVecZnxDft<BE> + TakeVecZnxBig<BE>,
     {
         #[cfg(debug_assertions)]
         {
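Because call sites never name a backend concretely and instead accumulate capability bounds on Module<BE> as in the where-clauses above, a rename like this only has to touch bound lists and imports. A minimal self-contained sketch of that capability-bound style, with toy traits standing in for the poulpy ones:

// Toy capability traits, standing in for bounds like `VecZnxDftBytesOf`.
trait DftBytesOf {
    fn dft_bytes_of(&self, cols: usize) -> usize;
}
trait Normalize {
    fn normalize(&self, data: &mut [i64]);
}

// Generic code asks only for the capabilities it uses, mirroring the
// `Module<BE>: VecZnxDftBytesOf + VecZnxBigNormalize<BE> + ...` bounds above.
fn scratch_and_normalize<M>(module: &M, data: &mut [i64], cols: usize) -> usize
where
    M: DftBytesOf + Normalize,
{
    module.normalize(data);
    module.dft_bytes_of(cols)
}

struct ToyBackend {
    n: usize,
}
impl DftBytesOf for ToyBackend {
    fn dft_bytes_of(&self, cols: usize) -> usize {
        self.n * cols * 8
    }
}
impl Normalize for ToyBackend {
    fn normalize(&self, data: &mut [i64]) {
        for x in data.iter_mut() {
            *x = x.rem_euclid(1 << 16); // toy reduction
        }
    }
}

fn main() {
    let backend = ToyBackend { n: 16 };
    let mut data = vec![-3i64, 70000];
    let bytes = scratch_and_normalize(&backend, &mut data, 2);
    assert_eq!(bytes, 16 * 2 * 8);
    assert_eq!(data, vec![65533, 4464]);
}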
@@ -14,8 +14,8 @@ use poulpy_hal::{
 use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
-        VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
+        VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf,
+        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
         VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
         VecZnxSubInplace, VmpPrepare,
     },
@@ -123,7 +123,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits, BE: Backend> FheUintBlocksPrep<D,
     ) where
         S: DataRef,
         Module<BE>: VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<BE>
             + VecZnxDftApply<BE>
             + SvpApplyDftToDftInplace<BE>
@@ -190,8 +190,8 @@ impl<D: DataRef, T: UnsignedInteger + ToBits> FheUintBlocksPrepDebug<D, T> {
     #[allow(dead_code)]
     pub(crate) fn noise<S: DataRef, BE: Backend>(&self, module: &Module<BE>, sk: &GLWESecretPrepared<S, BE>, want: T)
     where
-        Module<BE>: VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
+        Module<BE>: VecZnxDftBytesOf
+            + VecZnxBigBytesOf
             + VecZnxDftApply<BE>
             + SvpApplyDftToDftInplace<BE>
             + VecZnxIdftApplyConsume<BE>
@@ -1,6 +1,6 @@
 use itertools::Itertools;
 use poulpy_core::{
-    GLWEOperations, TakeGLWECtSlice, TakeGLWEPt, glwe_packing,
+    GLWEOperations, TakeGLWEPlaintext, TakeGLWESlice, glwe_packing,
     layouts::{
         GLWE, GLWEInfos, GLWEPlaintextLayout, LWEInfos, TorusPrecision,
         prepared::{AutomorphismKeyPrepared, GLWESecretPrepared},
@@ -11,7 +11,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
         VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
         VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
         VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
         VmpApplyDftToDftTmpBytes,
@@ -39,7 +39,7 @@ impl<D: DataMut, T: UnsignedInteger> FheUintWord<D, T> {
         Module<BE>: VecZnxSub
             + VecZnxCopy
             + VecZnxNegateInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxAddInplace
             + VmpApplyDftToDftTmpBytes
             + VecZnxNormalizeTmpBytes
@@ -62,7 +62,7 @@ impl<D: DataMut, T: UnsignedInteger> FheUintWord<D, T> {
             + VecZnxAutomorphismInplace<BE>
             + VecZnxBigSubSmallNegateInplace<BE>
             + VecZnxRotate,
-        Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWECtSlice,
+        Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWESlice,
     {
         // Repacks the GLWE ciphertexts bits
         let gap: usize = module.n() / T::WORD_SIZE;
@@ -109,7 +109,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintWord<D, T> {
         scratch: &mut Scratch<BE>,
     ) where
         Module<BE>: VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<BE>
             + VecZnxDftApply<BE>
             + SvpApplyDftToDftInplace<BE>
@@ -122,7 +122,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintWord<D, T> {
             + VecZnxAddNormal
             + VecZnxNormalize<BE>
             + VecZnxSub,
-        Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPt<BE>,
+        Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPlaintext<BE>,
     {
         #[cfg(debug_assertions)]
         {
@@ -167,7 +167,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits> FheUintWord<D, T> {
             + VecZnxBigAddInplace<BE>
             + VecZnxBigAddSmallInplace<BE>
             + VecZnxBigNormalize<BE>,
-        Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
+        Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
     {
         #[cfg(debug_assertions)]
         {
@@ -1,6 +1,6 @@
 use itertools::Itertools;
 use poulpy_core::{
-    GLWEExternalProductInplace, GLWEOperations, TakeGLWECtSlice,
+    GLWEExternalProductInplace, GLWEOperations, TakeGLWESlice,
     layouts::{
         GLWE, GLWEToMut, LWEInfos,
         prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
@@ -49,7 +49,7 @@ impl<C: BitCircuitInfo, const N: usize, T: UnsignedInteger, BE: Backend> Circuit
 where
     Self: GetBitCircuitInfo<T>,
     Module<BE>: Cmux<BE> + VecZnxCopy,
-    Scratch<BE>: TakeGLWECtSlice,
+    Scratch<BE>: TakeGLWESlice,
 {
     fn execute<O>(
         &self,
@@ -9,7 +9,7 @@ use crate::tfhe::{
     },
 };
 use poulpy_core::{
-    TakeGGSW, TakeGLWECt,
+    TakeGGSW, TakeGLWE,
     layouts::{
         GLWESecret, GLWEToLWEKeyLayout, GLWEToLWESwitchingKey, LWE, LWESecret,
         prepared::{GLWEToLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
@@ -17,10 +17,10 @@ use poulpy_core::{
 };
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx,
         TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
         VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
         VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPrepare,
     },
@@ -77,7 +77,7 @@ impl<BRA: BlindRotationAlgo> BDDKey<Vec<u8>, Vec<u8>, BRA> {
         Module<BE>: SvpApplyDftToDft<BE>
             + VecZnxIdftApplyTmpA<BE>
             + VecZnxAddScalarInplace
-            + VecZnxDftAllocBytes
+            + VecZnxDftBytesOf
             + VecZnxBigNormalize<BE>
             + VecZnxDftApply<BE>
             + SvpApplyDftToDftInplace<BE>
@@ -92,7 +92,7 @@ impl<BRA: BlindRotationAlgo> BDDKey<Vec<u8>, Vec<u8>, BRA> {
             + VecZnxSub
             + SvpPrepare<BE>
             + VecZnxSwitchRing
-            + SvpPPolAllocBytes
+            + SvpPPolBytesOf
             + SvpPPolAlloc<BE>
             + VecZnxAutomorphism
             + VecZnxAutomorphismInplace<BE>,
@@ -157,7 +157,7 @@ where
     BE: Backend,
     Module<BE>: VmpPrepare<BE>
         + VecZnxRotate
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<BE>
@@ -168,7 +168,7 @@ where
         + VecZnxBigNormalize<BE>
         + VecZnxNormalize<BE>
         + VecZnxNormalizeTmpBytes,
-    Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWECt + TakeVecZnx + TakeGGSW,
+    Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWE + TakeVecZnx + TakeGGSW,
     CircuitBootstrappingKeyPrepared<CBT, BRA, BE>: CirtuitBootstrappingExecute<BE>,
 {
     fn prepare(
@@ -206,7 +206,7 @@ where
     BE: Backend,
     Module<BE>: VmpPrepare<BE>
         + VecZnxRotate
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<BE>
@@ -217,7 +217,7 @@ where
         + VecZnxBigNormalize<BE>
         + VecZnxNormalize<BE>
         + VecZnxNormalizeTmpBytes,
-    Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWECt + TakeVecZnx + TakeGGSW,
+    Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWE + TakeVecZnx + TakeGGSW,
     CircuitBootstrappingKeyPrepared<CBT, BRA, BE>: CirtuitBootstrappingExecute<BE>,
 {
     fn prepare(
@@ -2,7 +2,7 @@ use std::time::Instant;
 
 use poulpy_backend::FFT64Ref;
 use poulpy_core::{
-    TakeGGSW, TakeGLWEPt,
+    TakeGGSW, TakeGLWEPlaintext,
     layouts::{
         GGSWCiphertextLayout, GLWELayout, GLWESecret, LWEInfos, LWESecret,
         prepared::{GLWESecretPrepared, PrepareAlloc},
@@ -11,11 +11,11 @@ use poulpy_core::{
 use poulpy_hal::{
     api::{
         ModuleNew, ScratchAvailable, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace,
-        SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
+        SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
         VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
-        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
         VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
         VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
@@ -51,7 +51,7 @@ where
     Module<BE>: ModuleNew<BE> + SvpPPolAlloc<BE> + SvpPrepare<BE> + VmpPMatAlloc<BE>,
     ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
     Module<BE>: VecZnxAddScalarInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<BE>
         + VecZnxDftApply<BE>
         + SvpApplyDftToDftInplace<BE>
@@ -68,16 +68,16 @@ where
     Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGGSW + TakeScalarZnx + TakeSlice,
     Module<BE>: VecZnxCopy + VecZnxNegateInplace + VmpApplyDftToDftTmpBytes + VmpApplyDftToDft<BE> + VmpApplyDftToDftAdd<BE>,
     Module<BE>: VecZnxBigAddInplace<BE> + VecZnxBigAddSmallInplace<BE> + VecZnxBigNormalize<BE>,
-    Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
+    Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
     Module<BE>: VecZnxAutomorphism
         + VecZnxSwitchRing
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxIdftApplyTmpA<BE>
         + SvpApplyDftToDft<BE>
         + VecZnxBigAlloc<BE>
         + VecZnxDftAlloc<BE>
         + VecZnxBigNormalizeTmpBytes
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxRotateInplace<BE>
         + VecZnxBigAutomorphismInplace<BE>
         + VecZnxRshInplace<BE>
@@ -85,7 +85,7 @@ where
         + VecZnxAutomorphismInplace<BE>
         + VecZnxBigSubSmallNegateInplace<BE>
         + VecZnxRotateInplaceTmpBytes
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftAddInplace<BE>
         + VecZnxRotate
         + ZnFillUniform
@@ -1,9 +1,9 @@
 use itertools::izip;
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
-        TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
+        ScratchAvailable, SvpApplyDftToDft, SvpPPolBytesOf, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
+        TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
         VecZnxDftSubInplace, VecZnxDftZero, VecZnxIdftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes,
         VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
         VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
@@ -12,7 +12,7 @@ use poulpy_hal::{
 };
 
 use poulpy_core::{
-    Distribution, GLWEOperations, TakeGLWECt,
+    Distribution, GLWEOperations, TakeGLWE,
     layouts::{GGSWInfos, GLWE, GLWEInfos, GLWEToMut, LWE, LWECiphertextToRef, LWEInfos},
 };
 
@@ -31,10 +31,10 @@ pub fn cggi_blind_rotate_scratch_space<B: Backend, OUT, GGSW>(
 where
     OUT: GLWEInfos,
     GGSW: GGSWInfos,
-    Module<B>: VecZnxDftAllocBytes
+    Module<B>: VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxNormalizeTmpBytes
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxIdftApplyTmpBytes
         + VecZnxBigNormalizeTmpBytes,
 {
@@ -43,10 +43,10 @@ where
     if block_size > 1 {
         let cols: usize = (brk_infos.rank() + 1).into();
         let dnum: usize = brk_infos.dnum().into();
-        let acc_dft: usize = module.vec_znx_dft_bytes_of(cols, dnum) * extension_factor;
-        let acc_big: usize = module.vec_znx_big_bytes_of(1, brk_size);
-        let vmp_res: usize = module.vec_znx_dft_bytes_of(cols, brk_size) * extension_factor;
-        let vmp_xai: usize = module.vec_znx_dft_bytes_of(1, brk_size);
+        let acc_dft: usize = module.bytes_of_vec_znx_dft(cols, dnum) * extension_factor;
+        let acc_big: usize = module.bytes_of_vec_znx_big(1, brk_size);
+        let vmp_res: usize = module.bytes_of_vec_znx_dft(cols, brk_size) * extension_factor;
+        let vmp_xai: usize = module.bytes_of_vec_znx_dft(1, brk_size);
         let acc_dft_add: usize = vmp_res;
         let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(brk_size, dnum, dnum, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
         let acc: usize = if extension_factor > 1 {
@@ -67,9 +67,9 @@
 
 impl<D: DataRef, B: Backend> BlincRotationExecute<B> for BlindRotationKeyPrepared<D, CGGI, B>
 where
-    Module<B>: VecZnxBigAllocBytes
-        + VecZnxDftAllocBytes
-        + SvpPPolAllocBytes
+    Module<B>: VecZnxBigBytesOf
+        + VecZnxDftBytesOf
+        + SvpPPolBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpBytes
@@ -129,9 +129,9 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
     DataRes: DataMut,
     DataIn: DataRef,
     DataBrk: DataRef,
-    Module<B>: VecZnxBigAllocBytes
-        + VecZnxDftAllocBytes
-        + SvpPPolAllocBytes
+    Module<B>: VecZnxBigBytesOf
+        + VecZnxDftBytesOf
+        + SvpPPolBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpBytes
@@ -296,9 +296,9 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
     DataRes: DataMut,
     DataIn: DataRef,
     DataBrk: DataRef,
-    Module<B>: VecZnxBigAllocBytes
-        + VecZnxDftAllocBytes
-        + SvpPPolAllocBytes
+    Module<B>: VecZnxBigBytesOf
+        + VecZnxDftBytesOf
+        + SvpPPolBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpBytes
@@ -418,9 +418,9 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
     DataRes: DataMut,
     DataIn: DataRef,
     DataBrk: DataRef,
-    Module<B>: VecZnxBigAllocBytes
-        + VecZnxDftAllocBytes
-        + SvpPPolAllocBytes
+    Module<B>: VecZnxBigBytesOf
+        + VecZnxDftBytesOf
+        + SvpPPolBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpBytes
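The scratch-space hunk above sums per-temporary byte counts returned by the renamed bytes_of_* accessors. Below is a self-contained sketch of that budgeting arithmetic with invented sizes and a 64-byte alignment assumption; the free function bytes_of_vec_znx_dft is a toy stand-in for the Module method, not poulpy's API, and the real code may account differently.

// Hypothetical re-creation of the budgeting in cggi_blind_rotate_scratch_space:
// sum the byte sizes of every temporary, each rounded up to an alignment boundary.
const ALIGN: usize = 64;

fn aligned(bytes: usize) -> usize {
    bytes.div_ceil(ALIGN) * ALIGN
}

// Toy stand-in for module.bytes_of_vec_znx_dft(cols, size).
fn bytes_of_vec_znx_dft(n: usize, cols: usize, size: usize) -> usize {
    n * cols * size * 8
}

fn main() {
    let (n, cols, dnum, brk_size, extension_factor) = (1024, 3, 4, 5, 2);
    let acc_dft = bytes_of_vec_znx_dft(n, cols, dnum) * extension_factor;
    let vmp_res = bytes_of_vec_znx_dft(n, cols, brk_size) * extension_factor;
    let vmp_xai = bytes_of_vec_znx_dft(n, 1, brk_size);
    let acc_dft_add = vmp_res;
    let total = [acc_dft, vmp_res, vmp_xai, acc_dft_add]
        .into_iter()
        .map(aligned)
        .sum::<usize>();
    println!("scratch budget: {total} bytes");
}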
@@ -1,9 +1,8 @@
 use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
-        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
-        VmpPMatAlloc, VmpPrepare,
+        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxView, ZnxViewMut},
     source::Source,
@@ -47,7 +46,7 @@ impl BlindRotationKey<Vec<u8>, CGGI> {
     pub fn generate_from_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         GGSW::encrypt_sk_scratch_space(module, infos)
     }
@@ -56,7 +55,7 @@ impl BlindRotationKey<Vec<u8>, CGGI> {
 impl<D: DataMut, B: Backend> BlindRotationKeyEncryptSk<B> for BlindRotationKey<D, CGGI>
 where
     Module<B>: VecZnxAddScalarInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -149,7 +148,7 @@ impl BlindRotationKeyCompressed<Vec<u8>, CGGI> {
     pub fn generate_from_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         GGSWCompressed::encrypt_sk_scratch_space(module, infos)
     }
@@ -169,7 +168,7 @@ impl<D: DataMut> BlindRotationKeyCompressed<D, CGGI> {
     DataSkGLWE: DataRef,
     DataSkLWE: DataRef,
     Module<B>: VecZnxAddScalarInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftSubInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftSubInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
         VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
         VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxSub, VecZnxSubInplace,
         VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal,
@@ -29,9 +29,9 @@ use poulpy_core::layouts::{
 
 pub fn test_blind_rotation<B>(module: &Module<B>, n_lwe: usize, block_size: usize, extension_factor: usize)
 where
-    Module<B>: VecZnxBigAllocBytes
-        + VecZnxDftAllocBytes
-        + SvpPPolAllocBytes
+    Module<B>: VecZnxBigBytesOf
+        + VecZnxDftBytesOf
+        + SvpPPolBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VecZnxIdftApplyTmpBytes
@@ -3,9 +3,9 @@ use std::collections::HashMap;
 use poulpy_hal::{
     api::{
         ScratchAvailable, TakeMatZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice,
-        VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+        VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
         VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
         VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
@@ -15,7 +15,7 @@ use poulpy_hal::{
 };
 
 use poulpy_core::{
-    GLWEOperations, TakeGGLWE, TakeGLWECt,
+    GLWEOperations, TakeGGLWE, TakeGLWE,
     layouts::{Dsize, GGLWECiphertextLayout, GGSWInfos, GLWEInfos, LWEInfos},
 };
 
@@ -44,7 +44,7 @@ where
         + VecZnxNegateInplace
         + VecZnxCopy
         + VecZnxSubInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -56,7 +56,7 @@ where
         + VecZnxAutomorphismInplace<B>
        + VecZnxBigSubSmallNegateInplace<B>
         + VecZnxRotateInplaceTmpBytes
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftAddInplace<B>
         + VecZnxRotate
         + VecZnxNormalize<B>,
@@ -145,7 +145,7 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
         + VecZnxNegateInplace
         + VecZnxCopy
         + VecZnxSubInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -156,7 +156,7 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
         + VecZnxBigNormalize<B>
         + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallNegateInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftAddInplace<B>
         + VecZnxRotateInplaceTmpBytes
         + VecZnxRotate
@@ -286,7 +286,7 @@ fn post_process<DataRes, DataA, B: Backend>(
         + VecZnxNegateInplace
         + VecZnxCopy
         + VecZnxSubInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
@@ -7,9 +7,9 @@ use std::collections::HashMap;
 
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx,
         TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
-        VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
         VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
         VecZnxSwitchRing, VmpPMatAlloc, VmpPrepare,
     },
@@ -78,7 +78,7 @@ where
     Module<B>: SvpApplyDftToDft<B>
         + VecZnxIdftApplyTmpA<B>
         + VecZnxAddScalarInplace
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -93,7 +93,7 @@ where
         + VecZnxSub
         + SvpPrepare<B>
         + VecZnxSwitchRing
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + SvpPPolAlloc<B>
         + VecZnxAutomorphism,
     Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeSvpPPol<B> + TakeVecZnxBig<B>,
@@ -2,11 +2,11 @@ use std::time::Instant;
 
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
         SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
         VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
-        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
         VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
         VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
         VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
@@ -45,7 +45,7 @@ where
     Module<B>: VecZnxFillUniform
         + VecZnxAddNormal
         + VecZnxNormalizeInplace<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -58,7 +58,7 @@ where
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
         + VecZnxSwitchRing
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxIdftApplyTmpA<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddInplace<B>
@@ -73,7 +73,7 @@ where
         + VmpApplyDftToDftTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxRotateInplace<B>
         + VecZnxBigAutomorphismInplace<B>
         + VecZnxRshInplace<B>
@@ -83,7 +83,7 @@ where
         + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallNegateInplace<B>
         + VecZnxRotateInplaceTmpBytes
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftAddInplace<B>
         + VecZnxRotate
         + ZnFillUniform
@@ -267,7 +267,7 @@ where
     Module<B>: VecZnxFillUniform
         + VecZnxAddNormal
         + VecZnxNormalizeInplace<B>
-        + VecZnxDftAllocBytes
+        + VecZnxDftBytesOf
         + VecZnxBigNormalize<B>
         + VecZnxDftApply<B>
         + SvpApplyDftToDftInplace<B>
@@ -280,7 +280,7 @@ where
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
         + VecZnxSwitchRing
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxIdftApplyTmpA<B>
         + SvpApplyDftToDft<B>
         + VecZnxBigAddInplace<B>
@@ -295,7 +295,7 @@ where
         + VmpApplyDftToDftTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + SvpPPolAllocBytes
+        + SvpPPolBytesOf
         + VecZnxRotateInplace<B>
         + VecZnxBigAutomorphismInplace<B>
         + VecZnxRotateInplaceTmpBytes
@@ -305,7 +305,7 @@ where
         + VecZnxCopy
         + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallNegateInplace<B>
-        + VecZnxBigAllocBytes
+        + VecZnxBigBytesOf
         + VecZnxDftAddInplace<B>
         + VecZnxRotate
         + ZnFillUniform