This commit is contained in:
Pro7ech
2025-10-12 21:34:10 +02:00
committed by Jean-Philippe Bossuat
parent f72363cc4b
commit 2b2b994f7d
169 changed files with 8705 additions and 7677 deletions

View File

@@ -3,13 +3,8 @@ use std::marker::PhantomData;
use poulpy_hal::{ use poulpy_hal::{
DEFAULTALIGN, alloc_aligned, DEFAULTALIGN, alloc_aligned,
api::ScratchFromBytes, api::ScratchFromBytes,
layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat}, layouts::{Backend, Scratch, ScratchOwned},
oep::{ oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
},
}; };
use crate::cpu_fft64_avx::FFT64Avx; use crate::cpu_fft64_avx::FFT64Avx;
@@ -64,178 +59,6 @@ where
} }
} }
unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Avx
where
B: ScratchFromBytesImpl<B>,
{
fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::alloc_bytes(n, cols));
(
ScalarZnx::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Avx
where
B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_alloc_bytes_impl(n, cols));
(
SvpPPol::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Avx
where
B: ScratchFromBytesImpl<B>,
{
fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::alloc_bytes(n, cols, size));
(
VecZnx::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Avx
where
B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vec_znx_big_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_big_alloc_bytes_impl(n, cols, size),
);
(
VecZnxBig::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Avx
where
B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vec_znx_dft_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_dft_alloc_bytes_impl(n, cols, size),
);
(
VecZnxDft::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Avx
where
B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
{
fn take_vec_znx_dft_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
let mut scratch: &mut Scratch<B> = scratch;
let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}
}
unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Avx
where
B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
{
fn take_vec_znx_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
let mut scratch: &mut Scratch<B> = scratch;
let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}
}
unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Avx
where
B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vmp_pmat_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vmp_pmat_alloc_bytes_impl(n, rows, cols_in, cols_out, size),
);
(
VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Avx
where
B: ScratchFromBytesImpl<B>,
{
fn take_mat_znx_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
MatZnx::alloc_bytes(n, rows, cols_in, cols_out, size),
);
(
MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
Scratch::from_bytes(rem_slice),
)
}
}
fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) { fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
let ptr: *mut u8 = data.as_mut_ptr(); let ptr: *mut u8 = data.as_mut_ptr();
let self_len: usize = data.len(); let self_len: usize = data.len();

View File

@@ -22,7 +22,7 @@ unsafe impl SvpPPolAllocImpl<Self> for FFT64Avx {
} }
unsafe impl SvpPPolAllocBytesImpl<Self> for FFT64Avx { unsafe impl SvpPPolAllocBytesImpl<Self> for FFT64Avx {
fn svp_ppol_alloc_bytes_impl(n: usize, cols: usize) -> usize { fn svp_ppol_bytes_of_impl(n: usize, cols: usize) -> usize {
Self::layout_prep_word_count() * n * cols * size_of::<f64>() Self::layout_prep_word_count() * n * cols * size_of::<f64>()
} }
} }

View File

@@ -27,7 +27,7 @@ use poulpy_hal::{
}; };
unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Avx { unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Avx {
fn vec_znx_big_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_big_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
Self::layout_big_word_count() * n * cols * size * size_of::<f64>() Self::layout_big_word_count() * n * cols * size * size_of::<f64>()
} }
} }

View File

@@ -24,7 +24,7 @@ unsafe impl VecZnxDftFromBytesImpl<Self> for FFT64Avx {
} }
unsafe impl VecZnxDftAllocBytesImpl<Self> for FFT64Avx { unsafe impl VecZnxDftAllocBytesImpl<Self> for FFT64Avx {
fn vec_znx_dft_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_dft_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
Self::layout_prep_word_count() * n * cols * size * size_of::<<FFT64Avx as Backend>::ScalarPrep>() Self::layout_prep_word_count() * n * cols * size * size_of::<<FFT64Avx as Backend>::ScalarPrep>()
} }
} }

View File

@@ -16,7 +16,7 @@ use poulpy_hal::{
use crate::cpu_fft64_avx::{FFT64Avx, module::FFT64ModuleHandle}; use crate::cpu_fft64_avx::{FFT64Avx, module::FFT64ModuleHandle};
unsafe impl VmpPMatAllocBytesImpl<Self> for FFT64Avx { unsafe impl VmpPMatAllocBytesImpl<Self> for FFT64Avx {
fn vmp_pmat_alloc_bytes_impl(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize { fn vmp_pmat_bytes_of_impl(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
Self::layout_prep_word_count() * n * rows * cols_in * cols_out * size * size_of::<f64>() Self::layout_prep_word_count() * n * rows * cols_in * cols_out * size * size_of::<f64>()
} }
} }

View File

@@ -3,13 +3,8 @@ use std::marker::PhantomData;
use poulpy_hal::{ use poulpy_hal::{
DEFAULTALIGN, alloc_aligned, DEFAULTALIGN, alloc_aligned,
api::ScratchFromBytes, api::ScratchFromBytes,
layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat}, layouts::{Backend, Scratch, ScratchOwned},
oep::{ oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
},
}; };
use crate::cpu_fft64_ref::FFT64Ref; use crate::cpu_fft64_ref::FFT64Ref;
@@ -64,178 +59,6 @@ where
} }
} }
unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Ref
where
B: ScratchFromBytesImpl<B>,
{
fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::alloc_bytes(n, cols));
(
ScalarZnx::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Ref
where
B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_alloc_bytes_impl(n, cols));
(
SvpPPol::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Ref
where
B: ScratchFromBytesImpl<B>,
{
fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::alloc_bytes(n, cols, size));
(
VecZnx::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Ref
where
B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vec_znx_big_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_big_alloc_bytes_impl(n, cols, size),
);
(
VecZnxBig::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Ref
where
B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vec_znx_dft_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_dft_alloc_bytes_impl(n, cols, size),
);
(
VecZnxDft::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Ref
where
B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
{
fn take_vec_znx_dft_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
let mut scratch: &mut Scratch<B> = scratch;
let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}
}
unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Ref
where
B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
{
fn take_vec_znx_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
let mut scratch: &mut Scratch<B> = scratch;
let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}
}
unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Ref
where
B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vmp_pmat_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vmp_pmat_alloc_bytes_impl(n, rows, cols_in, cols_out, size),
);
(
VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Ref
where
B: ScratchFromBytesImpl<B>,
{
fn take_mat_znx_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
MatZnx::alloc_bytes(n, rows, cols_in, cols_out, size),
);
(
MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
Scratch::from_bytes(rem_slice),
)
}
}
fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) { fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
let ptr: *mut u8 = data.as_mut_ptr(); let ptr: *mut u8 = data.as_mut_ptr();
let self_len: usize = data.len(); let self_len: usize = data.len();

View File

@@ -22,7 +22,7 @@ unsafe impl SvpPPolAllocImpl<Self> for FFT64Ref {
} }
unsafe impl SvpPPolAllocBytesImpl<Self> for FFT64Ref { unsafe impl SvpPPolAllocBytesImpl<Self> for FFT64Ref {
fn svp_ppol_alloc_bytes_impl(n: usize, cols: usize) -> usize { fn svp_ppol_bytes_of_impl(n: usize, cols: usize) -> usize {
Self::layout_prep_word_count() * n * cols * size_of::<f64>() Self::layout_prep_word_count() * n * cols * size_of::<f64>()
} }
} }

View File

@@ -27,7 +27,7 @@ use poulpy_hal::{
}; };
unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Ref { unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Ref {
fn vec_znx_big_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_big_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
Self::layout_big_word_count() * n * cols * size * size_of::<f64>() Self::layout_big_word_count() * n * cols * size * size_of::<f64>()
} }
} }

View File

@@ -24,7 +24,7 @@ unsafe impl VecZnxDftFromBytesImpl<Self> for FFT64Ref {
} }
unsafe impl VecZnxDftAllocBytesImpl<Self> for FFT64Ref { unsafe impl VecZnxDftAllocBytesImpl<Self> for FFT64Ref {
fn vec_znx_dft_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_dft_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
Self::layout_prep_word_count() * n * cols * size * size_of::<<FFT64Ref as Backend>::ScalarPrep>() Self::layout_prep_word_count() * n * cols * size * size_of::<<FFT64Ref as Backend>::ScalarPrep>()
} }
} }

View File

@@ -16,7 +16,7 @@ use poulpy_hal::{
use crate::cpu_fft64_ref::{FFT64Ref, module::FFT64ModuleHandle}; use crate::cpu_fft64_ref::{FFT64Ref, module::FFT64ModuleHandle};
unsafe impl VmpPMatAllocBytesImpl<Self> for FFT64Ref { unsafe impl VmpPMatAllocBytesImpl<Self> for FFT64Ref {
fn vmp_pmat_alloc_bytes_impl(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize { fn vmp_pmat_bytes_of_impl(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
Self::layout_prep_word_count() * n * rows * cols_in * cols_out * size * size_of::<f64>() Self::layout_prep_word_count() * n * rows * cols_in * cols_out * size * size_of::<f64>()
} }
} }

View File

@@ -3,13 +3,8 @@ use std::marker::PhantomData;
use poulpy_hal::{ use poulpy_hal::{
DEFAULTALIGN, alloc_aligned, DEFAULTALIGN, alloc_aligned,
api::ScratchFromBytes, api::ScratchFromBytes,
layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat}, layouts::{Backend, Scratch, ScratchOwned},
oep::{ oep::{ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl},
ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
},
}; };
use crate::cpu_spqlios::FFT64Spqlios; use crate::cpu_spqlios::FFT64Spqlios;
@@ -64,178 +59,6 @@ where
} }
} }
unsafe impl<B: Backend> TakeScalarZnxImpl<B> for FFT64Spqlios
where
B: ScratchFromBytesImpl<B>,
{
fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::alloc_bytes(n, cols));
(
ScalarZnx::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeSvpPPolImpl<B> for FFT64Spqlios
where
B: SvpPPolAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_svp_ppol_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::svp_ppol_alloc_bytes_impl(n, cols));
(
SvpPPol::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxImpl<B> for FFT64Spqlios
where
B: ScratchFromBytesImpl<B>,
{
fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::alloc_bytes(n, cols, size));
(
VecZnx::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxBigImpl<B> for FFT64Spqlios
where
B: VecZnxBigAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vec_znx_big_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxBig<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_big_alloc_bytes_impl(n, cols, size),
);
(
VecZnxBig::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxDftImpl<B> for FFT64Spqlios
where
B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vec_znx_dft_impl(
scratch: &mut Scratch<B>,
n: usize,
cols: usize,
size: usize,
) -> (VecZnxDft<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_dft_alloc_bytes_impl(n, cols, size),
);
(
VecZnxDft::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeVecZnxDftSliceImpl<B> for FFT64Spqlios
where
B: VecZnxDftAllocBytesImpl<B> + ScratchFromBytesImpl<B> + TakeVecZnxDftImpl<B>,
{
fn take_vec_znx_dft_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Scratch<B>) {
let mut scratch: &mut Scratch<B> = scratch;
let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = B::take_vec_znx_dft_impl(scratch, n, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}
}
unsafe impl<B: Backend> TakeVecZnxSliceImpl<B> for FFT64Spqlios
where
B: ScratchFromBytesImpl<B> + TakeVecZnxImpl<B>,
{
fn take_vec_znx_slice_impl(
scratch: &mut Scratch<B>,
len: usize,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Scratch<B>) {
let mut scratch: &mut Scratch<B> = scratch;
let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
for _ in 0..len {
let (znx, new_scratch) = B::take_vec_znx_impl(scratch, n, cols, size);
scratch = new_scratch;
slice.push(znx);
}
(slice, scratch)
}
}
unsafe impl<B: Backend> TakeVmpPMatImpl<B> for FFT64Spqlios
where
B: VmpPMatAllocBytesImpl<B> + ScratchFromBytesImpl<B>,
{
fn take_vmp_pmat_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vmp_pmat_alloc_bytes_impl(n, rows, cols_in, cols_out, size),
);
(
VmpPMat::from_data(take_slice, n, rows, cols_in, cols_out, size),
Scratch::from_bytes(rem_slice),
)
}
}
unsafe impl<B: Backend> TakeMatZnxImpl<B> for FFT64Spqlios
where
B: ScratchFromBytesImpl<B>,
{
fn take_mat_znx_impl(
scratch: &mut Scratch<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
MatZnx::alloc_bytes(n, rows, cols_in, cols_out, size),
);
(
MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),
Scratch::from_bytes(rem_slice),
)
}
}
fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) { fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]) {
let ptr: *mut u8 = data.as_mut_ptr(); let ptr: *mut u8 = data.as_mut_ptr();
let self_len: usize = data.len(); let self_len: usize = data.len();

View File

@@ -27,7 +27,7 @@ unsafe impl SvpPPolAllocImpl<Self> for FFT64Spqlios {
} }
unsafe impl SvpPPolAllocBytesImpl<Self> for FFT64Spqlios { unsafe impl SvpPPolAllocBytesImpl<Self> for FFT64Spqlios {
fn svp_ppol_alloc_bytes_impl(n: usize, cols: usize) -> usize { fn svp_ppol_bytes_of_impl(n: usize, cols: usize) -> usize {
FFT64Spqlios::layout_prep_word_count() * n * cols * size_of::<f64>() FFT64Spqlios::layout_prep_word_count() * n * cols * size_of::<f64>()
} }
} }

View File

@@ -22,7 +22,7 @@ use poulpy_hal::{
}; };
unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Spqlios { unsafe impl VecZnxBigAllocBytesImpl<Self> for FFT64Spqlios {
fn vec_znx_big_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_big_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
Self::layout_big_word_count() * n * cols * size * size_of::<f64>() Self::layout_big_word_count() * n * cols * size * size_of::<f64>()
} }
} }

View File

@@ -30,7 +30,7 @@ unsafe impl VecZnxDftFromBytesImpl<Self> for FFT64Spqlios {
} }
unsafe impl VecZnxDftAllocBytesImpl<Self> for FFT64Spqlios { unsafe impl VecZnxDftAllocBytesImpl<Self> for FFT64Spqlios {
fn vec_znx_dft_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_dft_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
Self::layout_prep_word_count() * n * cols * size * size_of::<<FFT64Spqlios as Backend>::ScalarPrep>() Self::layout_prep_word_count() * n * cols * size * size_of::<<FFT64Spqlios as Backend>::ScalarPrep>()
} }
} }

View File

@@ -16,7 +16,7 @@ use crate::cpu_spqlios::{
}; };
unsafe impl VmpPMatAllocBytesImpl<Self> for FFT64Spqlios { unsafe impl VmpPMatAllocBytesImpl<Self> for FFT64Spqlios {
fn vmp_pmat_alloc_bytes_impl(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize { fn vmp_pmat_bytes_of_impl(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
Self::layout_prep_word_count() * n * rows * cols_in * cols_out * size * size_of::<f64>() Self::layout_prep_word_count() * n * rows * cols_in * cols_out * size * size_of::<f64>()
} }
} }

View File

@@ -18,7 +18,7 @@ unsafe impl SvpPPolAllocImpl<Self> for NTT120 {
} }
unsafe impl SvpPPolAllocBytesImpl<Self> for NTT120 { unsafe impl SvpPPolAllocBytesImpl<Self> for NTT120 {
fn svp_ppol_alloc_bytes_impl(n: usize, cols: usize) -> usize { fn svp_ppol_bytes_of_impl(n: usize, cols: usize) -> usize {
NTT120::layout_prep_word_count() * n * cols * size_of::<i64>() NTT120::layout_prep_word_count() * n * cols * size_of::<i64>()
} }
} }

View File

@@ -3,7 +3,7 @@ use poulpy_hal::{layouts::Backend, oep::VecZnxBigAllocBytesImpl};
use crate::cpu_spqlios::NTT120; use crate::cpu_spqlios::NTT120;
unsafe impl VecZnxBigAllocBytesImpl<NTT120> for NTT120 { unsafe impl VecZnxBigAllocBytesImpl<NTT120> for NTT120 {
fn vec_znx_big_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_big_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
NTT120::layout_big_word_count() * n * cols * size * size_of::<i128>() NTT120::layout_big_word_count() * n * cols * size * size_of::<i128>()
} }
} }

View File

@@ -6,7 +6,7 @@ use poulpy_hal::{
use crate::cpu_spqlios::NTT120; use crate::cpu_spqlios::NTT120;
unsafe impl VecZnxDftAllocBytesImpl<NTT120> for NTT120 { unsafe impl VecZnxDftAllocBytesImpl<NTT120> for NTT120 {
fn vec_znx_dft_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize { fn vec_znx_dft_bytes_of_impl(n: usize, cols: usize, size: usize) -> usize {
NTT120::layout_prep_word_count() * n * cols * size * size_of::<i64>() NTT120::layout_prep_word_count() * n * cols * size * size_of::<i64>()
} }
} }

View File

@@ -52,8 +52,8 @@ fn main() {
// Scratch space // Scratch space
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc( let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
GLWECiphertext::encrypt_sk_scratch_space(&module, n, base2k, ct.k()) GLWECiphertext::encrypt_sk_tmp_bytes(&module, n, base2k, ct.k())
| GLWECiphertext::decrypt_scratch_space(&module, n, base2k, ct.k()), | GLWECiphertext::decrypt_tmp_bytes(&module, n, base2k, ct.k()),
); );
// Generate secret-key // Generate secret-key

View File

@@ -1,7 +1,6 @@
use poulpy_core::layouts::{ use poulpy_core::layouts::{
Base2K, Degree, Dnum, Dsize, GGSWCiphertext, GGSWCiphertextLayout, GLWECiphertext, GLWECiphertextLayout, GLWESecret, Rank, Base2K, Dnum, Dsize, GGSW, GGSWLayout, GLWE, GLWELayout, GLWESecret, Rank, RingDegree, TorusPrecision,
TorusPrecision, prepared::{GGSWPrepared, GLWESecretPrepared, PrepareAlloc},
prepared::{GGSWCiphertextPrepared, GLWESecretPrepared, PrepareAlloc},
}; };
use std::hint::black_box; use std::hint::black_box;
@@ -29,7 +28,7 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() { fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n); let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
let n: Degree = Degree(module.n() as u32); let n: RingDegree = RingDegree(module.n() as u32);
let base2k: Base2K = p.base2k; let base2k: Base2K = p.base2k;
let k_ct_in: TorusPrecision = p.k_ct_in; let k_ct_in: TorusPrecision = p.k_ct_in;
let k_ct_out: TorusPrecision = p.k_ct_out; let k_ct_out: TorusPrecision = p.k_ct_out;
@@ -39,7 +38,7 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
let dnum: Dnum = Dnum(1); //(p.k_ct_in.div_ceil(p.base2k); let dnum: Dnum = Dnum(1); //(p.k_ct_in.div_ceil(p.base2k);
let ggsw_layout: GGSWCiphertextLayout = GGSWCiphertextLayout { let ggsw_layout: GGSWLayout = GGSWLayout {
n, n,
base2k, base2k,
k: k_ggsw, k: k_ggsw,
@@ -48,36 +47,36 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
rank, rank,
}; };
let glwe_out_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_out_layout: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_ct_out, k: k_ct_out,
rank, rank,
}; };
let glwe_in_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_in_layout: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_ct_in, k: k_ct_in,
rank, rank,
}; };
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(&ggsw_layout); let mut ct_ggsw: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_layout);
let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_in_layout); let mut ct_glwe_in: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_in_layout);
let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_out_layout); let mut ct_glwe_out: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_out_layout);
let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n.into(), 1); let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n.into(), 1);
let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc( let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(&module, &ggsw_layout) GGSW::encrypt_sk_tmp_bytes(&module, &ggsw_layout)
| GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_in_layout) | GLWE::encrypt_sk_tmp_bytes(&module, &glwe_in_layout)
| GLWECiphertext::external_product_scratch_space(&module, &glwe_out_layout, &glwe_in_layout, &ggsw_layout), | GLWE::external_product_tmp_bytes(&module, &glwe_out_layout, &glwe_in_layout, &ggsw_layout),
); );
let mut source_xs = Source::new([0u8; 32]); let mut source_xs = Source::new([0u8; 32]);
let mut source_xe = Source::new([0u8; 32]); let mut source_xe = Source::new([0u8; 32]);
let mut source_xa = Source::new([0u8; 32]); let mut source_xa = Source::new([0u8; 32]);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_in_layout); let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_in_layout);
sk.fill_ternary_prob(0.5, &mut source_xs); sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow()); let sk_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow());
@@ -98,7 +97,7 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
scratch.borrow(), scratch.borrow(),
); );
let ggsw_prepared: GGSWCiphertextPrepared<Vec<u8>, FFT64Spqlios> = ct_ggsw.prepare_alloc(&module, scratch.borrow()); let ggsw_prepared: GGSWPrepared<Vec<u8>, FFT64Spqlios> = ct_ggsw.prepare_alloc(&module, scratch.borrow());
move || { move || {
ct_glwe_out.external_product(&module, &ct_glwe_in, &ggsw_prepared, scratch.borrow()); ct_glwe_out.external_product(&module, &ct_glwe_in, &ggsw_prepared, scratch.borrow());
@@ -138,7 +137,7 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() { fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n); let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
let n: Degree = Degree(module.n() as u32); let n: RingDegree = RingDegree(module.n() as u32);
let base2k: Base2K = p.base2k; let base2k: Base2K = p.base2k;
let k_glwe: TorusPrecision = p.k_ct; let k_glwe: TorusPrecision = p.k_ct;
let k_ggsw: TorusPrecision = p.k_ggsw; let k_ggsw: TorusPrecision = p.k_ggsw;
@@ -147,7 +146,7 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
let dnum: Dnum = p.k_ct.div_ceil(p.base2k).into(); let dnum: Dnum = p.k_ct.div_ceil(p.base2k).into();
let ggsw_layout: GGSWCiphertextLayout = GGSWCiphertextLayout { let ggsw_layout: GGSWLayout = GGSWLayout {
n, n,
base2k, base2k,
k: k_ggsw, k: k_ggsw,
@@ -156,28 +155,28 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
rank, rank,
}; };
let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_layout: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_glwe, k: k_glwe,
rank, rank,
}; };
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(&ggsw_layout); let mut ct_ggsw: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_layout);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_layout); let mut ct_glwe: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_layout);
let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n.into(), 1); let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n.into(), 1);
let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc( let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(&module, &ggsw_layout) GGSW::encrypt_sk_tmp_bytes(&module, &ggsw_layout)
| GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_layout) | GLWE::encrypt_sk_tmp_bytes(&module, &glwe_layout)
| GLWECiphertext::external_product_inplace_scratch_space(&module, &glwe_layout, &ggsw_layout), | GLWE::external_product_inplace_tmp_bytes(&module, &glwe_layout, &ggsw_layout),
); );
let mut source_xs: Source = Source::new([0u8; 32]); let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]); let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]); let mut source_xa: Source = Source::new([0u8; 32]);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_layout); let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_layout);
sk.fill_ternary_prob(0.5, &mut source_xs); sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow()); let sk_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow());
@@ -198,7 +197,7 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
scratch.borrow(), scratch.borrow(),
); );
let ggsw_prepared: GGSWCiphertextPrepared<Vec<u8>, FFT64Spqlios> = ct_ggsw.prepare_alloc(&module, scratch.borrow()); let ggsw_prepared: GGSWPrepared<Vec<u8>, FFT64Spqlios> = ct_ggsw.prepare_alloc(&module, scratch.borrow());
move || { move || {
let scratch_borrow = scratch.borrow(); let scratch_borrow = scratch.borrow();

View File

@@ -1,7 +1,7 @@
use poulpy_core::layouts::{ use poulpy_core::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEAutomorphismKey, GGLWEAutomorphismKeyLayout, GGLWESwitchingKey, GGLWESwitchingKeyLayout, AutomorphismKey, AutomorphismKeyLayout, Base2K, Dnum, Dsize, GLWE, GLWELayout, GLWESecret, GLWESwitchingKey,
GLWECiphertext, GLWECiphertextLayout, GLWESecret, Rank, TorusPrecision, GLWESwitchingKeyLayout, Rank, RingDegree, TorusPrecision,
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared, GLWESecretPrepared, PrepareAlloc}, prepared::{AutomorphismKeyPrepared, GLWESecretPrepared, GLWESwitchingKeyPrepared, PrepareAlloc},
}; };
use std::{hint::black_box, time::Duration}; use std::{hint::black_box, time::Duration};
@@ -29,7 +29,7 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() { fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n); let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
let n: Degree = Degree(module.n() as u32); let n: RingDegree = RingDegree(module.n() as u32);
let base2k: Base2K = p.base2k; let base2k: Base2K = p.base2k;
let k_glwe_in: TorusPrecision = p.k_ct_in; let k_glwe_in: TorusPrecision = p.k_ct_in;
let k_glwe_out: TorusPrecision = p.k_ct_out; let k_glwe_out: TorusPrecision = p.k_ct_out;
@@ -39,7 +39,7 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
let dnum: Dnum = p.k_ct_in.div_ceil(p.base2k.0 * dsize.0).into(); let dnum: Dnum = p.k_ct_in.div_ceil(p.base2k.0 * dsize.0).into();
let gglwe_atk_layout: GGLWEAutomorphismKeyLayout = GGLWEAutomorphismKeyLayout { let gglwe_atk_layout: AutomorphismKeyLayout = AutomorphismKeyLayout {
n, n,
base2k, base2k,
k: k_gglwe, k: k_gglwe,
@@ -48,28 +48,28 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
dsize, dsize,
}; };
let glwe_in_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_in_layout: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_glwe_in, k: k_glwe_in,
rank, rank,
}; };
let glwe_out_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_out_layout: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_glwe_out, k: k_glwe_out,
rank, rank,
}; };
let mut ksk: GGLWEAutomorphismKey<Vec<u8>> = GGLWEAutomorphismKey::alloc(&gglwe_atk_layout); let mut ksk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc_from_infos(&gglwe_atk_layout);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_in_layout); let mut ct_in: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_in_layout);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_out_layout); let mut ct_out: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_out_layout);
let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc( let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
GGLWESwitchingKey::encrypt_sk_scratch_space(&module, &gglwe_atk_layout) GLWESwitchingKey::encrypt_sk_tmp_bytes(&module, &gglwe_atk_layout)
| GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_in_layout) | GLWE::encrypt_sk_tmp_bytes(&module, &glwe_in_layout)
| GLWECiphertext::keyswitch_scratch_space( | GLWE::keyswitch_tmp_bytes(
&module, &module,
&glwe_out_layout, &glwe_out_layout,
&glwe_in_layout, &glwe_in_layout,
@@ -81,7 +81,7 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
let mut source_xe: Source = Source::new([0u8; 32]); let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]); let mut source_xa: Source = Source::new([0u8; 32]);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_in_layout); let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_in_layout);
sk_in.fill_ternary_prob(0.5, &mut source_xs); sk_in.fill_ternary_prob(0.5, &mut source_xs);
let sk_in_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_in.prepare_alloc(&module, scratch.borrow()); let sk_in_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_in.prepare_alloc(&module, scratch.borrow());
@@ -102,7 +102,7 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
scratch.borrow(), scratch.borrow(),
); );
let ksk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, _> = ksk.prepare_alloc(&module, scratch.borrow()); let ksk_prepared: AutomorphismKeyPrepared<Vec<u8>, _> = ksk.prepare_alloc(&module, scratch.borrow());
move || { move || {
ct_out.automorphism(&module, &ct_in, &ksk_prepared, scratch.borrow()); ct_out.automorphism(&module, &ct_in, &ksk_prepared, scratch.borrow());
@@ -148,7 +148,7 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() { fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n); let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
let n: Degree = Degree(module.n() as u32); let n: RingDegree = RingDegree(module.n() as u32);
let base2k: Base2K = p.base2k; let base2k: Base2K = p.base2k;
let k_ct: TorusPrecision = p.k_ct; let k_ct: TorusPrecision = p.k_ct;
let k_ksk: TorusPrecision = p.k_ksk; let k_ksk: TorusPrecision = p.k_ksk;
@@ -157,7 +157,7 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
let dnum: Dnum = p.k_ct.div_ceil(p.base2k).into(); let dnum: Dnum = p.k_ct.div_ceil(p.base2k).into();
let gglwe_layout: GGLWESwitchingKeyLayout = GGLWESwitchingKeyLayout { let gglwe_layout: GLWESwitchingKeyLayout = GLWESwitchingKeyLayout {
n, n,
base2k, base2k,
k: k_ksk, k: k_ksk,
@@ -167,31 +167,31 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
rank_out: rank, rank_out: rank,
}; };
let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_layout: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_ct, k: k_ct,
rank, rank,
}; };
let mut ksk: GGLWESwitchingKey<Vec<u8>> = GGLWESwitchingKey::alloc(&gglwe_layout); let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc_from_infos(&gglwe_layout);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_layout); let mut ct: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_layout);
let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc( let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
GGLWESwitchingKey::encrypt_sk_scratch_space(&module, &gglwe_layout) GLWESwitchingKey::encrypt_sk_tmp_bytes(&module, &gglwe_layout)
| GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_layout) | GLWE::encrypt_sk_tmp_bytes(&module, &glwe_layout)
| GLWECiphertext::keyswitch_inplace_scratch_space(&module, &glwe_layout, &gglwe_layout), | GLWE::keyswitch_inplace_tmp_bytes(&module, &glwe_layout, &gglwe_layout),
); );
let mut source_xs: Source = Source::new([0u8; 32]); let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]); let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]); let mut source_xa: Source = Source::new([0u8; 32]);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_layout); let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_layout);
sk_in.fill_ternary_prob(0.5, &mut source_xs); sk_in.fill_ternary_prob(0.5, &mut source_xs);
let sk_in_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_in.prepare_alloc(&module, scratch.borrow()); let sk_in_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_in.prepare_alloc(&module, scratch.borrow());
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_layout); let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_layout);
sk_out.fill_ternary_prob(0.5, &mut source_xs); sk_out.fill_ternary_prob(0.5, &mut source_xs);
ksk.encrypt_sk( ksk.encrypt_sk(
@@ -211,7 +211,7 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
scratch.borrow(), scratch.borrow(),
); );
let ksk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, FFT64Spqlios> = ksk.prepare_alloc(&module, scratch.borrow()); let ksk_prepared: GLWESwitchingKeyPrepared<Vec<u8>, FFT64Spqlios> = ksk.prepare_alloc(&module, scratch.borrow());
move || { move || {
ct.keyswitch_inplace(&module, &ksk_prepared, scratch.borrow()); ct.keyswitch_inplace(&module, &ksk_prepared, scratch.borrow());

View File

@@ -2,8 +2,7 @@ use poulpy_backend::cpu_spqlios::FFT64Spqlios;
use poulpy_core::{ use poulpy_core::{
GLWEOperations, SIGMA, GLWEOperations, SIGMA,
layouts::{ layouts::{
Base2K, Degree, GLWECiphertext, GLWECiphertextLayout, GLWEPlaintext, GLWEPlaintextLayout, GLWESecret, LWEInfos, Rank, Base2K, GLWE, GLWELayout, GLWEPlaintext, GLWEPlaintextLayout, GLWESecret, LWEInfos, Rank, RingDegree, TorusPrecision,
TorusPrecision,
prepared::{GLWESecretPrepared, PrepareAlloc}, prepared::{GLWESecretPrepared, PrepareAlloc},
}, },
}; };
@@ -17,7 +16,7 @@ fn main() {
// Ring degree // Ring degree
let log_n: usize = 10; let log_n: usize = 10;
let n: Degree = Degree(1 << log_n); let n: RingDegree = RingDegree(1 << log_n);
// Base-2-k (implicit digit decomposition) // Base-2-k (implicit digit decomposition)
let base2k: Base2K = Base2K(14); let base2k: Base2K = Base2K(14);
@@ -34,7 +33,7 @@ fn main() {
// Instantiate Module (DFT Tables) // Instantiate Module (DFT Tables)
let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(n.0 as u64); let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(n.0 as u64);
let glwe_ct_infos: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_ct_infos: GLWELayout = GLWELayout {
n, n,
base2k, base2k,
k: k_ct, k: k_ct,
@@ -44,9 +43,9 @@ fn main() {
let glwe_pt_infos: GLWEPlaintextLayout = GLWEPlaintextLayout { n, base2k, k: k_pt }; let glwe_pt_infos: GLWEPlaintextLayout = GLWEPlaintextLayout { n, base2k, k: k_pt };
// Allocates ciphertext & plaintexts // Allocates ciphertext & plaintexts
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_ct_infos); let mut ct: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_ct_infos);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&glwe_pt_infos); let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(&glwe_pt_infos);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&glwe_pt_infos); let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(&glwe_pt_infos);
// CPRNG // CPRNG
let mut source_xs: Source = Source::new([0u8; 32]); let mut source_xs: Source = Source::new([0u8; 32]);
@@ -55,12 +54,11 @@ fn main() {
// Scratch space // Scratch space
let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc( let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_ct_infos) GLWE::encrypt_sk_tmp_bytes(&module, &glwe_ct_infos) | GLWE::decrypt_tmp_bytes(&module, &glwe_ct_infos),
| GLWECiphertext::decrypt_scratch_space(&module, &glwe_ct_infos),
); );
// Generate secret-key // Generate secret-key
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_ct_infos); let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_ct_infos);
sk.fill_ternary_prob(0.5, &mut source_xs); sk.fill_ternary_prob(0.5, &mut source_xs);
// Backend-prepared secret // Backend-prepared secret

View File

@@ -1,16 +1,16 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, ScratchAvailable, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize,
VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero}, layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
}; };
use crate::layouts::{GGLWEAutomorphismKey, GGLWEInfos, GLWECiphertext, prepared::GGLWEAutomorphismKeyPrepared}; use crate::layouts::{AutomorphismKey, GGLWEInfos, GLWE, prepared::AutomorphismKeyPrepared};
impl GGLWEAutomorphismKey<Vec<u8>> { impl AutomorphismKey<Vec<u8>> {
pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY>( pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY>(
module: &Module<B>, module: &Module<B>,
out_infos: &OUT, out_infos: &OUT,
in_infos: &IN, in_infos: &IN,
@@ -20,9 +20,9 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
OUT: GGLWEInfos, OUT: GGLWEInfos,
IN: GGLWEInfos, IN: GGLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
GLWECiphertext::keyswitch_scratch_space( GLWE::keyswitch_tmp_bytes(
module, module,
&out_infos.glwe_layout(), &out_infos.glwe_layout(),
&in_infos.glwe_layout(), &in_infos.glwe_layout(),
@@ -30,25 +30,25 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
) )
} }
pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where where
OUT: GGLWEInfos, OUT: GGLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
GGLWEAutomorphismKey::automorphism_scratch_space(module, out_infos, out_infos, key_infos) AutomorphismKey::automorphism_tmp_bytes(module, out_infos, out_infos, key_infos)
} }
} }
impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> { impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
pub fn automorphism<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn automorphism<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lhs: &GGLWEAutomorphismKey<DataLhs>, lhs: &AutomorphismKey<DataLhs>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -61,7 +61,7 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
+ VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -103,8 +103,8 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
(0..self.rank_in().into()).for_each(|col_i| { (0..self.rank_in().into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_j| { (0..self.dnum().into()).for_each(|row_j| {
let mut res_ct: GLWECiphertext<&mut [u8]> = self.at_mut(row_j, col_i); let mut res_ct: GLWE<&mut [u8]> = self.at_mut(row_j, col_i);
let lhs_ct: GLWECiphertext<&[u8]> = lhs.at(row_j, col_i); let lhs_ct: GLWE<&[u8]> = lhs.at(row_j, col_i);
// Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a) // Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
(0..cols_out).for_each(|i| { (0..cols_out).for_each(|i| {
@@ -133,10 +133,10 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
pub fn automorphism_inplace<DataRhs: DataRef, B: Backend>( pub fn automorphism_inplace<DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -149,7 +149,7 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
+ VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -176,7 +176,7 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
(0..self.rank_in().into()).for_each(|col_i| { (0..self.rank_in().into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_j| { (0..self.dnum().into()).for_each(|row_j| {
let mut res_ct: GLWECiphertext<&mut [u8]> = self.at_mut(row_j, col_i); let mut res_ct: GLWE<&mut [u8]> = self.at_mut(row_j, col_i);
// Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a) // Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
(0..cols_out).for_each(|i| { (0..cols_out).for_each(|i| {

View File

@@ -1,20 +1,20 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, ScratchAvailable, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume,
VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, DataRef, Module, Scratch},
}; };
use crate::layouts::{ use crate::layouts::{
GGLWEInfos, GGSWCiphertext, GGSWInfos, GLWECiphertext, GGLWEInfos, GGSW, GGSWInfos, GLWE,
prepared::{GGLWEAutomorphismKeyPrepared, GGLWETensorKeyPrepared}, prepared::{AutomorphismKeyPrepared, TensorKeyPrepared},
}; };
impl GGSWCiphertext<Vec<u8>> { impl GGSW<Vec<u8>> {
pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY, TSK>( pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY, TSK>(
module: &Module<B>, module: &Module<B>,
out_infos: &OUT, out_infos: &OUT,
in_infos: &IN, in_infos: &IN,
@@ -26,25 +26,22 @@ impl GGSWCiphertext<Vec<u8>> {
IN: GGSWInfos, IN: GGSWInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
TSK: GGLWEInfos, TSK: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes Module<B>:
+ VmpApplyDftToDftTmpBytes VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxBigNormalizeTmpBytes,
{ {
let out_size: usize = out_infos.size(); let out_size: usize = out_infos.size();
let ci_dft: usize = module.vec_znx_dft_alloc_bytes((key_infos.rank_out() + 1).into(), out_size); let ci_dft: usize = module.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
let ks_internal: usize = GLWECiphertext::keyswitch_scratch_space( let ks_internal: usize = GLWE::keyswitch_tmp_bytes(
module, module,
&out_infos.glwe_layout(), &out_infos.glwe_layout(),
&in_infos.glwe_layout(), &in_infos.glwe_layout(),
key_infos, key_infos,
); );
let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, out_infos, tsk_infos); let expand: usize = GGSW::expand_row_tmp_bytes(module, out_infos, tsk_infos);
ci_dft + (ks_internal | expand) ci_dft + (ks_internal | expand)
} }
pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY, TSK>( pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY, TSK>(
module: &Module<B>, module: &Module<B>,
out_infos: &OUT, out_infos: &OUT,
key_infos: &KEY, key_infos: &KEY,
@@ -54,26 +51,23 @@ impl GGSWCiphertext<Vec<u8>> {
OUT: GGSWInfos, OUT: GGSWInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
TSK: GGLWEInfos, TSK: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes Module<B>:
+ VmpApplyDftToDftTmpBytes VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxBigNormalizeTmpBytes,
{ {
GGSWCiphertext::automorphism_scratch_space(module, out_infos, out_infos, key_infos, tsk_infos) GGSW::automorphism_tmp_bytes(module, out_infos, out_infos, key_infos, tsk_infos)
} }
} }
impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> { impl<DataSelf: DataMut> GGSW<DataSelf> {
pub fn automorphism<DataLhs: DataRef, DataAk: DataRef, DataTsk: DataRef, B: Backend>( pub fn automorphism<DataLhs: DataRef, DataAk: DataRef, DataTsk: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lhs: &GGSWCiphertext<DataLhs>, lhs: &GGSW<DataLhs>,
auto_key: &GGLWEAutomorphismKeyPrepared<DataAk, B>, auto_key: &AutomorphismKeyPrepared<DataAk, B>,
tensor_key: &GGLWETensorKeyPrepared<DataTsk, B>, tensor_key: &TensorKeyPrepared<DataTsk, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -83,13 +77,13 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
+ VecZnxBigAddSmallInplace<B> + VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplace<B>
+ VecZnxBigAllocBytes + VecZnxBigBytesOf
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxDftCopy<B> + VecZnxDftCopy<B>
+ VecZnxDftAddInplace<B> + VecZnxDftAddInplace<B>
+ VecZnxIdftApplyTmpA<B> + VecZnxIdftApplyTmpA<B>
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B> + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -121,7 +115,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
self.rank(), self.rank(),
tensor_key.rank_out() tensor_key.rank_out()
); );
assert!(scratch.available() >= GGSWCiphertext::automorphism_scratch_space(module, self, lhs, auto_key, tensor_key)) assert!(scratch.available() >= GGSW::automorphism_tmp_bytes(module, self, lhs, auto_key, tensor_key))
}; };
// Keyswitch the j-th row of the col 0 // Keyswitch the j-th row of the col 0
@@ -137,11 +131,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
pub fn automorphism_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>( pub fn automorphism_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
auto_key: &GGLWEAutomorphismKeyPrepared<DataKsk, B>, auto_key: &AutomorphismKeyPrepared<DataKsk, B>,
tensor_key: &GGLWETensorKeyPrepared<DataTsk, B>, tensor_key: &TensorKeyPrepared<DataTsk, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -151,13 +145,13 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
+ VecZnxBigAddSmallInplace<B> + VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplace<B>
+ VecZnxBigAllocBytes + VecZnxBigBytesOf
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxDftCopy<B> + VecZnxDftCopy<B>
+ VecZnxDftAddInplace<B> + VecZnxDftAddInplace<B>
+ VecZnxIdftApplyTmpA<B> + VecZnxIdftApplyTmpA<B>
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B> + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
// Keyswitch the j-th row of the col 0 // Keyswitch the j-th row of the col 0
(0..self.dnum().into()).for_each(|row_i| { (0..self.dnum().into()).for_each(|row_i| {

View File

@@ -1,17 +1,17 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, ScratchAvailable, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxBigSubSmallNegateInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig}, layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig},
}; };
use crate::layouts::{GGLWEInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared}; use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared};
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY>( pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY>(
module: &Module<B>, module: &Module<B>,
out_infos: &OUT, out_infos: &OUT,
in_infos: &IN, in_infos: &IN,
@@ -21,30 +21,30 @@ impl GLWECiphertext<Vec<u8>> {
OUT: GLWEInfos, OUT: GLWEInfos,
IN: GLWEInfos, IN: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
Self::keyswitch_scratch_space(module, out_infos, in_infos, key_infos) Self::keyswitch_tmp_bytes(module, out_infos, in_infos, key_infos)
} }
pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where where
OUT: GLWEInfos, OUT: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
Self::keyswitch_inplace_scratch_space(module, out_infos, key_infos) Self::keyswitch_inplace_tmp_bytes(module, out_infos, key_infos)
} }
} }
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> { impl<DataSelf: DataMut> GLWE<DataSelf> {
pub fn automorphism<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn automorphism<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lhs: &GLWECiphertext<DataLhs>, lhs: &GLWE<DataLhs>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -56,7 +56,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
self.keyswitch(module, lhs, &rhs.key, scratch); self.keyswitch(module, lhs, &rhs.key, scratch);
(0..(self.rank() + 1).into()).for_each(|i| { (0..(self.rank() + 1).into()).for_each(|i| {
@@ -67,10 +67,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_inplace<DataRhs: DataRef, B: Backend>( pub fn automorphism_inplace<DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -82,7 +82,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
self.keyswitch_inplace(module, &rhs.key, scratch); self.keyswitch_inplace(module, &rhs.key, scratch);
(0..(self.rank() + 1).into()).for_each(|i| { (0..(self.rank() + 1).into()).for_each(|i| {
@@ -93,11 +93,11 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_add<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn automorphism_add<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lhs: &GLWECiphertext<DataLhs>, lhs: &GLWE<DataLhs>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -109,7 +109,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigAutomorphismInplace<B> + VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -135,10 +135,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_add_inplace<DataRhs: DataRef, B: Backend>( pub fn automorphism_add_inplace<DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -150,7 +150,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigAutomorphismInplace<B> + VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -176,11 +176,11 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_sub_ab<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn automorphism_sub_ab<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lhs: &GLWECiphertext<DataLhs>, lhs: &GLWE<DataLhs>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -193,7 +193,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigSubSmallInplace<B> + VecZnxBigSubSmallInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -219,10 +219,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_sub_inplace<DataRhs: DataRef, B: Backend>( pub fn automorphism_sub_inplace<DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -235,7 +235,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigSubSmallInplace<B> + VecZnxBigSubSmallInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -261,11 +261,11 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_sub_negate<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn automorphism_sub_negate<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lhs: &GLWECiphertext<DataLhs>, lhs: &GLWE<DataLhs>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -278,7 +278,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigSubSmallNegateInplace<B> + VecZnxBigSubSmallNegateInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -304,10 +304,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn automorphism_sub_negate_inplace<DataRhs: DataRef, B: Backend>( pub fn automorphism_sub_negate_inplace<DataRhs: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, rhs: &AutomorphismKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -320,7 +320,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigSubSmallNegateInplace<B> + VecZnxBigSubSmallNegateInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {

View File

@@ -0,0 +1,279 @@
use poulpy_hal::{
api::{
ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAddInplace,
VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes,
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, Module, Scratch, VmpPMat, ZnxInfos},
};
use crate::{
ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GGLWEToRef, GGSW, GGSWInfos, GGSWToMut, GLWEInfos, LWEInfos,
prepared::{TensorKeyPrepared, TensorKeyPreparedToRef},
},
operations::GLWEOperations,
};
impl GGLWE<Vec<u8>> {
    /// Scratch-space size in bytes required by [`GGSW::from_gglwe`].
    ///
    /// Delegates to [`GGSWFromGGLWE::ggsw_from_gglwe_tmp_bytes`], i.e. the
    /// requirement is exactly that of the tensor-key row expansion.
    pub fn from_gglwe_tmp_bytes<R, A, M, BE: Backend>(module: &M, res_infos: &R, tsk_infos: &A) -> usize
    where
        M: GGSWFromGGLWE<BE>,
        R: GGSWInfos,
        A: GGLWEInfos,
    {
        module.ggsw_from_gglwe_tmp_bytes(res_infos, tsk_infos)
    }

    /// Misspelled legacy alias of [`Self::from_gglwe_tmp_bytes`] ("gglw" is
    /// missing the trailing 'e'). Kept so existing callers keep compiling;
    /// prefer the correctly-spelled method in new code.
    pub fn from_gglw_tmp_bytes<R, A, M, BE: Backend>(module: &M, res_infos: &R, tsk_infos: &A) -> usize
    where
        M: GGSWFromGGLWE<BE>,
        R: GGSWInfos,
        A: GGLWEInfos,
    {
        Self::from_gglwe_tmp_bytes(module, res_infos, tsk_infos)
    }
}
impl<D: DataMut> GGSW<D> {
    /// Fills this GGSW ciphertext from the GGLWE ciphertext `gglwe` using the
    /// prepared tensor key `tsk`.
    ///
    /// Thin wrapper over [`GGSWFromGGLWE::ggsw_from_gglwe`]: column 0 is copied
    /// from `gglwe` and the remaining columns are reconstructed via the tensor
    /// key (see that trait for the algorithm and its panics/assertions).
    /// `scratch` must provide at least the bytes reported by the matching
    /// `*_tmp_bytes` helper — TODO(review) confirm callers size it that way.
    pub fn from_gglwe<G, M, T, BE: Backend>(&mut self, module: &M, gglwe: &G, tsk: &T, scratch: &mut Scratch<BE>)
    where
        M: GGSWFromGGLWE<BE>,
        G: GGLWEToRef,
        T: TensorKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        module.ggsw_from_gglwe(self, gglwe, tsk, scratch);
    }
}
// Blanket implementation: any backend module that can expand GGSW rows and
// copy VecZnx columns automatically supports the GGLWE -> GGSW conversion.
impl<BE: Backend> GGSWFromGGLWE<BE> for Module<BE> where Self: GGSWExpandRows<BE> + VecZnxCopy {}
/// GGLWE -> GGSW conversion entry points, provided for backend modules that
/// implement the row-expansion primitive and `VecZnxCopy`.
pub trait GGSWFromGGLWE<BE: Backend>
where
    Self: GGSWExpandRows<BE> + VecZnxCopy,
{
    /// Scratch bytes needed by [`Self::ggsw_from_gglwe`]; identical to the
    /// row-expansion requirement.
    fn ggsw_from_gglwe_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
    where
        R: GGSWInfos,
        A: GGLWEInfos,
    {
        self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos)
    }

    /// Builds the GGSW ciphertext `res` out of the GGLWE ciphertext `a`:
    /// column 0 is copied verbatim, row by row, and the remaining columns are
    /// derived with the prepared tensor key `tsk`.
    ///
    /// Panics (always, not only in debug builds) when the ranks, dnum or ring
    /// degrees of `res`, `a`, `tsk` and `self` disagree.
    fn ggsw_from_gglwe<R, A, T>(&self, res: &mut R, a: &A, tsk: &T, scratch: &mut Scratch<BE>)
    where
        R: GGSWToMut,
        A: GGLWEToRef,
        T: TensorKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        let ggsw: &mut GGSW<&mut [u8]> = &mut res.to_mut();
        let gglwe: &GGLWE<&[u8]> = &a.to_ref();
        let tensor_key: &TensorKeyPrepared<&[u8], BE> = &tsk.to_ref();

        // Layout compatibility checks between the three operands and the module.
        assert_eq!(ggsw.rank(), gglwe.rank_out());
        assert_eq!(ggsw.dnum(), gglwe.dnum());
        assert_eq!(ggsw.n(), self.n() as u32);
        assert_eq!(gglwe.n(), self.n() as u32);
        assert_eq!(tensor_key.n(), self.n() as u32);

        // Column 0 of the GGSW is exactly the input GGLWE.
        (0..ggsw.dnum().into()).for_each(|row| {
            ggsw.at_mut(row, 0).copy(self, &gglwe.at(row, 0));
        });

        // Reconstruct columns 1..=rank with the tensor key.
        self.ggsw_expand_row(ggsw, tensor_key, scratch);
    }
}
/// Blanket implementation for any [`Module`] whose backend exposes the DFT,
/// VMP and normalization primitives required by the row expansion.
/// (The original bound list repeated `VecZnxDftBytesOf`,
/// `VmpApplyDftToDftTmpBytes`, `VecZnxBigBytesOf` and
/// `VecZnxNormalizeTmpBytes` twice; the duplicates are removed here.)
impl<BE: Backend> GGSWExpandRows<BE> for Module<BE> where
    Self: Sized
        + ModuleN
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigBytesOf
        + VecZnxNormalizeTmpBytes
        + VecZnxDftApply<BE>
        + VecZnxDftCopy<BE>
        + VmpApplyDftToDft<BE>
        + VmpApplyDftToDftAdd<BE>
        + VecZnxDftAddInplace<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxIdftApplyTmpA<BE>
        + VecZnxNormalize<BE>
{
}
/// Internal primitive that reconstructs GGSW columns 1..=rank from column 0
/// using a prepared tensor key (encryptions of the pairwise secret products
/// `s[i]*s[j]`).
///
/// Defect fixed relative to the original: the `where` clause listed
/// `VecZnxDftBytesOf`, `VmpApplyDftToDftTmpBytes`, `VecZnxBigBytesOf` and
/// `VecZnxNormalizeTmpBytes` twice each; duplicates removed. The method
/// bodies are unchanged.
pub(crate) trait GGSWExpandRows<BE: Backend>
where
    Self: Sized
        + ModuleN
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigBytesOf
        + VecZnxNormalizeTmpBytes
        + VecZnxDftApply<BE>
        + VecZnxDftCopy<BE>
        + VmpApplyDftToDft<BE>
        + VmpApplyDftToDftAdd<BE>
        + VecZnxDftAddInplace<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxIdftApplyTmpA<BE>
        + VecZnxNormalize<BE>,
{
    /// Scratch bytes required by [`Self::ggsw_expand_row`] for a result with
    /// layout `res_infos` and a tensor key with layout `tsk_infos`.
    ///
    /// NOTE(review): the `|` on the return value combines two alternative
    /// scratch regions; this mirrors the byte-count convention used elsewhere
    /// in the crate (e.g. `decrypt_tmp_bytes`) — presumably an
    /// alignment-friendly max, confirm against the scratch allocator.
    fn ggsw_expand_rows_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
    where
        R: GGSWInfos,
        A: GGLWEInfos,
    {
        let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
        let size_in: usize = res_infos
            .k()
            .div_ceil(tsk_infos.base2k())
            .div_ceil(tsk_infos.dsize().into()) as usize;
        let tmp_dft_i: usize = self.bytes_of_vec_znx_dft((tsk_infos.rank_out() + 1).into(), tsk_size);
        let tmp_a: usize = self.bytes_of_vec_znx_dft(1, size_in);
        let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
            tsk_size,
            size_in,
            size_in,
            (tsk_infos.rank_in()).into(),  // Verify if rank+1
            (tsk_infos.rank_out()).into(), // Verify if rank+1
            tsk_size,
        );
        let tmp_idft: usize = self.bytes_of_vec_znx_big(1, tsk_size);
        let norm: usize = self.vec_znx_normalize_tmp_bytes();
        tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
    }

    /// Expands every row of `res`: for each row, column 0 (a GLWE ciphertext)
    /// is key-switched through the tensor key `tsk` to produce columns
    /// 1..=rank. Panics if `scratch` is smaller than
    /// [`Self::ggsw_expand_rows_tmp_bytes`].
    fn ggsw_expand_row<R, T>(&self, res: &mut R, tsk: &T, scratch: &mut Scratch<BE>)
    where
        R: GGSWToMut,
        T: TensorKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
        let tsk: &TensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
        let basek_in: usize = res.base2k().into();
        let basek_tsk: usize = tsk.base2k().into();
        assert!(scratch.available() >= self.ggsw_expand_rows_tmp_bytes(res, tsk));
        let rank: usize = res.rank().into();
        let cols: usize = rank + 1;
        // Number of limbs of the row in the tensor key's base.
        let a_size: usize = (res.size() * basek_in).div_ceil(basek_tsk);
        // Keyswitch the j-th row of the col 0
        for row_i in 0..res.dnum().into() {
            let a = &res.at(row_i, 0).data;
            // Pre-compute DFT of (a0, a1, a2)
            let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, a_size);
            if basek_in == basek_tsk {
                for i in 0..cols {
                    self.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
                }
            } else {
                // Bases differ: renormalize each column into the tensor key's
                // base before the DFT.
                let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(self, 1, a_size);
                for i in 0..cols {
                    self.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
                    self.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
                }
            }
            for col_j in 1..cols {
                // Example for rank 3:
                //
                // Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
                // actually composed of that many dnum and we focus on a specific row here
                // implicitely given ci_dft.
                //
                // # Input
                //
                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
                // col 1: (0, 0, 0, 0)
                // col 2: (0, 0, 0, 0)
                // col 3: (0, 0, 0, 0)
                //
                // # Output
                //
                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
                // col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
                // col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
                // col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
                let dsize: usize = tsk.dsize().into();
                let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, tsk.size());
                let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(self, 1, ci_dft.size().div_ceil(dsize));
                {
                    // Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
                    //
                    // # Example for col=1
                    //
                    // a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
                    // +
                    // a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
                    // +
                    // a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
                    // =
                    // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
                    for col_i in 1..cols {
                        let pmat: &VmpPMat<&[u8], BE> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
                        // Extracts a[i] and multipies with Enc(s[i]s[j])
                        for di in 0..dsize {
                            tmp_a.set_size((ci_dft.size() + di) / dsize);
                            // Small optimization for dsize > 2
                            // VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
                            // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
                            // As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
                            // It is possible to further ignore the last dsize-1 limbs, but this introduce
                            // ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
                            // noise is kept with respect to the ideal functionality.
                            tmp_dft_i.set_size(tsk.size() - ((dsize - di) as isize - 2).max(0) as usize);
                            self.vec_znx_dft_copy(dsize, dsize - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
                            if di == 0 && col_i == 1 {
                                self.vmp_apply_dft_to_dft(&mut tmp_dft_i, &tmp_a, pmat, scratch_3);
                            } else {
                                self.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
                            }
                        }
                    }
                }
                // Adds -(sum a[i] * s[i]) + m) on the i-th column of tmp_idft_i
                //
                // (-(x0s0 + x1s1 + x2s2) + a0s0s0 + a1s0s1 + a2s0s2, x0, x1, x2)
                // +
                // (0, -(a0s0 + a1s1 + a2s2) + M[i], 0, 0)
                // =
                // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 -(a0s0 + a1s1 + a2s2) + M[i], x1, x2)
                // =
                // (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
                self.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
                // Back to coefficient domain, then renormalize into the
                // result's base, column by column.
                let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(self, 1, tsk.size());
                for i in 0..cols {
                    self.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
                    self.vec_znx_big_normalize(
                        basek_in,
                        &mut res.at_mut(row_i, col_j).data,
                        i,
                        basek_tsk,
                        &tmp_idft,
                        0,
                        scratch_3,
                    );
                }
            }
        }
    }
}

View File

@@ -1,22 +1,16 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero}, layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
}; };
use crate::{ use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared};
TakeGLWECt,
layouts::{
GGLWEInfos, GLWECiphertext, GLWECiphertextLayout, GLWEInfos, LWECiphertext, LWEInfos, Rank,
prepared::GLWEToLWESwitchingKeyPrepared,
},
};
impl LWECiphertext<Vec<u8>> { impl LWE<Vec<u8>> {
pub fn from_glwe_scratch_space<B: Backend, OUT, IN, KEY>( pub fn from_glwe_tmp_bytes<B: Backend, OUT, IN, KEY>(
module: &Module<B>, module: &Module<B>,
lwe_infos: &OUT, lwe_infos: &OUT,
glwe_infos: &IN, glwe_infos: &IN,
@@ -26,26 +20,26 @@ impl LWECiphertext<Vec<u8>> {
OUT: LWEInfos, OUT: LWEInfos,
IN: GLWEInfos, IN: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_layout: GLWELayout = GLWELayout {
n: module.n().into(), n: module.n().into(),
base2k: lwe_infos.base2k(), base2k: lwe_infos.base2k(),
k: lwe_infos.k(), k: lwe_infos.k(),
rank: Rank(1), rank: Rank(1),
}; };
GLWECiphertext::alloc_bytes_with( GLWE::bytes_of(
module.n().into(), module.n().into(),
lwe_infos.base2k(), lwe_infos.base2k(),
lwe_infos.k(), lwe_infos.k(),
1u32.into(), 1u32.into(),
) + GLWECiphertext::keyswitch_scratch_space(module, &glwe_layout, glwe_infos, key_infos) ) + GLWE::keyswitch_tmp_bytes(module, &glwe_layout, glwe_infos, key_infos)
} }
} }
impl<DLwe: DataMut> LWECiphertext<DLwe> { impl<DLwe: DataMut> LWE<DLwe> {
pub fn sample_extract<DGlwe: DataRef>(&mut self, a: &GLWECiphertext<DGlwe>) { pub fn sample_extract<DGlwe: DataRef>(&mut self, a: &GLWE<DGlwe>) {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
assert!(self.n() <= a.n()); assert!(self.n() <= a.n());
@@ -66,13 +60,13 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
pub fn from_glwe<DGlwe, DKs, B: Backend>( pub fn from_glwe<DGlwe, DKs, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
a: &GLWECiphertext<DGlwe>, a: &GLWE<DGlwe>,
ks: &GLWEToLWESwitchingKeyPrepared<DKs, B>, ks: &GLWEToLWESwitchingKeyPrepared<DKs, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
DGlwe: DataRef, DGlwe: DataRef,
DKs: DataRef, DKs: DataRef,
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -83,7 +77,7 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx, Scratch<B>:,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -92,7 +86,7 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
assert!(self.n() <= module.n() as u32); assert!(self.n() <= module.n() as u32);
} }
let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout { let glwe_layout: GLWELayout = GLWELayout {
n: module.n().into(), n: module.n().into(),
base2k: self.base2k(), base2k: self.base2k(),
k: self.k(), k: self.k(),

View File

@@ -1,22 +1,16 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero}, layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
}; };
use crate::{ use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared};
TakeGLWECt,
layouts::{
GGLWEInfos, GLWECiphertext, GLWECiphertextLayout, GLWEInfos, LWECiphertext, LWEInfos,
prepared::LWEToGLWESwitchingKeyPrepared,
},
};
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
pub fn from_lwe_scratch_space<B: Backend, OUT, IN, KEY>( pub fn from_lwe_tmp_bytes<B: Backend, OUT, IN, KEY>(
module: &Module<B>, module: &Module<B>,
glwe_infos: &OUT, glwe_infos: &OUT,
lwe_infos: &IN, lwe_infos: &IN,
@@ -26,35 +20,35 @@ impl GLWECiphertext<Vec<u8>> {
OUT: GLWEInfos, OUT: GLWEInfos,
IN: LWEInfos, IN: LWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
let ct: usize = GLWECiphertext::alloc_bytes_with( let ct: usize = GLWE::bytes_of(
module.n().into(), module.n().into(),
key_infos.base2k(), key_infos.base2k(),
lwe_infos.k().max(glwe_infos.k()), lwe_infos.k().max(glwe_infos.k()),
1u32.into(), 1u32.into(),
); );
let ks: usize = GLWECiphertext::keyswitch_inplace_scratch_space(module, glwe_infos, key_infos); let ks: usize = GLWE::keyswitch_inplace_tmp_bytes(module, glwe_infos, key_infos);
if lwe_infos.base2k() == key_infos.base2k() { if lwe_infos.base2k() == key_infos.base2k() {
ct + ks ct + ks
} else { } else {
let a_conv = VecZnx::alloc_bytes(module.n(), 1, lwe_infos.size()) + module.vec_znx_normalize_tmp_bytes(); let a_conv = VecZnx::bytes_of(module.n(), 1, lwe_infos.size()) + module.vec_znx_normalize_tmp_bytes();
ct + a_conv + ks ct + a_conv + ks
} }
} }
} }
impl<D: DataMut> GLWECiphertext<D> { impl<D: DataMut> GLWE<D> {
pub fn from_lwe<DLwe, DKsk, B: Backend>( pub fn from_lwe<DLwe, DKsk, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
lwe: &LWECiphertext<DLwe>, lwe: &LWE<DLwe>,
ksk: &LWEToGLWESwitchingKeyPrepared<DKsk, B>, ksk: &LWEToGLWESwitchingKeyPrepared<DKsk, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
DLwe: DataRef, DLwe: DataRef,
DKsk: DataRef, DKsk: DataRef,
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -65,7 +59,7 @@ impl<D: DataMut> GLWECiphertext<D> {
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -74,7 +68,7 @@ impl<D: DataMut> GLWECiphertext<D> {
assert!(lwe.n() <= module.n() as u32); assert!(lwe.n() <= module.n() as u32);
} }
let (mut glwe, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout { let (mut glwe, scratch_1) = scratch.take_glwe_ct(&GLWELayout {
n: ksk.n(), n: ksk.n(),
base2k: ksk.base2k(), base2k: ksk.base2k(),
k: lwe.k(), k: lwe.k(),

View File

@@ -1,2 +1,5 @@
mod gglwe_to_ggsw;
mod glwe_to_lwe; mod glwe_to_lwe;
mod lwe_to_glwe; mod lwe_to_glwe;
pub use gglwe_to_ggsw::*;

View File

@@ -1,25 +1,25 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftApply,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch}, layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch},
}; };
use crate::layouts::{GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared}; use crate::layouts::{GLWE, GLWEInfos, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
pub fn decrypt_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn decrypt_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes, Module<B>: VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
{ {
let size: usize = infos.size(); let size: usize = infos.size();
(module.vec_znx_normalize_tmp_bytes() | module.vec_znx_dft_alloc_bytes(1, size)) + module.vec_znx_dft_alloc_bytes(1, size) (module.vec_znx_normalize_tmp_bytes() | module.bytes_of_vec_znx_dft(1, size)) + module.bytes_of_vec_znx_dft(1, size)
} }
} }
impl<DataSelf: DataRef> GLWECiphertext<DataSelf> { impl<DataSelf: DataRef> GLWE<DataSelf> {
pub fn decrypt<DataPt: DataMut, DataSk: DataRef, B: Backend>( pub fn decrypt<DataPt: DataMut, DataSk: DataRef, B: Backend>(
&self, &self,
module: &Module<B>, module: &Module<B>,
@@ -33,7 +33,7 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
+ VecZnxBigAddInplace<B> + VecZnxBigAddInplace<B>
+ VecZnxBigAddSmallInplace<B> + VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>, + VecZnxBigNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B>, Scratch<B>:,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {

View File

@@ -4,9 +4,9 @@ use poulpy_hal::{
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl}, oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
}; };
use crate::layouts::{LWECiphertext, LWEInfos, LWEPlaintext, LWESecret}; use crate::layouts::{LWE, LWEInfos, LWEPlaintext, LWESecret};
impl<DataSelf> LWECiphertext<DataSelf> impl<DataSelf> LWE<DataSelf>
where where
DataSelf: DataRef, DataSelf: DataRef,
{ {

View File

@@ -1,35 +1,96 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{ScratchAvailable, SvpPPolBytesOf, VecZnxAutomorphism, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes,
VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, DataRef, Module, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWESecret, TakeGLWESecretPrepared, encryption::compressed::gglwe_ksk::GGLWEKeyCompressedEncryptSk,
layouts::{ layouts::{
GGLWEInfos, GLWEInfos, GLWESecret, LWEInfos, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos,
compressed::{GGLWEAutomorphismKeyCompressed, GGLWESwitchingKeyCompressed}, compressed::{AutomorphismKeyCompressed, AutomorphismKeyCompressedToMut, GLWESwitchingKeyCompressed},
}, },
}; };
impl GGLWEAutomorphismKeyCompressed<Vec<u8>> { impl AutomorphismKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
{ {
assert_eq!(module.n() as u32, infos.n()); assert_eq!(module.n() as u32, infos.n());
GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos) GLWESwitchingKeyCompressed::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
+ GLWESecret::alloc_bytes_with(infos.n(), infos.rank_out())
} }
} }
impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> { pub trait GGLWEAutomorphismKeyCompressedEncryptSk<B: Backend> {
fn gglwe_automorphism_key_compressed_encrypt_sk<R, S>(
&self,
res: &mut R,
p: i64,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: AutomorphismKeyCompressedToMut,
S: GLWESecretToRef;
}
impl<B: Backend> GGLWEAutomorphismKeyCompressedEncryptSk<B> for Module<B>
where
Module<B>: GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxAutomorphism,
Scratch<B>: ScratchAvailable,
{
fn gglwe_automorphism_key_compressed_encrypt_sk<R, S>(
&self,
res: &mut R,
p: i64,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: AutomorphismKeyCompressedToMut,
S: GLWESecretToRef,
{
let res: &mut AutomorphismKeyCompressed<&mut [u8]> = &mut res.to_mut();
let sk: &GLWESecret<&[u8]> = &sk.to_ref();
#[cfg(debug_assertions)]
{
assert_eq!(res.n(), sk.n());
assert_eq!(res.rank_out(), res.rank_in());
assert_eq!(sk.rank(), res.rank_out());
assert!(
scratch.available() >= AutomorphismKeyCompressed::encrypt_sk_tmp_bytes(self, res),
"scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {}",
scratch.available(),
AutomorphismKeyCompressed::encrypt_sk_tmp_bytes(self, res)
)
}
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
{
(0..res.rank_out().into()).for_each(|i| {
self.vec_znx_automorphism(
self.galois_element_inv(p),
&mut sk_out.data.as_vec_znx_mut(),
i,
&sk.data.as_vec_znx(),
i,
);
});
}
self.gglwe_key_compressed_encrypt_sk(&mut res.key, sk, &sk_out, seed_xa, source_xe, scratch_1);
res.p = p;
}
}
impl<DataSelf: DataMut> AutomorphismKeyCompressed<DataSelf> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataSk: DataRef, B: Backend>( pub fn encrypt_sk<DataSk: DataRef, B: Backend>(
&mut self, &mut self,
@@ -40,56 +101,8 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> {
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxAutomorphism Module<B>: GGLWEAutomorphismKeyCompressedEncryptSk<B>,
+ SvpPrepare<B>
+ SvpPPolAllocBytes
+ VecZnxSwitchRing
+ VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub
+ VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
{ {
#[cfg(debug_assertions)] module.gglwe_automorphism_key_compressed_encrypt_sk(self, p, sk, seed_xa, source_xe, scratch);
{
assert_eq!(self.n(), sk.n());
assert_eq!(self.rank_out(), self.rank_in());
assert_eq!(sk.rank(), self.rank_out());
assert!(
scratch.available() >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self),
"scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {}",
scratch.available(),
GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self)
)
}
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
{
(0..self.rank_out().into()).for_each(|i| {
module.vec_znx_automorphism(
module.galois_element_inv(p),
&mut sk_out.data.as_vec_znx_mut(),
i,
&sk.data.as_vec_znx(),
i,
);
});
}
self.key
.encrypt_sk(module, sk, &sk_out, seed_xa, source_xe, scratch_1);
self.p = p;
} }
} }

View File

@@ -1,30 +1,22 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, ZnNormalizeInplace,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
}, },
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero}, layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWEPt, encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
encryption::{SIGMA, glwe_encrypt_sk_internal}, layouts::{
layouts::{GGLWECiphertext, GGLWEInfos, LWEInfos, compressed::GGLWECiphertextCompressed, prepared::GLWESecretPrepared}, GGLWE, GGLWEInfos, LWEInfos,
compressed::{GGLWECompressed, GGLWECompressedToMut},
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},
},
}; };
impl GGLWECiphertextCompressed<Vec<u8>> { impl<D: DataMut> GGLWECompressed<D> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
{
GGLWECiphertext::encrypt_sk_scratch_space(module, infos)
}
}
impl<D: DataMut> GGLWECiphertextCompressed<D> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>( pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self, &mut self,
@@ -35,83 +27,124 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxAddScalarInplace Module<B>: GGLWECompressedEncryptSk<B>,
+ VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
module.gglwe_compressed_encrypt_sk(self, pt, sk, seed, source_xe, scratch);
}
}
impl GGLWECompressed<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{
GGLWE::encrypt_sk_tmp_bytes(module, infos)
}
}
pub trait GGLWECompressedEncryptSk<B: Backend> {
fn gglwe_compressed_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
seed: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGLWECompressedToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>;
}
impl<B: Backend> GGLWECompressedEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B>
+ VecZnxNormalizeInplace<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VecZnxAddScalarInplace
+ ZnNormalizeInplace<B>,
Scratch<B>: ScratchAvailable,
{
fn gglwe_compressed_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
seed: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGLWECompressedToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>,
{
let res: &mut GGLWECompressed<&mut [u8]> = &mut res.to_mut();
let pt: &ScalarZnx<&[u8]> = &pt.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
use poulpy_hal::layouts::ZnxInfos; use poulpy_hal::layouts::ZnxInfos;
let sk = &sk.to_ref();
assert_eq!( assert_eq!(
self.rank_in(), res.rank_in(),
pt.cols() as u32, pt.cols() as u32,
"self.rank_in(): {} != pt.cols(): {}", "res.rank_in(): {} != pt.cols(): {}",
self.rank_in(), res.rank_in(),
pt.cols() pt.cols()
); );
assert_eq!( assert_eq!(
self.rank_out(), res.rank_out(),
sk.rank(), sk.rank(),
"self.rank_out(): {} != sk.rank(): {}", "res.rank_out(): {} != sk.rank(): {}",
self.rank_out(), res.rank_out(),
sk.rank() sk.rank()
); );
assert_eq!(self.n(), sk.n()); assert_eq!(res.n(), sk.n());
assert_eq!(pt.n() as u32, sk.n()); assert_eq!(pt.n() as u32, sk.n());
assert!( assert!(
scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self), scratch.available() >= GGLWECompressed::encrypt_sk_tmp_bytes(self, res),
"scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space: {}", "scratch.available: {} < GGLWECiphertext::encrypt_sk_tmp_bytes: {}",
scratch.available(), scratch.available(),
GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self) GGLWECompressed::encrypt_sk_tmp_bytes(self, res)
); );
assert!( assert!(
self.dnum().0 * self.dsize().0 * self.base2k().0 <= self.k().0, res.dnum().0 * res.dsize().0 * res.base2k().0 <= res.k().0,
"self.dnum() : {} * self.dsize() : {} * self.base2k() : {} = {} >= self.k() = {}", "res.dnum() : {} * res.dsize() : {} * res.base2k() : {} = {} >= res.k() = {}",
self.dnum(), res.dnum(),
self.dsize(), res.dsize(),
self.base2k(), res.base2k(),
self.dnum().0 * self.dsize().0 * self.base2k().0, res.dnum().0 * res.dsize().0 * res.base2k().0,
self.k() res.k()
); );
} }
let dnum: usize = self.dnum().into(); let dnum: usize = res.dnum().into();
let dsize: usize = self.dsize().into(); let dsize: usize = res.dsize().into();
let base2k: usize = self.base2k().into(); let base2k: usize = res.base2k().into();
let rank_in: usize = self.rank_in().into(); let rank_in: usize = res.rank_in().into();
let cols: usize = (self.rank_out() + 1).into(); let cols: usize = (res.rank_out() + 1).into();
let mut source_xa = Source::new(seed); let mut source_xa = Source::new(seed);
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self); let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(res);
(0..rank_in).for_each(|col_i| { (0..rank_in).for_each(|col_i| {
(0..dnum).for_each(|d_i| { (0..dnum).for_each(|d_i| {
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
tmp_pt.data.zero(); // zeroes for next iteration tmp_pt.data.zero(); // zeroes for next iteration
module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + d_i * dsize, pt, col_i); self.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + d_i * dsize, pt, col_i);
module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1); self.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1);
let (seed, mut source_xa_tmp) = source_xa.branch(); let (seed, mut source_xa_tmp) = source_xa.branch();
self.seed[col_i * dnum + d_i] = seed; res.seed[col_i * dnum + d_i] = seed;
glwe_encrypt_sk_internal( self.glwe_encrypt_sk_internal(
module, res.base2k().into(),
self.base2k().into(), res.k().into(),
self.k().into(), &mut res.at_mut(d_i, col_i).data,
&mut self.at_mut(d_i, col_i).data,
cols, cols,
true, true,
Some((&tmp_pt, 0)), Some((&tmp_pt, 0)),

View File

@@ -1,35 +1,31 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{ScratchAvailable, SvpPPolBytesOf, SvpPrepare, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing},
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxSubInplace, VecZnxSwitchRing,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch}, layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWESecretPrepared, encryption::compressed::gglwe_ct::GGLWECompressedEncryptSk,
layouts::{ layouts::{
Degree, GGLWECiphertext, GGLWEInfos, GLWEInfos, GLWESecret, LWEInfos, compressed::GGLWESwitchingKeyCompressed, GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos, RingDegree,
compressed::{GLWESwitchingKeyCompressed, GLWESwitchingKeyCompressedToMut},
prepared::GLWESecretPrepared, prepared::GLWESecretPrepared,
}, },
}; };
impl GGLWESwitchingKeyCompressed<Vec<u8>> { impl GLWESwitchingKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
{ {
(GGLWECiphertext::encrypt_sk_scratch_space(module, infos) | ScalarZnx::alloc_bytes(module.n(), 1)) (GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+ ScalarZnx::alloc_bytes(module.n(), infos.rank_in().into()) + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
+ GLWESecretPrepared::alloc_bytes_with(module, infos.rank_out()) + GLWESecretPrepared::bytes_of(module, infos.rank_out())
} }
} }
impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> { impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataSkIn: DataRef, DataSkOut: DataRef, B: Backend>( pub fn encrypt_sk<DataSkIn: DataRef, DataSkOut: DataRef, B: Backend>(
&mut self, &mut self,
@@ -40,36 +36,65 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: SvpPrepare<B> Module<B>: GGLWEKeyCompressedEncryptSk<B>,
+ SvpPPolAllocBytes
+ VecZnxSwitchRing
+ VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub
+ VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
{ {
module.gglwe_key_compressed_encrypt_sk(self, sk_in, sk_out, seed_xa, source_xe, scratch);
}
}
pub trait GGLWEKeyCompressedEncryptSk<B: Backend> {
fn gglwe_key_compressed_encrypt_sk<R, SI, SO>(
&self,
res: &mut R,
sk_in: &SI,
sk_out: &SO,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWESwitchingKeyCompressedToMut,
SI: GLWESecretToRef,
SO: GLWESecretToRef;
}
impl<B: Backend> GGLWEKeyCompressedEncryptSk<B> for Module<B>
where
Module<B>: GGLWECompressedEncryptSk<B>
+ SvpPPolBytesOf
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VecZnxSwitchRing
+ SvpPrepare<B>,
Scratch<B>: ScratchAvailable,
{
fn gglwe_key_compressed_encrypt_sk<R, SI, SO>(
&self,
res: &mut R,
sk_in: &SI,
sk_out: &SO,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWESwitchingKeyCompressedToMut,
SI: GLWESecretToRef,
SO: GLWESecretToRef,
{
let res: &mut GLWESwitchingKeyCompressed<&mut [u8]> = &mut res.to_mut();
let sk_in: &GLWESecret<&[u8]> = &sk_in.to_ref();
let sk_out: &GLWESecret<&[u8]> = &sk_out.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
use crate::layouts::GGLWESwitchingKey; use crate::layouts::GLWESwitchingKey;
assert!(sk_in.n().0 <= module.n() as u32); assert!(sk_in.n().0 <= self.n() as u32);
assert!(sk_out.n().0 <= module.n() as u32); assert!(sk_out.n().0 <= self.n() as u32);
assert!( assert!(
scratch.available() >= GGLWESwitchingKey::encrypt_sk_scratch_space(module, self), scratch.available() >= GLWESwitchingKey::encrypt_sk_tmp_bytes(self, res),
"scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}", "scratch.available()={} < GLWESwitchingKey::encrypt_sk_tmp_bytes={}",
scratch.available(), scratch.available(),
GGLWESwitchingKey::encrypt_sk_scratch_space(module, self) GLWESwitchingKey::encrypt_sk_tmp_bytes(self, res)
) )
} }
@@ -77,7 +102,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into()); let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into());
(0..sk_in.rank().into()).for_each(|i| { (0..sk_in.rank().into()).for_each(|i| {
module.vec_znx_switch_ring( self.vec_znx_switch_ring(
&mut sk_in_tmp.as_vec_znx_mut(), &mut sk_in_tmp.as_vec_znx_mut(),
i, i,
&sk_in.data.as_vec_znx(), &sk_in.data.as_vec_znx(),
@@ -85,24 +110,24 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
); );
}); });
let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(Degree(n as u32), sk_out.rank()); let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(RingDegree(n as u32), sk_out.rank());
{ {
let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1); let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
(0..sk_out.rank().into()).for_each(|i| { (0..sk_out.rank().into()).for_each(|i| {
module.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i); self.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0); self.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
}); });
} }
self.key.encrypt_sk( self.gglwe_compressed_encrypt_sk(
module, &mut res.key,
&sk_in_tmp, &sk_in_tmp,
&sk_out_tmp, &sk_out_tmp,
seed_xa, seed_xa,
source_xe, source_xe,
scratch_2, scratch_2,
); );
self.sk_in_n = sk_in.n().into(); res.sk_in_n = sk_in.n().into();
self.sk_out_n = sk_out.n().into(); res.sk_out_n = sk_out.n().into();
} }
} }

View File

@@ -1,86 +1,83 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, DataRef, Module, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWESecret, TakeGLWESecretPrepared, encryption::compressed::gglwe_ksk::GGLWEKeyCompressedEncryptSk,
layouts::{ layouts::{
GGLWEInfos, GGLWETensorKey, GLWEInfos, GLWESecret, LWEInfos, Rank, compressed::GGLWETensorKeyCompressed, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos, Rank, TensorKey,
prepared::Prepare, compressed::{TensorKeyCompressed, TensorKeyCompressedToMut},
}, },
}; };
impl GGLWETensorKeyCompressed<Vec<u8>> { impl TensorKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
{ {
GGLWETensorKey::encrypt_sk_scratch_space(module, infos) TensorKey::encrypt_sk_tmp_bytes(module, infos)
} }
} }
impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> { pub trait GGLWETensorKeyCompressedEncryptSk<B: Backend> {
pub fn encrypt_sk<DataSk: DataRef, B: Backend>( fn gglwe_tensor_key_encrypt_sk<R, S>(
&mut self, &self,
module: &Module<B>, res: &mut R,
sk: &GLWESecret<DataSk>, sk: &S,
seed_xa: [u8; 32], seed_xa: [u8; 32],
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: SvpApplyDftToDft<B> R: TensorKeyCompressedToMut,
+ VecZnxIdftApplyTmpA<B> S: GLWESecretToRef;
+ VecZnxDftAllocBytes }
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B> impl<B: Backend> GGLWETensorKeyCompressedEncryptSk<B> for Module<B>
+ SvpApplyDftToDftInplace<B> where
+ VecZnxIdftApplyConsume<B> Module<B>: GGLWEKeyCompressedEncryptSk<B>
+ VecZnxNormalizeTmpBytes + VecZnxDftApply<B>
+ VecZnxFillUniform + SvpApplyDftToDft<B>
+ VecZnxSubInplace + VecZnxIdftApplyTmpA<B>
+ VecZnxAddInplace + VecZnxBigNormalize<B>
+ VecZnxNormalizeInplace<B> + SvpPrepare<B>,
+ VecZnxAddNormal Scratch<B>:,
+ VecZnxNormalize<B> {
+ VecZnxSub fn gglwe_tensor_key_encrypt_sk<R, S>(
+ VecZnxSwitchRing &self,
+ VecZnxAddScalarInplace res: &mut R,
+ SvpPrepare<B> sk: &S,
+ SvpPPolAllocBytes seed_xa: [u8; 32],
+ SvpPPolAlloc<B>, source_xe: &mut Source,
Scratch<B>: ScratchAvailable scratch: &mut Scratch<B>,
+ TakeScalarZnx ) where
+ TakeVecZnxDft<B> R: TensorKeyCompressedToMut,
+ TakeGLWESecretPrepared<B> S: GLWESecretToRef,
+ ScratchAvailable
+ TakeVecZnx
+ TakeVecZnxBig<B>,
{ {
let res: &mut TensorKeyCompressed<&mut [u8]> = &mut res.to_mut();
let sk: &GLWESecret<&[u8]> = &sk.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
assert_eq!(self.rank_out(), sk.rank()); assert_eq!(res.rank_out(), sk.rank());
assert_eq!(self.n(), sk.n()); assert_eq!(res.n(), sk.n());
} }
let n: usize = sk.n().into(); let n: usize = sk.n().into();
let rank: usize = self.rank_out().into(); let rank: usize = res.rank_out().into();
let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(sk.n(), self.rank_out()); let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(sk.n(), res.rank_out());
sk_dft_prep.prepare(module, sk, scratch_1); sk_dft_prep.prepare(self, sk, scratch_1);
let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n, rank, 1); let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n, rank, 1);
for i in 0..rank { for i in 0..rank {
module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i); self.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
} }
let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n, 1, 1); let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n, 1, 1);
@@ -91,14 +88,14 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
for i in 0..rank { for i in 0..rank {
for j in i..rank { for j in i..rank {
module.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_dft_prep.data, j, &sk_dft, i); self.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_dft_prep.data, j, &sk_dft, i);
module.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0); self.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
module.vec_znx_big_normalize( self.vec_znx_big_normalize(
self.base2k().into(), res.base2k().into(),
&mut sk_ij.data.as_vec_znx_mut(), &mut sk_ij.data.as_vec_znx_mut(),
0, 0,
self.base2k().into(), res.base2k().into(),
&sk_ij_big, &sk_ij_big,
0, 0,
scratch_5, scratch_5,
@@ -106,9 +103,30 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
let (seed_xa_tmp, _) = source_xa.branch(); let (seed_xa_tmp, _) = source_xa.branch();
self.at_mut(i, j) self.gglwe_key_compressed_encrypt_sk(
.encrypt_sk(module, &sk_ij, sk, seed_xa_tmp, source_xe, scratch_5); res.at_mut(i, j),
&sk_ij,
sk,
seed_xa_tmp,
source_xe,
scratch_5,
);
} }
} }
} }
} }
impl<DataSelf: DataMut> TensorKeyCompressed<DataSelf> {
pub fn encrypt_sk<DataSk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
sk: &GLWESecret<DataSk>,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: GGLWETensorKeyCompressedEncryptSk<B>,
{
module.gglwe_tensor_key_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
}
}

View File

@@ -1,32 +1,118 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWEPt, encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
encryption::{SIGMA, glwe_encrypt_sk_internal},
layouts::{ layouts::{
GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, compressed::GGSWCiphertextCompressed, prepared::GLWESecretPrepared, GGSW, GGSWInfos, GLWEInfos, LWEInfos,
compressed::{GGSWCompressed, GGSWCompressedToMut},
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},
}, },
}; };
impl GGSWCiphertextCompressed<Vec<u8>> { impl GGSWCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGSWInfos, A: GGSWInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
{ {
GGSWCiphertext::encrypt_sk_scratch_space(module, infos) GGSW::encrypt_sk_tmp_bytes(module, infos)
} }
} }
impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> { pub trait GGSWCompressedEncryptSk<B: Backend> {
fn ggsw_compressed_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGSWCompressedToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>;
}
impl<B: Backend> GGSWCompressedEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>:,
{
fn ggsw_compressed_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGSWCompressedToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>,
{
let res: &mut GGSWCompressed<&mut [u8]> = &mut res.to_mut();
let sk: &GLWESecretPrepared<&[u8], B> = &sk.to_ref();
let pt: &ScalarZnx<&[u8]> = &pt.to_ref();
#[cfg(debug_assertions)]
{
use poulpy_hal::layouts::ZnxInfos;
assert_eq!(res.rank(), sk.rank());
assert_eq!(res.n(), sk.n());
assert_eq!(pt.n() as u32, sk.n());
}
let base2k: usize = res.base2k().into();
let rank: usize = res.rank().into();
let cols: usize = rank + 1;
let dsize: usize = res.dsize().into();
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&res.glwe_layout());
let mut source = Source::new(seed_xa);
res.seed = vec![[0u8; 32]; res.dnum().0 as usize * cols];
for row_i in 0..res.dnum().into() {
tmp_pt.data.zero();
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
self.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + row_i * dsize, pt, 0);
self.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);
for col_j in 0..rank + 1 {
// rlwe encrypt of vec_znx_pt into vec_znx_ct
let (seed, mut source_xa_tmp) = source.branch();
res.seed[row_i * cols + col_j] = seed;
self.glwe_encrypt_sk_internal(
res.base2k().into(),
res.k().into(),
&mut res.at_mut(row_i, col_j).data,
cols,
true,
Some((&tmp_pt, col_j)),
sk,
&mut source_xa_tmp,
source_xe,
SIGMA,
scratch_1,
);
}
}
}
}
impl<DataSelf: DataMut> GGSWCompressed<DataSelf> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>( pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self, &mut self,
@@ -37,71 +123,8 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxAddScalarInplace Module<B>: GGSWCompressedEncryptSk<B>,
+ VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
#[cfg(debug_assertions)] module.ggsw_compressed_encrypt_sk(self, pt, sk, seed_xa, source_xe, scratch);
{
use poulpy_hal::layouts::ZnxInfos;
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n() as u32, sk.n());
}
let base2k: usize = self.base2k().into();
let rank: usize = self.rank().into();
let cols: usize = rank + 1;
let dsize: usize = self.dsize().into();
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&self.glwe_layout());
let mut source = Source::new(seed_xa);
self.seed = vec![[0u8; 32]; self.dnum().0 as usize * cols];
(0..self.dnum().into()).for_each(|row_i| {
tmp_pt.data.zero();
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + row_i * dsize, pt, 0);
module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);
(0..rank + 1).for_each(|col_j| {
// rlwe encrypt of vec_znx_pt into vec_znx_ct
let (seed, mut source_xa_tmp) = source.branch();
self.seed[row_i * cols + col_j] = seed;
glwe_encrypt_sk_internal(
module,
self.base2k().into(),
self.k().into(),
&mut self.at_mut(row_i, col_j).data,
cols,
true,
Some((&tmp_pt, col_j)),
sk,
&mut source_xa_tmp,
source_xe,
SIGMA,
scratch_1,
);
});
});
} }
} }

View File

@@ -1,31 +1,83 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, DataRef, Module, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::{
encryption::{SIGMA, glwe_ct::glwe_encrypt_sk_internal}, encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
layouts::{ layouts::{
GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos, compressed::GLWECiphertextCompressed, prepared::GLWESecretPrepared, GLWE, GLWEInfos, GLWEPlaintext, GLWEPlaintextToRef, LWEInfos,
compressed::{GLWECompressed, GLWECompressedToMut},
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},
}, },
}; };
impl GLWECiphertextCompressed<Vec<u8>> { impl GLWECompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
{ {
GLWECiphertext::encrypt_sk_scratch_space(module, infos) GLWE::encrypt_sk_tmp_bytes(module, infos)
} }
} }
impl<D: DataMut> GLWECiphertextCompressed<D> { pub trait GLWECompressedEncryptSk<B: Backend> {
fn glwe_compressed_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWECompressedToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>;
}
impl<B: Backend> GLWECompressedEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B>,
{
fn glwe_compressed_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWECompressedToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>,
{
let res: &mut GLWECompressed<&mut [u8]> = &mut res.to_mut();
let mut source_xa: Source = Source::new(seed_xa);
let cols: usize = (res.rank() + 1).into();
self.glwe_encrypt_sk_internal(
res.base2k().into(),
res.k().into(),
&mut res.data,
cols,
true,
Some((pt, 0)),
sk,
&mut source_xa,
source_xe,
SIGMA,
scratch,
);
res.seed = seed_xa;
}
}
impl<D: DataMut> GLWECompressed<D> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>( pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self, &mut self,
@@ -36,65 +88,8 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: GLWECompressedEncryptSk<B>,
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
self.encrypt_sk_internal(module, Some((pt, 0)), sk, seed_xa, source_xe, scratch); module.glwe_compressed_encrypt_sk(self, pt, sk, seed_xa, source_xe, scratch);
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn encrypt_sk_internal<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
pt: Option<(&GLWEPlaintext<DataPt>, usize)>,
sk: &GLWESecretPrepared<DataSk, B>,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let mut source_xa = Source::new(seed_xa);
let cols: usize = (self.rank() + 1).into();
glwe_encrypt_sk_internal(
module,
self.base2k().into(),
self.k().into(),
&mut self.data,
cols,
true,
pt,
sk,
&mut source_xa,
source_xe,
SIGMA,
scratch,
);
self.seed = seed_xa;
} }
} }

View File

@@ -1,34 +1,33 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, Module, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::layouts::{
TakeGLWESecret, TakeGLWESecretPrepared, AutomorphismKey, AutomorphismKeyToMut, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, GLWESwitchingKey, LWEInfos,
layouts::{GGLWEAutomorphismKey, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, GLWESecret, LWEInfos},
}; };
impl GGLWEAutomorphismKey<Vec<u8>> { impl AutomorphismKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes, Module<BE>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{ {
assert_eq!( assert_eq!(
infos.rank_in(), infos.rank_in(),
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKey" "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
); );
GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESecret::alloc_bytes(&infos.glwe_layout()) GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
} }
pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
@@ -37,62 +36,102 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
_infos.rank_out(), _infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKey" "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
); );
GGLWESwitchingKey::encrypt_pk_scratch_space(module, _infos) GLWESwitchingKey::encrypt_pk_tmp_bytes(module, _infos)
} }
} }
impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> { pub trait GGLWEAutomorphismKeyEncryptSk<BE: Backend> {
#[allow(clippy::too_many_arguments)] fn gglwe_automorphism_key_encrypt_sk<A, B>(
pub fn encrypt_sk<DataSk: DataRef, B: Backend>( &self,
&mut self, res: &mut A,
module: &Module<B>,
p: i64, p: i64,
sk: &GLWESecret<DataSk>, sk: &B,
source_xa: &mut Source, source_xa: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<BE>,
) where ) where
Module<B>: VecZnxAddScalarInplace A: AutomorphismKeyToMut,
+ VecZnxDftAllocBytes B: GLWESecretToRef;
+ VecZnxBigNormalize<B> }
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> impl<DM: DataMut> AutomorphismKey<DM>
+ VecZnxIdftApplyConsume<B> where
+ VecZnxNormalizeTmpBytes Self: AutomorphismKeyToMut,
+ VecZnxFillUniform {
+ VecZnxSubInplace pub fn encrypt_sk<S, BE: Backend>(
+ VecZnxAddInplace &mut self,
+ VecZnxNormalizeInplace<B> module: &Module<BE>,
+ VecZnxAddNormal p: i64,
+ VecZnxNormalize<B> sk: &S,
+ VecZnxSub source_xa: &mut Source,
+ SvpPrepare<B> source_xe: &mut Source,
+ VecZnxSwitchRing scratch: &mut Scratch<BE>,
+ SvpPPolAllocBytes ) where
+ VecZnxAutomorphism, S: GLWESecretToRef,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>, Module<BE>: GGLWEAutomorphismKeyEncryptSk<BE>,
{ {
module.gglwe_automorphism_key_encrypt_sk(self, p, sk, source_xa, source_xe, scratch);
}
}
impl<BE: Backend> GGLWEAutomorphismKeyEncryptSk<BE> for Module<BE>
where
Module<BE>: VecZnxAddScalarInplace
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<BE>
+ VecZnxDftApply<BE>
+ SvpApplyDftToDftInplace<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<BE>
+ VecZnxAddNormal
+ VecZnxNormalize<BE>
+ VecZnxSub
+ SvpPrepare<BE>
+ VecZnxSwitchRing
+ SvpPPolBytesOf
+ VecZnxAutomorphism,
Scratch<BE>: ScratchAvailable,
{
fn gglwe_automorphism_key_encrypt_sk<A, B>(
&self,
res: &mut A,
p: i64,
sk: &B,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
A: AutomorphismKeyToMut,
B: GLWESecretToRef,
{
let res: &mut AutomorphismKey<&mut [u8]> = &mut res.to_mut();
let sk: &GLWESecret<&[u8]> = &sk.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
use crate::layouts::{GLWEInfos, LWEInfos}; use crate::layouts::{GLWEInfos, LWEInfos};
assert_eq!(self.n(), sk.n()); assert_eq!(res.n(), sk.n());
assert_eq!(self.rank_out(), self.rank_in()); assert_eq!(res.rank_out(), res.rank_in());
assert_eq!(sk.rank(), self.rank_out()); assert_eq!(sk.rank(), res.rank_out());
assert!( assert!(
scratch.available() >= GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self), scratch.available() >= AutomorphismKey::encrypt_sk_tmp_bytes(self, res),
"scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {:?}", "scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {:?}",
scratch.available(), scratch.available(),
GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self) AutomorphismKey::encrypt_sk_tmp_bytes(self, res)
) )
} }
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank()); let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
{ {
(0..self.rank_out().into()).for_each(|i| { (0..res.rank_out().into()).for_each(|i| {
module.vec_znx_automorphism( self.vec_znx_automorphism(
module.galois_element_inv(p), self.galois_element_inv(p),
&mut sk_out.data.as_vec_znx_mut(), &mut sk_out.data.as_vec_znx_mut(),
i, i,
&sk.data.as_vec_znx(), &sk.data.as_vec_znx(),
@@ -101,9 +140,9 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
}); });
} }
self.key res.key
.encrypt_sk(module, sk, &sk_out, source_xa, source_xe, scratch_1); .encrypt_sk(self, sk, &sk_out, source_xa, source_xe, scratch_1);
self.p = p; res.p = p;
} }
} }

View File

@@ -1,29 +1,28 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWEPt, encryption::glwe_ct::GLWEEncryptSk,
layouts::{GGLWECiphertext, GGLWEInfos, GLWECiphertext, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared}, layouts::{
GGLWE, GGLWEInfos, GGLWEToMut, GLWE, GLWEPlaintext, LWEInfos,
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},
},
}; };
impl GGLWECiphertext<Vec<u8>> { impl GGLWE<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{ {
GLWECiphertext::encrypt_sk_scratch_space(module, &infos.glwe_layout()) GLWE::encrypt_sk_tmp_bytes(module, &infos.glwe_layout())
+ (GLWEPlaintext::alloc_bytes(&infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes()) + (GLWEPlaintext::bytes_of_from_infos(module, &infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
} }
pub fn encrypt_pk_scratch_space<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize pub fn encrypt_pk_tmp_bytes<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
@@ -31,78 +30,88 @@ impl GGLWECiphertext<Vec<u8>> {
} }
} }
impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> { pub trait GGLWEEncryptSk<B: Backend> {
#[allow(clippy::too_many_arguments)] fn gglwe_encrypt_sk<R, P, S>(
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>( &self,
&mut self, res: &mut R,
module: &Module<B>, pt: &P,
pt: &ScalarZnx<DataPt>, sk: &S,
sk: &GLWESecretPrepared<DataSk, B>,
source_xa: &mut Source, source_xa: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxAddScalarInplace R: GGLWEToMut,
+ VecZnxDftAllocBytes P: ScalarZnxToRef,
+ VecZnxBigNormalize<B> S: GLWESecretPreparedToRef<B>;
+ VecZnxDftApply<B> }
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B> impl<B: Backend> GGLWEEncryptSk<B> for Module<B>
+ VecZnxNormalizeTmpBytes where
+ VecZnxFillUniform Module<B>: GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
+ VecZnxSubInplace Scratch<B>: ScratchAvailable,
+ VecZnxAddInplace {
+ VecZnxNormalizeInplace<B> fn gglwe_encrypt_sk<R, P, S>(
+ VecZnxAddNormal &self,
+ VecZnxNormalize<B> res: &mut R,
+ VecZnxSub, pt: &P,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGLWEToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>,
{ {
let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
let pt: &ScalarZnx<&[u8]> = &pt.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
use poulpy_hal::layouts::ZnxInfos; use poulpy_hal::layouts::ZnxInfos;
let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
assert_eq!( assert_eq!(
self.rank_in(), res.rank_in(),
pt.cols() as u32, pt.cols() as u32,
"self.rank_in(): {} != pt.cols(): {}", "res.rank_in(): {} != pt.cols(): {}",
self.rank_in(), res.rank_in(),
pt.cols() pt.cols()
); );
assert_eq!( assert_eq!(
self.rank_out(), res.rank_out(),
sk.rank(), sk.rank(),
"self.rank_out(): {} != sk.rank(): {}", "res.rank_out(): {} != sk.rank(): {}",
self.rank_out(), res.rank_out(),
sk.rank() sk.rank()
); );
assert_eq!(self.n(), sk.n()); assert_eq!(res.n(), sk.n());
assert_eq!(pt.n() as u32, sk.n()); assert_eq!(pt.n() as u32, sk.n());
assert!( assert!(
scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, self), scratch.available() >= GGLWE::encrypt_sk_tmp_bytes(self, res),
"scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}", "scratch.available: {} < GGLWECiphertext::encrypt_sk_tmp_bytes(self, res.rank()={}, res.size()={}): {}",
scratch.available(), scratch.available(),
self.rank_out(), res.rank_out(),
self.size(), res.size(),
GGLWECiphertext::encrypt_sk_scratch_space(module, self) GGLWE::encrypt_sk_tmp_bytes(self, res)
); );
assert!( assert!(
self.dnum().0 * self.dsize().0 * self.base2k().0 <= self.k().0, res.dnum().0 * res.dsize().0 * res.base2k().0 <= res.k().0,
"self.dnum() : {} * self.dsize() : {} * self.base2k() : {} = {} >= self.k() = {}", "res.dnum() : {} * res.dsize() : {} * res.base2k() : {} = {} >= res.k() = {}",
self.dnum(), res.dnum(),
self.dsize(), res.dsize(),
self.base2k(), res.base2k(),
self.dnum().0 * self.dsize().0 * self.base2k().0, res.dnum().0 * res.dsize().0 * res.base2k().0,
self.k() res.k()
); );
} }
let dnum: usize = self.dnum().into(); let dnum: usize = res.dnum().into();
let dsize: usize = self.dsize().into(); let dsize: usize = res.dsize().into();
let base2k: usize = self.base2k().into(); let base2k: usize = res.base2k().into();
let rank_in: usize = self.rank_in().into(); let rank_in: usize = res.rank_in().into();
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self); let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(res);
// For each input column (i.e. rank) produces a GGLWE ciphertext of rank_out+1 columns // For each input column (i.e. rank) produces a GGLWE ciphertext of rank_out+1 columns
// //
// Example for ksk rank 2 to rank 3: // Example for ksk rank 2 to rank 3:
@@ -114,17 +123,39 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
// //
// (-(a*s) + s0, a) // (-(a*s) + s0, a)
// (-(b*s) + s1, b) // (-(b*s) + s1, b)
(0..rank_in).for_each(|col_i| {
(0..dnum).for_each(|row_i| { for col_i in 0..rank_in {
for row_i in 0..dnum {
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
tmp_pt.data.zero(); // zeroes for next iteration tmp_pt.data.zero(); // zeroes for next iteration
module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + row_i * dsize, pt, col_i); self.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + row_i * dsize, pt, col_i);
module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1); self.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1);
self.glwe_encrypt_sk(
// rlwe encrypt of vec_znx_pt into vec_znx_ct &mut res.at_mut(row_i, col_i),
self.at_mut(row_i, col_i) &tmp_pt,
.encrypt_sk(module, &tmp_pt, sk, source_xa, source_xe, scrach_1); sk,
}); source_xa,
}); source_xe,
scrach_1,
);
}
}
}
}
impl<DataSelf: DataMut> GGLWE<DataSelf> {
#[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
pt: &ScalarZnx<DataPt>,
sk: &GLWESecretPrepared<DataSk, B>,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: GGLWEEncryptSk<B>,
{
module.gglwe_encrypt_sk(self, pt, sk, source_xa, source_xe, scratch);
} }
} }

View File

@@ -1,41 +1,37 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
VecZnxSubInplace, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch}, layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::layouts::{
TakeGLWESecretPrepared, GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, RingDegree, prepared::GLWESecretPrepared,
layouts::{
Degree, GGLWECiphertext, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, GLWESecret, LWEInfos, prepared::GLWESecretPrepared,
},
}; };
impl GGLWESwitchingKey<Vec<u8>> { impl GLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes, Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{ {
(GGLWECiphertext::encrypt_sk_scratch_space(module, infos) | ScalarZnx::alloc_bytes(module.n(), 1)) (GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+ ScalarZnx::alloc_bytes(module.n(), infos.rank_in().into()) + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
+ GLWESecretPrepared::alloc_bytes(module, &infos.glwe_layout()) + GLWESecretPrepared::bytes_of_from_infos(module, &infos.glwe_layout())
} }
pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
GGLWECiphertext::encrypt_pk_scratch_space(module, _infos) GGLWE::encrypt_pk_tmp_bytes(module, _infos)
} }
} }
impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> { impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataSkIn: DataRef, DataSkOut: DataRef, B: Backend>( pub fn encrypt_sk<DataSkIn: DataRef, DataSkOut: DataRef, B: Backend>(
&mut self, &mut self,
@@ -47,7 +43,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxAddScalarInplace Module<B>: VecZnxAddScalarInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
@@ -62,18 +58,18 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
+ VecZnxSub + VecZnxSub
+ SvpPrepare<B> + SvpPrepare<B>
+ VecZnxSwitchRing + VecZnxSwitchRing
+ SvpPPolAllocBytes, + SvpPPolBytesOf,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
assert!(sk_in.n().0 <= module.n() as u32); assert!(sk_in.n().0 <= module.n() as u32);
assert!(sk_out.n().0 <= module.n() as u32); assert!(sk_out.n().0 <= module.n() as u32);
assert!( assert!(
scratch.available() >= GGLWESwitchingKey::encrypt_sk_scratch_space(module, self), scratch.available() >= GLWESwitchingKey::encrypt_sk_tmp_bytes(module, self),
"scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}", "scratch.available()={} < GLWESwitchingKey::encrypt_sk_tmp_bytes={}",
scratch.available(), scratch.available(),
GGLWESwitchingKey::encrypt_sk_scratch_space(module, self) GLWESwitchingKey::encrypt_sk_tmp_bytes(module, self)
) )
} }
@@ -89,7 +85,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
); );
}); });
let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(Degree(n as u32), sk_out.rank()); let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(RingDegree(n as u32), sk_out.rank());
{ {
let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1); let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
(0..sk_out.rank().into()).for_each(|i| { (0..sk_out.rank().into()).for_each(|i| {

View File

@@ -1,39 +1,34 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes, VecZnxAddScalarInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VecZnxSubInplace, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, DataRef, Module, Scratch},
source::Source, source::Source,
}; };
use crate::{ use crate::layouts::{
TakeGLWESecret, TakeGLWESecretPrepared, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, Rank, RingDegree, TensorKey, prepared::GLWESecretPrepared,
layouts::{
Degree, GGLWEInfos, GGLWESwitchingKey, GGLWETensorKey, GLWEInfos, GLWESecret, LWEInfos, Rank,
prepared::{GLWESecretPrepared, Prepare},
},
}; };
impl GGLWETensorKey<Vec<u8>> { impl TensorKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
{ {
GLWESecretPrepared::alloc_bytes_with(module, infos.rank_out()) GLWESecretPrepared::bytes_of(module, infos.rank_out())
+ module.vec_znx_dft_alloc_bytes(infos.rank_out().into(), 1) + module.bytes_of_vec_znx_dft(infos.rank_out().into(), 1)
+ module.vec_znx_big_alloc_bytes(1, 1) + module.bytes_of_vec_znx_big(1, 1)
+ module.vec_znx_dft_alloc_bytes(1, 1) + module.bytes_of_vec_znx_dft(1, 1)
+ GLWESecret::alloc_bytes_with(Degree(module.n() as u32), Rank(1)) + GLWESecret::bytes_of(RingDegree(module.n() as u32), Rank(1))
+ GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
} }
} }
impl<DataSelf: DataMut> GGLWETensorKey<DataSelf> { impl<DataSelf: DataMut> TensorKey<DataSelf> {
pub fn encrypt_sk<DataSk: DataRef, B: Backend>( pub fn encrypt_sk<DataSk: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
@@ -45,7 +40,7 @@ impl<DataSelf: DataMut> GGLWETensorKey<DataSelf> {
Module<B>: SvpApplyDftToDft<B> Module<B>: SvpApplyDftToDft<B>
+ VecZnxIdftApplyTmpA<B> + VecZnxIdftApplyTmpA<B>
+ VecZnxAddScalarInplace + VecZnxAddScalarInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
@@ -60,9 +55,8 @@ impl<DataSelf: DataMut> GGLWETensorKey<DataSelf> {
+ VecZnxSub + VecZnxSub
+ SvpPrepare<B> + SvpPrepare<B>
+ VecZnxSwitchRing + VecZnxSwitchRing
+ SvpPPolAllocBytes, + SvpPPolBytesOf,
Scratch<B>: Scratch<B>:,
TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B> + TakeVecZnxBig<B>,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -70,7 +64,7 @@ impl<DataSelf: DataMut> GGLWETensorKey<DataSelf> {
assert_eq!(self.n(), sk.n()); assert_eq!(self.n(), sk.n());
} }
let n: Degree = sk.n(); let n: RingDegree = sk.n();
let rank: Rank = self.rank_out(); let rank: Rank = self.rank_out();
let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(n, rank); let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(n, rank);

View File

@@ -1,33 +1,112 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, ZnxZero},
VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, VecZnx, ZnxZero},
source::Source, source::Source,
}; };
use crate::{ use crate::{
TakeGLWEPt, SIGMA,
layouts::{GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GLWESecretPrepared}, encryption::glwe_ct::GLWEEncryptSkInternal,
layouts::{
GGSW, GGSWInfos, GGSWToMut, GLWE, GLWEInfos, LWEInfos,
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},
},
}; };
impl GGSWCiphertext<Vec<u8>> { impl GGSW<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGSWInfos, A: GGSWInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
{ {
let size = infos.size(); let size = infos.size();
GLWECiphertext::encrypt_sk_scratch_space(module, &infos.glwe_layout()) GLWE::encrypt_sk_tmp_bytes(module, &infos.glwe_layout())
+ VecZnx::alloc_bytes(module.n(), (infos.rank() + 1).into(), size) + VecZnx::bytes_of(module.n(), (infos.rank() + 1).into(), size)
+ VecZnx::alloc_bytes(module.n(), 1, size) + VecZnx::bytes_of(module.n(), 1, size)
+ module.vec_znx_dft_alloc_bytes((infos.rank() + 1).into(), size) + module.bytes_of_vec_znx_dft((infos.rank() + 1).into(), size)
} }
} }
impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> { pub trait GGSWEncryptSk<B: Backend> {
fn ggsw_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGSWToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>;
}
impl<B: Backend> GGSWEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>:,
{
fn ggsw_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GGSWToMut,
P: ScalarZnxToRef,
S: GLWESecretPreparedToRef<B>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let pt: &ScalarZnx<&[u8]> = &pt.to_ref();
let sk: &GLWESecretPrepared<&[u8], B> = &sk.to_ref();
#[cfg(debug_assertions)]
{
use poulpy_hal::layouts::ZnxInfos;
assert_eq!(res.rank(), sk.rank());
assert_eq!(res.n(), self.n() as u32);
assert_eq!(pt.n(), self.n());
assert_eq!(sk.n(), self.n() as u32);
}
let k: usize = res.k().into();
let base2k: usize = res.base2k().into();
let rank: usize = res.rank().into();
let dsize: usize = res.dsize().into();
let cols: usize = (rank + 1).into();
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&res.glwe_layout());
for row_i in 0..res.dnum().into() {
tmp_pt.data.zero();
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
self.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + row_i * dsize, pt, 0);
self.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);
for col_j in 0..rank + 1 {
self.glwe_encrypt_sk_internal(
base2k,
k,
res.at_mut(row_i, col_j).data_mut(),
cols,
false,
Some((&tmp_pt, col_j)),
sk,
source_xa,
source_xe,
SIGMA,
scratch_1,
);
}
}
}
}
impl<DataSelf: DataMut> GGSW<DataSelf> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>( pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self, &mut self,
@@ -38,56 +117,8 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxAddScalarInplace Module<B>: GGSWEncryptSk<B>,
+ VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
#[cfg(debug_assertions)] module.ggsw_encrypt_sk(self, pt, sk, source_xa, source_xe, scratch);
{
use poulpy_hal::layouts::ZnxInfos;
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n() as u32, sk.n());
}
let base2k: usize = self.base2k().into();
let rank: usize = self.rank().into();
let dsize: usize = self.dsize().into();
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&self.glwe_layout());
(0..self.dnum().into()).for_each(|row_i| {
tmp_pt.data.zero();
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (dsize - 1) + row_i * dsize, pt, 0);
module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);
(0..rank + 1).for_each(|col_j| {
// rlwe encrypt of vec_znx_pt into vec_znx_ct
self.at_mut(row_i, col_j).encrypt_sk_internal(
module,
Some((&tmp_pt, col_j)),
sk,
source_xa,
source_xe,
scratch_1,
);
});
});
} }
} }

View File

@@ -1,11 +1,11 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSvpPPol, ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace,
TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
}, },
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, ZnxInfos, ZnxZero}, layouts::{Backend, DataMut, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, VecZnxToMut, ZnxInfos, ZnxZero},
source::Source, source::Source,
}; };
@@ -13,157 +13,155 @@ use crate::{
dist::Distribution, dist::Distribution,
encryption::{SIGMA, SIGMA_BOUND}, encryption::{SIGMA, SIGMA_BOUND},
layouts::{ layouts::{
GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos, GLWE, GLWEInfos, GLWEPlaintext, GLWEPlaintextToRef, GLWEToMut, LWEInfos,
prepared::{GLWEPublicKeyPrepared, GLWESecretPrepared}, prepared::{GLWEPublicKeyPrepared, GLWEPublicKeyPreparedToRef, GLWESecretPrepared, GLWESecretPreparedToRef},
}, },
}; };
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes, Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
{ {
let size: usize = infos.size(); let size: usize = infos.size();
assert_eq!(module.n() as u32, infos.n()); assert_eq!(module.n() as u32, infos.n());
module.vec_znx_normalize_tmp_bytes() module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.bytes_of_vec_znx_dft(1, size)
+ 2 * VecZnx::alloc_bytes(module.n(), 1, size)
+ module.vec_znx_dft_alloc_bytes(1, size)
} }
pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
{ {
let size: usize = infos.size(); let size: usize = infos.size();
assert_eq!(module.n() as u32, infos.n()); assert_eq!(module.n() as u32, infos.n());
((module.vec_znx_dft_alloc_bytes(1, size) + module.vec_znx_big_alloc_bytes(1, size)) ((module.bytes_of_vec_znx_dft(1, size) + module.bytes_of_vec_znx_big(1, size)) | ScalarZnx::bytes_of(module.n(), 1))
| ScalarZnx::alloc_bytes(module.n(), 1)) + module.bytes_of_svp_ppol(1)
+ module.svp_ppol_alloc_bytes(1)
+ module.vec_znx_normalize_tmp_bytes() + module.vec_znx_normalize_tmp_bytes()
} }
} }
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> { impl<D: DataMut> GLWE<D> {
#[allow(clippy::too_many_arguments)] pub fn encrypt_sk<R, P, S, B: Backend>(
pub fn encrypt_sk<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
pt: &GLWEPlaintext<DataPt>, pt: &P,
sk: &GLWESecretPrepared<DataSk, B>, sk: &S,
source_xa: &mut Source, source_xa: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes P: GLWEPlaintextToRef,
+ VecZnxBigNormalize<B> S: GLWESecretPreparedToRef<B>,
+ VecZnxDftApply<B> Module<B>: GLWEEncryptSk<B>,
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
module.glwe_encrypt_sk(self, pt, sk, source_xa, source_xe, scratch);
}
pub fn encrypt_zero_sk<S, B: Backend>(
&mut self,
module: &Module<B>,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
S: GLWESecretPreparedToRef<B>,
Module<B>: GLWEEncryptZeroSk<B>,
{
module.glwe_encrypt_zero_sk(self, sk, source_xa, source_xe, scratch);
}
pub fn encrypt_pk<P, K, B: Backend>(
&mut self,
module: &Module<B>,
pt: &P,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>,
Module<B>: GLWEEncryptPk<B>,
{
module.glwe_encrypt_pk(self, pt, pk, source_xu, source_xe, scratch);
}
pub fn encrypt_zero_pk<K, B: Backend>(
&mut self,
module: &Module<B>,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
K: GLWEPublicKeyPreparedToRef<B>,
Module<B>: GLWEEncryptZeroPk<B>,
{
module.glwe_encrypt_zero_pk(self, pk, source_xu, source_xe, scratch);
}
}
pub trait GLWEEncryptSk<B: Backend> {
fn glwe_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>;
}
impl<B: Backend> GLWEEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Scratch<B>: ScratchAvailable,
{
fn glwe_encrypt_sk<R, P, S>(
&self,
res: &mut R,
pt: &P,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>,
{
let mut res: GLWE<&mut [u8]> = res.to_mut();
let pt: GLWEPlaintext<&[u8]> = pt.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
assert_eq!(self.rank(), sk.rank()); let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
assert_eq!(sk.n(), self.n()); assert_eq!(res.rank(), sk.rank());
assert_eq!(pt.n(), self.n()); assert_eq!(res.n(), self.n() as u32);
assert_eq!(sk.n(), self.n() as u32);
assert_eq!(pt.n(), self.n() as u32);
assert!( assert!(
scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self), scratch.available() >= GLWE::encrypt_sk_tmp_bytes(self, &res),
"scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}", "scratch.available(): {} < GLWECiphertext::encrypt_sk_tmp_bytes: {}",
scratch.available(), scratch.available(),
GLWECiphertext::encrypt_sk_scratch_space(module, self) GLWE::encrypt_sk_tmp_bytes(self, &res)
) )
} }
self.encrypt_sk_internal(module, Some((pt, 0)), sk, source_xa, source_xe, scratch); let cols: usize = (res.rank() + 1).into();
} self.glwe_encrypt_sk_internal(
res.base2k().into(),
pub fn encrypt_zero_sk<DataSk: DataRef, B: Backend>( res.k().into(),
&mut self, res.data_mut(),
module: &Module<B>,
sk: &GLWESecretPrepared<DataSk, B>,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.rank(), sk.rank());
assert_eq!(sk.n(), self.n());
assert!(
scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self),
"scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
scratch.available(),
GLWECiphertext::encrypt_sk_scratch_space(module, self)
)
}
self.encrypt_sk_internal(
module,
None::<(&GLWEPlaintext<Vec<u8>>, usize)>,
sk,
source_xa,
source_xe,
scratch,
);
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn encrypt_sk_internal<DataPt: DataRef, DataSk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
pt: Option<(&GLWEPlaintext<DataPt>, usize)>,
sk: &GLWESecretPrepared<DataSk, B>,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let cols: usize = (self.rank() + 1).into();
glwe_encrypt_sk_internal(
module,
self.base2k().into(),
self.k().into(),
&mut self.data,
cols, cols,
false, false,
pt, Some((&pt, 0)),
sk, sk,
source_xa, source_xa,
source_xe, source_xe,
@@ -171,46 +169,136 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch, scratch,
); );
} }
}
#[allow(clippy::too_many_arguments)] pub trait GLWEEncryptZeroSk<B: Backend> {
pub fn encrypt_pk<DataPt: DataRef, DataPk: DataRef, B: Backend>( fn glwe_encrypt_zero_sk<R, S>(
&mut self, &self,
module: &Module<B>, res: &mut R,
pt: &GLWEPlaintext<DataPt>, sk: &S,
pk: &GLWEPublicKeyPrepared<DataPk, B>, source_xa: &mut Source,
source_xu: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: SvpPrepare<B> R: GLWEToMut,
+ SvpApplyDftToDft<B> S: GLWESecretPreparedToRef<B>;
+ VecZnxIdftApplyConsume<B> }
+ VecZnxBigAddNormal<B>
+ VecZnxBigAddSmallInplace<B> impl<B: Backend> GLWEEncryptZeroSk<B> for Module<B>
+ VecZnxBigNormalize<B>, where
Scratch<B>: TakeSvpPPol<B> + TakeScalarZnx + TakeVecZnxDft<B>, Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Scratch<B>: ScratchAvailable,
{
fn glwe_encrypt_zero_sk<R, S>(
&self,
res: &mut R,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
S: GLWESecretPreparedToRef<B>,
{ {
self.encrypt_pk_internal::<DataPt, DataPk, B>(module, Some((pt, 0)), pk, source_xu, source_xe, scratch); let mut res: GLWE<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
assert_eq!(res.rank(), sk.rank());
assert_eq!(res.n(), self.n() as u32);
assert_eq!(sk.n(), self.n() as u32);
assert!(
scratch.available() >= GLWE::encrypt_sk_tmp_bytes(self, &res),
"scratch.available(): {} < GLWECiphertext::encrypt_sk_tmp_bytes: {}",
scratch.available(),
GLWE::encrypt_sk_tmp_bytes(self, &res)
)
}
let cols: usize = (res.rank() + 1).into();
self.glwe_encrypt_sk_internal(
res.base2k().into(),
res.k().into(),
res.data_mut(),
cols,
false,
None::<(&GLWEPlaintext<Vec<u8>>, usize)>,
sk,
source_xa,
source_xe,
SIGMA,
scratch,
);
} }
}
pub fn encrypt_zero_pk<DataPk: DataRef, B: Backend>( pub trait GLWEEncryptPk<B: Backend> {
&mut self, fn glwe_encrypt_pk<R, P, K>(
module: &Module<B>, &self,
pk: &GLWEPublicKeyPrepared<DataPk, B>, res: &mut R,
pt: &P,
pk: &K,
source_xu: &mut Source, source_xu: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: SvpPrepare<B> R: GLWEToMut,
+ SvpApplyDftToDft<B> P: GLWEPlaintextToRef,
+ VecZnxIdftApplyConsume<B> K: GLWEPublicKeyPreparedToRef<B>;
+ VecZnxBigAddNormal<B> }
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>, impl<B: Backend> GLWEEncryptPk<B> for Module<B>
Scratch<B>: TakeSvpPPol<B> + TakeScalarZnx + TakeVecZnxDft<B>, where
Module<B>: GLWEEncryptPkInternal<B>,
{
fn glwe_encrypt_pk<R, P, K>(
&self,
res: &mut R,
pt: &P,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>,
{ {
self.encrypt_pk_internal::<Vec<u8>, DataPk, B>( self.glwe_encrypt_pk_internal(res, Some((pt, 0)), pk, source_xu, source_xe, scratch);
module, }
}
pub trait GLWEEncryptZeroPk<B: Backend> {
fn glwe_encrypt_zero_pk<R, K>(
&self,
res: &mut R,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
K: GLWEPublicKeyPreparedToRef<B>;
}
impl<B: Backend> GLWEEncryptZeroPk<B> for Module<B>
where
Module<B>: GLWEEncryptPkInternal<B>,
{
fn glwe_encrypt_zero_pk<R, K>(
&self,
res: &mut R,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
K: GLWEPublicKeyPreparedToRef<B>,
{
self.glwe_encrypt_pk_internal(
res,
None::<(&GLWEPlaintext<Vec<u8>>, usize)>, None::<(&GLWEPlaintext<Vec<u8>>, usize)>,
pk, pk,
source_xu, source_xu,
@@ -218,45 +306,69 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch, scratch,
); );
} }
}
#[allow(clippy::too_many_arguments)] pub(crate) trait GLWEEncryptPkInternal<B: Backend> {
pub(crate) fn encrypt_pk_internal<DataPt: DataRef, DataPk: DataRef, B: Backend>( fn glwe_encrypt_pk_internal<R, P, K>(
&mut self, &self,
module: &Module<B>, res: &mut R,
pt: Option<(&GLWEPlaintext<DataPt>, usize)>, pt: Option<(&P, usize)>,
pk: &GLWEPublicKeyPrepared<DataPk, B>, pk: &K,
source_xu: &mut Source, source_xu: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: SvpPrepare<B> R: GLWEToMut,
+ SvpApplyDftToDft<B> P: GLWEPlaintextToRef,
+ VecZnxIdftApplyConsume<B> K: GLWEPublicKeyPreparedToRef<B>;
+ VecZnxBigAddNormal<B> }
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>, impl<B: Backend> GLWEEncryptPkInternal<B> for Module<B>
Scratch<B>: TakeSvpPPol<B> + TakeScalarZnx + TakeVecZnxDft<B>, where
Module<B>: SvpPrepare<B>
+ SvpApplyDftToDft<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddNormal<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>:,
{
fn glwe_encrypt_pk_internal<R, P, K>(
&self,
res: &mut R,
pt: Option<(&P, usize)>,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>,
{ {
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
let pk: &GLWEPublicKeyPrepared<&[u8], B> = &pk.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
assert_eq!(self.base2k(), pk.base2k()); assert_eq!(res.base2k(), pk.base2k());
assert_eq!(self.n(), pk.n()); assert_eq!(res.n(), pk.n());
assert_eq!(self.rank(), pk.rank()); assert_eq!(res.rank(), pk.rank());
if let Some((pt, _)) = pt { if let Some((pt, _)) = pt {
assert_eq!(pt.base2k(), pk.base2k()); assert_eq!(pt.to_ref().base2k(), pk.base2k());
assert_eq!(pt.n(), pk.n()); assert_eq!(pt.to_ref().n(), pk.n());
} }
} }
let base2k: usize = pk.base2k().into(); let base2k: usize = pk.base2k().into();
let size_pk: usize = pk.size(); let size_pk: usize = pk.size();
let cols: usize = (self.rank() + 1).into(); let cols: usize = (res.rank() + 1).into();
// Generates u according to the underlying secret distribution. // Generates u according to the underlying secret distribution.
let (mut u_dft, scratch_1) = scratch.take_svp_ppol(self.n().into(), 1); let (mut u_dft, scratch_1) = scratch.take_svp_ppol(res.n().into(), 1);
{ {
let (mut u, _) = scratch_1.take_scalar_znx(self.n().into(), 1); let (mut u, _) = scratch_1.take_scalar_znx(res.n().into(), 1);
match pk.dist { match pk.dist {
Distribution::NONE => panic!( Distribution::NONE => panic!(
"invalid public key: SecretDistribution::NONE, ensure it has been correctly intialized through \ "invalid public key: SecretDistribution::NONE, ensure it has been correctly intialized through \
@@ -270,20 +382,20 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
Distribution::ZERO => {} Distribution::ZERO => {}
} }
module.svp_prepare(&mut u_dft, 0, &u, 0); self.svp_prepare(&mut u_dft, 0, &u, 0);
} }
// ct[i] = pk[i] * u + ei (+ m if col = i) // ct[i] = pk[i] * u + ei (+ m if col = i)
(0..cols).for_each(|i| { (0..cols).for_each(|i| {
let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), 1, size_pk); let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(res.n().into(), 1, size_pk);
// ci_dft = DFT(u) * DFT(pk[i]) // ci_dft = DFT(u) * DFT(pk[i])
module.svp_apply_dft_to_dft(&mut ci_dft, 0, &u_dft, 0, &pk.data, i); self.svp_apply_dft_to_dft(&mut ci_dft, 0, &u_dft, 0, &pk.data, i);
// ci_big = u * p[i] // ci_big = u * p[i]
let mut ci_big = module.vec_znx_idft_apply_consume(ci_dft); let mut ci_big = self.vec_znx_idft_apply_consume(ci_dft);
// ci_big = u * pk[i] + e // ci_big = u * pk[i] + e
module.vec_znx_big_add_normal( self.vec_znx_big_add_normal(
base2k, base2k,
&mut ci_big, &mut ci_big,
0, 0,
@@ -297,31 +409,38 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
if let Some((pt, col)) = pt if let Some((pt, col)) = pt
&& col == i && col == i
{ {
module.vec_znx_big_add_small_inplace(&mut ci_big, 0, &pt.data, 0); self.vec_znx_big_add_small_inplace(&mut ci_big, 0, &pt.to_ref().data, 0);
} }
// ct[i] = norm(ci_big) // ct[i] = norm(ci_big)
module.vec_znx_big_normalize(base2k, &mut self.data, i, base2k, &ci_big, 0, scratch_2); self.vec_znx_big_normalize(base2k, &mut res.data, i, base2k, &ci_big, 0, scratch_2);
}); });
} }
} }
#[allow(clippy::too_many_arguments)] pub(crate) trait GLWEEncryptSkInternal<B: Backend> {
pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk: DataRef, B: Backend>( fn glwe_encrypt_sk_internal<R, P, S>(
module: &Module<B>, &self,
base2k: usize, base2k: usize,
k: usize, k: usize,
ct: &mut VecZnx<DataCt>, res: &mut R,
cols: usize, cols: usize,
compressed: bool, compressed: bool,
pt: Option<(&GLWEPlaintext<DataPt>, usize)>, pt: Option<(&P, usize)>,
sk: &GLWESecretPrepared<DataSk, B>, sk: &S,
source_xa: &mut Source, source_xa: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
sigma: f64, sigma: f64,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes R: VecZnxToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>;
}
impl<B: Backend> GLWEEncryptSkInternal<B> for Module<B>
where
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
@@ -334,74 +453,96 @@ pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk:
+ VecZnxAddNormal + VecZnxAddNormal
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxSub, + VecZnxSub,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] fn glwe_encrypt_sk_internal<R, P, S>(
&self,
base2k: usize,
k: usize,
res: &mut R,
cols: usize,
compressed: bool,
pt: Option<(&P, usize)>,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
sigma: f64,
scratch: &mut Scratch<B>,
) where
R: VecZnxToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>,
{ {
if compressed { let ct: &mut VecZnx<&mut [u8]> = &mut res.to_mut();
assert_eq!( let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
ct.cols(),
1,
"invalid ciphertext: compressed tag=true but #cols={} != 1",
ct.cols()
)
}
}
let size: usize = ct.size(); #[cfg(debug_assertions)]
{
let (mut c0, scratch_1) = scratch.take_vec_znx(ct.n(), 1, size); if compressed {
c0.zero(); assert_eq!(
ct.cols(),
{ 1,
let (mut ci, scratch_2) = scratch_1.take_vec_znx(ct.n(), 1, size); "invalid ciphertext: compressed tag=true but #cols={} != 1",
ct.cols()
// ct[i] = uniform )
// ct[0] -= c[i] * s[i],
(1..cols).for_each(|i| {
let col_ct: usize = if compressed { 0 } else { i };
// ct[i] = uniform (+ pt)
module.vec_znx_fill_uniform(base2k, ct, col_ct, source_xa);
let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(ct.n(), 1, size);
// ci = ct[i] - pt
// i.e. we act as we sample ct[i] already as uniform + pt
// and if there is a pt, then we subtract it before applying DFT
if let Some((pt, col)) = pt {
if i == col {
module.vec_znx_sub(&mut ci, 0, ct, col_ct, &pt.data, 0);
module.vec_znx_normalize_inplace(base2k, &mut ci, 0, scratch_3);
module.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, &ci, 0);
} else {
module.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, ct, col_ct);
}
} else {
module.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, ct, col_ct);
} }
}
module.svp_apply_dft_to_dft_inplace(&mut ci_dft, 0, &sk.data, i - 1); let size: usize = ct.size();
let ci_big: VecZnxBig<&mut [u8], B> = module.vec_znx_idft_apply_consume(ci_dft);
// use c[0] as buffer, which is overwritten later by the normalization step let (mut c0, scratch_1) = scratch.take_vec_znx(ct.n(), 1, size);
module.vec_znx_big_normalize(base2k, &mut ci, 0, base2k, &ci_big, 0, scratch_3); c0.zero();
// c0_tmp = -c[i] * s[i] (use c[0] as buffer) {
module.vec_znx_sub_inplace(&mut c0, 0, &ci, 0); let (mut ci, scratch_2) = scratch_1.take_vec_znx(ct.n(), 1, size);
});
// ct[i] = uniform
// ct[0] -= c[i] * s[i],
(1..cols).for_each(|i| {
let col_ct: usize = if compressed { 0 } else { i };
// ct[i] = uniform (+ pt)
self.vec_znx_fill_uniform(base2k, ct, col_ct, source_xa);
let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(ct.n(), 1, size);
// ci = ct[i] - pt
// i.e. we act as we sample ct[i] already as uniform + pt
// and if there is a pt, then we subtract it before applying DFT
if let Some((pt, col)) = pt {
if i == col {
self.vec_znx_sub(&mut ci, 0, ct, col_ct, &pt.to_ref().data, 0);
self.vec_znx_normalize_inplace(base2k, &mut ci, 0, scratch_3);
self.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, &ci, 0);
} else {
self.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, ct, col_ct);
}
} else {
self.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, ct, col_ct);
}
self.svp_apply_dft_to_dft_inplace(&mut ci_dft, 0, &sk.data, i - 1);
let ci_big: VecZnxBig<&mut [u8], B> = self.vec_znx_idft_apply_consume(ci_dft);
// use c[0] as buffer, which is overwritten later by the normalization step
self.vec_znx_big_normalize(base2k, &mut ci, 0, base2k, &ci_big, 0, scratch_3);
// c0_tmp = -c[i] * s[i] (use c[0] as buffer)
self.vec_znx_sub_inplace(&mut c0, 0, &ci, 0);
});
}
// c[0] += e
self.vec_znx_add_normal(base2k, &mut c0, 0, k, source_xe, sigma, SIGMA_BOUND);
// c[0] += m if col = 0
if let Some((pt, col)) = pt
&& col == 0
{
self.vec_znx_add_inplace(&mut c0, 0, &pt.to_ref().data, 0);
}
// c[0] = norm(c[0])
self.vec_znx_normalize(base2k, ct, 0, base2k, &c0, 0, scratch_1);
} }
// c[0] += e
module.vec_znx_add_normal(base2k, &mut c0, 0, k, source_xe, sigma, SIGMA_BOUND);
// c[0] += m if col = 0
if let Some((pt, col)) = pt
&& col == 0
{
module.vec_znx_add_inplace(&mut c0, 0, &pt.data, 0);
}
// c[0] = norm(c[0])
module.vec_znx_normalize(base2k, ct, 0, base2k, &c0, 0, scratch_1);
} }

View File

@@ -1,50 +1,43 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigNormalize,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
},
layouts::{Backend, DataMut, DataRef, Module, ScratchOwned}, layouts::{Backend, DataMut, DataRef, Module, ScratchOwned},
oep::{ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxDftImpl, TakeVecZnxImpl},
source::Source, source::Source,
}; };
use crate::layouts::{GLWECiphertext, GLWEPublicKey, prepared::GLWESecretPrepared}; use crate::{
encryption::glwe_ct::GLWEEncryptZeroSk,
layouts::{
GLWE, GLWEPublicKey, GLWEPublicKeyToMut,
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},
},
};
impl<D: DataMut> GLWEPublicKey<D> { pub trait GLWEPublicKeyGenerate<B: Backend> {
pub fn generate_from_sk<S: DataRef, B>( fn glwe_public_key_generate<R, S>(&self, res: &mut R, sk: &S, source_xa: &mut Source, source_xe: &mut Source)
&mut self, where
module: &Module<B>, R: GLWEPublicKeyToMut,
sk: &GLWESecretPrepared<S, B>, S: GLWESecretPreparedToRef<B>;
source_xa: &mut Source, }
source_xe: &mut Source,
) where impl<B: Backend> GLWEPublicKeyGenerate<B> for Module<B>
Module<B>:, where
Module<B>: VecZnxDftAllocBytes Module<B>: GLWEEncryptZeroSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
+ VecZnxBigNormalize<B> ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
+ VecZnxDftApply<B> {
+ SvpApplyDftToDftInplace<B> fn glwe_public_key_generate<R, S>(&self, res: &mut R, sk: &S, source_xa: &mut Source, source_xe: &mut Source)
+ VecZnxIdftApplyConsume<B> where
+ VecZnxNormalizeTmpBytes R: GLWEPublicKeyToMut,
+ VecZnxFillUniform S: GLWESecretPreparedToRef<B>,
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub,
B: Backend
+ ScratchOwnedAllocImpl<B>
+ ScratchOwnedBorrowImpl<B>
+ TakeVecZnxDftImpl<B>
+ ScratchAvailableImpl<B>
+ TakeVecZnxImpl<B>,
{ {
let res: &mut GLWEPublicKey<&mut [u8]> = &mut res.to_mut();
let sk: &GLWESecretPrepared<&[u8], B> = &sk.to_ref();
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
use crate::{Distribution, layouts::LWEInfos}; use crate::{Distribution, layouts::LWEInfos};
assert_eq!(self.n(), sk.n()); assert_eq!(res.n(), self.n() as u32);
assert_eq!(sk.n(), self.n() as u32);
if sk.dist == Distribution::NONE { if sk.dist == Distribution::NONE {
panic!("invalid sk: SecretDistribution::NONE") panic!("invalid sk: SecretDistribution::NONE")
@@ -52,10 +45,25 @@ impl<D: DataMut> GLWEPublicKey<D> {
} }
// Its ok to allocate scratch space here since pk is usually generated only once. // Its ok to allocate scratch space here since pk is usually generated only once.
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::encrypt_sk_scratch_space(module, self)); let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::encrypt_sk_tmp_bytes(self, res));
let mut tmp: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(self); let mut tmp: GLWE<Vec<u8>> = GLWE::alloc_from_infos(res);
tmp.encrypt_zero_sk(module, sk, source_xa, source_xe, scratch.borrow());
self.dist = sk.dist; tmp.encrypt_zero_sk(self, sk, source_xa, source_xe, scratch.borrow());
res.dist = sk.dist;
}
}
impl<D: DataMut> GLWEPublicKey<D> {
pub fn generate<S: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
sk: &GLWESecretPrepared<S, B>,
source_xa: &mut Source,
source_xe: &mut Source,
) where
Module<B>: GLWEPublicKeyGenerate<B>,
{
module.glwe_public_key_generate(self, sk, source_xa, source_xe);
} }
} }

View File

@@ -1,32 +1,30 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VecZnxSubInplace, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero}, layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
source::Source, source::Source,
}; };
use crate::{ use crate::layouts::{
TakeGLWESecret, TakeGLWESecretPrepared, GGLWEInfos, GLWESecret, GLWESwitchingKey, GLWEToLWESwitchingKey, LWEInfos, LWESecret, Rank, prepared::GLWESecretPrepared,
layouts::{GGLWEInfos, GGLWESwitchingKey, GLWESecret, GLWEToLWEKey, LWEInfos, LWESecret, Rank, prepared::GLWESecretPrepared},
}; };
impl GLWEToLWEKey<Vec<u8>> { impl GLWEToLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes, Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{ {
GLWESecretPrepared::alloc_bytes_with(module, infos.rank_in()) GLWESecretPrepared::bytes_of(module, infos.rank_in())
+ (GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + (GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
| GLWESecret::alloc_bytes_with(infos.n(), infos.rank_in()))
} }
} }
impl<D: DataMut> GLWEToLWEKey<D> { impl<D: DataMut> GLWEToLWESwitchingKey<D> {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn encrypt_sk<DLwe, DGlwe, B: Backend>( pub fn encrypt_sk<DLwe, DGlwe, B: Backend>(
&mut self, &mut self,
@@ -41,7 +39,7 @@ impl<D: DataMut> GLWEToLWEKey<D> {
DGlwe: DataRef, DGlwe: DataRef,
Module<B>: VecZnxAutomorphismInplace<B> Module<B>: VecZnxAutomorphismInplace<B>
+ VecZnxAddScalarInplace + VecZnxAddScalarInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
@@ -56,8 +54,8 @@ impl<D: DataMut> GLWEToLWEKey<D> {
+ VecZnxSub + VecZnxSub
+ SvpPrepare<B> + SvpPrepare<B>
+ VecZnxSwitchRing + VecZnxSwitchRing
+ SvpPPolAllocBytes, + SvpPPolBytesOf,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {

View File

@@ -7,10 +7,10 @@ use poulpy_hal::{
use crate::{ use crate::{
encryption::{SIGMA, SIGMA_BOUND}, encryption::{SIGMA, SIGMA_BOUND},
layouts::{LWECiphertext, LWEInfos, LWEPlaintext, LWESecret}, layouts::{LWE, LWEInfos, LWEPlaintext, LWESecret},
}; };
impl<DataSelf: DataMut> LWECiphertext<DataSelf> { impl<DataSelf: DataMut> LWE<DataSelf> {
pub fn encrypt_sk<DataPt, DataSk, B>( pub fn encrypt_sk<DataPt, DataSk, B>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,

View File

@@ -1,27 +1,24 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut}, layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
source::Source, source::Source,
}; };
use crate::{ use crate::layouts::{
TakeGLWESecret, TakeGLWESecretPrepared, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWESwitchingKey, Rank, RingDegree,
layouts::{ prepared::GLWESecretPrepared,
Degree, GGLWEInfos, GGLWESwitchingKey, GLWESecret, LWEInfos, LWESecret, LWESwitchingKey, Rank,
prepared::GLWESecretPrepared,
},
}; };
impl LWESwitchingKey<Vec<u8>> { impl LWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes, Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{ {
debug_assert_eq!( debug_assert_eq!(
infos.dsize().0, infos.dsize().0,
@@ -38,9 +35,9 @@ impl LWESwitchingKey<Vec<u8>> {
1, 1,
"rank_out > 1 is not supported for LWESwitchingKey" "rank_out > 1 is not supported for LWESwitchingKey"
); );
GLWESecret::alloc_bytes_with(Degree(module.n() as u32), Rank(1)) GLWESecret::bytes_of(RingDegree(module.n() as u32), Rank(1))
+ GLWESecretPrepared::alloc_bytes_with(module, Rank(1)) + GLWESecretPrepared::bytes_of(module, Rank(1))
+ GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
} }
} }
@@ -59,7 +56,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
DOut: DataRef, DOut: DataRef,
Module<B>: VecZnxAutomorphismInplace<B> Module<B>: VecZnxAutomorphismInplace<B>
+ VecZnxAddScalarInplace + VecZnxAddScalarInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
@@ -74,8 +71,8 @@ impl<D: DataMut> LWESwitchingKey<D> {
+ VecZnxSub + VecZnxSub
+ SvpPrepare<B> + SvpPrepare<B>
+ VecZnxSwitchRing + VecZnxSwitchRing
+ SvpPPolAllocBytes, + SvpPPolBytesOf,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>, Scratch<B>:,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {

View File

@@ -1,32 +1,29 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VecZnxSubInplace, VecZnxSwitchRing,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut}, layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
source::Source, source::Source,
}; };
use crate::{ use crate::layouts::{GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank, RingDegree};
TakeGLWESecret, TakeGLWESecretPrepared,
layouts::{Degree, GGLWEInfos, GGLWESwitchingKey, GLWESecret, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank},
};
impl LWEToGLWESwitchingKey<Vec<u8>> { impl LWEToGLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes, Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{ {
debug_assert_eq!( debug_assert_eq!(
infos.rank_in(), infos.rank_in(),
Rank(1), Rank(1),
"rank_in != 1 is not supported for LWEToGLWESwitchingKey" "rank_in != 1 is not supported for LWEToGLWESwitchingKey"
); );
GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos) GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
+ GLWESecret::alloc_bytes_with(Degree(module.n() as u32), infos.rank_in()) + GLWESecret::bytes_of(RingDegree(module.n() as u32), infos.rank_in())
} }
} }
@@ -45,7 +42,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
DGlwe: DataRef, DGlwe: DataRef,
Module<B>: VecZnxAutomorphismInplace<B> Module<B>: VecZnxAutomorphismInplace<B>
+ VecZnxAddScalarInplace + VecZnxAddScalarInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
@@ -60,8 +57,8 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
+ VecZnxSub + VecZnxSub
+ SvpPrepare<B> + SvpPrepare<B>
+ VecZnxSwitchRing + VecZnxSwitchRing
+ SvpPPolAllocBytes, + SvpPPolBytesOf,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {

View File

@@ -11,7 +11,5 @@ mod lwe_ct;
mod lwe_ksk; mod lwe_ksk;
mod lwe_to_glwe_ksk; mod lwe_to_glwe_ksk;
pub(crate) use glwe_ct::glwe_encrypt_sk_internal;
pub const SIGMA: f64 = 3.2; pub const SIGMA: f64 = 3.2;
pub(crate) const SIGMA_BOUND: f64 = 6.0 * SIGMA; pub(crate) const SIGMA_BOUND: f64 = 6.0 * SIGMA;

View File

@@ -1,83 +1,46 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, DataMut, Scratch};
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, use crate::{
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, ScratchTakeCore,
VmpApplyDftToDftTmpBytes, external_product::gglwe_ksk::GGLWEExternalProduct,
}, layouts::{AutomorphismKey, AutomorphismKeyToRef, GGLWEInfos, GGSWInfos, prepared::GGSWPreparedToRef},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
}; };
use crate::layouts::{GGLWEAutomorphismKey, GGLWEInfos, GGLWESwitchingKey, GGSWInfos, prepared::GGSWCiphertextPrepared}; impl AutomorphismKey<Vec<u8>> {
pub fn external_product_tmp_bytes<R, A, B, M, BE: Backend>(
impl GGLWEAutomorphismKey<Vec<u8>> { &self,
pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>( module: &M,
module: &Module<B>, res_infos: &R,
out_infos: &OUT, a_infos: &A,
in_infos: &IN, b_infos: &B,
ggsw_infos: &GGSW,
) -> usize ) -> usize
where where
OUT: GGLWEInfos, R: GGLWEInfos,
IN: GGLWEInfos, A: GGLWEInfos,
GGSW: GGSWInfos, B: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes, M: GGLWEExternalProduct<BE>,
{ {
GGLWESwitchingKey::external_product_scratch_space(module, out_infos, in_infos, ggsw_infos) module.gglwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
}
pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
module: &Module<B>,
out_infos: &OUT,
ggsw_infos: &GGSW,
) -> usize
where
OUT: GGLWEInfos,
GGSW: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
{
GGLWESwitchingKey::external_product_inplace_scratch_space(module, out_infos, ggsw_infos)
} }
} }
impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> { impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
pub fn external_product<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn external_product<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, M: GGLWEExternalProduct<BE>,
lhs: &GGLWEAutomorphismKey<DataLhs>, A: AutomorphismKeyToRef,
rhs: &GGSWCiphertextPrepared<DataRhs, B>, B: GGSWPreparedToRef<BE>,
scratch: &mut Scratch<B>, Scratch<BE>: ScratchTakeCore<BE>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
self.key.external_product(module, &lhs.key, rhs, scratch); module.gglwe_external_product(&mut self.key.key, &a.to_ref().key.key, b, scratch);
} }
pub fn external_product_inplace<DataRhs: DataRef, B: Backend>( pub fn external_product_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, M: GGLWEExternalProduct<BE>,
rhs: &GGSWCiphertextPrepared<DataRhs, B>, A: GGSWPreparedToRef<BE>,
scratch: &mut Scratch<B>, Scratch<BE>: ScratchTakeCore<BE>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
self.key.external_product_inplace(module, rhs, scratch); module.gglwe_external_product_inplace(&mut self.key.key, a, scratch);
} }
} }

View File

@@ -1,144 +1,134 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, DataMut, Module, Scratch, ZnxZero};
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, use crate::{
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, GLWEExternalProduct, ScratchTakeCore,
VmpApplyDftToDftTmpBytes, layouts::{
GGLWE, GGLWEInfos, GGLWEToMut, GGLWEToRef, GGSWInfos, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyToRef,
prepared::{GGSWPrepared, GGSWPreparedToRef},
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
}; };
use crate::layouts::{GGLWEInfos, GGLWESwitchingKey, GGSWInfos, GLWECiphertext, prepared::GGSWCiphertextPrepared}; pub trait GGLWEExternalProduct<BE: Backend>
where
impl GGLWESwitchingKey<Vec<u8>> { Self: GLWEExternalProduct<BE>,
pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>( {
module: &Module<B>, fn gglwe_external_product_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
out_infos: &OUT,
in_infos: &IN,
ggsw_infos: &GGSW,
) -> usize
where where
OUT: GGLWEInfos, R: GGLWEInfos,
IN: GGLWEInfos, A: GGLWEInfos,
GGSW: GGSWInfos, B: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
{ {
GLWECiphertext::external_product_scratch_space( self.glwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
module,
&out_infos.glwe_layout(),
&in_infos.glwe_layout(),
ggsw_infos,
)
} }
pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>( fn gglwe_external_product<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
module: &Module<B>,
out_infos: &OUT,
ggsw_infos: &GGSW,
) -> usize
where where
OUT: GGLWEInfos, R: GGLWEToMut,
GGSW: GGSWInfos, A: GGLWEToRef,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes, B: GGSWPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{ {
GLWECiphertext::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), ggsw_infos) let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
let a: &GGLWE<&[u8]> = &a.to_ref();
let b: &GGSWPrepared<&[u8], BE> = &b.to_ref();
assert_eq!(
res.rank_in(),
a.rank_in(),
"res input rank_in: {} != a input rank_in: {}",
res.rank_in(),
a.rank_in()
);
assert_eq!(
a.rank_out(),
b.rank(),
"a output rank_out: {} != b rank: {}",
a.rank_out(),
b.rank()
);
assert_eq!(
res.rank_out(),
b.rank(),
"res output rank_out: {} != b rank: {}",
res.rank_out(),
b.rank()
);
for row in 0..res.dnum().into() {
for col in 0..res.rank_in().into() {
self.glwe_external_product(&mut res.at_mut(row, col), &a.at(row, col), b, scratch);
}
}
for row in res.dnum().min(a.dnum()).into()..res.dnum().into() {
for col in 0..res.rank_in().into() {
res.at_mut(row, col).data_mut().zero();
}
}
}
fn gglwe_external_product_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
where
R: GGLWEToMut,
A: GGSWPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
let a: &GGSWPrepared<&[u8], BE> = &a.to_ref();
assert_eq!(
res.rank_out(),
a.rank(),
"res output rank: {} != a rank: {}",
res.rank_out(),
a.rank()
);
for row in 0..res.dnum().into() {
for col in 0..res.rank_in().into() {
self.glwe_external_product_inplace(&mut res.at_mut(row, col), a, scratch);
}
}
} }
} }
impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> { impl<BE: Backend> GGLWEExternalProduct<BE> for Module<BE> where Self: GLWEExternalProduct<BE> {}
pub fn external_product<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, impl GLWESwitchingKey<Vec<u8>> {
module: &Module<B>, pub fn external_product_tmp_bytes<R, A, B, M, BE: Backend>(
lhs: &GGLWESwitchingKey<DataLhs>, &self,
rhs: &GGSWCiphertextPrepared<DataRhs, B>, module: &M,
scratch: &mut Scratch<B>, res_infos: &R,
) where a_infos: &A,
Module<B>: VecZnxDftAllocBytes b_infos: &B,
+ VmpApplyDftToDftTmpBytes ) -> usize
+ VecZnxNormalizeTmpBytes where
+ VecZnxDftApply<B> R: GGLWEInfos,
+ VmpApplyDftToDft<B> A: GGLWEInfos,
+ VmpApplyDftToDftAdd<B> B: GGSWInfos,
+ VecZnxIdftApplyConsume<B> M: GGLWEExternalProduct<BE>,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
#[cfg(debug_assertions)] module.gglwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
{ }
use crate::layouts::GLWEInfos; }
assert_eq!( impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
self.rank_in(), pub fn external_product<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
lhs.rank_in(), where
"ksk_out input rank: {} != ksk_in input rank: {}", M: GGLWEExternalProduct<BE>,
self.rank_in(), A: GLWESwitchingKeyToRef,
lhs.rank_in() B: GGSWPreparedToRef<BE>,
); Scratch<BE>: ScratchTakeCore<BE>,
assert_eq!( {
lhs.rank_out(), module.gglwe_external_product(&mut self.key, &a.to_ref().key, b, scratch);
rhs.rank(), }
"ksk_in output rank: {} != ggsw rank: {}",
self.rank_out(), pub fn external_product_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
rhs.rank() where
); M: GGLWEExternalProduct<BE>,
assert_eq!( A: GGSWPreparedToRef<BE>,
self.rank_out(), Scratch<BE>: ScratchTakeCore<BE>,
rhs.rank(), {
"ksk_out output rank: {} != ggsw rank: {}", module.gglwe_external_product_inplace(&mut self.key, a, scratch);
self.rank_out(),
rhs.rank()
);
}
(0..self.rank_in().into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_j| {
self.at_mut(row_j, col_i)
.external_product(module, &lhs.at(row_j, col_i), rhs, scratch);
});
});
(self.dnum().min(lhs.dnum()).into()..self.dnum().into()).for_each(|row_i| {
(0..self.rank_in().into()).for_each(|col_j| {
self.at_mut(row_i, col_j).data.zero();
});
});
}
pub fn external_product_inplace<DataRhs: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
rhs: &GGSWCiphertextPrepared<DataRhs, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
use crate::layouts::GLWEInfos;
assert_eq!(
self.rank_out(),
rhs.rank(),
"ksk_out output rank: {} != ggsw rank: {}",
self.rank_out(),
rhs.rank()
);
}
(0..self.rank_in().into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_j| {
self.at_mut(row_j, col_i)
.external_product_inplace(module, rhs, scratch);
});
});
} }
} }

View File

@@ -1,143 +1,136 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::ScratchAvailable,
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, layouts::{Backend, DataMut, Module, Scratch, ZnxZero},
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
}; };
use crate::layouts::{GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, prepared::GGSWCiphertextPrepared}; use crate::{
GLWEExternalProduct, ScratchTakeCore,
layouts::{
GGSW, GGSWInfos, GGSWToMut, GGSWToRef, GLWEInfos, LWEInfos,
prepared::{GGSWPrepared, GGSWPreparedToRef},
},
};
impl GGSWCiphertext<Vec<u8>> { pub trait GGSWExternalProduct<BE: Backend>
#[allow(clippy::too_many_arguments)] where
pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>( Self: GLWEExternalProduct<BE>,
module: &Module<B>, {
out_infos: &OUT, fn ggsw_external_product_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
in_infos: &IN,
apply_infos: &GGSW,
) -> usize
where where
OUT: GGSWInfos, R: GGSWInfos,
IN: GGSWInfos, A: GGSWInfos,
GGSW: GGSWInfos, B: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
{ {
GLWECiphertext::external_product_scratch_space( self.glwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
module,
&out_infos.glwe_layout(),
&in_infos.glwe_layout(),
apply_infos,
)
} }
pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>( fn ggsw_external_product<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
module: &Module<B>,
out_infos: &OUT,
apply_infos: &GGSW,
) -> usize
where where
OUT: GGSWInfos, R: GGSWToMut,
GGSW: GGSWInfos, A: GGSWToRef,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes, B: GGSWPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{ {
GLWECiphertext::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), apply_infos) let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let a: &GGSW<&[u8]> = &a.to_ref();
let b: &GGSWPrepared<&[u8], BE> = &b.to_ref();
assert_eq!(
res.rank(),
a.rank(),
"res rank: {} != a rank: {}",
res.rank(),
a.rank()
);
assert_eq!(
res.rank(),
b.rank(),
"res rank: {} != b rank: {}",
res.rank(),
b.rank()
);
assert!(scratch.available() >= self.ggsw_external_product_tmp_bytes(res, a, b));
let min_dnum: usize = res.dnum().min(a.dnum()).into();
for row in 0..min_dnum {
for col in 0..(res.rank() + 1).into() {
self.glwe_external_product(&mut res.at_mut(row, col), &a.at(row, col), b, scratch);
}
}
for row in min_dnum..res.dnum().into() {
for col in 0..(res.rank() + 1).into() {
res.at_mut(row, col).data.zero();
}
}
}
fn ggsw_external_product_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
where
R: GGSWToMut,
A: GGSWPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let a: &GGSWPrepared<&[u8], BE> = &a.to_ref();
assert_eq!(res.n(), self.n() as u32);
assert_eq!(a.n(), self.n() as u32);
assert_eq!(
res.rank(),
a.rank(),
"res rank: {} != a rank: {}",
res.rank(),
a.rank()
);
for row in 0..res.dnum().into() {
for col in 0..(res.rank() + 1).into() {
self.glwe_external_product_inplace(&mut res.at_mut(row, col), a, scratch);
}
}
} }
} }
impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> { impl<BE: Backend> GGSWExternalProduct<BE> for Module<BE> where Self: GLWEExternalProduct<BE> {}
pub fn external_product<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self, impl GGSW<Vec<u8>> {
module: &Module<B>, pub fn external_product_tmp_bytes<R, A, B, M, BE: Backend>(
lhs: &GGSWCiphertext<DataLhs>, &self,
rhs: &GGSWCiphertextPrepared<DataRhs, B>, module: &M,
scratch: &mut Scratch<B>, res_infos: &R,
) where a_infos: &A,
Module<B>: VecZnxDftAllocBytes b_infos: &B,
+ VmpApplyDftToDftTmpBytes ) -> usize
+ VecZnxNormalizeTmpBytes where
+ VecZnxDftApply<B> R: GGSWInfos,
+ VmpApplyDftToDft<B> A: GGSWInfos,
+ VmpApplyDftToDftAdd<B> B: GGSWInfos,
+ VecZnxIdftApplyConsume<B> M: GGSWExternalProduct<BE>,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
#[cfg(debug_assertions)] module.ggsw_external_product_tmp_bytes(res_infos, a_infos, b_infos)
{ }
use crate::layouts::LWEInfos; }
assert_eq!(lhs.n(), self.n()); impl<DataSelf: DataMut> GGSW<DataSelf> {
assert_eq!(rhs.n(), self.n()); pub fn external_product<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
where
assert_eq!( M: GGSWExternalProduct<BE>,
self.rank(), A: GGSWToRef,
lhs.rank(), B: GGSWPreparedToRef<BE>,
"ggsw_out rank: {} != ggsw_in rank: {}", Scratch<BE>: ScratchTakeCore<BE>,
self.rank(), {
lhs.rank() module.ggsw_external_product(self, a, b, scratch);
); }
assert_eq!(
self.rank(), pub fn external_product_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
rhs.rank(), where
"ggsw_in rank: {} != ggsw_apply rank: {}", M: GGSWExternalProduct<BE>,
self.rank(), A: GGSWPreparedToRef<BE>,
rhs.rank() Scratch<BE>: ScratchTakeCore<BE>,
); {
module.ggsw_external_product_inplace(self, a, scratch);
assert!(scratch.available() >= GGSWCiphertext::external_product_scratch_space(module, self, lhs, rhs))
}
let min_dnum: usize = self.dnum().min(lhs.dnum()).into();
(0..(self.rank() + 1).into()).for_each(|col_i| {
(0..min_dnum).for_each(|row_j| {
self.at_mut(row_j, col_i)
.external_product(module, &lhs.at(row_j, col_i), rhs, scratch);
});
(min_dnum..self.dnum().into()).for_each(|row_i| {
self.at_mut(row_i, col_i).data.zero();
});
});
}
pub fn external_product_inplace<DataRhs: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
rhs: &GGSWCiphertextPrepared<DataRhs, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
use crate::layouts::LWEInfos;
assert_eq!(rhs.n(), self.n());
assert_eq!(
self.rank(),
rhs.rank(),
"ggsw_out rank: {} != ggsw_apply: {}",
self.rank(),
rhs.rank()
);
}
(0..(self.rank() + 1).into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_j| {
self.at_mut(row_j, col_i)
.external_product_inplace(module, rhs, scratch);
});
});
} }
} }

View File

@@ -1,102 +1,57 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, ModuleN, ScratchTakeBasic, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig}, layouts::{Backend, DataMut, DataViewMut, Module, Scratch, VecZnx, VecZnxBig},
}; };
use crate::{ use crate::{
GLWEExternalProduct, GLWEExternalProductInplace, ScratchTakeCore,
layouts::{ layouts::{
GGSWInfos, GLWECiphertext, GLWECiphertextToMut, GLWECiphertextToRef, GLWEInfos, LWEInfos, GGSWInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
prepared::{GGSWCiphertextPrepared, GGSWCiphertextPreparedToRef}, prepared::{GGSWPrepared, GGSWPreparedToRef},
}, },
}; };
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
#[allow(clippy::too_many_arguments)] pub fn external_product_tmp_bytes<R, A, B, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
apply_infos: &GGSW,
) -> usize
where where
OUT: GLWEInfos, R: GLWEInfos,
IN: GLWEInfos, A: GLWEInfos,
GGSW: GGSWInfos, B: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes, M: GLWEExternalProduct<BE>,
{ {
let in_size: usize = in_infos module.glwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
.k()
.div_ceil(apply_infos.base2k())
.div_ceil(apply_infos.dsize().into()) as usize;
let out_size: usize = out_infos.size();
let ggsw_size: usize = apply_infos.size();
let res_dft: usize = module.vec_znx_dft_alloc_bytes((apply_infos.rank() + 1).into(), ggsw_size);
let a_dft: usize = module.vec_znx_dft_alloc_bytes((apply_infos.rank() + 1).into(), in_size);
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
out_size,
in_size,
in_size, // rows
(apply_infos.rank() + 1).into(), // cols in
(apply_infos.rank() + 1).into(), // cols out
ggsw_size,
);
let normalize_big: usize = module.vec_znx_normalize_tmp_bytes();
if in_infos.base2k() == apply_infos.base2k() {
res_dft + a_dft + (vmp | normalize_big)
} else {
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), (apply_infos.rank() + 1).into(), in_size);
res_dft + ((a_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
}
}
pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
module: &Module<B>,
out_infos: &OUT,
apply_infos: &GGSW,
) -> usize
where
OUT: GLWEInfos,
GGSW: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
{
Self::external_product_scratch_space(module, out_infos, out_infos, apply_infos)
} }
} }
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> { impl<DataSelf: DataMut> GLWE<DataSelf> {
pub fn external_product<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn external_product<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, A: GLWEToRef,
lhs: &GLWECiphertext<DataLhs>, B: GGSWPreparedToRef<BE>,
rhs: &GGSWCiphertextPrepared<DataRhs, B>, M: GLWEExternalProduct<BE>,
scratch: &mut Scratch<B>, Scratch<BE>: ScratchTakeCore<BE>,
) where
Module<B>: GLWEExternalProduct<B>,
{ {
module.external_product(self, lhs, rhs, scratch); module.glwe_external_product(self, a, b, scratch);
} }
pub fn external_product_inplace<DataRhs: DataRef, B: Backend>( pub fn external_product_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, A: GGSWPreparedToRef<BE>,
rhs: &GGSWCiphertextPrepared<DataRhs, B>, M: GLWEExternalProduct<BE>,
scratch: &mut Scratch<B>, Scratch<BE>: ScratchTakeCore<BE>,
) where
Module<B>: GLWEExternalProductInplace<B>,
{ {
module.external_product_inplace(self, rhs, scratch); module.glwe_external_product_inplace(self, a, scratch);
} }
} }
impl<BE: Backend> GLWEExternalProductInplace<BE> for Module<BE> pub trait GLWEExternalProduct<BE: Backend>
where where
Module<BE>: VecZnxDftAllocBytes Self: Sized
+ ModuleN
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxDftApply<BE> + VecZnxDftApply<BE>
@@ -105,15 +60,47 @@ where
+ VecZnxIdftApplyConsume<BE> + VecZnxIdftApplyConsume<BE>
+ VecZnxBigNormalize<BE> + VecZnxBigNormalize<BE>
+ VecZnxNormalize<BE>, + VecZnxNormalize<BE>,
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx,
{ {
fn external_product_inplace<R, D>(&self, res: &mut R, ggsw: &D, scratch: &mut Scratch<BE>) fn glwe_external_product_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
where where
R: GLWECiphertextToMut, R: GLWEInfos,
D: GGSWCiphertextPreparedToRef<BE>, A: GLWEInfos,
B: GGSWInfos,
{ {
let res: &mut GLWECiphertext<&mut [u8]> = &mut res.to_mut(); let in_size: usize = a_infos
let rhs: &GGSWCiphertextPrepared<&[u8], BE> = &ggsw.to_ref(); .k()
.div_ceil(b_infos.base2k())
.div_ceil(b_infos.dsize().into()) as usize;
let out_size: usize = res_infos.size();
let ggsw_size: usize = b_infos.size();
let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank() + 1).into(), ggsw_size);
let a_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank() + 1).into(), in_size);
let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
out_size,
in_size,
in_size, // rows
(b_infos.rank() + 1).into(), // cols in
(b_infos.rank() + 1).into(), // cols out
ggsw_size,
);
let normalize_big: usize = self.vec_znx_normalize_tmp_bytes();
if a_infos.base2k() == b_infos.base2k() {
res_dft + a_dft + (vmp | normalize_big)
} else {
let normalize_conv: usize = VecZnx::bytes_of(self.n(), (b_infos.rank() + 1).into(), in_size);
res_dft + ((a_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
}
}
fn glwe_external_product_inplace<R, D>(&self, res: &mut R, a: &D, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
D: GGSWPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
let rhs: &GGSWPrepared<&[u8], BE> = &a.to_ref();
let basek_in: usize = res.base2k().into(); let basek_in: usize = res.base2k().into();
let basek_ggsw: usize = rhs.base2k().into(); let basek_ggsw: usize = rhs.base2k().into();
@@ -124,15 +111,15 @@ where
assert_eq!(rhs.rank(), res.rank()); assert_eq!(rhs.rank(), res.rank());
assert_eq!(rhs.n(), res.n()); assert_eq!(rhs.n(), res.n());
assert!(scratch.available() >= GLWECiphertext::external_product_inplace_scratch_space(self, res, rhs)); assert!(scratch.available() >= self.glwe_external_product_tmp_bytes(res, res, rhs));
} }
let cols: usize = (rhs.rank() + 1).into(); let cols: usize = (rhs.rank() + 1).into();
let dsize: usize = rhs.dsize().into(); let dsize: usize = rhs.dsize().into();
let a_size: usize = (res.size() * basek_in).div_ceil(basek_ggsw); let a_size: usize = (res.size() * basek_in).div_ceil(basek_ggsw);
let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(res.n().into(), cols, rhs.size()); // Todo optimise let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(res.n().into(), cols, a_size.div_ceil(dsize)); let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, a_size.div_ceil(dsize));
a_dft.data_mut().fill(0); a_dft.data_mut().fill(0);
if basek_in == basek_ggsw { if basek_in == basek_ggsw {
@@ -160,7 +147,7 @@ where
} }
} }
} else { } else {
let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n(), cols, a_size); let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self, cols, a_size);
for j in 0..cols { for j in 0..cols {
self.vec_znx_normalize( self.vec_znx_normalize(
@@ -213,31 +200,18 @@ where
); );
} }
} }
}
impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE> fn glwe_external_product<R, A, D>(&self, res: &mut R, lhs: &A, rhs: &D, scratch: &mut Scratch<BE>)
where
Module<BE>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<BE>
+ VmpApplyDftToDft<BE>
+ VmpApplyDftToDftAdd<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxNormalize<BE>,
Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx,
{
fn external_product<R, A, D>(&self, res: &mut R, lhs: &A, rhs: &D, scratch: &mut Scratch<BE>)
where where
R: GLWECiphertextToMut, R: GLWEToMut,
A: GLWECiphertextToRef, A: GLWEToRef,
D: GGSWCiphertextPreparedToRef<BE>, D: GGSWPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{ {
let res: &mut GLWECiphertext<&mut [u8]> = &mut res.to_mut(); let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
let lhs: &GLWECiphertext<&[u8]> = &lhs.to_ref(); let lhs: &GLWE<&[u8]> = &lhs.to_ref();
let rhs: &GGSWCiphertextPrepared<&[u8], BE> = &rhs.to_ref(); let rhs: &GGSWPrepared<&[u8], BE> = &rhs.to_ref();
let basek_in: usize = lhs.base2k().into(); let basek_in: usize = lhs.base2k().into();
let basek_ggsw: usize = rhs.base2k().into(); let basek_ggsw: usize = rhs.base2k().into();
@@ -251,7 +225,7 @@ where
assert_eq!(rhs.rank(), res.rank()); assert_eq!(rhs.rank(), res.rank());
assert_eq!(rhs.n(), res.n()); assert_eq!(rhs.n(), res.n());
assert_eq!(lhs.n(), res.n()); assert_eq!(lhs.n(), res.n());
assert!(scratch.available() >= GLWECiphertext::external_product_scratch_space(self, res, lhs, rhs)); assert!(scratch.available() >= self.glwe_external_product_tmp_bytes(res, lhs, rhs));
} }
let cols: usize = (rhs.rank() + 1).into(); let cols: usize = (rhs.rank() + 1).into();
@@ -259,8 +233,8 @@ where
let a_size: usize = (lhs.size() * basek_in).div_ceil(basek_ggsw); let a_size: usize = (lhs.size() * basek_in).div_ceil(basek_ggsw);
let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), cols, rhs.size()); // Todo optimise let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), cols, a_size.div_ceil(dsize)); let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, a_size.div_ceil(dsize));
a_dft.data_mut().fill(0); a_dft.data_mut().fill(0);
if basek_in == basek_ggsw { if basek_in == basek_ggsw {
@@ -288,7 +262,7 @@ where
} }
} }
} else { } else {
let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n(), cols, a_size); let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self, cols, a_size);
for j in 0..cols { for j in 0..cols {
self.vec_znx_normalize( self.vec_znx_normalize(
@@ -342,3 +316,20 @@ where
}); });
} }
} }
impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE> where
Self: ModuleN
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<BE>
+ VmpApplyDftToDft<BE>
+ VmpApplyDftToDftAdd<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxNormalize<BE>
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxNormalizeTmpBytes
{
}

View File

@@ -1,23 +1,8 @@
use poulpy_hal::layouts::{Backend, Scratch};
use crate::layouts::{GLWECiphertextToMut, GLWECiphertextToRef, prepared::GGSWCiphertextPreparedToRef};
mod gglwe_atk; mod gglwe_atk;
mod gglwe_ksk; mod gglwe_ksk;
mod ggsw_ct; mod ggsw_ct;
mod glwe_ct; mod glwe_ct;
pub trait GLWEExternalProduct<BE: Backend> { pub use gglwe_ksk::*;
fn external_product<R, A, D>(&self, res: &mut R, a: &A, ggsw: &D, scratch: &mut Scratch<BE>) pub use ggsw_ct::*;
where pub use glwe_ct::*;
R: GLWECiphertextToMut,
A: GLWECiphertextToRef,
D: GGSWCiphertextPreparedToRef<BE>;
}
pub trait GLWEExternalProductInplace<BE: Backend> {
fn external_product_inplace<R, D>(&self, res: &mut R, ggsw: &D, scratch: &mut Scratch<BE>)
where
R: GLWECiphertextToMut,
D: GGSWCiphertextPreparedToRef<BE>;
}

View File

@@ -2,18 +2,18 @@ use std::collections::HashMap;
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, ScratchAvailable, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace,
VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch}, layouts::{Backend, DataMut, DataRef, Module, Scratch},
}; };
use crate::{ use crate::{
GLWEOperations, TakeGLWECt, GLWEOperations,
layouts::{GGLWEInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared}, layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared},
}; };
/// [GLWEPacker] enables only the fly GLWE packing /// [GLWEPacker] enables only the fly GLWE packing
@@ -29,7 +29,7 @@ pub struct GLWEPacker {
/// [Accumulator] stores intermediate packing result. /// [Accumulator] stores intermediate packing result.
/// There are Log(N) such accumulators in a [GLWEPacker]. /// There are Log(N) such accumulators in a [GLWEPacker].
struct Accumulator { struct Accumulator {
data: GLWECiphertext<Vec<u8>>, data: GLWE<Vec<u8>>,
value: bool, // Implicit flag for zero ciphertext value: bool, // Implicit flag for zero ciphertext
control: bool, // Can be combined with incoming value control: bool, // Can be combined with incoming value
} }
@@ -43,12 +43,12 @@ impl Accumulator {
/// * `base2k`: base 2 logarithm of the GLWE ciphertext in memory digit representation. /// * `base2k`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
/// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus. /// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus.
/// * `rank`: rank of the GLWE ciphertext. /// * `rank`: rank of the GLWE ciphertext.
pub fn alloc<A>(infos: &A) -> Self pub fn alloc<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
where where
A: GLWEInfos, A: GLWEInfos,
{ {
Self { Self {
data: GLWECiphertext::alloc(infos), data: GLWE::alloc_from_infos(module, infos),
value: false, value: false,
control: false, control: false,
} }
@@ -66,13 +66,13 @@ impl GLWEPacker {
/// and N GLWE ciphertext can be packed. With `log_batch=2` all coefficients /// and N GLWE ciphertext can be packed. With `log_batch=2` all coefficients
/// which are multiples of X^{N/4} are packed. Meaning that N/4 ciphertexts /// which are multiples of X^{N/4} are packed. Meaning that N/4 ciphertexts
/// can be packed. /// can be packed.
pub fn new<A>(infos: &A, log_batch: usize) -> Self pub fn new<A, B: Backend>(module: Module<B>, infos: &A, log_batch: usize) -> Self
where where
A: GLWEInfos, A: GLWEInfos,
{ {
let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new(); let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
let log_n: usize = infos.n().log2(); let log_n: usize = infos.n().log2();
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(infos))); (0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(module, infos)));
Self { Self {
accumulators, accumulators,
log_batch, log_batch,
@@ -90,17 +90,17 @@ impl GLWEPacker {
} }
/// Number of scratch space bytes required to call [Self::add]. /// Number of scratch space bytes required to call [Self::add].
pub fn scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize pub fn tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where where
OUT: GLWEInfos, OUT: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
pack_core_scratch_space(module, out_infos, key_infos) pack_core_tmp_bytes(module, out_infos, key_infos)
} }
pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> { pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
GLWECiphertext::trace_galois_elements(module) GLWE::trace_galois_elements(module)
} }
/// Adds a GLWE ciphertext to the [GLWEPacker]. /// Adds a GLWE ciphertext to the [GLWEPacker].
@@ -111,15 +111,15 @@ impl GLWEPacker {
/// of packed ciphertexts reaches N/2^log_batch is a result written. /// of packed ciphertexts reaches N/2^log_batch is a result written.
/// * `a`: ciphertext to pack. Can optionally give None to pack a 0 ciphertext. /// * `a`: ciphertext to pack. Can optionally give None to pack a 0 ciphertext.
/// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s. /// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s.
/// * `scratch`: scratch space of size at least [Self::scratch_space]. /// * `scratch`: scratch space of size at least [Self::tmp_bytes].
pub fn add<DataA: DataRef, DataAK: DataRef, B: Backend>( pub fn add<DataA: DataRef, DataAK: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
a: Option<&GLWECiphertext<DataA>>, a: Option<&GLWE<DataA>>,
auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<DataAK, B>>, auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -142,7 +142,7 @@ impl GLWEPacker {
+ VecZnxBigAutomorphismInplace<B> + VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
assert!( assert!(
(self.counter as u32) < self.accumulators[0].data.n(), (self.counter as u32) < self.accumulators[0].data.n(),
@@ -162,7 +162,7 @@ impl GLWEPacker {
} }
/// Flush result to`res`. /// Flush result to`res`.
pub fn flush<Data: DataMut, B: Backend>(&mut self, module: &Module<B>, res: &mut GLWECiphertext<Data>) pub fn flush<Data: DataMut, B: Backend>(&mut self, module: &Module<B>, res: &mut GLWE<Data>)
where where
Module<B>: VecZnxCopy, Module<B>: VecZnxCopy,
{ {
@@ -177,24 +177,24 @@ impl GLWEPacker {
} }
} }
fn pack_core_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize fn pack_core_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where where
OUT: GLWEInfos, OUT: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
combine_scratch_space(module, out_infos, key_infos) combine_tmp_bytes(module, out_infos, key_infos)
} }
fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>( fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
module: &Module<B>, module: &Module<B>,
a: Option<&GLWECiphertext<D>>, a: Option<&GLWE<D>>,
accumulators: &mut [Accumulator], accumulators: &mut [Accumulator],
i: usize, i: usize,
auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<DataAK, B>>, auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -217,7 +217,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
+ VecZnxBigAutomorphismInplace<B> + VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
let log_n: usize = module.log_n(); let log_n: usize = module.log_n();
@@ -258,7 +258,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
} else { } else {
pack_core( pack_core(
module, module,
None::<&GLWECiphertext<Vec<u8>>>, None::<&GLWE<Vec<u8>>>,
acc_next, acc_next,
i + 1, i + 1,
auto_keys, auto_keys,
@@ -268,27 +268,26 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
} }
} }
fn combine_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize fn combine_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where where
OUT: GLWEInfos, OUT: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
GLWECiphertext::alloc_bytes(out_infos) GLWE::bytes_of_from_infos(module, out_infos)
+ (GLWECiphertext::rsh_scratch_space(module.n()) + (GLWE::rsh_tmp_bytes(module.n()) | GLWE::automorphism_inplace_tmp_bytes(module, out_infos, key_infos))
| GLWECiphertext::automorphism_inplace_scratch_space(module, out_infos, key_infos))
} }
/// [combine] merges two ciphertexts together. /// [combine] merges two ciphertexts together.
fn combine<D: DataRef, DataAK: DataRef, B: Backend>( fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
module: &Module<B>, module: &Module<B>,
acc: &mut Accumulator, acc: &mut Accumulator,
b: Option<&GLWECiphertext<D>>, b: Option<&GLWE<D>>,
i: usize, i: usize,
auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<DataAK, B>>, auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -311,10 +310,10 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
+ VecZnxBigAutomorphismInplace<B> + VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalize<B> + VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes, + VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWECt, Scratch<B>: ScratchAvailable,
{ {
let log_n: usize = acc.data.n().log2(); let log_n: usize = acc.data.n().log2();
let a: &mut GLWECiphertext<Vec<u8>> = &mut acc.data; let a: &mut GLWE<Vec<u8>> = &mut acc.data;
let gal_el: i64 = if i == 0 { let gal_el: i64 = if i == 0 {
-1 -1
@@ -395,9 +394,9 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
/// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)] /// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)]
pub fn glwe_packing<D: DataMut, ATK, B: Backend>( pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
module: &Module<B>, module: &Module<B>,
cts: &mut HashMap<usize, &mut GLWECiphertext<D>>, cts: &mut HashMap<usize, &mut GLWE<D>>,
log_gap_out: usize, log_gap_out: usize,
auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<ATK, B>>, auto_keys: &HashMap<i64, AutomorphismKeyPrepared<ATK, B>>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
ATK: DataRef, ATK: DataRef,
@@ -414,7 +413,7 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
+ VecZnxNegateInplace + VecZnxNegateInplace
+ VecZnxCopy + VecZnxCopy
+ VecZnxSubInplace + VecZnxSubInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -427,7 +426,7 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
+ VecZnxBigSubSmallNegateInplace<B> + VecZnxBigSubSmallNegateInplace<B>
+ VecZnxRotate + VecZnxRotate
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable, Scratch<B>: ScratchAvailable,
{ {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
@@ -439,15 +438,15 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
(0..log_n - log_gap_out).for_each(|i| { (0..log_n - log_gap_out).for_each(|i| {
let t: usize = (1 << log_n).min(1 << (log_n - 1 - i)); let t: usize = (1 << log_n).min(1 << (log_n - 1 - i));
let auto_key: &GGLWEAutomorphismKeyPrepared<ATK, B> = if i == 0 { let auto_key: &AutomorphismKeyPrepared<ATK, B> = if i == 0 {
auto_keys.get(&-1).unwrap() auto_keys.get(&-1).unwrap()
} else { } else {
auto_keys.get(&module.galois_element(1 << (i - 1))).unwrap() auto_keys.get(&module.galois_element(1 << (i - 1))).unwrap()
}; };
(0..t).for_each(|j| { (0..t).for_each(|j| {
let mut a: Option<&mut GLWECiphertext<D>> = cts.remove(&j); let mut a: Option<&mut GLWE<D>> = cts.remove(&j);
let mut b: Option<&mut GLWECiphertext<D>> = cts.remove(&(j + t)); let mut b: Option<&mut GLWE<D>> = cts.remove(&(j + t));
pack_internal(module, &mut a, &mut b, i, auto_key, scratch); pack_internal(module, &mut a, &mut b, i, auto_key, scratch);
@@ -463,10 +462,10 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>( fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
module: &Module<B>, module: &Module<B>,
a: &mut Option<&mut GLWECiphertext<A>>, a: &mut Option<&mut GLWE<A>>,
b: &mut Option<&mut GLWECiphertext<D>>, b: &mut Option<&mut GLWE<D>>,
i: usize, i: usize,
auto_key: &GGLWEAutomorphismKeyPrepared<DataAK, B>, auto_key: &AutomorphismKeyPrepared<DataAK, B>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxRotateInplace<B> Module<B>: VecZnxRotateInplace<B>
@@ -481,7 +480,7 @@ fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
+ VecZnxNegateInplace + VecZnxNegateInplace
+ VecZnxCopy + VecZnxCopy
+ VecZnxSubInplace + VecZnxSubInplace
+ VecZnxDftAllocBytes + VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -494,7 +493,7 @@ fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
+ VecZnxBigSubSmallNegateInplace<B> + VecZnxBigSubSmallNegateInplace<B>
+ VecZnxRotate + VecZnxRotate
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable, Scratch<B>: ScratchAvailable,
{ {
// Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t)) // Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t))
// We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g) // We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)

View File

@@ -2,22 +2,19 @@ use std::collections::HashMap;
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize, ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
VecZnxNormalizeTmpBytes, VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx}, layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx},
}; };
use crate::{ use crate::{
TakeGLWECt, layouts::{Base2K, GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWEInfos, prepared::AutomorphismKeyPrepared},
layouts::{
Base2K, GGLWEInfos, GLWECiphertext, GLWECiphertextLayout, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared,
},
operations::GLWEOperations, operations::GLWEOperations,
}; };
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
pub fn trace_galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> { pub fn trace_galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
let mut gal_els: Vec<i64> = Vec::new(); let mut gal_els: Vec<i64> = Vec::new();
(0..module.log_n()).for_each(|i| { (0..module.log_n()).for_each(|i| {
@@ -30,21 +27,16 @@ impl GLWECiphertext<Vec<u8>> {
gal_els gal_els
} }
pub fn trace_scratch_space<B: Backend, OUT, IN, KEY>( pub fn trace_tmp_bytes<B: Backend, OUT, IN, KEY>(module: &Module<B>, out_infos: &OUT, in_infos: &IN, key_infos: &KEY) -> usize
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
key_infos: &KEY,
) -> usize
where where
OUT: GLWEInfos, OUT: GLWEInfos,
IN: GLWEInfos, IN: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
let trace: usize = Self::automorphism_inplace_scratch_space(module, out_infos, key_infos); let trace: usize = Self::automorphism_inplace_tmp_bytes(module, out_infos, key_infos);
if in_infos.base2k() != key_infos.base2k() { if in_infos.base2k() != key_infos.base2k() {
let glwe_conv: usize = VecZnx::alloc_bytes( let glwe_conv: usize = VecZnx::bytes_of(
module.n(), module.n(),
(key_infos.rank_out() + 1).into(), (key_infos.rank_out() + 1).into(),
out_infos.k().min(in_infos.k()).div_ceil(key_infos.base2k()) as usize, out_infos.k().min(in_infos.k()).div_ceil(key_infos.base2k()) as usize,
@@ -55,27 +47,27 @@ impl GLWECiphertext<Vec<u8>> {
trace trace
} }
pub fn trace_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize pub fn trace_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where where
OUT: GLWEInfos, OUT: GLWEInfos,
KEY: GGLWEInfos, KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{ {
Self::trace_scratch_space(module, out_infos, out_infos, key_infos) Self::trace_tmp_bytes(module, out_infos, out_infos, key_infos)
} }
} }
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> { impl<DataSelf: DataMut> GLWE<DataSelf> {
pub fn trace<DataLhs: DataRef, DataAK: DataRef, B: Backend>( pub fn trace<DataLhs: DataRef, DataAK: DataRef, B: Backend>(
&mut self, &mut self,
module: &Module<B>, module: &Module<B>,
start: usize, start: usize,
end: usize, end: usize,
lhs: &GLWECiphertext<DataLhs>, lhs: &GLWE<DataLhs>,
auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<DataAK, B>>, auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -89,7 +81,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxCopy + VecZnxCopy
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
self.copy(module, lhs); self.copy(module, lhs);
self.trace_inplace(module, start, end, auto_keys, scratch); self.trace_inplace(module, start, end, auto_keys, scratch);
@@ -100,10 +92,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
module: &Module<B>, module: &Module<B>,
start: usize, start: usize,
end: usize, end: usize,
auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<DataAK, B>>, auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
scratch: &mut Scratch<B>, scratch: &mut Scratch<B>,
) where ) where
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes + VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B> + VmpApplyDftToDft<B>
@@ -116,7 +108,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxRshInplace<B> + VecZnxRshInplace<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>, + VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx, Scratch<B>: ScratchAvailable,
{ {
let basek_ksk: Base2K = auto_keys let basek_ksk: Base2K = auto_keys
.get(auto_keys.keys().next().unwrap()) .get(auto_keys.keys().next().unwrap())
@@ -137,7 +129,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
} }
if self.base2k() != basek_ksk { if self.base2k() != basek_ksk {
let (mut self_conv, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout { let (mut self_conv, scratch_1) = scratch.take_glwe_ct(&GLWELayout {
n: module.n().into(), n: module.n().into(),
base2k: basek_ksk, base2k: basek_ksk,
k: self.k(), k: self.k(),

View File

@@ -1,224 +1,205 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, DataMut, Module, Scratch};
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, use crate::{
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, ScratchTakeCore,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, keyswitching::glwe_ct::GLWEKeySwitch,
layouts::{
AutomorphismKey, AutomorphismKeyToRef, GGLWE, GGLWEInfos, GGLWEToMut, GGLWEToRef, GLWESwitchingKey,
GLWESwitchingKeyToRef,
prepared::{GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedToRef},
}, },
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
}; };
use crate::layouts::{ impl AutomorphismKey<Vec<u8>> {
GGLWEAutomorphismKey, GGLWEInfos, GGLWESwitchingKey, GLWECiphertext, GLWEInfos, pub fn keyswitch_inplace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared},
};
impl GGLWEAutomorphismKey<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
key_infos: &KEY,
) -> usize
where where
OUT: GGLWEInfos, R: GGLWEInfos,
IN: GGLWEInfos, A: GGLWEInfos,
KEY: GGLWEInfos, K: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, M: GGLWEKeySwitch<BE>,
{ {
GGLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos) module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
}
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where
OUT: GGLWEInfos,
KEY: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
} }
} }
impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> { impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, A: AutomorphismKeyToRef,
lhs: &GGLWEAutomorphismKey<DataLhs>, B: GLWESwitchingKeyPreparedToRef<BE>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>, Scratch<BE>: ScratchTakeCore<BE>,
scratch: &mut Scratch<B>, M: GGLWEKeySwitch<BE>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
self.key.keyswitch(module, &lhs.key, rhs, scratch); module.gglwe_keyswitch(&mut self.key.key, &a.to_ref().key.key, b, scratch);
} }
pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>( pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, A: GLWESwitchingKeyPreparedToRef<BE>,
rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>, Scratch<BE>: ScratchTakeCore<BE>,
scratch: &mut Scratch<B>, M: GGLWEKeySwitch<BE>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
self.key.keyswitch_inplace(module, &rhs.key, scratch); module.gglwe_keyswitch_inplace(&mut self.key.key, a, scratch);
} }
} }
impl GGLWESwitchingKey<Vec<u8>> { impl GLWESwitchingKey<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>( pub fn keyswitch_inplace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
key_apply: &KEY,
) -> usize
where where
OUT: GGLWEInfos, R: GGLWEInfos,
IN: GGLWEInfos, A: GGLWEInfos,
KEY: GGLWEInfos, K: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, M: GGLWEKeySwitch<BE>,
{ {
GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, key_apply) module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
}
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
where
OUT: GGLWEInfos + GLWEInfos,
KEY: GGLWEInfos + GLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GLWECiphertext::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
} }
} }
impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> { impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>( pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, A: GLWESwitchingKeyToRef,
lhs: &GGLWESwitchingKey<DataLhs>, B: GLWESwitchingKeyPreparedToRef<BE>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>, Scratch<BE>: ScratchTakeCore<BE>,
scratch: &mut Scratch<B>, M: GGLWEKeySwitch<BE>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
{ {
#[cfg(debug_assertions)] module.gglwe_keyswitch(&mut self.key, &a.to_ref().key, b, scratch);
{ }
assert_eq!(
self.rank_in(), pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
lhs.rank_in(), where
"ksk_out input rank: {} != ksk_in input rank: {}", A: GLWESwitchingKeyPreparedToRef<BE>,
self.rank_in(), Scratch<BE>: ScratchTakeCore<BE>,
lhs.rank_in() M: GGLWEKeySwitch<BE>,
); {
assert_eq!( module.gglwe_keyswitch_inplace(&mut self.key, a, scratch);
lhs.rank_out(), }
rhs.rank_in(), }
"ksk_in output rank: {} != ksk_apply input rank: {}",
self.rank_out(), impl GGLWE<Vec<u8>> {
rhs.rank_in() pub fn keyswitch_inplace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
); where
assert_eq!( R: GGLWEInfos,
self.rank_out(), A: GGLWEInfos,
rhs.rank_out(), K: GGLWEInfos,
"ksk_out output rank: {} != ksk_apply output rank: {}", M: GGLWEKeySwitch<BE>,
self.rank_out(), {
rhs.rank_out() module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
); }
assert!( }
self.dnum() <= lhs.dnum(),
"self.dnum()={} > lhs.dnum()={}", impl<DataSelf: DataMut> GGLWE<DataSelf> {
self.dnum(), pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
lhs.dnum() where
); A: GGLWEToRef,
assert_eq!( B: GLWESwitchingKeyPreparedToRef<BE>,
self.dsize(), Scratch<BE>: ScratchTakeCore<BE>,
lhs.dsize(), M: GGLWEKeySwitch<BE>,
"ksk_out dsize: {} != ksk_in dsize: {}", {
self.dsize(), module.gglwe_keyswitch(self, a, b, scratch);
lhs.dsize() }
)
pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
where
A: GLWESwitchingKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
M: GGLWEKeySwitch<BE>,
{
module.gglwe_keyswitch_inplace(self, a, scratch);
}
}
impl<BE: Backend> GGLWEKeySwitch<BE> for Module<BE> where Self: GLWEKeySwitch<BE> {}
pub trait GGLWEKeySwitch<BE: Backend>
where
Self: GLWEKeySwitch<BE>,
{
fn gglwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
R: GGLWEInfos,
A: GGLWEInfos,
K: GGLWEInfos,
{
self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
}
fn gglwe_keyswitch<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
where
R: GGLWEToMut,
A: GGLWEToRef,
B: GLWESwitchingKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
let a: &GGLWE<&[u8]> = &a.to_ref();
let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &b.to_ref();
assert_eq!(
res.rank_in(),
a.rank_in(),
"res input rank: {} != a input rank: {}",
res.rank_in(),
a.rank_in()
);
assert_eq!(
a.rank_out(),
b.rank_in(),
"res output rank: {} != b input rank: {}",
a.rank_out(),
b.rank_in()
);
assert_eq!(
res.rank_out(),
b.rank_out(),
"res output rank: {} != b output rank: {}",
res.rank_out(),
b.rank_out()
);
assert!(
res.dnum() <= a.dnum(),
"res.dnum()={} > a.dnum()={}",
res.dnum(),
a.dnum()
);
assert_eq!(
res.dsize(),
a.dsize(),
"res dsize: {} != a dsize: {}",
res.dsize(),
a.dsize()
);
for row in 0..res.dnum().into() {
for col in 0..res.rank_in().into() {
self.glwe_keyswitch(&mut res.at_mut(row, col), &a.at(row, col), b, scratch);
}
} }
(0..self.rank_in().into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_j| {
self.at_mut(row_j, col_i)
.keyswitch(module, &lhs.at(row_j, col_i), rhs, scratch);
});
});
(self.dnum().min(lhs.dnum()).into()..self.dnum().into()).for_each(|row_i| {
(0..self.rank_in().into()).for_each(|col_j| {
self.at_mut(row_i, col_j).data.zero();
});
});
} }
pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>( fn gglwe_keyswitch_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, R: GGLWEToMut,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>, A: GLWESwitchingKeyPreparedToRef<BE>,
scratch: &mut Scratch<B>, Scratch<BE>: ScratchTakeCore<BE>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
{ {
#[cfg(debug_assertions)] let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
{ let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &a.to_ref();
assert_eq!(
self.rank_out(),
rhs.rank_out(),
"ksk_out output rank: {} != ksk_apply output rank: {}",
self.rank_out(),
rhs.rank_out()
);
}
(0..self.rank_in().into()).for_each(|col_i| { assert_eq!(
(0..self.dnum().into()).for_each(|row_j| { res.rank_out(),
self.at_mut(row_j, col_i) a.rank_out(),
.keyswitch_inplace(module, rhs, scratch) "res output rank: {} != a output rank: {}",
}); res.rank_out(),
}); a.rank_out()
);
for row in 0..res.dnum().into() {
for col in 0..res.rank_in().into() {
self.glwe_keyswitch_inplace(&mut res.at_mut(row, col), a, scratch);
}
}
} }
} }
impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {}

View File

@@ -1,366 +1,131 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, DataMut, Scratch, VecZnx};
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
};
use crate::{ use crate::{
GGSWExpandRows, ScratchTakeCore,
keyswitching::glwe_ct::GLWEKeySwitch,
layouts::{ layouts::{
GGLWECiphertext, GGLWEInfos, GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos, GGLWEInfos, GGSW, GGSWInfos, GGSWToMut, GGSWToRef,
prepared::{GGLWESwitchingKeyPrepared, GGLWETensorKeyPrepared}, prepared::{GLWESwitchingKeyPreparedToRef, TensorKeyPreparedToRef},
}, },
operations::GLWEOperations,
}; };
impl GGSWCiphertext<Vec<u8>> { impl GGSW<Vec<u8>> {
pub(crate) fn expand_row_scratch_space<B: Backend, OUT, TSK>(module: &Module<B>, out_infos: &OUT, tsk_infos: &TSK) -> usize pub fn keyswitch_tmp_bytes<R, A, K, T, M, BE: Backend>(
where module: &M,
OUT: GGSWInfos, res_infos: &R,
TSK: GGLWEInfos, a_infos: &A,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes, key_infos: &K,
{ tsk_infos: &T,
let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
let size_in: usize = out_infos
.k()
.div_ceil(tsk_infos.base2k())
.div_ceil(tsk_infos.dsize().into()) as usize;
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes((tsk_infos.rank_out() + 1).into(), tsk_size);
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, size_in);
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
tsk_size,
size_in,
size_in,
(tsk_infos.rank_in()).into(), // Verify if rank+1
(tsk_infos.rank_out()).into(), // Verify if rank+1
tsk_size,
);
let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
let norm: usize = module.vec_znx_normalize_tmp_bytes();
tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
}
#[allow(clippy::too_many_arguments)]
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
apply_infos: &KEY,
tsk_infos: &TSK,
) -> usize ) -> usize
where where
OUT: GGSWInfos, R: GGSWInfos,
IN: GGSWInfos, A: GGSWInfos,
KEY: GGLWEInfos, K: GGLWEInfos,
TSK: GGLWEInfos, T: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes M: GGSWKeySwitch<BE>,
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxBigNormalizeTmpBytes,
{ {
#[cfg(debug_assertions)] module.ggsw_keyswitch_tmp_bytes(res_infos, a_infos, key_infos, tsk_infos)
{ }
assert_eq!(apply_infos.rank_in(), apply_infos.rank_out()); }
assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
assert_eq!(apply_infos.rank_in(), tsk_infos.rank_in());
}
let rank: usize = apply_infos.rank_out().into(); impl<D: DataMut> GGSW<D> {
pub fn keyswitch<M, A, K, T, BE: Backend>(&mut self, module: &M, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
where
A: GGSWToRef,
K: GLWESwitchingKeyPreparedToRef<BE>,
T: TensorKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
M: GGSWKeySwitch<BE>,
{
module.ggsw_keyswitch(self, a, key, tsk, scratch);
}
let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize; pub fn keyswitch_inplace<M, K, T, BE: Backend>(&mut self, module: &M, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, size_out); where
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out); K: GLWESwitchingKeyPreparedToRef<BE>,
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos); T: TensorKeyPreparedToRef<BE>,
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, out_infos, tsk_infos); Scratch<BE>: ScratchTakeCore<BE>,
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out); M: GGSWKeySwitch<BE>,
{
module.ggsw_keyswitch_inplace(self, key, tsk, scratch);
}
}
if in_infos.base2k() == tsk_infos.base2k() { pub trait GGSWKeySwitch<BE: Backend>
where
Self: GLWEKeySwitch<BE> + GGSWExpandRows<BE>,
{
fn ggsw_keyswitch_tmp_bytes<R, A, K, T>(&self, res_infos: &R, a_infos: &A, key_infos: &K, tsk_infos: &T) -> usize
where
R: GGSWInfos,
A: GGSWInfos,
K: GGLWEInfos,
T: GGLWEInfos,
{
assert_eq!(key_infos.rank_in(), key_infos.rank_out());
assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
assert_eq!(key_infos.rank_in(), tsk_infos.rank_in());
let rank: usize = key_infos.rank_out().into();
let size_out: usize = res_infos.k().div_ceil(res_infos.base2k()) as usize;
let res_znx: usize = VecZnx::bytes_of(self.n(), rank + 1, size_out);
let ci_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
let ks: usize = self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos);
let expand_rows: usize = self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos);
let res_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
if a_infos.base2k() == tsk_infos.base2k() {
res_znx + ci_dft + (ks | expand_rows | res_dft) res_znx + ci_dft + (ks | expand_rows | res_dft)
} else { } else {
let a_conv: usize = VecZnx::alloc_bytes( let a_conv: usize = VecZnx::bytes_of(
module.n(), self.n(),
1, 1,
out_infos.k().div_ceil(tsk_infos.base2k()) as usize, res_infos.k().div_ceil(tsk_infos.base2k()) as usize,
) + module.vec_znx_normalize_tmp_bytes(); ) + self.vec_znx_normalize_tmp_bytes();
res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft) res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
} }
} }
#[allow(clippy::too_many_arguments)] fn ggsw_keyswitch<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
module: &Module<B>,
out_infos: &OUT,
apply_infos: &KEY,
tsk_infos: &TSK,
) -> usize
where where
OUT: GGSWInfos, R: GGSWToMut,
KEY: GGLWEInfos, A: GGSWToRef,
TSK: GGLWEInfos, K: GLWESwitchingKeyPreparedToRef<BE>,
Module<B>: VecZnxDftAllocBytes T: TensorKeyPreparedToRef<BE>,
+ VmpApplyDftToDftTmpBytes Scratch<BE>: ScratchTakeCore<BE>,
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxBigNormalizeTmpBytes,
{ {
GGSWCiphertext::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos) let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let a: &GGSW<&[u8]> = &a.to_ref();
assert_eq!(res.ggsw_layout(), a.ggsw_layout());
for row in 0..a.dnum().into() {
// Key-switch column 0, i.e.
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
self.glwe_keyswitch(&mut res.at_mut(row, 0), &a.at(row, 0), key, scratch);
}
self.ggsw_expand_row(res, tsk, scratch);
}
fn ggsw_keyswitch_inplace<R, K, T>(&self, res: &mut R, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
where
R: GGSWToMut,
K: GLWESwitchingKeyPreparedToRef<BE>,
T: TensorKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
for row in 0..res.dnum().into() {
// Key-switch column 0, i.e.
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
self.glwe_keyswitch_inplace(&mut res.at_mut(row, 0), key, scratch);
}
self.ggsw_expand_row(res, tsk, scratch);
} }
} }
impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> { impl<DataSelf: DataMut> GGSW<DataSelf> {}
pub fn from_gglwe<DataA, DataTsk, B: Backend>(
&mut self,
module: &Module<B>,
a: &GGLWECiphertext<DataA>,
tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
scratch: &mut Scratch<B>,
) where
DataA: DataRef,
DataTsk: DataRef,
Module<B>: VecZnxCopy
+ VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<B>
+ VecZnxDftCopy<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftAddInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxIdftApplyTmpA<B>
+ VecZnxNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
use crate::layouts::{GLWEInfos, LWEInfos};
assert_eq!(self.rank(), a.rank_out());
assert_eq!(self.dnum(), a.dnum());
assert_eq!(self.n(), module.n() as u32);
assert_eq!(a.n(), module.n() as u32);
assert_eq!(tsk.n(), module.n() as u32);
}
(0..self.dnum().into()).for_each(|row_i| {
self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
});
self.expand_row(module, tsk, scratch);
}
pub fn keyswitch<DataLhs: DataRef, DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
lhs: &GGSWCiphertext<DataLhs>,
ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxDftAllocBytes
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftCopy<B>
+ VecZnxDftAddInplace<B>
+ VecZnxIdftApplyTmpA<B>
+ VecZnxNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
{
(0..lhs.dnum().into()).for_each(|row_i| {
// Key-switch column 0, i.e.
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
self.at_mut(row_i, 0)
.keyswitch(module, &lhs.at(row_i, 0), ksk, scratch);
});
self.expand_row(module, tsk, scratch);
}
pub fn keyswitch_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxDftAllocBytes
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftCopy<B>
+ VecZnxDftAddInplace<B>
+ VecZnxIdftApplyTmpA<B>
+ VecZnxNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
{
(0..self.dnum().into()).for_each(|row_i| {
// Key-switch column 0, i.e.
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
self.at_mut(row_i, 0)
.keyswitch_inplace(module, ksk, scratch);
});
self.expand_row(module, tsk, scratch);
}
pub fn expand_row<DataTsk: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigAllocBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<B>
+ VecZnxDftCopy<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftAddInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxIdftApplyTmpA<B>
+ VecZnxNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
{
let basek_in: usize = self.base2k().into();
let basek_tsk: usize = tsk.base2k().into();
assert!(scratch.available() >= GGSWCiphertext::expand_row_scratch_space(module, self, tsk));
let n: usize = self.n().into();
let rank: usize = self.rank().into();
let cols: usize = rank + 1;
let a_size: usize = (self.size() * basek_in).div_ceil(basek_tsk);
// Keyswitch the j-th row of the col 0
for row_i in 0..self.dnum().into() {
let a = &self.at(row_i, 0).data;
// Pre-compute DFT of (a0, a1, a2)
let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, a_size);
if basek_in == basek_tsk {
for i in 0..cols {
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
}
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(n, 1, a_size);
for i in 0..cols {
module.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
}
}
for col_j in 1..cols {
// Example for rank 3:
//
// Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
// actually composed of that many dnum and we focus on a specific row here
// implicitely given ci_dft.
//
// # Input
//
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
// col 1: (0, 0, 0, 0)
// col 2: (0, 0, 0, 0)
// col 3: (0, 0, 0, 0)
//
// # Output
//
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
// col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
let dsize: usize = tsk.dsize().into();
let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(n, cols, tsk.size());
let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(dsize));
{
// Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
//
// # Example for col=1
//
// a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
// +
// a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
// +
// a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
// =
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
for col_i in 1..cols {
let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
// Extracts a[i] and multipies with Enc(s[i]s[j])
for di in 0..dsize {
tmp_a.set_size((ci_dft.size() + di) / dsize);
// Small optimization for dsize > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
tmp_dft_i.set_size(tsk.size() - ((dsize - di) as isize - 2).max(0) as usize);
module.vec_znx_dft_copy(dsize, dsize - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
if di == 0 && col_i == 1 {
module.vmp_apply_dft_to_dft(&mut tmp_dft_i, &tmp_a, pmat, scratch_3);
} else {
module.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
}
}
}
}
// Adds -(sum a[i] * s[i]) + m) on the i-th column of tmp_idft_i
//
// (-(x0s0 + x1s1 + x2s2) + a0s0s0 + a1s0s1 + a2s0s2, x0, x1, x2)
// +
// (0, -(a0s0 + a1s1 + a2s2) + M[i], 0, 0)
// =
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 -(a0s0 + a1s1 + a2s2) + M[i], x1, x2)
// =
// (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(n, 1, tsk.size());
for i in 0..cols {
module.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
module.vec_znx_big_normalize(
basek_in,
&mut self.at_mut(row_i, col_j).data,
i,
basek_tsk,
&tmp_idft,
0,
scratch_3,
);
}
}
}
}
}

View File

@@ -1,186 +1,179 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
}, },
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos}, layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
}; };
use crate::layouts::{GGLWEInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWESwitchingKeyPrepared}; use crate::{
ScratchTakeCore,
layouts::{
GGLWEInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
prepared::{GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedToRef},
},
};
impl GLWECiphertext<Vec<u8>> { impl GLWE<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>( pub fn keyswitch_tmp_bytes<M, R, A, B, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
key_apply: &KEY,
) -> usize
where where
OUT: GLWEInfos, R: GLWEInfos,
IN: GLWEInfos, A: GLWEInfos,
KEY: GGLWEInfos, B: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, M: GLWEKeySwitch<BE>,
{ {
let in_size: usize = in_infos module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, b_infos)
}
}
impl<D: DataMut> GLWE<D> {
pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
where
A: GLWEToRef,
B: GLWESwitchingKeyPreparedToRef<BE>,
M: GLWEKeySwitch<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.glwe_keyswitch(self, a, b, scratch);
}
pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
where
A: GLWESwitchingKeyPreparedToRef<BE>,
M: GLWEKeySwitch<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.glwe_keyswitch_inplace(self, a, scratch);
}
}
impl<BE: Backend> GLWEKeySwitch<BE> for Module<BE> where
Self: Sized
+ ModuleN
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<BE>
+ VmpApplyDftToDftAdd<BE>
+ VecZnxDftApply<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxBigAddSmallInplace<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxNormalize<BE>
+ VecZnxNormalizeTmpBytes
{
}
pub trait GLWEKeySwitch<BE: Backend>
where
Self: Sized
+ ModuleN
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<BE>
+ VmpApplyDftToDftAdd<BE>
+ VecZnxDftApply<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxBigAddSmallInplace<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxNormalize<BE>
+ VecZnxNormalizeTmpBytes,
{
fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
where
R: GLWEInfos,
A: GLWEInfos,
B: GGLWEInfos,
{
let in_size: usize = a_infos
.k() .k()
.div_ceil(key_apply.base2k()) .div_ceil(b_infos.base2k())
.div_ceil(key_apply.dsize().into()) as usize; .div_ceil(b_infos.dsize().into()) as usize;
let out_size: usize = out_infos.size(); let out_size: usize = res_infos.size();
let ksk_size: usize = key_apply.size(); let ksk_size: usize = b_infos.size();
let res_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
let ai_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size); let ai_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes( let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
out_size, out_size,
in_size, in_size,
in_size, in_size,
(key_apply.rank_in()).into(), (b_infos.rank_in()).into(),
(key_apply.rank_out() + 1).into(), (b_infos.rank_out() + 1).into(),
ksk_size, ksk_size,
) + module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size); ) + self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes(); let normalize_big: usize = self.vec_znx_big_normalize_tmp_bytes();
if in_infos.base2k() == key_apply.base2k() { if a_infos.base2k() == b_infos.base2k() {
res_dft + ((ai_dft + vmp) | normalize_big) res_dft + ((ai_dft + vmp) | normalize_big)
} else if key_apply.dsize() == 1 { } else if b_infos.dsize() == 1 {
// In this case, we only need one column, temporary, that we can drop once a_dft is computed. // In this case, we only need one column, temporary, that we can drop once a_dft is computed.
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), 1, in_size) + module.vec_znx_normalize_tmp_bytes(); let normalize_conv: usize = VecZnx::bytes_of(self.n(), 1, in_size) + self.vec_znx_normalize_tmp_bytes();
res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big) res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
} else { } else {
// Since we stride over a to get a_dft when dsize > 1, we need to store the full columns of a with in the base conversion. // Since we stride over a to get a_dft when dsize > 1, we need to store the full columns of a with in the base conversion.
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), (key_apply.rank_in()).into(), in_size); let normalize_conv: usize = VecZnx::bytes_of(self.n(), (b_infos.rank_in()).into(), in_size);
res_dft + ((ai_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big) res_dft + ((ai_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
} }
} }
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize fn glwe_keyswitch<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
where where
OUT: GLWEInfos, R: GLWEToMut,
KEY: GGLWEInfos, A: GLWEToRef,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes, B: GLWESwitchingKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{ {
Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply) let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
} let a: &GLWE<&[u8]> = &a.to_ref();
} let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &b.to_ref();
impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
#[allow(dead_code)]
pub(crate) fn assert_keyswitch<B: Backend, DataLhs, DataRhs>(
&self,
module: &Module<B>,
lhs: &GLWECiphertext<DataLhs>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
scratch: &Scratch<B>,
) where
DataLhs: DataRef,
DataRhs: DataRef,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable,
{
assert_eq!( assert_eq!(
lhs.rank(), a.rank(),
rhs.rank_in(), b.rank_in(),
"lhs.rank(): {} != rhs.rank_in(): {}", "a.rank(): {} != b.rank_in(): {}",
lhs.rank(), a.rank(),
rhs.rank_in() b.rank_in()
); );
assert_eq!( assert_eq!(
self.rank(), res.rank(),
rhs.rank_out(), b.rank_out(),
"self.rank(): {} != rhs.rank_out(): {}", "res.rank(): {} != b.rank_out(): {}",
self.rank(), res.rank(),
rhs.rank_out() b.rank_out()
); );
assert_eq!(rhs.n(), self.n());
assert_eq!(lhs.n(), self.n());
let scrach_needed: usize = GLWECiphertext::keyswitch_scratch_space(module, self, lhs, rhs); assert_eq!(res.n(), self.n() as u32);
assert_eq!(a.n(), self.n() as u32);
assert_eq!(b.n(), self.n() as u32);
let scrach_needed: usize = self.glwe_keyswitch_tmp_bytes(res, a, b);
assert!( assert!(
scratch.available() >= scrach_needed, scratch.available() >= scrach_needed,
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space( "scratch.available()={} < glwe_keyswitch_tmp_bytes={scrach_needed}",
module,
self.base2k(),
self.k(),
lhs.base2k(),
lhs.k(),
rhs.base2k(),
rhs.k(),
rhs.dsize(),
rhs.rank_in(),
rhs.rank_out(),
)={scrach_needed}",
scratch.available(), scratch.available(),
); );
}
#[allow(dead_code)] let basek_out: usize = res.base2k().into();
pub(crate) fn assert_keyswitch_inplace<B: Backend, DataRhs>( let base2k_out: usize = b.base2k().into();
&self,
module: &Module<B>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
scratch: &Scratch<B>,
) where
DataRhs: DataRef,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable,
{
assert_eq!(
self.rank(),
rhs.rank_out(),
"self.rank(): {} != rhs.rank_out(): {}",
self.rank(),
rhs.rank_out()
);
assert_eq!(rhs.n(), self.n()); let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), b.size()); // Todo optimise
let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, a, b, scratch_1);
let scrach_needed: usize = GLWECiphertext::keyswitch_inplace_scratch_space(module, self, rhs); (0..(res.rank() + 1).into()).for_each(|i| {
self.vec_znx_big_normalize(
assert!(
scratch.available() >= scrach_needed,
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space()={scrach_needed}",
scratch.available(),
);
}
}
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
glwe_in: &GLWECiphertext<DataLhs>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
self.assert_keyswitch(module, glwe_in, rhs, scratch);
}
let basek_out: usize = self.base2k().into();
let basek_ksk: usize = rhs.base2k().into();
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
let res_big: VecZnxBig<_, B> = glwe_in.keyswitch_internal(module, res_dft, rhs, scratch_1);
(0..(self.rank() + 1).into()).for_each(|i| {
module.vec_znx_big_normalize(
basek_out, basek_out,
&mut self.data, &mut res.data,
i, i,
basek_ksk, base2k_out,
&res_big, &res_big,
i, i,
scratch_1, scratch_1,
@@ -188,227 +181,190 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
}) })
} }
pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>( fn glwe_keyswitch_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
&mut self,
module: &Module<B>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDftTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
self.assert_keyswitch_inplace(module, rhs, scratch);
}
let basek_in: usize = self.base2k().into();
let basek_ksk: usize = rhs.base2k().into();
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
let res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, rhs, scratch_1);
(0..(self.rank() + 1).into()).for_each(|i| {
module.vec_znx_big_normalize(
basek_in,
&mut self.data,
i,
basek_ksk,
&res_big,
i,
scratch_1,
);
})
}
}
impl<D: DataRef> GLWECiphertext<D> {
pub(crate) fn keyswitch_internal<B: Backend, DataRes, DataKey>(
&self,
module: &Module<B>,
res_dft: VecZnxDft<DataRes, B>,
rhs: &GGLWESwitchingKeyPrepared<DataKey, B>,
scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
where where
DataRes: DataMut, R: GLWEToMut,
DataKey: DataRef, A: GLWESwitchingKeyPreparedToRef<BE>,
Module<B>: VecZnxDftAllocBytes Scratch<BE>: ScratchTakeCore<BE>,
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDftTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
{ {
if rhs.dsize() == 1 { let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
return keyswitch_vmp_one_digit( let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &a.to_ref();
module,
self.base2k().into(), assert_eq!(
rhs.base2k().into(), res.rank(),
res_dft, a.rank_in(),
&self.data, "res.rank(): {} != a.rank_in(): {}",
&rhs.key.data, res.rank(),
scratch, a.rank_in()
);
assert_eq!(
res.rank(),
a.rank_out(),
"res.rank(): {} != b.rank_out(): {}",
res.rank(),
a.rank_out()
);
assert_eq!(res.n(), self.n() as u32);
assert_eq!(a.n(), self.n() as u32);
let scrach_needed: usize = self.glwe_keyswitch_tmp_bytes(res, res, a);
assert!(
scratch.available() >= scrach_needed,
"scratch.available()={} < glwe_keyswitch_tmp_bytes={scrach_needed}",
scratch.available(),
);
let base2k_in: usize = res.base2k().into();
let base2k_out: usize = a.base2k().into();
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), a.size()); // Todo optimise
let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, res, a, scratch_1);
(0..(res.rank() + 1).into()).for_each(|i| {
self.vec_znx_big_normalize(
base2k_in,
&mut res.data,
i,
base2k_out,
&res_big,
i,
scratch_1,
); );
} })
keyswitch_vmp_multiple_digits(
module,
self.base2k().into(),
rhs.base2k().into(),
res_dft,
&self.data,
&rhs.key.data,
rhs.dsize().into(),
scratch,
)
} }
} }
fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>( impl GLWE<Vec<u8>> {}
module: &Module<B>,
basek_in: usize, impl<DataSelf: DataMut> GLWE<DataSelf> {}
basek_ksk: usize,
mut res_dft: VecZnxDft<DataRes, B>, fn keyswitch_internal<BE: Backend, M, DR, DA, DB>(
a: &VecZnx<DataIn>, module: &M,
mat: &VmpPMat<DataVmp, B>, mut res: VecZnxDft<DR, BE>,
scratch: &mut Scratch<B>, a: &GLWE<DA>,
) -> VecZnxBig<DataRes, B> b: &GLWESwitchingKeyPrepared<DB, BE>,
scratch: &mut Scratch<BE>,
) -> VecZnxBig<DR, BE>
where where
DataRes: DataMut, DR: DataMut,
DataIn: DataRef, DA: DataRef,
DataVmp: DataRef, DB: DataRef,
Module<B>: VecZnxDftAllocBytes M: ModuleN
+ VecZnxDftApply<B> + VecZnxDftBytesOf
+ VmpApplyDftToDft<B> + VmpApplyDftToDftTmpBytes
+ VecZnxIdftApplyConsume<B> + VecZnxBigNormalizeTmpBytes
+ VecZnxBigAddSmallInplace<B> + VmpApplyDftToDftTmpBytes
+ VecZnxNormalize<B>, + VmpApplyDftToDft<BE>
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx, + VmpApplyDftToDftAdd<BE>
+ VecZnxDftApply<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxBigAddSmallInplace<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxNormalize<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{ {
let cols: usize = a.cols(); let base2k_in: usize = a.base2k().into();
let base2k_out: usize = b.base2k().into();
let cols: usize = (a.rank() + 1).into();
let a_size: usize = (a.size() * base2k_in).div_ceil(base2k_out);
let pmat: &VmpPMat<DB, BE> = &b.key.data;
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk); if b.dsize() == 1 {
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a.size()); let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a.size());
if basek_in == basek_ksk { if base2k_in == base2k_out {
(0..cols - 1).for_each(|col_i| { (0..cols - 1).for_each(|col_i| {
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1); module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a.data(), col_i + 1);
}); });
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module, 1, a_size);
(0..cols - 1).for_each(|col_i| {
module.vec_znx_normalize(
base2k_out,
&mut a_conv,
0,
base2k_in,
a.data(),
col_i + 1,
scratch_2,
);
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
});
}
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
} else { } else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), 1, a_size); let dsize: usize = b.dsize().into();
(0..cols - 1).for_each(|col_i| {
module.vec_znx_normalize(basek_ksk, &mut a_conv, 0, basek_in, a, col_i + 1, scratch_2); let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a_size.div_ceil(dsize));
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0); ai_dft.data_mut().fill(0);
});
if base2k_in == base2k_out {
for di in 0..dsize {
ai_dft.set_size((a_size + di) / dsize);
// Small optimization for dsize > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
for j in 0..cols - 1 {
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a.data(), j + 1);
}
if di == 0 {
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
} else {
module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_1);
}
}
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module, cols - 1, a_size);
for j in 0..cols - 1 {
module.vec_znx_normalize(
base2k_out,
&mut a_conv,
j,
base2k_in,
a.data(),
j + 1,
scratch_2,
);
}
for di in 0..dsize {
ai_dft.set_size((a_size + di) / dsize);
// Small optimization for dsize > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
for j in 0..cols - 1 {
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
}
if di == 0 {
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_2);
} else {
module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_2);
}
}
}
res.set_size(res.max_size());
} }
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1); let mut res_big: VecZnxBig<DR, BE> = module.vec_znx_idft_apply_consume(res);
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft); module.vec_znx_big_add_small_inplace(&mut res_big, 0, a.data(), 0);
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
res_big
}
#[allow(clippy::too_many_arguments)]
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
module: &Module<B>,
basek_in: usize,
basek_ksk: usize,
mut res_dft: VecZnxDft<DataRes, B>,
a: &VecZnx<DataIn>,
mat: &VmpPMat<DataVmp, B>,
dsize: usize,
scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
where
DataRes: DataMut,
DataIn: DataRef,
DataVmp: DataRef,
Module<B>: VecZnxDftAllocBytes
+ VecZnxDftApply<B>
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
{
let cols: usize = a.cols();
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a_size.div_ceil(dsize));
ai_dft.data_mut().fill(0);
if basek_in == basek_ksk {
for di in 0..dsize {
ai_dft.set_size((a_size + di) / dsize);
// Small optimization for dsize > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
res_dft.set_size(mat.size() - ((dsize - di) as isize - 2).max(0) as usize);
for j in 0..cols - 1 {
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a, j + 1);
}
if di == 0 {
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
} else {
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
}
}
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), cols - 1, a_size);
for j in 0..cols - 1 {
module.vec_znx_normalize(basek_ksk, &mut a_conv, j, basek_in, a, j + 1, scratch_2);
}
for di in 0..dsize {
ai_dft.set_size((a_size + di) / dsize);
// Small optimization for dsize > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
res_dft.set_size(mat.size() - ((dsize - di) as isize - 2).max(0) as usize);
for j in 0..cols - 1 {
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
}
if di == 0 {
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_2);
} else {
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_2);
}
}
}
res_dft.set_size(res_dft.max_size());
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
res_big res_big
} }

View File

@@ -1,116 +1,116 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::ScratchAvailable,
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, layouts::{Backend, DataMut, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
}; };
use crate::{ use crate::{
TakeGLWECt, ScratchTakeCore,
keyswitching::glwe_ct::GLWEKeySwitch,
layouts::{ layouts::{
GGLWEInfos, GLWECiphertext, GLWECiphertextLayout, LWECiphertext, LWEInfos, Rank, TorusPrecision, GGLWEInfos, GLWE, GLWEAlloc, GLWELayout, LWE, LWEInfos, LWEToMut, LWEToRef, Rank, TorusPrecision,
prepared::LWESwitchingKeyPrepared, prepared::{LWESwitchingKeyPrepared, LWESwitchingKeyPreparedToRef},
}, },
}; };
impl LWECiphertext<Vec<u8>> { impl LWE<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>( pub fn keyswitch_tmp_bytes<M, R, A, K, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
module: &Module<B>,
out_infos: &OUT,
in_infos: &IN,
key_infos: &KEY,
) -> usize
where where
OUT: LWEInfos, R: LWEInfos,
IN: LWEInfos, A: LWEInfos,
KEY: GGLWEInfos, K: GGLWEInfos,
Module<B>: VecZnxDftAllocBytes M: LWEKeySwitch<BE>,
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDftTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalizeTmpBytes,
{ {
let max_k: TorusPrecision = in_infos.k().max(out_infos.k()); module.lwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
let glwe_in_infos: GLWECiphertextLayout = GLWECiphertextLayout {
n: module.n().into(),
base2k: in_infos.base2k(),
k: max_k,
rank: Rank(1),
};
let glwe_out_infos: GLWECiphertextLayout = GLWECiphertextLayout {
n: module.n().into(),
base2k: out_infos.base2k(),
k: max_k,
rank: Rank(1),
};
let glwe_in: usize = GLWECiphertext::alloc_bytes(&glwe_in_infos);
let glwe_out: usize = GLWECiphertext::alloc_bytes(&glwe_out_infos);
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, key_infos);
glwe_in + glwe_out + ks
} }
} }
impl<DLwe: DataMut> LWECiphertext<DLwe> { impl<D: DataMut> LWE<D> {
pub fn keyswitch<A, DKs, B: Backend>( pub fn keyswitch<M, A, K, BE: Backend>(&mut self, module: &M, a: &A, ksk: &K, scratch: &mut Scratch<BE>)
&mut self, where
module: &Module<B>, A: LWEToRef,
a: &LWECiphertext<A>, K: LWESwitchingKeyPreparedToRef<BE>,
ksk: &LWESwitchingKeyPrepared<DKs, B>, Scratch<BE>: ScratchTakeCore<BE>,
scratch: &mut Scratch<B>, M: LWEKeySwitch<BE>,
) where
A: DataRef,
DKs: DataRef,
Module<B>: VecZnxDftAllocBytes
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxCopy,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{ {
#[cfg(debug_assertions)] module.lwe_keyswitch(self, a, ksk, scratch);
{ }
assert!(self.n() <= module.n() as u32); }
assert!(a.n() <= module.n() as u32);
assert!(scratch.available() >= LWECiphertext::keyswitch_scratch_space(module, self, a, ksk));
}
let max_k: TorusPrecision = self.k().max(a.k()); impl<BE: Backend> LWEKeySwitch<BE> for Module<BE> where Self: LWEKeySwitch<BE> {}
pub trait LWEKeySwitch<BE: Backend>
where
Self: GLWEKeySwitch<BE> + GLWEAlloc,
{
fn lwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
R: LWEInfos,
A: LWEInfos,
K: GGLWEInfos,
{
let max_k: TorusPrecision = a_infos.k().max(res_infos.k());
let glwe_a_infos: GLWELayout = GLWELayout {
n: self.ring_degree(),
base2k: a_infos.base2k(),
k: max_k,
rank: Rank(1),
};
let glwe_res_infos: GLWELayout = GLWELayout {
n: self.ring_degree(),
base2k: res_infos.base2k(),
k: max_k,
rank: Rank(1),
};
let glwe_in: usize = GLWE::bytes_of_from_infos(self, &glwe_a_infos);
let glwe_out: usize = GLWE::bytes_of_from_infos(self, &glwe_res_infos);
let ks: usize = self.glwe_keyswitch_tmp_bytes(&glwe_res_infos, &glwe_a_infos, key_infos);
glwe_in + glwe_out + ks
}
fn lwe_keyswitch<R, A, K>(&self, res: &mut R, a: &A, ksk: &K, scratch: &mut Scratch<BE>)
where
R: LWEToMut,
A: LWEToRef,
K: LWESwitchingKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut LWE<&mut [u8]> = &mut res.to_mut();
let a: &LWE<&[u8]> = &a.to_ref();
let ksk: &LWESwitchingKeyPrepared<&[u8], BE> = &ksk.to_ref();
assert!(res.n().as_usize() <= self.n());
assert!(a.n().as_usize() <= self.n());
assert_eq!(ksk.n(), self.n() as u32);
assert!(scratch.available() >= self.lwe_keyswitch_tmp_bytes(res, a, ksk));
let max_k: TorusPrecision = res.k().max(a.k());
let a_size: usize = a.k().div_ceil(ksk.base2k()) as usize; let a_size: usize = a.k().div_ceil(ksk.base2k()) as usize;
let (mut glwe_in, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout { let (mut glwe_in, scratch_1) = scratch.take_glwe_ct(
n: ksk.n(), self,
base2k: a.base2k(), &GLWELayout {
k: max_k, n: ksk.n(),
rank: Rank(1), base2k: a.base2k(),
}); k: max_k,
rank: Rank(1),
},
);
glwe_in.data.zero(); glwe_in.data.zero();
let (mut glwe_out, scratch_1) = scratch_1.take_glwe_ct(&GLWECiphertextLayout { let (mut glwe_out, scratch_1) = scratch_1.take_glwe_ct(
n: ksk.n(), self,
base2k: self.base2k(), &GLWELayout {
k: max_k, n: ksk.n(),
rank: Rank(1), base2k: res.base2k(),
}); k: max_k,
rank: Rank(1),
},
);
let n_lwe: usize = a.n().into(); let n_lwe: usize = a.n().into();
@@ -120,7 +120,7 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
glwe_in.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]); glwe_in.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
} }
glwe_out.keyswitch(module, &glwe_in, &ksk.0, scratch_1); self.glwe_keyswitch(&mut glwe_out, &glwe_in, &ksk.0, scratch_1);
self.sample_extract(&glwe_out); res.sample_extract(&glwe_out);
} }
} }

View File

@@ -1,24 +1,28 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEAutomorphismKey, GGLWEInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, AutomorphismKey, AutomorphismKeyToMut, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, Rank, RingDegree,
compressed::{Decompress, GGLWESwitchingKeyCompressed}, TorusPrecision,
compressed::{
GLWESwitchingKeyCompressed, GLWESwitchingKeyCompressedAlloc, GLWESwitchingKeyCompressedToMut,
GLWESwitchingKeyCompressedToRef, GLWESwitchingKeyDecompress,
},
prepared::{GetAutomorphismGaloisElement, SetAutomorphismGaloisElement},
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWEAutomorphismKeyCompressed<D: Data> { pub struct AutomorphismKeyCompressed<D: Data> {
pub(crate) key: GGLWESwitchingKeyCompressed<D>, pub(crate) key: GLWESwitchingKeyCompressed<D>,
pub(crate) p: i64, pub(crate) p: i64,
} }
impl<D: Data> LWEInfos for GGLWEAutomorphismKeyCompressed<D> { impl<D: Data> LWEInfos for AutomorphismKeyCompressed<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.key.n() self.key.n()
} }
@@ -34,13 +38,13 @@ impl<D: Data> LWEInfos for GGLWEAutomorphismKeyCompressed<D> {
self.key.size() self.key.size()
} }
} }
impl<D: Data> GLWEInfos for GGLWEAutomorphismKeyCompressed<D> { impl<D: Data> GLWEInfos for AutomorphismKeyCompressed<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWEAutomorphismKeyCompressed<D> { impl<D: Data> GGLWEInfos for AutomorphismKeyCompressed<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.key.rank_in() self.key.rank_in()
} }
@@ -58,76 +62,185 @@ impl<D: Data> GGLWEInfos for GGLWEAutomorphismKeyCompressed<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWEAutomorphismKeyCompressed<D> { impl<D: DataRef> fmt::Debug for AutomorphismKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWEAutomorphismKeyCompressed<D> { impl<D: DataMut> FillUniform for AutomorphismKeyCompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.key.fill_uniform(log_bound, source); self.key.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> fmt::Display for GGLWEAutomorphismKeyCompressed<D> { impl<D: DataRef> fmt::Display for AutomorphismKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(AutomorphismKeyCompressed: p={}) {}", self.p, self.key) write!(f, "(AutomorphismKeyCompressed: p={}) {}", self.p, self.key)
} }
} }
impl GGLWEAutomorphismKeyCompressed<Vec<u8>> { pub trait AutomorphismKeyCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GLWESwitchingKeyCompressedAlloc,
A: GGLWEInfos, {
{ fn alloc_automorphism_key_compressed(
debug_assert_eq!(infos.rank_in(), infos.rank_out()); &self,
Self { base2k: Base2K,
key: GGLWESwitchingKeyCompressed::alloc(infos), k: TorusPrecision,
rank: Rank,
dnum: Dnum,
dsize: Dsize,
) -> AutomorphismKeyCompressed<Vec<u8>> {
AutomorphismKeyCompressed {
key: self.alloc_glwe_switching_key_compressed(base2k, k, rank, rank, dnum, dsize),
p: 0, p: 0,
} }
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self { fn alloc_automorphism_key_compressed_from_infos<A>(&self, infos: &A) -> AutomorphismKeyCompressed<Vec<u8>>
Self {
key: GGLWESwitchingKeyCompressed::alloc_with(n, base2k, k, rank, rank, dnum, dsize),
p: 0,
}
}
pub fn alloc_bytes<A>(infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!(infos.rank_in(), infos.rank_out()); assert_eq!(infos.rank_in(), infos.rank_out());
GGLWESwitchingKeyCompressed::alloc_bytes(infos) self.alloc_automorphism_key_compressed(
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize { fn bytes_of_automorphism_key_compressed(
GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rank, dnum, dsize) &self,
base2k: Base2K,
k: TorusPrecision,
rank: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize {
self.bytes_of_glwe_switching_key_compressed(base2k, k, rank, dnum, dsize)
}
fn bytes_of_automorphism_key_compressed_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
assert_eq!(infos.rank_in(), infos.rank_out());
self.bytes_of_automorphism_key_compressed(
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
} }
} }
impl<D: DataMut> ReaderFrom for GGLWEAutomorphismKeyCompressed<D> { impl AutomorphismKeyCompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: AutomorphismKeyCompressedAlloc,
{
module.alloc_automorphism_key_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where
M: AutomorphismKeyCompressedAlloc,
{
module.alloc_automorphism_key_compressed(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: AutomorphismKeyCompressedAlloc,
{
module.bytes_of_automorphism_key_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: AutomorphismKeyCompressedAlloc,
{
module.bytes_of_automorphism_key_compressed(base2k, k, rank, dnum, dsize)
}
}
impl<D: DataMut> ReaderFrom for AutomorphismKeyCompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.p = reader.read_u64::<LittleEndian>()? as i64; self.p = reader.read_u64::<LittleEndian>()? as i64;
self.key.read_from(reader) self.key.read_from(reader)
} }
} }
impl<D: DataRef> WriterTo for GGLWEAutomorphismKeyCompressed<D> { impl<D: DataRef> WriterTo for AutomorphismKeyCompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.p as u64)?; writer.write_u64::<LittleEndian>(self.p as u64)?;
self.key.write_to(writer) self.key.write_to(writer)
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWEAutomorphismKeyCompressed<DR>> for GGLWEAutomorphismKey<D> pub trait AutomorphismKeyDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GLWESwitchingKeyDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &GGLWEAutomorphismKeyCompressed<DR>) { fn decompress_automorphism_key<R, O>(&self, res: &mut R, other: &O)
self.key.decompress(module, &other.key); where
self.p = other.p; R: AutomorphismKeyToMut + SetAutomorphismGaloisElement,
O: AutomorphismKeyCompressedToRef + GetAutomorphismGaloisElement,
{
self.decompress_glwe_switching_key(&mut res.to_mut().key, &other.to_ref().key);
res.set_p(other.p());
}
}
impl<B: Backend> AutomorphismKeyDecompress for Module<B> where Self: AutomorphismKeyDecompress {}
impl<D: DataMut> AutomorphismKey<D>
where
Self: SetAutomorphismGaloisElement,
{
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: AutomorphismKeyCompressedToRef + GetAutomorphismGaloisElement,
M: AutomorphismKeyDecompress,
{
module.decompress_automorphism_key(self, other);
}
}
pub trait AutomorphismKeyCompressedToRef {
fn to_ref(&self) -> AutomorphismKeyCompressed<&[u8]>;
}
impl<D: DataRef> AutomorphismKeyCompressedToRef for AutomorphismKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToRef,
{
fn to_ref(&self) -> AutomorphismKeyCompressed<&[u8]> {
AutomorphismKeyCompressed {
key: self.key.to_ref(),
p: self.p,
}
}
}
pub trait AutomorphismKeyCompressedToMut {
fn to_mut(&mut self) -> AutomorphismKeyCompressed<&mut [u8]>;
}
impl<D: DataMut> AutomorphismKeyCompressedToMut for AutomorphismKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToMut,
{
fn to_mut(&mut self) -> AutomorphismKeyCompressed<&mut [u8]> {
AutomorphismKeyCompressed {
p: self.p,
key: self.key.to_mut(),
}
} }
} }

View File

@@ -1,18 +1,20 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform}, api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, WriterTo, ZnxInfos}, layouts::{
Backend, Data, DataMut, DataRef, FillUniform, MatZnx, MatZnxToMut, MatZnxToRef, Module, ReaderFrom, WriterTo, ZnxInfos,
},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWECiphertext, GGLWEInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWE, GGLWEInfos, GGLWEToMut, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision,
compressed::{Decompress, GLWECiphertextCompressed}, compressed::{GLWECompressed, GLWEDecompress},
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWECiphertextCompressed<D: Data> { pub struct GGLWECompressed<D: Data> {
pub(crate) data: MatZnx<D>, pub(crate) data: MatZnx<D>,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
@@ -21,9 +23,9 @@ pub struct GGLWECiphertextCompressed<D: Data> {
pub(crate) seed: Vec<[u8; 32]>, pub(crate) seed: Vec<[u8; 32]>,
} }
impl<D: Data> LWEInfos for GGLWECiphertextCompressed<D> { impl<D: Data> LWEInfos for GGLWECompressed<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -38,13 +40,13 @@ impl<D: Data> LWEInfos for GGLWECiphertextCompressed<D> {
self.data.size() self.data.size()
} }
} }
impl<D: Data> GLWEInfos for GGLWECiphertextCompressed<D> { impl<D: Data> GLWEInfos for GGLWECompressed<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWECiphertextCompressed<D> { impl<D: Data> GGLWEInfos for GGLWECompressed<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
Rank(self.data.cols_in() as u32) Rank(self.data.cols_in() as u32)
} }
@@ -62,53 +64,41 @@ impl<D: Data> GGLWEInfos for GGLWECiphertextCompressed<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWECiphertextCompressed<D> { impl<D: DataRef> fmt::Debug for GGLWECompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWECiphertextCompressed<D> { impl<D: DataMut> FillUniform for GGLWECompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> fmt::Display for GGLWECiphertextCompressed<D> { impl<D: DataRef> fmt::Display for GGLWECompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"(GGLWECiphertextCompressed: base2k={} k={} dsize={}) {}", "(GGLWECompressed: base2k={} k={} dsize={}) {}",
self.base2k.0, self.k.0, self.dsize.0, self.data self.base2k.0, self.k.0, self.dsize.0, self.data
) )
} }
} }
impl GGLWECiphertextCompressed<Vec<u8>> { pub trait GGLWECompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GGLWEInfos, {
{ fn alloc_gglwe_compressed(
Self::alloc_with( &self,
infos.n(),
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
pub fn alloc_with(
n: Degree,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
rank_out: Rank, rank_out: Rank,
dnum: Dnum, dnum: Dnum,
dsize: Dsize, dsize: Dsize,
) -> Self { ) -> GGLWECompressed<Vec<u8>> {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -123,9 +113,9 @@ impl GGLWECiphertextCompressed<Vec<u8>> {
dsize.0, dsize.0,
); );
Self { GGLWECompressed {
data: MatZnx::alloc( data: MatZnx::alloc(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
rank_in.into(), rank_in.into(),
1, 1,
@@ -139,21 +129,22 @@ impl GGLWECiphertextCompressed<Vec<u8>> {
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_gglwe_compressed_from_infos<A>(&self, infos: &A) -> GGLWECompressed<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
Self::alloc_bytes_with( assert_eq!(infos.n(), self.ring_degree());
infos.n(), self.alloc_gglwe_compressed(
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank_in(), infos.rank_in(),
infos.rank_out(),
infos.dnum(), infos.dnum(),
infos.dsize(), infos.dsize(),
) )
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum, dsize: Dsize) -> usize { fn bytes_of_gglwe_compressed(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -168,20 +159,76 @@ impl GGLWECiphertextCompressed<Vec<u8>> {
dsize.0, dsize.0,
); );
MatZnx::alloc_bytes( MatZnx::bytes_of(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
rank_in.into(), rank_in.into(),
1, 1,
k.0.div_ceil(base2k.0) as usize, k.0.div_ceil(base2k.0) as usize,
) )
} }
fn bytes_of_gglwe_compressed_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
assert_eq!(infos.n(), self.ring_degree());
self.bytes_of_gglwe_compressed(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.dnum(),
infos.dsize(),
)
}
} }
impl<D: DataRef> GGLWECiphertextCompressed<D> { impl<B: Backend> GGLWECompressedAlloc for Module<B> where Self: GetRingDegree {}
pub(crate) fn at(&self, row: usize, col: usize) -> GLWECiphertextCompressed<&[u8]> {
impl GGLWECompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GGLWECompressedAlloc,
{
module.alloc_gglwe_compressed_from_infos(infos)
}
pub fn alloc<M>(
module: &M,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> Self
where
M: GGLWECompressedAlloc,
{
module.alloc_gglwe_compressed(base2k, k, rank_in, rank_out, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GGLWECompressedAlloc,
{
module.bytes_of_gglwe_compressed_from_infos(infos)
}
pub fn byte_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: GGLWECompressedAlloc,
{
module.bytes_of_gglwe_compressed(base2k, k, rank_in, dnum, dsize)
}
}
impl<D: DataRef> GGLWECompressed<D> {
pub(crate) fn at(&self, row: usize, col: usize) -> GLWECompressed<&[u8]> {
let rank_in: usize = self.rank_in().into(); let rank_in: usize = self.rank_in().into();
GLWECiphertextCompressed { GLWECompressed {
data: self.data.at(row, col), data: self.data.at(row, col),
k: self.k, k: self.k,
base2k: self.base2k, base2k: self.base2k,
@@ -191,10 +238,10 @@ impl<D: DataRef> GGLWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut> GGLWECiphertextCompressed<D> { impl<D: DataMut> GGLWECompressed<D> {
pub(crate) fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertextCompressed<&mut [u8]> { pub(crate) fn at_mut(&mut self, row: usize, col: usize) -> GLWECompressed<&mut [u8]> {
let rank_in: usize = self.rank_in().into(); let rank_in: usize = self.rank_in().into();
GLWECiphertextCompressed { GLWECompressed {
k: self.k, k: self.k,
base2k: self.base2k, base2k: self.base2k,
rank: self.rank_out, rank: self.rank_out,
@@ -204,7 +251,7 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut> ReaderFrom for GGLWECiphertextCompressed<D> { impl<D: DataMut> ReaderFrom for GGLWECompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -219,7 +266,7 @@ impl<D: DataMut> ReaderFrom for GGLWECiphertextCompressed<D> {
} }
} }
impl<D: DataRef> WriterTo for GGLWECiphertextCompressed<D> { impl<D: DataRef> WriterTo for GGLWECompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.into())?; writer.write_u32::<LittleEndian>(self.k.into())?;
writer.write_u32::<LittleEndian>(self.base2k.into())?; writer.write_u32::<LittleEndian>(self.base2k.into())?;
@@ -233,59 +280,73 @@ impl<D: DataRef> WriterTo for GGLWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGLWECiphertextCompressed<DR>> for GGLWECiphertext<D> pub trait GGLWEDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GLWEDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &GGLWECiphertextCompressed<DR>) { fn decompress_gglwe<R, O>(&self, res: &mut R, other: &O)
#[cfg(debug_assertions)] where
{ R: GGLWEToMut,
assert_eq!( O: GGLWECompressedToRef,
self.n(), {
other.n(), let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
"invalid receiver: self.n()={} != other.n()={}", let other: &GGLWECompressed<&[u8]> = &other.to_ref();
self.n(),
other.n()
);
assert_eq!(
self.size(),
other.size(),
"invalid receiver: self.size()={} != other.size()={}",
self.size(),
other.size()
);
assert_eq!(
self.rank_in(),
other.rank_in(),
"invalid receiver: self.rank_in()={} != other.rank_in()={}",
self.rank_in(),
other.rank_in()
);
assert_eq!(
self.rank_out(),
other.rank_out(),
"invalid receiver: self.rank_out()={} != other.rank_out()={}",
self.rank_out(),
other.rank_out()
);
assert_eq!( assert_eq!(res.gglwe_layout(), other.gglwe_layout());
self.dnum(),
other.dnum(), let rank_in: usize = res.rank_in().into();
"invalid receiver: self.dnum()={} != other.dnum()={}", let dnum: usize = res.dnum().into();
self.dnum(),
other.dnum() for row_i in 0..dnum {
); for col_i in 0..rank_in {
self.decompress_glwe(&mut res.at_mut(row_i, col_i), &other.at(row_i, col_i));
}
}
}
}
impl<B: Backend> GGLWEDecompress for Module<B> where Self: VecZnxFillUniform + VecZnxCopy {}
impl<D: DataMut> GGLWE<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: GGLWECompressedToRef,
M: GGLWEDecompress,
{
module.decompress_gglwe(self, other);
}
}
pub trait GGLWECompressedToMut {
fn to_mut(&mut self) -> GGLWECompressed<&mut [u8]>;
}
impl<D: DataMut> GGLWECompressedToMut for GGLWECompressed<D> {
fn to_mut(&mut self) -> GGLWECompressed<&mut [u8]> {
GGLWECompressed {
k: self.k(),
base2k: self.base2k(),
dsize: self.dsize(),
seed: self.seed.clone(),
rank_out: self.rank_out,
data: self.data.to_mut(),
}
}
}
pub trait GGLWECompressedToRef {
fn to_ref(&self) -> GGLWECompressed<&[u8]>;
}
impl<D: DataRef> GGLWECompressedToRef for GGLWECompressed<D> {
fn to_ref(&self) -> GGLWECompressed<&[u8]> {
GGLWECompressed {
k: self.k(),
base2k: self.base2k(),
dsize: self.dsize(),
seed: self.seed.clone(),
rank_out: self.rank_out,
data: self.data.to_ref(),
} }
let rank_in: usize = self.rank_in().into();
let dnum: usize = self.dnum().into();
(0..rank_in).for_each(|col_i| {
(0..dnum).for_each(|row_i| {
self.at_mut(row_i, col_i)
.decompress(module, &other.at(row_i, col_i));
});
});
} }
} }

View File

@@ -1,25 +1,25 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeySetMetaData, GLWESwitchingKeyToMut, LWEInfos,
compressed::{Decompress, GGLWECiphertextCompressed}, Rank, RingDegree, TorusPrecision,
compressed::{GGLWECompressed, GGLWECompressedAlloc, GGLWECompressedToMut, GGLWECompressedToRef, GGLWEDecompress},
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWESwitchingKeyCompressed<D: Data> { pub struct GLWESwitchingKeyCompressed<D: Data> {
pub(crate) key: GGLWECiphertextCompressed<D>, pub(crate) key: GGLWECompressed<D>,
pub(crate) sk_in_n: usize, // Degree of sk_in pub(crate) sk_in_n: usize, // Degree of sk_in
pub(crate) sk_out_n: usize, // Degree of sk_out pub(crate) sk_out_n: usize, // Degree of sk_out
} }
impl<D: Data> LWEInfos for GGLWESwitchingKeyCompressed<D> { impl<D: Data> LWEInfos for GLWESwitchingKeyCompressed<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.key.n() self.key.n()
} }
@@ -35,13 +35,13 @@ impl<D: Data> LWEInfos for GGLWESwitchingKeyCompressed<D> {
self.key.size() self.key.size()
} }
} }
impl<D: Data> GLWEInfos for GGLWESwitchingKeyCompressed<D> { impl<D: Data> GLWEInfos for GLWESwitchingKeyCompressed<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWESwitchingKeyCompressed<D> { impl<D: Data> GGLWEInfos for GLWESwitchingKeyCompressed<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.key.rank_in() self.key.rank_in()
} }
@@ -59,19 +59,19 @@ impl<D: Data> GGLWEInfos for GGLWESwitchingKeyCompressed<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWESwitchingKeyCompressed<D> { impl<D: DataRef> fmt::Debug for GLWESwitchingKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWESwitchingKeyCompressed<D> { impl<D: DataMut> FillUniform for GLWESwitchingKeyCompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.key.fill_uniform(log_bound, source); self.key.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> fmt::Display for GGLWESwitchingKeyCompressed<D> { impl<D: DataRef> fmt::Display for GLWESwitchingKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
@@ -81,47 +81,100 @@ impl<D: DataRef> fmt::Display for GGLWESwitchingKeyCompressed<D> {
} }
} }
impl GGLWESwitchingKeyCompressed<Vec<u8>> { pub trait GLWESwitchingKeyCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GGLWECompressedAlloc,
A: GGLWEInfos, {
{ fn alloc_glwe_switching_key_compressed(
GGLWESwitchingKeyCompressed { &self,
key: GGLWECiphertextCompressed::alloc(infos),
sk_in_n: 0,
sk_out_n: 0,
}
}
pub fn alloc_with(
n: Degree,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
rank_out: Rank, rank_out: Rank,
dnum: Dnum, dnum: Dnum,
dsize: Dsize, dsize: Dsize,
) -> Self { ) -> GLWESwitchingKeyCompressed<Vec<u8>> {
GGLWESwitchingKeyCompressed { GLWESwitchingKeyCompressed {
key: GGLWECiphertextCompressed::alloc_with(n, base2k, k, rank_in, rank_out, dnum, dsize), key: self.alloc_gglwe_compressed(base2k, k, rank_in, rank_out, dnum, dsize),
sk_in_n: 0, sk_in_n: 0,
sk_out_n: 0, sk_out_n: 0,
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> GLWESwitchingKeyCompressed<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
GGLWECiphertextCompressed::alloc_bytes(infos) self.alloc_glwe_switching_key_compressed(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum, dsize: Dsize) -> usize { fn bytes_of_glwe_switching_key_compressed(
GGLWECiphertextCompressed::alloc_bytes_with(n, base2k, k, rank_in, dnum, dsize) &self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize {
self.bytes_of_gglwe_compressed(base2k, k, rank_in, dnum, dsize)
}
fn bytes_of_glwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
self.bytes_of_gglwe_compressed_from_infos(infos)
} }
} }
impl<D: DataMut> ReaderFrom for GGLWESwitchingKeyCompressed<D> { impl GLWESwitchingKeyCompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GLWESwitchingKeyCompressedAlloc,
{
module.alloc_glwe_switching_key_compressed_from_infos(infos)
}
pub fn alloc<M>(
module: &M,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> Self
where
M: GLWESwitchingKeyCompressedAlloc,
{
module.alloc_glwe_switching_key_compressed(base2k, k, rank_in, rank_out, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GLWESwitchingKeyCompressedAlloc,
{
module.bytes_of_glwe_switching_key_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: GLWESwitchingKeyCompressedAlloc,
{
module.bytes_of_glwe_switching_key_compressed(base2k, k, rank_in, dnum, dsize)
}
}
impl<D: DataMut> ReaderFrom for GLWESwitchingKeyCompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.sk_in_n = reader.read_u64::<LittleEndian>()? as usize; self.sk_in_n = reader.read_u64::<LittleEndian>()? as usize;
self.sk_out_n = reader.read_u64::<LittleEndian>()? as usize; self.sk_out_n = reader.read_u64::<LittleEndian>()? as usize;
@@ -129,7 +182,7 @@ impl<D: DataMut> ReaderFrom for GGLWESwitchingKeyCompressed<D> {
} }
} }
impl<D: DataRef> WriterTo for GGLWESwitchingKeyCompressed<D> { impl<D: DataRef> WriterTo for GLWESwitchingKeyCompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.sk_in_n as u64)?; writer.write_u64::<LittleEndian>(self.sk_in_n as u64)?;
writer.write_u64::<LittleEndian>(self.sk_out_n as u64)?; writer.write_u64::<LittleEndian>(self.sk_out_n as u64)?;
@@ -137,13 +190,64 @@ impl<D: DataRef> WriterTo for GGLWESwitchingKeyCompressed<D> {
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWESwitchingKeyCompressed<DR>> for GGLWESwitchingKey<D> pub trait GLWESwitchingKeyDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GGLWEDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &GGLWESwitchingKeyCompressed<DR>) { fn decompress_glwe_switching_key<R, O>(&self, res: &mut R, other: &O)
self.key.decompress(module, &other.key); where
self.sk_in_n = other.sk_in_n; R: GLWESwitchingKeyToMut + GLWESwitchingKeySetMetaData,
self.sk_out_n = other.sk_out_n; O: GLWESwitchingKeyCompressedToRef,
{
let other: &GLWESwitchingKeyCompressed<&[u8]> = &other.to_ref();
self.decompress_gglwe(&mut res.to_mut().key, &other.key);
res.set_sk_in_n(other.sk_in_n);
res.set_sk_out_n(other.sk_out_n);
}
}
impl<B: Backend> GLWESwitchingKeyDecompress for Module<B> where Self: GGLWEDecompress {}
impl<D: DataMut> GLWESwitchingKey<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: GLWESwitchingKeyCompressedToRef,
M: GLWESwitchingKeyDecompress,
{
module.decompress_glwe_switching_key(self, other);
}
}
pub trait GLWESwitchingKeyCompressedToMut {
fn to_mut(&mut self) -> GLWESwitchingKeyCompressed<&mut [u8]>;
}
impl<D: DataMut> GLWESwitchingKeyCompressedToMut for GLWESwitchingKeyCompressed<D>
where
GGLWECompressed<D>: GGLWECompressedToMut,
{
fn to_mut(&mut self) -> GLWESwitchingKeyCompressed<&mut [u8]> {
GLWESwitchingKeyCompressed {
sk_in_n: self.sk_in_n,
sk_out_n: self.sk_out_n,
key: self.key.to_mut(),
}
}
}
pub trait GLWESwitchingKeyCompressedToRef {
fn to_ref(&self) -> GLWESwitchingKeyCompressed<&[u8]>;
}
impl<D: DataRef> GLWESwitchingKeyCompressedToRef for GLWESwitchingKeyCompressed<D>
where
GGLWECompressed<D>: GGLWECompressedToRef,
{
fn to_ref(&self) -> GLWESwitchingKeyCompressed<&[u8]> {
GLWESwitchingKeyCompressed {
sk_in_n: self.sk_in_n,
sk_out_n: self.sk_out_n,
key: self.key.to_ref(),
}
} }
} }

View File

@@ -1,23 +1,25 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWETensorKey, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, Rank, RingDegree, TensorKey, TensorKeyToMut, TorusPrecision,
compressed::{Decompress, GGLWESwitchingKeyCompressed}, compressed::{
GLWESwitchingKeyCompressed, GLWESwitchingKeyCompressedAlloc, GLWESwitchingKeyCompressedToMut,
GLWESwitchingKeyCompressedToRef, GLWESwitchingKeyDecompress,
},
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWETensorKeyCompressed<D: Data> { pub struct TensorKeyCompressed<D: Data> {
pub(crate) keys: Vec<GGLWESwitchingKeyCompressed<D>>, pub(crate) keys: Vec<GLWESwitchingKeyCompressed<D>>,
} }
impl<D: Data> LWEInfos for GGLWETensorKeyCompressed<D> { impl<D: Data> LWEInfos for TensorKeyCompressed<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.keys[0].n() self.keys[0].n()
} }
@@ -32,13 +34,13 @@ impl<D: Data> LWEInfos for GGLWETensorKeyCompressed<D> {
self.keys[0].size() self.keys[0].size()
} }
} }
impl<D: Data> GLWEInfos for GGLWETensorKeyCompressed<D> { impl<D: Data> GLWEInfos for TensorKeyCompressed<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWETensorKeyCompressed<D> { impl<D: Data> GGLWEInfos for TensorKeyCompressed<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank_out() self.rank_out()
} }
@@ -56,21 +58,21 @@ impl<D: Data> GGLWEInfos for GGLWETensorKeyCompressed<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWETensorKeyCompressed<D> { impl<D: DataRef> fmt::Debug for TensorKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWETensorKeyCompressed<D> { impl<D: DataMut> FillUniform for TensorKeyCompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.keys self.keys
.iter_mut() .iter_mut()
.for_each(|key: &mut GGLWESwitchingKeyCompressed<D>| key.fill_uniform(log_bound, source)) .for_each(|key: &mut GLWESwitchingKeyCompressed<D>| key.fill_uniform(log_bound, source))
} }
} }
impl<D: DataRef> fmt::Display for GGLWETensorKeyCompressed<D> { impl<D: DataRef> fmt::Display for TensorKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "(GLWETensorKeyCompressed)",)?; writeln!(f, "(GLWETensorKeyCompressed)",)?;
for (i, key) in self.keys.iter().enumerate() { for (i, key) in self.keys.iter().enumerate() {
@@ -80,8 +82,27 @@ impl<D: DataRef> fmt::Display for GGLWETensorKeyCompressed<D> {
} }
} }
impl GGLWETensorKeyCompressed<Vec<u8>> { pub trait TensorKeyCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyCompressedAlloc,
{
fn alloc_tensor_key_compressed(
&self,
base2k: Base2K,
k: TorusPrecision,
rank: Rank,
dnum: Dnum,
dsize: Dsize,
) -> TensorKeyCompressed<Vec<u8>> {
let pairs: u32 = (((rank.as_u32() + 1) * rank.as_u32()) >> 1).max(1);
TensorKeyCompressed {
keys: (0..pairs)
.map(|_| self.alloc_glwe_switching_key_compressed(base2k, k, Rank(1), rank, dnum, dsize))
.collect(),
}
}
fn alloc_tensor_key_compressed_from_infos<A>(&self, infos: &A) -> TensorKeyCompressed<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
@@ -90,62 +111,67 @@ impl GGLWETensorKeyCompressed<Vec<u8>> {
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWETensorKeyCompressed" "rank_in != rank_out is not supported for GGLWETensorKeyCompressed"
); );
Self::alloc_with( self.alloc_tensor_key_compressed(
infos.n(),
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank_out(), infos.rank(),
infos.dnum(), infos.dnum(),
infos.dsize(), infos.dsize(),
) )
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self { fn bytes_of_tensor_key_compressed(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let mut keys: Vec<GGLWESwitchingKeyCompressed<Vec<u8>>> = Vec::new(); let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1); pairs * self.bytes_of_glwe_switching_key_compressed(base2k, k, Rank(1), dnum, dsize)
(0..pairs).for_each(|_| {
keys.push(GGLWESwitchingKeyCompressed::alloc_with(
n,
base2k,
k,
Rank(1),
rank,
dnum,
dsize,
));
});
Self { keys }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_tensor_key_compressed_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
assert_eq!( self.bytes_of_tensor_key_compressed(
infos.rank_in(), infos.base2k(),
infos.rank_out(), infos.k(),
"rank_in != rank_out is not supported for GGLWETensorKeyCompressed" infos.rank(),
); infos.dnum(),
let rank_out: usize = infos.rank_out().into(); infos.dsize(),
let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1); )
pairs
* GGLWESwitchingKeyCompressed::alloc_bytes_with(
infos.n(),
infos.base2k(),
infos.k(),
Rank(1),
infos.dnum(),
infos.dsize(),
)
}
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
pairs * GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, Rank(1), dnum, dsize)
} }
} }
impl<D: DataMut> ReaderFrom for GGLWETensorKeyCompressed<D> { impl TensorKeyCompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: TensorKeyCompressedAlloc,
{
module.alloc_tensor_key_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where
M: TensorKeyCompressedAlloc,
{
module.alloc_tensor_key_compressed(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: TensorKeyCompressedAlloc,
{
module.bytes_of_tensor_key_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: TensorKeyCompressedAlloc,
{
module.bytes_of_tensor_key_compressed(base2k, k, rank, dnum, dsize)
}
}
impl<D: DataMut> ReaderFrom for TensorKeyCompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
let len: usize = reader.read_u64::<LittleEndian>()? as usize; let len: usize = reader.read_u64::<LittleEndian>()? as usize;
if self.keys.len() != len { if self.keys.len() != len {
@@ -161,7 +187,7 @@ impl<D: DataMut> ReaderFrom for GGLWETensorKeyCompressed<D> {
} }
} }
impl<D: DataRef> WriterTo for GGLWETensorKeyCompressed<D> { impl<D: DataRef> WriterTo for TensorKeyCompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?; writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
for key in &self.keys { for key in &self.keys {
@@ -171,8 +197,8 @@ impl<D: DataRef> WriterTo for GGLWETensorKeyCompressed<D> {
} }
} }
impl<D: DataMut> GGLWETensorKeyCompressed<D> { impl<D: DataMut> TensorKeyCompressed<D> {
pub(crate) fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GGLWESwitchingKeyCompressed<D> { pub(crate) fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GLWESwitchingKeyCompressed<D> {
if i > j { if i > j {
std::mem::swap(&mut i, &mut j); std::mem::swap(&mut i, &mut j);
}; };
@@ -181,27 +207,70 @@ impl<D: DataMut> GGLWETensorKeyCompressed<D> {
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWETensorKeyCompressed<DR>> for GGLWETensorKey<D> pub trait TensorKeyDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GLWESwitchingKeyDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &GGLWETensorKeyCompressed<DR>) { fn decompress_tensor_key<R, O>(&self, res: &mut R, other: &O)
#[cfg(debug_assertions)] where
{ R: TensorKeyToMut,
assert_eq!( O: TensorKeyCompressedToRef,
self.keys.len(), {
other.keys.len(), let res: &mut TensorKey<&mut [u8]> = &mut res.to_mut();
"invalid receiver: self.keys.len()={} != other.keys.len()={}", let other: &TensorKeyCompressed<&[u8]> = &other.to_ref();
self.keys.len(),
other.keys.len()
);
}
self.keys assert_eq!(
.iter_mut() res.keys.len(),
.zip(other.keys.iter()) other.keys.len(),
.for_each(|(a, b)| { "invalid receiver: res.keys.len()={} != other.keys.len()={}",
a.decompress(module, b); res.keys.len(),
}); other.keys.len()
);
for (a, b) in res.keys.iter_mut().zip(other.keys.iter()) {
self.decompress_glwe_switching_key(a, b);
}
}
}
impl<B: Backend> TensorKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
impl<D: DataMut> TensorKey<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: TensorKeyCompressedToRef,
M: TensorKeyDecompress,
{
module.decompress_tensor_key(self, other);
}
}
pub trait TensorKeyCompressedToMut {
fn to_mut(&mut self) -> TensorKeyCompressed<&mut [u8]>;
}
impl<D: DataMut> TensorKeyCompressedToMut for TensorKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToMut,
{
fn to_mut(&mut self) -> TensorKeyCompressed<&mut [u8]> {
TensorKeyCompressed {
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
}
}
}
pub trait TensorKeyCompressedToRef {
fn to_ref(&self) -> TensorKeyCompressed<&[u8]>;
}
impl<D: DataRef> TensorKeyCompressedToRef for TensorKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToRef,
{
fn to_ref(&self) -> TensorKeyCompressed<&[u8]> {
TensorKeyCompressed {
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
}
} }
} }

View File

@@ -1,18 +1,19 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform}, layouts::{
layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, WriterTo, ZnxInfos}, Backend, Data, DataMut, DataRef, FillUniform, MatZnx, MatZnxToMut, MatZnxToRef, Module, ReaderFrom, WriterTo, ZnxInfos,
},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGSW, GGSWInfos, GGSWToMut, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision,
compressed::{Decompress, GLWECiphertextCompressed}, compressed::{GLWECompressed, GLWEDecompress},
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGSWCiphertextCompressed<D: Data> { pub struct GGSWCompressed<D: Data> {
pub(crate) data: MatZnx<D>, pub(crate) data: MatZnx<D>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
@@ -21,9 +22,9 @@ pub struct GGSWCiphertextCompressed<D: Data> {
pub(crate) seed: Vec<[u8; 32]>, pub(crate) seed: Vec<[u8; 32]>,
} }
impl<D: Data> LWEInfos for GGSWCiphertextCompressed<D> { impl<D: Data> LWEInfos for GGSWCompressed<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -37,13 +38,13 @@ impl<D: Data> LWEInfos for GGSWCiphertextCompressed<D> {
self.data.size() self.data.size()
} }
} }
impl<D: Data> GLWEInfos for GGSWCiphertextCompressed<D> { impl<D: Data> GLWEInfos for GGSWCompressed<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank self.rank
} }
} }
impl<D: Data> GGSWInfos for GGSWCiphertextCompressed<D> { impl<D: Data> GGSWInfos for GGSWCompressed<D> {
fn dsize(&self) -> Dsize { fn dsize(&self) -> Dsize {
self.dsize self.dsize
} }
@@ -53,46 +54,42 @@ impl<D: Data> GGSWInfos for GGSWCiphertextCompressed<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GGSWCiphertextCompressed<D> { impl<D: DataRef> fmt::Debug for GGSWCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.data) write!(f, "{}", self.data)
} }
} }
impl<D: DataRef> fmt::Display for GGSWCiphertextCompressed<D> { impl<D: DataRef> fmt::Display for GGSWCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"(GGSWCiphertextCompressed: base2k={} k={} dsize={}) {}", "(GGSWCompressed: base2k={} k={} dsize={}) {}",
self.base2k, self.k, self.dsize, self.data self.base2k, self.k, self.dsize, self.data
) )
} }
} }
impl<D: DataMut> FillUniform for GGSWCiphertextCompressed<D> { impl<D: DataMut> FillUniform for GGSWCompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl GGSWCiphertextCompressed<Vec<u8>> { pub trait GGSWCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GGSWInfos, {
{ fn alloc_ggsw_compressed(
Self::alloc_with( &self,
infos.n(), base2k: Base2K,
infos.base2k(), k: TorusPrecision,
infos.k(), rank: Rank,
infos.rank(), dnum: Dnum,
infos.dnum(), dsize: Dsize,
infos.dsize(), ) -> GGSWCompressed<Vec<u8>> {
)
}
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
"invalid ggsw: ceil(k/base2k): {size} <= dsize: {}", "invalid ggsw: ceil(k/base2k): {size} <= dsize: {}",
dsize.0 dsize.0
@@ -105,9 +102,9 @@ impl GGSWCiphertextCompressed<Vec<u8>> {
dsize.0, dsize.0,
); );
Self { GGSWCompressed {
data: MatZnx::alloc( data: MatZnx::alloc(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
(rank + 1).into(), (rank + 1).into(),
1, 1,
@@ -121,12 +118,11 @@ impl GGSWCiphertextCompressed<Vec<u8>> {
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_ggsw_compressed_from_infos<A>(&self, infos: &A) -> GGSWCompressed<Vec<u8>>
where where
A: GGSWInfos, A: GGSWInfos,
{ {
Self::alloc_bytes_with( self.alloc_ggsw_compressed(
infos.n(),
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank(), infos.rank(),
@@ -135,9 +131,9 @@ impl GGSWCiphertextCompressed<Vec<u8>> {
) )
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize { fn bytes_of_ggsw_compressed(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
"invalid ggsw: ceil(k/base2k): {size} <= dsize: {}", "invalid ggsw: ceil(k/base2k): {size} <= dsize: {}",
dsize.0 dsize.0
@@ -150,20 +146,65 @@ impl GGSWCiphertextCompressed<Vec<u8>> {
dsize.0, dsize.0,
); );
MatZnx::alloc_bytes( MatZnx::bytes_of(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
(rank + 1).into(), (rank + 1).into(),
1, 1,
k.0.div_ceil(base2k.0) as usize, k.0.div_ceil(base2k.0) as usize,
) )
} }
fn bytes_of_ggsw_compressed_key_from_infos<A>(&self, infos: &A) -> usize
where
A: GGSWInfos,
{
self.bytes_of_ggsw_compressed(
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
}
} }
impl<D: DataRef> GGSWCiphertextCompressed<D> { impl GGSWCompressed<Vec<u8>> {
pub fn at(&self, row: usize, col: usize) -> GLWECiphertextCompressed<&[u8]> { pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGSWInfos,
M: GGSWCompressedAlloc,
{
module.alloc_ggsw_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where
M: GGSWCompressedAlloc,
{
module.alloc_ggsw_compressed(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGSWInfos,
M: GGSWCompressedAlloc,
{
module.bytes_of_ggsw_compressed_key_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: GGSWCompressedAlloc,
{
module.bytes_of_ggsw_compressed(base2k, k, rank, dnum, dsize)
}
}
impl<D: DataRef> GGSWCompressed<D> {
pub fn at(&self, row: usize, col: usize) -> GLWECompressed<&[u8]> {
let rank: usize = self.rank().into(); let rank: usize = self.rank().into();
GLWECiphertextCompressed { GLWECompressed {
data: self.data.at(row, col), data: self.data.at(row, col),
k: self.k, k: self.k,
base2k: self.base2k, base2k: self.base2k,
@@ -173,10 +214,10 @@ impl<D: DataRef> GGSWCiphertextCompressed<D> {
} }
} }
impl<D: DataMut> GGSWCiphertextCompressed<D> { impl<D: DataMut> GGSWCompressed<D> {
pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertextCompressed<&mut [u8]> { pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECompressed<&mut [u8]> {
let rank: usize = self.rank().into(); let rank: usize = self.rank().into();
GLWECiphertextCompressed { GLWECompressed {
data: self.data.at_mut(row, col), data: self.data.at_mut(row, col),
k: self.k, k: self.k,
base2k: self.base2k, base2k: self.base2k,
@@ -186,7 +227,7 @@ impl<D: DataMut> GGSWCiphertextCompressed<D> {
} }
} }
impl<D: DataMut> ReaderFrom for GGSWCiphertextCompressed<D> { impl<D: DataMut> ReaderFrom for GGSWCompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -201,7 +242,7 @@ impl<D: DataMut> ReaderFrom for GGSWCiphertextCompressed<D> {
} }
} }
impl<D: DataRef> WriterTo for GGSWCiphertextCompressed<D> { impl<D: DataRef> WriterTo for GGSWCompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.into())?; writer.write_u32::<LittleEndian>(self.k.into())?;
writer.write_u32::<LittleEndian>(self.base2k.into())?; writer.write_u32::<LittleEndian>(self.base2k.into())?;
@@ -215,23 +256,72 @@ impl<D: DataRef> WriterTo for GGSWCiphertextCompressed<D> {
} }
} }
impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGSWCiphertextCompressed<DR>> for GGSWCiphertext<D> pub trait GGSWDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GLWEDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &GGSWCiphertextCompressed<DR>) { fn decompress_ggsw<R, O>(&self, res: &mut R, other: &O)
#[cfg(debug_assertions)] where
{ R: GGSWToMut,
assert_eq!(self.rank(), other.rank()) O: GGSWCompressedToRef,
} {
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let other: &GGSWCompressed<&[u8]> = &other.to_ref();
let dnum: usize = self.dnum().into(); assert_eq!(res.rank(), other.rank());
let rank: usize = self.rank().into(); let dnum: usize = res.dnum().into();
(0..dnum).for_each(|row_i| { let rank: usize = res.rank().into();
(0..rank + 1).for_each(|col_j| {
self.at_mut(row_i, col_j) for row_i in 0..dnum {
.decompress(module, &other.at(row_i, col_j)); for col_j in 0..rank + 1 {
}); self.decompress_glwe(&mut res.at_mut(row_i, col_j), &other.at(row_i, col_j));
}); }
}
}
}
impl<B: Backend> GGSWDecompress for Module<B> where Self: GGSWDecompress {}
impl<D: DataMut> GGSW<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: GGSWCompressedToRef,
M: GGSWDecompress,
{
module.decompress_ggsw(self, other);
}
}
pub trait GGSWCompressedToMut {
fn to_mut(&mut self) -> GGSWCompressed<&mut [u8]>;
}
impl<D: DataMut> GGSWCompressedToMut for GGSWCompressed<D> {
fn to_mut(&mut self) -> GGSWCompressed<&mut [u8]> {
GGSWCompressed {
k: self.k(),
base2k: self.base2k(),
dsize: self.dsize(),
rank: self.rank(),
seed: self.seed.clone(),
data: self.data.to_mut(),
}
}
}
pub trait GGSWCompressedToRef {
fn to_ref(&self) -> GGSWCompressed<&[u8]>;
}
impl<D: DataRef> GGSWCompressedToRef for GGSWCompressed<D> {
fn to_ref(&self) -> GGSWCompressed<&[u8]> {
GGSWCompressed {
k: self.k(),
base2k: self.base2k(),
dsize: self.dsize(),
rank: self.rank(),
seed: self.seed.clone(),
data: self.data.to_ref(),
}
} }
} }

View File

@@ -1,15 +1,19 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform}, api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, VecZnx, WriterTo, ZnxInfos}, layouts::{
Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo, ZnxInfos,
},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, Degree, GLWECiphertext, GLWEInfos, LWEInfos, Rank, TorusPrecision, compressed::Decompress}; use crate::layouts::{
Base2K, GLWE, GLWEInfos, GLWEToMut, GetRingDegree, LWEInfos, Rank, RingDegree, SetGLWEInfos, TorusPrecision,
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GLWECiphertextCompressed<D: Data> { pub struct GLWECompressed<D: Data> {
pub(crate) data: VecZnx<D>, pub(crate) data: VecZnx<D>,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
@@ -17,7 +21,7 @@ pub struct GLWECiphertextCompressed<D: Data> {
pub(crate) seed: [u8; 32], pub(crate) seed: [u8; 32],
} }
impl<D: Data> LWEInfos for GLWECiphertextCompressed<D> { impl<D: Data> LWEInfos for GLWECompressed<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -30,27 +34,27 @@ impl<D: Data> LWEInfos for GLWECiphertextCompressed<D> {
self.data.size() self.data.size()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
} }
impl<D: Data> GLWEInfos for GLWECiphertextCompressed<D> { impl<D: Data> GLWEInfos for GLWECompressed<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank self.rank
} }
} }
impl<D: DataRef> fmt::Debug for GLWECiphertextCompressed<D> { impl<D: DataRef> fmt::Debug for GLWECompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataRef> fmt::Display for GLWECiphertextCompressed<D> { impl<D: DataRef> fmt::Display for GLWECompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"GLWECiphertextCompressed: base2k={} k={} rank={} seed={:?}: {}", "GLWECompressed: base2k={} k={} rank={} seed={:?}: {}",
self.base2k(), self.base2k(),
self.k(), self.k(),
self.rank(), self.rank(),
@@ -60,23 +64,23 @@ impl<D: DataRef> fmt::Display for GLWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut> FillUniform for GLWECiphertextCompressed<D> { impl<D: DataMut> FillUniform for GLWECompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl GLWECiphertextCompressed<Vec<u8>> { pub trait GLWECompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GLWEInfos, {
{ fn alloc_glwe_compressed(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> GLWECompressed<Vec<u8>> {
Self::alloc_with(infos.n(), infos.base2k(), infos.k(), infos.rank()) GLWECompressed {
} data: VecZnx::alloc(
self.ring_degree().into(),
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self { 1,
Self { k.0.div_ceil(base2k.0) as usize,
data: VecZnx::alloc(n.into(), 1, k.0.div_ceil(base2k.0) as usize), ),
base2k, base2k,
k, k,
rank, rank,
@@ -84,19 +88,66 @@ impl GLWECiphertextCompressed<Vec<u8>> {
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_compressed_from_infos<A>(&self, infos: &A) -> GLWECompressed<Vec<u8>>
where where
A: GLWEInfos, A: GLWEInfos,
{ {
Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k()) assert_eq!(self.ring_degree(), infos.n());
self.alloc_glwe_compressed(infos.base2k(), infos.k(), infos.rank())
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision) -> usize { fn bytes_of_glwe_compressed(&self, base2k: Base2K, k: TorusPrecision) -> usize {
VecZnx::alloc_bytes(n.into(), 1, k.0.div_ceil(base2k.0) as usize) VecZnx::bytes_of(
self.ring_degree().into(),
1,
k.0.div_ceil(base2k.0) as usize,
)
}
fn bytes_of_glwe_compressed_from_infos<A>(&self, infos: &A) -> usize
where
A: GLWEInfos,
{
assert_eq!(self.ring_degree(), infos.n());
self.bytes_of_glwe_compressed(infos.base2k(), infos.k())
} }
} }
impl<D: DataMut> ReaderFrom for GLWECiphertextCompressed<D> { impl<B: Backend> GLWECompressedAlloc for Module<B> where Self: GetRingDegree {}
impl GLWECompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GLWEInfos,
M: GLWECompressedAlloc,
{
module.alloc_glwe_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
where
M: GLWECompressedAlloc,
{
module.alloc_glwe_compressed(base2k, k, rank)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GLWEInfos,
M: GLWECompressedAlloc,
{
module.bytes_of_glwe_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> usize
where
M: GLWECompressedAlloc,
{
module.bytes_of_glwe_compressed(base2k, k)
}
}
impl<D: DataMut> ReaderFrom for GLWECompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -106,7 +157,7 @@ impl<D: DataMut> ReaderFrom for GLWECiphertextCompressed<D> {
} }
} }
impl<D: DataRef> WriterTo for GLWECiphertextCompressed<D> { impl<D: DataRef> WriterTo for GLWECompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.into())?; writer.write_u32::<LittleEndian>(self.k.into())?;
writer.write_u32::<LittleEndian>(self.base2k.into())?; writer.write_u32::<LittleEndian>(self.base2k.into())?;
@@ -116,63 +167,82 @@ impl<D: DataRef> WriterTo for GLWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GLWECiphertextCompressed<DR>> for GLWECiphertext<D> pub trait GLWEDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GetRingDegree + VecZnxFillUniform + VecZnxCopy,
{ {
fn decompress(&mut self, module: &Module<B>, other: &GLWECiphertextCompressed<DR>) { fn decompress_glwe<R, O>(&self, res: &mut R, other: &O)
#[cfg(debug_assertions)] where
{ R: GLWEToMut + SetGLWEInfos,
assert_eq!( O: GLWECompressedToRef + GLWEInfos,
self.n(),
other.n(),
"invalid receiver: self.n()={} != other.n()={}",
self.n(),
other.n()
);
assert_eq!(
self.size(),
other.size(),
"invalid receiver: self.size()={} != other.size()={}",
self.size(),
other.size()
);
assert_eq!(
self.rank(),
other.rank(),
"invalid receiver: self.rank()={} != other.rank()={}",
self.rank(),
other.rank()
);
}
let mut source: Source = Source::new(other.seed);
self.decompress_internal(module, other, &mut source);
}
}
impl<D: DataMut> GLWECiphertext<D> {
pub(crate) fn decompress_internal<DataOther, B: Backend>(
&mut self,
module: &Module<B>,
other: &GLWECiphertextCompressed<DataOther>,
source: &mut Source,
) where
DataOther: DataRef,
Module<B>: VecZnxCopy + VecZnxFillUniform,
{ {
#[cfg(debug_assertions)]
{ {
assert_eq!(self.rank(), other.rank()); let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
debug_assert_eq!(self.size(), other.size()); let other: &GLWECompressed<&[u8]> = &other.to_ref();
assert_eq!(
res.n(),
self.ring_degree(),
"invalid receiver: res.n()={} != other.n()={}",
res.n(),
self.ring_degree()
);
assert_eq!(res.lwe_layout(), other.lwe_layout());
assert_eq!(res.glwe_layout(), other.glwe_layout());
let mut source: Source = Source::new(other.seed);
self.vec_znx_copy(&mut res.data, 0, &other.data, 0);
(1..(other.rank() + 1).into()).for_each(|i| {
self.vec_znx_fill_uniform(other.base2k.into(), &mut res.data, i, &mut source);
});
} }
module.vec_znx_copy(&mut self.data, 0, &other.data, 0); res.set_base2k(other.base2k());
(1..(other.rank() + 1).into()).for_each(|i| { res.set_k(other.k());
module.vec_znx_fill_uniform(other.base2k.into(), &mut self.data, i, source); }
}); }
self.base2k = other.base2k; impl<B: Backend> GLWEDecompress for Module<B> where Self: GetRingDegree + VecZnxFillUniform + VecZnxCopy {}
self.k = other.k;
impl<D: DataMut> GLWE<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: GLWECompressedToRef + GLWEInfos,
M: GLWEDecompress,
{
module.decompress_glwe(self, other);
}
}
pub trait GLWECompressedToRef {
fn to_ref(&self) -> GLWECompressed<&[u8]>;
}
impl<D: DataRef> GLWECompressedToRef for GLWECompressed<D> {
fn to_ref(&self) -> GLWECompressed<&[u8]> {
GLWECompressed {
seed: self.seed.clone(),
base2k: self.base2k,
k: self.k,
rank: self.rank,
data: self.data.to_ref(),
}
}
}
pub trait GLWECompressedToMut {
fn to_mut(&mut self) -> GLWECompressed<&mut [u8]>;
}
impl<D: DataMut> GLWECompressedToMut for GLWECompressed<D> {
fn to_mut(&mut self) -> GLWECompressed<&mut [u8]> {
GLWECompressed {
seed: self.seed.clone(),
base2k: self.base2k,
k: self.k,
rank: self.rank,
data: self.data.to_mut(),
}
} }
} }

View File

@@ -1,16 +1,21 @@
use std::fmt; use std::fmt;
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, compressed::GGLWESwitchingKeyCompressed, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWEToLWESwitchingKey, GLWEToLWESwitchingKeyToMut, LWEInfos, Rank, RingDegree,
TorusPrecision,
compressed::{
GLWESwitchingKeyCompressed, GLWESwitchingKeyCompressedAlloc, GLWESwitchingKeyCompressedToMut,
GLWESwitchingKeyCompressedToRef, GLWESwitchingKeyDecompress,
},
}; };
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GLWEToLWESwitchingKeyCompressed<D: Data>(pub(crate) GGLWESwitchingKeyCompressed<D>); pub struct GLWEToLWESwitchingKeyCompressed<D: Data>(pub(crate) GLWESwitchingKeyCompressed<D>);
impl<D: Data> LWEInfos for GLWEToLWESwitchingKeyCompressed<D> { impl<D: Data> LWEInfos for GLWEToLWESwitchingKeyCompressed<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -21,7 +26,7 @@ impl<D: Data> LWEInfos for GLWEToLWESwitchingKeyCompressed<D> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -83,54 +88,146 @@ impl<D: DataRef> WriterTo for GLWEToLWESwitchingKeyCompressed<D> {
} }
} }
impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> { pub trait GLWEToLWESwitchingKeyCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyCompressedAlloc,
{
fn alloc_glwe_to_lwe_switching_key_compressed(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
dnum: Dnum,
) -> GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
GLWEToLWESwitchingKeyCompressed(self.alloc_glwe_switching_key_compressed(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
}
fn alloc_glwe_to_lwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyCompressed<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_out().0, infos.rank_out().0,
1, 1,
"rank_out > 1 is unsupported for GLWEToLWESwitchingKeyCompressed" "rank_out > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is unsupported for GLWEToLWESwitchingKeyCompressed" "dsize > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
); );
Self(GGLWESwitchingKeyCompressed::alloc(infos)) self.alloc_glwe_to_lwe_switching_key_compressed(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self { fn bytes_of_glwe_to_lwe_switching_key_compressed(
Self(GGLWESwitchingKeyCompressed::alloc_with( &self,
n, base2k: Base2K,
base2k, k: TorusPrecision,
k, rank_in: Rank,
rank_in, dnum: Dnum,
Rank(1), ) -> usize {
dnum, self.bytes_of_glwe_switching_key_compressed(base2k, k, rank_in, dnum, Dsize(1))
Dsize(1),
))
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_glwe_to_lwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_out().0, infos.rank_out().0,
1, 1,
"rank_out > 1 is unsupported for GLWEToLWESwitchingKeyCompressed" "rank_out > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is unsupported for GLWEToLWESwitchingKeyCompressed" "dsize > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
); );
GGLWESwitchingKeyCompressed::alloc_bytes(infos) self.bytes_of_glwe_switching_key_compressed_from_infos(infos)
} }
}
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_in: Rank) -> usize {
GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rank_in, dnum, Dsize(1)) impl<B: Backend> GLWEToLWESwitchingKeyCompressedAlloc for Module<B> where Self: GLWESwitchingKeyCompressedAlloc {}
impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyCompressedAlloc,
{
module.alloc_glwe_to_lwe_switching_key_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
where
M: GLWEToLWESwitchingKeyCompressedAlloc,
{
module.alloc_glwe_to_lwe_switching_key_compressed(base2k, k, rank_in, dnum)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyCompressedAlloc,
{
module.bytes_of_glwe_to_lwe_switching_key_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_in: Rank) -> usize
where
M: GLWEToLWESwitchingKeyCompressedAlloc,
{
module.bytes_of_glwe_to_lwe_switching_key_compressed(base2k, k, rank_in, dnum)
}
}
pub trait GLWEToLWESwitchingKeyDecompress
where
Self: GLWESwitchingKeyDecompress,
{
fn decompress_glwe_to_lwe_switching_key<R, O>(&self, res: &mut R, other: &O)
where
R: GLWEToLWESwitchingKeyToMut,
O: GLWEToLWESwitchingKeyCompressedToRef,
{
self.decompress_glwe_switching_key(&mut res.to_mut().0, &other.to_ref().0);
}
}
impl<B: Backend> GLWEToLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
impl<D: DataMut> GLWEToLWESwitchingKey<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: GLWEToLWESwitchingKeyCompressedToRef,
M: GLWEToLWESwitchingKeyDecompress,
{
module.decompress_glwe_to_lwe_switching_key(self, other);
}
}
pub trait GLWEToLWESwitchingKeyCompressedToRef {
fn to_ref(&self) -> GLWEToLWESwitchingKeyCompressed<&[u8]>;
}
impl<D: DataRef> GLWEToLWESwitchingKeyCompressedToRef for GLWEToLWESwitchingKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToRef,
{
fn to_ref(&self) -> GLWEToLWESwitchingKeyCompressed<&[u8]> {
GLWEToLWESwitchingKeyCompressed(self.0.to_ref())
}
}
pub trait GLWEToLWESwitchingKeyCompressedToMut {
fn to_mut(&mut self) -> GLWEToLWESwitchingKeyCompressed<&mut [u8]>;
}
impl<D: DataMut> GLWEToLWESwitchingKeyCompressedToMut for GLWEToLWESwitchingKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToMut,
{
fn to_mut(&mut self) -> GLWEToLWESwitchingKeyCompressed<&mut [u8]> {
GLWEToLWESwitchingKeyCompressed(self.0.to_mut())
} }
} }

View File

@@ -2,21 +2,24 @@ use std::fmt;
use poulpy_hal::{ use poulpy_hal::{
api::ZnFillUniform, api::ZnFillUniform,
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo, Zn, ZnxInfos, ZnxView, ZnxViewMut}, layouts::{
Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo, Zn, ZnToMut, ZnToRef, ZnxInfos, ZnxView,
ZnxViewMut,
},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, Degree, LWECiphertext, LWEInfos, TorusPrecision, compressed::Decompress}; use crate::layouts::{Base2K, LWE, LWEInfos, LWEToMut, RingDegree, TorusPrecision};
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct LWECiphertextCompressed<D: Data> { pub struct LWECompressed<D: Data> {
pub(crate) data: Zn<D>, pub(crate) data: Zn<D>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) seed: [u8; 32], pub(crate) seed: [u8; 32],
} }
impl<D: Data> LWEInfos for LWECiphertextCompressed<D> { impl<D: Data> LWEInfos for LWECompressed<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -25,8 +28,8 @@ impl<D: Data> LWEInfos for LWECiphertextCompressed<D> {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -34,17 +37,17 @@ impl<D: Data> LWEInfos for LWECiphertextCompressed<D> {
} }
} }
impl<D: DataRef> fmt::Debug for LWECiphertextCompressed<D> { impl<D: DataRef> fmt::Debug for LWECompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataRef> fmt::Display for LWECiphertextCompressed<D> { impl<D: DataRef> fmt::Display for LWECompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"LWECiphertextCompressed: base2k={} k={} seed={:?}: {}", "LWECompressed: base2k={} k={} seed={:?}: {}",
self.base2k(), self.base2k(),
self.k(), self.k(),
self.seed, self.seed,
@@ -53,22 +56,15 @@ impl<D: DataRef> fmt::Display for LWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut> FillUniform for LWECiphertextCompressed<D> { impl<D: DataMut> FillUniform for LWECompressed<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl LWECiphertextCompressed<Vec<u8>> { pub trait LWECompressedAlloc {
pub fn alloc<A>(infos: &A) -> Self fn alloc_lwe_compressed(&self, base2k: Base2K, k: TorusPrecision) -> LWECompressed<Vec<u8>> {
where LWECompressed {
A: LWEInfos,
{
Self::alloc_with(infos.base2k(), infos.k())
}
pub fn alloc_with(base2k: Base2K, k: TorusPrecision) -> Self {
Self {
data: Zn::alloc(1, 1, k.0.div_ceil(base2k.0) as usize), data: Zn::alloc(1, 1, k.0.div_ceil(base2k.0) as usize),
k, k,
base2k, base2k,
@@ -76,21 +72,62 @@ impl LWECiphertextCompressed<Vec<u8>> {
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_lwe_compressed_from_infos<A>(&self, infos: &A) -> LWECompressed<Vec<u8>>
where where
A: LWEInfos, A: LWEInfos,
{ {
Self::alloc_bytes_with(infos.base2k(), infos.k()) self.alloc_lwe_compressed(infos.base2k(), infos.k())
} }
pub fn alloc_bytes_with(base2k: Base2K, k: TorusPrecision) -> usize { fn bytes_of_lwe_compressed(&self, base2k: Base2K, k: TorusPrecision) -> usize {
Zn::alloc_bytes(1, 1, k.0.div_ceil(base2k.0) as usize) Zn::bytes_of(1, 1, k.0.div_ceil(base2k.0) as usize)
}
fn bytes_of_lwe_compressed_from_infos<A>(&self, infos: &A) -> usize
where
A: LWEInfos,
{
self.bytes_of_lwe_compressed(infos.base2k(), infos.k())
}
}
impl<B: Backend> LWECompressedAlloc for Module<B> {}
impl LWECompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: LWEInfos,
M: LWECompressedAlloc,
{
module.alloc_lwe_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> Self
where
M: LWECompressedAlloc,
{
module.alloc_lwe_compressed(base2k, k)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: LWEInfos,
M: LWECompressedAlloc,
{
module.bytes_of_lwe_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> usize
where
M: LWECompressedAlloc,
{
module.bytes_of_lwe_compressed(base2k, k)
} }
} }
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
impl<D: DataMut> ReaderFrom for LWECiphertextCompressed<D> { impl<D: DataMut> ReaderFrom for LWECompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -99,7 +136,7 @@ impl<D: DataMut> ReaderFrom for LWECiphertextCompressed<D> {
} }
} }
impl<D: DataRef> WriterTo for LWECiphertextCompressed<D> { impl<D: DataRef> WriterTo for LWECompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.into())?; writer.write_u32::<LittleEndian>(self.k.into())?;
writer.write_u32::<LittleEndian>(self.base2k.into())?; writer.write_u32::<LittleEndian>(self.base2k.into())?;
@@ -108,22 +145,72 @@ impl<D: DataRef> WriterTo for LWECiphertextCompressed<D> {
} }
} }
impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, LWECiphertextCompressed<DR>> for LWECiphertext<D> pub trait LWEDecompress
where where
Module<B>: ZnFillUniform, Self: ZnFillUniform,
{ {
fn decompress(&mut self, module: &Module<B>, other: &LWECiphertextCompressed<DR>) { fn decompress_lwe<R, O>(&self, res: &mut R, other: &O)
debug_assert_eq!(self.size(), other.size()); where
R: LWEToMut,
O: LWECompressedToRef,
{
let res: &mut LWE<&mut [u8]> = &mut res.to_mut();
let other: &LWECompressed<&[u8]> = &other.to_ref();
assert_eq!(res.lwe_layout(), other.lwe_layout());
let mut source: Source = Source::new(other.seed); let mut source: Source = Source::new(other.seed);
module.zn_fill_uniform( self.zn_fill_uniform(
self.n().into(), res.n().into(),
other.base2k().into(), other.base2k().into(),
&mut self.data, &mut res.data,
0, 0,
&mut source, &mut source,
); );
(0..self.size()).for_each(|i| { for i in 0..res.size() {
self.data.at_mut(0, i)[0] = other.data.at(0, i)[0]; res.data.at_mut(0, i)[0] = other.data.at(0, i)[0];
}); }
}
}
impl<B: Backend> LWEDecompress for Module<B> where Self: ZnFillUniform {}
impl<D: DataMut> LWE<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: LWECompressedToRef,
M: LWEDecompress,
{
module.decompress_lwe(self, other);
}
}
pub trait LWECompressedToRef {
fn to_ref(&self) -> LWECompressed<&[u8]>;
}
impl<D: DataRef> LWECompressedToRef for LWECompressed<D> {
fn to_ref(&self) -> LWECompressed<&[u8]> {
LWECompressed {
k: self.k,
base2k: self.base2k,
seed: self.seed,
data: self.data.to_ref(),
}
}
}
pub trait LWECompressedToMut {
fn to_mut(&mut self) -> LWECompressed<&mut [u8]>;
}
impl<D: DataMut> LWECompressedToMut for LWECompressed<D> {
fn to_mut(&mut self) -> LWECompressed<&mut [u8]> {
LWECompressed {
k: self.k,
base2k: self.base2k,
seed: self.seed,
data: self.data.to_mut(),
}
} }
} }

View File

@@ -1,17 +1,20 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWESwitchingKey, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWESwitchingKey, LWESwitchingKeyToMut, Rank, RingDegree,
compressed::{Decompress, GGLWESwitchingKeyCompressed}, TorusPrecision,
compressed::{
GLWESwitchingKeyCompressed, GLWESwitchingKeyCompressedAlloc, GLWESwitchingKeyCompressedToMut,
GLWESwitchingKeyCompressedToRef, GLWESwitchingKeyDecompress,
},
}; };
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct LWESwitchingKeyCompressed<D: Data>(pub(crate) GGLWESwitchingKeyCompressed<D>); pub struct LWESwitchingKeyCompressed<D: Data>(pub(crate) GLWESwitchingKeyCompressed<D>);
impl<D: Data> LWEInfos for LWESwitchingKeyCompressed<D> { impl<D: Data> LWEInfos for LWESwitchingKeyCompressed<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -22,7 +25,7 @@ impl<D: Data> LWEInfos for LWESwitchingKeyCompressed<D> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -83,73 +86,149 @@ impl<D: DataRef> WriterTo for LWESwitchingKeyCompressed<D> {
} }
} }
impl LWESwitchingKeyCompressed<Vec<u8>> { pub trait LWESwitchingKeyCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyCompressedAlloc,
{
fn alloc_lwe_switching_key_compressed(
&self,
base2k: Base2K,
k: TorusPrecision,
dnum: Dnum,
) -> LWESwitchingKeyCompressed<Vec<u8>> {
LWESwitchingKeyCompressed(self.alloc_glwe_switching_key_compressed(base2k, k, Rank(1), Rank(1), dnum, Dsize(1)))
}
fn alloc_lwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> LWESwitchingKeyCompressed<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for LWESwitchingKeyCompressed" "dsize > 1 is not supported for LWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.rank_in().0, infos.rank_in().0,
1, 1,
"rank_in > 1 is not supported for LWESwitchingKeyCompressed" "rank_in > 1 is not supported for LWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.rank_out().0, infos.rank_out().0,
1, 1,
"rank_out > 1 is not supported for LWESwitchingKeyCompressed" "rank_out > 1 is not supported for LWESwitchingKeyCompressed"
); );
Self(GGLWESwitchingKeyCompressed::alloc(infos)) self.alloc_lwe_switching_key_compressed(infos.base2k(), infos.k(), infos.dnum())
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self { fn bytes_of_lwe_switching_key_compressed(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
Self(GGLWESwitchingKeyCompressed::alloc_with( self.bytes_of_glwe_switching_key_compressed(base2k, k, Rank(1), dnum, Dsize(1))
n,
base2k,
k,
Rank(1),
Rank(1),
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_lwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for LWESwitchingKey" "dsize > 1 is not supported for LWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.rank_in().0, infos.rank_in().0,
1, 1,
"rank_in > 1 is not supported for LWESwitchingKey" "rank_in > 1 is not supported for LWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.rank_out().0, infos.rank_out().0,
1, 1,
"rank_out > 1 is not supported for LWESwitchingKey" "rank_out > 1 is not supported for LWESwitchingKeyCompressed"
); );
GGLWESwitchingKeyCompressed::alloc_bytes(infos) self.bytes_of_glwe_switching_key_compressed_from_infos(infos)
}
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, Rank(1), dnum, Dsize(1))
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, LWESwitchingKeyCompressed<DR>> for LWESwitchingKey<D> impl<B: Backend> LWESwitchingKeyCompressedAlloc for Module<B> where Self: GLWESwitchingKeyCompressedAlloc {}
impl LWESwitchingKeyCompressed<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: LWESwitchingKeyCompressedAlloc,
{
module.alloc_lwe_switching_key_compressed_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
where
M: LWESwitchingKeyCompressedAlloc,
{
module.alloc_lwe_switching_key_compressed(base2k, k, dnum)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: LWESwitchingKeyCompressedAlloc,
{
module.bytes_of_lwe_switching_key_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
where
M: LWESwitchingKeyCompressedAlloc,
{
module.bytes_of_lwe_switching_key_compressed(base2k, k, dnum)
}
}
pub trait LWESwitchingKeyDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GLWESwitchingKeyDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &LWESwitchingKeyCompressed<DR>) { fn decompress_lwe_switching_key<R, O>(&self, res: &mut R, other: &O)
self.0.decompress(module, &other.0); where
R: LWESwitchingKeyToMut,
O: LWESwitchingKeyCompressedToRef,
{
self.decompress_glwe_switching_key(&mut res.to_mut().0, &other.to_ref().0);
}
}
impl<B: Backend> LWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
impl<D: DataMut> LWESwitchingKey<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: LWESwitchingKeyCompressedToRef,
M: LWESwitchingKeyDecompress,
{
module.decompress_lwe_switching_key(self, other);
}
}
pub trait LWESwitchingKeyCompressedToRef {
fn to_ref(&self) -> LWESwitchingKeyCompressed<&[u8]>;
}
impl<D: DataRef> LWESwitchingKeyCompressedToRef for LWESwitchingKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToRef,
{
fn to_ref(&self) -> LWESwitchingKeyCompressed<&[u8]> {
LWESwitchingKeyCompressed(self.0.to_ref())
}
}
pub trait LWESwitchingKeyCompressedToMut {
fn to_mut(&mut self) -> LWESwitchingKeyCompressed<&mut [u8]>;
}
impl<D: DataMut> LWESwitchingKeyCompressedToMut for LWESwitchingKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToMut,
{
fn to_mut(&mut self) -> LWESwitchingKeyCompressed<&mut [u8]> {
LWESwitchingKeyCompressed(self.0.to_mut())
} }
} }

View File

@@ -1,20 +1,23 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWEToGLWESwitchingKey, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWEToGLWESwitchingKey, LWEToGLWESwitchingKeyToMut, Rank, RingDegree,
compressed::{Decompress, GGLWESwitchingKeyCompressed}, TorusPrecision,
compressed::{
GLWESwitchingKeyCompressed, GLWESwitchingKeyCompressedAlloc, GLWESwitchingKeyCompressedToMut,
GLWESwitchingKeyCompressedToRef, GLWESwitchingKeyDecompress,
},
}; };
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct LWEToGLWESwitchingKeyCompressed<D: Data>(pub(crate) GGLWESwitchingKeyCompressed<D>); pub struct LWEToGLWESwitchingKeyCompressed<D: Data>(pub(crate) GLWESwitchingKeyCompressed<D>);
impl<D: Data> LWEInfos for LWEToGLWESwitchingKeyCompressed<D> { impl<D: Data> LWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -83,63 +86,138 @@ impl<D: DataRef> WriterTo for LWEToGLWESwitchingKeyCompressed<D> {
} }
} }
impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> { pub trait LWEToGLWESwitchingKeyCompressedAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyCompressedAlloc,
{
fn alloc_lwe_to_glwe_switching_key_compressed(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_out: Rank,
dnum: Dnum,
) -> LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
LWEToGLWESwitchingKeyCompressed(self.alloc_glwe_switching_key_compressed(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
}
fn alloc_lwe_to_glwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyCompressed<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for LWEToGLWESwitchingKeyCompressed" "dsize > 1 is not supported for LWEToGLWESwitchingKeyCompressed"
); );
debug_assert_eq!( assert_eq!(
infos.rank_in().0, infos.rank_in().0,
1, 1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKeyCompressed" "rank_in > 1 is not supported for LWEToGLWESwitchingKeyCompressed"
); );
Self(GGLWESwitchingKeyCompressed::alloc(infos)) self.alloc_lwe_to_glwe_switching_key_compressed(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self { fn bytes_of_lwe_to_glwe_switching_key_compressed(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
Self(GGLWESwitchingKeyCompressed::alloc_with( self.bytes_of_glwe_switching_key_compressed(base2k, k, Rank(1), dnum, Dsize(1))
n,
base2k,
k,
Rank(1),
rank_out,
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_lwe_to_glwe_switching_key_compressed_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey" "dsize > 1 is not supported for LWEToGLWESwitchingKeyCompressed"
); );
GGLWESwitchingKeyCompressed::alloc_bytes(infos) assert_eq!(
} infos.rank_in().0,
1,
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize { "rank_in > 1 is not supported for LWEToGLWESwitchingKeyCompressed"
GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, Rank(1), dnum, Dsize(1)) );
self.bytes_of_lwe_to_glwe_switching_key_compressed(infos.base2k(), infos.k(), infos.dnum())
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, LWEToGLWESwitchingKeyCompressed<DR>> for LWEToGLWESwitchingKey<D> impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
pub fn alloc<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyCompressedAlloc,
{
module.alloc_lwe_to_glwe_switching_key_compressed_from_infos(infos)
}
pub fn alloc_with<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
where
M: LWEToGLWESwitchingKeyCompressedAlloc,
{
module.alloc_lwe_to_glwe_switching_key_compressed(base2k, k, rank_out, dnum)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyCompressedAlloc,
{
module.bytes_of_lwe_to_glwe_switching_key_compressed_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
where
M: LWEToGLWESwitchingKeyCompressedAlloc,
{
module.bytes_of_lwe_to_glwe_switching_key_compressed(base2k, k, dnum)
}
}
pub trait LWEToGLWESwitchingKeyDecompress
where where
Module<B>: VecZnxFillUniform + VecZnxCopy, Self: GLWESwitchingKeyDecompress,
{ {
fn decompress(&mut self, module: &Module<B>, other: &LWEToGLWESwitchingKeyCompressed<DR>) { fn decompress_lwe_to_glwe_switching_key<R, O>(&self, res: &mut R, other: &O)
self.0.decompress(module, &other.0); where
R: LWEToGLWESwitchingKeyToMut,
O: LWEToGLWESwitchingKeyCompressedToRef,
{
self.decompress_glwe_switching_key(&mut res.to_mut().0, &other.to_ref().0);
}
}
impl<B: Backend> LWEToGLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
impl<D: DataMut> LWEToGLWESwitchingKey<D> {
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
where
O: LWEToGLWESwitchingKeyCompressedToRef,
M: LWEToGLWESwitchingKeyDecompress,
{
module.decompress_lwe_to_glwe_switching_key(self, other);
}
}
pub trait LWEToGLWESwitchingKeyCompressedToRef {
fn to_ref(&self) -> LWEToGLWESwitchingKeyCompressed<&[u8]>;
}
impl<D: DataRef> LWEToGLWESwitchingKeyCompressedToRef for LWEToGLWESwitchingKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToRef,
{
fn to_ref(&self) -> LWEToGLWESwitchingKeyCompressed<&[u8]> {
LWEToGLWESwitchingKeyCompressed(self.0.to_ref())
}
}
pub trait LWEToGLWESwitchingKeyCompressedToMut {
fn to_mut(&mut self) -> LWEToGLWESwitchingKeyCompressed<&mut [u8]>;
}
impl<D: DataMut> LWEToGLWESwitchingKeyCompressedToMut for LWEToGLWESwitchingKeyCompressed<D>
where
GLWESwitchingKeyCompressed<D>: GLWESwitchingKeyCompressedToMut,
{
fn to_mut(&mut self) -> LWEToGLWESwitchingKeyCompressed<&mut [u8]> {
LWEToGLWESwitchingKeyCompressed(self.0.to_mut())
} }
} }

View File

@@ -19,9 +19,3 @@ pub use glwe_to_lwe_ksk::*;
pub use lwe_ct::*; pub use lwe_ct::*;
pub use lwe_ksk::*; pub use lwe_ksk::*;
pub use lwe_to_glwe_ksk::*; pub use lwe_to_glwe_ksk::*;
use poulpy_hal::layouts::{Backend, Module};
pub trait Decompress<B: Backend, C> {
fn decompress(&mut self, module: &Module<B>, other: &C);
}

View File

@@ -1,18 +1,19 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWECiphertext, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWE, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyAlloc, GLWESwitchingKeyToMut,
GLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GGLWEAutomorphismKeyLayout { pub struct AutomorphismKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank: Rank, pub rank: Rank,
@@ -21,19 +22,19 @@ pub struct GGLWEAutomorphismKeyLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWEAutomorphismKey<D: Data> { pub struct AutomorphismKey<D: Data> {
pub(crate) key: GGLWESwitchingKey<D>, pub(crate) key: GLWESwitchingKey<D>,
pub(crate) p: i64, pub(crate) p: i64,
} }
impl<D: Data> GGLWEAutomorphismKey<D> { impl<D: Data> AutomorphismKey<D> {
pub fn p(&self) -> i64 { pub fn p(&self) -> i64 {
self.p self.p
} }
} }
impl<D: Data> LWEInfos for GGLWEAutomorphismKey<D> { impl<D: Data> LWEInfos for AutomorphismKey<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.key.n() self.key.n()
} }
@@ -50,13 +51,13 @@ impl<D: Data> LWEInfos for GGLWEAutomorphismKey<D> {
} }
} }
impl<D: Data> GLWEInfos for GGLWEAutomorphismKey<D> { impl<D: Data> GLWEInfos for AutomorphismKey<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWEAutomorphismKey<D> { impl<D: Data> GGLWEInfos for AutomorphismKey<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.key.rank_in() self.key.rank_in()
} }
@@ -74,7 +75,7 @@ impl<D: Data> GGLWEInfos for GGLWEAutomorphismKey<D> {
} }
} }
impl LWEInfos for GGLWEAutomorphismKeyLayout { impl LWEInfos for AutomorphismKeyLayout {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -83,18 +84,18 @@ impl LWEInfos for GGLWEAutomorphismKeyLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
} }
impl GLWEInfos for GGLWEAutomorphismKeyLayout { impl GLWEInfos for AutomorphismKeyLayout {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank self.rank
} }
} }
impl GGLWEInfos for GGLWEAutomorphismKeyLayout { impl GGLWEInfos for AutomorphismKeyLayout {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank self.rank
} }
@@ -112,84 +113,164 @@ impl GGLWEInfos for GGLWEAutomorphismKeyLayout {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWEAutomorphismKey<D> { impl<D: DataRef> fmt::Debug for AutomorphismKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWEAutomorphismKey<D> { impl<D: DataMut> FillUniform for AutomorphismKey<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.key.fill_uniform(log_bound, source); self.key.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> fmt::Display for GGLWEAutomorphismKey<D> { impl<D: DataRef> fmt::Display for AutomorphismKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(AutomorphismKey: p={}) {}", self.p, self.key) write!(f, "(AutomorphismKey: p={}) {}", self.p, self.key)
} }
} }
impl GGLWEAutomorphismKey<Vec<u8>> { impl<B: Backend> AutomorphismKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}
pub fn alloc<A>(infos: &A) -> Self
pub trait AutomorphismKeyAlloc
where
Self: GLWESwitchingKeyAlloc,
{
fn alloc_automorphism_key(
&self,
base2k: Base2K,
k: TorusPrecision,
rank: Rank,
dnum: Dnum,
dsize: Dsize,
) -> AutomorphismKey<Vec<u8>> {
AutomorphismKey {
key: self.alloc_glwe_switching_key(base2k, k, rank, rank, dnum, dsize),
p: 0,
}
}
fn alloc_automorphism_key_from_infos<A>(&self, infos: &A) -> AutomorphismKey<Vec<u8>>
where
A: GGLWEInfos,
{
self.alloc_automorphism_key(
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
}
fn bytes_of_automorphism_key(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
self.bytes_of_glwe_switching_key(base2k, k, rank, rank, dnum, dsize)
}
fn bytes_of_automorphism_key_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
assert_eq!( assert_eq!(
infos.rank_in(), infos.rank_in(),
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKey" "rank_in != rank_out is not supported for AutomorphismKey"
); );
GGLWEAutomorphismKey { self.bytes_of_automorphism_key(
key: GGLWESwitchingKey::alloc(infos), infos.base2k(),
p: 0, infos.k(),
} infos.rank(),
} infos.dnum(),
infos.dsize(),
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self { )
GGLWEAutomorphismKey {
key: GGLWESwitchingKey::alloc_with(n, base2k, k, rank, rank, dnum, dsize),
p: 0,
}
}
pub fn alloc_bytes<A>(infos: &A) -> usize
where
A: GGLWEInfos,
{
assert_eq!(
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKey"
);
GGLWESwitchingKey::alloc_bytes(infos)
}
pub fn bytes_of(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rank, rank, dnum, dsize)
} }
} }
impl<D: DataRef> GGLWEAutomorphismKey<D> { impl AutomorphismKey<Vec<u8>> {
pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> { pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: AutomorphismKeyAlloc,
{
module.alloc_automorphism_key_from_infos(infos)
}
pub fn alloc_with<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where
M: AutomorphismKeyAlloc,
{
module.alloc_automorphism_key(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: AutomorphismKeyAlloc,
{
module.bytes_of_automorphism_key_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: AutomorphismKeyAlloc,
{
module.bytes_of_automorphism_key(base2k, k, rank, dnum, dsize)
}
}
pub trait AutomorphismKeyToMut {
fn to_mut(&mut self) -> AutomorphismKey<&mut [u8]>;
}
impl<D: DataMut> AutomorphismKeyToMut for AutomorphismKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToMut,
{
fn to_mut(&mut self) -> AutomorphismKey<&mut [u8]> {
AutomorphismKey {
key: self.key.to_mut(),
p: self.p,
}
}
}
pub trait AutomorphismKeyToRef {
fn to_ref(&self) -> AutomorphismKey<&[u8]>;
}
impl<D: DataRef> AutomorphismKeyToRef for AutomorphismKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToRef,
{
fn to_ref(&self) -> AutomorphismKey<&[u8]> {
AutomorphismKey {
p: self.p,
key: self.key.to_ref(),
}
}
}
impl<D: DataRef> AutomorphismKey<D> {
pub fn at(&self, row: usize, col: usize) -> GLWE<&[u8]> {
self.key.at(row, col) self.key.at(row, col)
} }
} }
impl<D: DataMut> GGLWEAutomorphismKey<D> { impl<D: DataMut> AutomorphismKey<D> {
pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertext<&mut [u8]> { pub fn at_mut(&mut self, row: usize, col: usize) -> GLWE<&mut [u8]> {
self.key.at_mut(row, col) self.key.at_mut(row, col)
} }
} }
impl<D: DataMut> ReaderFrom for GGLWEAutomorphismKey<D> { impl<D: DataMut> ReaderFrom for AutomorphismKey<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.p = reader.read_u64::<LittleEndian>()? as i64; self.p = reader.read_u64::<LittleEndian>()? as i64;
self.key.read_from(reader) self.key.read_from(reader)
} }
} }
impl<D: DataRef> WriterTo for GGLWEAutomorphismKey<D> { impl<D: DataRef> WriterTo for AutomorphismKey<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.p as u64)?; writer.write_u64::<LittleEndian>(self.p as u64)?;
self.key.write_to(writer) self.key.write_to(writer)

View File

@@ -1,9 +1,11 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, WriterTo, ZnxInfos}, layouts::{
Backend, Data, DataMut, DataRef, FillUniform, MatZnx, MatZnxToMut, MatZnxToRef, Module, ReaderFrom, WriterTo, ZnxInfos,
},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, BuildError, Degree, Dnum, Dsize, GLWECiphertext, GLWEInfos, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{Base2K, Dnum, Dsize, GLWE, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
@@ -16,8 +18,8 @@ where
fn dsize(&self) -> Dsize; fn dsize(&self) -> Dsize;
fn rank_in(&self) -> Rank; fn rank_in(&self) -> Rank;
fn rank_out(&self) -> Rank; fn rank_out(&self) -> Rank;
fn layout(&self) -> GGLWECiphertextLayout { fn gglwe_layout(&self) -> GGLWELayout {
GGLWECiphertextLayout { GGLWELayout {
n: self.n(), n: self.n(),
base2k: self.base2k(), base2k: self.base2k(),
k: self.k(), k: self.k(),
@@ -29,9 +31,13 @@ where
} }
} }
pub trait SetGGLWEInfos {
fn set_dsize(&mut self, dsize: usize);
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GGLWECiphertextLayout { pub struct GGLWELayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank_in: Rank, pub rank_in: Rank,
@@ -40,7 +46,7 @@ pub struct GGLWECiphertextLayout {
pub dsize: Dsize, pub dsize: Dsize,
} }
impl LWEInfos for GGLWECiphertextLayout { impl LWEInfos for GGLWELayout {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -49,18 +55,18 @@ impl LWEInfos for GGLWECiphertextLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
} }
impl GLWEInfos for GGLWECiphertextLayout { impl GLWEInfos for GGLWELayout {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out self.rank_out
} }
} }
impl GGLWEInfos for GGLWECiphertextLayout { impl GGLWEInfos for GGLWELayout {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank_in self.rank_in
} }
@@ -79,14 +85,14 @@ impl GGLWEInfos for GGLWECiphertextLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWECiphertext<D: Data> { pub struct GGLWE<D: Data> {
pub(crate) data: MatZnx<D>, pub(crate) data: MatZnx<D>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) dsize: Dsize, pub(crate) dsize: Dsize,
} }
impl<D: Data> LWEInfos for GGLWECiphertext<D> { impl<D: Data> LWEInfos for GGLWE<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -95,8 +101,8 @@ impl<D: Data> LWEInfos for GGLWECiphertext<D> {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -104,13 +110,13 @@ impl<D: Data> LWEInfos for GGLWECiphertext<D> {
} }
} }
impl<D: Data> GLWEInfos for GGLWECiphertext<D> { impl<D: Data> GLWEInfos for GGLWE<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWECiphertext<D> { impl<D: Data> GGLWEInfos for GGLWE<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
Rank(self.data.cols_in() as u32) Rank(self.data.cols_in() as u32)
} }
@@ -128,136 +134,35 @@ impl<D: Data> GGLWEInfos for GGLWECiphertext<D> {
} }
} }
pub struct GGLWECiphertextBuilder<D: Data> { impl<D: DataRef> GGLWE<D> {
data: Option<MatZnx<D>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
dsize: Option<Dsize>,
}
impl<D: Data> GGLWECiphertext<D> {
#[inline]
pub fn builder() -> GGLWECiphertextBuilder<D> {
GGLWECiphertextBuilder {
data: None,
base2k: None,
k: None,
dsize: None,
}
}
}
impl GGLWECiphertextBuilder<Vec<u8>> {
#[inline]
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGLWEInfos,
{
self.data = Some(MatZnx::alloc(
infos.n().into(),
infos.dnum().into(),
infos.rank_in().into(),
(infos.rank_out() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.dsize = Some(infos.dsize());
self
}
}
impl<D: Data> GGLWECiphertextBuilder<D> {
#[inline]
pub fn data(mut self, data: MatZnx<D>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn dsize(mut self, dsize: Dsize) -> Self {
self.dsize = Some(dsize);
self
}
pub fn build(self) -> Result<GGLWECiphertext<D>, BuildError> {
let data: MatZnx<D> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let dsize: Dsize = self.dsize.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if dsize == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGLWECiphertext {
data,
base2k,
k,
dsize,
})
}
}
impl<D: DataRef> GGLWECiphertext<D> {
pub fn data(&self) -> &MatZnx<D> { pub fn data(&self) -> &MatZnx<D> {
&self.data &self.data
} }
} }
impl<D: DataMut> GGLWECiphertext<D> { impl<D: DataMut> GGLWE<D> {
pub fn data_mut(&mut self) -> &mut MatZnx<D> { pub fn data_mut(&mut self) -> &mut MatZnx<D> {
&mut self.data &mut self.data
} }
} }
impl<D: DataRef> fmt::Debug for GGLWECiphertext<D> { impl<D: DataRef> fmt::Debug for GGLWE<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWECiphertext<D> { impl<D: DataMut> FillUniform for GGLWE<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> fmt::Display for GGLWECiphertext<D> { impl<D: DataRef> fmt::Display for GGLWE<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"(GGLWECiphertext: k={} base2k={} dsize={}) {}", "(GGLWE: k={} base2k={} dsize={}) {}",
self.k().0, self.k().0,
self.base2k().0, self.base2k().0,
self.dsize().0, self.dsize().0,
@@ -266,53 +171,39 @@ impl<D: DataRef> fmt::Display for GGLWECiphertext<D> {
} }
} }
impl<D: DataRef> GGLWECiphertext<D> { impl<D: DataRef> GGLWE<D> {
pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> { pub fn at(&self, row: usize, col: usize) -> GLWE<&[u8]> {
GLWECiphertext::builder() GLWE {
.data(self.data.at(row, col)) k: self.k,
.base2k(self.base2k()) base2k: self.base2k,
.k(self.k()) data: self.data.at(row, col),
.build() }
.unwrap()
} }
} }
impl<D: DataMut> GGLWECiphertext<D> { impl<D: DataMut> GGLWE<D> {
pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertext<&mut [u8]> { pub fn at_mut(&mut self, row: usize, col: usize) -> GLWE<&mut [u8]> {
GLWECiphertext::builder() GLWE {
.base2k(self.base2k()) k: self.k,
.k(self.k()) base2k: self.base2k,
.data(self.data.at_mut(row, col)) data: self.data.at_mut(row, col),
.build() }
.unwrap()
} }
} }
impl GGLWECiphertext<Vec<u8>> { pub trait GGLWEAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GGLWEInfos, {
{ fn alloc_gglwe(
Self::alloc_with( &self,
infos.n(),
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
pub fn alloc_with(
n: Degree,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
rank_out: Rank, rank_out: Rank,
dnum: Dnum, dnum: Dnum,
dsize: Dsize, dsize: Dsize,
) -> Self { ) -> GGLWE<Vec<u8>> {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -327,9 +218,9 @@ impl GGLWECiphertext<Vec<u8>> {
dsize.0, dsize.0,
); );
Self { GGLWE {
data: MatZnx::alloc( data: MatZnx::alloc(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
rank_in.into(), rank_in.into(),
(rank_out + 1).into(), (rank_out + 1).into(),
@@ -341,12 +232,11 @@ impl GGLWECiphertext<Vec<u8>> {
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_from_infos<A>(&self, infos: &A) -> GGLWE<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
Self::alloc_bytes_with( self.alloc_gglwe(
infos.n(),
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank_in(), infos.rank_in(),
@@ -356,8 +246,8 @@ impl GGLWECiphertext<Vec<u8>> {
) )
} }
pub fn alloc_bytes_with( fn bytes_of_gglwe(
n: Degree, &self,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
@@ -379,17 +269,111 @@ impl GGLWECiphertext<Vec<u8>> {
dsize.0, dsize.0,
); );
MatZnx::alloc_bytes( MatZnx::bytes_of(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
rank_in.into(), rank_in.into(),
(rank_out + 1).into(), (rank_out + 1).into(),
k.0.div_ceil(base2k.0) as usize, k.0.div_ceil(base2k.0) as usize,
) )
} }
fn bytes_of_gglwe_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
self.bytes_of_gglwe(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
} }
impl<D: DataMut> ReaderFrom for GGLWECiphertext<D> { impl<B: Backend> GGLWEAlloc for Module<B> where Self: GetRingDegree {}
impl GGLWE<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GGLWEAlloc,
{
module.alloc_glwe_from_infos(infos)
}
pub fn alloc<M>(
module: &M,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> Self
where
M: GGLWEAlloc,
{
module.alloc_gglwe(base2k, k, rank_in, rank_out, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GGLWEAlloc,
{
module.bytes_of_gglwe_from_infos(infos)
}
pub fn bytes_of<M>(
module: &M,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize
where
M: GGLWEAlloc,
{
module.bytes_of_gglwe(base2k, k, rank_in, rank_out, dnum, dsize)
}
}
pub trait GGLWEToMut {
fn to_mut(&mut self) -> GGLWE<&mut [u8]>;
}
impl<D: DataMut> GGLWEToMut for GGLWE<D> {
fn to_mut(&mut self) -> GGLWE<&mut [u8]> {
GGLWE {
k: self.k(),
base2k: self.base2k(),
dsize: self.dsize(),
data: self.data.to_mut(),
}
}
}
pub trait GGLWEToRef {
fn to_ref(&self) -> GGLWE<&[u8]>;
}
impl<D: DataRef> GGLWEToRef for GGLWE<D> {
fn to_ref(&self) -> GGLWE<&[u8]> {
GGLWE {
k: self.k(),
base2k: self.base2k(),
dsize: self.dsize(),
data: self.data.to_ref(),
}
}
}
impl<D: DataMut> ReaderFrom for GGLWE<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -398,7 +382,7 @@ impl<D: DataMut> ReaderFrom for GGLWECiphertext<D> {
} }
} }
impl<D: DataRef> WriterTo for GGLWECiphertext<D> { impl<D: DataRef> WriterTo for GGLWE<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.0)?; writer.write_u32::<LittleEndian>(self.k.0)?;
writer.write_u32::<LittleEndian>(self.base2k.0)?; writer.write_u32::<LittleEndian>(self.base2k.0)?;

View File

@@ -1,18 +1,19 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWECiphertext, GGLWEInfos, GLWECiphertext, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWE, GGLWEAlloc, GGLWEInfos, GGLWEToMut, GGLWEToRef, GLWE, GLWEInfos, LWEInfos, Rank, RingDegree,
TorusPrecision,
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GGLWESwitchingKeyLayout { pub struct GLWESwitchingKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank_in: Rank, pub rank_in: Rank,
@@ -21,8 +22,8 @@ pub struct GGLWESwitchingKeyLayout {
pub dsize: Dsize, pub dsize: Dsize,
} }
impl LWEInfos for GGLWESwitchingKeyLayout { impl LWEInfos for GLWESwitchingKeyLayout {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -35,13 +36,13 @@ impl LWEInfos for GGLWESwitchingKeyLayout {
} }
} }
impl GLWEInfos for GGLWESwitchingKeyLayout { impl GLWEInfos for GLWESwitchingKeyLayout {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl GGLWEInfos for GGLWESwitchingKeyLayout { impl GGLWEInfos for GLWESwitchingKeyLayout {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank_in self.rank_in
} }
@@ -60,14 +61,44 @@ impl GGLWEInfos for GGLWESwitchingKeyLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWESwitchingKey<D: Data> { pub struct GLWESwitchingKey<D: Data> {
pub(crate) key: GGLWECiphertext<D>, pub(crate) key: GGLWE<D>,
pub(crate) sk_in_n: usize, // Degree of sk_in pub(crate) sk_in_n: usize, // Degree of sk_in
pub(crate) sk_out_n: usize, // Degree of sk_out pub(crate) sk_out_n: usize, // Degree of sk_out
} }
impl<D: Data> LWEInfos for GGLWESwitchingKey<D> { pub(crate) trait GLWESwitchingKeySetMetaData {
fn n(&self) -> Degree { fn set_sk_in_n(&mut self, sk_in_n: usize);
fn set_sk_out_n(&mut self, sk_out_n: usize);
}
impl<D: DataMut> GLWESwitchingKeySetMetaData for GLWESwitchingKey<D> {
fn set_sk_in_n(&mut self, sk_in_n: usize) {
self.sk_in_n = sk_in_n
}
fn set_sk_out_n(&mut self, sk_out_n: usize) {
self.sk_out_n = sk_out_n
}
}
pub(crate) trait GLWESwtichingKeyGetMetaData {
fn sk_in_n(&self) -> usize;
fn sk_out_n(&self) -> usize;
}
impl<D: DataRef> GLWESwtichingKeyGetMetaData for GLWESwitchingKey<D> {
fn sk_in_n(&self) -> usize {
self.sk_in_n
}
fn sk_out_n(&self) -> usize {
self.sk_out_n
}
}
impl<D: Data> LWEInfos for GLWESwitchingKey<D> {
fn n(&self) -> RingDegree {
self.key.n() self.key.n()
} }
@@ -84,13 +115,13 @@ impl<D: Data> LWEInfos for GGLWESwitchingKey<D> {
} }
} }
impl<D: Data> GLWEInfos for GGLWESwitchingKey<D> { impl<D: Data> GLWEInfos for GLWESwitchingKey<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWESwitchingKey<D> { impl<D: Data> GGLWEInfos for GLWESwitchingKey<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.key.rank_in() self.key.rank_in()
} }
@@ -108,13 +139,13 @@ impl<D: Data> GGLWEInfos for GGLWESwitchingKey<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWESwitchingKey<D> { impl<D: DataRef> fmt::Debug for GLWESwitchingKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataRef> fmt::Display for GGLWESwitchingKey<D> { impl<D: DataRef> fmt::Display for GLWESwitchingKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
@@ -126,49 +157,48 @@ impl<D: DataRef> fmt::Display for GGLWESwitchingKey<D> {
} }
} }
impl<D: DataMut> FillUniform for GGLWESwitchingKey<D> { impl<D: DataMut> FillUniform for GLWESwitchingKey<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.key.fill_uniform(log_bound, source); self.key.fill_uniform(log_bound, source);
} }
} }
impl GGLWESwitchingKey<Vec<u8>> { pub trait GLWESwitchingKeyAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GGLWEAlloc,
A: GGLWEInfos, {
{ fn alloc_glwe_switching_key(
GGLWESwitchingKey { &self,
key: GGLWECiphertext::alloc(infos),
sk_in_n: 0,
sk_out_n: 0,
}
}
pub fn alloc_with(
n: Degree,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
rank_out: Rank, rank_out: Rank,
dnum: Dnum, dnum: Dnum,
dsize: Dsize, dsize: Dsize,
) -> Self { ) -> GLWESwitchingKey<Vec<u8>> {
GGLWESwitchingKey { GLWESwitchingKey {
key: GGLWECiphertext::alloc_with(n, base2k, k, rank_in, rank_out, dnum, dsize), key: self.alloc_gglwe(base2k, k, rank_in, rank_out, dnum, dsize),
sk_in_n: 0, sk_in_n: 0,
sk_out_n: 0, sk_out_n: 0,
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_switching_key_from_infos<A>(&self, infos: &A) -> GLWESwitchingKey<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
GGLWECiphertext::alloc_bytes(infos) self.alloc_glwe_switching_key(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
} }
pub fn alloc_bytes_with( fn bytes_of_glwe_switching_key(
n: Degree, &self,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
@@ -176,23 +206,121 @@ impl GGLWESwitchingKey<Vec<u8>> {
dnum: Dnum, dnum: Dnum,
dsize: Dsize, dsize: Dsize,
) -> usize { ) -> usize {
GGLWECiphertext::alloc_bytes_with(n, base2k, k, rank_in, rank_out, dnum, dsize) self.bytes_of_gglwe(base2k, k, rank_in, rank_out, dnum, dsize)
}
fn bytes_of_glwe_switching_key_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
self.bytes_of_glwe_switching_key(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
} }
} }
impl<D: DataRef> GGLWESwitchingKey<D> { impl<B: Backend> GLWESwitchingKeyAlloc for Module<B> where Self: GGLWEAlloc {}
pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> {
impl GLWESwitchingKey<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GLWESwitchingKeyAlloc,
{
module.alloc_glwe_switching_key_from_infos(infos)
}
pub fn alloc<M>(
module: &M,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> Self
where
M: GLWESwitchingKeyAlloc,
{
module.alloc_glwe_switching_key(base2k, k, rank_in, rank_out, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GLWESwitchingKeyAlloc,
{
module.bytes_of_glwe_switching_key_from_infos(infos)
}
pub fn bytes_of<M>(
module: &M,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize
where
M: GLWESwitchingKeyAlloc,
{
module.bytes_of_glwe_switching_key(base2k, k, rank_in, rank_out, dnum, dsize)
}
}
pub trait GLWESwitchingKeyToMut {
fn to_mut(&mut self) -> GLWESwitchingKey<&mut [u8]>;
}
impl<D: DataMut> GLWESwitchingKeyToMut for GLWESwitchingKey<D>
where
GGLWE<D>: GGLWEToMut,
{
fn to_mut(&mut self) -> GLWESwitchingKey<&mut [u8]> {
GLWESwitchingKey {
key: self.key.to_mut(),
sk_in_n: self.sk_in_n,
sk_out_n: self.sk_out_n,
}
}
}
pub trait GLWESwitchingKeyToRef {
fn to_ref(&self) -> GLWESwitchingKey<&[u8]>;
}
impl<D: DataRef> GLWESwitchingKeyToRef for GLWESwitchingKey<D>
where
GGLWE<D>: GGLWEToRef,
{
fn to_ref(&self) -> GLWESwitchingKey<&[u8]> {
GLWESwitchingKey {
key: self.key.to_ref(),
sk_in_n: self.sk_in_n,
sk_out_n: self.sk_out_n,
}
}
}
impl<D: DataRef> GLWESwitchingKey<D> {
pub fn at(&self, row: usize, col: usize) -> GLWE<&[u8]> {
self.key.at(row, col) self.key.at(row, col)
} }
} }
impl<D: DataMut> GGLWESwitchingKey<D> { impl<D: DataMut> GLWESwitchingKey<D> {
pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertext<&mut [u8]> { pub fn at_mut(&mut self, row: usize, col: usize) -> GLWE<&mut [u8]> {
self.key.at_mut(row, col) self.key.at_mut(row, col)
} }
} }
impl<D: DataMut> ReaderFrom for GGLWESwitchingKey<D> { impl<D: DataMut> ReaderFrom for GLWESwitchingKey<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.sk_in_n = reader.read_u64::<LittleEndian>()? as usize; self.sk_in_n = reader.read_u64::<LittleEndian>()? as usize;
self.sk_out_n = reader.read_u64::<LittleEndian>()? as usize; self.sk_out_n = reader.read_u64::<LittleEndian>()? as usize;
@@ -200,7 +328,7 @@ impl<D: DataMut> ReaderFrom for GGLWESwitchingKey<D> {
} }
} }
impl<D: DataRef> WriterTo for GGLWESwitchingKey<D> { impl<D: DataRef> WriterTo for GLWESwitchingKey<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.sk_in_n as u64)?; writer.write_u64::<LittleEndian>(self.sk_in_n as u64)?;
writer.write_u64::<LittleEndian>(self.sk_out_n as u64)?; writer.write_u64::<LittleEndian>(self.sk_out_n as u64)?;

View File

@@ -1,16 +1,19 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{
Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyAlloc, GLWESwitchingKeyToMut,
GLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GGLWETensorKeyLayout { pub struct TensorKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank: Rank, pub rank: Rank,
@@ -19,12 +22,12 @@ pub struct GGLWETensorKeyLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGLWETensorKey<D: Data> { pub struct TensorKey<D: Data> {
pub(crate) keys: Vec<GGLWESwitchingKey<D>>, pub(crate) keys: Vec<GLWESwitchingKey<D>>,
} }
impl<D: Data> LWEInfos for GGLWETensorKey<D> { impl<D: Data> LWEInfos for TensorKey<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.keys[0].n() self.keys[0].n()
} }
@@ -41,13 +44,13 @@ impl<D: Data> LWEInfos for GGLWETensorKey<D> {
} }
} }
impl<D: Data> GLWEInfos for GGLWETensorKey<D> { impl<D: Data> GLWEInfos for TensorKey<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.keys[0].rank_out() self.keys[0].rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GGLWETensorKey<D> { impl<D: Data> GGLWEInfos for TensorKey<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank_out() self.rank_out()
} }
@@ -65,8 +68,8 @@ impl<D: Data> GGLWEInfos for GGLWETensorKey<D> {
} }
} }
impl LWEInfos for GGLWETensorKeyLayout { impl LWEInfos for TensorKeyLayout {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -79,13 +82,13 @@ impl LWEInfos for GGLWETensorKeyLayout {
} }
} }
impl GLWEInfos for GGLWETensorKeyLayout { impl GLWEInfos for TensorKeyLayout {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl GGLWEInfos for GGLWETensorKeyLayout { impl GGLWEInfos for TensorKeyLayout {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank self.rank
} }
@@ -103,21 +106,21 @@ impl GGLWEInfos for GGLWETensorKeyLayout {
} }
} }
impl<D: DataRef> fmt::Debug for GGLWETensorKey<D> { impl<D: DataRef> fmt::Debug for TensorKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GGLWETensorKey<D> { impl<D: DataMut> FillUniform for TensorKey<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.keys self.keys
.iter_mut() .iter_mut()
.for_each(|key: &mut GGLWESwitchingKey<D>| key.fill_uniform(log_bound, source)) .for_each(|key: &mut GLWESwitchingKey<D>| key.fill_uniform(log_bound, source))
} }
} }
impl<D: DataRef> fmt::Display for GGLWETensorKey<D> { impl<D: DataRef> fmt::Display for TensorKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "(GLWETensorKey)",)?; writeln!(f, "(GLWETensorKey)",)?;
for (i, key) in self.keys.iter().enumerate() { for (i, key) in self.keys.iter().enumerate() {
@@ -127,8 +130,20 @@ impl<D: DataRef> fmt::Display for GGLWETensorKey<D> {
} }
} }
impl GGLWETensorKey<Vec<u8>> { pub trait TensorKeyAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyAlloc,
{
fn alloc_tensor_key(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> TensorKey<Vec<u8>> {
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
TensorKey {
keys: (0..pairs)
.map(|_| self.alloc_glwe_switching_key(base2k, k, Rank(1), rank, dnum, dsize))
.collect(),
}
}
fn alloc_tensor_key_from_infos<A>(&self, infos: &A) -> TensorKey<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
@@ -137,34 +152,21 @@ impl GGLWETensorKey<Vec<u8>> {
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWETensorKey" "rank_in != rank_out is not supported for GGLWETensorKey"
); );
Self::alloc_with( self.alloc_tensor_key(
infos.n(),
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank_out(), infos.rank(),
infos.dnum(), infos.dnum(),
infos.dsize(), infos.dsize(),
) )
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self { fn bytes_of_tensor_key(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let mut keys: Vec<GGLWESwitchingKey<Vec<u8>>> = Vec::new(); let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1); pairs * self.bytes_of_glwe_switching_key(base2k, k, Rank(1), rank, dnum, dsize)
(0..pairs).for_each(|_| {
keys.push(GGLWESwitchingKey::alloc_with(
n,
base2k,
k,
Rank(1),
rank,
dnum,
dsize,
));
});
Self { keys }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_tensor_key_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
@@ -173,29 +175,53 @@ impl GGLWETensorKey<Vec<u8>> {
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWETensorKey" "rank_in != rank_out is not supported for GGLWETensorKey"
); );
let rank_out: usize = infos.rank_out().into(); self.bytes_of_tensor_key(
let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1); infos.base2k(),
pairs infos.k(),
* GGLWESwitchingKey::alloc_bytes_with( infos.rank(),
infos.n(), infos.dnum(),
infos.base2k(), infos.dsize(),
infos.k(), )
Rank(1),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
pairs * GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, Rank(1), rank, dnum, dsize)
} }
} }
impl<D: DataMut> GGLWETensorKey<D> { impl<B: Backend> TensorKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}
impl TensorKey<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: TensorKeyAlloc,
{
module.alloc_tensor_key_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where
M: TensorKeyAlloc,
{
module.alloc_tensor_key(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: TensorKeyAlloc,
{
module.bytes_of_tensor_key_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: TensorKeyAlloc,
{
module.bytes_of_tensor_key(base2k, k, rank, dnum, dsize)
}
}
impl<D: DataMut> TensorKey<D> {
// Returns a mutable reference to GLWESwitchingKey_{s}(s[i] * s[j]) // Returns a mutable reference to GLWESwitchingKey_{s}(s[i] * s[j])
pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GGLWESwitchingKey<D> { pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GLWESwitchingKey<D> {
if i > j { if i > j {
std::mem::swap(&mut i, &mut j); std::mem::swap(&mut i, &mut j);
}; };
@@ -204,9 +230,9 @@ impl<D: DataMut> GGLWETensorKey<D> {
} }
} }
impl<D: DataRef> GGLWETensorKey<D> { impl<D: DataRef> TensorKey<D> {
// Returns a reference to GLWESwitchingKey_{s}(s[i] * s[j]) // Returns a reference to GLWESwitchingKey_{s}(s[i] * s[j])
pub fn at(&self, mut i: usize, mut j: usize) -> &GGLWESwitchingKey<D> { pub fn at(&self, mut i: usize, mut j: usize) -> &GLWESwitchingKey<D> {
if i > j { if i > j {
std::mem::swap(&mut i, &mut j); std::mem::swap(&mut i, &mut j);
}; };
@@ -215,7 +241,7 @@ impl<D: DataRef> GGLWETensorKey<D> {
} }
} }
impl<D: DataMut> ReaderFrom for GGLWETensorKey<D> { impl<D: DataMut> ReaderFrom for TensorKey<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
let len: usize = reader.read_u64::<LittleEndian>()? as usize; let len: usize = reader.read_u64::<LittleEndian>()? as usize;
if self.keys.len() != len { if self.keys.len() != len {
@@ -231,7 +257,7 @@ impl<D: DataMut> ReaderFrom for GGLWETensorKey<D> {
} }
} }
impl<D: DataRef> WriterTo for GGLWETensorKey<D> { impl<D: DataRef> WriterTo for TensorKey<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?; writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
for key in &self.keys { for key in &self.keys {
@@ -240,3 +266,33 @@ impl<D: DataRef> WriterTo for GGLWETensorKey<D> {
Ok(()) Ok(())
} }
} }
pub trait TensorKeyToRef {
fn to_ref(&self) -> TensorKey<&[u8]>;
}
impl<D: DataRef> TensorKeyToRef for TensorKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToRef,
{
fn to_ref(&self) -> TensorKey<&[u8]> {
TensorKey {
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
}
}
}
pub trait TensorKeyToMut {
fn to_mut(&mut self) -> TensorKey<&mut [u8]>;
}
impl<D: DataMut> TensorKeyToMut for TensorKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToMut,
{
fn to_mut(&mut self) -> TensorKey<&mut [u8]> {
TensorKey {
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
}
}
}

View File

@@ -1,10 +1,12 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, WriterTo, ZnxInfos}, layouts::{
Backend, Data, DataMut, DataRef, FillUniform, MatZnx, MatZnxToMut, MatZnxToRef, Module, ReaderFrom, WriterTo, ZnxInfos,
},
source::Source, source::Source,
}; };
use std::fmt; use std::fmt;
use crate::layouts::{Base2K, BuildError, Degree, Dnum, Dsize, GLWECiphertext, GLWEInfos, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{Base2K, Dnum, Dsize, GLWE, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision};
pub trait GGSWInfos pub trait GGSWInfos
where where
@@ -12,8 +14,8 @@ where
{ {
fn dnum(&self) -> Dnum; fn dnum(&self) -> Dnum;
fn dsize(&self) -> Dsize; fn dsize(&self) -> Dsize;
fn ggsw_layout(&self) -> GGSWCiphertextLayout { fn ggsw_layout(&self) -> GGSWLayout {
GGSWCiphertextLayout { GGSWLayout {
n: self.n(), n: self.n(),
base2k: self.base2k(), base2k: self.base2k(),
k: self.k(), k: self.k(),
@@ -25,8 +27,8 @@ where
} }
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GGSWCiphertextLayout { pub struct GGSWLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank: Rank, pub rank: Rank,
@@ -34,7 +36,7 @@ pub struct GGSWCiphertextLayout {
pub dsize: Dsize, pub dsize: Dsize,
} }
impl LWEInfos for GGSWCiphertextLayout { impl LWEInfos for GGSWLayout {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -43,17 +45,17 @@ impl LWEInfos for GGSWCiphertextLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
} }
impl GLWEInfos for GGSWCiphertextLayout { impl GLWEInfos for GGSWLayout {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank self.rank
} }
} }
impl GGSWInfos for GGSWCiphertextLayout { impl GGSWInfos for GGSWLayout {
fn dsize(&self) -> Dsize { fn dsize(&self) -> Dsize {
self.dsize self.dsize
} }
@@ -64,16 +66,16 @@ impl GGSWInfos for GGSWCiphertextLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GGSWCiphertext<D: Data> { pub struct GGSW<D: Data> {
pub(crate) data: MatZnx<D>, pub(crate) data: MatZnx<D>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) dsize: Dsize, pub(crate) dsize: Dsize,
} }
impl<D: Data> LWEInfos for GGSWCiphertext<D> { impl<D: Data> LWEInfos for GGSW<D> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -89,13 +91,13 @@ impl<D: Data> LWEInfos for GGSWCiphertext<D> {
} }
} }
impl<D: Data> GLWEInfos for GGSWCiphertext<D> { impl<D: Data> GLWEInfos for GGSW<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
Rank(self.data.cols_out() as u32 - 1) Rank(self.data.cols_out() as u32 - 1)
} }
} }
impl<D: Data> GGSWInfos for GGSWCiphertext<D> { impl<D: Data> GGSWInfos for GGSW<D> {
fn dsize(&self) -> Dsize { fn dsize(&self) -> Dsize {
self.dsize self.dsize
} }
@@ -105,133 +107,17 @@ impl<D: Data> GGSWInfos for GGSWCiphertext<D> {
} }
} }
pub struct GGSWCiphertextBuilder<D: Data> { impl<D: DataRef> fmt::Debug for GGSW<D> {
data: Option<MatZnx<D>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
dsize: Option<Dsize>,
}
impl<D: Data> GGSWCiphertext<D> {
#[inline]
pub fn builder() -> GGSWCiphertextBuilder<D> {
GGSWCiphertextBuilder {
data: None,
base2k: None,
k: None,
dsize: None,
}
}
}
impl GGSWCiphertextBuilder<Vec<u8>> {
#[inline]
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGSWInfos,
{
debug_assert!(
infos.size() as u32 > infos.dsize().0,
"invalid ggsw: ceil(k/base2k): {} <= dsize: {}",
infos.size(),
infos.dsize()
);
assert!(
infos.dnum().0 * infos.dsize().0 <= infos.size() as u32,
"invalid ggsw: dnum: {} * dsize:{} > ceil(k/base2k): {}",
infos.dnum(),
infos.dsize(),
infos.size(),
);
self.data = Some(MatZnx::alloc(
infos.n().into(),
infos.dnum().into(),
(infos.rank() + 1).into(),
(infos.rank() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.dsize = Some(infos.dsize());
self
}
}
impl<D: Data> GGSWCiphertextBuilder<D> {
#[inline]
pub fn data(mut self, data: MatZnx<D>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn dsize(mut self, dsize: Dsize) -> Self {
self.dsize = Some(dsize);
self
}
pub fn build(self) -> Result<GGSWCiphertext<D>, BuildError> {
let data: MatZnx<D> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let dsize: Dsize = self.dsize.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if dsize == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGSWCiphertext {
data,
base2k,
k,
dsize,
})
}
}
impl<D: DataRef> fmt::Debug for GGSWCiphertext<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.data) write!(f, "{}", self.data)
} }
} }
impl<D: DataRef> fmt::Display for GGSWCiphertext<D> { impl<D: DataRef> fmt::Display for GGSW<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"(GGSWCiphertext: k: {} base2k: {} dsize: {}) {}", "(GGSW: k: {} base2k: {} dsize: {}) {}",
self.k().0, self.k().0,
self.base2k().0, self.base2k().0,
self.dsize().0, self.dsize().0,
@@ -240,50 +126,39 @@ impl<D: DataRef> fmt::Display for GGSWCiphertext<D> {
} }
} }
impl<D: DataMut> FillUniform for GGSWCiphertext<D> { impl<D: DataMut> FillUniform for GGSW<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> GGSWCiphertext<D> { impl<D: DataRef> GGSW<D> {
pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> { pub fn at(&self, row: usize, col: usize) -> GLWE<&[u8]> {
GLWECiphertext::builder() GLWE {
.data(self.data.at(row, col)) k: self.k,
.base2k(self.base2k()) base2k: self.base2k,
.k(self.k()) data: self.data.at(row, col),
.build() }
.unwrap()
} }
} }
impl<D: DataMut> GGSWCiphertext<D> { impl<D: DataMut> GGSW<D> {
pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertext<&mut [u8]> { pub fn at_mut(&mut self, row: usize, col: usize) -> GLWE<&mut [u8]> {
GLWECiphertext::builder() GLWE {
.base2k(self.base2k()) k: self.k,
.k(self.k()) base2k: self.base2k,
.data(self.data.at_mut(row, col)) data: self.data.at_mut(row, col),
.build() }
.unwrap()
} }
} }
impl GGSWCiphertext<Vec<u8>> { impl<B: Backend> GGSWAlloc for Module<B> where Self: GetRingDegree {}
pub fn alloc<A>(infos: &A) -> Self
where
A: GGSWInfos,
{
Self::alloc_with(
infos.n(),
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
}
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self { pub trait GGSWAlloc
where
Self: GetRingDegree,
{
fn alloc_ggsw(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> GGSW<Vec<u8>> {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -298,9 +173,9 @@ impl GGSWCiphertext<Vec<u8>> {
dsize.0, dsize.0,
); );
Self { GGSW {
data: MatZnx::alloc( data: MatZnx::alloc(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
(rank + 1).into(), (rank + 1).into(),
(rank + 1).into(), (rank + 1).into(),
@@ -312,12 +187,11 @@ impl GGSWCiphertext<Vec<u8>> {
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_ggsw_from_infos<A>(&self, infos: &A) -> GGSW<Vec<u8>>
where where
A: GGSWInfos, A: GGSWInfos,
{ {
Self::alloc_bytes_with( self.alloc_ggsw(
infos.n(),
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank(), infos.rank(),
@@ -326,7 +200,7 @@ impl GGSWCiphertext<Vec<u8>> {
) )
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize { fn bytes_of_ggsw(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -341,19 +215,64 @@ impl GGSWCiphertext<Vec<u8>> {
dsize.0, dsize.0,
); );
MatZnx::alloc_bytes( MatZnx::bytes_of(
n.into(), self.ring_degree().into(),
dnum.into(), dnum.into(),
(rank + 1).into(), (rank + 1).into(),
(rank + 1).into(), (rank + 1).into(),
k.0.div_ceil(base2k.0) as usize, k.0.div_ceil(base2k.0) as usize,
) )
} }
fn bytes_of_ggsw_from_infos<A>(&self, infos: &A) -> usize
where
A: GGSWInfos,
{
self.bytes_of_ggsw(
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
}
}
impl GGSW<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGSWInfos,
M: GGSWAlloc,
{
module.alloc_ggsw_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where
M: GGSWAlloc,
{
module.alloc_ggsw(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGSWInfos,
M: GGSWAlloc,
{
module.bytes_of_ggsw_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: GGSWAlloc,
{
module.bytes_of_ggsw(base2k, k, rank, dnum, dsize)
}
} }
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
impl<D: DataMut> ReaderFrom for GGSWCiphertext<D> { impl<D: DataMut> ReaderFrom for GGSW<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -362,7 +281,7 @@ impl<D: DataMut> ReaderFrom for GGSWCiphertext<D> {
} }
} }
impl<D: DataRef> WriterTo for GGSWCiphertext<D> { impl<D: DataRef> WriterTo for GGSW<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.into())?; writer.write_u32::<LittleEndian>(self.k.into())?;
writer.write_u32::<LittleEndian>(self.base2k.into())?; writer.write_u32::<LittleEndian>(self.base2k.into())?;
@@ -370,3 +289,33 @@ impl<D: DataRef> WriterTo for GGSWCiphertext<D> {
self.data.write_to(writer) self.data.write_to(writer)
} }
} }
pub trait GGSWToMut {
fn to_mut(&mut self) -> GGSW<&mut [u8]>;
}
impl<D: DataMut> GGSWToMut for GGSW<D> {
fn to_mut(&mut self) -> GGSW<&mut [u8]> {
GGSW {
dsize: self.dsize,
k: self.k,
base2k: self.base2k,
data: self.data.to_mut(),
}
}
}
pub trait GGSWToRef {
fn to_ref(&self) -> GGSW<&[u8]>;
}
impl<D: DataRef> GGSWToRef for GGSW<D> {
fn to_ref(&self) -> GGSW<&[u8]> {
GGSW {
dsize: self.dsize,
k: self.k,
base2k: self.base2k,
data: self.data.to_ref(),
}
}
}

View File

@@ -1,11 +1,12 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{ layouts::{
Data, DataMut, DataRef, FillUniform, ReaderFrom, ToOwnedDeep, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo, ZnxInfos, Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, ToOwnedDeep, VecZnx, VecZnxToMut, VecZnxToRef,
WriterTo, ZnxInfos,
}, },
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, BuildError, Degree, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{Base2K, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
@@ -14,8 +15,8 @@ where
Self: LWEInfos, Self: LWEInfos,
{ {
fn rank(&self) -> Rank; fn rank(&self) -> Rank;
fn glwe_layout(&self) -> GLWECiphertextLayout { fn glwe_layout(&self) -> GLWELayout {
GLWECiphertextLayout { GLWELayout {
n: self.n(), n: self.n(),
base2k: self.base2k(), base2k: self.base2k(),
k: self.k(), k: self.k(),
@@ -24,21 +25,21 @@ where
} }
} }
pub trait GLWELayoutSet { pub trait SetGLWEInfos {
fn set_k(&mut self, k: TorusPrecision); fn set_k(&mut self, k: TorusPrecision);
fn set_basek(&mut self, base2k: Base2K); fn set_base2k(&mut self, base2k: Base2K);
} }
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GLWECiphertextLayout { pub struct GLWELayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank: Rank, pub rank: Rank,
} }
impl LWEInfos for GLWECiphertextLayout { impl LWEInfos for GLWELayout {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -51,21 +52,21 @@ impl LWEInfos for GLWECiphertextLayout {
} }
} }
impl GLWEInfos for GLWECiphertextLayout { impl GLWEInfos for GLWELayout {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank self.rank
} }
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GLWECiphertext<D: Data> { pub struct GLWE<D: Data> {
pub(crate) data: VecZnx<D>, pub(crate) data: VecZnx<D>,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
} }
impl<D: DataMut> GLWELayoutSet for GLWECiphertext<D> { impl<D: DataMut> SetGLWEInfos for GLWE<D> {
fn set_basek(&mut self, base2k: Base2K) { fn set_base2k(&mut self, base2k: Base2K) {
self.base2k = base2k self.base2k = base2k
} }
@@ -74,99 +75,19 @@ impl<D: DataMut> GLWELayoutSet for GLWECiphertext<D> {
} }
} }
impl<D: DataRef> GLWECiphertext<D> { impl<D: DataRef> GLWE<D> {
pub fn data(&self) -> &VecZnx<D> { pub fn data(&self) -> &VecZnx<D> {
&self.data &self.data
} }
} }
impl<D: DataMut> GLWECiphertext<D> { impl<D: DataMut> GLWE<D> {
pub fn data_mut(&mut self) -> &mut VecZnx<D> { pub fn data_mut(&mut self) -> &mut VecZnx<D> {
&mut self.data &mut self.data
} }
} }
pub struct GLWECiphertextBuilder<D: Data> { impl<D: Data> LWEInfos for GLWE<D> {
data: Option<VecZnx<D>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
}
impl<D: Data> GLWECiphertext<D> {
#[inline]
pub fn builder() -> GLWECiphertextBuilder<D> {
GLWECiphertextBuilder {
data: None,
base2k: None,
k: None,
}
}
}
impl GLWECiphertextBuilder<Vec<u8>> {
#[inline]
pub fn layout<A>(mut self, layout: &A) -> Self
where
A: GLWEInfos,
{
self.data = Some(VecZnx::alloc(
layout.n().into(),
(layout.rank() + 1).into(),
layout.size(),
));
self.base2k = Some(layout.base2k());
self.k = Some(layout.k());
self
}
}
impl<D: Data> GLWECiphertextBuilder<D> {
#[inline]
pub fn data(mut self, data: VecZnx<D>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
pub fn build(self) -> Result<GLWECiphertext<D>, BuildError> {
let data: VecZnx<D> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GLWECiphertext { data, base2k, k })
}
}
impl<D: Data> LWEInfos for GLWECiphertext<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -175,8 +96,8 @@ impl<D: Data> LWEInfos for GLWECiphertext<D> {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -184,16 +105,16 @@ impl<D: Data> LWEInfos for GLWECiphertext<D> {
} }
} }
impl<D: Data> GLWEInfos for GLWECiphertext<D> { impl<D: Data> GLWEInfos for GLWE<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
Rank(self.data.cols() as u32 - 1) Rank(self.data.cols() as u32 - 1)
} }
} }
impl<D: DataRef> ToOwnedDeep for GLWECiphertext<D> { impl<D: DataRef> ToOwnedDeep for GLWE<D> {
type Owned = GLWECiphertext<Vec<u8>>; type Owned = GLWE<Vec<u8>>;
fn to_owned_deep(&self) -> Self::Owned { fn to_owned_deep(&self) -> Self::Owned {
GLWECiphertext { GLWE {
data: self.data.to_owned_deep(), data: self.data.to_owned_deep(),
k: self.k, k: self.k,
base2k: self.base2k, base2k: self.base2k,
@@ -201,17 +122,17 @@ impl<D: DataRef> ToOwnedDeep for GLWECiphertext<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GLWECiphertext<D> { impl<D: DataRef> fmt::Debug for GLWE<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataRef> fmt::Display for GLWECiphertext<D> { impl<D: DataRef> fmt::Display for GLWE<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"GLWECiphertext: base2k={} k={}: {}", "GLWE: base2k={} k={}: {}",
self.base2k().0, self.base2k().0,
self.k().0, self.k().0,
self.data self.data
@@ -219,71 +140,86 @@ impl<D: DataRef> fmt::Display for GLWECiphertext<D> {
} }
} }
impl<D: DataMut> FillUniform for GLWECiphertext<D> { impl<D: DataMut> FillUniform for GLWE<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.data.fill_uniform(log_bound, source); self.data.fill_uniform(log_bound, source);
} }
} }
impl GLWECiphertext<Vec<u8>> { pub trait GLWEAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GLWEInfos, {
{ fn alloc_glwe(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> GLWE<Vec<u8>> {
Self::alloc_with(infos.n(), infos.base2k(), infos.k(), infos.rank()) GLWE {
} data: VecZnx::alloc(
self.ring_degree().into(),
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self { (rank + 1).into(),
Self { k.0.div_ceil(base2k.0) as usize,
data: VecZnx::alloc(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize), ),
base2k, base2k,
k, k,
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_from_infos<A>(&self, infos: &A) -> GLWE<Vec<u8>>
where where
A: GLWEInfos, A: GLWEInfos,
{ {
Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k(), infos.rank()) self.alloc_glwe(infos.base2k(), infos.k(), infos.rank())
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize { fn bytes_of_glwe(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
VecZnx::alloc_bytes(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize) VecZnx::bytes_of(
self.ring_degree().into(),
(rank + 1).into(),
k.0.div_ceil(base2k.0) as usize,
)
}
fn bytes_of_glwe_from_infos<A>(&self, infos: &A) -> usize
where
A: GLWEInfos,
{
self.bytes_of_glwe(infos.base2k(), infos.k(), infos.rank())
} }
} }
pub trait GLWECiphertextToRef { impl<B: Backend> GLWEAlloc for Module<B> where Self: GetRingDegree {}
fn to_ref(&self) -> GLWECiphertext<&[u8]>;
}
impl<D: DataRef> GLWECiphertextToRef for GLWECiphertext<D> { impl GLWE<Vec<u8>> {
fn to_ref(&self) -> GLWECiphertext<&[u8]> { pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
GLWECiphertext::builder() where
.k(self.k()) A: GLWEInfos,
.base2k(self.base2k()) M: GLWEAlloc,
.data(self.data.to_ref()) {
.build() module.alloc_glwe_from_infos(infos)
.unwrap() }
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
where
M: GLWEAlloc,
{
module.alloc_glwe(base2k, k, rank)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GLWEInfos,
M: GLWEAlloc,
{
module.bytes_of_glwe_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
where
M: GLWEAlloc,
{
module.bytes_of_glwe(base2k, k, rank)
} }
} }
pub trait GLWECiphertextToMut { impl<D: DataMut> ReaderFrom for GLWE<D> {
fn to_mut(&mut self) -> GLWECiphertext<&mut [u8]>;
}
impl<D: DataMut> GLWECiphertextToMut for GLWECiphertext<D> {
fn to_mut(&mut self) -> GLWECiphertext<&mut [u8]> {
GLWECiphertext::builder()
.k(self.k())
.base2k(self.base2k())
.data(self.data.to_mut())
.build()
.unwrap()
}
}
impl<D: DataMut> ReaderFrom for GLWECiphertext<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -291,10 +227,38 @@ impl<D: DataMut> ReaderFrom for GLWECiphertext<D> {
} }
} }
impl<D: DataRef> WriterTo for GLWECiphertext<D> { impl<D: DataRef> WriterTo for GLWE<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.0)?; writer.write_u32::<LittleEndian>(self.k.0)?;
writer.write_u32::<LittleEndian>(self.base2k.0)?; writer.write_u32::<LittleEndian>(self.base2k.0)?;
self.data.write_to(writer) self.data.write_to(writer)
} }
} }
pub trait GLWEToRef {
fn to_ref(&self) -> GLWE<&[u8]>;
}
impl<D: DataRef> GLWEToRef for GLWE<D> {
fn to_ref(&self) -> GLWE<&[u8]> {
GLWE {
k: self.k,
base2k: self.base2k,
data: self.data.to_ref(),
}
}
}
pub trait GLWEToMut {
fn to_mut(&mut self) -> GLWE<&mut [u8]>;
}
impl<D: DataMut> GLWEToMut for GLWE<D> {
fn to_mut(&mut self) -> GLWE<&mut [u8]> {
GLWE {
k: self.k,
base2k: self.base2k,
data: self.data.to_mut(),
}
}
}

View File

@@ -1,8 +1,10 @@
use poulpy_hal::layouts::{Data, DataMut, DataRef, ReaderFrom, VecZnx, WriterTo, ZnxInfos}; use poulpy_hal::layouts::{
Backend, Data, DataMut, DataRef, Module, ReaderFrom, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo, ZnxInfos,
};
use crate::{ use crate::{
dist::Distribution, dist::Distribution,
layouts::{Base2K, BuildError, Degree, GLWEInfos, LWEInfos, Rank, TorusPrecision}, layouts::{Base2K, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision},
}; };
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
@@ -16,12 +18,22 @@ pub struct GLWEPublicKey<D: Data> {
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GLWEPublicKeyLayout { pub struct GLWEPublicKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank: Rank, pub rank: Rank,
} }
pub trait GetDist {
fn get_dist(&self) -> Distribution;
}
impl<D: DataRef> GetDist for GLWEPublicKey<D> {
fn get_dist(&self) -> Distribution {
self.dist
}
}
impl<D: Data> LWEInfos for GLWEPublicKey<D> { impl<D: Data> LWEInfos for GLWEPublicKey<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
@@ -31,8 +43,8 @@ impl<D: Data> LWEInfos for GLWEPublicKey<D> {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -55,7 +67,7 @@ impl LWEInfos for GLWEPublicKeyLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -70,117 +82,77 @@ impl GLWEInfos for GLWEPublicKeyLayout {
} }
} }
pub struct GLWEPublicKeyBuilder<D: Data> { pub trait GLWEPublicKeyAlloc
data: Option<VecZnx<D>>, where
base2k: Option<Base2K>, Self: GetRingDegree,
k: Option<TorusPrecision>, {
} fn alloc_glwe_public_key(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> GLWEPublicKey<Vec<u8>> {
GLWEPublicKey {
impl<D: Data> GLWEPublicKey<D> { data: VecZnx::alloc(
#[inline] self.ring_degree().into(),
pub fn builder() -> GLWEPublicKeyBuilder<D> { (rank + 1).into(),
GLWEPublicKeyBuilder { k.0.div_ceil(base2k.0) as usize,
data: None, ),
base2k: None,
k: None,
}
}
}
impl GLWEPublicKeyBuilder<Vec<u8>> {
#[inline]
pub fn layout<A>(mut self, layout: &A) -> Self
where
A: GLWEInfos,
{
self.data = Some(VecZnx::alloc(
layout.n().into(),
(layout.rank() + 1).into(),
layout.size(),
));
self.base2k = Some(layout.base2k());
self.k = Some(layout.k());
self
}
}
impl<D: Data> GLWEPublicKeyBuilder<D> {
#[inline]
pub fn data(mut self, data: VecZnx<D>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
pub fn build(self) -> Result<GLWEPublicKey<D>, BuildError> {
let data: VecZnx<D> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GLWEPublicKey {
data,
base2k, base2k,
k, k,
dist: Distribution::NONE, dist: Distribution::NONE,
}) }
}
fn alloc_glwe_public_key_from_infos<A>(&self, infos: &A) -> GLWEPublicKey<Vec<u8>>
where
A: GLWEInfos,
{
self.alloc_glwe_public_key(infos.base2k(), infos.k(), infos.rank())
}
fn bytes_of_glwe_public_key(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
VecZnx::bytes_of(
self.ring_degree().into(),
(rank + 1).into(),
k.0.div_ceil(base2k.0) as usize,
)
}
fn bytes_of_glwe_public_key_from_infos<A>(&self, infos: &A) -> usize
where
A: GLWEInfos,
{
self.bytes_of_glwe_public_key(infos.base2k(), infos.k(), infos.rank())
} }
} }
impl<B: Backend> GLWEPublicKeyAlloc for Module<B> where Self: GetRingDegree {}
impl GLWEPublicKey<Vec<u8>> { impl GLWEPublicKey<Vec<u8>> {
pub fn alloc<A>(infos: &A) -> Self pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GLWEInfos, A: GLWEInfos,
M: GLWEPublicKeyAlloc,
{ {
Self::alloc_with(infos.n(), infos.base2k(), infos.k(), infos.rank()) module.alloc_glwe_public_key_from_infos(infos)
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self { pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
Self { where
data: VecZnx::alloc(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize), M: GLWEPublicKeyAlloc,
base2k, {
k, module.alloc_glwe_public_key(base2k, k, rank)
dist: Distribution::NONE,
}
} }
pub fn alloc_bytes<A>(infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
M: GLWEPublicKeyAlloc,
{ {
Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k(), infos.rank()) module.bytes_of_glwe_public_key_from_infos(infos)
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize { pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
VecZnx::alloc_bytes(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize) where
M: GLWEPublicKeyAlloc,
{
module.bytes_of_glwe_public_key(base2k, k, rank)
} }
} }
@@ -207,3 +179,33 @@ impl<D: DataRef> WriterTo for GLWEPublicKey<D> {
self.data.write_to(writer) self.data.write_to(writer)
} }
} }
pub trait GLWEPublicKeyToRef {
fn to_ref(&self) -> GLWEPublicKey<&[u8]>;
}
impl<D: DataRef> GLWEPublicKeyToRef for GLWEPublicKey<D> {
fn to_ref(&self) -> GLWEPublicKey<&[u8]> {
GLWEPublicKey {
data: self.data.to_ref(),
base2k: self.base2k,
k: self.k,
dist: self.dist,
}
}
}
pub trait GLWEPublicKeyToMut {
fn to_mut(&mut self) -> GLWEPublicKey<&mut [u8]>;
}
impl<D: DataMut> GLWEPublicKeyToMut for GLWEPublicKey<D> {
fn to_mut(&mut self) -> GLWEPublicKey<&mut [u8]> {
GLWEPublicKey {
base2k: self.base2k,
k: self.k,
dist: self.dist,
data: self.data.to_mut(),
}
}
}

View File

@@ -1,15 +1,14 @@
use std::fmt; use std::fmt;
use poulpy_hal::layouts::{Data, DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos}; use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos};
use crate::layouts::{ use crate::layouts::{
Base2K, BuildError, Degree, GLWECiphertext, GLWECiphertextToMut, GLWECiphertextToRef, GLWEInfos, GLWELayoutSet, LWEInfos, Base2K, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetRingDegree, LWEInfos, Rank, RingDegree, SetGLWEInfos, TorusPrecision,
Rank, TorusPrecision,
}; };
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GLWEPlaintextLayout { pub struct GLWEPlaintextLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
} }
@@ -23,7 +22,7 @@ impl LWEInfos for GLWEPlaintextLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
} }
@@ -40,8 +39,8 @@ pub struct GLWEPlaintext<D: Data> {
pub k: TorusPrecision, pub k: TorusPrecision,
} }
impl<D: DataMut> GLWELayoutSet for GLWEPlaintext<D> { impl<D: DataMut> SetGLWEInfos for GLWEPlaintext<D> {
fn set_basek(&mut self, base2k: Base2K) { fn set_base2k(&mut self, base2k: Base2K) {
self.base2k = base2k self.base2k = base2k
} }
@@ -63,8 +62,8 @@ impl<D: Data> LWEInfos for GLWEPlaintext<D> {
self.data.size() self.data.size()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
} }
@@ -74,69 +73,6 @@ impl<D: Data> GLWEInfos for GLWEPlaintext<D> {
} }
} }
pub struct GLWEPlaintextBuilder<D: Data> {
data: Option<VecZnx<D>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
}
impl<D: Data> GLWEPlaintext<D> {
#[inline]
pub fn builder() -> GLWEPlaintextBuilder<D> {
GLWEPlaintextBuilder {
data: None,
base2k: None,
k: None,
}
}
}
impl<D: Data> GLWEPlaintextBuilder<D> {
#[inline]
pub fn data(mut self, data: VecZnx<D>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
pub fn build(self) -> Result<GLWEPlaintext<D>, BuildError> {
let data: VecZnx<D> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
if base2k.0 == 0 {
return Err(BuildError::ZeroBase2K);
}
if k.0 == 0 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() != 1 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GLWEPlaintext { data, base2k, k })
}
}
impl<D: DataRef> fmt::Display for GLWEPlaintext<D> { impl<D: DataRef> fmt::Display for GLWEPlaintext<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
@@ -149,54 +85,123 @@ impl<D: DataRef> fmt::Display for GLWEPlaintext<D> {
} }
} }
impl GLWEPlaintext<Vec<u8>> { pub trait GLWEPlaintextAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GLWEInfos, {
{ fn alloc_glwe_plaintext(&self, base2k: Base2K, k: TorusPrecision) -> GLWEPlaintext<Vec<u8>> {
Self::alloc_with(infos.n(), infos.base2k(), infos.k(), Rank(0)) GLWEPlaintext {
} data: VecZnx::alloc(
self.ring_degree().into(),
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self { 1,
debug_assert!(rank.0 == 0); k.0.div_ceil(base2k.0) as usize,
Self { ),
data: VecZnx::alloc(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
base2k, base2k,
k, k,
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_plaintext_from_infos<A>(&self, infos: &A) -> GLWEPlaintext<Vec<u8>>
where where
A: GLWEInfos, A: GLWEInfos,
{ {
Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k(), Rank(0)) self.alloc_glwe_plaintext(infos.base2k(), infos.k())
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize { fn bytes_of_glwe_plaintext(&self, base2k: Base2K, k: TorusPrecision) -> usize {
debug_assert!(rank.0 == 0); VecZnx::bytes_of(
VecZnx::alloc_bytes(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize) self.ring_degree().into(),
1,
k.0.div_ceil(base2k.0) as usize,
)
}
fn bytes_of_glwe_plaintext_from_infos<A>(&self, infos: &A) -> usize
where
A: GLWEInfos,
{
self.bytes_of_glwe_plaintext(infos.base2k(), infos.k())
} }
} }
impl<D: DataRef> GLWECiphertextToRef for GLWEPlaintext<D> { impl<B: Backend> GLWEPlaintextAlloc for Module<B> where Self: GetRingDegree {}
fn to_ref(&self) -> GLWECiphertext<&[u8]> {
GLWECiphertext::builder() impl GLWEPlaintext<Vec<u8>> {
.data(self.data.to_ref()) pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
.k(self.k()) where
.base2k(self.base2k()) A: GLWEInfos,
.build() M: GLWEPlaintextAlloc,
.unwrap() {
module.alloc_glwe_plaintext_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> Self
where
M: GLWEPlaintextAlloc,
{
module.alloc_glwe_plaintext(base2k, k)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GLWEInfos,
M: GLWEPlaintextAlloc,
{
module.bytes_of_glwe_plaintext_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> usize
where
M: GLWEPlaintextAlloc,
{
module.bytes_of_glwe_plaintext(base2k, k)
} }
} }
impl<D: DataMut> GLWECiphertextToMut for GLWEPlaintext<D> { impl<D: DataRef> GLWEToRef for GLWEPlaintext<D> {
fn to_mut(&mut self) -> GLWECiphertext<&mut [u8]> { fn to_ref(&self) -> GLWE<&[u8]> {
GLWECiphertext::builder() GLWE {
.k(self.k()) k: self.k,
.base2k(self.base2k()) base2k: self.base2k,
.data(self.data.to_mut()) data: self.data.to_ref(),
.build() }
.unwrap() }
}
impl<D: DataMut> GLWEToMut for GLWEPlaintext<D> {
fn to_mut(&mut self) -> GLWE<&mut [u8]> {
GLWE {
k: self.k,
base2k: self.base2k,
data: self.data.to_mut(),
}
}
}
pub trait GLWEPlaintextToRef {
fn to_ref(&self) -> GLWEPlaintext<&[u8]>;
}
impl<D: DataRef> GLWEPlaintextToRef for GLWEPlaintext<D> {
fn to_ref(&self) -> GLWEPlaintext<&[u8]> {
GLWEPlaintext {
data: self.data.to_ref(),
base2k: self.base2k,
k: self.k,
}
}
}
pub trait GLWEPlaintextToMut {
fn to_ref(&mut self) -> GLWEPlaintext<&mut [u8]>;
}
impl<D: DataMut> GLWEPlaintextToMut for GLWEPlaintext<D> {
fn to_ref(&mut self) -> GLWEPlaintext<&mut [u8]> {
GLWEPlaintext {
base2k: self.base2k,
k: self.k,
data: self.data.to_mut(),
}
} }
} }

View File

@@ -1,16 +1,19 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, ReaderFrom, ScalarZnx, WriterTo, ZnxInfos, ZnxZero}, layouts::{
Backend, Data, DataMut, DataRef, Module, ReaderFrom, ScalarZnx, ScalarZnxToMut, ScalarZnxToRef, WriterTo, ZnxInfos,
ZnxZero,
},
source::Source, source::Source,
}; };
use crate::{ use crate::{
dist::Distribution, dist::Distribution,
layouts::{Base2K, Degree, GLWEInfos, LWEInfos, Rank, TorusPrecision}, layouts::{Base2K, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision},
}; };
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GLWESecretLayout { pub struct GLWESecretLayout {
pub n: Degree, pub n: RingDegree,
pub rank: Rank, pub rank: Rank,
} }
@@ -23,7 +26,7 @@ impl LWEInfos for GLWESecretLayout {
TorusPrecision(0) TorusPrecision(0)
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -52,8 +55,8 @@ impl<D: Data> LWEInfos for GLWESecret<D> {
TorusPrecision(0) TorusPrecision(0)
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -67,30 +70,67 @@ impl<D: Data> GLWEInfos for GLWESecret<D> {
} }
} }
impl GLWESecret<Vec<u8>> { pub trait GLWESecretAlloc
pub fn alloc<A>(infos: &A) -> Self where
where Self: GetRingDegree,
A: GLWEInfos, {
{ fn alloc_glwe_secret(&self, rank: Rank) -> GLWESecret<Vec<u8>> {
Self::alloc_with(infos.n(), infos.rank()) GLWESecret {
} data: ScalarZnx::alloc(self.ring_degree().into(), rank.into()),
pub fn alloc_with(n: Degree, rank: Rank) -> Self {
Self {
data: ScalarZnx::alloc(n.into(), rank.into()),
dist: Distribution::NONE, dist: Distribution::NONE,
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_glwe_secret_from_infos<A>(&self, infos: &A) -> GLWESecret<Vec<u8>>
where where
A: GLWEInfos, A: GLWEInfos,
{ {
Self::alloc_bytes_with(infos.n(), infos.rank()) self.alloc_glwe_secret(infos.rank())
} }
pub fn alloc_bytes_with(n: Degree, rank: Rank) -> usize { fn bytes_of_glwe_secret(&self, rank: Rank) -> usize {
ScalarZnx::alloc_bytes(n.into(), rank.into()) ScalarZnx::bytes_of(self.ring_degree().into(), rank.into())
}
fn bytes_of_glwe_secret_from_infos<A>(&self, infos: &A) -> usize
where
A: GLWEInfos,
{
self.bytes_of_glwe_secret(infos.rank())
}
}
impl<B: Backend> GLWESecretAlloc for Module<B> where Self: GetRingDegree {}
impl GLWESecret<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GLWEInfos,
M: GLWESecretAlloc,
{
module.alloc_glwe_secret_from_infos(infos)
}
pub fn alloc<M>(module: &M, rank: Rank) -> Self
where
M: GLWESecretAlloc,
{
module.alloc_glwe_secret(rank)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GLWEInfos,
M: GLWESecretAlloc,
{
module.bytes_of_glwe_secret_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, rank: Rank) -> usize
where
M: GLWESecretAlloc,
{
module.bytes_of_glwe_secret(rank)
} }
} }
@@ -136,6 +176,32 @@ impl<D: DataMut> GLWESecret<D> {
} }
} }
pub trait GLWESecretToMut {
fn to_mut(&mut self) -> GLWESecret<&mut [u8]>;
}
impl<D: DataMut> GLWESecretToMut for GLWESecret<D> {
fn to_mut(&mut self) -> GLWESecret<&mut [u8]> {
GLWESecret {
dist: self.dist,
data: self.data.to_mut(),
}
}
}
pub trait GLWESecretToRef {
fn to_ref(&self) -> GLWESecret<&[u8]>;
}
impl<D: DataRef> GLWESecretToRef for GLWESecret<D> {
fn to_ref(&self) -> GLWESecret<&[u8]> {
GLWESecret {
data: self.data.to_ref(),
dist: self.dist,
}
}
}
impl<D: DataMut> ReaderFrom for GLWESecret<D> { impl<D: DataMut> ReaderFrom for GLWESecret<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
match Distribution::read_from(reader) { match Distribution::read_from(reader) {

View File

@@ -1,15 +1,18 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{
Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyAlloc, GLWESwitchingKeyToMut,
GLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
};
use std::fmt; use std::fmt;
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct GLWEToLWEKeyLayout { pub struct GLWEToLWEKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank_in: Rank, pub rank_in: Rank,
@@ -17,7 +20,7 @@ pub struct GLWEToLWEKeyLayout {
} }
impl LWEInfos for GLWEToLWEKeyLayout { impl LWEInfos for GLWEToLWEKeyLayout {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -54,11 +57,11 @@ impl GGLWEInfos for GLWEToLWEKeyLayout {
} }
} }
/// A special [GLWESwitchingKey] required to for the conversion from [GLWECiphertext] to [LWECiphertext]. /// A special [GLWESwitchingKey] required to for the conversion from [GLWE] to [LWE].
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct GLWEToLWEKey<D: Data>(pub(crate) GGLWESwitchingKey<D>); pub struct GLWEToLWESwitchingKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
impl<D: Data> LWEInfos for GLWEToLWEKey<D> { impl<D: Data> LWEInfos for GLWEToLWESwitchingKey<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.0.base2k() self.0.base2k()
} }
@@ -67,7 +70,7 @@ impl<D: Data> LWEInfos for GLWEToLWEKey<D> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -76,12 +79,12 @@ impl<D: Data> LWEInfos for GLWEToLWEKey<D> {
} }
} }
impl<D: Data> GLWEInfos for GLWEToLWEKey<D> { impl<D: Data> GLWEInfos for GLWEToLWESwitchingKey<D> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data> GGLWEInfos for GLWEToLWEKey<D> { impl<D: Data> GGLWEInfos for GLWEToLWESwitchingKey<D> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.0.rank_in() self.0.rank_in()
} }
@@ -99,84 +102,145 @@ impl<D: Data> GGLWEInfos for GLWEToLWEKey<D> {
} }
} }
impl<D: DataRef> fmt::Debug for GLWEToLWEKey<D> { impl<D: DataRef> fmt::Debug for GLWEToLWESwitchingKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataMut> FillUniform for GLWEToLWEKey<D> { impl<D: DataMut> FillUniform for GLWEToLWESwitchingKey<D> {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) { fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
self.0.fill_uniform(log_bound, source); self.0.fill_uniform(log_bound, source);
} }
} }
impl<D: DataRef> fmt::Display for GLWEToLWEKey<D> { impl<D: DataRef> fmt::Display for GLWEToLWESwitchingKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(GLWEToLWESwitchingKey) {}", self.0) write!(f, "(GLWEToLWESwitchingKey) {}", self.0)
} }
} }
impl<D: DataMut> ReaderFrom for GLWEToLWEKey<D> { impl<D: DataMut> ReaderFrom for GLWEToLWESwitchingKey<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.0.read_from(reader) self.0.read_from(reader)
} }
} }
impl<D: DataRef> WriterTo for GLWEToLWEKey<D> { impl<D: DataRef> WriterTo for GLWEToLWESwitchingKey<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
self.0.write_to(writer) self.0.write_to(writer)
} }
} }
impl GLWEToLWEKey<Vec<u8>> { pub trait GLWEToLWESwitchingKeyAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyAlloc,
{
fn alloc_glwe_to_lwe_switching_key(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
dnum: Dnum,
) -> GLWEToLWESwitchingKey<Vec<u8>> {
GLWEToLWESwitchingKey(self.alloc_glwe_switching_key(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
}
fn alloc_glwe_to_lwe_switching_key_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKey<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_out().0, infos.rank_out().0,
1, 1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKey" "rank_out > 1 is not supported for GLWEToLWESwitchingKey"
); );
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for GLWEToLWESwitchingKey" "dsize > 1 is not supported for GLWEToLWESwitchingKey"
); );
Self(GGLWESwitchingKey::alloc(infos)) self.alloc_glwe_to_lwe_switching_key(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self { fn bytes_of_glwe_to_lwe_switching_key(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
Self(GGLWESwitchingKey::alloc_with( self.bytes_of_glwe_switching_key(base2k, k, rank_in, Rank(1), dnum, Dsize(1))
n,
base2k,
k,
rank_in,
Rank(1),
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_glwe_to_lwe_switching_key_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_out().0, infos.rank_out().0,
1, 1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKey" "rank_out > 1 is not supported for GLWEToLWESwitchingKey"
); );
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for GLWEToLWESwitchingKey" "dsize > 1 is not supported for GLWEToLWESwitchingKey"
); );
GGLWESwitchingKey::alloc_bytes(infos) self.bytes_of_glwe_to_lwe_switching_key(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
} }
}
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rank_in, Rank(1), dnum, Dsize(1)) impl<B: Backend> GLWEToLWESwitchingKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}
impl GLWEToLWESwitchingKey<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyAlloc,
{
module.alloc_glwe_to_lwe_switching_key_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
where
M: GLWEToLWESwitchingKeyAlloc,
{
module.alloc_glwe_to_lwe_switching_key(base2k, k, rank_in, dnum)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyAlloc,
{
module.bytes_of_glwe_to_lwe_switching_key_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize
where
M: GLWEToLWESwitchingKeyAlloc,
{
module.bytes_of_glwe_to_lwe_switching_key(base2k, k, rank_in, dnum)
}
}
pub trait GLWEToLWESwitchingKeyToRef {
fn to_ref(&self) -> GLWEToLWESwitchingKey<&[u8]>;
}
impl<D: DataRef> GLWEToLWESwitchingKeyToRef for GLWEToLWESwitchingKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToRef,
{
fn to_ref(&self) -> GLWEToLWESwitchingKey<&[u8]> {
GLWEToLWESwitchingKey(self.0.to_ref())
}
}
pub trait GLWEToLWESwitchingKeyToMut {
fn to_mut(&mut self) -> GLWEToLWESwitchingKey<&mut [u8]>;
}
impl<D: DataMut> GLWEToLWESwitchingKeyToMut for GLWEToLWESwitchingKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToMut,
{
fn to_mut(&mut self) -> GLWEToLWESwitchingKey<&mut [u8]> {
GLWEToLWESwitchingKey(self.0.to_mut())
} }
} }

View File

@@ -1,15 +1,15 @@
use std::fmt; use std::fmt;
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo, Zn, ZnToMut, ZnToRef, ZnxInfos}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo, Zn, ZnToMut, ZnToRef, ZnxInfos},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, BuildError, Degree, TorusPrecision}; use crate::layouts::{Base2K, RingDegree, TorusPrecision};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
pub trait LWEInfos { pub trait LWEInfos {
fn n(&self) -> Degree; fn n(&self) -> RingDegree;
fn k(&self) -> TorusPrecision; fn k(&self) -> TorusPrecision;
fn max_k(&self) -> TorusPrecision { fn max_k(&self) -> TorusPrecision {
TorusPrecision(self.k().0 * self.size() as u32) TorusPrecision(self.k().0 * self.size() as u32)
@@ -18,8 +18,8 @@ pub trait LWEInfos {
fn size(&self) -> usize { fn size(&self) -> usize {
self.k().0.div_ceil(self.base2k().0) as usize self.k().0.div_ceil(self.base2k().0) as usize
} }
fn lwe_layout(&self) -> LWECiphertextLayout { fn lwe_layout(&self) -> LWELayout {
LWECiphertextLayout { LWELayout {
n: self.n(), n: self.n(),
k: self.k(), k: self.k(),
base2k: self.base2k(), base2k: self.base2k(),
@@ -27,14 +27,19 @@ pub trait LWEInfos {
} }
} }
pub trait SetLWEInfos {
fn set_k(&mut self, k: TorusPrecision);
fn set_base2k(&mut self, base2k: Base2K);
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct LWECiphertextLayout { pub struct LWELayout {
pub n: Degree, pub n: RingDegree,
pub k: TorusPrecision, pub k: TorusPrecision,
pub base2k: Base2K, pub base2k: Base2K,
} }
impl LWEInfos for LWECiphertextLayout { impl LWEInfos for LWELayout {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -43,19 +48,18 @@ impl LWEInfos for LWECiphertextLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct LWECiphertext<D: Data> { pub struct LWE<D: Data> {
pub(crate) data: Zn<D>, pub(crate) data: Zn<D>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
} }
impl<D: Data> LWEInfos for LWECiphertext<D> { impl<D: Data> LWEInfos for LWE<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
} }
@@ -63,8 +67,8 @@ impl<D: Data> LWEInfos for LWECiphertext<D> {
fn k(&self) -> TorusPrecision { fn k(&self) -> TorusPrecision {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32 - 1) RingDegree(self.data.n() as u32 - 1)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -72,29 +76,39 @@ impl<D: Data> LWEInfos for LWECiphertext<D> {
} }
} }
impl<D: DataRef> LWECiphertext<D> { impl<D: Data> SetLWEInfos for LWE<D> {
fn set_base2k(&mut self, base2k: Base2K) {
self.base2k = base2k
}
fn set_k(&mut self, k: TorusPrecision) {
self.k = k
}
}
impl<D: DataRef> LWE<D> {
pub fn data(&self) -> &Zn<D> { pub fn data(&self) -> &Zn<D> {
&self.data &self.data
} }
} }
impl<D: DataMut> LWECiphertext<D> { impl<D: DataMut> LWE<D> {
pub fn data_mut(&mut self) -> &Zn<D> { pub fn data_mut(&mut self) -> &Zn<D> {
&mut self.data &mut self.data
} }
} }
impl<D: DataRef> fmt::Debug for LWECiphertext<D> { impl<D: DataRef> fmt::Debug for LWE<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{self}") write!(f, "{self}")
} }
} }
impl<D: DataRef> fmt::Display for LWECiphertext<D> { impl<D: DataRef> fmt::Display for LWE<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"LWECiphertext: base2k={} k={}: {}", "LWE: base2k={} k={}: {}",
self.base2k().0, self.base2k().0,
self.k().0, self.k().0,
self.data self.data
@@ -102,7 +116,7 @@ impl<D: DataRef> fmt::Display for LWECiphertext<D> {
} }
} }
impl<D: DataMut> FillUniform for LWECiphertext<D> impl<D: DataMut> FillUniform for LWE<D>
where where
Zn<D>: FillUniform, Zn<D>: FillUniform,
{ {
@@ -111,142 +125,98 @@ where
} }
} }
impl LWECiphertext<Vec<u8>> { pub trait LWEAlloc {
pub fn alloc<A>(infos: &A) -> Self fn alloc_lwe(&self, n: RingDegree, base2k: Base2K, k: TorusPrecision) -> LWE<Vec<u8>> {
where LWE {
A: LWEInfos,
{
Self::alloc_with(infos.n(), infos.base2k(), infos.k())
}
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision) -> Self {
Self {
data: Zn::alloc((n + 1).into(), 1, k.0.div_ceil(base2k.0) as usize), data: Zn::alloc((n + 1).into(), 1, k.0.div_ceil(base2k.0) as usize),
k, k,
base2k, base2k,
} }
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn alloc_lwe_from_infos<A>(&self, infos: &A) -> LWE<Vec<u8>>
where where
A: LWEInfos, A: LWEInfos,
{ {
Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k()) self.alloc_lwe(infos.n(), infos.base2k(), infos.k())
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision) -> usize { fn bytes_of_lwe(&self, n: RingDegree, base2k: Base2K, k: TorusPrecision) -> usize {
Zn::alloc_bytes((n + 1).into(), 1, k.0.div_ceil(base2k.0) as usize) Zn::bytes_of((n + 1).into(), 1, k.0.div_ceil(base2k.0) as usize)
} }
}
impl LWECiphertextBuilder<Vec<u8>> { fn bytes_of_lwe_from_infos<A>(&self, infos: &A) -> usize
#[inline]
pub fn layout<A>(mut self, layout: A) -> Self
where where
A: LWEInfos, A: LWEInfos,
{ {
self.data = Some(Zn::alloc((layout.n() + 1).into(), 1, layout.size())); self.bytes_of_lwe(infos.n(), infos.base2k(), infos.k())
self.base2k = Some(layout.base2k());
self.k = Some(layout.k());
self
} }
} }
pub struct LWECiphertextBuilder<D: Data> { impl<B: Backend> LWEAlloc for Module<B> {}
data: Option<Zn<D>>,
base2k: Option<Base2K>, impl LWE<Vec<u8>> {
k: Option<TorusPrecision>, pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: LWEInfos,
M: LWEAlloc,
{
module.alloc_lwe_from_infos(infos)
}
pub fn alloc<M>(module: &M, n: RingDegree, base2k: Base2K, k: TorusPrecision) -> Self
where
M: LWEAlloc,
{
module.alloc_lwe(n, base2k, k)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: LWEInfos,
M: LWEAlloc,
{
module.bytes_of_lwe_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, n: RingDegree, base2k: Base2K, k: TorusPrecision) -> usize
where
M: LWEAlloc,
{
module.bytes_of_lwe(n, base2k, k)
}
} }
impl<D: Data> LWECiphertext<D> { pub trait LWEToRef {
#[inline] fn to_ref(&self) -> LWE<&[u8]>;
pub fn builder() -> LWECiphertextBuilder<D> { }
LWECiphertextBuilder {
data: None, impl<D: DataRef> LWEToRef for LWE<D> {
base2k: None, fn to_ref(&self) -> LWE<&[u8]> {
k: None, LWE {
k: self.k,
base2k: self.base2k,
data: self.data.to_ref(),
} }
} }
} }
impl<D: Data> LWECiphertextBuilder<D> { pub trait LWEToMut {
#[inline]
pub fn data(mut self, data: Zn<D>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
pub fn build(self) -> Result<LWECiphertext<D>, BuildError> {
let data: Zn<D> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
if base2k.0 == 0 {
return Err(BuildError::ZeroBase2K);
}
if k.0 == 0 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(LWECiphertext { data, base2k, k })
}
}
pub trait LWECiphertextToRef {
fn to_ref(&self) -> LWECiphertext<&[u8]>;
}
impl<D: DataRef> LWECiphertextToRef for LWECiphertext<D> {
fn to_ref(&self) -> LWECiphertext<&[u8]> {
LWECiphertext::builder()
.base2k(self.base2k())
.k(self.k())
.data(self.data.to_ref())
.build()
.unwrap()
}
}
pub trait LWECiphertextToMut {
#[allow(dead_code)] #[allow(dead_code)]
fn to_mut(&mut self) -> LWECiphertext<&mut [u8]>; fn to_mut(&mut self) -> LWE<&mut [u8]>;
} }
impl<D: DataMut> LWECiphertextToMut for LWECiphertext<D> { impl<D: DataMut> LWEToMut for LWE<D> {
fn to_mut(&mut self) -> LWECiphertext<&mut [u8]> { fn to_mut(&mut self) -> LWE<&mut [u8]> {
LWECiphertext::builder() LWE {
.base2k(self.base2k()) k: self.k,
.k(self.k()) base2k: self.base2k,
.data(self.data.to_mut()) data: self.data.to_mut(),
.build() }
.unwrap()
} }
} }
impl<D: DataMut> ReaderFrom for LWECiphertext<D> { impl<D: DataMut> ReaderFrom for LWE<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> { fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?); self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?); self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
@@ -254,7 +224,7 @@ impl<D: DataMut> ReaderFrom for LWECiphertext<D> {
} }
} }
impl<D: DataRef> WriterTo for LWECiphertext<D> { impl<D: DataRef> WriterTo for LWE<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u32::<LittleEndian>(self.k.into())?; writer.write_u32::<LittleEndian>(self.k.into())?;
writer.write_u32::<LittleEndian>(self.base2k.into())?; writer.write_u32::<LittleEndian>(self.base2k.into())?;

View File

@@ -1,22 +1,25 @@
use std::fmt; use std::fmt;
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{
Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyAlloc, GLWESwitchingKeyToMut,
GLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
};
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct LWESwitchingKeyLayout { pub struct LWESwitchingKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub dnum: Dnum, pub dnum: Dnum,
} }
impl LWEInfos for LWESwitchingKeyLayout { impl LWEInfos for LWESwitchingKeyLayout {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
@@ -54,7 +57,7 @@ impl GGLWEInfos for LWESwitchingKeyLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct LWESwitchingKey<D: Data>(pub(crate) GGLWESwitchingKey<D>); pub struct LWESwitchingKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
impl<D: Data> LWEInfos for LWESwitchingKey<D> { impl<D: Data> LWEInfos for LWESwitchingKey<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -65,7 +68,7 @@ impl<D: Data> LWEInfos for LWESwitchingKey<D> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -98,65 +101,94 @@ impl<D: Data> GGLWEInfos for LWESwitchingKey<D> {
} }
} }
pub trait LWESwitchingKeyAlloc
where
Self: GLWESwitchingKeyAlloc,
{
fn alloc_lwe_switching_key(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> LWESwitchingKey<Vec<u8>> {
LWESwitchingKey(self.alloc_glwe_switching_key(base2k, k, Rank(1), Rank(1), dnum, Dsize(1)))
}
fn alloc_lwe_switching_key_from_infos<A>(&self, infos: &A) -> LWESwitchingKey<Vec<u8>>
where
A: GGLWEInfos,
{
assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
self.alloc_lwe_switching_key(infos.base2k(), infos.k(), infos.dnum())
}
fn bytes_of_lwe_switching_key(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
self.bytes_of_glwe_switching_key(base2k, k, Rank(1), Rank(1), dnum, Dsize(1))
}
fn bytes_of_lwe_switching_key_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
self.bytes_of_lwe_switching_key(infos.base2k(), infos.k(), infos.dnum())
}
}
impl<B: Backend> LWESwitchingKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}
impl LWESwitchingKey<Vec<u8>> { impl LWESwitchingKey<Vec<u8>> {
pub fn alloc<A>(infos: &A) -> Self pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GGLWEInfos, A: GGLWEInfos,
M: LWESwitchingKeyAlloc,
{ {
debug_assert_eq!( module.alloc_lwe_switching_key_from_infos(infos)
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
Self(GGLWESwitchingKey::alloc(infos))
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self { pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
Self(GGLWESwitchingKey::alloc_with( where
n, M: LWESwitchingKeyAlloc,
base2k, {
k, module.alloc_lwe_switching_key(base2k, k, dnum)
Rank(1),
Rank(1),
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
M: LWESwitchingKeyAlloc,
{ {
debug_assert_eq!( module.bytes_of_glwe_switching_key_from_infos(infos)
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
GGLWESwitchingKey::alloc_bytes(infos)
} }
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize { pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, Rank(1), Rank(1), dnum, Dsize(1)) where
M: LWESwitchingKeyAlloc,
{
module.bytes_of_lwe_switching_key(base2k, k, dnum)
} }
} }
@@ -189,3 +221,29 @@ impl<D: DataRef> WriterTo for LWESwitchingKey<D> {
self.0.write_to(writer) self.0.write_to(writer)
} }
} }
pub trait LWESwitchingKeyToRef {
fn to_ref(&self) -> LWESwitchingKey<&[u8]>;
}
impl<D: DataRef> LWESwitchingKeyToRef for LWESwitchingKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToRef,
{
fn to_ref(&self) -> LWESwitchingKey<&[u8]> {
LWESwitchingKey(self.0.to_ref())
}
}
pub trait LWESwitchingKeyToMut {
fn to_mut(&mut self) -> LWESwitchingKey<&mut [u8]>;
}
impl<D: DataMut> LWESwitchingKeyToMut for LWESwitchingKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToMut,
{
fn to_mut(&mut self) -> LWESwitchingKey<&mut [u8]> {
LWESwitchingKey(self.0.to_mut())
}
}

View File

@@ -1,8 +1,8 @@
use std::fmt; use std::fmt;
use poulpy_hal::layouts::{Data, DataMut, DataRef, Zn, ZnToMut, ZnToRef, ZnxInfos}; use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Zn, ZnToMut, ZnToRef, ZnxInfos};
use crate::layouts::{Base2K, Degree, LWEInfos, TorusPrecision}; use crate::layouts::{Base2K, LWEInfos, RingDegree, TorusPrecision};
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct LWEPlaintextLayout { pub struct LWEPlaintextLayout {
@@ -19,8 +19,8 @@ impl LWEInfos for LWEPlaintextLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(0) RingDegree(0)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -43,8 +43,8 @@ impl<D: Data> LWEInfos for LWEPlaintext<D> {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32 - 1) RingDegree(self.data.n() as u32 - 1)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -52,21 +52,40 @@ impl<D: Data> LWEInfos for LWEPlaintext<D> {
} }
} }
impl LWEPlaintext<Vec<u8>> { pub trait LWEPlaintextAlloc {
pub fn alloc<A>(infos: &A) -> Self fn alloc_lwe_plaintext(&self, base2k: Base2K, k: TorusPrecision) -> LWEPlaintext<Vec<u8>> {
where LWEPlaintext {
A: LWEInfos,
{
Self::alloc_with(infos.base2k(), infos.k())
}
pub fn alloc_with(base2k: Base2K, k: TorusPrecision) -> Self {
Self {
data: Zn::alloc(1, 1, k.0.div_ceil(base2k.0) as usize), data: Zn::alloc(1, 1, k.0.div_ceil(base2k.0) as usize),
k, k,
base2k, base2k,
} }
} }
fn alloc_lwe_plaintext_from_infos<A>(&self, infos: &A) -> LWEPlaintext<Vec<u8>>
where
A: LWEInfos,
{
self.alloc_lwe_plaintext(infos.base2k(), infos.k())
}
}
impl<B: Backend> LWEPlaintextAlloc for Module<B> {}
impl LWEPlaintext<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: LWEInfos,
M: LWEPlaintextAlloc,
{
module.alloc_lwe_plaintext_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> Self
where
M: LWEPlaintextAlloc,
{
module.alloc_lwe_plaintext(base2k, k)
}
} }
impl<D: DataRef> fmt::Display for LWEPlaintext<D> { impl<D: DataRef> fmt::Display for LWEPlaintext<D> {

View File

@@ -1,11 +1,11 @@
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, ScalarZnx, ZnxInfos, ZnxView, ZnxZero}, layouts::{Backend, Data, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToMut, ScalarZnxToRef, ZnxInfos, ZnxView, ZnxZero},
source::Source, source::Source,
}; };
use crate::{ use crate::{
dist::Distribution, dist::Distribution,
layouts::{Base2K, Degree, LWEInfos, TorusPrecision}, layouts::{Base2K, LWEInfos, RingDegree, TorusPrecision},
}; };
pub struct LWESecret<D: Data> { pub struct LWESecret<D: Data> {
@@ -13,15 +13,26 @@ pub struct LWESecret<D: Data> {
pub(crate) dist: Distribution, pub(crate) dist: Distribution,
} }
impl LWESecret<Vec<u8>> { pub trait LWESecretAlloc {
pub fn alloc(n: Degree) -> Self { fn alloc_lwe_secret(&self, n: RingDegree) -> LWESecret<Vec<u8>> {
Self { LWESecret {
data: ScalarZnx::alloc(n.into(), 1), data: ScalarZnx::alloc(n.into(), 1),
dist: Distribution::NONE, dist: Distribution::NONE,
} }
} }
} }
impl<B: Backend> LWESecretAlloc for Module<B> {}
impl LWESecret<Vec<u8>> {
pub fn alloc<M>(module: &M, n: RingDegree) -> Self
where
M: LWESecretAlloc,
{
module.alloc_lwe_secret(n)
}
}
impl<D: DataRef> LWESecret<D> { impl<D: DataRef> LWESecret<D> {
pub fn raw(&self) -> &[i64] { pub fn raw(&self) -> &[i64] {
self.data.at(0, 0) self.data.at(0, 0)
@@ -44,8 +55,8 @@ impl<D: Data> LWEInfos for LWESecret<D> {
TorusPrecision(0) TorusPrecision(0)
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -84,3 +95,29 @@ impl<D: DataMut> LWESecret<D> {
self.dist = Distribution::ZERO; self.dist = Distribution::ZERO;
} }
} }
pub trait LWESecretToRef {
fn to_ref(&self) -> LWESecret<&[u8]>;
}
impl<D: DataRef> LWESecretToRef for LWESecret<D> {
fn to_ref(&self) -> LWESecret<&[u8]> {
LWESecret {
dist: self.dist,
data: self.data.to_ref(),
}
}
}
pub trait LWESecretToMut {
fn to_mut(&mut self) -> LWESecret<&mut [u8]>;
}
impl<D: DataMut> LWESecretToMut for LWESecret<D> {
fn to_mut(&mut self) -> LWESecret<&mut [u8]> {
LWESecret {
dist: self.dist,
data: self.data.to_mut(),
}
}
}

View File

@@ -1,15 +1,18 @@
use std::fmt; use std::fmt;
use poulpy_hal::{ use poulpy_hal::{
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo}, layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
source::Source, source::Source,
}; };
use crate::layouts::{Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, TorusPrecision}; use crate::layouts::{
Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyAlloc, GLWESwitchingKeyToMut,
GLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
};
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub struct LWEToGLWESwitchingKeyLayout { pub struct LWEToGLWESwitchingKeyLayout {
pub n: Degree, pub n: RingDegree,
pub base2k: Base2K, pub base2k: Base2K,
pub k: TorusPrecision, pub k: TorusPrecision,
pub rank_out: Rank, pub rank_out: Rank,
@@ -25,7 +28,7 @@ impl LWEInfos for LWEToGLWESwitchingKeyLayout {
self.k self.k
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.n self.n
} }
} }
@@ -55,7 +58,7 @@ impl GGLWEInfos for LWEToGLWESwitchingKeyLayout {
} }
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct LWEToGLWESwitchingKey<D: Data>(pub(crate) GGLWESwitchingKey<D>); pub struct LWEToGLWESwitchingKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
impl<D: Data> LWEInfos for LWEToGLWESwitchingKey<D> { impl<D: Data> LWEInfos for LWEToGLWESwitchingKey<D> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -66,7 +69,7 @@ impl<D: Data> LWEInfos for LWEToGLWESwitchingKey<D> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -128,54 +131,116 @@ impl<D: DataRef> WriterTo for LWEToGLWESwitchingKey<D> {
} }
} }
impl LWEToGLWESwitchingKey<Vec<u8>> { pub trait LWEToGLWESwitchingKeyAlloc
pub fn alloc<A>(infos: &A) -> Self where
Self: GLWESwitchingKeyAlloc,
{
fn alloc_lwe_to_glwe_switching_key(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_out: Rank,
dnum: Dnum,
) -> LWEToGLWESwitchingKey<Vec<u8>> {
LWEToGLWESwitchingKey(self.alloc_glwe_switching_key(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
}
fn alloc_lwe_to_glwe_switching_key_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKey<Vec<u8>>
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_in().0, infos.rank_in().0,
1, 1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey" "rank_in > 1 is not supported for LWEToGLWESwitchingKey"
); );
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey" "dsize > 1 is not supported for LWEToGLWESwitchingKey"
); );
Self(GGLWESwitchingKey::alloc(infos))
self.alloc_lwe_to_glwe_switching_key(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
} }
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self { fn bytes_of_lwe_to_glwe_switching_key(&self, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize {
Self(GGLWESwitchingKey::alloc_with( self.bytes_of_glwe_switching_key(base2k, k, Rank(1), rank_out, dnum, Dsize(1))
n,
base2k,
k,
Rank(1),
rank_out,
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(infos: &A) -> usize fn bytes_of_lwe_to_glwe_switching_key_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
{ {
debug_assert_eq!( assert_eq!(
infos.rank_in().0, infos.rank_in().0,
1, 1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey" "rank_in > 1 is not supported for LWEToGLWESwitchingKey"
); );
debug_assert_eq!( assert_eq!(
infos.dsize().0, infos.dsize().0,
1, 1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey" "dsize > 1 is not supported for LWEToGLWESwitchingKey"
); );
GGLWESwitchingKey::alloc_bytes(infos) self.bytes_of_lwe_to_glwe_switching_key(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
} }
}
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_out: Rank) -> usize {
GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, Rank(1), rank_out, dnum, Dsize(1)) impl<B: Backend> LWEToGLWESwitchingKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}
impl LWEToGLWESwitchingKey<Vec<u8>> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyAlloc,
{
module.alloc_lwe_to_glwe_switching_key_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
where
M: LWEToGLWESwitchingKeyAlloc,
{
module.alloc_lwe_to_glwe_switching_key(base2k, k, rank_out, dnum)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyAlloc,
{
module.bytes_of_lwe_to_glwe_switching_key_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_out: Rank) -> usize
where
M: LWEToGLWESwitchingKeyAlloc,
{
module.bytes_of_lwe_to_glwe_switching_key(base2k, k, rank_out, dnum)
}
}
pub trait LWEToGLWESwitchingKeyToRef {
fn to_ref(&self) -> LWEToGLWESwitchingKey<&[u8]>;
}
impl<D: DataRef> LWEToGLWESwitchingKeyToRef for LWEToGLWESwitchingKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToRef,
{
fn to_ref(&self) -> LWEToGLWESwitchingKey<&[u8]> {
LWEToGLWESwitchingKey(self.0.to_ref())
}
}
pub trait LWEToGLWESwitchingKeyToMut {
fn to_mut(&mut self) -> LWEToGLWESwitchingKey<&mut [u8]>;
}
impl<D: DataMut> LWEToGLWESwitchingKeyToMut for LWEToGLWESwitchingKey<D>
where
GLWESwitchingKey<D>: GLWESwitchingKeyToMut,
{
fn to_mut(&mut self) -> LWEToGLWESwitchingKey<&mut [u8]> {
LWEToGLWESwitchingKey(self.0.to_mut())
} }
} }

View File

@@ -33,20 +33,16 @@ pub use lwe_pt::*;
pub use lwe_sk::*; pub use lwe_sk::*;
pub use lwe_to_glwe_ksk::*; pub use lwe_to_glwe_ksk::*;
#[derive(Debug)] use poulpy_hal::layouts::{Backend, Module};
pub enum BuildError {
MissingData, pub trait GetRingDegree {
MissingBase2K, fn ring_degree(&self) -> RingDegree;
MissingK, }
MissingDigits,
ZeroDegree, impl<B: Backend> GetRingDegree for Module<B> {
NonPowerOfTwoDegree, fn ring_degree(&self) -> RingDegree {
ZeroBase2K, Self::n(&self).into()
ZeroTorusPrecision, }
ZeroCols,
ZeroLimbs,
ZeroRank,
ZeroDigits,
} }
/// Newtype over `u32` with arithmetic and comparisons against same type and `u32`. /// Newtype over `u32` with arithmetic and comparisons against same type and `u32`.
@@ -206,14 +202,14 @@ macro_rules! newtype_u32 {
}; };
} }
newtype_u32!(Degree); newtype_u32!(RingDegree);
newtype_u32!(TorusPrecision); newtype_u32!(TorusPrecision);
newtype_u32!(Base2K); newtype_u32!(Base2K);
newtype_u32!(Dnum); newtype_u32!(Dnum);
newtype_u32!(Rank); newtype_u32!(Rank);
newtype_u32!(Dsize); newtype_u32!(Dsize);
impl Degree { impl RingDegree {
pub fn log2(&self) -> usize { pub fn log2(&self) -> usize {
let n: usize = self.0 as usize; let n: usize = self.0 as usize;
(usize::BITS - (n - 1).leading_zeros()) as _ (usize::BITS - (n - 1).leading_zeros()) as _

View File

@@ -1,27 +1,21 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEAutomorphismKey, GGLWEInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, AutomorphismKeyToRef, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc, PrepareScratchSpace}, prepared::{
GLWESwitchingKeyPrepare, GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedAlloc, GLWESwitchingKeyPreparedToMut,
GLWESwitchingKeyPreparedToRef,
},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct GGLWEAutomorphismKeyPrepared<D: Data, B: Backend> { pub struct AutomorphismKeyPrepared<D: Data, B: Backend> {
pub(crate) key: GGLWESwitchingKeyPrepared<D, B>, pub(crate) key: GLWESwitchingKeyPrepared<D, B>,
pub(crate) p: i64, pub(crate) p: i64,
} }
impl<D: Data, B: Backend> GGLWEAutomorphismKeyPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for AutomorphismKeyPrepared<D, B> {
pub fn p(&self) -> i64 { fn n(&self) -> RingDegree {
self.p
}
}
impl<D: Data, B: Backend> LWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
fn n(&self) -> Degree {
self.key.n() self.key.n()
} }
@@ -38,13 +32,33 @@ impl<D: Data, B: Backend> LWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
} }
} }
impl<D: Data, B: Backend> GLWEInfos for GGLWEAutomorphismKeyPrepared<D, B> { pub trait GetAutomorphismGaloisElement {
fn p(&self) -> i64;
}
impl<D: Data, B: Backend> GetAutomorphismGaloisElement for AutomorphismKeyPrepared<D, B> {
fn p(&self) -> i64 {
self.p
}
}
pub trait SetAutomorphismGaloisElement {
fn set_p(&mut self, p: i64);
}
impl<D: Data, B: Backend> SetAutomorphismGaloisElement for AutomorphismKeyPrepared<D, B> {
fn set_p(&mut self, p: i64) {
self.p = p
}
}
impl<D: Data, B: Backend> GLWEInfos for AutomorphismKeyPrepared<D, B> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data, B: Backend> GGLWEInfos for GGLWEAutomorphismKeyPrepared<D, B> { impl<D: Data, B: Backend> GGLWEInfos for AutomorphismKeyPrepared<D, B> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.key.rank_in() self.key.rank_in()
} }
@@ -62,80 +76,170 @@ impl<D: Data, B: Backend> GGLWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
} }
} }
impl<B: Backend> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> { pub trait AutomorphismKeyPreparedAlloc<B: Backend>
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self where
Self: GLWESwitchingKeyPreparedAlloc<B>,
{
fn alloc_automorphism_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank: Rank,
dnum: Dnum,
dsize: Dsize,
) -> AutomorphismKeyPrepared<Vec<u8>, B> {
AutomorphismKeyPrepared::<Vec<u8>, B> {
key: self.alloc_glwe_switching_key_prepared(base2k, k, rank, rank, dnum, dsize),
p: 0,
}
}
fn alloc_automorphism_key_prepared_from_infos<A>(&self, infos: &A) -> AutomorphismKeyPrepared<Vec<u8>, B>
where
A: GGLWEInfos,
{
assert_eq!(
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for AutomorphismKeyPrepared"
);
self.alloc_automorphism_key_prepared(
infos.base2k(),
infos.k(),
infos.rank(),
infos.dnum(),
infos.dsize(),
)
}
fn bytes_of_automorphism_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, rank, rank, dnum, dsize)
}
fn bytes_of_automorphism_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAlloc<B>,
{ {
assert_eq!( assert_eq!(
infos.rank_in(), infos.rank_in(),
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared" "rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared"
); );
GGLWEAutomorphismKeyPrepared::<Vec<u8>, B> { self.bytes_of_automorphism_key_prepared(
key: GGLWESwitchingKeyPrepared::alloc(module, infos), infos.base2k(),
p: 0, infos.k(),
} infos.rank(),
infos.dnum(),
infos.dsize(),
)
} }
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self impl<B: Backend> AutomorphismKeyPreparedAlloc<B> for Module<B> where Module<B>: GLWESwitchingKeyPreparedAlloc<B> {}
where
Module<B>: VmpPMatAlloc<B>,
{
GGLWEAutomorphismKeyPrepared {
key: GGLWESwitchingKeyPrepared::alloc_with(module, base2k, k, rank, rank, dnum, dsize),
p: 0,
}
}
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes, M: AutomorphismKeyPreparedAlloc<B>,
{ {
assert_eq!( module.alloc_automorphism_key_prepared_from_infos(infos)
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
} }
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
where where
Module<B>: VmpPMatAllocBytes, M: AutomorphismKeyPreparedAlloc<B>,
{ {
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rank, rank, dnum, dsize) module.alloc_automorphism_key_prepared(base2k, k, rank, dnum, dsize)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: AutomorphismKeyPreparedAlloc<B>,
{
module.bytes_of_automorphism_key_prepared_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: AutomorphismKeyPreparedAlloc<B>,
{
module.bytes_of_automorphism_key_prepared(base2k, k, rank, dnum, dsize)
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for GGLWEAutomorphismKeyPrepared<Vec<u8>, B> pub trait PrepareAutomorphismKey<B: Backend>
where where
GGLWESwitchingKeyPrepared<Vec<u8>, B>: PrepareScratchSpace<B, A>, Self: GLWESwitchingKeyPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_automorphism_key_tmp_bytes<A>(&self, infos: &A) -> usize
GGLWESwitchingKeyPrepared::prepare_scratch_space(module, infos) where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos)
}
fn prepare_automorphism_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: AutomorphismKeyPreparedToMut<B> + SetAutomorphismGaloisElement,
O: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
{
self.prepare_glwe_switching(&mut res.to_mut().key, &other.to_ref().key, scratch);
res.set_p(other.p());
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGLWEAutomorphismKey<DR>> for GGLWEAutomorphismKeyPrepared<D, B> impl<B: Backend> PrepareAutomorphismKey<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}
where
Module<B>: VmpPrepare<B>, impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
{ pub fn prepare_tmp_bytes<M>(&self, module: &M) -> usize
fn prepare(&mut self, module: &Module<B>, other: &GGLWEAutomorphismKey<DR>, scratch: &mut Scratch<B>) { where
self.key.prepare(module, &other.key, scratch); M: PrepareAutomorphismKey<B>,
self.p = other.p; {
module.prepare_automorphism_key_tmp_bytes(self)
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GGLWEAutomorphismKeyPrepared<Vec<u8>, B>> for GGLWEAutomorphismKey<D> impl<D: DataMut, B: Backend> AutomorphismKeyPrepared<D, B> {
where pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>, where
{ O: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> { M: PrepareAutomorphismKey<B>,
let mut atk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> = GGLWEAutomorphismKeyPrepared::alloc(module, self); {
atk_prepared.prepare(module, self, scratch); module.prepare_automorphism_key(self, other, scratch);
atk_prepared }
}
pub trait AutomorphismKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> AutomorphismKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> AutomorphismKeyPreparedToMut<B> for AutomorphismKeyPrepared<D, B> {
fn to_mut(&mut self) -> AutomorphismKeyPrepared<&mut [u8], B> {
AutomorphismKeyPrepared {
p: self.p,
key: self.key.to_mut(),
}
}
}
pub trait AutomorphismKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> AutomorphismKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> AutomorphismKeyPreparedToRef<B> for AutomorphismKeyPrepared<D, B> {
fn to_ref(&self) -> AutomorphismKeyPrepared<&[u8], B> {
AutomorphismKeyPrepared {
p: self.p,
key: self.key.to_ref(),
}
} }
} }

View File

@@ -1,25 +1,23 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes}, api::{VmpPMatAlloc, VmpPMatBytesOf, VmpPrepare, VmpPrepareTmpBytes},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, ZnxInfos}, layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
oep::VmpPMatAllocBytesImpl,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, BuildError, Degree, Dnum, Dsize, GGLWECiphertext, GGLWEInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWE, GGLWEInfos, GGLWEToRef, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{Prepare, PrepareAlloc, PrepareScratchSpace},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct GGLWECiphertextPrepared<D: Data, B: Backend> { pub struct GGLWEPrepared<D: Data, B: Backend> {
pub(crate) data: VmpPMat<D, B>, pub(crate) data: VmpPMat<D, B>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) dsize: Dsize, pub(crate) dsize: Dsize,
} }
impl<D: Data, B: Backend> LWEInfos for GGLWECiphertextPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for GGLWEPrepared<D, B> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -35,13 +33,13 @@ impl<D: Data, B: Backend> LWEInfos for GGLWECiphertextPrepared<D, B> {
} }
} }
impl<D: Data, B: Backend> GLWEInfos for GGLWECiphertextPrepared<D, B> { impl<D: Data, B: Backend> GLWEInfos for GGLWEPrepared<D, B> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data, B: Backend> GGLWEInfos for GGLWECiphertextPrepared<D, B> { impl<D: Data, B: Backend> GGLWEInfos for GGLWEPrepared<D, B> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
Rank(self.data.cols_in() as u32) Rank(self.data.cols_in() as u32)
} }
@@ -59,117 +57,47 @@ impl<D: Data, B: Backend> GGLWEInfos for GGLWECiphertextPrepared<D, B> {
} }
} }
pub struct GGLWECiphertextPreparedBuilder<D: Data, B: Backend> { pub trait GGLWEPreparedAlloc<B: Backend>
data: Option<VmpPMat<D, B>>, where
base2k: Option<Base2K>, Self: GetRingDegree + VmpPMatAlloc<B> + VmpPMatBytesOf,
k: Option<TorusPrecision>, {
dsize: Option<Dsize>, fn alloc_gglwe_prepared(
} &self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> GGLWEPrepared<Vec<u8>, B> {
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size as u32 > dsize.0,
"invalid gglwe: ceil(k/base2k): {size} <= dsize: {}",
dsize.0
);
impl<D: Data, B: Backend> GGLWECiphertextPrepared<D, B> { assert!(
#[inline] dnum.0 * dsize.0 <= size as u32,
pub fn builder() -> GGLWECiphertextPreparedBuilder<D, B> { "invalid gglwe: dnum: {} * dsize:{} > ceil(k/base2k): {size}",
GGLWECiphertextPreparedBuilder { dnum.0,
data: None, dsize.0,
base2k: None, );
k: None,
dsize: None,
}
}
}
impl<B: Backend> GGLWECiphertextPreparedBuilder<Vec<u8>, B> { GGLWEPrepared {
#[inline] data: self.vmp_pmat_alloc(dnum.into(), rank_in.into(), (rank_out + 1).into(), size),
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGLWEInfos,
B: VmpPMatAllocBytesImpl<B>,
{
self.data = Some(VmpPMat::alloc(
infos.n().into(),
infos.dnum().into(),
infos.rank_in().into(),
(infos.rank_out() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.dsize = Some(infos.dsize());
self
}
}
impl<D: Data, B: Backend> GGLWECiphertextPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VmpPMat<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn dsize(mut self, dsize: Dsize) -> Self {
self.dsize = Some(dsize);
self
}
pub fn build(self) -> Result<GGLWECiphertextPrepared<D, B>, BuildError> {
let data: VmpPMat<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let dsize: Dsize = self.dsize.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if dsize == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGLWECiphertextPrepared {
data,
base2k,
k, k,
base2k,
dsize, dsize,
}) }
} }
}
impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> { fn alloc_gglwe_prepared_from_infos<A>(&self, infos: &A) -> GGLWEPrepared<Vec<u8>, B>
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAlloc<B>,
{ {
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()"); assert_eq!(self.ring_degree(), infos.n());
Self::alloc_with( self.alloc_gglwe_prepared(
module,
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.rank_in(), infos.rank_in(),
@@ -179,8 +107,61 @@ impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
) )
} }
pub fn alloc_with( fn bytes_of_gglwe_prepared(
module: &Module<B>, &self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize {
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size as u32 > dsize.0,
"invalid gglwe: ceil(k/base2k): {size} <= dsize: {}",
dsize.0
);
assert!(
dnum.0 * dsize.0 <= size as u32,
"invalid gglwe: dnum: {} * dsize:{} > ceil(k/base2k): {size}",
dnum.0,
dsize.0,
);
self.bytes_of_vmp_pmat(dnum.into(), rank_in.into(), (rank_out + 1).into(), size)
}
fn bytes_of_gglwe_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
assert_eq!(self.ring_degree(), infos.n());
self.bytes_of_gglwe_prepared(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
}
impl<B: Backend> GGLWEPreparedAlloc<B> for Module<B> where Module<B>: GetRingDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}
impl<B: Backend> GGLWEPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GGLWEPreparedAlloc<B>,
{
module.alloc_gglwe_prepared_from_infos(infos)
}
pub fn alloc<M>(
module: &M,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
@@ -189,49 +170,21 @@ impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
dsize: Dsize, dsize: Dsize,
) -> Self ) -> Self
where where
Module<B>: VmpPMatAlloc<B>, M: GGLWEPreparedAlloc<B>,
{ {
let size: usize = k.0.div_ceil(base2k.0) as usize; module.alloc_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
debug_assert!(
size as u32 > dsize.0,
"invalid gglwe: ceil(k/base2k): {size} <= dsize: {}",
dsize.0
);
assert!(
dnum.0 * dsize.0 <= size as u32,
"invalid gglwe: dnum: {} * dsize:{} > ceil(k/base2k): {size}",
dnum.0,
dsize.0,
);
Self {
data: module.vmp_pmat_alloc(dnum.into(), rank_in.into(), (rank_out + 1).into(), size),
k,
base2k,
dsize,
}
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes, M: GGLWEPreparedAlloc<B>,
{ {
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()"); module.bytes_of_gglwe_prepared_from_infos(infos)
Self::alloc_bytes_with(
module,
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
} }
pub fn alloc_bytes_with( pub fn bytes_of<M>(
module: &Module<B>, module: &M,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
@@ -240,59 +193,93 @@ impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
dsize: Dsize, dsize: Dsize,
) -> usize ) -> usize
where where
Module<B>: VmpPMatAllocBytes, M: GGLWEPreparedAlloc<B>,
{ {
let size: usize = k.0.div_ceil(base2k.0) as usize; module.bytes_of_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
debug_assert!(
size as u32 > dsize.0,
"invalid gglwe: ceil(k/base2k): {size} <= dsize: {}",
dsize.0
);
assert!(
dnum.0 * dsize.0 <= size as u32,
"invalid gglwe: dnum: {} * dsize:{} > ceil(k/base2k): {size}",
dnum.0,
dsize.0,
);
module.vmp_pmat_alloc_bytes(dnum.into(), rank_in.into(), (rank_out + 1).into(), size)
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for GGLWECiphertextPrepared<Vec<u8>, B> pub trait GGLWEPrepare<B: Backend>
where where
Module<B>: VmpPrepareTmpBytes, Self: GetRingDegree + VmpPrepareTmpBytes + VmpPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_gglwe_tmp_bytes<A>(&self, infos: &A) -> usize
module.vmp_prepare_tmp_bytes( where
A: GGLWEInfos,
{
self.vmp_prepare_tmp_bytes(
infos.dnum().into(), infos.dnum().into(),
infos.rank_in().into(), infos.rank_in().into(),
(infos.rank() + 1).into(), (infos.rank() + 1).into(),
infos.size(), infos.size(),
) )
} }
}
impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGLWECiphertext<DR>> for GGLWECiphertextPrepared<D, B> fn prepare_gglwe<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where where
Module<B>: VmpPrepare<B>, R: GGLWEPreparedToMut<B>,
{ O: GGLWEToRef,
fn prepare(&mut self, module: &Module<B>, other: &GGLWECiphertext<DR>, scratch: &mut Scratch<B>) { {
module.vmp_prepare(&mut self.data, &other.data, scratch); let mut res: GGLWEPrepared<&mut [u8], B> = res.to_mut();
self.k = other.k; let other: GGLWE<&[u8]> = other.to_ref();
self.base2k = other.base2k;
self.dsize = other.dsize; assert_eq!(res.n(), self.ring_degree());
assert_eq!(other.n(), self.ring_degree());
assert_eq!(res.base2k, other.base2k);
assert_eq!(res.k, other.k);
assert_eq!(res.dsize, other.dsize);
self.vmp_prepare(&mut res.data, &other.data, scratch);
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GGLWECiphertextPrepared<Vec<u8>, B>> for GGLWECiphertext<D> impl<B: Backend> GGLWEPrepare<B> for Module<B> where Self: GetRingDegree + VmpPrepareTmpBytes + VmpPrepare<B> {}
where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>, impl<D: DataMut, B: Backend> GGLWEPrepared<D, B> {
{ pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWECiphertextPrepared<Vec<u8>, B> { where
let mut atk_prepared: GGLWECiphertextPrepared<Vec<u8>, B> = GGLWECiphertextPrepared::alloc(module, self); O: GGLWEToRef,
atk_prepared.prepare(module, self, scratch); M: GGLWEPrepare<B>,
atk_prepared {
module.prepare_gglwe(self, other, scratch);
}
}
impl<B: Backend> GGLWEPrepared<Vec<u8>, B> {
pub fn prepare_tmp_bytes<M>(&self, module: &M) -> usize
where
M: GGLWEPrepare<B>,
{
module.prepare_gglwe_tmp_bytes(self)
}
}
pub trait GGLWEPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GGLWEPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> GGLWEPreparedToMut<B> for GGLWEPrepared<D, B> {
fn to_mut(&mut self) -> GGLWEPrepared<&mut [u8], B> {
GGLWEPrepared {
k: self.k,
base2k: self.base2k,
dsize: self.dsize,
data: self.data.to_mut(),
}
}
}
pub trait GGLWEPreparedToRef<B: Backend> {
fn to_ref(&self) -> GGLWEPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> GGLWEPreparedToRef<B> for GGLWEPrepared<D, B> {
fn to_ref(&self) -> GGLWEPrepared<&[u8], B> {
GGLWEPrepared {
k: self.k,
base2k: self.base2k,
dsize: self.dsize,
data: self.data.to_ref(),
}
} }
} }

View File

@@ -1,22 +1,40 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWESwitchingKeySetMetaData, GLWESwitchingKeyToRef, GLWESwtichingKeyGetMetaData,
prepared::{GGLWECiphertextPrepared, Prepare, PrepareAlloc, PrepareScratchSpace}, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{GGLWEPrepare, GGLWEPrepared, GGLWEPreparedAlloc, GGLWEPreparedToMut, GGLWEPreparedToRef},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct GGLWESwitchingKeyPrepared<D: Data, B: Backend> { pub struct GLWESwitchingKeyPrepared<D: Data, B: Backend> {
pub(crate) key: GGLWECiphertextPrepared<D, B>, pub(crate) key: GGLWEPrepared<D, B>,
pub(crate) sk_in_n: usize, // Degree of sk_in pub(crate) sk_in_n: usize, // Degree of sk_in
pub(crate) sk_out_n: usize, // Degree of sk_out pub(crate) sk_out_n: usize, // Degree of sk_out
} }
impl<D: Data, B: Backend> LWEInfos for GGLWESwitchingKeyPrepared<D, B> { impl<D: DataMut, B: Backend> GLWESwitchingKeySetMetaData for GLWESwitchingKeyPrepared<D, B> {
fn n(&self) -> Degree { fn set_sk_in_n(&mut self, sk_in_n: usize) {
self.sk_in_n = sk_in_n
}
fn set_sk_out_n(&mut self, sk_out_n: usize) {
self.sk_out_n = sk_out_n
}
}
impl<D: DataRef, B: Backend> GLWESwtichingKeyGetMetaData for GLWESwitchingKeyPrepared<D, B> {
fn sk_in_n(&self) -> usize {
self.sk_in_n
}
fn sk_out_n(&self) -> usize {
self.sk_out_n
}
}
impl<D: Data, B: Backend> LWEInfos for GLWESwitchingKeyPrepared<D, B> {
fn n(&self) -> RingDegree {
self.key.n() self.key.n()
} }
@@ -33,13 +51,13 @@ impl<D: Data, B: Backend> LWEInfos for GGLWESwitchingKeyPrepared<D, B> {
} }
} }
impl<D: Data, B: Backend> GLWEInfos for GGLWESwitchingKeyPrepared<D, B> { impl<D: Data, B: Backend> GLWEInfos for GLWESwitchingKeyPrepared<D, B> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data, B: Backend> GGLWEInfos for GGLWESwitchingKeyPrepared<D, B> { impl<D: Data, B: Backend> GGLWEInfos for GLWESwitchingKeyPrepared<D, B> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.key.rank_in() self.key.rank_in()
} }
@@ -57,22 +75,80 @@ impl<D: Data, B: Backend> GGLWEInfos for GGLWESwitchingKeyPrepared<D, B> {
} }
} }
impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> { pub trait GLWESwitchingKeyPreparedAlloc<B: Backend>
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self where
where Self: GGLWEPreparedAlloc<B>,
A: GGLWEInfos, {
Module<B>: VmpPMatAlloc<B>, fn alloc_glwe_switching_key_prepared(
{ &self,
debug_assert_eq!(module.n() as u32, infos.n(), "module.n() != infos.n()"); base2k: Base2K,
GGLWESwitchingKeyPrepared::<Vec<u8>, B> { k: TorusPrecision,
key: GGLWECiphertextPrepared::alloc(module, infos), rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> GLWESwitchingKeyPrepared<Vec<u8>, B> {
GLWESwitchingKeyPrepared::<Vec<u8>, B> {
key: self.alloc_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize),
sk_in_n: 0, sk_in_n: 0,
sk_out_n: 0, sk_out_n: 0,
} }
} }
pub fn alloc_with( fn alloc_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWESwitchingKeyPrepared<Vec<u8>, B>
module: &Module<B>, where
A: GGLWEInfos,
{
self.alloc_glwe_switching_key_prepared(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
fn bytes_of_glwe_switching_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
rank_out: Rank,
dnum: Dnum,
dsize: Dsize,
) -> usize {
self.bytes_of_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
}
fn bytes_of_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
self.bytes_of_glwe_switching_key_prepared(
infos.base2k(),
infos.k(),
infos.rank_in(),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
}
impl<B: Backend> GLWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GGLWEPreparedAlloc<B> {}
impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GLWESwitchingKeyPreparedAlloc<B>,
{
module.alloc_glwe_switching_key_prepared_from_infos(infos)
}
pub fn alloc<M>(
module: &M,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
@@ -81,26 +157,21 @@ impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
dsize: Dsize, dsize: Dsize,
) -> Self ) -> Self
where where
Module<B>: VmpPMatAlloc<B>, M: GLWESwitchingKeyPreparedAlloc<B>,
{ {
GGLWESwitchingKeyPrepared::<Vec<u8>, B> { module.alloc_glwe_switching_key_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
key: GGLWECiphertextPrepared::alloc_with(module, base2k, k, rank_in, rank_out, dnum, dsize),
sk_in_n: 0,
sk_out_n: 0,
}
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes, M: GLWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!(module.n() as u32, infos.n(), "module.n() != infos.n()"); module.bytes_of_glwe_switching_key_prepared_from_infos(infos)
GGLWECiphertextPrepared::alloc_bytes(module, infos)
} }
pub fn alloc_bytes_with( pub fn bytes_of<M>(
module: &Module<B>, module: &M,
base2k: Base2K, base2k: Base2K,
k: TorusPrecision, k: TorusPrecision,
rank_in: Rank, rank_in: Rank,
@@ -109,39 +180,79 @@ impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
dsize: Dsize, dsize: Dsize,
) -> usize ) -> usize
where where
Module<B>: VmpPMatAllocBytes, M: GLWESwitchingKeyPreparedAlloc<B>,
{ {
GGLWECiphertextPrepared::alloc_bytes_with(module, base2k, k, rank_in, rank_out, dnum, dsize) module.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for GGLWESwitchingKeyPrepared<Vec<u8>, B> pub trait GLWESwitchingKeyPrepare<B: Backend>
where where
GGLWECiphertextPrepared<Vec<u8>, B>: PrepareScratchSpace<B, A>, Self: GGLWEPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_glwe_switching_key_tmp_bytes<A>(&self, infos: &A) -> usize
GGLWECiphertextPrepared::prepare_scratch_space(module, infos) where
A: GGLWEInfos,
{
self.prepare_gglwe_tmp_bytes(infos)
}
fn prepare_glwe_switching<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GLWESwitchingKeyPreparedToMut<B> + GLWESwitchingKeySetMetaData,
O: GLWESwitchingKeyToRef + GLWESwtichingKeyGetMetaData,
{
self.prepare_gglwe(&mut res.to_mut().key, &other.to_ref().key, scratch);
res.set_sk_in_n(other.sk_in_n());
res.set_sk_out_n(other.sk_out_n());
} }
} }
impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGLWESwitchingKey<DR>> for GGLWESwitchingKeyPrepared<D, B> impl<B: Backend> GLWESwitchingKeyPrepare<B> for Module<B> where Self: GGLWEPrepare<B> {}
where
Module<B>: VmpPrepare<B>, impl<D: DataMut, B: Backend> GLWESwitchingKeyPrepared<D, B> {
{ pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
fn prepare(&mut self, module: &Module<B>, other: &GGLWESwitchingKey<DR>, scratch: &mut Scratch<B>) { where
self.key.prepare(module, &other.key, scratch); O: GLWESwitchingKeyToRef + GLWESwtichingKeyGetMetaData,
self.sk_in_n = other.sk_in_n; M: GLWESwitchingKeyPrepare<B>,
self.sk_out_n = other.sk_out_n; {
module.prepare_glwe_switching(self, other, scratch);
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GGLWESwitchingKeyPrepared<Vec<u8>, B>> for GGLWESwitchingKey<D> impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B> {
where pub fn prepare_tmp_bytes<M>(&self, module: &M) -> usize
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>, where
{ M: GLWESwitchingKeyPrepare<B>,
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWESwitchingKeyPrepared<Vec<u8>, B> { {
let mut atk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, B> = GGLWESwitchingKeyPrepared::alloc(module, self); module.prepare_glwe_switching_key_tmp_bytes(self)
atk_prepared.prepare(module, self, scratch); }
atk_prepared }
pub trait GLWESwitchingKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GLWESwitchingKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> GLWESwitchingKeyPreparedToMut<B> for GLWESwitchingKeyPrepared<D, B> {
fn to_mut(&mut self) -> GLWESwitchingKeyPrepared<&mut [u8], B> {
GLWESwitchingKeyPrepared {
sk_in_n: self.sk_in_n,
sk_out_n: self.sk_out_n,
key: self.key.to_mut(),
}
}
}
pub trait GLWESwitchingKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> GLWESwitchingKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> GLWESwitchingKeyPreparedToRef<B> for GLWESwitchingKeyPrepared<D, B> {
fn to_ref(&self) -> GLWESwitchingKeyPrepared<&[u8], B> {
GLWESwitchingKeyPrepared {
sk_in_n: self.sk_in_n,
sk_out_n: self.sk_out_n,
key: self.key.to_ref(),
}
} }
} }

View File

@@ -1,20 +1,20 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWETensorKey, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, Rank, RingDegree, TensorKey, TensorKeyToRef, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc, PrepareScratchSpace}, prepared::{
GLWESwitchingKeyPrepare, GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedAlloc, GLWESwitchingKeyPreparedToMut,
GLWESwitchingKeyPreparedToRef,
},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct GGLWETensorKeyPrepared<D: Data, B: Backend> { pub struct TensorKeyPrepared<D: Data, B: Backend> {
pub(crate) keys: Vec<GGLWESwitchingKeyPrepared<D, B>>, pub(crate) keys: Vec<GLWESwitchingKeyPrepared<D, B>>,
} }
impl<D: Data, B: Backend> LWEInfos for GGLWETensorKeyPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for TensorKeyPrepared<D, B> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.keys[0].n() self.keys[0].n()
} }
@@ -31,13 +31,13 @@ impl<D: Data, B: Backend> LWEInfos for GGLWETensorKeyPrepared<D, B> {
} }
} }
impl<D: Data, B: Backend> GLWEInfos for GGLWETensorKeyPrepared<D, B> { impl<D: Data, B: Backend> GLWEInfos for TensorKeyPrepared<D, B> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
self.rank_out() self.rank_out()
} }
} }
impl<D: Data, B: Backend> GGLWEInfos for GGLWETensorKeyPrepared<D, B> { impl<D: Data, B: Backend> GGLWEInfos for TensorKeyPrepared<D, B> {
fn rank_in(&self) -> Rank { fn rank_in(&self) -> Rank {
self.rank_out() self.rank_out()
} }
@@ -55,19 +55,36 @@ impl<D: Data, B: Backend> GGLWEInfos for GGLWETensorKeyPrepared<D, B> {
} }
} }
impl<B: Backend> GGLWETensorKeyPrepared<Vec<u8>, B> { pub trait TensorKeyPreparedAlloc<B: Backend>
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self where
Self: GLWESwitchingKeyPreparedAlloc<B>,
{
fn alloc_tensor_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
dnum: Dnum,
dsize: Dsize,
rank: Rank,
) -> TensorKeyPrepared<Vec<u8>, B> {
let pairs: u32 = (((rank.as_u32() + 1) * rank.as_u32()) >> 1).max(1);
TensorKeyPrepared {
keys: (0..pairs)
.map(|_| self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), rank, dnum, dsize))
.collect(),
}
}
fn alloc_tensor_key_prepared_from_infos<A>(&self, infos: &A) -> TensorKeyPrepared<Vec<u8>, B>
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAlloc<B>,
{ {
assert_eq!( assert_eq!(
infos.rank_in(), infos.rank_in(),
infos.rank_out(), infos.rank_out(),
"rank_in != rank_out is not supported for GGLWETensorKeyPrepared" "rank_in != rank_out is not supported for GGLWETensorKeyPrepared"
); );
Self::alloc_with( self.alloc_tensor_key_prepared(
module,
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.dnum(), infos.dnum(),
@@ -76,62 +93,62 @@ impl<B: Backend> GGLWETensorKeyPrepared<Vec<u8>, B> {
) )
} }
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self fn bytes_of_tensor_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
where let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
Module<B>: VmpPMatAlloc<B>, pairs * self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), rank, dnum, dsize)
{
let mut keys: Vec<GGLWESwitchingKeyPrepared<Vec<u8>, B>> = Vec::new();
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
(0..pairs).for_each(|_| {
keys.push(GGLWESwitchingKeyPrepared::alloc_with(
module,
base2k,
k,
Rank(1),
rank,
dnum,
dsize,
));
});
Self { keys }
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize fn bytes_of_tensor_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes,
{ {
assert_eq!( self.bytes_of_tensor_key_prepared(
infos.rank_in(), infos.base2k(),
infos.rank_out(), infos.k(),
"rank_in != rank_out is not supported for GGLWETensorKey" infos.rank(),
); infos.dnum(),
let rank_out: usize = infos.rank_out().into(); infos.dsize(),
let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1); )
pairs
* GGLWESwitchingKeyPrepared::alloc_bytes_with(
module,
infos.base2k(),
infos.k(),
Rank(1),
infos.rank_out(),
infos.dnum(),
infos.dsize(),
)
}
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
pairs * GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, Rank(1), rank, dnum, dsize)
} }
} }
impl<D: DataMut, B: Backend> GGLWETensorKeyPrepared<D, B> { impl<B: Backend> TensorKeyPreparedAlloc<B> for Module<B> where Module<B>: GLWESwitchingKeyPreparedAlloc<B> {}
impl<B: Backend> TensorKeyPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: TensorKeyPreparedAlloc<B>,
{
module.alloc_tensor_key_prepared_from_infos(infos)
}
pub fn alloc_with<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self
where
M: TensorKeyPreparedAlloc<B>,
{
module.alloc_tensor_key_prepared(base2k, k, dnum, dsize, rank)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: TensorKeyPreparedAlloc<B>,
{
module.bytes_of_tensor_key_prepared_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
where
M: TensorKeyPreparedAlloc<B>,
{
module.bytes_of_tensor_key_prepared(base2k, k, rank, dnum, dsize)
}
}
impl<D: DataMut, B: Backend> TensorKeyPrepared<D, B> {
// Returns a mutable reference to GLWESwitchingKey_{s}(s[i] * s[j]) // Returns a mutable reference to GLWESwitchingKey_{s}(s[i] * s[j])
pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GGLWESwitchingKeyPrepared<D, B> { pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GLWESwitchingKeyPrepared<D, B> {
if i > j { if i > j {
std::mem::swap(&mut i, &mut j); std::mem::swap(&mut i, &mut j);
}; };
@@ -140,9 +157,9 @@ impl<D: DataMut, B: Backend> GGLWETensorKeyPrepared<D, B> {
} }
} }
impl<D: DataRef, B: Backend> GGLWETensorKeyPrepared<D, B> { impl<D: DataRef, B: Backend> TensorKeyPrepared<D, B> {
// Returns a reference to GLWESwitchingKey_{s}(s[i] * s[j]) // Returns a reference to GLWESwitchingKey_{s}(s[i] * s[j])
pub fn at(&self, mut i: usize, mut j: usize) -> &GGLWESwitchingKeyPrepared<D, B> { pub fn at(&self, mut i: usize, mut j: usize) -> &GLWESwitchingKeyPrepared<D, B> {
if i > j { if i > j {
std::mem::swap(&mut i, &mut j); std::mem::swap(&mut i, &mut j);
}; };
@@ -151,40 +168,81 @@ impl<D: DataRef, B: Backend> GGLWETensorKeyPrepared<D, B> {
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for GGLWETensorKeyPrepared<Vec<u8>, B> pub trait TensorKeyPrepare<B: Backend>
where where
GGLWESwitchingKeyPrepared<Vec<u8>, B>: PrepareScratchSpace<B, A>, Self: GLWESwitchingKeyPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_tensor_key_tmp_bytes<A>(&self, infos: &A) -> usize
GGLWESwitchingKeyPrepared::prepare_scratch_space(module, infos) where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos)
} }
}
impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGLWETensorKey<DR>> for GGLWETensorKeyPrepared<D, B> fn prepare_tensor_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where where
Module<B>: VmpPrepare<B>, R: TensorKeyPreparedToMut<B>,
{ O: TensorKeyToRef,
fn prepare(&mut self, module: &Module<B>, other: &GGLWETensorKey<DR>, scratch: &mut Scratch<B>) { {
#[cfg(debug_assertions)] let mut res: TensorKeyPrepared<&mut [u8], B> = res.to_mut();
{ let other: TensorKey<&[u8]> = other.to_ref();
assert_eq!(self.keys.len(), other.keys.len());
assert_eq!(res.keys.len(), other.keys.len());
for (a, b) in res.keys.iter_mut().zip(other.keys.iter()) {
self.prepare_glwe_switching(a, b, scratch);
} }
self.keys
.iter_mut()
.zip(other.keys.iter())
.for_each(|(a, b)| {
a.prepare(module, b, scratch);
});
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GGLWETensorKeyPrepared<Vec<u8>, B>> for GGLWETensorKey<D> impl<B: Backend> TensorKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}
where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>, impl<B: Backend> TensorKeyPrepared<Vec<u8>, B> {
{ fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A) -> usize
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWETensorKeyPrepared<Vec<u8>, B> { where
let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> = GGLWETensorKeyPrepared::alloc(module, self); A: GGLWEInfos,
tsk_prepared.prepare(module, self, scratch); M: TensorKeyPrepare<B>,
tsk_prepared {
module.prepare_tensor_key_tmp_bytes(infos)
}
}
impl<D: DataMut, B: Backend> TensorKeyPrepared<D, B> {
fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
where
O: TensorKeyToRef,
M: TensorKeyPrepare<B>,
{
module.prepare_tensor_key(self, other, scratch);
}
}
pub trait TensorKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> TensorKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> TensorKeyPreparedToMut<B> for TensorKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToMut<B>,
{
fn to_mut(&mut self) -> TensorKeyPrepared<&mut [u8], B> {
TensorKeyPrepared {
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
}
}
}
pub trait TensorKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> TensorKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> TensorKeyPreparedToRef<B> for TensorKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToRef<B>,
{
fn to_ref(&self) -> TensorKeyPrepared<&[u8], B> {
TensorKeyPrepared {
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
}
} }
} }

View File

@@ -1,25 +1,23 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes}, api::{VmpPMatAlloc, VmpPMatBytesOf, VmpPrepare, VmpPrepareTmpBytes},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToRef, ZnxInfos}, layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
oep::VmpPMatAllocBytesImpl,
}; };
use crate::layouts::{ use crate::layouts::{
Base2K, BuildError, Degree, Dnum, Dsize, GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGSW, GGSWInfos, GGSWToRef, GLWEInfos, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{Prepare, PrepareAlloc, PrepareScratchSpace},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct GGSWCiphertextPrepared<D: Data, B: Backend> { pub struct GGSWPrepared<D: Data, B: Backend> {
pub(crate) data: VmpPMat<D, B>, pub(crate) data: VmpPMat<D, B>,
pub(crate) k: TorusPrecision, pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K, pub(crate) base2k: Base2K,
pub(crate) dsize: Dsize, pub(crate) dsize: Dsize,
} }
impl<D: Data, B: Backend> LWEInfos for GGSWCiphertextPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for GGSWPrepared<D, B> {
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -35,13 +33,13 @@ impl<D: Data, B: Backend> LWEInfos for GGSWCiphertextPrepared<D, B> {
} }
} }
impl<D: Data, B: Backend> GLWEInfos for GGSWCiphertextPrepared<D, B> { impl<D: Data, B: Backend> GLWEInfos for GGSWPrepared<D, B> {
fn rank(&self) -> Rank { fn rank(&self) -> Rank {
Rank(self.data.cols_out() as u32 - 1) Rank(self.data.cols_out() as u32 - 1)
} }
} }
impl<D: Data, B: Backend> GGSWInfos for GGSWCiphertextPrepared<D, B> { impl<D: Data, B: Backend> GGSWInfos for GGSWPrepared<D, B> {
fn dsize(&self) -> Dsize { fn dsize(&self) -> Dsize {
self.dsize self.dsize
} }
@@ -51,143 +49,18 @@ impl<D: Data, B: Backend> GGSWInfos for GGSWCiphertextPrepared<D, B> {
} }
} }
pub struct GGSWCiphertextPreparedBuilder<D: Data, B: Backend> { pub trait GGSWPreparedAlloc<B: Backend>
data: Option<VmpPMat<D, B>>, where
base2k: Option<Base2K>, Self: GetRingDegree + VmpPMatAlloc<B> + VmpPMatBytesOf,
k: Option<TorusPrecision>, {
dsize: Option<Dsize>, fn alloc_ggsw_prepared(
} &self,
base2k: Base2K,
impl<D: Data, B: Backend> GGSWCiphertextPrepared<D, B> { k: TorusPrecision,
#[inline] dnum: Dnum,
pub fn builder() -> GGSWCiphertextPreparedBuilder<D, B> { dsize: Dsize,
GGSWCiphertextPreparedBuilder { rank: Rank,
data: None, ) -> GGSWPrepared<Vec<u8>, B> {
base2k: None,
k: None,
dsize: None,
}
}
}
impl<B: Backend> GGSWCiphertextPreparedBuilder<Vec<u8>, B> {
#[inline]
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGSWInfos,
B: VmpPMatAllocBytesImpl<B>,
{
debug_assert!(
infos.size() as u32 > infos.dsize().0,
"invalid ggsw: ceil(k/base2k): {} <= dsize: {}",
infos.size(),
infos.dsize()
);
assert!(
infos.dnum().0 * infos.dsize().0 <= infos.size() as u32,
"invalid ggsw: dnum: {} * dsize:{} > ceil(k/base2k): {}",
infos.dnum(),
infos.dsize(),
infos.size(),
);
self.data = Some(VmpPMat::alloc(
infos.n().into(),
infos.dnum().into(),
(infos.rank() + 1).into(),
(infos.rank() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.dsize = Some(infos.dsize());
self
}
}
impl<D: Data, B: Backend> GGSWCiphertextPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VmpPMat<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn dsize(mut self, dsize: Dsize) -> Self {
self.dsize = Some(dsize);
self
}
pub fn build(self) -> Result<GGSWCiphertextPrepared<D, B>, BuildError> {
let data: VmpPMat<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let dsize: Dsize = self.dsize.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if dsize == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGSWCiphertextPrepared {
data,
base2k,
k,
dsize,
})
}
}
impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGSWInfos,
Module<B>: VmpPMatAlloc<B>,
{
Self::alloc_with(
module,
infos.base2k(),
infos.k(),
infos.dnum(),
infos.dsize(),
infos.rank(),
)
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -202,8 +75,8 @@ impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
dsize.0, dsize.0,
); );
Self { GGSWPrepared {
data: module.vmp_pmat_alloc( data: self.vmp_pmat_alloc(
dnum.into(), dnum.into(),
(rank + 1).into(), (rank + 1).into(),
(rank + 1).into(), (rank + 1).into(),
@@ -215,13 +88,12 @@ impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
} }
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize fn alloc_ggsw_prepared_from_infos<A>(&self, infos: &A) -> GGSWPrepared<Vec<u8>, B>
where where
A: GGSWInfos, A: GGSWInfos,
Module<B>: VmpPMatAllocBytes,
{ {
Self::alloc_bytes_with( assert_eq!(self.ring_degree(), infos.n());
module, self.alloc_ggsw_prepared(
infos.base2k(), infos.base2k(),
infos.k(), infos.k(),
infos.dnum(), infos.dnum(),
@@ -230,10 +102,7 @@ impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
) )
} }
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize fn bytes_of_ggsw_prepared(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize {
where
Module<B>: VmpPMatAllocBytes,
{
let size: usize = k.0.div_ceil(base2k.0) as usize; let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!( debug_assert!(
size as u32 > dsize.0, size as u32 > dsize.0,
@@ -248,65 +117,144 @@ impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
dsize.0, dsize.0,
); );
module.vmp_pmat_alloc_bytes(dnum.into(), (rank + 1).into(), (rank + 1).into(), size) self.bytes_of_vmp_pmat(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
}
fn bytes_of_ggsw_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGSWInfos,
{
assert_eq!(self.ring_degree(), infos.n());
self.bytes_of_ggsw_prepared(
infos.base2k(),
infos.k(),
infos.dnum(),
infos.dsize(),
infos.rank(),
)
} }
} }
impl<D: DataRef, B: Backend> GGSWCiphertextPrepared<D, B> { impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Self: GetRingDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}
impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGSWInfos,
M: GGSWPreparedAlloc<B>,
{
module.alloc_ggsw_prepared_from_infos(infos)
}
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self
where
M: GGSWPreparedAlloc<B>,
{
module.alloc_ggsw_prepared(base2k, k, dnum, dsize, rank)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGSWInfos,
M: GGSWPreparedAlloc<B>,
{
module.bytes_of_ggsw_prepared_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize
where
M: GGSWPreparedAlloc<B>,
{
module.bytes_of_ggsw_prepared(base2k, k, dnum, dsize, rank)
}
}
impl<D: DataRef, B: Backend> GGSWPrepared<D, B> {
pub fn data(&self) -> &VmpPMat<D, B> { pub fn data(&self) -> &VmpPMat<D, B> {
&self.data &self.data
} }
} }
impl<B: Backend, A: GGSWInfos> PrepareScratchSpace<B, A> for GGSWCiphertextPrepared<Vec<u8>, B> pub trait GGSWPrepare<B: Backend>
where where
Module<B>: VmpPrepareTmpBytes, Self: GetRingDegree + VmpPrepareTmpBytes + VmpPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn ggsw_prepare_tmp_bytes<A>(&self, infos: &A) -> usize
module.vmp_prepare_tmp_bytes( where
A: GGSWInfos,
{
assert_eq!(self.ring_degree(), infos.n());
self.vmp_prepare_tmp_bytes(
infos.dnum().into(), infos.dnum().into(),
(infos.rank() + 1).into(), (infos.rank() + 1).into(),
(infos.rank() + 1).into(), (infos.rank() + 1).into(),
infos.size(), infos.size(),
) )
} }
} fn ggsw_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGSWCiphertext<DR>> for GGSWCiphertextPrepared<D, B> R: GGSWPreparedToMut<B>,
where O: GGSWToRef,
Module<B>: VmpPrepare<B>, {
{ let mut res: GGSWPrepared<&mut [u8], B> = res.to_mut();
fn prepare(&mut self, module: &Module<B>, other: &GGSWCiphertext<DR>, scratch: &mut Scratch<B>) { let other: GGSW<&[u8]> = other.to_ref();
module.vmp_prepare(&mut self.data, &other.data, scratch); assert_eq!(res.n(), self.ring_degree());
self.k = other.k; assert_eq!(other.n(), self.ring_degree());
self.base2k = other.base2k; assert_eq!(res.k, other.k);
self.dsize = other.dsize; assert_eq!(res.base2k, other.base2k);
assert_eq!(res.dsize, other.dsize);
self.vmp_prepare(&mut res.data, &other.data, scratch);
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GGSWCiphertextPrepared<Vec<u8>, B>> for GGSWCiphertext<D> impl<B: Backend> GGSWPrepare<B> for Module<B> where Self: GetRingDegree + VmpPrepareTmpBytes + VmpPrepare<B> {}
where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>, impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
{ pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A) -> usize
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGSWCiphertextPrepared<Vec<u8>, B> { where
let mut ggsw_prepared: GGSWCiphertextPrepared<Vec<u8>, B> = GGSWCiphertextPrepared::alloc(module, self); A: GGSWInfos,
ggsw_prepared.prepare(module, self, scratch); M: GGSWPrepare<B>,
ggsw_prepared {
module.ggsw_prepare_tmp_bytes(infos)
} }
} }
pub trait GGSWCiphertextPreparedToRef<B: Backend> { impl<D: DataMut, B: Backend> GGSWPrepared<D, B> {
fn to_ref(&self) -> GGSWCiphertextPrepared<&[u8], B>; pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
} where
O: GGSWToRef,
impl<D: DataRef, B: Backend> GGSWCiphertextPreparedToRef<B> for GGSWCiphertextPrepared<D, B> { M: GGSWPrepare<B>,
fn to_ref(&self) -> GGSWCiphertextPrepared<&[u8], B> { {
GGSWCiphertextPrepared::builder() module.ggsw_prepare(self, other, scratch);
.base2k(self.base2k()) }
.dsize(self.dsize()) }
.k(self.k())
.data(self.data.to_ref()) pub trait GGSWPreparedToMut<B: Backend> {
.build() fn to_mut(&mut self) -> GGSWPrepared<&mut [u8], B>;
.unwrap() }
impl<D: DataMut, B: Backend> GGSWPreparedToMut<B> for GGSWPrepared<D, B> {
fn to_mut(&mut self) -> GGSWPrepared<&mut [u8], B> {
GGSWPrepared {
base2k: self.base2k,
k: self.k,
dsize: self.dsize,
data: self.data.to_mut(),
}
}
}
pub trait GGSWPreparedToRef<B: Backend> {
fn to_ref(&self) -> GGSWPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> GGSWPreparedToRef<B> for GGSWPrepared<D, B> {
fn to_ref(&self) -> GGSWPrepared<&[u8], B> {
GGSWPrepared {
base2k: self.base2k,
k: self.k,
dsize: self.dsize,
data: self.data.to_ref(),
}
} }
} }

View File

@@ -1,14 +1,12 @@
use poulpy_hal::{ use poulpy_hal::{
api::{VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply}, api::{VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VecZnxDft, ZnxInfos}, layouts::{Backend, Data, DataMut, DataRef, Module, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos},
oep::VecZnxDftAllocBytesImpl,
}; };
use crate::{ use crate::{
dist::Distribution, dist::Distribution,
layouts::{ layouts::{
Base2K, BuildError, Degree, GLWEInfos, GLWEPublicKey, LWEInfos, Rank, TorusPrecision, Base2K, GLWEInfos, GLWEPublicKey, GLWEPublicKeyToRef, GetDist, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{Prepare, PrepareAlloc, PrepareScratchSpace},
}, },
}; };
@@ -20,6 +18,16 @@ pub struct GLWEPublicKeyPrepared<D: Data, B: Backend> {
pub(crate) dist: Distribution, pub(crate) dist: Distribution,
} }
pub(crate) trait SetDist {
fn set_dist(&mut self, dist: Distribution);
}
impl<D: Data, B: Backend> SetDist for GLWEPublicKeyPrepared<D, B> {
fn set_dist(&mut self, dist: Distribution) {
self.dist = dist
}
}
impl<D: Data, B: Backend> LWEInfos for GLWEPublicKeyPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for GLWEPublicKeyPrepared<D, B> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
self.base2k self.base2k
@@ -33,8 +41,8 @@ impl<D: Data, B: Backend> LWEInfos for GLWEPublicKeyPrepared<D, B> {
self.data.size() self.data.size()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
} }
@@ -44,164 +52,138 @@ impl<D: Data, B: Backend> GLWEInfos for GLWEPublicKeyPrepared<D, B> {
} }
} }
pub struct GLWEPublicKeyPreparedBuilder<D: Data, B: Backend> { pub trait GLWEPublicKeyPreparedAlloc<B: Backend>
data: Option<VecZnxDft<D, B>>, where
base2k: Option<Base2K>, Self: GetRingDegree + VecZnxDftAlloc<B> + VecZnxDftBytesOf,
k: Option<TorusPrecision>, {
} fn alloc_glwe_public_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> GLWEPublicKeyPrepared<Vec<u8>, B> {
GLWEPublicKeyPrepared {
impl<D: Data, B: Backend> GLWEPublicKeyPrepared<D, B> { data: self.vec_znx_dft_alloc((rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
#[inline]
pub fn builder() -> GLWEPublicKeyPreparedBuilder<D, B> {
GLWEPublicKeyPreparedBuilder {
data: None,
base2k: None,
k: None,
}
}
}
impl<B: Backend> GLWEPublicKeyPreparedBuilder<Vec<u8>, B> {
#[inline]
pub fn layout<A>(mut self, layout: &A) -> Self
where
A: GLWEInfos,
B: VecZnxDftAllocBytesImpl<B>,
{
self.data = Some(VecZnxDft::alloc(
layout.n().into(),
(layout.rank() + 1).into(),
layout.size(),
));
self.base2k = Some(layout.base2k());
self.k = Some(layout.k());
self
}
}
impl<D: Data, B: Backend> GLWEPublicKeyPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VecZnxDft<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
pub fn build(self) -> Result<GLWEPublicKeyPrepared<D, B>, BuildError> {
let data: VecZnxDft<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GLWEPublicKeyPrepared {
data,
base2k, base2k,
k, k,
dist: Distribution::NONE, dist: Distribution::NONE,
}) }
}
fn alloc_glwe_public_key_prepared_from_infos<A>(&self, infos: &A) -> GLWEPublicKeyPrepared<Vec<u8>, B>
where
A: GLWEInfos,
{
self.alloc_glwe_public_key_prepared(infos.base2k(), infos.k(), infos.rank())
}
fn bytes_of_glwe_public_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
self.bytes_of_vec_znx_dft((rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
}
fn bytes_of_glwe_public_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GLWEInfos,
{
self.bytes_of_glwe_public_key_prepared(infos.base2k(), infos.k(), infos.rank())
} }
} }
impl<B: Backend> GLWEPublicKeyPreparedAlloc<B> for Module<B> where Self: VecZnxDftAlloc<B> + VecZnxDftBytesOf {}
impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B> { impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: VecZnxDftAlloc<B>, M: GLWEPublicKeyPreparedAlloc<B>,
{ {
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()"); module.alloc_glwe_public_key_prepared_from_infos(infos)
Self::alloc_with(module, infos.base2k(), infos.k(), infos.rank())
} }
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
where where
Module<B>: VecZnxDftAlloc<B>, M: GLWEPublicKeyPreparedAlloc<B>,
{ {
Self { module.alloc_glwe_public_key_prepared(base2k, k, rank)
data: module.vec_znx_dft_alloc((rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
base2k,
k,
dist: Distribution::NONE,
}
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: VecZnxDftAllocBytes, M: GLWEPublicKeyPreparedAlloc<B>,
{ {
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()"); module.bytes_of_glwe_public_key_prepared_from_infos(infos)
Self::alloc_bytes_with(module, infos.base2k(), infos.k(), infos.rank())
} }
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
where where
Module<B>: VecZnxDftAllocBytes, M: GLWEPublicKeyPreparedAlloc<B>,
{ {
module.vec_znx_dft_alloc_bytes((rank + 1).into(), k.0.div_ceil(base2k.0) as usize) module.bytes_of_glwe_public_key_prepared(base2k, k, rank)
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GLWEPublicKeyPrepared<Vec<u8>, B>> for GLWEPublicKey<D> pub trait GLWEPublicKeyPrepare<B: Backend>
where where
Module<B>: VecZnxDftAlloc<B> + VecZnxDftApply<B>, Self: GetRingDegree + VecZnxDftApply<B>,
{ {
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GLWEPublicKeyPrepared<Vec<u8>, B> { fn prepare_glwe_public_key<R, O>(&self, res: &mut R, other: &O)
let mut pk_prepared: GLWEPublicKeyPrepared<Vec<u8>, B> = GLWEPublicKeyPrepared::alloc(module, self); where
pk_prepared.prepare(module, self, scratch); R: GLWEPublicKeyPreparedToMut<B> + SetDist,
pk_prepared O: GLWEPublicKeyToRef + GetDist,
} {
}
impl<B: Backend, A: GLWEInfos> PrepareScratchSpace<B, A> for GLWEPublicKeyPrepared<Vec<u8>, B> {
fn prepare_scratch_space(_module: &Module<B>, _infos: &A) -> usize {
0
}
}
impl<DM: DataMut, DR: DataRef, B: Backend> Prepare<B, GLWEPublicKey<DR>> for GLWEPublicKeyPrepared<DM, B>
where
Module<B>: VecZnxDftApply<B>,
{
fn prepare(&mut self, module: &Module<B>, other: &GLWEPublicKey<DR>, _scratch: &mut Scratch<B>) {
#[cfg(debug_assertions)]
{ {
assert_eq!(self.n(), other.n()); let mut res: GLWEPublicKeyPrepared<&mut [u8], B> = res.to_mut();
assert_eq!(self.size(), other.size()); let other: GLWEPublicKey<&[u8]> = other.to_ref();
assert_eq!(res.n(), self.ring_degree());
assert_eq!(other.n(), self.ring_degree());
assert_eq!(res.size(), other.size());
assert_eq!(res.k(), other.k());
assert_eq!(res.base2k(), other.base2k());
for i in 0..(res.rank() + 1).into() {
self.vec_znx_dft_apply(1, 0, &mut res.data, i, &other.data, i);
}
} }
(0..(self.rank() + 1).into()).for_each(|i| { res.set_dist(other.get_dist());
module.vec_znx_dft_apply(1, 0, &mut self.data, i, &other.data, i); }
}); }
self.k = other.k();
self.base2k = other.base2k(); impl<B: Backend> GLWEPublicKeyPrepare<B> for Module<B> where Self: GetRingDegree + VecZnxDftApply<B> {}
self.dist = other.dist;
impl<D: DataMut, B: Backend> GLWEPublicKeyPrepared<D, B> {
pub fn prepare<O, M>(&mut self, module: &M, other: &O)
where
O: GLWEPublicKeyToRef + GetDist,
M: GLWEPublicKeyPrepare<B>,
{
module.prepare_glwe_public_key(self, other);
}
}
pub trait GLWEPublicKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GLWEPublicKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> GLWEPublicKeyPreparedToMut<B> for GLWEPublicKeyPrepared<D, B> {
fn to_mut(&mut self) -> GLWEPublicKeyPrepared<&mut [u8], B> {
GLWEPublicKeyPrepared {
dist: self.dist,
k: self.k,
base2k: self.base2k,
data: self.data.to_mut(),
}
}
}
pub trait GLWEPublicKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> GLWEPublicKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> GLWEPublicKeyPreparedToRef<B> for GLWEPublicKeyPrepared<D, B> {
fn to_ref(&self) -> GLWEPublicKeyPrepared<&[u8], B> {
GLWEPublicKeyPrepared {
data: self.data.to_ref(),
dist: self.dist,
k: self.k,
base2k: self.base2k,
}
} }
} }

View File

@@ -1,13 +1,13 @@
use poulpy_hal::{ use poulpy_hal::{
api::{SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare}, api::{SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, SvpPPol, ZnxInfos}, layouts::{Backend, Data, DataMut, DataRef, Module, SvpPPol, SvpPPolToMut, SvpPPolToRef, ZnxInfos},
}; };
use crate::{ use crate::{
dist::Distribution, dist::Distribution,
layouts::{ layouts::{
Base2K, Degree, GLWEInfos, GLWESecret, LWEInfos, Rank, TorusPrecision, Base2K, GLWEInfos, GLWESecret, GLWESecretToRef, GetDist, GetRingDegree, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{Prepare, PrepareAlloc, PrepareScratchSpace}, prepared::SetDist,
}, },
}; };
@@ -16,6 +16,12 @@ pub struct GLWESecretPrepared<D: Data, B: Backend> {
pub(crate) dist: Distribution, pub(crate) dist: Distribution,
} }
impl<D: DataRef, B: Backend> SetDist for GLWESecretPrepared<D, B> {
fn set_dist(&mut self, dist: Distribution) {
self.dist = dist
}
}
impl<D: Data, B: Backend> LWEInfos for GLWESecretPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for GLWESecretPrepared<D, B> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
Base2K(0) Base2K(0)
@@ -25,8 +31,8 @@ impl<D: Data, B: Backend> LWEInfos for GLWESecretPrepared<D, B> {
TorusPrecision(0) TorusPrecision(0)
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@@ -38,46 +44,74 @@ impl<D: Data, B: Backend> GLWEInfos for GLWESecretPrepared<D, B> {
Rank(self.data.cols() as u32) Rank(self.data.cols() as u32)
} }
} }
impl<B: Backend> GLWESecretPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GLWEInfos,
Module<B>: SvpPPolAlloc<B>,
{
assert_eq!(module.n() as u32, infos.n());
Self::alloc_with(module, infos.rank())
}
pub fn alloc_with(module: &Module<B>, rank: Rank) -> Self pub trait GLWESecretPreparedAlloc<B: Backend>
where where
Module<B>: SvpPPolAlloc<B>, Self: GetRingDegree + SvpPPolBytesOf + SvpPPolAlloc<B>,
{ {
Self { fn alloc_glwe_secret_prepared(&self, rank: Rank) -> GLWESecretPrepared<Vec<u8>, B> {
data: module.svp_ppol_alloc(rank.into()), GLWESecretPrepared {
data: self.svp_ppol_alloc(rank.into()),
dist: Distribution::NONE, dist: Distribution::NONE,
} }
} }
fn alloc_glwe_secret_prepared_from_infos<A>(&self, infos: &A) -> GLWESecretPrepared<Vec<u8>, B>
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where where
A: GLWEInfos, A: GLWEInfos,
Module<B>: SvpPPolAllocBytes,
{ {
assert_eq!(module.n() as u32, infos.n()); assert_eq!(self.ring_degree(), infos.n());
Self::alloc_bytes_with(module, infos.rank()) self.alloc_glwe_secret_prepared(infos.rank())
} }
pub fn alloc_bytes_with(module: &Module<B>, rank: Rank) -> usize fn bytes_of_glwe_secret(&self, rank: Rank) -> usize {
self.bytes_of_svp_ppol(rank.into())
}
fn bytes_of_glwe_secret_from_infos<A>(&self, infos: &A) -> usize
where where
Module<B>: SvpPPolAllocBytes, A: GLWEInfos,
{ {
module.svp_ppol_alloc_bytes(rank.into()) assert_eq!(self.ring_degree(), infos.n());
self.bytes_of_glwe_secret(infos.rank())
}
}
impl<B: Backend> GLWESecretPreparedAlloc<B> for Module<B> where Self: GetRingDegree + SvpPPolBytesOf + SvpPPolAlloc<B> {}
impl<B: Backend> GLWESecretPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GLWEInfos,
M: GLWESecretPreparedAlloc<B>,
{
module.alloc_glwe_secret_prepared_from_infos(infos)
}
pub fn alloc<M>(module: &M, rank: Rank) -> Self
where
M: GLWESecretPreparedAlloc<B>,
{
module.alloc_glwe_secret_prepared(rank)
}
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GLWEInfos,
M: GLWESecretPreparedAlloc<B>,
{
module.bytes_of_glwe_secret_from_infos(infos)
}
pub fn bytes_of<M>(module: &M, rank: Rank) -> usize
where
M: GLWESecretPreparedAlloc<B>,
{
module.bytes_of_glwe_secret(rank)
} }
} }
impl<D: Data, B: Backend> GLWESecretPrepared<D, B> { impl<D: Data, B: Backend> GLWESecretPrepared<D, B> {
pub fn n(&self) -> Degree { pub fn n(&self) -> RingDegree {
Degree(self.data.n() as u32) RingDegree(self.data.n() as u32)
} }
pub fn rank(&self) -> Rank { pub fn rank(&self) -> Rank {
@@ -85,31 +119,62 @@ impl<D: Data, B: Backend> GLWESecretPrepared<D, B> {
} }
} }
impl<B: Backend, A: GLWEInfos> PrepareScratchSpace<B, A> for GLWESecretPrepared<Vec<u8>, B> { pub trait GLWESecretPrepare<B: Backend>
fn prepare_scratch_space(_module: &Module<B>, _infos: &A) -> usize { where
0 Self: SvpPrepare<B>,
{
fn prepare_glwe_secret<R, O>(&self, res: &mut R, other: &O)
where
R: GLWESecretPreparedToMut<B> + SetDist,
O: GLWESecretToRef + GetDist,
{
{
let mut res: GLWESecretPrepared<&mut [u8], _> = res.to_mut();
let other: GLWESecret<&[u8]> = other.to_ref();
for i in 0..res.rank().into() {
self.svp_prepare(&mut res.data, i, &other.data, i);
}
}
res.set_dist(other.get_dist());
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GLWESecretPrepared<Vec<u8>, B>> for GLWESecret<D> impl<B: Backend> GLWESecretPrepare<B> for Module<B> where Self: SvpPrepare<B> {}
where
Module<B>: SvpPrepare<B> + SvpPPolAlloc<B>, impl<D: DataMut, B: Backend> GLWESecretPrepared<D, B> {
{ pub fn prepare<M, O>(&mut self, module: &M, other: &O)
fn prepare_alloc(&self, module: &Module<B>, _scratch: &mut Scratch<B>) -> GLWESecretPrepared<Vec<u8>, B> { where
let mut sk_dft: GLWESecretPrepared<Vec<u8>, B> = GLWESecretPrepared::alloc(module, self); M: GLWESecretPrepare<B>,
sk_dft.prepare(module, self, _scratch); O: GLWESecretToRef + GetDist,
sk_dft {
module.prepare_glwe_secret(self, other);
} }
} }
impl<DM: DataMut, DR: DataRef, B: Backend> Prepare<B, GLWESecret<DR>> for GLWESecretPrepared<DM, B> pub trait GLWESecretPreparedToRef<B: Backend> {
where fn to_ref(&self) -> GLWESecretPrepared<&[u8], B>;
Module<B>: SvpPrepare<B>, }
{
fn prepare(&mut self, module: &Module<B>, other: &GLWESecret<DR>, _scratch: &mut Scratch<B>) { impl<D: DataRef, B: Backend> GLWESecretPreparedToRef<B> for GLWESecretPrepared<D, B> {
(0..self.rank().into()).for_each(|i| { fn to_ref(&self) -> GLWESecretPrepared<&[u8], B> {
module.svp_prepare(&mut self.data, i, &other.data, i); GLWESecretPrepared {
}); data: self.data.to_ref(),
self.dist = other.dist dist: self.dist,
}
}
}
pub trait GLWESecretPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GLWESecretPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> GLWESecretPreparedToMut<B> for GLWESecretPrepared<D, B> {
fn to_mut(&mut self) -> GLWESecretPrepared<&mut [u8], B> {
GLWESecretPrepared {
dist: self.dist,
data: self.data.to_mut(),
}
} }
} }

View File

@@ -1,15 +1,15 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWEToLWEKey, LWEInfos, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, GLWEToLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc, PrepareScratchSpace}, prepared::{
GLWESwitchingKeyPrepare, GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedAlloc, GLWESwitchingKeyPreparedToMut,
GLWESwitchingKeyPreparedToRef,
},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct GLWEToLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWESwitchingKeyPrepared<D, B>); pub struct GLWEToLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);
impl<D: Data, B: Backend> LWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -20,7 +20,7 @@ impl<D: Data, B: Backend> LWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -53,91 +53,156 @@ impl<D: Data, B: Backend> GGLWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
} }
} }
pub trait GLWEToLWESwitchingKeyPreparedAlloc<B: Backend>
where
Self: GLWESwitchingKeyPreparedAlloc<B>,
{
fn alloc_glwe_to_lwe_switching_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
dnum: Dnum,
) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
GLWEToLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
}
fn alloc_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
self.alloc_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
}
fn bytes_of_glwe_to_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1))
}
fn bytes_of_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
self.bytes_of_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
}
}
impl<B: Backend> GLWEToLWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GLWESwitchingKeyPreparedAlloc<B> {}
impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> { impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAlloc<B>, M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!( module.alloc_glwe_to_lwe_switching_key_prepared_from_infos(infos)
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
Self(GGLWESwitchingKeyPrepared::alloc(module, infos))
} }
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
where where
Module<B>: VmpPMatAlloc<B>, M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
{ {
Self(GGLWESwitchingKeyPrepared::alloc_with( module.alloc_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
module,
base2k,
k,
rank_in,
Rank(1),
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes, M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!( module.bytes_of_glwe_to_lwe_switching_key_prepared_from_infos(infos)
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
} }
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize
where where
Module<B>: VmpPMatAllocBytes, M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
{ {
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rank_in, Rank(1), dnum, Dsize(1)) module.bytes_of_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> pub trait GLWEToLWESwitchingKeyPrepare<B: Backend>
where where
GGLWESwitchingKeyPrepared<Vec<u8>, B>: PrepareScratchSpace<B, A>, Self: GLWESwitchingKeyPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_glwe_to_lwe_switching_key_tmp_bytes<A>(&self, infos: &A)
GGLWESwitchingKeyPrepared::prepare_scratch_space(module, infos) where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos);
}
fn prepare_glwe_to_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GLWEToLWESwitchingKeyPreparedToMut<B>,
O: GLWEToLWESwitchingKeyToRef,
{
self.prepare_glwe_switching(&mut res.to_mut().0, &other.to_ref().0, scratch);
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>> for GLWEToLWEKey<D> impl<B: Backend> GLWEToLWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}
where
Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>, impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
{ pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> { where
let mut ksk_prepared: GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> = GLWEToLWESwitchingKeyPrepared::alloc(module, self); A: GGLWEInfos,
ksk_prepared.prepare(module, self, scratch); M: GLWEToLWESwitchingKeyPrepare<B>,
ksk_prepared {
module.prepare_glwe_to_lwe_switching_key_tmp_bytes(infos);
} }
} }
impl<DM: DataMut, DR: DataRef, B: Backend> Prepare<B, GLWEToLWEKey<DR>> for GLWEToLWESwitchingKeyPrepared<DM, B> impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
where fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
Module<B>: VmpPrepare<B>, where
{ O: GLWEToLWESwitchingKeyToRef,
fn prepare(&mut self, module: &Module<B>, other: &GLWEToLWEKey<DR>, scratch: &mut Scratch<B>) { M: GLWEToLWESwitchingKeyPrepare<B>,
self.0.prepare(module, &other.0, scratch); {
module.prepare_glwe_to_lwe_switching_key(self, other, scratch);
}
}
pub trait GLWEToLWESwitchingKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> GLWEToLWESwitchingKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> GLWEToLWESwitchingKeyPreparedToRef<B> for GLWEToLWESwitchingKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToRef<B>,
{
fn to_ref(&self) -> GLWEToLWESwitchingKeyPrepared<&[u8], B> {
GLWEToLWESwitchingKeyPrepared(self.0.to_ref())
}
}
pub trait GLWEToLWESwitchingKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GLWEToLWESwitchingKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPreparedToMut<B> for GLWEToLWESwitchingKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToMut<B>,
{
fn to_mut(&mut self) -> GLWEToLWESwitchingKeyPrepared<&mut [u8], B> {
GLWEToLWESwitchingKeyPrepared(self.0.to_mut())
} }
} }

View File

@@ -1,15 +1,15 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWESwitchingKey, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWESwitchingKeyToRef, Rank, RingDegree, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc, PrepareScratchSpace}, prepared::{
GLWESwitchingKeyPrepare, GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedAlloc, GLWESwitchingKeyPreparedToMut,
GLWESwitchingKeyPreparedToRef,
},
}; };
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct LWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWESwitchingKeyPrepared<D, B>); pub struct LWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);
impl<D: Data, B: Backend> LWEInfos for LWESwitchingKeyPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for LWESwitchingKeyPrepared<D, B> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -20,7 +20,7 @@ impl<D: Data, B: Backend> LWEInfos for LWESwitchingKeyPrepared<D, B> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -52,101 +52,165 @@ impl<D: Data, B: Backend> GGLWEInfos for LWESwitchingKeyPrepared<D, B> {
} }
} }
pub trait LWESwitchingKeyPreparedAlloc<B: Backend>
where
Self: GLWESwitchingKeyPreparedAlloc<B>,
{
fn alloc_lwe_switching_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
dnum: Dnum,
) -> LWESwitchingKeyPrepared<Vec<u8>, B> {
LWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1)))
}
fn alloc_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWESwitchingKeyPrepared<Vec<u8>, B>
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
self.alloc_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.dnum())
}
fn bytes_of_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1))
}
fn bytes_of_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
self.bytes_of_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.dnum())
}
}
impl<B: Backend> LWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GLWESwitchingKeyPreparedAlloc<B> {}
impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> { impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAlloc<B>, M: LWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!( module.alloc_lwe_switching_key_prepared_from_infos(infos)
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
Self(GGLWESwitchingKeyPrepared::alloc(module, infos))
} }
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
where where
Module<B>: VmpPMatAlloc<B>, M: LWESwitchingKeyPreparedAlloc<B>,
{ {
Self(GGLWESwitchingKeyPrepared::alloc_with( module.alloc_lwe_switching_key_prepared(base2k, k, dnum)
module,
base2k,
k,
Rank(1),
Rank(1),
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes, M: LWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!( module.bytes_of_lwe_switching_key_prepared_from_infos(infos)
infos.dsize().0,
1,
"dsize > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
} }
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
where where
Module<B>: VmpPMatAllocBytes, M: LWESwitchingKeyPreparedAlloc<B>,
{ {
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, Rank(1), Rank(1), dnum, Dsize(1)) module.bytes_of_lwe_switching_key_prepared(base2k, k, dnum)
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for LWESwitchingKeyPrepared<Vec<u8>, B> pub trait LWESwitchingKeyPrepare<B: Backend>
where where
GGLWESwitchingKeyPrepared<Vec<u8>, B>: PrepareScratchSpace<B, A>, Self: GLWESwitchingKeyPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_lwe_switching_key_tmp_bytes<A>(&self, infos: &A)
GGLWESwitchingKeyPrepared::prepare_scratch_space(module, infos) where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos);
}
fn prepare_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: LWESwitchingKeyPreparedToMut<B>,
O: LWESwitchingKeyToRef,
{
self.prepare_glwe_switching(&mut res.to_mut().0, &other.to_ref().0, scratch);
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, LWESwitchingKeyPrepared<Vec<u8>, B>> for LWESwitchingKey<D> impl<B: Backend> LWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}
where
Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>, impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
{ pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> LWESwitchingKeyPrepared<Vec<u8>, B> { where
let mut ksk_prepared: LWESwitchingKeyPrepared<Vec<u8>, B> = LWESwitchingKeyPrepared::alloc(module, self); A: GGLWEInfos,
ksk_prepared.prepare(module, self, scratch); M: LWESwitchingKeyPrepare<B>,
ksk_prepared {
module.prepare_lwe_switching_key_tmp_bytes(infos);
} }
} }
impl<DM: DataMut, DR: DataRef, B: Backend> Prepare<B, LWESwitchingKey<DR>> for LWESwitchingKeyPrepared<DM, B> impl<D: DataMut, B: Backend> LWESwitchingKeyPrepared<D, B> {
where fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
Module<B>: VmpPrepare<B>, where
{ O: LWESwitchingKeyToRef,
fn prepare(&mut self, module: &Module<B>, other: &LWESwitchingKey<DR>, scratch: &mut Scratch<B>) { M: LWESwitchingKeyPrepare<B>,
self.0.prepare(module, &other.0, scratch); {
module.prepare_lwe_switching_key(self, other, scratch);
}
}
pub trait LWESwitchingKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> LWESwitchingKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> LWESwitchingKeyPreparedToRef<B> for LWESwitchingKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToRef<B>,
{
fn to_ref(&self) -> LWESwitchingKeyPrepared<&[u8], B> {
LWESwitchingKeyPrepared(self.0.to_ref())
}
}
pub trait LWESwitchingKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> LWESwitchingKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> LWESwitchingKeyPreparedToMut<B> for LWESwitchingKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToMut<B>,
{
fn to_mut(&mut self) -> LWESwitchingKeyPrepared<&mut [u8], B> {
LWESwitchingKeyPrepared(self.0.to_mut())
} }
} }

View File

@@ -1,16 +1,16 @@
use poulpy_hal::{ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{ use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWEToGLWESwitchingKey, Rank, TorusPrecision, Base2K, Dnum, Dsize, GGLWEInfos, GLWEInfos, LWEInfos, LWEToGLWESwitchingKeyToRef, Rank, RingDegree, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc, PrepareScratchSpace}, prepared::{
GLWESwitchingKeyPrepare, GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedAlloc, GLWESwitchingKeyPreparedToMut,
GLWESwitchingKeyPreparedToRef,
},
}; };
/// A special [GLWESwitchingKey] required to for the conversion from [LWECiphertext] to [GLWECiphertext]. /// A special [GLWESwitchingKey] required to for the conversion from [LWE] to [GLWE].
#[derive(PartialEq, Eq)] #[derive(PartialEq, Eq)]
pub struct LWEToGLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWESwitchingKeyPrepared<D, B>); pub struct LWEToGLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);
impl<D: Data, B: Backend> LWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> { impl<D: Data, B: Backend> LWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
fn base2k(&self) -> Base2K { fn base2k(&self) -> Base2K {
@@ -21,7 +21,7 @@ impl<D: Data, B: Backend> LWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
self.0.k() self.0.k()
} }
fn n(&self) -> Degree { fn n(&self) -> RingDegree {
self.0.n() self.0.n()
} }
@@ -54,91 +54,162 @@ impl<D: Data, B: Backend> GGLWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
} }
} }
pub trait LWEToGLWESwitchingKeyPreparedAlloc<B: Backend>
where
Self: GLWESwitchingKeyPreparedAlloc<B>,
{
fn alloc_lwe_to_glwe_switching_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_out: Rank,
dnum: Dnum,
) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
LWEToGLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
}
fn alloc_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
);
self.alloc_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
}
fn bytes_of_lwe_to_glwe_switching_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_out: Rank,
dnum: Dnum,
) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1))
}
fn bytes_of_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
);
self.bytes_of_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
}
}
impl<B: Backend> LWEToGLWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GLWESwitchingKeyPreparedAlloc<B> {}
impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> { impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAlloc<B>, M: LWEToGLWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!( module.alloc_lwe_to_glwe_switching_key_prepared_from_infos(infos)
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
);
Self(GGLWESwitchingKeyPrepared::alloc(module, infos))
} }
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
where where
Module<B>: VmpPMatAlloc<B>, M: LWEToGLWESwitchingKeyPreparedAlloc<B>,
{ {
Self(GGLWESwitchingKeyPrepared::alloc_with( module.alloc_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
module,
base2k,
k,
Rank(1),
rank_out,
dnum,
Dsize(1),
))
} }
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where where
A: GGLWEInfos, A: GGLWEInfos,
Module<B>: VmpPMatAllocBytes, M: LWEToGLWESwitchingKeyPreparedAlloc<B>,
{ {
debug_assert_eq!( module.bytes_of_lwe_to_glwe_switching_key_prepared_from_infos(infos)
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
} }
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_out: Rank) -> usize pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize
where where
Module<B>: VmpPMatAllocBytes, M: LWEToGLWESwitchingKeyPreparedAlloc<B>,
{ {
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, Rank(1), rank_out, dnum, Dsize(1)) module.bytes_of_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
} }
} }
impl<B: Backend, A: GGLWEInfos> PrepareScratchSpace<B, A> for LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> pub trait LWEToGLWESwitchingKeyPrepare<B: Backend>
where where
GGLWESwitchingKeyPrepared<Vec<u8>, B>: PrepareScratchSpace<B, A>, Self: GLWESwitchingKeyPrepare<B>,
{ {
fn prepare_scratch_space(module: &Module<B>, infos: &A) -> usize { fn prepare_lwe_to_glwe_switching_key_tmp_bytes<A>(&self, infos: &A)
GGLWESwitchingKeyPrepared::prepare_scratch_space(module, infos) where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos);
}
fn prepare_lwe_to_glwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: LWEToGLWESwitchingKeyPreparedToMut<B>,
O: LWEToGLWESwitchingKeyToRef,
{
self.prepare_glwe_switching(&mut res.to_mut().0, &other.to_ref().0, scratch);
} }
} }
impl<D: DataRef, B: Backend> PrepareAlloc<B, LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>> for LWEToGLWESwitchingKey<D> impl<B: Backend> LWEToGLWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}
where
Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>, impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
{ pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> { where
let mut ksk_prepared: LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> = LWEToGLWESwitchingKeyPrepared::alloc(module, self); A: GGLWEInfos,
ksk_prepared.prepare(module, self, scratch); M: LWEToGLWESwitchingKeyPrepare<B>,
ksk_prepared {
module.prepare_lwe_to_glwe_switching_key_tmp_bytes(infos);
} }
} }
impl<DM: DataMut, DR: DataRef, B: Backend> Prepare<B, LWEToGLWESwitchingKey<DR>> for LWEToGLWESwitchingKeyPrepared<DM, B> impl<D: DataMut, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
where fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
Module<B>: VmpPrepare<B>, where
{ O: LWEToGLWESwitchingKeyToRef,
fn prepare(&mut self, module: &Module<B>, other: &LWEToGLWESwitchingKey<DR>, scratch: &mut Scratch<B>) { M: LWEToGLWESwitchingKeyPrepare<B>,
self.0.prepare(module, &other.0, scratch); {
module.prepare_lwe_to_glwe_switching_key(self, other, scratch);
}
}
pub trait LWEToGLWESwitchingKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> LWEToGLWESwitchingKeyPrepared<&[u8], B>;
}
impl<D: DataRef, B: Backend> LWEToGLWESwitchingKeyPreparedToRef<B> for LWEToGLWESwitchingKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToRef<B>,
{
fn to_ref(&self) -> LWEToGLWESwitchingKeyPrepared<&[u8], B> {
LWEToGLWESwitchingKeyPrepared(self.0.to_ref())
}
}
pub trait LWEToGLWESwitchingKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> LWEToGLWESwitchingKeyPrepared<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> LWEToGLWESwitchingKeyPreparedToMut<B> for LWEToGLWESwitchingKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GLWESwitchingKeyPreparedToMut<B>,
{
fn to_mut(&mut self) -> LWEToGLWESwitchingKeyPrepared<&mut [u8], B> {
LWEToGLWESwitchingKeyPrepared(self.0.to_mut())
} }
} }

View File

@@ -19,16 +19,3 @@ pub use glwe_sk::*;
pub use glwe_to_lwe_ksk::*; pub use glwe_to_lwe_ksk::*;
pub use lwe_ksk::*; pub use lwe_ksk::*;
pub use lwe_to_glwe_ksk::*; pub use lwe_to_glwe_ksk::*;
use poulpy_hal::layouts::{Backend, Module, Scratch};
pub trait PrepareScratchSpace<B: Backend, T> {
fn prepare_scratch_space(module: &Module<B>, infos: &T) -> usize;
}
pub trait PrepareAlloc<B: Backend, T> {
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> T;
}
pub trait Prepare<B: Backend, T> {
fn prepare(&mut self, module: &Module<B>, other: &T, scratch: &mut Scratch<B>);
}

View File

@@ -14,6 +14,7 @@ mod utils;
pub use operations::*; pub use operations::*;
pub mod layouts; pub mod layouts;
pub use conversion::*;
pub use dist::*; pub use dist::*;
pub use external_product::*; pub use external_product::*;
pub use glwe_packing::*; pub use glwe_packing::*;
@@ -22,4 +23,4 @@ pub use encryption::SIGMA;
pub use scratch::*; pub use scratch::*;
pub mod tests; // pub mod tests;

View File

@@ -1,16 +1,16 @@
use poulpy_hal::{ use poulpy_hal::{
api::{ api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
VecZnxNormalizeTmpBytes, VecZnxSubScalarInplace, VecZnxSubScalarInplace,
}, },
layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, ZnxZero}, layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, ZnxZero},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl}, oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
}; };
use crate::layouts::{GGLWECiphertext, GGLWEInfos, GLWECiphertext, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared}; use crate::layouts::{GGLWE, GGLWEInfos, GLWE, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};
impl<D: DataRef> GGLWECiphertext<D> { impl<D: DataRef> GGLWE<D> {
pub fn assert_noise<B, DataSk, DataWant>( pub fn assert_noise<B, DataSk, DataWant>(
&self, &self,
module: &Module<B>, module: &Module<B>,
@@ -20,8 +20,8 @@ impl<D: DataRef> GGLWECiphertext<D> {
) where ) where
DataSk: DataRef, DataSk: DataRef,
DataWant: DataRef, DataWant: DataRef,
Module<B>: VecZnxDftAllocBytes Module<B>: VecZnxDftBytesOf
+ VecZnxBigAllocBytes + VecZnxBigBytesOf
+ VecZnxDftApply<B> + VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B> + SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B> + VecZnxIdftApplyConsume<B>
@@ -30,13 +30,13 @@ impl<D: DataRef> GGLWECiphertext<D> {
+ VecZnxBigNormalize<B> + VecZnxBigNormalize<B>
+ VecZnxNormalizeTmpBytes + VecZnxNormalizeTmpBytes
+ VecZnxSubScalarInplace, + VecZnxSubScalarInplace,
B: Backend + TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>, B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{ {
let dsize: usize = self.dsize().into(); let dsize: usize = self.dsize().into();
let base2k: usize = self.base2k().into(); let base2k: usize = self.base2k().into();
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(module, self)); let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_tmp_bytes(module, self));
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self); let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);
(0..self.rank_in().into()).for_each(|col_i| { (0..self.rank_in().into()).for_each(|col_i| {
(0..self.dnum().into()).for_each(|row_i| { (0..self.dnum().into()).for_each(|row_i| {

Some files were not shown because too many files have changed in this diff Show More