Added more serialization tests + generalize methods to any n

Pro7ech
2025-08-13 15:28:52 +02:00
parent 068470783e
commit 940742ce6c
117 changed files with 3658 additions and 2577 deletions

View File

@@ -1,10 +1,10 @@
use backend::{
hal::{
api::{
ModuleNew, ScalarZnxAlloc, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyInplace, SvpPPolAlloc, SvpPrepare,
VecZnxAddNormal, VecZnxAlloc, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace, VecZnxDecodeVeci64, VecZnxDftAlloc, VecZnxDftFromVecZnx,
VecZnxDftToVecZnxBigTmpA, VecZnxEncodeVeci64, VecZnxFillUniform, VecZnxNormalizeInplace, ZnxInfos,
ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyInplace, SvpPPolAlloc, SvpPrepare, VecZnxAddNormal,
VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace,
VecZnxDecodeVeci64, VecZnxDftAlloc, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigTmpA, VecZnxEncodeVeci64,
VecZnxFillUniform, VecZnxNormalizeInplace, ZnxInfos,
},
layouts::{Module, ScalarZnx, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft},
},
@@ -27,17 +27,18 @@ fn main() {
let mut source: Source = Source::new(seed);
// s <- Z_{-1, 0, 1}[X]/(X^{N}+1)
let mut s: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut s: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(module.n(), 1);
s.fill_ternary_prob(0, 0.5, &mut source);
// Buffer to store s in the DFT domain
let mut s_dft: SvpPPol<Vec<u8>, FFT64> = module.svp_ppol_alloc(s.cols());
let mut s_dft: SvpPPol<Vec<u8>, FFT64> = module.svp_ppol_alloc(n, s.cols());
// s_dft <- DFT(s)
module.svp_prepare(&mut s_dft, 0, &s, 0);
// Allocates a VecZnx with two columns: ct=(0, 0)
let mut ct: VecZnx<Vec<u8>> = module.vec_znx_alloc(
let mut ct: VecZnx<Vec<u8>> = VecZnx::alloc(
module.n(),
2, // Number of columns
ct_size, // Number of small poly per column
);
@@ -45,7 +46,7 @@ fn main() {
// Fill the second column with random values: ct = (0, a)
module.vec_znx_fill_uniform(basek, &mut ct, 1, ct_size * basek, &mut source);
let mut buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.vec_znx_dft_alloc(1, ct_size);
let mut buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.vec_znx_dft_alloc(n, 1, ct_size);
module.vec_znx_dft_from_vec_znx(1, 0, &mut buf_dft, 0, &ct, 1);
@@ -60,11 +61,12 @@ fn main() {
// Alias scratch space (VecZnxDft<B> is always at least as big as VecZnxBig<B>)
// BIG(ct[1] * s) <- IDFT(DFT(ct[1] * s)) (not normalized)
let mut buf_big: VecZnxBig<Vec<u8>, FFT64> = module.vec_znx_big_alloc(1, ct_size);
let mut buf_big: VecZnxBig<Vec<u8>, FFT64> = module.vec_znx_big_alloc(n, 1, ct_size);
module.vec_znx_dft_to_vec_znx_big_tmp_a(&mut buf_big, 0, &mut buf_dft, 0);
// Creates a plaintext: VecZnx with 1 column
let mut m = module.vec_znx_alloc(
let mut m = VecZnx::alloc(
module.n(),
1, // Number of columns
msg_size, // Number of small polynomials
);
@@ -125,7 +127,7 @@ fn main() {
module.vec_znx_big_add_small_inplace(&mut buf_big, 0, &ct, 0);
// m + e <- BIG(ct[1] * s + ct[0])
let mut res = module.vec_znx_alloc(1, ct_size);
let mut res = VecZnx::alloc(module.n(), 1, ct_size);
module.vec_znx_big_normalize(basek, &mut res, 0, &buf_big, 0, scratch.borrow());
// have = m * 2^{log_scale} + e

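The hunks above show the new allocation convention: plain layouts (ScalarZnx, VecZnx) are now built directly from the layout type with an explicit ring degree, while backend-prepared layouts (SvpPPol, VecZnxDft, VecZnxBig) keep their Module methods but take n as well. A minimal sketch of the call-site change, assuming the module, ct_size and FFT64 backend from the example above:

let n: usize = module.n();
let s: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);                // was: module.scalar_znx_alloc(1)
let ct: VecZnx<Vec<u8>> = VecZnx::alloc(n, 2, ct_size);            // was: module.vec_znx_alloc(2, ct_size)
let s_dft: SvpPPol<Vec<u8>, FFT64> = module.svp_ppol_alloc(n, 1);  // degree passed explicitly
let buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.vec_znx_dft_alloc(n, 1, ct_size);
let buf_big: VecZnxBig<Vec<u8>, FFT64> = module.vec_znx_big_alloc(n, 1, ct_size);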
View File

@@ -1,17 +0,0 @@
use crate::hal::layouts::MatZnxOwned;
/// Allocates as [crate::hal::layouts::MatZnx].
pub trait MatZnxAlloc {
fn mat_znx_alloc(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxOwned;
}
/// Returns the size in bytes to allocate a [crate::hal::layouts::MatZnx].
pub trait MatZnxAllocBytes {
fn mat_znx_alloc_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
}
/// Consume a vector of bytes into a [crate::hal::layouts::MatZnx].
/// User must ensure that bytes is memory aligned and that its length is equal to [MatZnxAllocBytes].
pub trait MatZnxFromBytes {
fn mat_znx_from_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: Vec<u8>) -> MatZnxOwned;
}

View File

@@ -1,6 +1,4 @@
mod mat_znx;
mod module;
mod scalar_znx;
mod scratch;
mod svp_ppol;
mod vec_znx;
@@ -9,9 +7,7 @@ mod vec_znx_dft;
mod vmp_pmat;
mod znx_base;
pub use mat_znx::*;
pub use module::*;
pub use scalar_znx::*;
pub use scratch::*;
pub use svp_ppol::*;
pub use vec_znx::*;

View File

@@ -1,17 +0,0 @@
use crate::hal::layouts::ScalarZnxOwned;
/// Allocates as [crate::hal::layouts::ScalarZnx].
pub trait ScalarZnxAlloc {
fn scalar_znx_alloc(&self, cols: usize) -> ScalarZnxOwned;
}
/// Returns the size in bytes to allocate a [crate::hal::layouts::ScalarZnx].
pub trait ScalarZnxAllocBytes {
fn scalar_znx_alloc_bytes(&self, cols: usize) -> usize;
}
/// Consume a vector of bytes into a [crate::hal::layouts::ScalarZnx].
/// User must ensure that bytes is memory aligned and that its length is equal to [ScalarZnxAllocBytes].
pub trait ScalarZnxFromBytes {
fn scalar_znx_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxOwned;
}

View File

@@ -1,4 +1,4 @@
use crate::hal::layouts::{Backend, MatZnx, Module, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat};
use crate::hal::layouts::{Backend, MatZnx, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat};
/// Allocates a new [crate::hal::layouts::ScratchOwned] of `size` aligned bytes.
pub trait ScratchOwnedAlloc<B: Backend> {
@@ -27,44 +27,38 @@ pub trait TakeSlice {
/// Take a slice of bytes from a [Scratch], wraps it into a [ScalarZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeScalarZnx<B: Backend> {
fn take_scalar_znx(&mut self, module: &Module<B>, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self);
pub trait TakeScalarZnx {
fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self);
}
/// Take a slice of bytes from a [Scratch], wraps it into a [SvpPPol] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeSvpPPol<B: Backend> {
fn take_svp_ppol(&mut self, module: &Module<B>, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self);
fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self);
}
/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnx<B: Backend> {
fn take_vec_znx(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self);
pub trait TakeVecZnx {
fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self);
}
/// Take a slice of bytes from a [Scratch], slices it into a vector of [VecZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxSlice<B: Backend> {
fn take_vec_znx_slice(
&mut self,
len: usize,
module: &Module<B>,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Self);
pub trait TakeVecZnxSlice {
fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self);
}
/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnxBig] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxBig<B: Backend> {
fn take_vec_znx_big(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self);
fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self);
}
/// Take a slice of bytes from a [Scratch], wraps it into a [VecZnxDft] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeVecZnxDft<B: Backend> {
fn take_vec_znx_dft(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self);
fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self);
}
/// Take a slice of bytes from a [Scratch], slices it into a vector of [VecZnxDft] and returns it
@@ -73,7 +67,7 @@ pub trait TakeVecZnxDftSlice<B: Backend> {
fn take_vec_znx_dft_slice(
&mut self,
len: usize,
module: &Module<B>,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self);
@@ -84,7 +78,7 @@ pub trait TakeVecZnxDftSlice<B: Backend> {
pub trait TakeVmpPMat<B: Backend> {
fn take_vmp_pmat(
&mut self,
module: &Module<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
@@ -94,10 +88,10 @@ pub trait TakeVmpPMat<B: Backend> {
/// Take a slice of bytes from a [Scratch], wraps it into a [MatZnx] and returns it
/// as well as a new [Scratch] minus the taken array of bytes.
pub trait TakeMatZnx<B: Backend> {
pub trait TakeMatZnx {
fn take_mat_znx(
&mut self,
module: &Module<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,

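With the &Module<B> parameter dropped from the Take* traits, scratch buffers are carved out against an explicit degree. A usage sketch, assuming scratch: &mut Scratch<B> was already obtained via ScratchOwnedBorrow and that n, cols and size are in scope (not part of the diff):

let (mut tmp, scratch) = scratch.take_vec_znx(n, cols, size);        // was: take_vec_znx(&module, cols, size)
let (mut tmp_dft, scratch) = scratch.take_vec_znx_dft(n, cols, size);
let (mut tmp_s, _scratch) = scratch.take_scalar_znx(n, 1);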
View File

@@ -2,18 +2,18 @@ use crate::hal::layouts::{Backend, ScalarZnxToRef, SvpPPolOwned, SvpPPolToMut, S
/// Allocates a [crate::hal::layouts::SvpPPol].
pub trait SvpPPolAlloc<B: Backend> {
fn svp_ppol_alloc(&self, cols: usize) -> SvpPPolOwned<B>;
fn svp_ppol_alloc(&self, n: usize, cols: usize) -> SvpPPolOwned<B>;
}
/// Returns the size in bytes to allocate a [crate::hal::layouts::SvpPPol].
pub trait SvpPPolAllocBytes {
fn svp_ppol_alloc_bytes(&self, cols: usize) -> usize;
fn svp_ppol_alloc_bytes(&self, n: usize, cols: usize) -> usize;
}
/// Consume a vector of bytes into a [crate::hal::layouts::SvpPPol].
/// User must ensure that bytes is memory aligned and that its length is equal to [SvpPPolAllocBytes].
pub trait SvpPPolFromBytes<B: Backend> {
fn svp_ppol_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B>;
fn svp_ppol_from_bytes(&self, n: usize, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B>;
}
/// Prepare a [crate::hal::layouts::ScalarZnx] into an [crate::hal::layouts::SvpPPol].

View File

@@ -2,33 +2,7 @@ use rand_distr::Distribution;
use rug::Float;
use sampling::source::Source;
use crate::hal::layouts::{Backend, ScalarZnxToRef, Scratch, VecZnxOwned, VecZnxToMut, VecZnxToRef};
pub trait VecZnxAlloc {
/// Allocates a new [crate::hal::layouts::VecZnx].
///
/// # Arguments
///
/// * `cols`: the number of polynomials.
/// * `size`: the number of small polynomials per column.
fn vec_znx_alloc(&self, cols: usize, size: usize) -> VecZnxOwned;
}
pub trait VecZnxFromBytes {
/// Instantiates a new [crate::hal::layouts::VecZnx] from a slice of bytes.
/// The returned [crate::hal::layouts::VecZnx] takes ownership of the slice of bytes.
///
/// # Arguments
///
/// * `cols`: the number of polynomials.
/// * `size`: the number of small polynomials per column.
fn vec_znx_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxOwned;
}
pub trait VecZnxAllocBytes {
/// Returns the number of bytes necessary to allocate a new [crate::hal::layouts::VecZnx].
fn vec_znx_alloc_bytes(&self, cols: usize, size: usize) -> usize;
}
use crate::hal::layouts::{Backend, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef};
pub trait VecZnxNormalizeTmpBytes {
/// Returns the minimum number of bytes necessary for normalization.

View File

@@ -5,18 +5,18 @@ use crate::hal::layouts::{Backend, Scratch, VecZnxBigOwned, VecZnxBigToMut, VecZ
/// Allocates a [crate::hal::layouts::VecZnxBig].
pub trait VecZnxBigAlloc<B: Backend> {
fn vec_znx_big_alloc(&self, cols: usize, size: usize) -> VecZnxBigOwned<B>;
fn vec_znx_big_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxBigOwned<B>;
}
/// Returns the size in bytes to allocate a [crate::hal::layouts::VecZnxBig].
pub trait VecZnxBigAllocBytes {
fn vec_znx_big_alloc_bytes(&self, cols: usize, size: usize) -> usize;
fn vec_znx_big_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize;
}
/// Consume a vector of bytes into a [crate::hal::layouts::VecZnxBig].
/// User must ensure that bytes is memory aligned and that its length is equal to [VecZnxBigAllocBytes].
pub trait VecZnxBigFromBytes<B: Backend> {
fn vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B>;
fn vec_znx_big_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B>;
}
/// Add a discrete normal distribution on res.

View File

@@ -3,19 +3,19 @@ use crate::hal::layouts::{
};
pub trait VecZnxDftAlloc<B: Backend> {
fn vec_znx_dft_alloc(&self, cols: usize, size: usize) -> VecZnxDftOwned<B>;
fn vec_znx_dft_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxDftOwned<B>;
}
pub trait VecZnxDftFromBytes<B: Backend> {
fn vec_znx_dft_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B>;
fn vec_znx_dft_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B>;
}
pub trait VecZnxDftAllocBytes {
fn vec_znx_dft_alloc_bytes(&self, cols: usize, size: usize) -> usize;
fn vec_znx_dft_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize;
}
pub trait VecZnxDftToVecZnxBigTmpBytes {
fn vec_znx_dft_to_vec_znx_big_tmp_bytes(&self) -> usize;
fn vec_znx_dft_to_vec_znx_big_tmp_bytes(&self, n: usize) -> usize;
}
pub trait VecZnxDftToVecZnxBig<B: Backend> {

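The *_alloc_bytes and *_from_bytes methods stay paired after the signature change: both now take n, and the byte vector handed to from_bytes must be aligned and exactly alloc_bytes long. A sketch of that round trip, assuming the crate's alloc_aligned helper is reachable from the call site and using the FFT64 module from the first example (not part of the diff):

let len: usize = module.vec_znx_dft_alloc_bytes(n, cols, size);
let bytes: Vec<u8> = alloc_aligned(len); // aligned backing storage of the exact required size
let a_dft: VecZnxDft<Vec<u8>, FFT64> = module.vec_znx_dft_from_bytes(n, cols, size, bytes);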
View File

@@ -3,19 +3,27 @@ use crate::hal::layouts::{
};
pub trait VmpPMatAlloc<B: Backend> {
fn vmp_pmat_alloc(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B>;
fn vmp_pmat_alloc(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B>;
}
pub trait VmpPMatAllocBytes {
fn vmp_pmat_alloc_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
fn vmp_pmat_alloc_bytes(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
}
pub trait VmpPMatFromBytes<B: Backend> {
fn vmp_pmat_from_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: Vec<u8>) -> VmpPMatOwned<B>;
fn vmp_pmat_from_bytes(
&self,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> VmpPMatOwned<B>;
}
pub trait VmpPrepareTmpBytes {
fn vmp_prepare_tmp_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
fn vmp_prepare_tmp_bytes(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
}
pub trait VmpPMatPrepare<B: Backend> {
@@ -28,6 +36,7 @@ pub trait VmpPMatPrepare<B: Backend> {
pub trait VmpApplyTmpBytes {
fn vmp_apply_tmp_bytes(
&self,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,
@@ -72,6 +81,7 @@ pub trait VmpApply<B: Backend> {
pub trait VmpApplyAddTmpBytes {
fn vmp_apply_add_tmp_bytes(
&self,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,

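The VMP entry points follow the same pattern, with n threaded through allocation, preparation and apply-size queries. A sketch with the new signatures, assuming rows, cols_in, cols_out and size describe the prepared matrix and res_size/a_size the operands (not part of the diff):

let pmat: VmpPMatOwned<FFT64> = module.vmp_pmat_alloc(n, rows, cols_in, cols_out, size);
let prepare_bytes: usize = module.vmp_prepare_tmp_bytes(n, rows, cols_in, cols_out, size);
let apply_bytes: usize = module.vmp_apply_tmp_bytes(n, res_size, a_size, rows, cols_in, cols_out, size);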
View File

@@ -113,3 +113,7 @@ where
pub trait FillUniform {
fn fill_uniform(&mut self, source: &mut Source);
}
pub trait Reset {
fn reset(&mut self);
}

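Reset is the new counterpart to FillUniform used by the serialization tests further down: the impls added in this commit zero the data and additionally clear the shape fields, so a deserialized value has to restore them. A small usage sketch on the VecZnx layout (not part of the diff):

use sampling::source::Source;

let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(1024, 3, 4);
let mut source: Source = Source::new([0u8; 32]);
a.fill_uniform(&mut source); // FillUniform
a.reset();                   // data zeroed, n/cols/size/max_size cleared to 0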
View File

@@ -1,32 +0,0 @@
use crate::hal::{
api::{MatZnxAlloc, MatZnxAllocBytes, MatZnxFromBytes},
layouts::{Backend, MatZnxOwned, Module},
oep::{MatZnxAllocBytesImpl, MatZnxAllocImpl, MatZnxFromBytesImpl},
};
impl<B> MatZnxAlloc for Module<B>
where
B: Backend + MatZnxAllocImpl<B>,
{
fn mat_znx_alloc(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxOwned {
B::mat_znx_alloc_impl(self, rows, cols_in, cols_out, size)
}
}
impl<B> MatZnxAllocBytes for Module<B>
where
B: Backend + MatZnxAllocBytesImpl<B>,
{
fn mat_znx_alloc_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
B::mat_znx_alloc_bytes_impl(self, rows, cols_in, cols_out, size)
}
}
impl<B> MatZnxFromBytes for Module<B>
where
B: Backend + MatZnxFromBytesImpl<B>,
{
fn mat_znx_from_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: Vec<u8>) -> MatZnxOwned {
B::mat_znx_from_bytes_impl(self, rows, cols_in, cols_out, size, bytes)
}
}

View File

@@ -1,6 +1,4 @@
mod mat_znx;
mod module;
mod scalar_znx;
mod scratch;
mod svp_ppol;
mod vec_znx;

View File

@@ -1,23 +0,0 @@
use crate::hal::{
api::{ScalarZnxAlloc, ScalarZnxAllocBytes},
layouts::{Backend, Module, ScalarZnxOwned},
oep::{ScalarZnxAllocBytesImpl, ScalarZnxAllocImpl},
};
impl<B> ScalarZnxAllocBytes for Module<B>
where
B: Backend + ScalarZnxAllocBytesImpl<B>,
{
fn scalar_znx_alloc_bytes(&self, cols: usize) -> usize {
B::scalar_znx_alloc_bytes_impl(self.n(), cols)
}
}
impl<B> ScalarZnxAlloc for Module<B>
where
B: Backend + ScalarZnxAllocImpl<B>,
{
fn scalar_znx_alloc(&self, cols: usize) -> ScalarZnxOwned {
B::scalar_znx_alloc_impl(self.n(), cols)
}
}

View File

@@ -3,9 +3,7 @@ use crate::hal::{
ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeLike, TakeMatZnx, TakeScalarZnx,
TakeSlice, TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice, TakeVmpPMat,
},
layouts::{
Backend, DataRef, MatZnx, Module, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat,
},
layouts::{Backend, DataRef, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
oep::{
ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeLikeImpl, TakeMatZnxImpl,
TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl,
@@ -58,12 +56,12 @@ where
}
}
impl<B> TakeScalarZnx<B> for Scratch<B>
impl<B> TakeScalarZnx for Scratch<B>
where
B: Backend + TakeScalarZnxImpl<B>,
{
fn take_scalar_znx(&mut self, module: &Module<B>, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
B::take_scalar_znx_impl(self, module.n(), cols)
fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
B::take_scalar_znx_impl(self, n, cols)
}
}
@@ -71,32 +69,26 @@ impl<B> TakeSvpPPol<B> for Scratch<B>
where
B: Backend + TakeSvpPPolImpl<B>,
{
fn take_svp_ppol(&mut self, module: &Module<B>, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self) {
B::take_svp_ppol_impl(self, module.n(), cols)
fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self) {
B::take_svp_ppol_impl(self, n, cols)
}
}
impl<B> TakeVecZnx<B> for Scratch<B>
impl<B> TakeVecZnx for Scratch<B>
where
B: Backend + TakeVecZnxImpl<B>,
{
fn take_vec_znx(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
B::take_vec_znx_impl(self, module.n(), cols, size)
fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
B::take_vec_znx_impl(self, n, cols, size)
}
}
impl<B> TakeVecZnxSlice<B> for Scratch<B>
impl<B> TakeVecZnxSlice for Scratch<B>
where
B: Backend + TakeVecZnxSliceImpl<B>,
{
fn take_vec_znx_slice(
&mut self,
len: usize,
module: &Module<B>,
cols: usize,
size: usize,
) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
B::take_vec_znx_slice_impl(self, len, module.n(), cols, size)
fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
B::take_vec_znx_slice_impl(self, len, n, cols, size)
}
}
@@ -104,8 +96,8 @@ impl<B> TakeVecZnxBig<B> for Scratch<B>
where
B: Backend + TakeVecZnxBigImpl<B>,
{
fn take_vec_znx_big(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self) {
B::take_vec_znx_big_impl(self, module.n(), cols, size)
fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self) {
B::take_vec_znx_big_impl(self, n, cols, size)
}
}
@@ -113,8 +105,8 @@ impl<B> TakeVecZnxDft<B> for Scratch<B>
where
B: Backend + TakeVecZnxDftImpl<B>,
{
fn take_vec_znx_dft(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self) {
B::take_vec_znx_dft_impl(self, module.n(), cols, size)
fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self) {
B::take_vec_znx_dft_impl(self, n, cols, size)
}
}
@@ -125,11 +117,11 @@ where
fn take_vec_znx_dft_slice(
&mut self,
len: usize,
module: &Module<B>,
n: usize,
cols: usize,
size: usize,
) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self) {
B::take_vec_znx_dft_slice_impl(self, len, module.n(), cols, size)
B::take_vec_znx_dft_slice_impl(self, len, n, cols, size)
}
}
@@ -139,29 +131,29 @@ where
{
fn take_vmp_pmat(
&mut self,
module: &Module<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (VmpPMat<&mut [u8], B>, &mut Self) {
B::take_vmp_pmat_impl(self, module.n(), rows, cols_in, cols_out, size)
B::take_vmp_pmat_impl(self, n, rows, cols_in, cols_out, size)
}
}
impl<B> TakeMatZnx<B> for Scratch<B>
impl<B> TakeMatZnx for Scratch<B>
where
B: Backend + TakeMatZnxImpl<B>,
{
fn take_mat_znx(
&mut self,
module: &Module<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> (MatZnx<&mut [u8]>, &mut Self) {
B::take_mat_znx_impl(self, module.n(), rows, cols_in, cols_out, size)
B::take_mat_znx_impl(self, n, rows, cols_in, cols_out, size)
}
}

View File

@@ -8,8 +8,8 @@ impl<B> SvpPPolFromBytes<B> for Module<B>
where
B: Backend + SvpPPolFromBytesImpl<B>,
{
fn svp_ppol_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B> {
B::svp_ppol_from_bytes_impl(self.n(), cols, bytes)
fn svp_ppol_from_bytes(&self, n: usize, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B> {
B::svp_ppol_from_bytes_impl(n, cols, bytes)
}
}
@@ -17,8 +17,8 @@ impl<B> SvpPPolAlloc<B> for Module<B>
where
B: Backend + SvpPPolAllocImpl<B>,
{
fn svp_ppol_alloc(&self, cols: usize) -> SvpPPolOwned<B> {
B::svp_ppol_alloc_impl(self.n(), cols)
fn svp_ppol_alloc(&self, n: usize, cols: usize) -> SvpPPolOwned<B> {
B::svp_ppol_alloc_impl(n, cols)
}
}
@@ -26,8 +26,8 @@ impl<B> SvpPPolAllocBytes for Module<B>
where
B: Backend + SvpPPolAllocBytesImpl<B>,
{
fn svp_ppol_alloc_bytes(&self, cols: usize) -> usize {
B::svp_ppol_alloc_bytes_impl(self.n(), cols)
fn svp_ppol_alloc_bytes(&self, n: usize, cols: usize) -> usize {
B::svp_ppol_alloc_bytes_impl(n, cols)
}
}

View File

@@ -2,54 +2,26 @@ use sampling::source::Source;
use crate::hal::{
api::{
VecZnxAdd, VecZnxAddDistF64, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAlloc, VecZnxAllocBytes,
VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxCopy, VecZnxDecodeCoeffsi64, VecZnxDecodeVecFloat,
VecZnxDecodeVeci64, VecZnxEncodeCoeffsi64, VecZnxEncodeVeci64, VecZnxFillDistF64, VecZnxFillNormal, VecZnxFillUniform,
VecZnxFromBytes, VecZnxLshInplace, VecZnxMerge, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxNegate,
VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
VecZnxRshInplace, VecZnxSplit, VecZnxStd, VecZnxSub, VecZnxSubABInplace, VecZnxSubBAInplace, VecZnxSubScalarInplace,
VecZnxSwithcDegree,
VecZnxAdd, VecZnxAddDistF64, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
VecZnxAutomorphismInplace, VecZnxCopy, VecZnxDecodeCoeffsi64, VecZnxDecodeVecFloat, VecZnxDecodeVeci64,
VecZnxEncodeCoeffsi64, VecZnxEncodeVeci64, VecZnxFillDistF64, VecZnxFillNormal, VecZnxFillUniform, VecZnxLshInplace,
VecZnxMerge, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize,
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSplit,
VecZnxStd, VecZnxSub, VecZnxSubABInplace, VecZnxSubBAInplace, VecZnxSubScalarInplace, VecZnxSwithcDegree,
},
layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxOwned, VecZnxToMut, VecZnxToRef},
layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
oep::{
VecZnxAddDistF64Impl, VecZnxAddImpl, VecZnxAddInplaceImpl, VecZnxAddNormalImpl, VecZnxAddScalarInplaceImpl,
VecZnxAllocBytesImpl, VecZnxAllocImpl, VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxCopyImpl,
VecZnxDecodeCoeffsi64Impl, VecZnxDecodeVecFloatImpl, VecZnxDecodeVeci64Impl, VecZnxEncodeCoeffsi64Impl,
VecZnxEncodeVeci64Impl, VecZnxFillDistF64Impl, VecZnxFillNormalImpl, VecZnxFillUniformImpl, VecZnxFromBytesImpl,
VecZnxLshInplaceImpl, VecZnxMergeImpl, VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl, VecZnxNegateImpl,
VecZnxNegateInplaceImpl, VecZnxNormalizeImpl, VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl,
VecZnxRotateInplaceImpl, VecZnxRshInplaceImpl, VecZnxSplitImpl, VecZnxStdImpl, VecZnxSubABInplaceImpl,
VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarInplaceImpl, VecZnxSwithcDegreeImpl,
VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxCopyImpl, VecZnxDecodeCoeffsi64Impl,
VecZnxDecodeVecFloatImpl, VecZnxDecodeVeci64Impl, VecZnxEncodeCoeffsi64Impl, VecZnxEncodeVeci64Impl,
VecZnxFillDistF64Impl, VecZnxFillNormalImpl, VecZnxFillUniformImpl, VecZnxLshInplaceImpl, VecZnxMergeImpl,
VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl, VecZnxRshInplaceImpl,
VecZnxSplitImpl, VecZnxStdImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl,
VecZnxSubScalarInplaceImpl, VecZnxSwithcDegreeImpl,
},
};
impl<B> VecZnxAlloc for Module<B>
where
B: Backend + VecZnxAllocImpl<B>,
{
fn vec_znx_alloc(&self, cols: usize, size: usize) -> VecZnxOwned {
B::vec_znx_alloc_impl(self.n(), cols, size)
}
}
impl<B> VecZnxFromBytes for Module<B>
where
B: Backend + VecZnxFromBytesImpl<B>,
{
fn vec_znx_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxOwned {
B::vec_znx_from_bytes_impl(self.n(), cols, size, bytes)
}
}
impl<B> VecZnxAllocBytes for Module<B>
where
B: Backend + VecZnxAllocBytesImpl<B>,
{
fn vec_znx_alloc_bytes(&self, cols: usize, size: usize) -> usize {
B::vec_znx_alloc_bytes_impl(self.n(), cols, size)
}
}
impl<B> VecZnxNormalizeTmpBytes for Module<B>
where
B: Backend + VecZnxNormalizeTmpBytesImpl<B>,

View File

@@ -24,8 +24,8 @@ impl<B> VecZnxBigAlloc<B> for Module<B>
where
B: Backend + VecZnxBigAllocImpl<B>,
{
fn vec_znx_big_alloc(&self, cols: usize, size: usize) -> VecZnxBigOwned<B> {
B::vec_znx_big_alloc_impl(self.n(), cols, size)
fn vec_znx_big_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxBigOwned<B> {
B::vec_znx_big_alloc_impl(n, cols, size)
}
}
@@ -33,8 +33,8 @@ impl<B> VecZnxBigFromBytes<B> for Module<B>
where
B: Backend + VecZnxBigFromBytesImpl<B>,
{
fn vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B> {
B::vec_znx_big_from_bytes_impl(self.n(), cols, size, bytes)
fn vec_znx_big_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B> {
B::vec_znx_big_from_bytes_impl(n, cols, size, bytes)
}
}
@@ -42,8 +42,8 @@ impl<B> VecZnxBigAllocBytes for Module<B>
where
B: Backend + VecZnxBigAllocBytesImpl<B>,
{
fn vec_znx_big_alloc_bytes(&self, cols: usize, size: usize) -> usize {
B::vec_znx_big_alloc_bytes_impl(self.n(), cols, size)
fn vec_znx_big_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize {
B::vec_znx_big_alloc_bytes_impl(n, cols, size)
}
}

View File

@@ -20,8 +20,8 @@ impl<B> VecZnxDftFromBytes<B> for Module<B>
where
B: Backend + VecZnxDftFromBytesImpl<B>,
{
fn vec_znx_dft_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B> {
B::vec_znx_dft_from_bytes_impl(self.n(), cols, size, bytes)
fn vec_znx_dft_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B> {
B::vec_znx_dft_from_bytes_impl(n, cols, size, bytes)
}
}
@@ -29,8 +29,8 @@ impl<B> VecZnxDftAllocBytes for Module<B>
where
B: Backend + VecZnxDftAllocBytesImpl<B>,
{
fn vec_znx_dft_alloc_bytes(&self, cols: usize, size: usize) -> usize {
B::vec_znx_dft_alloc_bytes_impl(self.n(), cols, size)
fn vec_znx_dft_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize {
B::vec_znx_dft_alloc_bytes_impl(n, cols, size)
}
}
@@ -38,8 +38,8 @@ impl<B> VecZnxDftAlloc<B> for Module<B>
where
B: Backend + VecZnxDftAllocImpl<B>,
{
fn vec_znx_dft_alloc(&self, cols: usize, size: usize) -> VecZnxDftOwned<B> {
B::vec_znx_dft_alloc_impl(self.n(), cols, size)
fn vec_znx_dft_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxDftOwned<B> {
B::vec_znx_dft_alloc_impl(n, cols, size)
}
}
@@ -47,8 +47,8 @@ impl<B> VecZnxDftToVecZnxBigTmpBytes for Module<B>
where
B: Backend + VecZnxDftToVecZnxBigTmpBytesImpl<B>,
{
fn vec_znx_dft_to_vec_znx_big_tmp_bytes(&self) -> usize {
B::vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(self)
fn vec_znx_dft_to_vec_znx_big_tmp_bytes(&self, n: usize) -> usize {
B::vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(self, n)
}
}

View File

@@ -14,8 +14,8 @@ impl<B> VmpPMatAlloc<B> for Module<B>
where
B: Backend + VmpPMatAllocImpl<B>,
{
fn vmp_pmat_alloc(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B> {
B::vmp_pmat_alloc_impl(self.n(), rows, cols_in, cols_out, size)
fn vmp_pmat_alloc(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B> {
B::vmp_pmat_alloc_impl(n, rows, cols_in, cols_out, size)
}
}
@@ -23,8 +23,8 @@ impl<B> VmpPMatAllocBytes for Module<B>
where
B: Backend + VmpPMatAllocBytesImpl<B>,
{
fn vmp_pmat_alloc_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
B::vmp_pmat_alloc_bytes_impl(self.n(), rows, cols_in, cols_out, size)
fn vmp_pmat_alloc_bytes(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
B::vmp_pmat_alloc_bytes_impl(n, rows, cols_in, cols_out, size)
}
}
@@ -32,8 +32,16 @@ impl<B> VmpPMatFromBytes<B> for Module<B>
where
B: Backend + VmpPMatFromBytesImpl<B>,
{
fn vmp_pmat_from_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: Vec<u8>) -> VmpPMatOwned<B> {
B::vmp_pmat_from_bytes_impl(self.n(), rows, cols_in, cols_out, size, bytes)
fn vmp_pmat_from_bytes(
&self,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> VmpPMatOwned<B> {
B::vmp_pmat_from_bytes_impl(n, rows, cols_in, cols_out, size, bytes)
}
}
@@ -41,8 +49,8 @@ impl<B> VmpPrepareTmpBytes for Module<B>
where
B: Backend + VmpPrepareTmpBytesImpl<B>,
{
fn vmp_prepare_tmp_bytes(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
B::vmp_prepare_tmp_bytes_impl(self, rows, cols_in, cols_out, size)
fn vmp_prepare_tmp_bytes(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
B::vmp_prepare_tmp_bytes_impl(self, n, rows, cols_in, cols_out, size)
}
}
@@ -65,6 +73,7 @@ where
{
fn vmp_apply_tmp_bytes(
&self,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,
@@ -73,7 +82,7 @@ where
b_size: usize,
) -> usize {
B::vmp_apply_tmp_bytes_impl(
self, res_size, a_size, b_rows, b_cols_in, b_cols_out, b_size,
self, n, res_size, a_size, b_rows, b_cols_in, b_cols_out, b_size,
)
}
}
@@ -98,6 +107,7 @@ where
{
fn vmp_apply_add_tmp_bytes(
&self,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,
@@ -106,7 +116,7 @@ where
b_size: usize,
) -> usize {
B::vmp_apply_add_tmp_bytes_impl(
self, res_size, a_size, b_rows, b_cols_in, b_cols_out, b_size,
self, n, res_size, a_size, b_rows, b_cols_in, b_cols_out, b_size,
)
}
}

View File

@@ -1,7 +1,7 @@
use crate::{
alloc_aligned,
hal::{
api::{DataView, DataViewMut, FillUniform, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
api::{DataView, DataViewMut, FillUniform, Reset, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Data, DataMut, DataRef, ReaderFrom, VecZnx, WriterTo},
},
};
@@ -78,15 +78,13 @@ impl<D: Data> MatZnx<D> {
}
}
impl<D: DataRef> MatZnx<D> {
pub fn bytes_of(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
rows * cols_in * VecZnx::<Vec<u8>>::alloc_bytes::<i64>(n, cols_out, size)
impl MatZnx<Vec<u8>> {
pub fn alloc_bytes(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
rows * cols_in * VecZnx::<Vec<u8>>::alloc_bytes(n, cols_out, size)
}
}
impl<D: DataRef + From<Vec<u8>>> MatZnx<D> {
pub(crate) fn alloc(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned(Self::bytes_of(n, rows, cols_in, cols_out, size));
pub fn alloc(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned(Self::alloc_bytes(n, rows, cols_in, cols_out, size));
Self {
data: data.into(),
n,
@@ -97,16 +95,9 @@ impl<D: DataRef + From<Vec<u8>>> MatZnx<D> {
}
}
pub(crate) fn from_bytes(
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: impl Into<Vec<u8>>,
) -> Self {
pub fn from_bytes(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::bytes_of(n, rows, cols_in, cols_out, size));
assert!(data.len() == Self::alloc_bytes(n, rows, cols_in, cols_out, size));
Self {
data: data.into(),
n,
@@ -127,7 +118,7 @@ impl<D: DataRef> MatZnx<D> {
}
let self_ref: MatZnx<&[u8]> = self.to_ref();
let nb_bytes: usize = VecZnx::<Vec<u8>>::alloc_bytes::<i64>(self.n, self.cols_out, self.size);
let nb_bytes: usize = VecZnx::<Vec<u8>>::alloc_bytes(self.n, self.cols_out, self.size);
let start: usize = nb_bytes * self.cols() * row + col * nb_bytes;
let end: usize = start + nb_bytes;
@@ -155,7 +146,7 @@ impl<D: DataMut> MatZnx<D> {
let size: usize = self.size();
let self_ref: MatZnx<&mut [u8]> = self.to_mut();
let nb_bytes: usize = VecZnx::<Vec<u8>>::alloc_bytes::<i64>(n, cols_out, size);
let nb_bytes: usize = VecZnx::<Vec<u8>>::alloc_bytes(n, cols_out, size);
let start: usize = nb_bytes * cols_in * row + col * nb_bytes;
let end: usize = start + nb_bytes;
@@ -175,6 +166,17 @@ impl<D: DataMut> FillUniform for MatZnx<D> {
}
}
impl<D: DataMut> Reset for MatZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.size = 0;
self.rows = 0;
self.cols_in = 0;
self.cols_out = 0;
}
}
pub type MatZnxOwned = MatZnx<Vec<u8>>;
pub type MatZnxMut<'a> = MatZnx<&'a mut [u8]>;
pub type MatZnxRef<'a> = MatZnx<&'a [u8]>;

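Since MatZnx::alloc_bytes is defined as rows * cols_in * VecZnx::alloc_bytes(n, cols_out, size), and VecZnx::alloc_bytes now hard-codes i64 coefficients, the sizes can be checked by hand. For example, with n = 1024, rows = 2, cols_in = 1, cols_out = 1, size = 3 (illustrative dimensions, not taken from the diff):

VecZnx::<Vec<u8>>::alloc_bytes(1024, 1, 3)  // 1024 * 1 * 3 * 8 = 24_576 bytes
MatZnx::alloc_bytes(1024, 2, 1, 1, 3)       // 2 * 1 * 24_576   = 49_152 bytes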
View File

@@ -6,7 +6,7 @@ use sampling::source::Source;
use crate::{
alloc_aligned,
hal::{
api::{DataView, DataViewMut, FillUniform, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
api::{DataView, DataViewMut, FillUniform, Reset, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Data, DataMut, DataRef, ReaderFrom, VecZnx, WriterTo},
},
};
@@ -107,15 +107,13 @@ impl<D: DataMut> ScalarZnx<D> {
}
}
impl<D: DataRef> ScalarZnx<D> {
pub fn bytes_of(n: usize, cols: usize) -> usize {
impl ScalarZnx<Vec<u8>> {
pub fn alloc_bytes(n: usize, cols: usize) -> usize {
n * cols * size_of::<i64>()
}
}
impl<D: DataRef + From<Vec<u8>>> ScalarZnx<D> {
pub fn alloc(n: usize, cols: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(Self::bytes_of(n, cols));
let data: Vec<u8> = alloc_aligned::<u8>(Self::alloc_bytes(n, cols));
Self {
data: data.into(),
n,
@@ -123,9 +121,9 @@ impl<D: DataRef + From<Vec<u8>>> ScalarZnx<D> {
}
}
pub(crate) fn from_bytes(n: usize, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
pub fn from_bytes(n: usize, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::bytes_of(n, cols));
assert!(data.len() == Self::alloc_bytes(n, cols));
Self {
data: data.into(),
n,
@@ -149,6 +147,14 @@ impl<D: DataMut> FillUniform for ScalarZnx<D> {
}
}
impl<D: DataMut> Reset for ScalarZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
}
}
pub type ScalarZnxOwned = ScalarZnx<Vec<u8>>;
impl<D: Data> ScalarZnx<D> {

View File

@@ -3,7 +3,7 @@ use std::fmt;
use crate::{
alloc_aligned,
hal::{
api::{DataView, DataViewMut, FillUniform, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
api::{DataView, DataViewMut, FillUniform, Reset, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Data, DataMut, DataRef, ReaderFrom, WriterTo},
},
};
@@ -79,15 +79,13 @@ impl<D: DataMut> ZnxZero for VecZnx<D> {
}
}
impl<D: DataRef> VecZnx<D> {
pub fn alloc_bytes<Scalar: Sized>(n: usize, cols: usize, size: usize) -> usize {
n * cols * size * size_of::<Scalar>()
impl VecZnx<Vec<u8>> {
pub fn alloc_bytes(n: usize, cols: usize, size: usize) -> usize {
n * cols * size * size_of::<i64>()
}
}
impl<D: DataRef + From<Vec<u8>>> VecZnx<D> {
pub fn alloc<Scalar: Sized>(n: usize, cols: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(Self::alloc_bytes::<Scalar>(n, cols, size));
pub fn alloc(n: usize, cols: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(Self::alloc_bytes(n, cols, size));
Self {
data: data.into(),
n,
@@ -99,7 +97,7 @@ impl<D: DataRef + From<Vec<u8>>> VecZnx<D> {
pub fn from_bytes<Scalar: Sized>(n: usize, cols: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::alloc_bytes::<Scalar>(n, cols, size));
assert!(data.len() == Self::alloc_bytes(n, cols, size));
Self {
data: data.into(),
n,
@@ -163,6 +161,16 @@ impl<D: DataMut> FillUniform for VecZnx<D> {
}
}
impl<D: DataMut> Reset for VecZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
self.size = 0;
self.max_size = 0;
}
}
pub type VecZnxOwned = VecZnx<Vec<u8>>;
pub type VecZnxMut<'a> = VecZnx<&'a mut [u8]>;
pub type VecZnxRef<'a> = VecZnx<&'a [u8]>;

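With the Scalar type parameter dropped, VecZnx is always laid out over i64 coefficients and the allocation calls simplify accordingly. A sketch of the call-site change, assuming the 1024/3/4 dimensions used by the tests below (not part of the diff):

let a: VecZnx<Vec<u8>> = VecZnx::alloc(1024, 3, 4);           // was: VecZnx::alloc::<i64>(1024, 3, 4)
let len: usize = VecZnx::<Vec<u8>>::alloc_bytes(1024, 3, 4);  // 1024 * 3 * 4 * 8 = 98_304 bytes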
View File

@@ -1,20 +0,0 @@
use crate::hal::layouts::{Backend, MatZnxOwned, Module};
pub unsafe trait MatZnxAllocImpl<B: Backend> {
fn mat_znx_alloc_impl(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxOwned;
}
pub unsafe trait MatZnxAllocBytesImpl<B: Backend> {
fn mat_znx_alloc_bytes_impl(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
}
pub unsafe trait MatZnxFromBytesImpl<B: Backend> {
fn mat_znx_from_bytes_impl(
module: &Module<B>,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> MatZnxOwned;
}

View File

@@ -1,6 +1,4 @@
mod mat_znx;
mod module;
mod scalar_znx;
mod scratch;
mod svp_ppol;
mod vec_znx;
@@ -8,9 +6,7 @@ mod vec_znx_big;
mod vec_znx_dft;
mod vmp_pmat;
pub use mat_znx::*;
pub use module::*;
pub use scalar_znx::*;
pub use scratch::*;
pub use svp_ppol::*;
pub use vec_znx::*;

View File

@@ -1,13 +0,0 @@
use crate::hal::layouts::{Backend, ScalarZnxOwned};
pub unsafe trait ScalarZnxFromBytesImpl<B: Backend> {
fn scalar_znx_from_bytes_impl(n: usize, cols: usize, bytes: Vec<u8>) -> ScalarZnxOwned;
}
pub unsafe trait ScalarZnxAllocBytesImpl<B: Backend> {
fn scalar_znx_alloc_bytes_impl(n: usize, cols: usize) -> usize;
}
pub unsafe trait ScalarZnxAllocImpl<B: Backend> {
fn scalar_znx_alloc_impl(n: usize, cols: usize) -> ScalarZnxOwned;
}

View File

@@ -2,32 +2,7 @@ use rand_distr::Distribution;
use rug::Float;
use sampling::source::Source;
use crate::hal::layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxOwned, VecZnxToMut, VecZnxToRef};
/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See [crate::hal::layouts::VecZnx::new] for reference code.
/// * See [crate::hal::api::VecZnxAlloc] for corresponding public API.
/// * See [crate::doc::backend_safety] for safety contract.
/// * See test \[TODO\]
pub unsafe trait VecZnxAllocImpl<B: Backend> {
fn vec_znx_alloc_impl(n: usize, cols: usize, size: usize) -> VecZnxOwned;
}
/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See [crate::hal::layouts::VecZnx::from_bytes] for reference code.
/// * See [crate::hal::api::VecZnxFromBytes] for corresponding public API.
/// * See [crate::doc::backend_safety] for safety contract.
pub unsafe trait VecZnxFromBytesImpl<B: Backend> {
fn vec_znx_from_bytes_impl(n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxOwned;
}
/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See [crate::hal::layouts::VecZnx::alloc_bytes] for reference code.
/// * See [crate::hal::api::VecZnxAllocBytes] for corresponding public API.
/// * See [crate::doc::backend_safety] for safety contract.
pub unsafe trait VecZnxAllocBytesImpl<B: Backend> {
fn vec_znx_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize;
}
use crate::hal::layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef};
/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
/// * See [vec_znx_normalize_base2k_tmp_bytes_ref](https://github.com/phantomzone-org/spqlios-arithmetic/blob/32a3f5fcce9863b58e949f2dfd5abc1bfbaa09b4/spqlios/arithmetic/vec_znx.c#L245C17-L245C55) for reference code.

View File

@@ -16,7 +16,7 @@ pub unsafe trait VecZnxDftAllocBytesImpl<B: Backend> {
}
pub unsafe trait VecZnxDftToVecZnxBigTmpBytesImpl<B: Backend> {
fn vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(module: &Module<B>) -> usize;
fn vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(module: &Module<B>, n: usize) -> usize;
}
pub unsafe trait VecZnxDftToVecZnxBigImpl<B: Backend> {

View File

@@ -22,7 +22,14 @@ pub unsafe trait VmpPMatFromBytesImpl<B: Backend> {
}
pub unsafe trait VmpPrepareTmpBytesImpl<B: Backend> {
fn vmp_prepare_tmp_bytes_impl(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
fn vmp_prepare_tmp_bytes_impl(
module: &Module<B>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> usize;
}
pub unsafe trait VmpPMatPrepareImpl<B: Backend> {
@@ -35,6 +42,7 @@ pub unsafe trait VmpPMatPrepareImpl<B: Backend> {
pub unsafe trait VmpApplyTmpBytesImpl<B: Backend> {
fn vmp_apply_tmp_bytes_impl(
module: &Module<B>,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,
@@ -55,6 +63,7 @@ pub unsafe trait VmpApplyImpl<B: Backend> {
pub unsafe trait VmpApplyAddTmpBytesImpl<B: Backend> {
fn vmp_apply_add_tmp_bytes_impl(
module: &Module<B>,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,

View File

@@ -3,7 +3,7 @@ use std::fmt::Debug;
use sampling::source::Source;
use crate::hal::{
api::{FillUniform, ZnxZero},
api::{FillUniform, Reset},
layouts::{ReaderFrom, WriterTo},
};
@@ -12,7 +12,7 @@ use crate::hal::{
/// - `T` must implement I/O traits, zeroing, cloning, and random filling.
pub fn test_reader_writer_interface<T>(mut original: T)
where
T: WriterTo + ReaderFrom + PartialEq + Eq + Debug + Clone + ZnxZero + FillUniform,
T: WriterTo + ReaderFrom + PartialEq + Eq + Debug + Clone + Reset + FillUniform,
{
// Fill original with uniform random data
let mut source = Source::new([0u8; 32]);
@@ -24,7 +24,7 @@ where
// Prepare receiver: clone of the original, then reset (data zeroed, shape cleared)
let mut receiver = original.clone();
receiver.zero();
receiver.reset();
// Deserialize from buffer
let mut reader: &[u8] = &buffer;
@@ -45,7 +45,7 @@ fn scalar_znx_serialize() {
#[test]
fn vec_znx_serialize() {
let original: crate::hal::layouts::VecZnx<Vec<u8>> = crate::hal::layouts::VecZnx::alloc::<i64>(1024, 3, 4);
let original: crate::hal::layouts::VecZnx<Vec<u8>> = crate::hal::layouts::VecZnx::alloc(1024, 3, 4);
test_reader_writer_interface(original);
}

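Given the commit message ("more serialization tests"), the other layouts presumably get the same treatment as vec_znx_serialize above. A hedged sketch of what such a test could look like for MatZnx, assuming it also implements WriterTo, ReaderFrom, Clone, PartialEq, Eq and Debug (its Reset and FillUniform impls are added in this commit); this is not part of the diff:

#[test]
fn mat_znx_serialize() {
    // 1024 / 2 / 1 / 1 / 3 are illustrative dimensions, not taken from the diff
    let original: crate::hal::layouts::MatZnx<Vec<u8>> = crate::hal::layouts::MatZnx::alloc(1024, 2, 1, 1, 3);
    test_reader_writer_interface(original);
}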
View File

@@ -2,25 +2,23 @@ use itertools::izip;
use sampling::source::Source;
use crate::hal::{
api::{
VecZnxAddNormal, VecZnxAlloc, VecZnxDecodeVeci64, VecZnxEncodeVeci64, VecZnxFillUniform, VecZnxStd, ZnxInfos, ZnxView,
ZnxViewMut,
},
api::{VecZnxAddNormal, VecZnxDecodeVeci64, VecZnxEncodeVeci64, VecZnxFillUniform, VecZnxStd, ZnxInfos, ZnxView, ZnxViewMut},
layouts::{Backend, Module, VecZnx},
};
pub fn test_vec_znx_fill_uniform<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxFillUniform + VecZnxStd + VecZnxAlloc,
Module<B>: VecZnxFillUniform + VecZnxStd,
{
let n: usize = module.n();
let basek: usize = 17;
let size: usize = 5;
let mut source: Source = Source::new([0u8; 32]);
let cols: usize = 2;
let zero: Vec<i64> = vec![0; module.n()];
let zero: Vec<i64> = vec![0; n];
let one_12_sqrt: f64 = 0.28867513459481287;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = module.vec_znx_alloc(cols, size);
let mut a: VecZnx<_> = VecZnx::alloc(n, cols, size);
module.vec_znx_fill_uniform(basek, &mut a, col_i, size * basek, &mut source);
(0..cols).for_each(|col_j| {
if col_j != col_i {
@@ -42,8 +40,9 @@ where
pub fn test_vec_znx_add_normal<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxAddNormal + VecZnxStd + VecZnxAlloc,
Module<B>: VecZnxAddNormal + VecZnxStd,
{
let n: usize = module.n();
let basek: usize = 17;
let k: usize = 2 * 17;
let size: usize = 5;
@@ -51,10 +50,10 @@ where
let bound: f64 = 6.0 * sigma;
let mut source: Source = Source::new([0u8; 32]);
let cols: usize = 2;
let zero: Vec<i64> = vec![0; module.n()];
let zero: Vec<i64> = vec![0; n];
let k_f64: f64 = (1u64 << k as u64) as f64;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = module.vec_znx_alloc(cols, size);
let mut a: VecZnx<_> = VecZnx::alloc(n, cols, size);
module.vec_znx_add_normal(basek, &mut a, col_i, k, &mut source, sigma, bound);
(0..cols).for_each(|col_j| {
if col_j != col_i {
@@ -71,21 +70,22 @@ where
pub fn test_vec_znx_encode_vec_i64_lo_norm<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxEncodeVeci64 + VecZnxDecodeVeci64 + VecZnxAlloc,
Module<B>: VecZnxEncodeVeci64 + VecZnxDecodeVeci64,
{
let n: usize = module.n();
let basek: usize = 17;
let size: usize = 5;
let k: usize = size * basek - 5;
let mut a: VecZnx<_> = module.vec_znx_alloc(2, size);
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, 2, size);
let mut source: Source = Source::new([0u8; 32]);
let raw: &mut [i64] = a.raw_mut();
raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
(0..a.cols()).for_each(|col_i| {
let mut have: Vec<i64> = vec![i64::default(); module.n()];
let mut have: Vec<i64> = vec![i64::default(); n];
have.iter_mut()
.for_each(|x| *x = (source.next_i64() << 56) >> 56);
module.encode_vec_i64(basek, &mut a, col_i, k, &have, 10);
let mut want: Vec<i64> = vec![i64::default(); module.n()];
let mut want: Vec<i64> = vec![i64::default(); n];
module.decode_vec_i64(basek, &a, col_i, k, &mut want);
izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
});
@@ -93,17 +93,18 @@ where
pub fn test_vec_znx_encode_vec_i64_hi_norm<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxEncodeVeci64 + VecZnxDecodeVeci64 + VecZnxAlloc,
Module<B>: VecZnxEncodeVeci64 + VecZnxDecodeVeci64,
{
let n: usize = module.n();
let basek: usize = 17;
let size: usize = 5;
for k in [1, basek / 2, size * basek - 5] {
let mut a: VecZnx<_> = module.vec_znx_alloc(2, size);
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, 2, size);
let mut source = Source::new([0u8; 32]);
let raw: &mut [i64] = a.raw_mut();
raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
(0..a.cols()).for_each(|col_i| {
let mut have: Vec<i64> = vec![i64::default(); module.n()];
let mut have: Vec<i64> = vec![i64::default(); n];
have.iter_mut().for_each(|x| {
if k < 64 {
*x = source.next_u64n(1 << k, (1 << k) - 1) as i64;
@@ -112,7 +113,7 @@ where
}
});
module.encode_vec_i64(basek, &mut a, col_i, k, &have, 63);
let mut want: Vec<i64> = vec![i64::default(); module.n()];
let mut want: Vec<i64> = vec![i64::default(); n];
module.decode_vec_i64(basek, &a, col_i, k, &mut want);
izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
})

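These helpers no longer need the VecZnxAlloc bound on Module<B>: their buffers come from VecZnx::alloc(module.n(), ...), so the same tests run against a module of any degree. A sketch of a driver that keeps only the remaining bounds (not part of the diff):

fn run_vec_znx_sampling_tests<B: Backend>(module: &Module<B>)
where
    Module<B>: VecZnxFillUniform + VecZnxStd + VecZnxAddNormal,
{
    test_vec_znx_fill_uniform(module); // reads n = module.n() internally
    test_vec_znx_add_normal(module);
}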
View File

@@ -56,7 +56,7 @@ unsafe extern "C" {
);
}
unsafe extern "C" {
pub unsafe fn vec_znx_idft_tmp_bytes(module: *const MODULE) -> u64;
pub unsafe fn vec_znx_idft_tmp_bytes(module: *const MODULE, n: u64) -> u64;
}
unsafe extern "C" {
pub unsafe fn vec_znx_idft_tmp_a(
@@ -67,19 +67,3 @@ unsafe extern "C" {
a_size: u64,
);
}
unsafe extern "C" {
pub unsafe fn vec_znx_dft_automorphism(
module: *const MODULE,
d: i64,
res_dft: *mut VEC_ZNX_DFT,
res_size: u64,
a_dft: *const VEC_ZNX_DFT,
a_size: u64,
tmp: *mut u8,
);
}
unsafe extern "C" {
pub unsafe fn vec_znx_dft_automorphism_tmp_bytes(module: *const MODULE) -> u64;
}

View File

@@ -86,6 +86,7 @@ unsafe extern "C" {
unsafe extern "C" {
pub unsafe fn vmp_apply_dft_to_dft_tmp_bytes(
module: *const MODULE,
nn: u64,
res_size: u64,
a_size: u64,
nrows: u64,
@@ -109,5 +110,5 @@ unsafe extern "C" {
}
unsafe extern "C" {
pub unsafe fn vmp_prepare_tmp_bytes(module: *const MODULE, nrows: u64, ncols: u64) -> u64;
pub unsafe fn vmp_prepare_tmp_bytes(module: *const MODULE, nn: u64, nrows: u64, ncols: u64) -> u64;
}

View File

@@ -1,41 +0,0 @@
use crate::{
hal::{
layouts::{Backend, MatZnxOwned, Module},
oep::{MatZnxAllocBytesImpl, MatZnxAllocImpl, MatZnxFromBytesImpl},
},
implementation::cpu_spqlios::CPUAVX,
};
unsafe impl<B: Backend> MatZnxAllocImpl<B> for B
where
B: CPUAVX,
{
fn mat_znx_alloc_impl(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxOwned {
MatZnxOwned::alloc(module.n(), rows, cols_in, cols_out, size)
}
}
unsafe impl<B: Backend> MatZnxAllocBytesImpl<B> for B
where
B: CPUAVX,
{
fn mat_znx_alloc_bytes_impl(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
MatZnxOwned::bytes_of(module.n(), rows, cols_in, cols_out, size)
}
}
unsafe impl<B: Backend> MatZnxFromBytesImpl<B> for B
where
B: CPUAVX,
{
fn mat_znx_from_bytes_impl(
module: &Module<B>,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> MatZnxOwned {
MatZnxOwned::from_bytes(module.n(), rows, cols_in, cols_out, size, bytes)
}
}

View File

@@ -1,8 +1,6 @@
mod ffi;
mod mat_znx;
mod module_fft64;
mod module_ntt120;
mod scalar_znx;
mod scratch;
mod svp_ppol_fft64;
mod svp_ppol_ntt120;

View File

@@ -1,34 +0,0 @@
use crate::{
hal::{
layouts::{Backend, ScalarZnxOwned},
oep::{ScalarZnxAllocBytesImpl, ScalarZnxAllocImpl, ScalarZnxFromBytesImpl},
},
implementation::cpu_spqlios::CPUAVX,
};
unsafe impl<B: Backend> ScalarZnxAllocBytesImpl<B> for B
where
B: CPUAVX,
{
fn scalar_znx_alloc_bytes_impl(n: usize, cols: usize) -> usize {
ScalarZnxOwned::bytes_of(n, cols)
}
}
unsafe impl<B: Backend> ScalarZnxAllocImpl<B> for B
where
B: CPUAVX,
{
fn scalar_znx_alloc_impl(n: usize, cols: usize) -> ScalarZnxOwned {
ScalarZnxOwned::alloc(n, cols)
}
}
unsafe impl<B: Backend> ScalarZnxFromBytesImpl<B> for B
where
B: CPUAVX,
{
fn scalar_znx_from_bytes_impl(n: usize, cols: usize, bytes: Vec<u8>) -> ScalarZnxOwned {
ScalarZnxOwned::from_bytes(n, cols, bytes)
}
}

View File

@@ -6,10 +6,10 @@ use crate::{
api::ScratchFromBytes,
layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
oep::{
ScalarZnxAllocBytesImpl, ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl,
SvpPPolAllocBytesImpl, TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl,
TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl,
VecZnxAllocBytesImpl, VecZnxBigAllocBytesImpl, VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, SvpPPolAllocBytesImpl,
TakeMatZnxImpl, TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl, VecZnxBigAllocBytesImpl,
VecZnxDftAllocBytesImpl, VmpPMatAllocBytesImpl,
},
},
implementation::cpu_spqlios::CPUAVX,
@@ -76,10 +76,10 @@ where
unsafe impl<B: Backend> TakeScalarZnxImpl<B> for B
where
B: CPUAVX + ScalarZnxAllocBytesImpl<B>,
B: CPUAVX,
{
fn take_scalar_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, B::scalar_znx_alloc_bytes_impl(n, cols));
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, ScalarZnx::alloc_bytes(n, cols));
(
ScalarZnx::from_data(take_slice, n, cols),
Scratch::from_bytes(rem_slice),
@@ -102,13 +102,10 @@ where
unsafe impl<B: Backend> TakeVecZnxImpl<B> for B
where
B: CPUAVX + VecZnxAllocBytesImpl<B>,
B: CPUAVX,
{
fn take_vec_znx_impl(scratch: &mut Scratch<B>, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
B::vec_znx_alloc_bytes_impl(n, cols, size),
);
let (take_slice, rem_slice) = take_slice_aligned(&mut scratch.data, VecZnx::alloc_bytes(n, cols, size));
(
VecZnx::from_data(take_slice, n, cols, size),
Scratch::from_bytes(rem_slice),
@@ -240,7 +237,7 @@ where
) -> (MatZnx<&mut [u8]>, &mut Scratch<B>) {
let (take_slice, rem_slice) = take_slice_aligned(
&mut scratch.data,
MatZnx::<Vec<u8>>::bytes_of(n, rows, cols_in, cols_out, size),
MatZnx::alloc_bytes(n, rows, cols_in, cols_out, size),
);
(
MatZnx::from_data(take_slice, n, rows, cols_in, cols_out, size),

View File

@@ -14,16 +14,16 @@ use crate::{
VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxSwithcDegree, ZnxInfos, ZnxSliceSize, ZnxView,
ZnxViewMut, ZnxZero,
},
layouts::{Backend, Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, VecZnxOwned, VecZnxToMut, VecZnxToRef},
layouts::{Backend, Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, VecZnxToMut, VecZnxToRef},
oep::{
VecZnxAddDistF64Impl, VecZnxAddImpl, VecZnxAddInplaceImpl, VecZnxAddNormalImpl, VecZnxAddScalarInplaceImpl,
VecZnxAllocBytesImpl, VecZnxAllocImpl, VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxCopyImpl,
VecZnxDecodeCoeffsi64Impl, VecZnxDecodeVecFloatImpl, VecZnxDecodeVeci64Impl, VecZnxEncodeCoeffsi64Impl,
VecZnxEncodeVeci64Impl, VecZnxFillDistF64Impl, VecZnxFillNormalImpl, VecZnxFillUniformImpl, VecZnxFromBytesImpl,
VecZnxLshInplaceImpl, VecZnxMergeImpl, VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl, VecZnxNegateImpl,
VecZnxNegateInplaceImpl, VecZnxNormalizeImpl, VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl,
VecZnxRotateImpl, VecZnxRotateInplaceImpl, VecZnxRshInplaceImpl, VecZnxSplitImpl, VecZnxStdImpl,
VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarInplaceImpl, VecZnxSwithcDegreeImpl,
VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxCopyImpl, VecZnxDecodeCoeffsi64Impl,
VecZnxDecodeVecFloatImpl, VecZnxDecodeVeci64Impl, VecZnxEncodeCoeffsi64Impl, VecZnxEncodeVeci64Impl,
VecZnxFillDistF64Impl, VecZnxFillNormalImpl, VecZnxFillUniformImpl, VecZnxLshInplaceImpl, VecZnxMergeImpl,
VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl,
VecZnxNormalizeImpl, VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl,
VecZnxRotateInplaceImpl, VecZnxRshInplaceImpl, VecZnxSplitImpl, VecZnxStdImpl, VecZnxSubABInplaceImpl,
VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarInplaceImpl, VecZnxSwithcDegreeImpl,
},
},
implementation::cpu_spqlios::{
@@ -32,33 +32,6 @@ use crate::{
},
};
unsafe impl<B: Backend> VecZnxAllocImpl<B> for B
where
B: CPUAVX,
{
fn vec_znx_alloc_impl(n: usize, cols: usize, size: usize) -> VecZnxOwned {
VecZnxOwned::alloc::<i64>(n, cols, size)
}
}
unsafe impl<B: Backend> VecZnxFromBytesImpl<B> for B
where
B: CPUAVX,
{
fn vec_znx_from_bytes_impl(n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxOwned {
VecZnxOwned::from_bytes::<i64>(n, cols, size, bytes)
}
}
unsafe impl<B: Backend> VecZnxAllocBytesImpl<B> for B
where
B: CPUAVX,
{
fn vec_znx_alloc_bytes_impl(n: usize, cols: usize, size: usize) -> usize {
VecZnxOwned::alloc_bytes::<i64>(n, cols, size)
}
}
unsafe impl<B: Backend> VecZnxNormalizeTmpBytesImpl<B> for B
where
B: CPUAVX,
@@ -156,9 +129,8 @@ where
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -192,8 +164,7 @@ where
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_add(
@@ -232,8 +203,7 @@ where
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
@@ -269,9 +239,8 @@ where
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -304,8 +273,7 @@ where
let mut res: VecZnx<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_sub(
@@ -337,8 +305,7 @@ where
let mut res: VecZnx<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_sub(
@@ -377,8 +344,7 @@ where
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
@@ -411,8 +377,7 @@ where
let mut res: VecZnx<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_negate(
@@ -437,10 +402,6 @@ where
A: VecZnxToMut,
{
let mut a: VecZnx<&mut [u8]> = a.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
}
unsafe {
vec_znx::vec_znx_negate(
module.ptr() as *const module_info_t,
@@ -604,8 +565,7 @@ where
let mut res: VecZnx<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_automorphism(
@@ -633,7 +593,6 @@ where
let mut a: VecZnx<&mut [u8]> = a.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert!(
k & 1 != 0,
"invalid galois element: must be odd but is {}",
@@ -668,8 +627,8 @@ where
let mut res: VecZnx<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(res.n(), res.n());
}
unsafe {
vec_znx::vec_znx_mul_xp_minus_one(
@@ -697,7 +656,7 @@ where
let mut res: VecZnx<&mut [u8]> = res.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(res.n(), module.n());
assert_eq!(res.n(), res.n());
}
unsafe {
vec_znx::vec_znx_mul_xp_minus_one(
@@ -749,7 +708,7 @@ pub fn vec_znx_split_ref<R, A, B: Backend>(
let (n_in, n_out) = (a.n(), res[0].to_mut().n());
let (mut buf, _) = scratch.take_vec_znx(module, 1, a.size());
let (mut buf, _) = scratch.take_vec_znx(n_in.max(n_out), 1, a.size());
debug_assert!(
n_out < n_in,

View File

@@ -210,9 +210,8 @@ unsafe impl VecZnxBigAddImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -244,8 +243,7 @@ unsafe impl VecZnxBigAddInplaceImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_add(
@@ -285,9 +283,8 @@ unsafe impl VecZnxBigAddSmallImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -319,8 +316,7 @@ unsafe impl VecZnxBigAddSmallInplaceImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_add(
@@ -360,9 +356,8 @@ unsafe impl VecZnxBigSubImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -394,8 +389,7 @@ unsafe impl VecZnxBigSubABInplaceImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_sub(
@@ -426,8 +420,7 @@ unsafe impl VecZnxBigSubBAInplaceImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_sub(
@@ -467,9 +460,8 @@ unsafe impl VecZnxBigSubSmallAImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -501,8 +493,7 @@ unsafe impl VecZnxBigSubSmallAInplaceImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_sub(
@@ -542,9 +533,8 @@ unsafe impl VecZnxBigSubSmallBImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(b.n(), res.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
@@ -576,8 +566,7 @@ unsafe impl VecZnxBigSubSmallBInplaceImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_sub(
@@ -602,10 +591,6 @@ unsafe impl VecZnxBigNegateInplaceImpl<FFT64> for FFT64 {
A: VecZnxBigToMut<FFT64>,
{
let mut a: VecZnxBig<&mut [u8], FFT64> = a.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
}
unsafe {
vec_znx::vec_znx_negate(
module.ptr(),
@@ -677,8 +662,7 @@ unsafe impl VecZnxBigAutomorphismImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), res.n());
}
unsafe {
vec_znx::vec_znx_automorphism(
@@ -702,11 +686,6 @@ unsafe impl VecZnxBigAutomorphismInplaceImpl<FFT64> for FFT64 {
A: VecZnxBigToMut<FFT64>,
{
let mut a: VecZnxBig<&mut [u8], FFT64> = a.to_mut();
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
}
unsafe {
vec_znx::vec_znx_automorphism(
module.ptr(),

View File

@@ -57,8 +57,8 @@ unsafe impl VecZnxDftAllocImpl<FFT64> for FFT64 {
}
unsafe impl VecZnxDftToVecZnxBigTmpBytesImpl<FFT64> for FFT64 {
fn vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(module: &Module<FFT64>) -> usize {
unsafe { vec_znx_dft::vec_znx_idft_tmp_bytes(module.ptr()) as usize }
fn vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(module: &Module<FFT64>, n: usize) -> usize {
unsafe { vec_znx_dft::vec_znx_idft_tmp_bytes(module.ptr(), n as u64) as usize }
}
}
@@ -74,26 +74,31 @@ unsafe impl VecZnxDftToVecZnxBigImpl<FFT64> for FFT64 {
R: VecZnxBigToMut<FFT64>,
A: VecZnxDftToRef<FFT64>,
{
let mut res_mut: VecZnxBig<&mut [u8], FFT64> = res.to_mut();
let a_ref: VecZnxDft<&[u8], FFT64> = a.to_ref();
let mut res: VecZnxBig<&mut [u8], FFT64> = res.to_mut();
let a: VecZnxDft<&[u8], FFT64> = a.to_ref();
let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_dft_to_vec_znx_big_tmp_bytes());
#[cfg(debug_assertions)]
{
assert_eq!(res.n(), a.n())
}
let min_size: usize = res_mut.size().min(a_ref.size());
let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_dft_to_vec_znx_big_tmp_bytes(a.n()));
let min_size: usize = res.size().min(a.size());
unsafe {
(0..min_size).for_each(|j| {
vec_znx_dft::vec_znx_idft(
module.ptr(),
res_mut.at_mut_ptr(res_col, j) as *mut vec_znx_big::vec_znx_big_t,
res.at_mut_ptr(res_col, j) as *mut vec_znx_big::vec_znx_big_t,
1 as u64,
a_ref.at_ptr(a_col, j) as *const vec_znx_dft::vec_znx_dft_t,
a.at_ptr(a_col, j) as *const vec_znx_dft::vec_znx_dft_t,
1 as u64,
tmp_bytes.as_mut_ptr(),
)
});
(min_size..res_mut.size()).for_each(|j| {
res_mut.zero_at(res_col, j);
(min_size..res.size()).for_each(|j| {
res.zero_at(res_col, j);
});
}
}
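Note on the new convention: the temporary-buffer query for the IDFT is now parameterized by the ring degree instead of reading it from the module. A minimal caller-side sketch (illustrative only; `a_dft` and `res_big` are assumed to be a VecZnxDft/VecZnxBig pair of matching degree allocated elsewhere):

let n: usize = a_dft.n();
// Size the scratch from the explicit degree, then run the IDFT into the big-coefficient buffer.
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(module.vec_znx_dft_to_vec_znx_big_tmp_bytes(n));
module.vec_znx_dft_to_vec_znx_big(&mut res_big, 0, &a_dft, 0, scratch.borrow());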

View File

@@ -57,10 +57,18 @@ unsafe impl VmpPMatAllocImpl<FFT64> for FFT64 {
}
unsafe impl VmpPrepareTmpBytesImpl<FFT64> for FFT64 {
fn vmp_prepare_tmp_bytes_impl(module: &Module<FFT64>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
fn vmp_prepare_tmp_bytes_impl(
module: &Module<FFT64>,
n: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
) -> usize {
unsafe {
vmp::vmp_prepare_tmp_bytes(
module.ptr(),
n as u64,
(rows * cols_in) as u64,
(cols_out * size) as u64,
) as usize
@@ -79,8 +87,7 @@ unsafe impl VmpPMatPrepareImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(res.n(), module.n());
assert_eq!(a.n(), module.n());
assert_eq!(a.n(), res.n());
assert_eq!(
res.cols_in(),
a.cols_in(),
@@ -111,7 +118,8 @@ unsafe impl VmpPMatPrepareImpl<FFT64> for FFT64 {
);
}
let (tmp_bytes, _) = scratch.take_slice(module.vmp_prepare_tmp_bytes(a.rows(), a.cols_in(), a.cols_out(), a.size()));
let (tmp_bytes, _) =
scratch.take_slice(module.vmp_prepare_tmp_bytes(res.n(), a.rows(), a.cols_in(), a.cols_out(), a.size()));
unsafe {
vmp::vmp_prepare_contiguous(
@@ -129,6 +137,7 @@ unsafe impl VmpPMatPrepareImpl<FFT64> for FFT64 {
unsafe impl VmpApplyTmpBytesImpl<FFT64> for FFT64 {
fn vmp_apply_tmp_bytes_impl(
module: &Module<FFT64>,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,
@@ -139,6 +148,7 @@ unsafe impl VmpApplyTmpBytesImpl<FFT64> for FFT64 {
unsafe {
vmp::vmp_apply_dft_to_dft_tmp_bytes(
module.ptr(),
n as u64,
(res_size * b_cols_out) as u64,
(a_size * b_cols_in) as u64,
(b_rows * b_cols_in) as u64,
@@ -161,9 +171,8 @@ unsafe impl VmpApplyImpl<FFT64> for FFT64 {
#[cfg(debug_assertions)]
{
assert_eq!(res.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), res.n());
assert_eq!(a.n(), res.n());
assert_eq!(
res.cols(),
b.cols_out(),
@@ -181,6 +190,7 @@ unsafe impl VmpApplyImpl<FFT64> for FFT64 {
}
let (tmp_bytes, _) = scratch.take_slice(module.vmp_apply_tmp_bytes(
res.n(),
res.size(),
a.size(),
b.rows(),
@@ -207,6 +217,7 @@ unsafe impl VmpApplyImpl<FFT64> for FFT64 {
unsafe impl VmpApplyAddTmpBytesImpl<FFT64> for FFT64 {
fn vmp_apply_add_tmp_bytes_impl(
module: &Module<FFT64>,
n: usize,
res_size: usize,
a_size: usize,
b_rows: usize,
@@ -217,6 +228,7 @@ unsafe impl VmpApplyAddTmpBytesImpl<FFT64> for FFT64 {
unsafe {
vmp::vmp_apply_dft_to_dft_tmp_bytes(
module.ptr(),
n as u64,
(res_size * b_cols_out) as u64,
(a_size * b_cols_in) as u64,
(b_rows * b_cols_in) as u64,
@@ -241,9 +253,8 @@ unsafe impl VmpApplyAddImpl<FFT64> for FFT64 {
{
use crate::hal::api::ZnxInfos;
assert_eq!(res.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), res.n());
assert_eq!(a.n(), res.n());
assert_eq!(
res.cols(),
b.cols_out(),
@@ -261,6 +272,7 @@ unsafe impl VmpApplyAddImpl<FFT64> for FFT64 {
}
let (tmp_bytes, _) = scratch.take_slice(module.vmp_apply_tmp_bytes(
res.n(),
res.size(),
a.size(),
b.rows(),

View File

@@ -3,7 +3,7 @@ use std::hint::black_box;
use backend::{
hal::{
api::{ModuleNew, ScalarZnxAlloc, ScratchOwnedAlloc, ScratchOwnedBorrow},
api::{ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow},
layouts::{Module, ScalarZnx, ScratchOwned},
},
implementation::cpu_spqlios::FFT64,
@@ -26,6 +26,7 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64> = Module::<FFT64>::new(1 << p.log_n);
let n: usize = module.n();
let basek: usize = p.basek;
let k_ct_in: usize = p.k_ct_in;
let k_ct_out: usize = p.k_ct_out;
@@ -36,16 +37,17 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
let rows: usize = 1; //(p.k_ct_in.div_ceil(p.basek);
let sigma: f64 = 3.2;
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank);
let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank);
let pt_rgsw: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct_in, rank);
let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct_out, rank);
let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(&module, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_glwe_in.k())
GGSWCiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_glwe_in.k())
| GLWECiphertext::external_product_scratch_space(
&module,
n,
basek,
ct_glwe_out.k(),
ct_glwe_in.k(),
@@ -59,7 +61,7 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
let mut source_xe = Source::new([0u8; 32]);
let mut source_xa = Source::new([0u8; 32]);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_dft: GLWESecretExec<Vec<u8>, FFT64> = GLWESecretExec::from(&module, &sk);
@@ -121,6 +123,7 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64> = Module::<FFT64>::new(1 << p.log_n);
let n = module.n();
let basek: usize = p.basek;
let k_glwe: usize = p.k_ct;
let k_ggsw: usize = p.k_ggsw;
@@ -130,21 +133,29 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
let rows: usize = p.k_ct.div_ceil(p.basek);
let sigma: f64 = 3.2;
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_glwe, rank);
let pt_rgsw: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_glwe, rank);
let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(&module, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_glwe.k())
| GLWECiphertext::external_product_inplace_scratch_space(&module, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
GGSWCiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_glwe.k())
| GLWECiphertext::external_product_inplace_scratch_space(
&module,
n,
basek,
ct_glwe.k(),
ct_ggsw.k(),
digits,
rank,
),
);
let mut source_xs = Source::new([0u8; 32]);
let mut source_xe = Source::new([0u8; 32]);
let mut source_xa = Source::new([0u8; 32]);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_dft: GLWESecretExec<Vec<u8>, FFT64> = GLWESecretExec::from(&module, &sk);

View File

@@ -31,6 +31,7 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64> = Module::<FFT64>::new(1 << p.log_n);
let n = module.n();
let basek: usize = p.basek;
let k_rlwe_in: usize = p.k_ct_in;
let k_rlwe_out: usize = p.k_ct_out;
@@ -42,15 +43,16 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
let rows: usize = p.k_ct_in.div_ceil(p.basek * digits);
let sigma: f64 = 3.2;
let mut ksk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(&module, basek, k_grlwe, rows, digits, rank_out);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_rlwe_in, rank_in);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_rlwe_out, rank_out);
let mut ksk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_grlwe, rows, digits, rank_out);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_rlwe_in, rank_in);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_rlwe_out, rank_out);
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
GLWESwitchingKey::encrypt_sk_scratch_space(&module, basek, ksk.k(), rank_in, rank_out)
| GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_in.k())
GLWESwitchingKey::encrypt_sk_scratch_space(&module, n, basek, ksk.k(), rank_in, rank_out)
| GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_in.k())
| GLWECiphertext::keyswitch_scratch_space(
&module,
n,
basek,
ct_out.k(),
ct_in.k(),
@@ -65,11 +67,11 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
let mut source_xe = Source::new([0u8; 32]);
let mut source_xa = Source::new([0u8; 32]);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank_in);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk_in.fill_ternary_prob(0.5, &mut source_xs);
let sk_in_dft: GLWESecretExec<Vec<u8>, FFT64> = GLWESecretExec::from(&module, &sk_in);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank_out);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk_out.fill_ternary_prob(0.5, &mut source_xs);
ksk.encrypt_sk(
@@ -137,6 +139,7 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
fn runner(p: Params) -> impl FnMut() {
let module: Module<FFT64> = Module::<FFT64>::new(1 << p.log_n);
let n = module.n();
let basek: usize = p.basek;
let k_ct: usize = p.k_ct;
let k_ksk: usize = p.k_ksk;
@@ -146,24 +149,24 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
let rows: usize = p.k_ct.div_ceil(p.basek);
let sigma: f64 = 3.2;
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, digits, rank, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
GLWESwitchingKey::encrypt_sk_scratch_space(&module, basek, ksk.k(), rank, rank)
| GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct.k())
| GLWECiphertext::keyswitch_inplace_scratch_space(&module, basek, ct.k(), ksk.k(), digits, rank),
GLWESwitchingKey::encrypt_sk_scratch_space(&module, n, basek, ksk.k(), rank, rank)
| GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct.k())
| GLWECiphertext::keyswitch_inplace_scratch_space(&module, n, basek, ct.k(), ksk.k(), digits, rank),
);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_in.fill_ternary_prob(0.5, &mut source_xs);
let sk_in_dft: GLWESecretExec<Vec<u8>, FFT64> = GLWESecretExec::from(&module, &sk_in);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_out.fill_ternary_prob(0.5, &mut source_xs);
ksk.encrypt_sk(

View File

@@ -1,20 +1,20 @@
use backend::hal::{
api::{
ScratchAvailable, SvpApply, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
TakeVecZnxSlice, VecZnxAddInplace, VecZnxAllocBytes, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftFromVecZnx,
VecZnxDftSubABInplace, VecZnxDftToVecZnxBig, VecZnxDftToVecZnxBigTmpBytes, VecZnxDftZero, VecZnxMulXpMinusOneInplace,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxRotate, VecZnxSubABInplace, VmpApplyTmpBytes, ZnxView, ZnxZero,
TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalizeTmpBytes, VecZnxCopy,
VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftSubABInplace, VecZnxDftToVecZnxBig,
VecZnxDftToVecZnxBigTmpBytes, VecZnxDftZero, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxRotate, VecZnxSubABInplace, VmpApplyTmpBytes, ZnxView, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, SvpPPol},
layouts::{Backend, DataMut, DataRef, Module, Scratch, SvpPPol, VecZnx},
};
use itertools::izip;
use crate::{
GLWECiphertext, GLWECiphertextToMut, GLWEExternalProductFamily, GLWEOps, Infos, LWECiphertext, TakeGLWECt,
GLWECiphertext, GLWECiphertextToMut, GLWEExternalProductFamily, GLWEOps, Infos, LWECiphertext, LWECiphertextToRef,
TakeGLWECt,
blind_rotation::{key::BlindRotationKeyCGGIExec, lut::LookUpTable},
dist::Distribution,
lwe::ciphertext::LWECiphertextToRef,
};
pub trait CCGIBlindRotationFamily<B: Backend> = VecZnxBigAllocBytes
@@ -42,6 +42,7 @@ pub trait CCGIBlindRotationFamily<B: Backend> = VecZnxBigAllocBytes
pub fn cggi_blind_rotate_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
block_size: usize,
extension_factor: usize,
basek: usize,
@@ -51,22 +52,22 @@ pub fn cggi_blind_rotate_scratch_space<B: Backend>(
rank: usize,
) -> usize
where
Module<B>: CCGIBlindRotationFamily<B> + VecZnxAllocBytes,
Module<B>: CCGIBlindRotationFamily<B>,
{
let brk_size: usize = k_brk.div_ceil(basek);
if block_size > 1 {
let cols: usize = rank + 1;
let acc_dft: usize = module.vec_znx_dft_alloc_bytes(cols, rows) * extension_factor;
let acc_big: usize = module.vec_znx_big_alloc_bytes(1, brk_size);
let vmp_res: usize = module.vec_znx_dft_alloc_bytes(cols, brk_size) * extension_factor;
let vmp_xai: usize = module.vec_znx_dft_alloc_bytes(1, brk_size);
let acc_dft: usize = module.vec_znx_dft_alloc_bytes(n, cols, rows) * extension_factor;
let acc_big: usize = module.vec_znx_big_alloc_bytes(n, 1, brk_size);
let vmp_res: usize = module.vec_znx_dft_alloc_bytes(n, cols, brk_size) * extension_factor;
let vmp_xai: usize = module.vec_znx_dft_alloc_bytes(n, 1, brk_size);
let acc_dft_add: usize = vmp_res;
let vmp: usize = module.vmp_apply_tmp_bytes(brk_size, rows, rows, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
let vmp: usize = module.vmp_apply_tmp_bytes(n, brk_size, rows, rows, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
let acc: usize;
if extension_factor > 1 {
acc = module.vec_znx_alloc_bytes(cols, k_res.div_ceil(basek)) * extension_factor;
acc = VecZnx::alloc_bytes(n, cols, k_res.div_ceil(basek)) * extension_factor;
} else {
acc = 0;
}
@@ -76,12 +77,10 @@ where
+ acc_dft_add
+ vmp_res
+ vmp_xai
+ (vmp
| (acc_big
+ (module.vec_znx_big_normalize_tmp_bytes(module.n()) | module.vec_znx_dft_to_vec_znx_big_tmp_bytes())));
+ (vmp | (acc_big + (module.vec_znx_big_normalize_tmp_bytes(n) | module.vec_znx_dft_to_vec_znx_big_tmp_bytes(n))));
} else {
GLWECiphertext::bytes_of(module, basek, k_res, rank)
+ GLWECiphertext::external_product_scratch_space(module, basek, k_res, k_res, k_brk, 1, rank)
GLWECiphertext::bytes_of(n, basek, k_res, rank)
+ GLWECiphertext::external_product_scratch_space(module, n, basek, k_res, k_res, k_brk, 1, rank)
}
}
@@ -97,8 +96,7 @@ pub fn cggi_blind_rotate<DataRes, DataIn, DataBrk, B: Backend>(
DataIn: DataRef,
DataBrk: DataRef,
Module<B>: CCGIBlindRotationFamily<B>,
Scratch<B>:
TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx<B> + ScratchAvailable + TakeVecZnxSlice<B>,
Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx + ScratchAvailable + TakeVecZnxSlice,
{
match brk.dist {
Distribution::BinaryBlock(_) | Distribution::BinaryFixed(_) | Distribution::BinaryProb(_) | Distribution::ZERO => {
@@ -129,18 +127,19 @@ pub(crate) fn cggi_blind_rotate_block_binary_extended<DataRes, DataIn, DataBrk,
DataIn: DataRef,
DataBrk: DataRef,
Module<B>: CCGIBlindRotationFamily<B>,
Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice<B>,
Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice,
{
let n_glwe: usize = brk.n();
let extension_factor: usize = lut.extension_factor();
let basek: usize = res.basek();
let rows: usize = brk.rows();
let cols: usize = res.rank() + 1;
let (mut acc, scratch1) = scratch.take_vec_znx_slice(extension_factor, module, cols, res.size());
let (mut acc_dft, scratch2) = scratch1.take_vec_znx_dft_slice(extension_factor, module, cols, rows);
let (mut vmp_res, scratch3) = scratch2.take_vec_znx_dft_slice(extension_factor, module, cols, brk.size());
let (mut acc_add_dft, scratch4) = scratch3.take_vec_znx_dft_slice(extension_factor, module, cols, brk.size());
let (mut vmp_xai, scratch5) = scratch4.take_vec_znx_dft(module, 1, brk.size());
let (mut acc, scratch1) = scratch.take_vec_znx_slice(extension_factor, n_glwe, cols, res.size());
let (mut acc_dft, scratch2) = scratch1.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, rows);
let (mut vmp_res, scratch3) = scratch2.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
let (mut acc_add_dft, scratch4) = scratch3.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
let (mut vmp_xai, scratch5) = scratch4.take_vec_znx_dft(n_glwe, 1, brk.size());
(0..extension_factor).for_each(|i| {
acc[i].zero();
@@ -156,7 +155,7 @@ pub(crate) fn cggi_blind_rotate_block_binary_extended<DataRes, DataIn, DataBrk,
let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();
let two_n: usize = 2 * module.n();
let two_n: usize = 2 * n_glwe;
let two_n_ext: usize = 2 * lut.domain_size();
negate_and_mod_switch_2n(two_n_ext, &mut lwe_2n, &lwe_ref);
@@ -244,7 +243,7 @@ pub(crate) fn cggi_blind_rotate_block_binary_extended<DataRes, DataIn, DataBrk,
});
{
let (mut acc_add_big, scratch7) = scratch5.take_vec_znx_big(module, 1, brk.size());
let (mut acc_add_big, scratch7) = scratch5.take_vec_znx_big(n_glwe, 1, brk.size());
(0..extension_factor).for_each(|j| {
(0..cols).for_each(|i| {
@@ -275,12 +274,13 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn, DataBrk, B: Backen
Module<B>: CCGIBlindRotationFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B>,
{
let n_glwe: usize = brk.n();
let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
let mut out_mut: GLWECiphertext<&mut [u8]> = res.to_mut();
let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();
let two_n: usize = module.n() << 1;
let two_n: usize = n_glwe << 1;
let basek: usize = brk.basek();
let rows = brk.rows();
let rows: usize = brk.rows();
let cols: usize = out_mut.rank() + 1;
@@ -298,10 +298,10 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn, DataBrk, B: Backen
// ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
let (mut acc_dft, scratch1) = scratch.take_vec_znx_dft(module, cols, rows);
let (mut vmp_res, scratch2) = scratch1.take_vec_znx_dft(module, cols, brk.size());
let (mut acc_add_dft, scratch3) = scratch2.take_vec_znx_dft(module, cols, brk.size());
let (mut vmp_xai, scratch4) = scratch3.take_vec_znx_dft(module, 1, brk.size());
let (mut acc_dft, scratch1) = scratch.take_vec_znx_dft(n_glwe, cols, rows);
let (mut vmp_res, scratch2) = scratch1.take_vec_znx_dft(n_glwe, cols, brk.size());
let (mut acc_add_dft, scratch3) = scratch2.take_vec_znx_dft(n_glwe, cols, brk.size());
let (mut vmp_xai, scratch4) = scratch3.take_vec_znx_dft(n_glwe, 1, brk.size());
let x_pow_a: &Vec<SvpPPol<Vec<u8>, B>>;
if let Some(b) = &brk.x_pow_a {
@@ -336,7 +336,7 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn, DataBrk, B: Backen
});
{
let (mut acc_add_big, scratch5) = scratch4.take_vec_znx_big(module, 1, brk.size());
let (mut acc_add_big, scratch5) = scratch4.take_vec_znx_big(n_glwe, 1, brk.size());
(0..cols).for_each(|i| {
module.vec_znx_dft_to_vec_znx_big(&mut acc_add_big, 0, &acc_add_dft, i, scratch5);
@@ -359,30 +359,23 @@ pub(crate) fn cggi_blind_rotate_binary_standard<DataRes, DataIn, DataBrk, B: Bac
DataIn: DataRef,
DataBrk: DataRef,
Module<B>: CCGIBlindRotationFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx<B> + ScratchAvailable,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx + ScratchAvailable,
{
#[cfg(debug_assertions)]
{
assert_eq!(
res.n(),
module.n(),
brk.n(),
"res.n(): {} != brk.n(): {}",
res.n(),
module.n()
brk.n()
);
assert_eq!(
lut.domain_size(),
module.n(),
brk.n(),
"lut.n(): {} != brk.n(): {}",
lut.domain_size(),
module.n()
);
assert_eq!(
brk.n(),
module.n(),
"brk.n(): {} != brk.n(): {}",
brk.n(),
module.n()
brk.n()
);
assert_eq!(
res.rank(),
@@ -416,7 +409,7 @@ pub(crate) fn cggi_blind_rotate_binary_standard<DataRes, DataIn, DataBrk, B: Bac
module.vec_znx_rotate(b, &mut out_mut.data, 0, &lut.data[0], 0);
// ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
let (mut acc_tmp, scratch1) = scratch.take_glwe_ct(module, basek, out_mut.k(), out_mut.rank());
let (mut acc_tmp, scratch1) = scratch.take_glwe_ct(out_mut.n(), basek, out_mut.k(), out_mut.rank());
// TODO: see if faster by skipping normalization in external product and keeping acc in big coeffs
// TODO: first iteration can be optimized to be a gglwe product
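Since the blind-rotation internals above now read the GLWE ring degree from the bootstrapping key (`brk.n()`) rather than from the module, a caller only needs the module for the transforms themselves. A condensed usage sketch, mirroring the generic test later in this diff (the key, LWE ciphertext and scratch setup are as in that test; the sizes are placeholders, not values fixed by this file):

let n: usize = module.n();
let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
lut.set(module, &f, message_modulus);
let mut res: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_res, rank);
cggi_blind_rotate(module, &mut res, &lwe, &lut, &brk_exec, scratch_br.borrow());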

View File

@@ -1,7 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScratchAvailable, SvpPPolAlloc, SvpPrepare, TakeVecZnx, TakeVecZnxDft,
VecZnxAddScalarInplace, VecZnxAllocBytes, ZnxView, ZnxViewMut,
ScratchAvailable, SvpPPolAlloc, SvpPrepare, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, ZnxInfos, ZnxView,
ZnxViewMut,
},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, ScalarZnx, ScalarZnxToRef, Scratch, SvpPPol, WriterTo},
};
@@ -69,23 +69,20 @@ impl<D: DataRef> WriterTo for BlindRotationKeyCGGI<D> {
}
impl BlindRotationKeyCGGI<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
let mut data: Vec<GGSWCiphertext<Vec<u8>>> = Vec::with_capacity(n_lwe);
(0..n_lwe).for_each(|_| data.push(GGSWCiphertext::alloc(module, basek, k, rows, 1, rank)));
(0..n_lwe).for_each(|_| data.push(GGSWCiphertext::alloc(n_gglwe, basek, k, rows, 1, rank)));
Self {
keys: data,
dist: Distribution::NONE,
}
}
pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAllocBytes,
Module<B>: GGSWEncryptSkFamily<B>,
{
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k, rank)
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k, rank)
}
}
@@ -141,13 +138,13 @@ impl<D: DataMut> BlindRotationKeyCGGI<D> {
) where
DataSkGLWE: DataRef,
DataSkLWE: DataRef,
Module<B>: GGSWEncryptSkFamily<B> + ScalarZnxAlloc + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.keys.len(), sk_lwe.n());
assert_eq!(sk_glwe.n(), module.n());
assert!(sk_glwe.n() <= module.n());
assert_eq!(sk_glwe.rank(), self.keys[0].rank());
match sk_lwe.dist {
Distribution::BinaryBlock(_)
@@ -162,7 +159,7 @@ impl<D: DataMut> BlindRotationKeyCGGI<D> {
self.dist = sk_lwe.dist;
let mut pt: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(sk_glwe.n(), 1);
let sk_ref: ScalarZnx<&[u8]> = sk_lwe.data.to_ref();
self.keys.iter_mut().enumerate().for_each(|(i, ggsw)| {
@@ -220,12 +217,16 @@ impl<D: Data, B: Backend> BlindRotationKeyCGGIExec<D, B> {
pub trait BlindRotationKeyCGGIExecLayoutFamily<B: Backend> = GGSWLayoutFamily<B> + SvpPPolAlloc<B> + SvpPrepare<B>;
impl<B: Backend> BlindRotationKeyCGGIExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self
pub fn alloc(module: &Module<B>, n_glwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self
where
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B>,
{
let mut data: Vec<GGSWCiphertextExec<Vec<u8>, B>> = Vec::with_capacity(n_lwe);
(0..n_lwe).for_each(|_| data.push(GGSWCiphertextExec::alloc(module, basek, k, rows, 1, rank)));
(0..n_lwe).for_each(|_| {
data.push(GGSWCiphertextExec::alloc(
module, n_glwe, basek, k, rows, 1, rank,
))
});
Self {
data,
dist: Distribution::NONE,
@@ -236,10 +237,11 @@ impl<B: Backend> BlindRotationKeyCGGIExec<Vec<u8>, B> {
pub fn from<DataOther>(module: &Module<B>, other: &BlindRotationKeyCGGI<DataOther>, scratch: &mut Scratch<B>) -> Self
where
DataOther: DataRef,
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B> + ScalarZnxAlloc,
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B>,
{
let mut brk: BlindRotationKeyCGGIExec<Vec<u8>, B> = Self::alloc(
module,
other.n(),
other.keys.len(),
other.basek(),
other.k(),
@@ -255,13 +257,15 @@ impl<D: DataMut, B: Backend> BlindRotationKeyCGGIExec<D, B> {
pub fn prepare<DataOther>(&mut self, module: &Module<B>, other: &BlindRotationKeyCGGI<DataOther>, scratch: &mut Scratch<B>)
where
DataOther: DataRef,
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B> + ScalarZnxAlloc,
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B>,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.data.len(), other.keys.len());
}
let n: usize = other.n();
self.data
.iter_mut()
.zip(other.keys.iter())
@@ -273,10 +277,10 @@ impl<D: DataMut, B: Backend> BlindRotationKeyCGGIExec<D, B> {
match other.dist {
Distribution::BinaryBlock(_) => {
let mut x_pow_a: Vec<SvpPPol<Vec<u8>, B>> = Vec::with_capacity(module.n() << 1);
let mut buf: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
(0..module.n() << 1).for_each(|i| {
let mut res: SvpPPol<Vec<u8>, B> = module.svp_ppol_alloc(1);
let mut x_pow_a: Vec<SvpPPol<Vec<u8>, B>> = Vec::with_capacity(n << 1);
let mut buf: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
(0..n << 1).for_each(|i| {
let mut res: SvpPPol<Vec<u8>, B> = module.svp_ppol_alloc(n, 1);
set_xai_plus_y(module, i, 0, &mut res, &mut buf);
x_pow_a.push(res);
});
@@ -293,7 +297,7 @@ where
C: DataMut,
Module<B>: SvpPrepare<B>,
{
let n: usize = module.n();
let n: usize = res.n();
{
let raw: &mut [i64] = buf.at_mut(0, 0);
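With this layout the blind-rotation key is allocated from two explicit dimensions: the GGSW ring degree and the LWE dimension (one GGSW per LWE secret coefficient). A minimal sketch of the allocation and generation flow, assuming the secrets, randomness sources and scratch space are prepared as in the tests below:

let mut brk: BlindRotationKeyCGGI<Vec<u8>> = BlindRotationKeyCGGI::alloc(n_glwe, n_lwe, basek, k_brk, rows_brk, rank);
brk.generate_from_sk(
    module,
    &sk_glwe_dft, // GLWESecretExec; its degree may be <= module.n()
    &sk_lwe,      // LWESecret with a binary-type distribution
    &mut source_xa,
    &mut source_xe,
    sigma,
    scratch.borrow(),
);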

View File

@@ -1,7 +1,7 @@
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAlloc, VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
VecZnxRotateInplace, VecZnxSwithcDegree, ZnxInfos, ZnxViewMut,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace,
VecZnxSwithcDegree, ZnxInfos, ZnxViewMut,
},
layouts::{Backend, Module, ScratchOwned, VecZnx},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
@@ -14,10 +14,7 @@ pub struct LookUpTable {
}
impl LookUpTable {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, extension_factor: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, extension_factor: usize) -> Self {
#[cfg(debug_assertions)]
{
assert!(
@@ -29,7 +26,7 @@ impl LookUpTable {
let size: usize = k.div_ceil(basek);
let mut data: Vec<VecZnx<Vec<u8>>> = Vec::with_capacity(extension_factor);
(0..extension_factor).for_each(|_| {
data.push(module.vec_znx_alloc(1, size));
data.push(VecZnx::alloc(n, 1, size));
});
Self { data, basek, k }
}
@@ -69,13 +66,13 @@ impl LookUpTable {
// #elements in lookup table
let f_len: usize = f.len();
// If LUT size > module.n()
// If LUT size > n
let domain_size: usize = self.domain_size();
let size: usize = self.k.div_ceil(self.basek);
// Equivalent to AUTO([f(0), -f(n-1), -f(n-2), ..., -f(1)], -1)
let mut lut_full: VecZnx<Vec<u8>> = VecZnx::alloc::<i64>(domain_size, 1, size);
let mut lut_full: VecZnx<Vec<u8>> = VecZnx::alloc(domain_size, 1, size);
let lut_at: &mut [i64] = lut_full.at_mut(0, limbs - 1);

View File

@@ -1,10 +1,10 @@
pub mod cggi;
pub mod key;
pub mod lut;
mod cggi;
mod key;
mod lut;
pub use cggi::{CCGIBlindRotationFamily, cggi_blind_rotate, cggi_blind_rotate_scratch_space};
pub use key::{BlindRotationKeyCGGI, BlindRotationKeyCGGIExec, BlindRotationKeyCGGIExecLayoutFamily};
pub use lut::LookUpTable;
pub use cggi::*;
pub use key::*;
pub use lut::*;
#[cfg(test)]
mod test;
mod tests;

View File

@@ -1,2 +0,0 @@
pub mod cggi;
pub mod lut;

View File

@@ -0,0 +1,39 @@
use backend::{
hal::{api::ModuleNew, layouts::Module},
implementation::cpu_spqlios::FFT64,
};
use crate::blind_rotation::tests::{
generic_cggi::blind_rotatio_test,
generic_lut::{test_lut_extended, test_lut_standard},
};
#[test]
fn lut_standard() {
let module: Module<FFT64> = Module::<FFT64>::new(32);
test_lut_standard(&module);
}
#[test]
fn lut_extended() {
let module: Module<FFT64> = Module::<FFT64>::new(32);
test_lut_extended(&module);
}
#[test]
fn standard() {
let module: Module<FFT64> = Module::<FFT64>::new(512);
blind_rotatio_test(&module, 224, 1, 1);
}
#[test]
fn block_binary() {
let module: Module<FFT64> = Module::<FFT64>::new(512);
blind_rotatio_test(&module, 224, 7, 1);
}
#[test]
fn block_binary_extended() {
let module: Module<FFT64> = Module::<FFT64>::new(512);
blind_rotatio_test(&module, 224, 7, 2);
}

View File

@@ -0,0 +1 @@
mod fft64;

View File

@@ -1,63 +1,33 @@
use backend::{
hal::{
api::{
MatZnxAlloc, ModuleNew, ScalarZnxAlloc, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddNormal,
VecZnxAddScalarInplace, VecZnxAlloc, VecZnxAllocBytes, VecZnxEncodeCoeffsi64, VecZnxFillUniform, VecZnxRotateInplace,
VecZnxSub, VecZnxSwithcDegree, ZnxView,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, VecZnxBigAllocBytesImpl, VecZnxDftAllocBytesImpl,
},
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxEncodeCoeffsi64, VecZnxFillUniform,
VecZnxRotateInplace, VecZnxSub, VecZnxSwithcDegree, ZnxView,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, VecZnxBigAllocBytesImpl, VecZnxDftAllocBytesImpl,
},
implementation::cpu_spqlios::FFT64,
};
use sampling::source::Source;
use crate::{
BlindRotationKeyCGGIExecLayoutFamily, CCGIBlindRotationFamily, GLWECiphertext, GLWEDecryptFamily, GLWEPlaintext, GLWESecret,
GLWESecretExec, GLWESecretFamily, Infos, LWECiphertext, LWESecret,
blind_rotation::{
cggi::{cggi_blind_rotate, cggi_blind_rotate_scratch_space, negate_and_mod_switch_2n},
key::{BlindRotationKeyCGGI, BlindRotationKeyCGGIExec},
lut::LookUpTable,
},
lwe::{LWEPlaintext, ciphertext::LWECiphertextToRef},
BlindRotationKeyCGGI, BlindRotationKeyCGGIExec, BlindRotationKeyCGGIExecLayoutFamily, CCGIBlindRotationFamily,
GLWECiphertext, GLWEDecryptFamily, GLWEPlaintext, GLWESecret, GLWESecretExec, GLWESecretFamily, Infos, LWECiphertext,
LWECiphertextToRef, LWEPlaintext, LWESecret, LookUpTable, cggi_blind_rotate, cggi_blind_rotate_scratch_space,
negate_and_mod_switch_2n,
};
#[test]
fn standard() {
let module: Module<FFT64> = Module::<FFT64>::new(512);
blind_rotatio_test(&module, 224, 1, 1);
}
#[test]
fn block_binary() {
let module: Module<FFT64> = Module::<FFT64>::new(512);
blind_rotatio_test(&module, 224, 7, 1);
}
#[test]
fn block_binary_extended() {
let module: Module<FFT64> = Module::<FFT64>::new(512);
blind_rotatio_test(&module, 224, 7, 2);
}
pub(crate) trait CGGITestModuleFamily<B: Backend> = CCGIBlindRotationFamily<B>
+ GLWESecretFamily<B>
+ GLWEDecryptFamily<B>
+ BlindRotationKeyCGGIExecLayoutFamily<B>
+ VecZnxAlloc
+ ScalarZnxAlloc
+ VecZnxFillUniform
+ VecZnxAddNormal
+ VecZnxAllocBytes
+ VecZnxAddScalarInplace
+ VecZnxEncodeCoeffsi64
+ VecZnxRotateInplace
+ VecZnxSwithcDegree
+ MatZnxAlloc
+ VecZnxSub;
pub(crate) trait CGGITestScratchFamily<B: Backend> = VecZnxDftAllocBytesImpl<B>
+ VecZnxBigAllocBytesImpl<B>
@@ -70,13 +40,13 @@ pub(crate) trait CGGITestScratchFamily<B: Backend> = VecZnxDftAllocBytesImpl<B>
+ TakeVecZnxImpl<B>
+ TakeVecZnxSliceImpl<B>;
fn blind_rotatio_test<B: Backend>(module: &Module<B>, n_lwe: usize, block_size: usize, extension_factor: usize)
pub(crate) fn blind_rotatio_test<B: Backend>(module: &Module<B>, n_lwe: usize, block_size: usize, extension_factor: usize)
where
Module<B>: CGGITestModuleFamily<B>,
B: CGGITestScratchFamily<B>,
{
let n: usize = module.n();
let basek: usize = 19;
let k_lwe: usize = 24;
let k_brk: usize = 3 * basek;
let rows_brk: usize = 2; // Ensures first limb is noise-free.
@@ -90,7 +60,7 @@ where
let mut source_xe: Source = Source::new([2u8; 32]);
let mut source_xa: Source = Source::new([1u8; 32]);
let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_glwe.fill_ternary_prob(0.5, &mut source_xs);
let sk_glwe_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_glwe);
@@ -98,11 +68,12 @@ where
sk_lwe.fill_binary_block(block_size, &mut source_xs);
let mut scratch: ScratchOwned<B> = ScratchOwned::<B>::alloc(BlindRotationKeyCGGI::generate_from_sk_scratch_space(
module, basek, k_brk, rank,
module, n, basek, k_brk, rank,
));
let mut scratch_br: ScratchOwned<B> = ScratchOwned::<B>::alloc(cggi_blind_rotate_scratch_space(
module,
n,
block_size,
extension_factor,
basek,
@@ -112,7 +83,7 @@ where
rank,
));
let mut brk: BlindRotationKeyCGGI<Vec<u8>> = BlindRotationKeyCGGI::alloc(module, n_lwe, basek, k_brk, rows_brk, rank);
let mut brk: BlindRotationKeyCGGI<Vec<u8>> = BlindRotationKeyCGGI::alloc(n, n_lwe, basek, k_brk, rows_brk, rank);
brk.generate_from_sk(
module,
@@ -147,16 +118,16 @@ where
.enumerate()
.for_each(|(i, x)| *x = 2 * (i as i64) + 1);
let mut lut: LookUpTable = LookUpTable::alloc(module, basek, k_lut, extension_factor);
let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
lut.set(module, &f, message_modulus);
let mut res: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_res, rank);
let mut res: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_res, rank);
let brk_exec: BlindRotationKeyCGGIExec<Vec<u8>, B> = BlindRotationKeyCGGIExec::from(module, &brk, scratch_br.borrow());
cggi_blind_rotate(module, &mut res, &lwe, &lut, &brk_exec, scratch_br.borrow());
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_res);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_res);
res.decrypt(module, &mut pt_have, &sk_glwe_dft, scratch.borrow());

View File

@@ -1,18 +1,19 @@
use std::vec;
use backend::{
hal::{
api::{ModuleNew, ZnxView},
layouts::Module,
},
implementation::cpu_spqlios::FFT64,
use backend::hal::{
api::{VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSwithcDegree, ZnxView},
layouts::{Backend, Module},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
};
use crate::blind_rotation::lut::{DivRound, LookUpTable};
use crate::{DivRound, LookUpTable};
#[test]
fn standard() {
let module: Module<FFT64> = Module::<FFT64>::new(32);
pub(crate) fn test_lut_standard<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
let n: usize = module.n();
let basek: usize = 20;
let k_lut: usize = 40;
let message_modulus: usize = 16;
@@ -25,11 +26,11 @@ fn standard() {
.enumerate()
.for_each(|(i, x)| *x = (i as i64) - 8);
let mut lut: LookUpTable = LookUpTable::alloc(&module, basek, k_lut, extension_factor);
lut.set(&module, &f, log_scale);
let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
lut.set(module, &f, log_scale);
let half_step: i64 = lut.domain_size().div_round(message_modulus << 1) as i64;
lut.rotate(&module, half_step);
lut.rotate(module, half_step);
let step: usize = lut.domain_size().div_round(message_modulus);
@@ -39,14 +40,17 @@ fn standard() {
f[i / step] % message_modulus as i64,
lut.data[0].raw()[0] / (1 << (log_scale % basek)) as i64
);
lut.rotate(&module, -1);
lut.rotate(module, -1);
});
});
}
#[test]
fn extended() {
let module: Module<FFT64> = Module::<FFT64>::new(32);
pub(crate) fn test_lut_extended<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
let n: usize = module.n();
let basek: usize = 20;
let k_lut: usize = 40;
let message_modulus: usize = 16;
@@ -59,7 +63,7 @@ fn extended() {
.enumerate()
.for_each(|(i, x)| *x = (i as i64) - 8);
let mut lut: LookUpTable = LookUpTable::alloc(&module, basek, k_lut, extension_factor);
let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
lut.set(&module, &f, log_scale);
let half_step: i64 = lut.domain_size().div_round(message_modulus << 1) as i64;

View File

@@ -0,0 +1,396 @@
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxCopy,
VecZnxStd, VecZnxSubScalarInplace, VecZnxSwithcDegree,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeScalarZnxImpl, TakeSvpPPolImpl,
TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxImpl,
},
};
use sampling::source::Source;
use crate::{
AutomorphismKey, AutomorphismKeyCompressed, AutomorphismKeyEncryptSkFamily, AutomorphismKeyExec, GGLWEExecLayoutFamily,
GLWEDecryptFamily, GLWEKeyswitchFamily, GLWEPlaintext, GLWESecret, GLWESecretExec, Infos,
noise::log2_std_noise_gglwe_product,
};
pub(crate) trait AutomorphismTestModuleFamily<B: Backend> = AutomorphismKeyEncryptSkFamily<B>
+ GLWEKeyswitchFamily<B>
+ VecZnxAutomorphism
+ GGLWEExecLayoutFamily<B>
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace
+ VecZnxAutomorphism
+ VecZnxAutomorphismInplace
+ GLWEDecryptFamily<B>
+ VecZnxSubScalarInplace
+ VecZnxStd
+ VecZnxCopy;
pub(crate) trait AutomorphismTestScratchFamily<B: Backend> = ScratchOwnedAllocImpl<B>
+ ScratchOwnedBorrowImpl<B>
+ ScratchAvailableImpl<B>
+ TakeScalarZnxImpl<B>
+ TakeVecZnxDftImpl<B>
+ TakeVecZnxImpl<B>
+ TakeSvpPPolImpl<B>
+ TakeVecZnxBigImpl<B>;
pub(crate) fn test_automorphisk_key_encrypt_sk<B: Backend>(
module: &Module<B>,
basek: usize,
k_ksk: usize,
digits: usize,
rank: usize,
sigma: f64,
) where
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k_ksk - digits * basek) / (digits * basek);
let mut atk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_scratch_space(
module, n, basek, k_ksk, rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let p = -5;
atk.encrypt_sk(
module,
p,
&sk,
&mut source_xa,
&mut source_xe,
sigma,
scratch.borrow(),
);
let mut sk_out: GLWESecret<Vec<u8>> = sk.clone();
(0..atk.rank()).for_each(|i| {
module.vec_znx_automorphism(
module.galois_element_inv(p),
&mut sk_out.data.as_vec_znx_mut(),
i,
&sk.data.as_vec_znx(),
i,
);
});
let sk_out_exec = GLWESecretExec::from(module, &sk_out);
atk.key
.key
.assert_noise(module, &sk_out_exec, &sk.data, sigma);
}
pub(crate) fn test_automorphisk_key_encrypt_sk_compressed<B: Backend>(
module: &Module<B>,
basek: usize,
k_ksk: usize,
digits: usize,
rank: usize,
sigma: f64,
) where
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k_ksk - digits * basek) / (digits * basek);
let mut atk_compressed: AutomorphismKeyCompressed<Vec<u8>> =
AutomorphismKeyCompressed::alloc(n, basek, k_ksk, rows, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_scratch_space(
module, n, basek, k_ksk, rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let p = -5;
let seed_xa: [u8; 32] = [1u8; 32];
atk_compressed.encrypt_sk(
module,
p,
&sk,
seed_xa,
&mut source_xe,
sigma,
scratch.borrow(),
);
let mut sk_out: GLWESecret<Vec<u8>> = sk.clone();
(0..atk_compressed.rank()).for_each(|i| {
module.vec_znx_automorphism(
module.galois_element_inv(p),
&mut sk_out.data.as_vec_znx_mut(),
i,
&sk.data.as_vec_znx(),
i,
);
});
let sk_out_exec = GLWESecretExec::from(module, &sk_out);
let mut atk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
atk.decompress(module, &atk_compressed);
atk.key
.key
.assert_noise(module, &sk_out_exec, &sk.data, sigma);
}
pub(crate) fn test_gglwe_automorphism<B: Backend>(
module: &Module<B>,
p0: i64,
p1: i64,
basek: usize,
digits: usize,
k_in: usize,
k_out: usize,
k_apply: usize,
sigma: f64,
rank: usize,
) where
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let digits_in: usize = 1;
let rows_in: usize = k_in / (basek * digits);
let rows_apply: usize = k_in.div_ceil(basek * digits);
let mut auto_key_in: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_in, rows_in, digits_in, rank);
let mut auto_key_out: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_out, rows_in, digits_in, rank);
let mut auto_key_apply: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_apply, rows_apply, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_apply, rank)
| AutomorphismKey::automorphism_scratch_space(module, n, basek, k_out, k_in, k_apply, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
// gglwe_{s1}(s0) = s0 -> s1
auto_key_in.encrypt_sk(
module,
p0,
&sk,
&mut source_xa,
&mut source_xe,
sigma,
scratch.borrow(),
);
// gglwe_{s2}(s1) -> s1 -> s2
auto_key_apply.encrypt_sk(
module,
p1,
&sk,
&mut source_xa,
&mut source_xe,
sigma,
scratch.borrow(),
);
let mut auto_key_apply_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(module, n, basek, k_apply, rows_apply, digits, rank);
auto_key_apply_exec.prepare(module, &auto_key_apply, scratch.borrow());
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
auto_key_out.automorphism(module, &auto_key_in, &auto_key_apply_exec, scratch.borrow());
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_out);
let mut sk_auto: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
(0..rank).for_each(|i| {
module.vec_znx_automorphism(
module.galois_element_inv(p0 * p1),
&mut sk_auto.data.as_vec_znx_mut(),
i,
&sk.data.as_vec_znx(),
i,
);
});
let sk_auto_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_auto);
(0..auto_key_out.rank_in()).for_each(|col_i| {
(0..auto_key_out.rows()).for_each(|row_i| {
auto_key_out
.at(row_i, col_i)
.decrypt(module, &mut pt, &sk_auto_dft, scratch.borrow());
module.vec_znx_sub_scalar_inplace(
&mut pt.data,
0,
(digits_in - 1) + row_i * digits_in,
&sk.data,
col_i,
);
let noise_have: f64 = module.vec_znx_std(basek, &pt.data, 0).log2();
let noise_want: f64 = log2_std_noise_gglwe_product(
n as f64,
basek * digits,
0.5,
0.5,
0f64,
sigma * sigma,
0f64,
rank as f64,
k_out,
k_apply,
);
assert!(
noise_have < noise_want + 0.5,
"{} {}",
noise_have,
noise_want
);
});
});
}
pub(crate) fn test_gglwe_automorphism_inplace<B: Backend>(
module: &Module<B>,
p0: i64,
p1: i64,
basek: usize,
digits: usize,
k_in: usize,
k_apply: usize,
sigma: f64,
rank: usize,
) where
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let digits_in: usize = 1;
let rows_in: usize = k_in / (basek * digits);
let rows_apply: usize = k_in.div_ceil(basek * digits);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_in, rows_in, digits_in, rank);
let mut auto_key_apply: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_apply, rows_apply, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_apply, rank)
| AutomorphismKey::automorphism_inplace_scratch_space(module, n, basek, k_in, k_apply, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
// gglwe_{s1}(s0) = s0 -> s1
auto_key.encrypt_sk(
module,
p0,
&sk,
&mut source_xa,
&mut source_xe,
sigma,
scratch.borrow(),
);
// gglwe_{s2}(s1) -> s1 -> s2
auto_key_apply.encrypt_sk(
module,
p1,
&sk,
&mut source_xa,
&mut source_xe,
sigma,
scratch.borrow(),
);
let mut auto_key_apply_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(module, n, basek, k_apply, rows_apply, digits, rank);
auto_key_apply_exec.prepare(module, &auto_key_apply, scratch.borrow());
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
auto_key.automorphism_inplace(module, &auto_key_apply_exec, scratch.borrow());
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_in);
let mut sk_auto: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
(0..rank).for_each(|i| {
module.vec_znx_automorphism(
module.galois_element_inv(p0 * p1),
&mut sk_auto.data.as_vec_znx_mut(),
i,
&sk.data.as_vec_znx(),
i,
);
});
let sk_auto_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_auto);
(0..auto_key.rank_in()).for_each(|col_i| {
(0..auto_key.rows()).for_each(|row_i| {
auto_key
.at(row_i, col_i)
.decrypt(module, &mut pt, &sk_auto_dft, scratch.borrow());
module.vec_znx_sub_scalar_inplace(
&mut pt.data,
0,
(digits_in - 1) + row_i * digits_in,
&sk.data,
col_i,
);
let noise_have: f64 = module.vec_znx_std(basek, &pt.data, 0).log2();
let noise_want: f64 = log2_std_noise_gglwe_product(
n as f64,
basek * digits,
0.5,
0.5,
0f64,
sigma * sigma,
0f64,
rank as f64,
k_in,
k_apply,
);
assert!(
noise_have < noise_want + 0.5,
"{} {}",
noise_have,
noise_want
);
});
});
}

View File

@@ -0,0 +1,321 @@
use backend::hal::{
api::{ScratchAvailable, SvpPPolAlloc, SvpPrepare, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, ZnxView, ZnxViewMut},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, ScalarZnx, ScalarZnxToRef, Scratch, SvpPPol, WriterTo},
};
use sampling::source::Source;
use crate::{
Distribution, GGSWCiphertext, GGSWCiphertextExec, GGSWEncryptSkFamily, GGSWLayoutFamily, GLWESecretExec, Infos, LWESecret,
};
pub struct BlindRotationKeyCGGI<D: Data> {
pub(crate) keys: Vec<GGSWCiphertext<D>>,
pub(crate) dist: Distribution,
}
impl<D: Data> PartialEq for BlindRotationKeyCGGI<D> {
fn eq(&self, other: &Self) -> bool {
if self.keys.len() != other.keys.len() {
return false;
}
for (a, b) in self.keys.iter().zip(other.keys.iter()) {
if a != b {
return false;
}
}
self.dist == other.dist
}
}
impl<D: Data> Eq for BlindRotationKeyCGGI<D> {}
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
impl<D: DataMut> ReaderFrom for BlindRotationKeyCGGI<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
match Distribution::read_from(reader) {
Ok(dist) => self.dist = dist,
Err(e) => return Err(e),
}
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
if self.keys.len() != len {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("self.keys.len()={} != read len={}", self.keys.len(), len),
));
}
for key in &mut self.keys {
key.read_from(reader)?;
}
Ok(())
}
}
impl<D: DataRef> WriterTo for BlindRotationKeyCGGI<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
match self.dist.write_to(writer) {
Ok(()) => {}
Err(e) => return Err(e),
}
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
for key in &self.keys {
key.write_to(writer)?;
}
Ok(())
}
}
impl BlindRotationKeyCGGI<Vec<u8>> {
pub fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
let mut data: Vec<GGSWCiphertext<Vec<u8>>> = Vec::with_capacity(n_lwe);
(0..n_lwe).for_each(|_| data.push(GGSWCiphertext::alloc(n_gglwe, basek, k, rows, 1, rank)));
Self {
keys: data,
dist: Distribution::NONE,
}
}
pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: GGSWEncryptSkFamily<B>,
{
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k, rank)
}
}
impl<D: DataRef> BlindRotationKeyCGGI<D> {
#[allow(dead_code)]
pub(crate) fn n(&self) -> usize {
self.keys[0].n()
}
#[allow(dead_code)]
pub(crate) fn rows(&self) -> usize {
self.keys[0].rows()
}
#[allow(dead_code)]
pub(crate) fn k(&self) -> usize {
self.keys[0].k()
}
#[allow(dead_code)]
pub(crate) fn size(&self) -> usize {
self.keys[0].size()
}
#[allow(dead_code)]
pub(crate) fn rank(&self) -> usize {
self.keys[0].rank()
}
pub(crate) fn basek(&self) -> usize {
self.keys[0].basek()
}
#[allow(dead_code)]
pub(crate) fn block_size(&self) -> usize {
match self.dist {
Distribution::BinaryBlock(value) => value,
_ => 1,
}
}
}
impl<D: DataMut> BlindRotationKeyCGGI<D> {
pub fn generate_from_sk<DataSkGLWE, DataSkLWE, B: Backend>(
&mut self,
module: &Module<B>,
sk_glwe: &GLWESecretExec<DataSkGLWE, B>,
sk_lwe: &LWESecret<DataSkLWE>,
source_xa: &mut Source,
source_xe: &mut Source,
sigma: f64,
scratch: &mut Scratch<B>,
) where
DataSkGLWE: DataRef,
DataSkLWE: DataRef,
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.keys.len(), sk_lwe.n());
assert!(sk_glwe.n() <= module.n());
assert_eq!(sk_glwe.rank(), self.keys[0].rank());
match sk_lwe.dist {
Distribution::BinaryBlock(_)
| Distribution::BinaryFixed(_)
| Distribution::BinaryProb(_)
| Distribution::ZERO => {}
_ => panic!(
"invalid LWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb (or ZERO for debugging)"
),
}
}
self.dist = sk_lwe.dist;
let mut pt: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(sk_glwe.n(), 1);
let sk_ref: ScalarZnx<&[u8]> = sk_lwe.data.to_ref();
self.keys.iter_mut().enumerate().for_each(|(i, ggsw)| {
pt.at_mut(0, 0)[0] = sk_ref.at(0, 0)[i];
ggsw.encrypt_sk(module, &pt, sk_glwe, source_xa, source_xe, sigma, scratch);
});
}
}
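As a usage illustration (not part of this commit), the pieces above compose as follows. The wrapper name gen_brk_cggi, the parameter values and sigma are placeholders; sk_lwe and scratch are taken as inputs because LWESecret's constructor and the concrete scratch-owner bounds are not shown in this file:

fn gen_brk_cggi<B: Backend>(
    module: &Module<B>,
    sk_glwe: &GLWESecretExec<Vec<u8>, B>,
    sk_lwe: &LWESecret<Vec<u8>>,
    scratch: &mut Scratch<B>,
) -> BlindRotationKeyCGGI<Vec<u8>>
where
    Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
    // Placeholder parameters; scratch must hold at least
    // BlindRotationKeyCGGI::generate_from_sk_scratch_space(module, n_glwe, basek, k, rank) bytes.
    let (n_glwe, basek, k, rows, rank) = (1024, 12, 54, 3, 1);
    let mut brk = BlindRotationKeyCGGI::alloc(n_glwe, sk_lwe.n(), basek, k, rows, rank);
    let mut source_xa = Source::new([0u8; 32]);
    let mut source_xe = Source::new([1u8; 32]);
    brk.generate_from_sk(
        module, sk_glwe, sk_lwe, &mut source_xa, &mut source_xe, 3.2, scratch,
    );
    brk
}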
#[derive(PartialEq, Eq)]
pub struct BlindRotationKeyCGGIExec<D: Data, B: Backend> {
pub(crate) data: Vec<GGSWCiphertextExec<D, B>>,
pub(crate) dist: Distribution,
pub(crate) x_pow_a: Option<Vec<SvpPPol<Vec<u8>, B>>>,
}
impl<D: Data, B: Backend> BlindRotationKeyCGGIExec<D, B> {
#[allow(dead_code)]
pub(crate) fn n(&self) -> usize {
self.data[0].n()
}
#[allow(dead_code)]
pub(crate) fn rows(&self) -> usize {
self.data[0].rows()
}
#[allow(dead_code)]
pub(crate) fn k(&self) -> usize {
self.data[0].k()
}
#[allow(dead_code)]
pub(crate) fn size(&self) -> usize {
self.data[0].size()
}
#[allow(dead_code)]
pub(crate) fn rank(&self) -> usize {
self.data[0].rank()
}
pub(crate) fn basek(&self) -> usize {
self.data[0].basek()
}
pub(crate) fn block_size(&self) -> usize {
match self.dist {
Distribution::BinaryBlock(value) => value,
_ => 1,
}
}
}
pub trait BlindRotationKeyCGGIExecLayoutFamily<B: Backend> = GGSWLayoutFamily<B> + SvpPPolAlloc<B> + SvpPrepare<B>;
impl<B: Backend> BlindRotationKeyCGGIExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, n_glwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self
where
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B>,
{
let mut data: Vec<GGSWCiphertextExec<Vec<u8>, B>> = Vec::with_capacity(n_lwe);
(0..n_lwe).for_each(|_| {
data.push(GGSWCiphertextExec::alloc(
module, n_glwe, basek, k, rows, 1, rank,
))
});
Self {
data,
dist: Distribution::NONE,
x_pow_a: None,
}
}
pub fn from<DataOther>(module: &Module<B>, other: &BlindRotationKeyCGGI<DataOther>, scratch: &mut Scratch<B>) -> Self
where
DataOther: DataRef,
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B>,
{
let mut brk: BlindRotationKeyCGGIExec<Vec<u8>, B> = Self::alloc(
module,
other.n(),
other.keys.len(),
other.basek(),
other.k(),
other.rows(),
other.rank(),
);
brk.prepare(module, other, scratch);
brk
}
}
impl<D: DataMut, B: Backend> BlindRotationKeyCGGIExec<D, B> {
pub fn prepare<DataOther>(&mut self, module: &Module<B>, other: &BlindRotationKeyCGGI<DataOther>, scratch: &mut Scratch<B>)
where
DataOther: DataRef,
Module<B>: BlindRotationKeyCGGIExecLayoutFamily<B>,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.data.len(), other.keys.len());
}
let n: usize = other.n();
self.data
.iter_mut()
.zip(other.keys.iter())
.for_each(|(ggsw_exec, other)| {
ggsw_exec.prepare(module, other, scratch);
});
self.dist = other.dist;
match other.dist {
Distribution::BinaryBlock(_) => {
let mut x_pow_a: Vec<SvpPPol<Vec<u8>, B>> = Vec::with_capacity(n << 1);
let mut buf: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
(0..n << 1).for_each(|i| {
let mut res: SvpPPol<Vec<u8>, B> = module.svp_ppol_alloc(n, 1);
set_xai_plus_y(module, i, 0, &mut res, &mut buf);
x_pow_a.push(res);
});
self.x_pow_a = Some(x_pow_a);
}
_ => {}
}
}
}
pub fn set_xai_plus_y<A, C, B: Backend>(module: &Module<B>, ai: usize, y: i64, res: &mut SvpPPol<A, B>, buf: &mut ScalarZnx<C>)
where
A: DataMut,
C: DataMut,
Module<B>: SvpPrepare<B>,
{
let n: usize = res.n();
{
let raw: &mut [i64] = buf.at_mut(0, 0);
if ai < n {
raw[ai] = 1;
} else {
raw[(ai - n) & (n - 1)] = -1;
}
raw[0] += y;
}
module.svp_prepare(res, 0, buf, 0);
{
let raw: &mut [i64] = buf.at_mut(0, 0);
if ai < n {
raw[ai] = 0;
} else {
raw[(ai - n) & (n - 1)] = 0;
}
raw[0] = 0;
}
}
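The index and sign handling in set_xai_plus_y encodes the monomial X^{a_i} in the negacyclic ring Z[X]/(X^N + 1): exponents a_i >= N wrap around with a sign flip because X^N = -1 (N is assumed to be a power of two, hence the & (n - 1) mask). A minimal standalone sketch of that rule on a plain coefficient buffer (the helper name is illustrative, not library code):

/// Illustrative only: writes the coefficients of X^{ai} + y in Z[X]/(X^N + 1),
/// mirroring the wrap-around logic used by `set_xai_plus_y`.
fn xai_plus_y_coeffs(coeffs: &mut [i64], ai: usize, y: i64) {
    let n = coeffs.len(); // assumed to be a power of two
    coeffs.iter_mut().for_each(|c| *c = 0);
    if ai < n {
        coeffs[ai] = 1; // X^{ai}
    } else {
        coeffs[(ai - n) & (n - 1)] = -1; // X^{ai} = X^{n} * X^{ai-n} = -X^{ai-n}
    }
    coeffs[0] += y;
}

fn main() {
    let mut c = vec![0i64; 8];
    xai_plus_y_coeffs(&mut c, 10, 1); // X^{10} = -X^{2} in Z[X]/(X^8 + 1)
    assert_eq!(c, vec![1, 0, -1, 0, 0, 0, 0, 0]);
}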

View File

@@ -0,0 +1,3 @@
mod cpu_spqlios;
mod generic_cggi;
mod generic_lut;

View File

@@ -8,6 +8,7 @@ use crate::{AutomorphismKey, AutomorphismKeyExec, GLWECiphertext, GLWEKeyswitchF
impl AutomorphismKey<Vec<u8>> {
pub fn automorphism_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -18,11 +19,12 @@ impl AutomorphismKey<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
}
pub fn automorphism_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -32,7 +34,7 @@ impl AutomorphismKey<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
AutomorphismKey::automorphism_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank)
AutomorphismKey::automorphism_scratch_space(module, n, basek, k_out, k_out, k_ksk, digits, rank)
}
}

View File

@@ -1,8 +1,8 @@
use backend::hal::{
api::{
ScalarZnxAllocBytes, ScratchAvailable, SvpApply, TakeScalarZnx, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
VecZnxAddScalarInplace, VecZnxAllocBytes, VecZnxAutomorphism, VecZnxBigAllocBytes, VecZnxDftToVecZnxBigTmpA,
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSwithcDegree, ZnxZero,
ScratchAvailable, SvpApply, TakeScalarZnx, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddScalarInplace,
VecZnxAutomorphism, VecZnxBigAllocBytes, VecZnxDftToVecZnxBigTmpA, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
VecZnxSwithcDegree, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
};
@@ -18,15 +18,15 @@ use crate::{
pub trait GGLWEEncryptSkFamily<B: Backend> = GLWEEncryptSkFamily<B> + GLWESecretFamily<B>;
impl GGLWECiphertext<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
where
Module<B>: GGLWEEncryptSkFamily<B> + VecZnxAllocBytes,
Module<B>: GGLWEEncryptSkFamily<B>,
{
GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+ (GLWEPlaintext::byte_of(module, basek, k) | module.vec_znx_normalize_tmp_bytes(module.n()))
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
+ (GLWEPlaintext::byte_of(n, basek, k) | module.vec_znx_normalize_tmp_bytes(n))
}
pub fn encrypt_pk_scratch_space<B: Backend>(_module: &Module<B>, _basek: usize, _k: usize, _rank: usize) -> usize {
pub fn encrypt_pk_scratch_space<B: Backend>(_module: &Module<B>, _n: usize, _basek: usize, _k: usize, _rank: usize) -> usize {
unimplemented!()
}
}
@@ -42,8 +42,8 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: GGLWEEncryptSkFamily<B> + VecZnxAllocBytes + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Module<B>: GGLWEEncryptSkFamily<B> + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -63,16 +63,15 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
self.rank_out(),
sk.rank()
);
assert_eq!(self.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(pt.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n(), sk.n());
assert!(
scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k()),
"scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
scratch.available(),
self.rank(),
self.size(),
GGLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
GGLWECiphertext::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k())
);
assert!(
self.rows() * self.digits() * self.basek() <= self.k(),
@@ -91,7 +90,7 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
let k: usize = self.k();
let rank_in: usize = self.rank_in();
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(module, basek, k);
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(sk.n(), basek, k);
// For each input column (i.e. rank) produces a GGLWE ciphertext of rank_out+1 columns
//
// Example for ksk rank 2 to rank 3:
@@ -125,11 +124,11 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
}
impl GGLWECiphertextCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
where
Module<B>: GLWESwitchingKeyEncryptSkFamily<B> + VecZnxAllocBytes,
Module<B>: GLWESwitchingKeyEncryptSkFamily<B>,
{
GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
GGLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
}
}
@@ -144,8 +143,8 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: GGLWEEncryptSkFamily<B> + VecZnxAllocBytes + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Module<B>: GGLWEEncryptSkFamily<B> + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -165,16 +164,16 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
self.rank_out(),
sk.rank()
);
assert_eq!(self.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(pt.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n(), sk.n());
assert!(
scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k()),
scratch.available()
>= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k()),
"scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
scratch.available(),
self.rank(),
self.size(),
GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k())
GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k())
);
assert!(
self.rows() * self.digits() * self.basek() <= self.k(),
@@ -196,7 +195,7 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
let mut source_xa = Source::new(seed);
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(module, basek, k);
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(sk.n(), basek, k);
(0..rank_in).for_each(|col_i| {
(0..rows).for_each(|row_i| {
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
@@ -237,27 +236,29 @@ pub trait GLWESwitchingKeyEncryptSkFamily<B: Backend> = GGLWEEncryptSkFamily<B>;
impl GLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k: usize,
rank_in: usize,
rank_out: usize,
) -> usize
where
Module<B>: GLWESwitchingKeyEncryptSkFamily<B> + ScalarZnxAllocBytes + VecZnxAllocBytes,
Module<B>: GLWESwitchingKeyEncryptSkFamily<B>,
{
(GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | module.scalar_znx_alloc_bytes(1))
+ module.scalar_znx_alloc_bytes(rank_in)
+ GLWESecretExec::bytes_of(module, rank_out)
(GGLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k) | ScalarZnx::alloc_bytes(n, 1))
+ ScalarZnx::alloc_bytes(n, rank_in)
+ GLWESecretExec::bytes_of(module, n, rank_out)
}
pub fn encrypt_pk_scratch_space<B: Backend>(
module: &Module<B>,
_n: usize,
_basek: usize,
_k: usize,
_rank_in: usize,
_rank_out: usize,
) -> usize {
GGLWECiphertext::encrypt_pk_scratch_space(module, _basek, _k, _rank_out)
GGLWECiphertext::encrypt_pk_scratch_space(module, _n, _basek, _k, _rank_out)
}
}
@@ -272,13 +273,8 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: GLWESwitchingKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAllocBytes
+ VecZnxAddScalarInplace,
Scratch<B>:
ScratchAvailable + TakeScalarZnx<B> + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + ScratchAvailable + TakeVecZnx<B>,
Module<B>: GLWESwitchingKeyEncryptSkFamily<B> + VecZnxSwithcDegree + VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -288,6 +284,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
scratch.available()
>= GLWESwitchingKey::encrypt_sk_scratch_space(
module,
sk_out.n(),
self.basek(),
self.k(),
self.rank_in(),
@@ -297,6 +294,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
scratch.available(),
GLWESwitchingKey::encrypt_sk_scratch_space(
module,
sk_out.n(),
self.basek(),
self.k(),
self.rank_in(),
@@ -305,7 +303,9 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
)
}
let (mut sk_in_tmp, scratch1) = scratch.take_scalar_znx(module, sk_in.rank());
let n: usize = sk_in.n().max(sk_out.n());
let (mut sk_in_tmp, scratch1) = scratch.take_scalar_znx(n, sk_in.rank());
(0..sk_in.rank()).for_each(|i| {
module.vec_znx_switch_degree(
&mut sk_in_tmp.as_vec_znx_mut(),
@@ -315,9 +315,9 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
);
});
let (mut sk_out_tmp, scratch2) = scratch1.take_glwe_secret_exec(module, sk_out.rank());
let (mut sk_out_tmp, scratch2) = scratch1.take_glwe_secret_exec(n, sk_out.rank());
{
let (mut tmp, _) = scratch2.take_scalar_znx(module, 1);
let (mut tmp, _) = scratch2.take_scalar_znx(n, 1);
(0..sk_out.rank()).for_each(|i| {
module.vec_znx_switch_degree(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
@@ -341,17 +341,18 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
impl GLWESwitchingKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k: usize,
rank_in: usize,
rank_out: usize,
) -> usize
where
Module<B>: GLWESwitchingKeyEncryptSkFamily<B> + ScalarZnxAllocBytes + VecZnxAllocBytes,
Module<B>: GLWESwitchingKeyEncryptSkFamily<B>,
{
(GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | module.scalar_znx_alloc_bytes(1))
+ module.scalar_znx_alloc_bytes(rank_in)
+ GLWESecretExec::bytes_of(module, rank_out)
(GGLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k) | ScalarZnx::alloc_bytes(n, 1))
+ ScalarZnx::alloc_bytes(n, rank_in)
+ GLWESecretExec::bytes_of(module, n, rank_out)
}
}
@@ -366,13 +367,8 @@ impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: GLWESwitchingKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAllocBytes
+ VecZnxAddScalarInplace,
Scratch<B>:
ScratchAvailable + TakeScalarZnx<B> + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + ScratchAvailable + TakeVecZnx<B>,
Module<B>: GLWESwitchingKeyEncryptSkFamily<B> + VecZnxSwithcDegree + VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -382,6 +378,7 @@ impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
scratch.available()
>= GLWESwitchingKey::encrypt_sk_scratch_space(
module,
sk_out.n(),
self.basek(),
self.k(),
self.rank_in(),
@@ -391,6 +388,7 @@ impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
scratch.available(),
GLWESwitchingKey::encrypt_sk_scratch_space(
module,
sk_out.n(),
self.basek(),
self.k(),
self.rank_in(),
@@ -399,7 +397,9 @@ impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
)
}
let (mut sk_in_tmp, scratch1) = scratch.take_scalar_znx(module, sk_in.rank());
let n: usize = sk_in.n().max(sk_out.n());
let (mut sk_in_tmp, scratch1) = scratch.take_scalar_znx(n, sk_in.rank());
(0..sk_in.rank()).for_each(|i| {
module.vec_znx_switch_degree(
&mut sk_in_tmp.as_vec_znx_mut(),
@@ -409,9 +409,9 @@ impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
);
});
let (mut sk_out_tmp, scratch2) = scratch1.take_glwe_secret_exec(module, sk_out.rank());
let (mut sk_out_tmp, scratch2) = scratch1.take_glwe_secret_exec(n, sk_out.rank());
{
let (mut tmp, _) = scratch2.take_scalar_znx(module, 1);
let (mut tmp, _) = scratch2.take_scalar_znx(n, 1);
(0..sk_out.rank()).for_each(|i| {
module.vec_znx_switch_degree(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
@@ -435,15 +435,15 @@ impl<DataSelf: DataMut> GLWESwitchingKeyCompressed<DataSelf> {
pub trait AutomorphismKeyEncryptSkFamily<B: Backend> = GGLWEEncryptSkFamily<B>;
impl AutomorphismKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: AutomorphismKeyEncryptSkFamily<B> + ScalarZnxAllocBytes + VecZnxAllocBytes,
Module<B>: AutomorphismKeyEncryptSkFamily<B>,
{
GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank, rank) + GLWESecret::bytes_of(module, rank)
GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank, rank) + GLWESecret::bytes_of(n, rank)
}
pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, _basek: usize, _k: usize, _rank: usize) -> usize {
GLWESwitchingKey::encrypt_pk_scratch_space(module, _basek, _k, _rank, _rank)
pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, _n: usize, _basek: usize, _k: usize, _rank: usize) -> usize {
GLWESwitchingKey::encrypt_pk_scratch_space(module, _n, _basek, _k, _rank, _rank)
}
}
@@ -458,31 +458,26 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: AutomorphismKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxAutomorphism
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeScalarZnx<B> + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + TakeVecZnx<B>,
Module<B>: AutomorphismKeyEncryptSkFamily<B> + VecZnxAutomorphism + VecZnxSwithcDegree + VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(self.rank_out(), self.rank_in());
assert_eq!(sk.rank(), self.rank());
assert!(
scratch.available() >= AutomorphismKey::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
scratch.available()
>= AutomorphismKey::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank()),
"scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
scratch.available(),
self.rank(),
self.size(),
AutomorphismKey::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
AutomorphismKey::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank())
)
}
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(module, sk.rank());
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
{
(0..self.rank()).for_each(|i| {
@@ -504,11 +499,11 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
}
impl AutomorphismKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: AutomorphismKeyEncryptSkFamily<B> + ScalarZnxAllocBytes + VecZnxAllocBytes,
Module<B>: AutomorphismKeyEncryptSkFamily<B>,
{
GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, basek, k, rank, rank) + GLWESecret::bytes_of(module, rank)
GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, n, basek, k, rank, rank) + GLWESecret::bytes_of(n, rank)
}
}
@@ -523,32 +518,26 @@ impl<DataSelf: DataMut> AutomorphismKeyCompressed<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: AutomorphismKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAutomorphism
+ VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeScalarZnx<B> + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + TakeVecZnx<B>,
Module<B>: AutomorphismKeyEncryptSkFamily<B> + VecZnxSwithcDegree + VecZnxAutomorphism + VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeVecZnxDft<B> + TakeGLWESecretExec<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(self.rank_out(), self.rank_in());
assert_eq!(sk.rank(), self.rank());
assert!(
scratch.available()
>= AutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
>= AutomorphismKeyCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank()),
"scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
scratch.available(),
self.rank(),
self.size(),
AutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
AutomorphismKeyCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank())
)
}
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(module, sk.rank());
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
{
(0..self.rank()).for_each(|i| {
@@ -573,16 +562,16 @@ pub trait GLWETensorKeyEncryptSkFamily<B: Backend> =
GGLWEEncryptSkFamily<B> + VecZnxBigAllocBytes + VecZnxDftToVecZnxBigTmpA<B> + SvpApply<B>;
impl GLWETensorKey<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: GLWETensorKeyEncryptSkFamily<B> + ScalarZnxAllocBytes + VecZnxAllocBytes,
Module<B>: GLWETensorKeyEncryptSkFamily<B>,
{
GLWESecretExec::bytes_of(module, rank)
+ module.vec_znx_dft_alloc_bytes(rank, 1)
+ module.vec_znx_big_alloc_bytes(1, 1)
+ module.vec_znx_dft_alloc_bytes(1, 1)
+ GLWESecret::bytes_of(module, 1)
+ GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank, rank)
GLWESecretExec::bytes_of(module, n, rank)
+ module.vec_znx_dft_alloc_bytes(n, rank, 1)
+ module.vec_znx_big_alloc_bytes(n, 1, 1)
+ module.vec_znx_dft_alloc_bytes(n, 1, 1)
+ GLWESecret::bytes_of(n, 1)
+ GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank, rank)
}
}
@@ -596,35 +585,31 @@ impl<DataSelf: DataMut> GLWETensorKey<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: GLWETensorKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAllocBytes
+ VecZnxAddScalarInplace,
Scratch<B>:
ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeGLWESecretExec<B> + TakeScalarZnx<B> + TakeVecZnx<B>,
Module<B>: GLWETensorKeyEncryptSkFamily<B> + VecZnxSwithcDegree + VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeGLWESecretExec<B> + TakeScalarZnx + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
}
let n: usize = sk.n();
let rank: usize = self.rank();
let (mut sk_dft_prep, scratch1) = scratch.take_glwe_secret_exec(module, rank);
let (mut sk_dft_prep, scratch1) = scratch.take_glwe_secret_exec(n, rank);
sk_dft_prep.prepare(module, &sk);
let (mut sk_dft, scratch2) = scratch1.take_vec_znx_dft(module, rank, 1);
let (mut sk_dft, scratch2) = scratch1.take_vec_znx_dft(n, rank, 1);
(0..rank).for_each(|i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
});
let (mut sk_ij_big, scratch3) = scratch2.take_vec_znx_big(module, 1, 1);
let (mut sk_ij, scratch4) = scratch3.take_glwe_secret(module, 1);
let (mut sk_ij_dft, scratch5) = scratch4.take_vec_znx_dft(module, 1, 1);
let (mut sk_ij_big, scratch3) = scratch2.take_vec_znx_big(n, 1, 1);
let (mut sk_ij, scratch4) = scratch3.take_glwe_secret(n, 1);
let (mut sk_ij_dft, scratch5) = scratch4.take_vec_znx_dft(n, 1, 1);
(0..rank).for_each(|i| {
(i..rank).for_each(|j| {
@@ -648,11 +633,11 @@ impl<DataSelf: DataMut> GLWETensorKey<DataSelf> {
}
impl GLWETensorKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: GLWETensorKeyEncryptSkFamily<B> + ScalarZnxAllocBytes + VecZnxAllocBytes,
Module<B>: GLWETensorKeyEncryptSkFamily<B>,
{
GLWETensorKey::encrypt_sk_scratch_space(module, basek, k, rank)
GLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k, rank)
}
}
@@ -666,35 +651,30 @@ impl<DataSelf: DataMut> GLWETensorKeyCompressed<DataSelf> {
sigma: f64,
scratch: &mut Scratch<B>,
) where
Module<B>: GLWETensorKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAllocBytes
+ VecZnxAddScalarInplace,
Scratch<B>:
ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeGLWESecretExec<B> + TakeScalarZnx<B> + TakeVecZnx<B>,
Module<B>: GLWETensorKeyEncryptSkFamily<B> + VecZnxSwithcDegree + VecZnxAddScalarInplace,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeGLWESecretExec<B> + TakeScalarZnx + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
}
let n: usize = sk.n();
let rank: usize = self.rank();
let (mut sk_dft_prep, scratch1) = scratch.take_glwe_secret_exec(module, rank);
let (mut sk_dft_prep, scratch1) = scratch.take_glwe_secret_exec(n, rank);
sk_dft_prep.prepare(module, &sk);
let (mut sk_dft, scratch2) = scratch1.take_vec_znx_dft(module, rank, 1);
let (mut sk_dft, scratch2) = scratch1.take_vec_znx_dft(n, rank, 1);
(0..rank).for_each(|i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
});
let (mut sk_ij_big, scratch3) = scratch2.take_vec_znx_big(module, 1, 1);
let (mut sk_ij, scratch4) = scratch3.take_glwe_secret(module, 1);
let (mut sk_ij_dft, scratch5) = scratch4.take_vec_znx_dft(module, 1, 1);
let (mut sk_ij_big, scratch3) = scratch2.take_vec_znx_big(n, 1, 1);
let (mut sk_ij, scratch4) = scratch3.take_glwe_secret(n, 1);
let (mut sk_ij_dft, scratch5) = scratch4.take_vec_znx_dft(n, 1, 1);
let mut source_xa: Source = Source::new(seed_xa);

View File

@@ -8,6 +8,7 @@ use crate::{AutomorphismKey, GGSWCiphertextExec, GLWECiphertext, GLWEExternalPro
impl GLWESwitchingKey<Vec<u8>> {
pub fn external_product_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -18,11 +19,12 @@ impl GLWESwitchingKey<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
GLWECiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank)
GLWECiphertext::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank)
}
pub fn external_product_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ggsw: usize,
@@ -32,7 +34,7 @@ impl GLWESwitchingKey<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_out, k_ggsw, digits, rank)
GLWECiphertext::external_product_inplace_scratch_space(module, n, basek, k_out, k_ggsw, digits, rank)
}
}
@@ -118,6 +120,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
impl AutomorphismKey<Vec<u8>> {
pub fn external_product_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -128,11 +131,12 @@ impl AutomorphismKey<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
GLWESwitchingKey::external_product_scratch_space(module, basek, k_out, k_in, ggsw_k, digits, rank)
GLWESwitchingKey::external_product_scratch_space(module, n, basek, k_out, k_in, ggsw_k, digits, rank)
}
pub fn external_product_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
ggsw_k: usize,
@@ -142,7 +146,7 @@ impl AutomorphismKey<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
GLWESwitchingKey::external_product_inplace_scratch_space(module, basek, k_out, ggsw_k, digits, rank)
GLWESwitchingKey::external_product_inplace_scratch_space(module, n, basek, k_out, ggsw_k, digits, rank)
}
}

View File

@@ -10,6 +10,7 @@ use crate::{
impl AutomorphismKey<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -20,11 +21,12 @@ impl AutomorphismKey<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
GLWESwitchingKey::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
GLWESwitchingKey::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
}
pub fn keyswitch_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -34,7 +36,7 @@ impl AutomorphismKey<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
GLWESwitchingKey::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
GLWESwitchingKey::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
}
}
@@ -68,6 +70,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
impl GLWESwitchingKey<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -79,11 +82,14 @@ impl GLWESwitchingKey<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out)
GLWECiphertext::keyswitch_scratch_space(
module, n, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out,
)
}
pub fn keyswitch_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -93,7 +99,7 @@ impl GLWESwitchingKey<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
}
}

View File

@@ -1,14 +1,15 @@
use backend::hal::{
api::{MatZnxAlloc, MatZnxAllocBytes, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatPrepare},
layouts::{Backend, Data, DataMut, DataRef, MatZnx, Module, ReaderFrom, WriterTo},
api::{FillUniform, Reset, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatPrepare},
layouts::{Backend, Data, DataMut, DataRef, MatZnx, ReaderFrom, WriterTo},
};
use crate::{GLWECiphertext, Infos};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
pub trait GGLWEExecLayoutFamily<B: Backend> = VmpPMatAlloc<B> + VmpPMatAllocBytes + VmpPMatPrepare<B>;
use std::fmt;
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GGLWECiphertext<D: Data> {
pub(crate) data: MatZnx<D>,
pub(crate) basek: usize,
@@ -16,6 +17,40 @@ pub struct GGLWECiphertext<D: Data> {
pub(crate) digits: usize,
}
impl<D: DataRef> fmt::Debug for GGLWECiphertext<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for GGLWECiphertext<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.data.fill_uniform(source);
}
}
impl<D: DataMut> Reset for GGLWECiphertext<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.data.reset();
self.basek = 0;
self.k = 0;
self.digits = 0;
}
}
impl<D: DataRef> fmt::Display for GGLWECiphertext<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(GGLWECiphertext: basek={} k={} digits={}) {}",
self.basek, self.k, self.digits, self.data
)
}
}
impl<D: DataRef> GGLWECiphertext<D> {
pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> {
GLWECiphertext {
@@ -37,18 +72,7 @@ impl<D: DataMut> GGLWECiphertext<D> {
}
impl GGLWECiphertext<Vec<u8>> {
pub fn alloc<B: Backend>(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
let size: usize = k.div_ceil(basek);
debug_assert!(
size > digits,
@@ -66,25 +90,14 @@ impl GGLWECiphertext<Vec<u8>> {
);
Self {
data: module.mat_znx_alloc(rows, rank_in, rank_out + 1, size),
data: MatZnx::alloc(n, rows, rank_in, rank_out + 1, size),
basek: basek,
k,
digits,
}
}
pub fn bytes_of<B: Backend>(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> usize
where
Module<B>: MatZnxAllocBytes,
{
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> usize {
let size: usize = k.div_ceil(basek);
debug_assert!(
size > digits,
@@ -101,7 +114,7 @@ impl GGLWECiphertext<Vec<u8>> {
size
);
module.mat_znx_alloc_bytes(rows, rank_in, rank_out + 1, rows)
MatZnx::alloc_bytes(n, rows, rank_in, rank_out + 1, rows)
}
}
@@ -157,46 +170,57 @@ impl<D: DataRef> WriterTo for GGLWECiphertext<D> {
}
}
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GLWESwitchingKey<D: Data> {
pub(crate) key: GGLWECiphertext<D>,
pub(crate) sk_in_n: usize, // Degree of sk_in
pub(crate) sk_out_n: usize, // Degree of sk_out
}
impl<D: DataRef> fmt::Debug for GLWESwitchingKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataRef> fmt::Display for GLWESwitchingKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(GLWESwitchingKey: sk_in_n={} sk_out_n={}) {}",
self.sk_in_n, self.sk_out_n, self.key.data
)
}
}
impl<D: DataMut> FillUniform for GLWESwitchingKey<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.key.fill_uniform(source);
}
}
impl<D: DataMut> Reset for GLWESwitchingKey<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.key.reset();
self.sk_in_n = 0;
self.sk_out_n = 0;
}
}
impl GLWESwitchingKey<Vec<u8>> {
pub fn alloc<B: Backend>(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
GLWESwitchingKey {
key: GGLWECiphertext::alloc(module, basek, k, rows, digits, rank_in, rank_out),
key: GGLWECiphertext::alloc(n, basek, k, rows, digits, rank_in, rank_out),
sk_in_n: 0,
sk_out_n: 0,
}
}
pub fn bytes_of<B: Backend>(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> usize
where
Module<B>: MatZnxAllocBytes,
{
GGLWECiphertext::<Vec<u8>>::bytes_of(module, basek, k, rows, digits, rank_in, rank_out)
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> usize {
GGLWECiphertext::<Vec<u8>>::bytes_of(n, basek, k, rows, digits, rank_in, rank_out)
}
}
@@ -270,28 +294,50 @@ impl<D: DataRef> WriterTo for GLWESwitchingKey<D> {
}
}
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct AutomorphismKey<D: Data> {
pub(crate) key: GLWESwitchingKey<D>,
pub(crate) p: i64,
}
impl<D: DataRef> fmt::Debug for AutomorphismKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for AutomorphismKey<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.key.fill_uniform(source);
}
}
impl<D: DataMut> Reset for AutomorphismKey<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.key.reset();
self.p = 0;
}
}
impl<D: DataRef> fmt::Display for AutomorphismKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(AutomorphismKey: p={}) {}", self.p, self.key)
}
}
impl AutomorphismKey<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
AutomorphismKey {
key: GLWESwitchingKey::alloc(module, basek, k, rows, digits, rank, rank),
key: GLWESwitchingKey::alloc(n, basek, k, rows, digits, rank, rank),
p: 0,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
GLWESwitchingKey::<Vec<u8>>::bytes_of(module, basek, k, rows, digits, rank, rank)
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
GLWESwitchingKey::bytes_of(n, basek, k, rows, digits, rank, rank)
}
}
@@ -359,32 +405,59 @@ impl<D: DataRef> WriterTo for AutomorphismKey<D> {
}
}
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GLWETensorKey<D: Data> {
pub(crate) keys: Vec<GLWESwitchingKey<D>>,
}
impl<D: DataRef> fmt::Debug for GLWETensorKey<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for GLWETensorKey<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.keys
.iter_mut()
.for_each(|key: &mut GLWESwitchingKey<D>| key.fill_uniform(source))
}
}
impl<D: DataMut> Reset for GLWETensorKey<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.keys
.iter_mut()
.for_each(|key: &mut GLWESwitchingKey<D>| key.reset())
}
}
impl<D: DataRef> fmt::Display for GLWETensorKey<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "(GLWETensorKey)",)?;
for (i, key) in self.keys.iter().enumerate() {
write!(f, "{}: {}", i, key)?;
}
Ok(())
}
}
impl GLWETensorKey<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
let mut keys: Vec<GLWESwitchingKey<Vec<u8>>> = Vec::new();
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
(0..pairs).for_each(|_| {
keys.push(GLWESwitchingKey::alloc(
module, basek, k, rows, digits, 1, rank,
));
keys.push(GLWESwitchingKey::alloc(n, basek, k, rows, digits, 1, rank));
});
Self { keys: keys }
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
pairs * GLWESwitchingKey::<Vec<u8>>::bytes_of(module, basek, k, rows, digits, 1, rank)
pairs * GLWESwitchingKey::<Vec<u8>>::bytes_of(n, basek, k, rows, digits, 1, rank)
}
}
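For reference, the pairs count used by GLWETensorKey::{alloc, bytes_of} (and by the compressed variant later in this commit) is the number of unordered secret-key pairs (i, j) with 0 <= i <= j < rank, i.e. rank(rank+1)/2, floored at one key. A quick standalone check of the formula (not library code):

fn tensor_key_pairs(rank: usize) -> usize {
    (((rank + 1) * rank) >> 1).max(1)
}

fn main() {
    assert_eq!(tensor_key_pairs(1), 1); // (0,0)
    assert_eq!(tensor_key_pairs(2), 3); // (0,0), (0,1), (1,1)
    assert_eq!(tensor_key_pairs(3), 6); // matches the nested (0..rank) x (i..rank) encryption loops
}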

View File

@@ -1,12 +1,13 @@
use backend::hal::{
api::{MatZnxAlloc, MatZnxAllocBytes, VecZnxCopy, VecZnxFillUniform},
api::{FillUniform, Reset, VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, MatZnx, Module, ReaderFrom, WriterTo},
};
use crate::{AutomorphismKey, Decompress, GGLWECiphertext, GLWECiphertextCompressed, GLWESwitchingKey, GLWETensorKey, Infos};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GGLWECiphertextCompressed<D: Data> {
pub(crate) data: MatZnx<D>,
pub(crate) basek: usize,
@@ -16,19 +17,44 @@ pub struct GGLWECiphertextCompressed<D: Data> {
pub(crate) seed: Vec<[u8; 32]>,
}
impl<D: DataRef> fmt::Debug for GGLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for GGLWECiphertextCompressed<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.data.fill_uniform(source);
}
}
impl<D: DataMut> Reset for GGLWECiphertextCompressed<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.data.reset();
self.basek = 0;
self.k = 0;
self.digits = 0;
self.rank_out = 0;
self.seed = Vec::new();
}
}
impl<D: DataRef> fmt::Display for GGLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(GGLWECiphertextCompressed: basek={} k={} digits={}) {}",
self.basek, self.k, self.digits, self.data
)
}
}
impl GGLWECiphertextCompressed<Vec<u8>> {
pub fn alloc<B: Backend>(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
let size: usize = k.div_ceil(basek);
debug_assert!(
size > digits,
@@ -46,7 +72,7 @@ impl GGLWECiphertextCompressed<Vec<u8>> {
);
Self {
data: module.mat_znx_alloc(rows, rank_in, 1, size),
data: MatZnx::alloc(n, rows, rank_in, 1, size),
basek: basek,
k,
rank_out,
@@ -55,10 +81,7 @@ impl GGLWECiphertextCompressed<Vec<u8>> {
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize {
let size: usize = k.div_ceil(basek);
debug_assert!(
size > digits,
@@ -75,7 +98,7 @@ impl GGLWECiphertextCompressed<Vec<u8>> {
size
);
module.mat_znx_alloc_bytes(rows, rank_in, 1, rows)
MatZnx::alloc_bytes(n, rows, rank_in, 1, rows)
}
}
@@ -145,11 +168,9 @@ impl<D: DataMut> ReaderFrom for GGLWECiphertextCompressed<D> {
self.digits = reader.read_u64::<LittleEndian>()? as usize;
self.rank_out = reader.read_u64::<LittleEndian>()? as usize;
let seed_len = reader.read_u64::<LittleEndian>()? as usize;
if seed_len != self.seed.len() {
} else {
for s in &mut self.seed {
reader.read_exact(s)?;
}
self.seed = vec![[0u8; 32]; seed_len];
for s in &mut self.seed {
reader.read_exact(s)?;
}
self.data.read_from(reader)
}
@@ -228,13 +249,46 @@ impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGLWECiphertextCompresse
}
}
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GLWESwitchingKeyCompressed<D: Data> {
pub(crate) key: GGLWECiphertextCompressed<D>,
pub(crate) sk_in_n: usize, // Degree of sk_in
pub(crate) sk_out_n: usize, // Degree of sk_out
}
impl<D: DataRef> fmt::Debug for GLWESwitchingKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for GLWESwitchingKeyCompressed<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.key.fill_uniform(source);
}
}
impl<D: DataMut> Reset for GLWESwitchingKeyCompressed<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.key.reset();
self.sk_in_n = 0;
self.sk_out_n = 0;
}
}
impl<D: DataRef> fmt::Display for GLWESwitchingKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(GLWESwitchingKeyCompressed: sk_in_n={} sk_out_n={}) {}",
self.sk_in_n, self.sk_out_n, self.key.data
)
}
}
impl<D: Data> Infos for GLWESwitchingKeyCompressed<D> {
type Inner = MatZnx<D>;
@@ -270,30 +324,16 @@ impl<D: Data> GLWESwitchingKeyCompressed<D> {
}
impl GLWESwitchingKeyCompressed<Vec<u8>> {
pub fn alloc<B: Backend>(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
GLWESwitchingKeyCompressed {
key: GGLWECiphertextCompressed::alloc(module, basek, k, rows, digits, rank_in, rank_out),
key: GGLWECiphertextCompressed::alloc(n, basek, k, rows, digits, rank_in, rank_out),
sk_in_n: 0,
sk_out_n: 0,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
GGLWECiphertextCompressed::<Vec<u8>>::bytes_of(module, basek, k, rows, digits, rank_in)
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize {
GGLWECiphertextCompressed::bytes_of(n, basek, k, rows, digits, rank_in)
}
}
@@ -327,28 +367,50 @@ impl<D: DataMut> GLWESwitchingKey<D> {
}
}
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct AutomorphismKeyCompressed<D: Data> {
pub(crate) key: GLWESwitchingKeyCompressed<D>,
pub(crate) p: i64,
}
impl<D: DataRef> fmt::Debug for AutomorphismKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for AutomorphismKeyCompressed<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.key.fill_uniform(source);
}
}
impl<D: DataMut> Reset for AutomorphismKeyCompressed<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.key.reset();
self.p = 0;
}
}
impl<D: DataRef> fmt::Display for AutomorphismKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(AutomorphismKeyCompressed: p={}) {}", self.p, self.key)
}
}
impl AutomorphismKeyCompressed<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
AutomorphismKeyCompressed {
key: GLWESwitchingKeyCompressed::alloc(module, basek, k, rows, digits, rank, rank),
key: GLWESwitchingKeyCompressed::alloc(n, basek, k, rows, digits, rank, rank),
p: 0,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
GLWESwitchingKeyCompressed::<Vec<u8>>::bytes_of(module, basek, k, rows, digits, rank)
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
GLWESwitchingKeyCompressed::<Vec<u8>>::bytes_of(n, basek, k, rows, digits, rank)
}
}
@@ -410,32 +472,61 @@ impl<D: DataMut> AutomorphismKey<D> {
}
}
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GLWETensorKeyCompressed<D: Data> {
pub(crate) keys: Vec<GLWESwitchingKeyCompressed<D>>,
}
impl<D: DataRef> fmt::Debug for GLWETensorKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataMut> FillUniform for GLWETensorKeyCompressed<D> {
fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
self.keys
.iter_mut()
.for_each(|key: &mut GLWESwitchingKeyCompressed<D>| key.fill_uniform(source))
}
}
impl<D: DataMut> Reset for GLWETensorKeyCompressed<D>
where
MatZnx<D>: Reset,
{
fn reset(&mut self) {
self.keys
.iter_mut()
.for_each(|key: &mut GLWESwitchingKeyCompressed<D>| key.reset())
}
}
impl<D: DataRef> fmt::Display for GLWETensorKeyCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "(GLWETensorKeyCompressed)",)?;
for (i, key) in self.keys.iter().enumerate() {
write!(f, "{}: {}", i, key)?;
}
Ok(())
}
}
impl GLWETensorKeyCompressed<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
let mut keys: Vec<GLWESwitchingKeyCompressed<Vec<u8>>> = Vec::new();
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
(0..pairs).for_each(|_| {
keys.push(GLWESwitchingKeyCompressed::alloc(
module, basek, k, rows, digits, 1, rank,
n, basek, k, rows, digits, 1, rank,
));
});
Self { keys: keys }
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
pairs * GLWESwitchingKeyCompressed::<Vec<u8>>::bytes_of(module, basek, k, rows, digits, 1)
pairs * GLWESwitchingKeyCompressed::bytes_of(n, basek, k, rows, digits, 1)
}
}

View File

@@ -14,7 +14,16 @@ pub struct GGLWECiphertextExec<D: Data, B: Backend> {
}
impl<B: Backend> GGLWECiphertextExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
pub fn alloc(
module: &Module<B>,
n: usize,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> Self
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
@@ -35,7 +44,7 @@ impl<B: Backend> GGLWECiphertextExec<Vec<u8>, B> {
);
Self {
data: module.vmp_pmat_alloc(rows, rank_in, rank_out + 1, size),
data: module.vmp_pmat_alloc(n, rows, rank_in, rank_out + 1, size),
basek: basek,
k,
digits,
@@ -44,6 +53,7 @@ impl<B: Backend> GGLWECiphertextExec<Vec<u8>, B> {
pub fn bytes_of(
module: &Module<B>,
n: usize,
basek: usize,
k: usize,
rows: usize,
@@ -70,7 +80,7 @@ impl<B: Backend> GGLWECiphertextExec<Vec<u8>, B> {
size
);
module.vmp_pmat_alloc_bytes(rows, rank_in, rank_out + 1, rows)
module.vmp_pmat_alloc_bytes(n, rows, rank_in, rank_out + 1, rows)
}
}
@@ -129,12 +139,21 @@ pub struct GLWESwitchingKeyExec<D: Data, B: Backend> {
}
impl<B: Backend> GLWESwitchingKeyExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
pub fn alloc(
module: &Module<B>,
n: usize,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
) -> Self
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
GLWESwitchingKeyExec::<Vec<u8>, B> {
key: GGLWECiphertextExec::alloc(module, basek, k, rows, digits, rank_in, rank_out),
key: GGLWECiphertextExec::alloc(module, n, basek, k, rows, digits, rank_in, rank_out),
sk_in_n: 0,
sk_out_n: 0,
}
@@ -142,6 +161,7 @@ impl<B: Backend> GLWESwitchingKeyExec<Vec<u8>, B> {
pub fn bytes_of(
module: &Module<B>,
n: usize,
basek: usize,
k: usize,
rows: usize,
@@ -152,7 +172,7 @@ impl<B: Backend> GLWESwitchingKeyExec<Vec<u8>, B> {
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
GGLWECiphertextExec::bytes_of(module, basek, k, rows, digits, rank_in, rank_out)
GGLWECiphertextExec::bytes_of(module, n, basek, k, rows, digits, rank_in, rank_out)
}
pub fn from<DataOther: DataRef>(module: &Module<B>, other: &GLWESwitchingKey<DataOther>, scratch: &mut Scratch<B>) -> Self
@@ -161,6 +181,7 @@ impl<B: Backend> GLWESwitchingKeyExec<Vec<u8>, B> {
{
let mut ksk_exec: GLWESwitchingKeyExec<Vec<u8>, B> = Self::alloc(
module,
other.n(),
other.basek(),
other.k(),
other.rows(),
@@ -234,21 +255,21 @@ pub struct AutomorphismKeyExec<D: Data, B: Backend> {
}
impl<B: Backend> AutomorphismKeyExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
AutomorphismKeyExec::<Vec<u8>, B> {
key: GLWESwitchingKeyExec::alloc(module, basek, k, rows, digits, rank, rank),
key: GLWESwitchingKeyExec::alloc(module, n, basek, k, rows, digits, rank, rank),
p: 0,
}
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
GLWESwitchingKeyExec::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, rank, rank)
GLWESwitchingKeyExec::bytes_of(module, n, basek, k, rows, digits, rank, rank)
}
pub fn from<DataOther: DataRef>(module: &Module<B>, other: &AutomorphismKey<DataOther>, scratch: &mut Scratch<B>) -> Self
@@ -257,6 +278,7 @@ impl<B: Backend> AutomorphismKeyExec<Vec<u8>, B> {
{
let mut atk_exec: AutomorphismKeyExec<Vec<u8>, B> = Self::alloc(
module,
other.n(),
other.basek(),
other.k(),
other.rows(),
@@ -323,7 +345,7 @@ pub struct GLWETensorKeyExec<D: Data, B: Backend> {
}
impl<B: Backend> GLWETensorKeyExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
@@ -331,18 +353,18 @@ impl<B: Backend> GLWETensorKeyExec<Vec<u8>, B> {
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
(0..pairs).for_each(|_| {
keys.push(GLWESwitchingKeyExec::alloc(
module, basek, k, rows, digits, 1, rank,
module, n, basek, k, rows, digits, 1, rank,
));
});
Self { keys }
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: GGLWEExecLayoutFamily<B>,
{
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
pairs * GLWESwitchingKeyExec::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, 1, rank)
pairs * GLWESwitchingKeyExec::bytes_of(module, n, basek, k, rows, digits, 1, rank)
}
}

View File

@@ -1,5 +1,5 @@
use backend::hal::{
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAlloc, VecZnxStd, VecZnxSubScalarInplace, ZnxZero},
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxStd, VecZnxSubScalarInplace, ZnxZero},
layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
};
@@ -16,15 +16,20 @@ impl<D: DataRef> GGLWECiphertext<D> {
) where
DataSk: DataRef,
DataWant: DataRef,
Module<B>: GLWEDecryptFamily<B> + VecZnxStd + VecZnxAlloc + VecZnxSubScalarInplace,
Module<B>: GLWEDecryptFamily<B> + VecZnxStd + VecZnxSubScalarInplace,
B: TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
let digits: usize = self.digits();
let basek: usize = self.basek();
let k: usize = self.k();
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(module, basek, k));
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(
module,
self.n(),
basek,
k,
));
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
(0..self.rank_in()).for_each(|col_i| {
(0..self.rows()).for_each(|row_i| {

View File

@@ -0,0 +1,54 @@
use backend::hal::tests::serialization::test_reader_writer_interface;

use crate::{
    AutomorphismKey, AutomorphismKeyCompressed, GGLWECiphertext, GGLWECiphertextCompressed, GLWESwitchingKey,
    GLWESwitchingKeyCompressed, GLWETensorKey, GLWETensorKeyCompressed,
};

#[test]
fn test_gglwe_serialization() {
    let original: GGLWECiphertext<Vec<u8>> = GGLWECiphertext::alloc(1024, 12, 54, 3, 1, 2, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_gglwe_serialization_compressed() {
    let original: GGLWECiphertextCompressed<Vec<u8>> = GGLWECiphertextCompressed::alloc(1024, 12, 54, 3, 1, 2, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_glwe_switching_key_serialization() {
    let original: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(1024, 12, 54, 3, 1, 2, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_glwe_switching_key_serialization_compressed() {
    let original: GLWESwitchingKeyCompressed<Vec<u8>> = GLWESwitchingKeyCompressed::alloc(1024, 12, 54, 3, 1, 2, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_automorphism_key_serialization() {
    let original: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(1024, 12, 54, 3, 1, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_automorphism_key_serialization_compressed() {
    let original: AutomorphismKeyCompressed<Vec<u8>> = AutomorphismKeyCompressed::alloc(1024, 12, 54, 3, 1, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_tensor_key_serialization() {
    let original: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(1024, 12, 54, 3, 1, 2);
    test_reader_writer_interface(original);
}

#[test]
fn test_tensor_key_serialization_compressed() {
    let original: GLWETensorKeyCompressed<Vec<u8>> = GLWETensorKeyCompressed::alloc(1024, 12, 54, 3, 1, 2);
    test_reader_writer_interface(original);
}
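Every test above hands a freshly allocated object to `test_reader_writer_interface` from `backend::hal::tests::serialization`; the helper itself is not part of this diff. A minimal sketch of what such a round-trip check could look like, assuming only the `FillUniform`, `Reset`, `ReaderFrom` and `WriterTo` traits that this commit wires up for these types (the function name and exact bounds below are illustrative):

use std::io::Cursor;

use backend::hal::{
    api::{FillUniform, Reset},
    layouts::{ReaderFrom, WriterTo},
};
use sampling::source::Source;

// Sketch only: the real helper lives in backend::hal::tests::serialization.
fn round_trip_check<T>(mut original: T)
where
    T: WriterTo + ReaderFrom + FillUniform + Reset + Clone + PartialEq + std::fmt::Debug,
{
    // Randomize the payload so equality after the round trip is meaningful.
    let mut source = Source::new([0u8; 32]);
    original.fill_uniform(&mut source);

    // Serialize into an in-memory buffer.
    let mut bytes: Vec<u8> = Vec::new();
    original.write_to(&mut bytes).expect("writing to a Vec<u8> should not fail");

    // Read back into a reset clone and compare via PartialEq.
    let mut restored = original.clone();
    restored.reset();
    restored
        .read_from(&mut Cursor::new(bytes))
        .expect("reading back the bytes just written should not fail");

    assert_eq!(original, restored);
}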

View File

@@ -1,8 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxCopy, VecZnxStd,
VecZnxSubScalarInplace, VecZnxSwithcDegree,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxCopy,
VecZnxStd, VecZnxSubScalarInplace, VecZnxSwithcDegree,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
@@ -18,19 +17,14 @@ use crate::{
noise::log2_std_noise_gglwe_product,
};
pub(crate) trait AutomorphismTestModuleFamily<B: Backend> = MatZnxAlloc
+ AutomorphismKeyEncryptSkFamily<B>
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
pub(crate) trait AutomorphismTestModuleFamily<B: Backend> = AutomorphismKeyEncryptSkFamily<B>
+ GLWEKeyswitchFamily<B>
+ ScalarZnxAlloc
+ VecZnxAutomorphism
+ GGLWEExecLayoutFamily<B>
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace
+ VecZnxAutomorphism
+ VecZnxAutomorphismInplace
+ VecZnxAlloc
+ GLWEDecryptFamily<B>
+ VecZnxSubScalarInplace
+ VecZnxStd
@@ -55,19 +49,20 @@ pub(crate) fn test_automorphisk_key_encrypt_sk<B: Backend>(
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k_ksk - digits * basek) / (digits * basek);
let mut atk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut atk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_scratch_space(
module, basek, k_ksk, rank,
module, n, basek, k_ksk, rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let p = -5;
@@ -110,19 +105,20 @@ pub(crate) fn test_automorphisk_key_encrypt_sk_compressed<B: Backend>(
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k_ksk - digits * basek) / (digits * basek);
let mut atk_compressed: AutomorphismKeyCompressed<Vec<u8>> =
AutomorphismKeyCompressed::alloc(module, basek, k_ksk, rows, digits, rank);
AutomorphismKeyCompressed::alloc(n, basek, k_ksk, rows, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_scratch_space(
module, basek, k_ksk, rank,
module, n, basek, k_ksk, rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let p = -5;
@@ -151,7 +147,7 @@ pub(crate) fn test_automorphisk_key_encrypt_sk_compressed<B: Backend>(
});
let sk_out_exec = GLWESecretExec::from(module, &sk_out);
let mut atk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut atk: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
atk.decompress(module, &atk_compressed);
atk.key
@@ -174,30 +170,31 @@ pub(crate) fn test_gglwe_automorphism<B: Backend>(
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let digits_in: usize = 1;
let rows_in: usize = k_in / (basek * digits);
let rows_apply: usize = k_in.div_ceil(basek * digits);
let mut auto_key_in: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(&module, basek, k_in, rows_in, digits_in, rank);
let mut auto_key_out: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(&module, basek, k_out, rows_in, digits_in, rank);
let mut auto_key_apply: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(&module, basek, k_apply, rows_apply, digits, rank);
let mut auto_key_in: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_in, rows_in, digits_in, rank);
let mut auto_key_out: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_out, rows_in, digits_in, rank);
let mut auto_key_apply: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_apply, rows_apply, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
AutomorphismKey::encrypt_sk_scratch_space(&module, basek, k_apply, rank)
| AutomorphismKey::automorphism_scratch_space(&module, basek, k_out, k_in, k_apply, digits, rank),
AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_apply, rank)
| AutomorphismKey::automorphism_scratch_space(module, n, basek, k_out, k_in, k_apply, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
// gglwe_{s1}(s0) = s0 -> s1
auto_key_in.encrypt_sk(
&module,
module,
p0,
&sk,
&mut source_xa,
@@ -208,7 +205,7 @@ pub(crate) fn test_gglwe_automorphism<B: Backend>(
// gglwe_{s2}(s1) -> s1 -> s2
auto_key_apply.encrypt_sk(
&module,
module,
p1,
&sk,
&mut source_xa,
@@ -218,21 +215,16 @@ pub(crate) fn test_gglwe_automorphism<B: Backend>(
);
let mut auto_key_apply_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(&module, basek, k_apply, rows_apply, digits, rank);
AutomorphismKeyExec::alloc(module, n, basek, k_apply, rows_apply, digits, rank);
auto_key_apply_exec.prepare(&module, &auto_key_apply, scratch.borrow());
auto_key_apply_exec.prepare(module, &auto_key_apply, scratch.borrow());
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
auto_key_out.automorphism(
&module,
&auto_key_in,
&auto_key_apply_exec,
scratch.borrow(),
);
auto_key_out.automorphism(module, &auto_key_in, &auto_key_apply_exec, scratch.borrow());
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_out);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_out);
let mut sk_auto: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk_auto: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
(0..rank).for_each(|i| {
module.vec_znx_automorphism(
@@ -244,13 +236,13 @@ pub(crate) fn test_gglwe_automorphism<B: Backend>(
);
});
let sk_auto_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(&module, &sk_auto);
let sk_auto_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_auto);
(0..auto_key_out.rank_in()).for_each(|col_i| {
(0..auto_key_out.rows()).for_each(|row_i| {
auto_key_out
.at(row_i, col_i)
.decrypt(&module, &mut pt, &sk_auto_dft, scratch.borrow());
.decrypt(module, &mut pt, &sk_auto_dft, scratch.borrow());
module.vec_znx_sub_scalar_inplace(
&mut pt.data,
@@ -262,7 +254,7 @@ pub(crate) fn test_gglwe_automorphism<B: Backend>(
let noise_have: f64 = module.vec_znx_std(basek, &pt.data, 0).log2();
let noise_want: f64 = log2_std_noise_gglwe_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
0.5,
@@ -298,29 +290,30 @@ pub(crate) fn test_gglwe_automorphism_inplace<B: Backend>(
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let digits_in: usize = 1;
let rows_in: usize = k_in / (basek * digits);
let rows_apply: usize = k_in.div_ceil(basek * digits);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(&module, basek, k_in, rows_in, digits_in, rank);
let mut auto_key_apply: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(&module, basek, k_apply, rows_apply, digits, rank);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_in, rows_in, digits_in, rank);
let mut auto_key_apply: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_apply, rows_apply, digits, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
AutomorphismKey::encrypt_sk_scratch_space(&module, basek, k_apply, rank)
| AutomorphismKey::automorphism_inplace_scratch_space(&module, basek, k_in, k_apply, digits, rank),
AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_apply, rank)
| AutomorphismKey::automorphism_inplace_scratch_space(module, n, basek, k_in, k_apply, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
// gglwe_{s1}(s0) = s0 -> s1
auto_key.encrypt_sk(
&module,
module,
p0,
&sk,
&mut source_xa,
@@ -331,7 +324,7 @@ pub(crate) fn test_gglwe_automorphism_inplace<B: Backend>(
// gglwe_{s2}(s1) -> s1 -> s2
auto_key_apply.encrypt_sk(
&module,
module,
p1,
&sk,
&mut source_xa,
@@ -341,16 +334,16 @@ pub(crate) fn test_gglwe_automorphism_inplace<B: Backend>(
);
let mut auto_key_apply_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(&module, basek, k_apply, rows_apply, digits, rank);
AutomorphismKeyExec::alloc(module, n, basek, k_apply, rows_apply, digits, rank);
auto_key_apply_exec.prepare(&module, &auto_key_apply, scratch.borrow());
auto_key_apply_exec.prepare(module, &auto_key_apply, scratch.borrow());
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
auto_key.automorphism_inplace(&module, &auto_key_apply_exec, scratch.borrow());
auto_key.automorphism_inplace(module, &auto_key_apply_exec, scratch.borrow());
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_in);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_in);
let mut sk_auto: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk_auto: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
(0..rank).for_each(|i| {
@@ -363,13 +356,13 @@ pub(crate) fn test_gglwe_automorphism_inplace<B: Backend>(
);
});
let sk_auto_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(&module, &sk_auto);
let sk_auto_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_auto);
(0..auto_key.rank_in()).for_each(|col_i| {
(0..auto_key.rows()).for_each(|row_i| {
auto_key
.at(row_i, col_i)
.decrypt(&module, &mut pt, &sk_auto_dft, scratch.borrow());
.decrypt(module, &mut pt, &sk_auto_dft, scratch.borrow());
module.vec_znx_sub_scalar_inplace(
&mut pt.data,
0,
@@ -380,7 +373,7 @@ pub(crate) fn test_gglwe_automorphism_inplace<B: Backend>(
let noise_have: f64 = module.vec_znx_std(basek, &pt.data, 0).log2();
let noise_want: f64 = log2_std_noise_gglwe_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
0.5,

View File

@@ -1,8 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxCopy, VecZnxRotateInplace, VecZnxStd, VecZnxSubScalarInplace, VecZnxSwithcDegree,
ZnxViewMut,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxCopy, VecZnxRotateInplace, VecZnxStd,
VecZnxSubScalarInplace, VecZnxSwithcDegree, ZnxViewMut,
},
layouts::{Backend, Module, ScalarZnx, ScalarZnxToMut, ScratchOwned},
oep::{
@@ -21,14 +20,9 @@ use crate::{
pub(crate) trait TestModuleFamily<B: Backend> = GGLWEEncryptSkFamily<B>
+ GLWEDecryptFamily<B>
+ MatZnxAlloc
+ ScalarZnxAlloc
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace
+ VecZnxStd
+ VecZnxAlloc
+ VecZnxSubScalarInplace
+ VecZnxCopy;
@@ -56,22 +50,23 @@ pub(crate) fn test_gglwe_encrypt_sk<B: Backend>(
Module<B>: TestModuleFamily<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k_ksk - digits * basek) / (digits * basek);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank_in, rank_out);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank_in, rank_out);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::encrypt_sk_scratch_space(
module, basek, k_ksk, rank_in, rank_out,
module, n, basek, k_ksk, rank_in, rank_out,
));
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk_in.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk_out.fill_ternary_prob(0.5, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -101,22 +96,23 @@ pub(crate) fn test_gglwe_encrypt_sk_compressed<B: Backend>(
Module<B>: TestModuleFamily<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k_ksk - digits * basek) / (digits * basek);
let mut ksk_compressed: GLWESwitchingKeyCompressed<Vec<u8>> =
GLWESwitchingKeyCompressed::alloc(module, basek, k_ksk, rows, digits, rank_in, rank_out);
GLWESwitchingKeyCompressed::alloc(n, basek, k_ksk, rows, digits, rank_in, rank_out);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(
module, basek, k_ksk, rank_in, rank_out,
module, n, basek, k_ksk, rank_in, rank_out,
));
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk_in.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk_out.fill_ternary_prob(0.5, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -132,7 +128,7 @@ pub(crate) fn test_gglwe_encrypt_sk_compressed<B: Backend>(
scratch.borrow(),
);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank_in, rank_out);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank_in, rank_out);
ksk.decompress(module, &ksk_compressed);
ksk.key
@@ -155,29 +151,16 @@ pub(crate) fn test_gglwe_keyswitch<B: Backend>(
TestModuleFamily<B> + GGLWEEncryptSkFamily<B> + GLWEDecryptFamily<B> + GLWEKeyswitchFamily<B> + GGLWEExecLayoutFamily<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let digits_in: usize = 1;
let mut ct_gglwe_s0s1: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(
module,
basek,
k_in,
rows,
digits_in,
rank_in_s0s1,
rank_out_s0s1,
);
let mut ct_gglwe_s1s2: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(
module,
basek,
k_ksk,
rows,
digits,
rank_out_s0s1,
rank_out_s1s2,
);
let mut ct_gglwe_s0s1: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(n, basek, k_in, rows, digits_in, rank_in_s0s1, rank_out_s0s1);
let mut ct_gglwe_s1s2: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank_out_s0s1, rank_out_s1s2);
let mut ct_gglwe_s0s2: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(
module,
n,
basek,
k_out,
rows,
@@ -192,6 +175,7 @@ pub(crate) fn test_gglwe_keyswitch<B: Backend>(
let mut scratch_enc: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::encrypt_sk_scratch_space(
module,
n,
basek,
k_ksk,
rank_in_s0s1 | rank_out_s0s1,
@@ -199,6 +183,7 @@ pub(crate) fn test_gglwe_keyswitch<B: Backend>(
));
let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::keyswitch_scratch_space(
module,
n,
basek,
k_out,
k_in,
@@ -208,13 +193,13 @@ pub(crate) fn test_gglwe_keyswitch<B: Backend>(
ct_gglwe_s1s2.rank_out(),
));
let mut sk0: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in_s0s1);
let mut sk0: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in_s0s1);
sk0.fill_ternary_prob(0.5, &mut source_xs);
let mut sk1: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out_s0s1);
let mut sk1: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out_s0s1);
sk1.fill_ternary_prob(0.5, &mut source_xs);
let mut sk2: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out_s1s2);
let mut sk2: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out_s1s2);
sk2.fill_ternary_prob(0.5, &mut source_xs);
let sk2_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk2);
@@ -252,7 +237,7 @@ pub(crate) fn test_gglwe_keyswitch<B: Backend>(
);
let max_noise: f64 = log2_std_noise_gglwe_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
0.5,
@@ -286,13 +271,13 @@ pub(crate) fn test_gglwe_keyswitch_inplace<B: Backend>(
+ GLWEDecryptFamily<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(basek * digits);
let digits_in: usize = 1;
let mut ct_gglwe_s0s1: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(module, basek, k_ct, rows, digits_in, rank_in, rank_out);
let mut ct_gglwe_s1s2: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank_out, rank_out);
GLWESwitchingKey::alloc(n, basek, k_ct, rows, digits_in, rank_in, rank_out);
let mut ct_gglwe_s1s2: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank_out, rank_out);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -300,24 +285,25 @@ pub(crate) fn test_gglwe_keyswitch_inplace<B: Backend>(
let mut scratch_enc: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::encrypt_sk_scratch_space(
module,
n,
basek,
k_ksk,
rank_in | rank_out,
rank_out,
));
let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::keyswitch_inplace_scratch_space(
module, basek, k_ct, k_ksk, digits, rank_out,
module, n, basek, k_ct, k_ksk, digits, rank_out,
));
let var_xs: f64 = 0.5;
let mut sk0: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in);
let mut sk0: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk0.fill_ternary_prob(var_xs, &mut source_xs);
let mut sk1: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk1: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk1.fill_ternary_prob(var_xs, &mut source_xs);
let mut sk2: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk2: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk2.fill_ternary_prob(var_xs, &mut source_xs);
let sk2_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk2);
@@ -352,7 +338,7 @@ pub(crate) fn test_gglwe_keyswitch_inplace<B: Backend>(
let ct_gglwe_s0s2: GLWESwitchingKey<Vec<u8>> = ct_gglwe_s0s1;
let max_noise: f64 = log2_std_noise_gglwe_product(
module.n() as f64,
n as f64,
basek * digits,
var_xs,
var_xs,
@@ -388,25 +374,25 @@ pub(crate) fn test_gglwe_external_product<B: Backend>(
+ VecZnxRotateInplace,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let digits_in: usize = 1;
let mut ct_gglwe_in: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(module, basek, k_in, rows, digits_in, rank_in, rank_out);
let mut ct_gglwe_in: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_in, rows, digits_in, rank_in, rank_out);
let mut ct_gglwe_out: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(module, basek, k_out, rows, digits_in, rank_in, rank_out);
let mut ct_rgsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ggsw, rows, digits, rank_out);
GLWESwitchingKey::alloc(n, basek, k_out, rows, digits_in, rank_in, rank_out);
let mut ct_rgsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank_out);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_in, rank_in, rank_out)
| GLWESwitchingKey::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank_out)
| GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank_out),
GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_in, rank_in, rank_out)
| GLWESwitchingKey::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank_out)
| GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank_out),
);
let r: usize = 1;
@@ -415,10 +401,10 @@ pub(crate) fn test_gglwe_external_product<B: Backend>(
let var_xs: f64 = 0.5;
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk_in.fill_ternary_prob(var_xs, &mut source_xs);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk_out.fill_ternary_prob(var_xs, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -444,7 +430,7 @@ pub(crate) fn test_gglwe_external_product<B: Backend>(
);
let mut ct_rgsw_exec: GGSWCiphertextExec<Vec<u8>, B> =
GGSWCiphertextExec::alloc(module, basek, k_ggsw, rows, digits, rank_out);
GGSWCiphertextExec::alloc(module, n, basek, k_ggsw, rows, digits, rank_out);
ct_rgsw_exec.prepare(module, &ct_rgsw, scratch.borrow());
@@ -458,12 +444,12 @@ pub(crate) fn test_gglwe_external_product<B: Backend>(
let var_gct_err_lhs: f64 = sigma * sigma;
let var_gct_err_rhs: f64 = 0f64;
let var_msg: f64 = 1f64 / module.n() as f64; // X^{k}
let var_msg: f64 = 1f64 / n as f64; // X^{k}
let var_a0_err: f64 = sigma * sigma;
let var_a1_err: f64 = 1f64 / 12f64;
let max_noise: f64 = noise_ggsw_product(
module.n() as f64,
n as f64,
basek * digits,
var_xs,
var_msg,
@@ -499,24 +485,24 @@ pub(crate) fn test_gglwe_external_product_inplace<B: Backend>(
+ VecZnxRotateInplace,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(basek * digits);
let digits_in: usize = 1;
let mut ct_gglwe: GLWESwitchingKey<Vec<u8>> =
GLWESwitchingKey::alloc(module, basek, k_ct, rows, digits_in, rank_in, rank_out);
let mut ct_rgsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ggsw, rows, digits, rank_out);
let mut ct_gglwe: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ct, rows, digits_in, rank_in, rank_out);
let mut ct_rgsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank_out);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ct, rank_in, rank_out)
| GLWESwitchingKey::external_product_inplace_scratch_space(module, basek, k_ct, k_ggsw, digits, rank_out)
| GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank_out),
GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ct, rank_in, rank_out)
| GLWESwitchingKey::external_product_inplace_scratch_space(module, n, basek, k_ct, k_ggsw, digits, rank_out)
| GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank_out),
);
let r: usize = 1;
@@ -525,10 +511,10 @@ pub(crate) fn test_gglwe_external_product_inplace<B: Backend>(
let var_xs: f64 = 0.5;
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk_in.fill_ternary_prob(var_xs, &mut source_xs);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk_out.fill_ternary_prob(var_xs, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -554,7 +540,7 @@ pub(crate) fn test_gglwe_external_product_inplace<B: Backend>(
);
let mut ct_rgsw_exec: GGSWCiphertextExec<Vec<u8>, B> =
GGSWCiphertextExec::alloc(module, basek, k_ggsw, rows, digits, rank_out);
GGSWCiphertextExec::alloc(module, n, basek, k_ggsw, rows, digits, rank_out);
ct_rgsw_exec.prepare(module, &ct_rgsw, scratch.borrow());
@@ -568,12 +554,12 @@ pub(crate) fn test_gglwe_external_product_inplace<B: Backend>(
let var_gct_err_lhs: f64 = sigma * sigma;
let var_gct_err_rhs: f64 = 0f64;
let var_msg: f64 = 1f64 / module.n() as f64; // X^{k}
let var_msg: f64 = 1f64 / n as f64; // X^{k}
let var_a0_err: f64 = sigma * sigma;
let var_a1_err: f64 = 1f64 / 12f64;
let max_noise: f64 = noise_ggsw_product(
module.n() as f64,
n as f64,
basek * digits,
var_xs,
var_msg,

View File

@@ -1,8 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxBigAlloc, VecZnxCopy, VecZnxDftAlloc, VecZnxStd, VecZnxSubScalarInplace,
VecZnxSwithcDegree,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxBigAlloc, VecZnxCopy, VecZnxDftAlloc, VecZnxStd,
VecZnxSubScalarInplace, VecZnxSwithcDegree,
},
layouts::{Backend, Module, ScratchOwned, VecZnxDft},
oep::{
@@ -19,14 +18,9 @@ use crate::{
pub(crate) trait TestModuleFamily<B: Backend> = GGLWEEncryptSkFamily<B>
+ GLWEDecryptFamily<B>
+ MatZnxAlloc
+ ScalarZnxAlloc
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace
+ VecZnxStd
+ VecZnxAlloc
+ VecZnxSubScalarInplace;
pub(crate) trait TestScratchFamily<B: Backend> = TakeVecZnxDftImpl<B>
@@ -51,9 +45,10 @@ where
+ VecZnxBigAlloc<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k / basek;
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(&module, basek, k, rows, 1, rank);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(n, basek, k, rows, 1, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -61,14 +56,15 @@ where
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWETensorKey::encrypt_sk_scratch_space(
module,
n,
basek,
tensor_key.k(),
rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(&module, &sk);
let mut sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
sk_exec.prepare(module, &sk);
tensor_key.encrypt_sk(
@@ -80,12 +76,12 @@ where
scratch.borrow(),
);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k);
let mut sk_ij_dft = module.vec_znx_dft_alloc(1, 1);
let mut sk_ij_big = module.vec_znx_big_alloc(1, 1);
let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, 1);
let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(rank, 1);
let mut sk_ij_dft = module.vec_znx_dft_alloc(n, 1, 1);
let mut sk_ij_big = module.vec_znx_big_alloc(n, 1, 1);
let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, 1);
let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(n, rank, 1);
(0..rank).for_each(|i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
@@ -108,7 +104,7 @@ where
tensor_key
.at(i, j)
.at(row_i, col_i)
.decrypt(&module, &mut pt, &sk_exec, scratch.borrow());
.decrypt(module, &mut pt, &sk_exec, scratch.borrow());
module.vec_znx_sub_scalar_inplace(&mut pt.data, 0, row_i, &sk_ij.data, col_i);
@@ -136,24 +132,25 @@ pub(crate) fn test_tensor_key_encrypt_sk_compressed<B: Backend>(
+ VecZnxCopy,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k / basek;
let mut tensor_key_compressed: GLWETensorKeyCompressed<Vec<u8>> =
GLWETensorKeyCompressed::alloc(&module, basek, k, rows, 1, rank);
let mut tensor_key_compressed: GLWETensorKeyCompressed<Vec<u8>> = GLWETensorKeyCompressed::alloc(n, basek, k, rows, 1, rank);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWETensorKeyCompressed::encrypt_sk_scratch_space(
module,
n,
basek,
tensor_key_compressed.k(),
rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(&module, &sk);
let mut sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
sk_exec.prepare(module, &sk);
let seed_xa: [u8; 32] = [1u8; 32];
@@ -167,15 +164,15 @@ pub(crate) fn test_tensor_key_encrypt_sk_compressed<B: Backend>(
scratch.borrow(),
);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(&module, basek, k, rows, 1, rank);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(n, basek, k, rows, 1, rank);
tensor_key.decompress(module, &tensor_key_compressed);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k);
let mut sk_ij_dft = module.vec_znx_dft_alloc(1, 1);
let mut sk_ij_big = module.vec_znx_big_alloc(1, 1);
let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, 1);
let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(rank, 1);
let mut sk_ij_dft = module.vec_znx_dft_alloc(n, 1, 1);
let mut sk_ij_big = module.vec_znx_big_alloc(n, 1, 1);
let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, 1);
let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(n, rank, 1);
(0..rank).for_each(|i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
@@ -198,7 +195,7 @@ pub(crate) fn test_tensor_key_encrypt_sk_compressed<B: Backend>(
tensor_key
.at(i, j)
.at(row_i, col_i)
.decrypt(&module, &mut pt, &sk_exec, scratch.borrow());
.decrypt(module, &mut pt, &sk_exec, scratch.borrow());
module.vec_znx_sub_scalar_inplace(&mut pt.data, 0, row_i, &sk_ij.data, col_i);

View File

@@ -1,4 +1,5 @@
mod cpu_spqlios;
mod generic_serialization;
mod generics_automorphism_key;
mod generics_gglwe;
mod generics_tensor_key;

View File

@@ -10,6 +10,7 @@ use crate::{
impl GGSWCiphertext<Vec<u8>> {
pub fn automorphism_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -23,15 +24,16 @@ impl GGSWCiphertext<Vec<u8>> {
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxNormalizeTmpBytes,
{
let out_size: usize = k_out.div_ceil(basek);
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
let ks_internal: usize =
GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, n, basek, k_out, k_tsk, digits_tsk, rank);
ci_dft + (ks_internal | expand)
}
pub fn automorphism_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -44,7 +46,7 @@ impl GGSWCiphertext<Vec<u8>> {
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxNormalizeTmpBytes,
{
GGSWCiphertext::automorphism_scratch_space(
module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
module, n, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
)
}
}
@@ -65,6 +67,9 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
{
use crate::Infos;
assert_eq!(self.n(), auto_key.n());
assert_eq!(lhs.n(), auto_key.n());
assert_eq!(
self.rank(),
lhs.rank(),
@@ -90,6 +95,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
scratch.available()
>= GGSWCiphertext::automorphism_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
lhs.k(),
@@ -102,6 +108,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
)
};
let n: usize = auto_key.n();
let rank: usize = self.rank();
let cols: usize = rank + 1;
@@ -113,7 +120,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
.automorphism(module, &lhs.at(row_i, 0), auto_key, scratch);
// Isolates DFT(AUTO(a[i]))
let (mut ci_dft, scratch1) = scratch.take_vec_znx_dft(module, cols, self.size());
let (mut ci_dft, scratch1) = scratch.take_vec_znx_dft(n, cols, self.size());
(0..cols).for_each(|i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut ci_dft, i, &self.at(row_i, 0).data, i);
});

View File

@@ -1,8 +1,6 @@
use backend::hal::{
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxAllocBytes, VecZnxNormalizeInplace, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
api::{ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxNormalizeInplace, ZnxZero},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, VecZnx},
};
use sampling::source::Source;
@@ -14,15 +12,15 @@ use crate::{
pub trait GGSWEncryptSkFamily<B: Backend> = GLWEEncryptSkFamily<B>;
impl GGSWCiphertext<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAllocBytes,
Module<B>: GGSWEncryptSkFamily<B>,
{
let size = k.div_ceil(basek);
GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+ module.vec_znx_alloc_bytes(rank + 1, size)
+ module.vec_znx_alloc_bytes(1, size)
+ module.vec_znx_dft_alloc_bytes(rank + 1, size)
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
+ VecZnx::alloc_bytes(n, rank + 1, size)
+ VecZnx::alloc_bytes(n, 1, size)
+ module.vec_znx_dft_alloc_bytes(n, rank + 1, size)
}
}
@@ -38,16 +36,15 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
use backend::hal::api::ZnxInfos;
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), module.n());
assert_eq!(pt.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n(), sk.n());
}
let basek: usize = self.basek();
@@ -55,7 +52,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
let rank: usize = self.rank();
let digits: usize = self.digits();
let (mut tmp_pt, scratch1) = scratch.take_glwe_pt(module, basek, k);
let (mut tmp_pt, scratch1) = scratch.take_glwe_pt(self.n(), basek, k);
(0..self.rows()).for_each(|row_i| {
tmp_pt.data.zero();
@@ -82,11 +79,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
}
impl GGSWCiphertextCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAllocBytes,
Module<B>: GGSWEncryptSkFamily<B>,
{
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k, rank)
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k, rank)
}
}
@@ -102,16 +99,15 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
use backend::hal::api::ZnxInfos;
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), module.n());
assert_eq!(pt.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n(), sk.n());
}
let basek: usize = self.basek();
@@ -120,10 +116,12 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
let cols: usize = rank + 1;
let digits: usize = self.digits();
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(module, basek, k);
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(self.n(), basek, k);
let mut source = Source::new(seed_xa);
self.seed = vec![[0u8; 32]; self.rows() * cols];
(0..self.rows()).for_each(|row_i| {
tmp_pt.data.zero();
@@ -137,7 +135,7 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
let (seed, mut source_xa_tmp) = source.branch();
self.seed[row_i * cols + col_j] = seed;
encrypt_sk_internal(
module,
self.basek(),

View File

@@ -8,6 +8,7 @@ use crate::{GGSWCiphertext, GGSWCiphertextExec, GLWECiphertext, GLWEExternalProd
impl GGSWCiphertext<Vec<u8>> {
pub fn external_product_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -18,11 +19,12 @@ impl GGSWCiphertext<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
GLWECiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank)
GLWECiphertext::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank)
}
pub fn external_product_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ggsw: usize,
@@ -32,7 +34,7 @@ impl GGSWCiphertext<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_out, k_ggsw, digits, rank)
GLWECiphertext::external_product_inplace_scratch_space(module, n, basek, k_out, k_ggsw, digits, rank)
}
}
@@ -51,6 +53,9 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
{
use crate::{GGSWCiphertext, Infos};
assert_eq!(lhs.n(), self.n());
assert_eq!(rhs.n(), self.n());
assert_eq!(
self.rank(),
lhs.rank(),
@@ -70,6 +75,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
scratch.available()
>= GGSWCiphertext::external_product_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
lhs.k(),
@@ -104,6 +110,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
{
#[cfg(debug_assertions)]
{
assert_eq!(rhs.n(), self.n());
assert_eq!(
self.rank(),
rhs.rank(),

View File

@@ -1,9 +1,9 @@
use backend::hal::{
api::{
ScratchAvailable, TakeVecZnxBig, TakeVecZnxDft, VecZnxAllocBytes, VecZnxBigAllocBytes, VecZnxDftAddInplace,
VecZnxDftCopy, VecZnxDftToVecZnxBigTmpA, VecZnxNormalizeTmpBytes, ZnxInfos,
ScratchAvailable, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAllocBytes, VecZnxDftAddInplace, VecZnxDftCopy,
VecZnxDftToVecZnxBigTmpA, VecZnxNormalizeTmpBytes, ZnxInfos,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxDft, VmpPMat},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VecZnxDft, VmpPMat},
};
use crate::{GGSWCiphertext, GLWECiphertext, GLWEKeyswitchFamily, GLWESwitchingKeyExec, GLWETensorKeyExec, Infos};
@@ -14,6 +14,7 @@ pub trait GGSWKeySwitchFamily<B> =
impl GGSWCiphertext<Vec<u8>> {
pub(crate) fn expand_row_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
self_k: usize,
k_tsk: usize,
@@ -27,9 +28,10 @@ impl GGSWCiphertext<Vec<u8>> {
let self_size_out: usize = self_k.div_ceil(basek);
let self_size_in: usize = self_size_out.div_ceil(digits);
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(rank + 1, tsk_size);
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, self_size_in);
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, tsk_size);
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(n, 1, self_size_in);
let vmp: usize = module.vmp_apply_tmp_bytes(
n,
self_size_out,
self_size_in,
self_size_in,
@@ -37,13 +39,14 @@ impl GGSWCiphertext<Vec<u8>> {
rank,
tsk_size,
);
let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
let tmp_idft: usize = module.vec_znx_big_alloc_bytes(n, 1, tsk_size);
let norm: usize = module.vec_znx_normalize_tmp_bytes(module.n());
tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
}
pub fn keyswitch_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -54,19 +57,20 @@ impl GGSWCiphertext<Vec<u8>> {
rank: usize,
) -> usize
where
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxAllocBytes + VecZnxNormalizeTmpBytes,
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxNormalizeTmpBytes,
{
let out_size: usize = k_out.div_ceil(basek);
let res_znx: usize = module.vec_znx_alloc_bytes(rank + 1, out_size);
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
let res_znx: usize = VecZnx::alloc_bytes(n, rank + 1, out_size);
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, n, basek, k_out, k_tsk, digits_tsk, rank);
let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
res_znx + ci_dft + (ks | expand_rows | res_dft)
}
pub fn keyswitch_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -76,10 +80,10 @@ impl GGSWCiphertext<Vec<u8>> {
rank: usize,
) -> usize
where
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxAllocBytes + VecZnxNormalizeTmpBytes,
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxNormalizeTmpBytes,
{
GGSWCiphertext::keyswitch_scratch_space(
module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
module, n, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
)
}
}
@@ -99,10 +103,16 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
{
let cols: usize = self.rank() + 1;
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), tsk.n());
}
assert!(
scratch.available()
>= GGSWCiphertext::expand_row_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
tsk.k(),
@@ -131,10 +141,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
let n: usize = self.n();
let digits: usize = tsk.digits();
let (mut tmp_dft_i, scratch1) = scratch.take_vec_znx_dft(module, cols, tsk.size());
let (mut tmp_a, scratch2) = scratch1.take_vec_znx_dft(module, 1, ci_dft.size().div_ceil(digits));
let (mut tmp_dft_i, scratch1) = scratch.take_vec_znx_dft(n, cols, tsk.size());
let (mut tmp_a, scratch2) = scratch1.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(digits));
{
// Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
@@ -184,7 +195,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
// =
// (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, ci_dft, 0);
let (mut tmp_idft, scratch2) = scratch1.take_vec_znx_big(module, 1, tsk.size());
let (mut tmp_idft, scratch2) = scratch1.take_vec_znx_big(n, 1, tsk.size());
(0..cols).for_each(|i| {
module.vec_znx_dft_to_vec_znx_big_tmp_a(&mut tmp_idft, 0, &mut tmp_dft_i, i);
module.vec_znx_big_normalize(
@@ -209,6 +220,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
Module<B>: GLWEKeyswitchFamily<B> + GGSWKeySwitchFamily<B> + VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B> + ScratchAvailable,
{
let n: usize = self.n();
let rank: usize = self.rank();
let cols: usize = rank + 1;
@@ -220,7 +232,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
.keyswitch(module, &lhs.at(row_i, 0), ksk, scratch);
// Pre-compute DFT of (a0, a1, a2)
let (mut ci_dft, scratch1) = scratch.take_vec_znx_dft(module, cols, self.size());
let (mut ci_dft, scratch1) = scratch.take_vec_znx_dft(n, cols, self.size());
(0..cols).for_each(|i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut ci_dft, i, &self.at(row_i, 0).data, i);
});

View File

@@ -1,13 +1,14 @@
use backend::hal::{
api::{MatZnxAlloc, MatZnxAllocBytes, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatPrepare},
layouts::{Backend, Data, DataMut, DataRef, MatZnx, Module, ReaderFrom, WriterTo},
api::{FillUniform, Reset, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatPrepare},
layouts::{Backend, Data, DataMut, DataRef, MatZnx, ReaderFrom, WriterTo},
};
use std::fmt;
use crate::{GLWECiphertext, Infos};
pub trait GGSWLayoutFamily<B: Backend> = VmpPMatAlloc<B> + VmpPMatAllocBytes + VmpPMatPrepare<B>;
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GGSWCiphertext<D: Data> {
pub(crate) data: MatZnx<D>,
pub(crate) basek: usize,
@@ -15,6 +16,37 @@ pub struct GGSWCiphertext<D: Data> {
pub(crate) digits: usize,
}
impl<D: DataRef> fmt::Debug for GGSWCiphertext<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "(GGSWCiphertext: basek={} k={} digits={}) {}",
            self.basek, self.k, self.digits, self.data
        )
    }
}

impl<D: DataMut> Reset for GGSWCiphertext<D>
where
    MatZnx<D>: Reset,
{
    fn reset(&mut self) {
        self.data.reset();
        self.basek = 0;
        self.k = 0;
        self.digits = 0;
    }
}

impl<D: DataMut> FillUniform for GGSWCiphertext<D>
where
    MatZnx<D>: FillUniform,
{
    fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
        self.data.fill_uniform(source);
    }
}
impl<D: DataRef> GGSWCiphertext<D> {
pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> {
GLWECiphertext {
@@ -36,10 +68,7 @@ impl<D: DataMut> GGSWCiphertext<D> {
}
impl GGSWCiphertext<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
let size: usize = k.div_ceil(basek);
debug_assert!(digits > 0, "invalid ggsw: `digits` == 0");
@@ -59,17 +88,14 @@ impl GGSWCiphertext<Vec<u8>> {
);
Self {
data: module.mat_znx_alloc(rows, rank + 1, rank + 1, k.div_ceil(basek)),
data: MatZnx::alloc(n, rows, rank + 1, rank + 1, k.div_ceil(basek)),
basek,
k: k,
digits,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
let size: usize = k.div_ceil(basek);
debug_assert!(
size > digits,
@@ -86,7 +112,7 @@ impl GGSWCiphertext<Vec<u8>> {
size
);
module.mat_znx_alloc_bytes(rows, rank + 1, rank + 1, size)
MatZnx::alloc_bytes(n, rows, rank + 1, rank + 1, size)
}
}

View File

@@ -1,11 +1,13 @@
use backend::hal::{
api::{MatZnxAlloc, MatZnxAllocBytes, VecZnxCopy, VecZnxFillUniform},
api::{FillUniform, Reset, VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, MatZnx, Module, ReaderFrom, WriterTo},
};
use crate::{Decompress, GGSWCiphertext, GLWECiphertextCompressed, Infos};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
#[derive(PartialEq, Eq)]
#[derive(PartialEq, Eq, Clone)]
pub struct GGSWCiphertextCompressed<D: Data> {
pub(crate) data: MatZnx<D>,
pub(crate) basek: usize,
@@ -15,11 +17,41 @@ pub struct GGSWCiphertextCompressed<D: Data> {
pub(crate) seed: Vec<[u8; 32]>,
}
impl<D: DataRef> fmt::Debug for GGSWCiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "(GGSWCiphertextCompressed: basek={} k={} digits={}) {}",
            self.basek, self.k, self.digits, self.data
        )
    }
}

impl<D: DataMut> Reset for GGSWCiphertextCompressed<D>
where
    MatZnx<D>: Reset,
{
    fn reset(&mut self) {
        self.data.reset();
        self.basek = 0;
        self.k = 0;
        self.digits = 0;
        self.rank = 0;
        self.seed = Vec::new();
    }
}

impl<D: DataMut> FillUniform for GGSWCiphertextCompressed<D>
where
    MatZnx<D>: FillUniform,
{
    fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
        self.data.fill_uniform(source);
    }
}
impl GGSWCiphertextCompressed<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: MatZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
let size: usize = k.div_ceil(basek);
debug_assert!(digits > 0, "invalid ggsw: `digits` == 0");
@@ -39,19 +71,16 @@ impl GGSWCiphertextCompressed<Vec<u8>> {
);
Self {
data: module.mat_znx_alloc(rows, rank + 1, 1, k.div_ceil(basek)),
data: MatZnx::alloc(n, rows, rank + 1, 1, k.div_ceil(basek)),
basek,
k: k,
digits,
rank,
seed: vec![[0u8; 32]; rows * (rank + 1)],
seed: Vec::new(),
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: MatZnxAllocBytes,
{
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
let size: usize = k.div_ceil(basek);
debug_assert!(
size > digits,
@@ -68,7 +97,7 @@ impl GGSWCiphertextCompressed<Vec<u8>> {
size
);
module.mat_znx_alloc_bytes(rows, rank + 1, 1, size)
MatZnx::alloc_bytes(n, rows, rank + 1, 1, size)
}
}
@@ -125,12 +154,29 @@ impl<D: Data> GGSWCiphertextCompressed<D> {
impl<D: DataMut> ReaderFrom for GGSWCiphertextCompressed<D> {
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        self.k = reader.read_u64::<LittleEndian>()? as usize;
        self.basek = reader.read_u64::<LittleEndian>()? as usize;
        self.digits = reader.read_u64::<LittleEndian>()? as usize;
        self.rank = reader.read_u64::<LittleEndian>()? as usize;
        let seed_len = reader.read_u64::<LittleEndian>()? as usize;
        self.seed = vec![[0u8; 32]; seed_len];
        for s in &mut self.seed {
            reader.read_exact(s)?;
        }
        self.data.read_from(reader)
    }
}

impl<D: DataRef> WriterTo for GGSWCiphertextCompressed<D> {
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_u64::<LittleEndian>(self.k as u64)?;
        writer.write_u64::<LittleEndian>(self.basek as u64)?;
        writer.write_u64::<LittleEndian>(self.digits as u64)?;
        writer.write_u64::<LittleEndian>(self.rank as u64)?;
        writer.write_u64::<LittleEndian>(self.seed.len() as u64)?;
        for s in &self.seed {
            writer.write_all(s)?;
        }
        self.data.write_to(writer)
    }
}
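With the seed vector now serialized between the header words and the `MatZnx` payload, a compressed GGSW survives a plain write/read round trip. A short usage sketch mirroring the parameters of the new GGSW serialization test further below (illustrative only; assumes the crate's `GGSWCiphertextCompressed` and the traits above are in scope):

use std::io::Cursor;

use backend::hal::{
    api::FillUniform,
    layouts::{ReaderFrom, WriterTo},
};
use sampling::source::Source;

#[test]
fn ggsw_compressed_round_trip_sketch() {
    // n = 1024, basek = 12, k = 54, rows = 3, digits = 1, rank = 2.
    let mut original: GGSWCiphertextCompressed<Vec<u8>> = GGSWCiphertextCompressed::alloc(1024, 12, 54, 3, 1, 2);
    let mut source = Source::new([1u8; 32]);
    original.fill_uniform(&mut source);

    // write_to emits k, basek, digits, rank and the seed count as little-endian u64,
    // then each 32-byte seed, then the MatZnx data.
    let mut bytes: Vec<u8> = Vec::new();
    original.write_to(&mut bytes).unwrap();

    // read_from consumes the same layout in the same order.
    let mut restored: GGSWCiphertextCompressed<Vec<u8>> = GGSWCiphertextCompressed::alloc(1024, 12, 54, 3, 1, 2);
    restored.read_from(&mut Cursor::new(bytes)).unwrap();

    assert_eq!(original, restored);
}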

View File

@@ -14,7 +14,7 @@ pub struct GGSWCiphertextExec<D: Data, B: Backend> {
}
impl<B: Backend> GGSWCiphertextExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: GGSWLayoutFamily<B>,
{
@@ -37,14 +37,14 @@ impl<B: Backend> GGSWCiphertextExec<Vec<u8>, B> {
);
Self {
data: module.vmp_pmat_alloc(rows, rank + 1, rank + 1, k.div_ceil(basek)),
data: module.vmp_pmat_alloc(n, rows, rank + 1, rank + 1, k.div_ceil(basek)),
basek,
k: k,
digits,
}
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: GGSWLayoutFamily<B>,
{
@@ -64,7 +64,7 @@ impl<B: Backend> GGSWCiphertextExec<Vec<u8>, B> {
size
);
module.vmp_pmat_alloc_bytes(rows, rank + 1, rank + 1, size)
module.vmp_pmat_alloc_bytes(n, rows, rank + 1, rank + 1, size)
}
pub fn from<DataOther: DataRef>(
@@ -77,6 +77,7 @@ impl<B: Backend> GGSWCiphertextExec<Vec<u8>, B> {
{
let mut ggsw_exec: GGSWCiphertextExec<Vec<u8>, B> = Self::alloc(
module,
other.n(),
other.basek(),
other.k(),
other.rows(),

View File

@@ -1,6 +1,6 @@
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAlloc, VecZnxBigAlloc, VecZnxBigNormalize,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxBigAlloc, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftToVecZnxBigTmpA, VecZnxNormalizeTmpBytes, VecZnxStd,
VecZnxSubABInplace, ZnxZero,
},
@@ -27,7 +27,7 @@ impl<D: DataRef> GGSWCiphertext<D> {
) where
DataSk: DataRef,
DataScalar: DataRef,
Module<B>: GGSWAssertNoiseFamily<B> + VecZnxAlloc + VecZnxAddScalarInplace + VecZnxSubABInplace + VecZnxStd,
Module<B>: GGSWAssertNoiseFamily<B> + VecZnxAddScalarInplace + VecZnxSubABInplace + VecZnxStd,
B: TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
F: Fn(usize) -> f64,
{
@@ -35,13 +35,13 @@ impl<D: DataRef> GGSWCiphertext<D> {
let k: usize = self.k();
let digits: usize = self.digits();
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k);
let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(1, self.size());
let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(self.n(), 1, self.size());
let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(self.n(), 1, self.size());
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertext::decrypt_scratch_space(module, basek, k) | module.vec_znx_normalize_tmp_bytes(module.n()),
GLWECiphertext::decrypt_scratch_space(module, self.n(), basek, k) | module.vec_znx_normalize_tmp_bytes(self.n()),
);
(0..self.rank() + 1).for_each(|col_j| {

View File

@@ -0,0 +1,15 @@
use backend::hal::tests::serialization::test_reader_writer_interface;

use crate::{GGSWCiphertext, GGSWCiphertextCompressed};

#[test]
fn ggsw_test_serialization() {
    let original: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(1024, 12, 54, 3, 1, 2);
    test_reader_writer_interface(original);
}

#[test]
fn ggsw_test_serialization_compressed() {
    let original: GGSWCiphertextCompressed<Vec<u8>> = GGSWCiphertextCompressed::alloc(1024, 12, 54, 3, 1, 2);
    test_reader_writer_interface(original);
}

View File

@@ -1,8 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxCopy, VecZnxRotateInplace, VecZnxStd,
VecZnxSubABInplace, VecZnxSwithcDegree, ZnxViewMut,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxCopy,
VecZnxRotateInplace, VecZnxStd, VecZnxSubABInplace, VecZnxSwithcDegree, ZnxViewMut,
},
layouts::{Backend, Module, ScalarZnx, ScalarZnxToMut, ScratchOwned},
oep::{
@@ -23,14 +22,9 @@ use crate::{
pub(crate) trait TestModuleFamily<B: Backend> = GLWESecretFamily<B>
+ GGSWEncryptSkFamily<B>
+ GGSWAssertNoiseFamily<B>
+ VecZnxAlloc
+ ScalarZnxAlloc
+ VecZnxAllocBytes
+ MatZnxAlloc
+ VecZnxAddScalarInplace
+ VecZnxSubABInplace
+ VecZnxStd
+ ScalarZnxAllocBytes
+ VecZnxCopy;
pub(crate) trait TestScratchFamily<B: Backend> = TakeVecZnxDftImpl<B>
+ TakeVecZnxBigImpl<B>
@@ -49,23 +43,24 @@ where
Module<B>: TestModuleFamily<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k - digits * basek) / (digits * basek);
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k, rows, digits, rank);
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k, rows, digits, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
pt_scalar.fill_ternary_hw(0, module.n(), &mut source_xs);
pt_scalar.fill_ternary_hw(0, n, &mut source_xs);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSWCiphertext::encrypt_sk_scratch_space(
module, basek, k, rank,
module, n, basek, k, rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
sk_exec.prepare(module, &sk);
@@ -96,23 +91,23 @@ pub(crate) fn test_encrypt_sk_compressed<B: Backend>(
Module<B>: TestModuleFamily<B>,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = (k - digits * basek) / (digits * basek);
let mut ct_compressed: GGSWCiphertextCompressed<Vec<u8>> =
GGSWCiphertextCompressed::alloc(module, basek, k, rows, digits, rank);
let mut ct_compressed: GGSWCiphertextCompressed<Vec<u8>> = GGSWCiphertextCompressed::alloc(n, basek, k, rows, digits, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
pt_scalar.fill_ternary_hw(0, module.n(), &mut source_xs);
pt_scalar.fill_ternary_hw(0, n, &mut source_xs);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSWCiphertextCompressed::encrypt_sk_scratch_space(
module, basek, k, rank,
module, n, basek, k, rank,
));
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
sk_exec.prepare(module, &sk);
@@ -131,7 +126,7 @@ pub(crate) fn test_encrypt_sk_compressed<B: Backend>(
let noise_f = |_col_i: usize| -(k as f64) + sigma.log2() + 0.5;
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k, rows, digits, rank);
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k, rows, digits, rank);
ct.decompress(module, &ct_compressed);
ct.assert_noise(module, &sk_exec, &pt_scalar, &noise_f);
@@ -157,36 +152,37 @@ pub(crate) fn test_keyswitch<B: Backend>(
+ VecZnxSwithcDegree,
B: TestScratchFamily<B> + VecZnxDftAllocBytesImpl<B> + VecZnxBigAllocBytesImpl<B> + TakeSvpPPolImpl<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(digits * basek);
let digits_in: usize = 1;
let mut ct_in: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_in, rows, digits_in, rank);
let mut ct_out: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_out, rows, digits_in, rank);
let mut tsk: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct_in: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_in, rows, digits_in, rank);
let mut ct_out: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_out, rows, digits_in, rank);
let mut tsk: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_in, rank)
| GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_in, rank)
| GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
| GGSWCiphertext::keyswitch_scratch_space(
module, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
module, n, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
),
);
let var_xs: f64 = 0.5;
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_in.fill_ternary_prob(var_xs, &mut source_xs);
let sk_in_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_in);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_out.fill_ternary_prob(var_xs, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -208,7 +204,7 @@ pub(crate) fn test_keyswitch<B: Backend>(
scratch.borrow(),
);
pt_scalar.fill_ternary_hw(0, module.n(), &mut source_xs);
pt_scalar.fill_ternary_hw(0, n, &mut source_xs);
ct_in.encrypt_sk(
module,
@@ -221,8 +217,8 @@ pub(crate) fn test_keyswitch<B: Backend>(
);
let mut ksk_exec: GLWESwitchingKeyExec<Vec<u8>, B> =
GLWESwitchingKeyExec::alloc(module, basek, k_ksk, rows, digits, rank, rank);
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, basek, k_ksk, rows, digits, rank);
GLWESwitchingKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank, rank);
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank);
ksk_exec.prepare(module, &ksk, scratch.borrow());
tsk_exec.prepare(module, &tsk, scratch.borrow());
@@ -231,7 +227,7 @@ pub(crate) fn test_keyswitch<B: Backend>(
let max_noise = |col_j: usize| -> f64 {
noise_ggsw_keyswitch(
module.n() as f64,
n as f64,
basek * digits,
col_j,
var_xs,
@@ -267,33 +263,34 @@ pub(crate) fn test_keyswitch_inplace<B: Backend>(
+ VecZnxSwithcDegree,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(digits * basek);
let digits_in: usize = 1;
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ct, rows, digits_in, rank);
let mut tsk: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(module, basek, k_tsk, rows, digits, rank);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ct, rows, digits_in, rank);
let mut tsk: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(n, basek, k_tsk, rows, digits, rank);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ct, rank)
| GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
| GGSWCiphertext::keyswitch_inplace_scratch_space(module, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct, rank)
| GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
| GGSWCiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
);
let var_xs: f64 = 0.5;
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_in.fill_ternary_prob(var_xs, &mut source_xs);
let sk_in_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_in);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_out.fill_ternary_prob(var_xs, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -315,7 +312,7 @@ pub(crate) fn test_keyswitch_inplace<B: Backend>(
scratch.borrow(),
);
pt_scalar.fill_ternary_hw(0, module.n(), &mut source_xs);
pt_scalar.fill_ternary_hw(0, n, &mut source_xs);
ct.encrypt_sk(
module,
@@ -328,8 +325,8 @@ pub(crate) fn test_keyswitch_inplace<B: Backend>(
);
let mut ksk_exec: GLWESwitchingKeyExec<Vec<u8>, B> =
GLWESwitchingKeyExec::alloc(module, basek, k_ksk, rows, digits, rank, rank);
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, basek, k_ksk, rows, digits, rank);
GLWESwitchingKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank, rank);
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank);
ksk_exec.prepare(module, &ksk, scratch.borrow());
tsk_exec.prepare(module, &tsk, scratch.borrow());
@@ -338,7 +335,7 @@ pub(crate) fn test_keyswitch_inplace<B: Backend>(
let max_noise = |col_j: usize| -> f64 {
noise_ggsw_keyswitch(
module.n() as f64,
n as f64,
basek * digits,
col_j,
var_xs,
@@ -379,33 +376,34 @@ pub(crate) fn test_automorphism<B: Backend>(
+ VecZnxAutomorphism,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let rows_in: usize = k_in.div_euclid(basek * digits);
let digits_in: usize = 1;
let mut ct_in: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_in, rows_in, digits_in, rank);
let mut ct_out: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_out, rows_in, digits_in, rank);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(module, basek, k_tsk, rows, digits, rank);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct_in: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_in, rows_in, digits_in, rank);
let mut ct_out: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_out, rows_in, digits_in, rank);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(n, basek, k_tsk, rows, digits, rank);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_in, rank)
| AutomorphismKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_in, rank)
| AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
| GGSWCiphertext::automorphism_scratch_space(
module, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
module, n, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
),
);
let var_xs: f64 = 0.5;
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(var_xs, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -427,7 +425,7 @@ pub(crate) fn test_automorphism<B: Backend>(
scratch.borrow(),
);
pt_scalar.fill_ternary_hw(0, module.n(), &mut source_xs);
pt_scalar.fill_ternary_hw(0, n, &mut source_xs);
ct_in.encrypt_sk(
module,
@@ -439,10 +437,11 @@ pub(crate) fn test_automorphism<B: Backend>(
scratch.borrow(),
);
let mut auto_key_exec: AutomorphismKeyExec<Vec<u8>, B> = AutomorphismKeyExec::alloc(module, basek, k_ksk, rows, digits, rank);
let mut auto_key_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank);
auto_key_exec.prepare(module, &auto_key, scratch.borrow());
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, basek, k_tsk, rows, digits, rank);
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, n, basek, k_tsk, rows, digits, rank);
tsk_exec.prepare(module, &tensor_key, scratch.borrow());
ct_out.automorphism(module, &ct_in, &auto_key_exec, &tsk_exec, scratch.borrow());
@@ -451,7 +450,7 @@ pub(crate) fn test_automorphism<B: Backend>(
let max_noise = |col_j: usize| -> f64 {
noise_ggsw_keyswitch(
module.n() as f64,
n as f64,
basek * digits,
col_j,
var_xs,
@@ -491,29 +490,30 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
+ VecZnxAutomorphismInplace,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(digits * basek);
let rows_in: usize = k_ct.div_euclid(basek * digits);
let digits_in: usize = 1;
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ct, rows_in, digits_in, rank);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(module, basek, k_tsk, rows, digits, rank);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ct, rows_in, digits_in, rank);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc(n, basek, k_tsk, rows, digits, rank);
let mut auto_key: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ct, rank)
| AutomorphismKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
| GGSWCiphertext::automorphism_inplace_scratch_space(module, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct, rank)
| AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
| GLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
| GGSWCiphertext::automorphism_inplace_scratch_space(module, n, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
);
let var_xs: f64 = 0.5;
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(var_xs, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -535,7 +535,7 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
scratch.borrow(),
);
pt_scalar.fill_ternary_hw(0, module.n(), &mut source_xs);
pt_scalar.fill_ternary_hw(0, n, &mut source_xs);
ct.encrypt_sk(
module,
@@ -547,10 +547,11 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
scratch.borrow(),
);
let mut auto_key_exec: AutomorphismKeyExec<Vec<u8>, B> = AutomorphismKeyExec::alloc(module, basek, k_ksk, rows, digits, rank);
let mut auto_key_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank);
auto_key_exec.prepare(module, &auto_key, scratch.borrow());
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, basek, k_tsk, rows, digits, rank);
let mut tsk_exec: GLWETensorKeyExec<Vec<u8>, B> = GLWETensorKeyExec::alloc(module, n, basek, k_tsk, rows, digits, rank);
tsk_exec.prepare(module, &tensor_key, scratch.borrow());
ct.automorphism_inplace(module, &auto_key_exec, &tsk_exec, scratch.borrow());
@@ -559,7 +560,7 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
let max_noise = |col_j: usize| -> f64 {
noise_ggsw_keyswitch(
module.n() as f64,
n as f64,
basek * digits,
col_j,
var_xs,
@@ -595,15 +596,16 @@ pub(crate) fn test_external_product<B: Backend>(
+ VecZnxRotateInplace,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let rows_in: usize = k_in.div_euclid(basek * digits);
let digits_in: usize = 1;
let mut ct_ggsw_lhs_in: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_in, rows_in, digits_in, rank);
let mut ct_ggsw_lhs_out: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_out, rows_in, digits_in, rank);
let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ggsw, rows, digits, rank);
let mut pt_ggsw_lhs: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_ggsw_rhs: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut ct_ggsw_lhs_in: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_in, rows_in, digits_in, rank);
let mut ct_ggsw_lhs_out: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_out, rows_in, digits_in, rank);
let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
let mut pt_ggsw_lhs: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut pt_ggsw_rhs: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -616,11 +618,11 @@ pub(crate) fn test_external_product<B: Backend>(
pt_ggsw_rhs.to_mut().raw_mut()[k] = 1; //X^{k}
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank)
| GGSWCiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank),
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank)
| GGSWCiphertext::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -644,7 +646,7 @@ pub(crate) fn test_external_product<B: Backend>(
scratch.borrow(),
);
let mut ct_rhs_exec: GGSWCiphertextExec<Vec<u8>, B> = GGSWCiphertextExec::alloc(module, basek, k_ggsw, rows, digits, rank);
let mut ct_rhs_exec: GGSWCiphertextExec<Vec<u8>, B> = GGSWCiphertextExec::alloc(module, n, basek, k_ggsw, rows, digits, rank);
ct_rhs_exec.prepare(module, &ct_ggsw_rhs, scratch.borrow());
ct_ggsw_lhs_out.external_product(module, &ct_ggsw_lhs_in, &ct_rhs_exec, scratch.borrow());
@@ -654,13 +656,13 @@ pub(crate) fn test_external_product<B: Backend>(
let var_gct_err_lhs: f64 = sigma * sigma;
let var_gct_err_rhs: f64 = 0f64;
let var_msg: f64 = 1f64 / module.n() as f64; // X^{k}
let var_msg: f64 = 1f64 / n as f64; // X^{k}
let var_a0_err: f64 = sigma * sigma;
let var_a1_err: f64 = 1f64 / 12f64;
let max_noise = |_col_j: usize| -> f64 {
noise_ggsw_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
var_msg,
@@ -695,15 +697,16 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
+ VecZnxRotateInplace,
B: TestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(digits * basek);
let rows_in: usize = k_ct.div_euclid(basek * digits);
let digits_in: usize = 1;
let mut ct_ggsw_lhs: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ct, rows_in, digits_in, rank);
let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ggsw, rows, digits, rank);
let mut ct_ggsw_lhs: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ct, rows_in, digits_in, rank);
let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
let mut pt_ggsw_lhs: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_ggsw_rhs: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_ggsw_lhs: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut pt_ggsw_rhs: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -716,11 +719,11 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
pt_ggsw_rhs.to_mut().raw_mut()[k] = 1; //X^{k}
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank)
| GGSWCiphertext::external_product_inplace_scratch_space(module, basek, k_ct, k_ggsw, digits, rank),
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank)
| GGSWCiphertext::external_product_inplace_scratch_space(module, n, basek, k_ct, k_ggsw, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -744,7 +747,7 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
scratch.borrow(),
);
let mut ct_rhs_exec: GGSWCiphertextExec<Vec<u8>, B> = GGSWCiphertextExec::alloc(module, basek, k_ggsw, rows, digits, rank);
let mut ct_rhs_exec: GGSWCiphertextExec<Vec<u8>, B> = GGSWCiphertextExec::alloc(module, n, basek, k_ggsw, rows, digits, rank);
ct_rhs_exec.prepare(module, &ct_ggsw_rhs, scratch.borrow());
ct_ggsw_lhs.external_product_inplace(module, &ct_rhs_exec, scratch.borrow());
@@ -754,13 +757,13 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
let var_gct_err_lhs: f64 = sigma * sigma;
let var_gct_err_rhs: f64 = 0f64;
let var_msg: f64 = 1f64 / module.n() as f64; // X^{k}
let var_msg: f64 = 1f64 / n as f64; // X^{k}
let var_a0_err: f64 = sigma * sigma;
let var_a1_err: f64 = 1f64 / 12f64;
let max_noise = |_col_j: usize| -> f64 {
noise_ggsw_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
var_msg,

View File

@@ -1,2 +1,3 @@
mod cpu_spqlios;
mod generic_serialization;
mod generic_tests;

View File

@@ -11,6 +11,7 @@ use crate::{AutomorphismKeyExec, GLWECiphertext, GLWEKeyswitchFamily, Infos, glw
impl GLWECiphertext<Vec<u8>> {
pub fn automorphism_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -21,11 +22,12 @@ impl GLWECiphertext<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
Self::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
Self::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
}
pub fn automorphism_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -35,7 +37,7 @@ impl GLWECiphertext<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
Self::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
Self::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
}
}
@@ -85,7 +87,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
{
self.assert_keyswitch(module, lhs, &rhs.key, scratch);
}
let (res_dft, scratch1) = scratch.take_vec_znx_dft(module, self.cols(), rhs.size()); // TODO: optimise size
let (res_dft, scratch1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, B> = keyswitch(module, res_dft, lhs, &rhs.key, scratch1);
(0..self.cols()).for_each(|i| {
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i);
@@ -123,7 +125,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
{
self.assert_keyswitch(module, lhs, &rhs.key, scratch);
}
let (res_dft, scratch1) = scratch.take_vec_znx_dft(module, self.cols(), rhs.size()); // TODO: optimise size
let (res_dft, scratch1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, B> = keyswitch(module, res_dft, lhs, &rhs.key, scratch1);
(0..self.cols()).for_each(|i| {
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i);
@@ -161,7 +163,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
{
self.assert_keyswitch(module, lhs, &rhs.key, scratch);
}
let (res_dft, scratch1) = scratch.take_vec_znx_dft(module, self.cols(), rhs.size()); // TODO: optimise size
let (res_dft, scratch1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, B> = keyswitch(module, res_dft, lhs, &rhs.key, scratch1);
(0..self.cols()).for_each(|i| {
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i);

View File

@@ -20,13 +20,13 @@ pub trait GLWEDecryptFamily<B: Backend> = VecZnxDftAllocBytes
+ VecZnxNormalizeTmpBytes;
impl GLWECiphertext<Vec<u8>> {
pub fn decrypt_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
pub fn decrypt_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
where
Module<B>: GLWEDecryptFamily<B>,
{
let size: usize = k.div_ceil(basek);
(module.vec_znx_normalize_tmp_bytes(module.n()) | module.vec_znx_dft_alloc_bytes(1, size))
+ module.vec_znx_dft_alloc_bytes(1, size)
(module.vec_znx_normalize_tmp_bytes(n) | module.vec_znx_dft_alloc_bytes(n, 1, size))
+ module.vec_znx_dft_alloc_bytes(n, 1, size)
}
}
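
As elsewhere in this commit, the scratch-space formulas combine sizes with + when both buffers are live at the same time and with | when only one of the two alternatives is; reading | as a cheap upper bound for the larger of the two is an inference from the formulas, not something stated in the code, but it is sound since for non-negative integers a | b >= max(a, b):

// Illustrative check of the bound relied on above (values are arbitrary):
let (a, b): (usize, usize) = (96, 160);
assert!((a | b) >= a.max(b)); // 96 | 160 = 224 >= max(96, 160) = 160, and <= 96 + 160 = 256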
@@ -44,20 +44,19 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
#[cfg(debug_assertions)]
{
assert_eq!(self.rank(), sk.rank());
assert_eq!(self.n(), module.n());
assert_eq!(pt.n(), module.n());
assert_eq!(sk.n(), module.n());
assert_eq!(self.n(), sk.n());
assert_eq!(pt.n(), sk.n());
}
let cols: usize = self.rank() + 1;
let (mut c0_big, scratch_1) = scratch.take_vec_znx_big(module, 1, self.size()); // TODO optimize size when pt << ct
let (mut c0_big, scratch_1) = scratch.take_vec_znx_big(self.n(), 1, self.size()); // TODO optimize size when pt << ct
c0_big.data_mut().fill(0);
{
(1..cols).for_each(|i| {
// ci_dft = DFT(a[i]) * DFT(s[i])
let (mut ci_dft, _) = scratch_1.take_vec_znx_dft(module, 1, self.size()); // TODO optimize size when pt << ct
let (mut ci_dft, _) = scratch_1.take_vec_znx_dft(self.n(), 1, self.size()); // TODO optimize size when pt << ct
module.vec_znx_dft_from_vec_znx(1, 0, &mut ci_dft, 0, &self.data, i);
module.svp_apply_inplace(&mut ci_dft, 0, &sk.data, i - 1);
let ci_big = module.vec_znx_dft_to_vec_znx_big_consume(ci_dft);

View File

@@ -1,12 +1,11 @@
use backend::hal::{
api::{
ScalarZnxAllocBytes, ScratchAvailable, SvpApply, SvpApplyInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
TakeSvpPPol, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAllocBytes, VecZnxBigAddNormal,
VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftFromVecZnx,
VecZnxDftToVecZnxBigConsume, VecZnxFillUniform, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
VecZnxSub, VecZnxSubABInplace, ZnxInfos, ZnxZero,
ScratchAvailable, SvpApply, SvpApplyInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSvpPPol, TakeVecZnx,
TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigConsume, VecZnxFillUniform,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, ZnxInfos, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VecZnxBig},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig},
};
use sampling::source::Source;
@@ -27,8 +26,7 @@ pub trait GLWEEncryptSkFamily<B: Backend> = VecZnxDftAllocBytes
+ VecZnxNormalizeInplace<B>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxSub
+ VecZnxAllocBytes;
+ VecZnxSub;
pub trait GLWEEncryptPkFamily<B: Backend> = VecZnxDftAllocBytes
+ VecZnxBigAllocBytes
@@ -39,27 +37,24 @@ pub trait GLWEEncryptPkFamily<B: Backend> = VecZnxDftAllocBytes
+ VecZnxBigAddNormal<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ ScalarZnxAllocBytes
+ VecZnxNormalizeTmpBytes;
impl GLWECiphertext<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
where
Module<B>: GLWEEncryptSkFamily<B>,
{
let size: usize = k.div_ceil(basek);
module.vec_znx_normalize_tmp_bytes(module.n())
+ 2 * module.vec_znx_alloc_bytes(1, size)
+ module.vec_znx_dft_alloc_bytes(1, size)
module.vec_znx_normalize_tmp_bytes(n) + 2 * VecZnx::alloc_bytes(n, 1, size) + module.vec_znx_dft_alloc_bytes(n, 1, size)
}
pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
where
Module<B>: GLWEEncryptPkFamily<B>,
{
let size: usize = k.div_ceil(basek);
((module.vec_znx_dft_alloc_bytes(1, size) + module.vec_znx_big_alloc_bytes(1, size)) | module.scalar_znx_alloc_bytes(1))
+ module.svp_ppol_alloc_bytes(1)
+ module.vec_znx_normalize_tmp_bytes(module.n())
((module.vec_znx_dft_alloc_bytes(n, 1, size) + module.vec_znx_big_alloc_bytes(n, 1, size)) | ScalarZnx::alloc_bytes(n, 1))
+ module.svp_ppol_alloc_bytes(n, 1)
+ module.vec_znx_normalize_tmp_bytes(n)
}
}
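
For reference, the size used throughout these formulas is the number of base-2^basek limbs needed to reach k bits of precision; with the basek=12, k=54 parameters used by the serialization tests earlier in this diff:

let (basek, k): (usize, usize) = (12, 54);
assert_eq!(k.div_ceil(basek), 5); // 5 limbs of 12 bits cover the 54-bit precision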
@@ -75,7 +70,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptSkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -83,10 +78,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
assert_eq!(sk.n(), self.n());
assert_eq!(pt.n(), self.n());
assert!(
scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k()),
"scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
scratch.available(),
GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k())
)
}
@@ -111,17 +106,17 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptSkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.rank(), sk.rank());
assert_eq!(sk.n(), self.n());
assert!(
scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k()),
"scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
scratch.available(),
GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k())
)
}
self.encrypt_sk_internal(
@@ -146,7 +141,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptSkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let cols: usize = self.rank() + 1;
encrypt_sk_internal(
@@ -176,7 +171,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptPkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeSvpPPol<B> + TakeScalarZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeSvpPPol<B> + TakeScalarZnx,
{
self.encrypt_pk_internal::<DataPt, DataPk, B>(
module,
@@ -199,7 +194,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptPkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeSvpPPol<B> + TakeScalarZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeSvpPPol<B> + TakeScalarZnx,
{
self.encrypt_pk_internal::<Vec<u8>, DataPk, B>(
module,
@@ -230,17 +225,16 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigAddNormal<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeSvpPPol<B> + TakeScalarZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + TakeSvpPPol<B> + TakeScalarZnx,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.basek(), pk.basek());
assert_eq!(self.n(), module.n());
assert_eq!(pk.n(), module.n());
assert_eq!(self.n(), pk.n());
assert_eq!(self.rank(), pk.rank());
if let Some((pt, _)) = pt {
assert_eq!(pt.basek(), pk.basek());
assert_eq!(pt.n(), module.n());
assert_eq!(pt.n(), pk.n());
}
}
@@ -249,10 +243,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
let cols: usize = self.rank() + 1;
// Generates u according to the underlying secret distribution.
let (mut u_dft, scratch_1) = scratch.take_svp_ppol(module, 1);
let (mut u_dft, scratch_1) = scratch.take_svp_ppol(self.n(), 1);
{
let (mut u, _) = scratch_1.take_scalar_znx(module, 1);
let (mut u, _) = scratch_1.take_scalar_znx(self.n(), 1);
match pk.dist {
Distribution::NONE => panic!(
"invalid public key: SecretDistribution::NONE, ensure it has been correctly intialized through \
@@ -271,7 +265,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
// ct[i] = pk[i] * u + ei (+ m if col = i)
(0..cols).for_each(|i| {
let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(module, 1, size_pk);
let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), 1, size_pk);
// ci_dft = DFT(u) * DFT(pk[i])
module.svp_apply(&mut ci_dft, 0, &u_dft, 0, &pk.data, i);
@@ -303,11 +297,11 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
}
impl GLWECiphertextCompressed<Vec<u8>> {
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
where
Module<B>: GLWEEncryptSkFamily<B>,
{
GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
}
}
impl<D: DataMut> GLWECiphertextCompressed<D> {
@@ -322,7 +316,7 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptSkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
self.encrypt_sk_internal(
module,
@@ -346,7 +340,7 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptSkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let mut source_xa = Source::new(seed_xa);
let cols: usize = self.rank() + 1;
@@ -383,7 +377,7 @@ pub(crate) fn encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk: Data
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEEncryptSkFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -399,11 +393,11 @@ pub(crate) fn encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk: Data
let size: usize = ct.size();
let (mut c0, scratch_1) = scratch.take_vec_znx(module, 1, size);
let (mut c0, scratch_1) = scratch.take_vec_znx(ct.n(), 1, size);
c0.zero();
{
let (mut ci, scratch_2) = scratch_1.take_vec_znx(module, 1, size);
let (mut ci, scratch_2) = scratch_1.take_vec_znx(ct.n(), 1, size);
// ct[i] = uniform
// ct[0] -= c[i] * s[i],
@@ -418,7 +412,7 @@ pub(crate) fn encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk: Data
// ct[i] = uniform (+ pt)
module.vec_znx_fill_uniform(basek, ct, col_ct, k, source_xa);
let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(module, 1, size);
let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(ct.n(), 1, size);
// ci = ct[i] - pt
// i.e. we act as we sample ct[i] already as uniform + pt

View File

@@ -20,6 +20,7 @@ pub trait GLWEExternalProductFamily<B: Backend> = VecZnxDftAllocBytes
impl GLWECiphertext<Vec<u8>> {
pub fn external_product_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -33,9 +34,10 @@ impl GLWECiphertext<Vec<u8>> {
let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
let out_size: usize = k_out.div_ceil(basek);
let ggsw_size: usize = k_ggsw.div_ceil(basek);
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, ggsw_size);
let a_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, in_size);
let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, ggsw_size);
let a_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, in_size);
let vmp: usize = module.vmp_apply_tmp_bytes(
n,
out_size,
in_size,
in_size, // rows
@@ -49,6 +51,7 @@ impl GLWECiphertext<Vec<u8>> {
pub fn external_product_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ggsw: usize,
@@ -58,7 +61,7 @@ impl GLWECiphertext<Vec<u8>> {
where
Module<B>: GLWEExternalProductFamily<B>,
{
Self::external_product_scratch_space(module, basek, k_out, k_out, k_ggsw, digits, rank)
Self::external_product_scratch_space(module, n, basek, k_out, k_out, k_ggsw, digits, rank)
}
}
@@ -83,13 +86,13 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
assert_eq!(rhs.rank(), self.rank());
assert_eq!(self.basek(), basek);
assert_eq!(lhs.basek(), basek);
assert_eq!(rhs.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(lhs.n(), module.n());
assert_eq!(rhs.n(), self.n());
assert_eq!(lhs.n(), self.n());
assert!(
scratch.available()
>= GLWECiphertext::external_product_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
lhs.k(),
@@ -103,8 +106,8 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
let cols: usize = rhs.rank() + 1;
let digits: usize = rhs.digits();
let (mut res_dft, scratch1) = scratch.take_vec_znx_dft(module, cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch2) = scratch1.take_vec_znx_dft(module, cols, lhs.size().div_ceil(digits));
let (mut res_dft, scratch1) = scratch.take_vec_znx_dft(self.n(), cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch2) = scratch1.take_vec_znx_dft(self.n(), cols, lhs.size().div_ceil(digits));
a_dft.data_mut().fill(0);

View File

@@ -22,6 +22,7 @@ pub trait GLWEKeyswitchFamily<B: Backend> = VecZnxDftAllocBytes
impl GLWECiphertext<Vec<u8>> {
pub fn keyswitch_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -36,16 +37,24 @@ impl GLWECiphertext<Vec<u8>> {
let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
let out_size: usize = k_out.div_ceil(basek);
let ksk_size: usize = k_ksk.div_ceil(basek);
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank_out + 1, ksk_size); // TODO OPTIMIZE
let ai_dft: usize = module.vec_znx_dft_alloc_bytes(rank_in, in_size);
let vmp: usize = module.vmp_apply_tmp_bytes(out_size, in_size, in_size, rank_in, rank_out + 1, ksk_size)
+ module.vec_znx_dft_alloc_bytes(rank_in, in_size);
let normalize: usize = module.vec_znx_big_normalize_tmp_bytes(module.n());
let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank_out + 1, ksk_size); // TODO OPTIMIZE
let ai_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank_in, in_size);
let vmp: usize = module.vmp_apply_tmp_bytes(
n,
out_size,
in_size,
in_size,
rank_in,
rank_out + 1,
ksk_size,
) + module.vec_znx_dft_alloc_bytes(n, rank_in, in_size);
let normalize: usize = module.vec_znx_big_normalize_tmp_bytes(n);
return res_dft + ((ai_dft + vmp) | normalize);
}
pub fn keyswitch_from_fourier_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_in: usize,
@@ -57,11 +66,14 @@ impl GLWECiphertext<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
Self::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out)
Self::keyswitch_scratch_space(
module, n, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out,
)
}
pub fn keyswitch_inplace_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
k_out: usize,
k_ksk: usize,
@@ -71,7 +83,7 @@ impl GLWECiphertext<Vec<u8>> {
where
Module<B>: GLWEKeyswitchFamily<B>,
{
Self::keyswitch_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank, rank)
Self::keyswitch_scratch_space(module, n, basek, k_out, k_out, k_ksk, digits, rank, rank)
}
}
@@ -105,13 +117,13 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
);
assert_eq!(self.basek(), basek);
assert_eq!(lhs.basek(), basek);
assert_eq!(rhs.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(lhs.n(), module.n());
assert_eq!(rhs.n(), self.n());
assert_eq!(lhs.n(), self.n());
assert!(
scratch.available()
>= GLWECiphertext::keyswitch_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
lhs.k(),
@@ -133,6 +145,7 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
scratch.available(),
GLWECiphertext::keyswitch_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
lhs.k(),
@@ -160,7 +173,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
{
self.assert_keyswitch(module, lhs, rhs, scratch);
}
let (res_dft, scratch1) = scratch.take_vec_znx_dft(module, self.cols(), rhs.size()); // Todo optimise
let (res_dft, scratch1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // Todo optimise
let res_big: VecZnxBig<_, B> = keyswitch(module, res_dft, lhs, rhs, scratch1);
(0..self.cols()).for_each(|i| {
module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch1);
@@ -227,7 +240,7 @@ where
Scratch<B>: TakeVecZnxDft<B>,
{
let cols: usize = a.cols();
let (mut ai_dft, scratch1) = scratch.take_vec_znx_dft(module, cols - 1, a.size());
let (mut ai_dft, scratch1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a.size());
(0..cols - 1).for_each(|col_i| {
module.vec_znx_dft_from_vec_znx(1, 0, &mut ai_dft, col_i, a, col_i + 1);
});
@@ -259,7 +272,7 @@ where
{
let cols: usize = a.cols();
let size: usize = a.size();
let (mut ai_dft, scratch1) = scratch.take_vec_znx_dft(module, cols - 1, size.div_ceil(digits));
let (mut ai_dft, scratch1) = scratch.take_vec_znx_dft(a.n(), cols - 1, size.div_ceil(digits));
ai_dft.data_mut().fill(0);

View File

@@ -1,12 +1,11 @@
use std::fmt::Debug;
use backend::hal::{
api::{FillUniform, VecZnxAlloc, VecZnxAllocBytes, VecZnxCopy, VecZnxFillUniform, ZnxInfos, ZnxZero},
api::{FillUniform, Reset, VecZnxCopy, VecZnxFillUniform, ZnxInfos},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo},
};
use sampling::source::Source;
use crate::{Decompress, GLWEOps, Infos, SetMetaData};
use std::fmt;
#[derive(PartialEq, Eq, Clone)]
pub struct GLWECiphertext<D: Data> {
@@ -15,8 +14,14 @@ pub struct GLWECiphertext<D: Data> {
pub k: usize,
}
impl<D: DataRef> Debug for GLWECiphertext<D> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl<D: DataRef> fmt::Debug for GLWECiphertext<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataRef> fmt::Display for GLWECiphertext<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"GLWECiphertext: basek={} k={}: {}",
@@ -27,16 +32,14 @@ impl<D: DataRef> Debug for GLWECiphertext<D> {
}
}
impl<D: DataMut> ZnxZero for GLWECiphertext<D>
impl<D: DataMut> Reset for GLWECiphertext<D>
where
VecZnx<D>: ZnxZero,
VecZnx<D>: Reset,
{
fn zero(&mut self) {
self.data.zero()
}
fn zero_at(&mut self, i: usize, j: usize) {
self.data.zero_at(i, j);
fn reset(&mut self) {
self.data.reset();
self.basek = 0;
self.k = 0;
}
}
@@ -50,22 +53,16 @@ where
}
impl GLWECiphertext<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
Self {
data: module.vec_znx_alloc(rank + 1, k.div_ceil(basek)),
data: VecZnx::alloc(n, rank + 1, k.div_ceil(basek)),
basek,
k,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: VecZnxAllocBytes,
{
module.vec_znx_alloc_bytes(rank + 1, k.div_ceil(basek))
pub fn bytes_of(n: usize, basek: usize, k: usize, rank: usize) -> usize {
VecZnx::alloc_bytes(n, rank + 1, k.div_ceil(basek))
}
}
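
With the Module dependency removed, allocation only needs the ring degree; a minimal sketch of the new signatures, assuming GLWECiphertext and VecZnx are in scope (1024/12/54/2 are illustrative values only):

// Hypothetical call site for the n-based constructors introduced above.
let ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(1024, 12, 54, 2);
// bytes_of delegates to VecZnx::alloc_bytes with rank + 1 columns and k.div_ceil(basek) limbs.
assert_eq!(
    GLWECiphertext::bytes_of(1024, 12, 54, 2),
    VecZnx::alloc_bytes(1024, 3, 54usize.div_ceil(12)),
);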
@@ -168,28 +165,36 @@ pub struct GLWECiphertextCompressed<D: Data> {
pub(crate) seed: [u8; 32],
}
impl<D: DataRef> Debug for GLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl<D: DataRef> fmt::Debug for GLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataRef> fmt::Display for GLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"GLWECiphertext: basek={} k={}: {}",
"GLWECiphertextCompressed: basek={} k={} rank={} seed={:?}: {}",
self.basek(),
self.k(),
self.rank,
self.seed,
self.data
)
}
}
impl<D: DataMut> ZnxZero for GLWECiphertextCompressed<D>
impl<D: DataMut> Reset for GLWECiphertextCompressed<D>
where
VecZnx<D>: ZnxZero,
VecZnx<D>: Reset,
{
fn zero(&mut self) {
self.data.zero()
}
fn zero_at(&mut self, i: usize, j: usize) {
self.data.zero_at(i, j);
fn reset(&mut self) {
self.data.reset();
self.basek = 0;
self.k = 0;
self.rank = 0;
self.seed = [0u8; 32];
}
}
@@ -225,12 +230,9 @@ impl<D: Data> GLWECiphertextCompressed<D> {
}
impl GLWECiphertextCompressed<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
Self {
data: module.vec_znx_alloc(1, k.div_ceil(basek)),
data: VecZnx::alloc(n, 1, k.div_ceil(basek)),
basek,
k,
rank,
@@ -238,11 +240,8 @@ impl GLWECiphertextCompressed<Vec<u8>> {
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
where
Module<B>: VecZnxAllocBytes,
{
GLWECiphertext::bytes_of(module, basek, k, 1)
pub fn bytes_of(n: usize, basek: usize, k: usize) -> usize {
GLWECiphertext::bytes_of(n, basek, k, 1)
}
}

View File

@@ -1,5 +1,5 @@
use backend::hal::{
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAlloc, VecZnxNormalizeInplace, VecZnxStd, VecZnxSubABInplace},
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxNormalizeInplace, VecZnxStd, VecZnxSubABInplace},
layouts::{Backend, DataRef, Module, ScratchOwned},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
};
@@ -16,13 +16,14 @@ impl<D: DataRef> GLWECiphertext<D> {
) where
DataSk: DataRef,
DataPt: DataRef,
Module<B>: GLWEDecryptFamily<B> + VecZnxSubABInplace + VecZnxNormalizeInplace<B> + VecZnxStd + VecZnxAlloc,
Module<B>: GLWEDecryptFamily<B> + VecZnxSubABInplace + VecZnxNormalizeInplace<B> + VecZnxStd,
B: TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, self.basek(), self.k());
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), self.basek(), self.k());
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
));

View File

@@ -18,9 +18,8 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(a.basek(), b.basek());
assert!(self.rank() >= a.rank().max(b.rank()));
}
@@ -65,8 +64,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(self.basek(), a.basek());
assert!(self.rank() >= a.rank())
}
@@ -89,9 +87,8 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(b.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(a.basek(), b.basek());
assert!(self.rank() >= a.rank().max(b.rank()));
}
@@ -137,8 +134,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(self.basek(), a.basek());
assert!(self.rank() >= a.rank())
}
@@ -160,8 +156,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(self.basek(), a.basek());
assert!(self.rank() >= a.rank())
}
@@ -183,8 +178,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(self.rank(), a.rank())
}
@@ -203,11 +197,6 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
where
Module<B>: VecZnxRotateInplace,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
}
let self_mut: &mut GLWECiphertext<&mut [u8]> = &mut self.to_mut();
(0..self_mut.rank() + 1).for_each(|i| {
@@ -222,8 +211,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), self.n());
assert_eq!(self.rank(), a.rank())
}
@@ -242,11 +230,6 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
where
Module<B>: VecZnxMulXpMinusOneInplace,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
}
let self_mut: &mut GLWECiphertext<&mut [u8]> = &mut self.to_mut();
(0..self_mut.rank() + 1).for_each(|i| {
@@ -261,8 +244,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), a.n());
assert_eq!(self.rank(), a.rank());
}
@@ -292,8 +274,7 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
assert_eq!(a.n(), module.n());
assert_eq!(self.n(), a.n());
assert_eq!(self.rank(), a.rank());
}
@@ -311,10 +292,6 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
where
Module<B>: VecZnxNormalizeInplace<B>,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
}
let self_mut: &mut GLWECiphertext<&mut [u8]> = &mut self.to_mut();
(0..self_mut.rank() + 1).for_each(|i| {
module.vec_znx_normalize_inplace(self_mut.basek(), &mut self_mut.data, i, scratch);
@@ -323,8 +300,8 @@ pub trait GLWEOps: GLWECiphertextToMut + SetMetaData + Sized {
}
impl GLWECiphertext<Vec<u8>> {
pub fn rsh_scratch_space<BACKEND: Backend>(module: &Module<BACKEND>) -> usize {
VecZnx::rsh_scratch_space(module.n())
pub fn rsh_scratch_space(n: usize) -> usize {
VecZnx::rsh_scratch_space(n)
}
}

View File

@@ -2,9 +2,9 @@ use std::collections::HashMap;
use backend::hal::{
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAlloc, VecZnxAllocBytes, VecZnxAutomorphismInplace,
VecZnxBigAutomorphismInplace, VecZnxBigSubSmallBInplace, VecZnxCopy, VecZnxNegateInplace, VecZnxNormalizeInplace,
VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace,
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAutomorphismInplace,
VecZnxBigSubSmallBInplace, VecZnxCopy, VecZnxNegateInplace, VecZnxNormalizeInplace, VecZnxRotate, VecZnxRotateInplace,
VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
};
@@ -52,12 +52,9 @@ impl Accumulator {
/// * `basek`: base-2 logarithm of the in-memory digit representation of the GLWE ciphertext.
/// * `k`: base-2 precision of the GLWE ciphertext over the Torus.
/// * `rank`: rank of the GLWE ciphertext.
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
Self {
data: GLWECiphertext::alloc(module, basek, k, rank),
data: GLWECiphertext::alloc(n, basek, k, rank),
value: false,
control: false,
}
@@ -78,13 +75,10 @@ impl GLWEPacker {
/// * `basek`: base-2 logarithm of the in-memory digit representation of the GLWE ciphertext.
/// * `k`: base-2 precision of the GLWE ciphertext over the Torus.
/// * `rank`: rank of the GLWE ciphertext.
pub fn new<B: Backend>(module: &Module<B>, log_batch: usize, basek: usize, k: usize, rank: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn new(n: usize, log_batch: usize, basek: usize, k: usize, rank: usize) -> Self {
let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
let log_n: usize = module.log_n();
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(module, basek, k, rank)));
let log_n: usize = (usize::BITS - (n - 1).leading_zeros()) as _; // ceil(log2(n)); equals log2(n) for the power-of-two ring degrees used here
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(n, basek, k, rank)));
Self {
accumulators: accumulators,
log_batch,
@@ -104,6 +98,7 @@ impl GLWEPacker {
/// Number of scratch space bytes required to call [Self::add].
pub fn scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
ct_k: usize,
k_ksk: usize,
@@ -111,9 +106,9 @@ impl GLWEPacker {
rank: usize,
) -> usize
where
Module<B>: GLWEKeyswitchFamily<B> + VecZnxAllocBytes,
Module<B>: GLWEKeyswitchFamily<B>,
{
pack_core_scratch_space(module, basek, ct_k, k_ksk, digits, rank)
pack_core_scratch_space(module, n, basek, ct_k, k_ksk, digits, rank)
}
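
Putting the constructor and its scratch-space helper together, a sketch of how a packer might now be set up; the parameters are arbitrary and a module: &Module<FFT64> is assumed to be in scope (as in the example at the top of this commit), satisfying GLWEKeyswitchFamily:

// Hypothetical setup; n=1024, log_batch=0, basek=12, k=54, k_ksk=54, digits=1, rank=1.
let packer: GLWEPacker = GLWEPacker::new(1024, 0, 12, 54, 1);
let scratch_bytes: usize = GLWEPacker::scratch_space(module, 1024, 12, 54, 54, 1, 1);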
pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
@@ -137,12 +132,12 @@ impl GLWEPacker {
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEPackingFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
assert!(
self.counter < module.n(),
self.counter < self.accumulators[0].data.n(),
"Packing limit of {} reached",
module.n() >> self.log_batch
self.accumulators[0].data.n() >> self.log_batch
);
pack_core(
@@ -161,7 +156,7 @@ impl GLWEPacker {
where
Module<B>: VecZnxCopy,
{
assert!(self.counter == module.n());
assert!(self.counter == self.accumulators[0].data.n());
// Copy result GLWE into res GLWE
res.copy(
module,
@@ -174,6 +169,7 @@ impl GLWEPacker {
fn pack_core_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
ct_k: usize,
k_ksk: usize,
@@ -181,9 +177,9 @@ fn pack_core_scratch_space<B: Backend>(
rank: usize,
) -> usize
where
Module<B>: GLWEKeyswitchFamily<B> + VecZnxAllocBytes,
Module<B>: GLWEKeyswitchFamily<B>,
{
combine_scratch_space(module, basek, ct_k, k_ksk, digits, rank)
combine_scratch_space(module, n, basek, ct_k, k_ksk, digits, rank)
}
fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
@@ -195,7 +191,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEPackingFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let log_n: usize = module.log_n();
@@ -248,6 +244,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
fn combine_scratch_space<B: Backend>(
module: &Module<B>,
n: usize,
basek: usize,
ct_k: usize,
k_ksk: usize,
@@ -255,11 +252,11 @@ fn combine_scratch_space<B: Backend>(
rank: usize,
) -> usize
where
Module<B>: GLWEKeyswitchFamily<B> + VecZnxAllocBytes,
Module<B>: GLWEKeyswitchFamily<B>,
{
GLWECiphertext::bytes_of(module, basek, ct_k, rank)
+ (GLWECiphertext::rsh_scratch_space(module)
| GLWECiphertext::automorphism_scratch_space(module, basek, ct_k, ct_k, k_ksk, digits, rank))
GLWECiphertext::bytes_of(n, basek, ct_k, rank)
+ (GLWECiphertext::rsh_scratch_space(n)
| GLWECiphertext::automorphism_scratch_space(module, n, basek, ct_k, ct_k, k_ksk, digits, rank))
}
/// [combine] merges two ciphertexts together.
@@ -272,9 +269,10 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
scratch: &mut Scratch<B>,
) where
Module<B>: GLWEPackingFamily<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let log_n: usize = module.log_n();
let n: usize = acc.data.n();
let log_n: usize = (usize::BITS - (n - 1).leading_zeros()) as _; // ceil(log2(n)); usize::BITS (rather than u64::BITS) keeps this correct on 32-bit targets
let a: &mut GLWECiphertext<Vec<u8>> = &mut acc.data;
let basek: usize = a.basek();
let k: usize = a.k();
@@ -302,7 +300,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
// since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
if acc.value {
if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, basek, k, rank);
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
// a = a * X^-t
a.rotate_inplace(module, -t);
@@ -343,7 +341,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
}
} else {
if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, basek, k, rank);
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
tmp_b.rotate(module, 1 << (log_n - i - 1), b);
tmp_b.rsh(module, 1);
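
Aside (not part of the commit): combine now derives log_n from the accumulator's ring degree n instead of calling module.log_n(). A minimal, std-only check that the formula above recovers log2(n) for power-of-two n; the diff uses u64::BITS, which agrees with usize::BITS on 64-bit targets.

fn log2_pow2(n: usize) -> usize {
    // Same computation as in combine above, written against usize::BITS.
    (usize::BITS - (n - 1).leading_zeros()) as usize
}

fn main() {
    for log_n in 1..=16usize {
        let n = 1usize << log_n;
        assert_eq!(log2_pow2(n), log_n);
        assert_eq!(n.trailing_zeros() as usize, log_n); // equivalent for powers of two
    }
}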


@@ -1,7 +1,4 @@
use backend::hal::{
api::{VecZnxAlloc, VecZnxAllocBytes},
layouts::{Backend, Data, DataMut, DataRef, Module, VecZnx, VecZnxToMut, VecZnxToRef},
};
use backend::hal::layouts::{Data, DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef};
use crate::{GLWECiphertext, GLWECiphertextToMut, GLWECiphertextToRef, GLWEOps, Infos, SetMetaData};
@@ -38,22 +35,16 @@ impl<D: DataMut> SetMetaData for GLWEPlaintext<D> {
}
impl GLWEPlaintext<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize) -> Self {
Self {
data: module.vec_znx_alloc(1, k.div_ceil(basek)),
data: VecZnx::alloc(n, 1, k.div_ceil(basek)),
basek: basek,
k,
}
}
pub fn byte_of<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
where
Module<B>: VecZnxAllocBytes,
{
module.vec_znx_alloc_bytes(1, k.div_ceil(basek))
pub fn byte_of(n: usize, basek: usize, k: usize) -> usize {
VecZnx::alloc_bytes(n, 1, k.div_ceil(basek))
}
}
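
Aside: with the Module dependency gone, the size arithmetic in these constructors reduces to the limb count k.div_ceil(basek). A tiny std-only sketch of that computation; the 54/12 case matches the serialization tests later in this diff.

fn limbs(k: usize, basek: usize) -> usize {
    // Number of base-2^basek digits (small polynomials) needed to hold k bits.
    k.div_ceil(basek)
}

fn main() {
    assert_eq!(limbs(54, 12), 5); // basek = 12, k = 54 -> 5 limbs
}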


@@ -1,8 +1,5 @@
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAlloc, VecZnxAllocBytes, VecZnxDftAlloc, VecZnxDftAllocBytes,
VecZnxDftFromVecZnx,
},
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftFromVecZnx},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, Scratch, ScratchOwned, VecZnx, VecZnxDft, WriterTo},
oep::{ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxDftImpl, TakeVecZnxImpl},
};
@@ -21,23 +18,17 @@ pub struct GLWEPublicKey<D: Data> {
}
impl GLWEPublicKey<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
where
Module<B>: VecZnxAlloc,
{
pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
Self {
data: module.vec_znx_alloc(rank + 1, k.div_ceil(basek)),
data: VecZnx::alloc(n, rank + 1, k.div_ceil(basek)),
basek: basek,
k: k,
dist: Distribution::NONE,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: VecZnxAllocBytes,
{
module.vec_znx_alloc_bytes(rank + 1, k.div_ceil(basek))
pub fn bytes_of(n: usize, basek: usize, k: usize, rank: usize) -> usize {
VecZnx::alloc_bytes(n, rank + 1, k.div_ceil(basek))
}
}
@@ -72,7 +63,7 @@ impl<D: DataMut> GLWEPublicKey<D> {
source_xe: &mut Source,
sigma: f64,
) where
Module<B>: GLWEPublicKeyFamily<B> + VecZnxAlloc,
Module<B>: GLWEPublicKeyFamily<B>,
B: ScratchOwnedAllocImpl<B>
+ ScratchOwnedBorrowImpl<B>
+ TakeVecZnxDftImpl<B>
@@ -81,6 +72,8 @@ impl<D: DataMut> GLWEPublicKey<D> {
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), sk.n());
match sk.dist {
Distribution::NONE => panic!("invalid sk: SecretDistribution::NONE"),
_ => {}
@@ -90,11 +83,12 @@ impl<D: DataMut> GLWEPublicKey<D> {
// It's ok to allocate scratch space here since pk is usually generated only once.
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::encrypt_sk_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
));
let mut tmp: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, self.basek(), self.k(), self.rank());
let mut tmp: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(self.n(), self.basek(), self.k(), self.rank());
tmp.encrypt_zero_sk(module, sk, source_xa, source_xe, sigma, scratch.borrow());
self.dist = sk.dist;
}
@@ -157,23 +151,23 @@ impl<D: Data, B: Backend> GLWEPublicKeyExec<D, B> {
}
impl<B: Backend> GLWEPublicKeyExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> Self
where
Module<B>: VecZnxDftAlloc<B>,
{
Self {
data: module.vec_znx_dft_alloc(rank + 1, k.div_ceil(basek)),
data: module.vec_znx_dft_alloc(n, rank + 1, k.div_ceil(basek)),
basek: basek,
k: k,
dist: Distribution::NONE,
}
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
where
Module<B>: VecZnxDftAllocBytes,
{
module.vec_znx_dft_alloc_bytes(rank + 1, k.div_ceil(basek))
module.vec_znx_dft_alloc_bytes(n, rank + 1, k.div_ceil(basek))
}
pub fn from<DataOther>(module: &Module<B>, other: &GLWEPublicKey<DataOther>, scratch: &mut Scratch<B>) -> Self
@@ -181,7 +175,8 @@ impl<B: Backend> GLWEPublicKeyExec<Vec<u8>, B> {
DataOther: DataRef,
Module<B>: VecZnxDftAlloc<B> + VecZnxDftFromVecZnx<B>,
{
let mut pk_exec: GLWEPublicKeyExec<Vec<u8>, B> = GLWEPublicKeyExec::alloc(module, other.basek(), other.k(), other.rank());
let mut pk_exec: GLWEPublicKeyExec<Vec<u8>, B> =
GLWEPublicKeyExec::alloc(module, other.n(), other.basek(), other.k(), other.rank());
pk_exec.prepare(module, other, scratch);
pk_exec
}
@@ -195,8 +190,7 @@ impl<D: DataMut, B: Backend> GLWEPublicKeyExec<D, B> {
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n());
assert_eq!(other.n(), module.n());
assert_eq!(self.n(), other.n());
assert_eq!(self.size(), other.size());
}


@@ -1,5 +1,5 @@
use backend::hal::{
api::{ScalarZnxAlloc, ScalarZnxAllocBytes, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, ZnxInfos, ZnxZero},
api::{SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, ZnxInfos, ZnxZero},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, ScalarZnx, SvpPPol, WriterTo},
};
use sampling::source::Source;
@@ -15,21 +15,15 @@ pub struct GLWESecret<D: Data> {
}
impl GLWESecret<Vec<u8>> {
pub fn alloc<B: Backend>(module: &Module<B>, rank: usize) -> Self
where
Module<B>: ScalarZnxAlloc,
{
pub fn alloc(n: usize, rank: usize) -> Self {
Self {
data: module.scalar_znx_alloc(rank),
data: ScalarZnx::alloc(n, rank),
dist: Distribution::NONE,
}
}
pub fn bytes_of<B: Backend>(module: &Module<B>, rank: usize) -> usize
where
Module<B>: ScalarZnxAllocBytes,
{
module.scalar_znx_alloc_bytes(rank)
pub fn bytes_of(n: usize, rank: usize) -> usize {
ScalarZnx::alloc_bytes(n, rank)
}
}
@@ -115,21 +109,21 @@ pub struct GLWESecretExec<D: Data, B: Backend> {
}
impl<B: Backend> GLWESecretExec<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, rank: usize) -> Self
pub fn alloc(module: &Module<B>, n: usize, rank: usize) -> Self
where
Module<B>: GLWESecretFamily<B>,
Module<B>: SvpPPolAlloc<B>,
{
Self {
data: module.svp_ppol_alloc(rank),
data: module.svp_ppol_alloc(n, rank),
dist: Distribution::NONE,
}
}
pub fn bytes_of(module: &Module<B>, rank: usize) -> usize
pub fn bytes_of(module: &Module<B>, n: usize, rank: usize) -> usize
where
Module<B>: GLWESecretFamily<B>,
Module<B>: SvpPPolAllocBytes,
{
module.svp_ppol_alloc_bytes(rank)
module.svp_ppol_alloc_bytes(n, rank)
}
}
@@ -137,9 +131,9 @@ impl<B: Backend> GLWESecretExec<Vec<u8>, B> {
pub fn from<D>(module: &Module<B>, sk: &GLWESecret<D>) -> Self
where
D: DataRef,
Module<B>: GLWESecretFamily<B>,
Module<B>: SvpPrepare<B> + SvpPPolAlloc<B>,
{
let mut sk_dft: GLWESecretExec<Vec<u8>, B> = Self::alloc(module, sk.rank());
let mut sk_dft: GLWESecretExec<Vec<u8>, B> = Self::alloc(module, sk.n(), sk.rank());
sk_dft.prepare(module, sk);
sk_dft
}
@@ -163,7 +157,7 @@ impl<D: DataMut, B: Backend> GLWESecretExec<D, B> {
pub(crate) fn prepare<O>(&mut self, module: &Module<B>, sk: &GLWESecret<O>)
where
O: DataRef,
Module<B>: GLWESecretFamily<B>,
Module<B>: SvpPrepare<B>,
{
(0..self.rank()).for_each(|i| {
module.svp_prepare(&mut self.data, i, &sk.data, i);


@@ -8,7 +8,6 @@ use crate::glwe::tests::{
generic_encryption::{test_encrypt_pk, test_encrypt_sk, test_encrypt_sk_compressed, test_encrypt_zero_sk},
generic_external_product::{test_external_product, test_external_product_inplace},
generic_keyswitch::{test_keyswitch, test_keyswitch_inplace},
generic_serialization::{test_serialization, test_serialization_compressed},
packing::test_packing,
trace::test_trace_inplace,
};
@@ -175,17 +174,3 @@ fn packing() {
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
test_packing(&module);
}
#[test]
fn serialization() {
let log_n: usize = 5;
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
test_serialization(&module);
}
#[test]
fn serialization_compressed() {
let log_n: usize = 5;
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
test_serialization_compressed(&module);
}


@@ -1,8 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxFillUniform, VecZnxStd,
VecZnxSwithcDegree,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
VecZnxFillUniform, VecZnxStd, VecZnxSwithcDegree,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
@@ -22,13 +21,8 @@ pub(crate) trait AutomorphismTestModuleFamily<B: Backend> = AutomorphismKeyEncry
+ GLWEDecryptFamily<B>
+ GGLWEExecLayoutFamily<B>
+ GLWEKeyswitchFamily<B>
+ MatZnxAlloc
+ VecZnxAlloc
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxAutomorphism
+ VecZnxSwithcDegree
+ ScalarZnxAlloc
+ VecZnxAddScalarInplace
+ VecZnxAutomorphismInplace
+ VecZnxStd;
@@ -55,12 +49,13 @@ pub(crate) fn test_automorphism<B: Backend>(
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let mut autokey: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_in, rank);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_out, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_in);
let mut autokey: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_in, rank);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_out, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_in);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -69,11 +64,12 @@ pub(crate) fn test_automorphism<B: Backend>(
module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_in, &mut source_xa);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
AutomorphismKey::encrypt_sk_scratch_space(module, basek, autokey.k(), rank)
| GLWECiphertext::decrypt_scratch_space(module, basek, ct_out.k())
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_in.k())
AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, autokey.k(), rank)
| GLWECiphertext::decrypt_scratch_space(module, n, basek, ct_out.k())
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_in.k())
| GLWECiphertext::automorphism_scratch_space(
module,
n,
basek,
ct_out.k(),
ct_in.k(),
@@ -83,7 +79,7 @@ pub(crate) fn test_automorphism<B: Backend>(
),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -107,7 +103,8 @@ pub(crate) fn test_automorphism<B: Backend>(
scratch.borrow(),
);
let mut autokey_exec: AutomorphismKeyExec<Vec<u8>, B> = AutomorphismKeyExec::alloc(module, basek, k_ksk, rows, digits, rank);
let mut autokey_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank);
autokey_exec.prepare(module, &autokey, scratch.borrow());
ct_out.automorphism(module, &ct_in, &autokey_exec, scratch.borrow());
@@ -143,11 +140,12 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
Module<B>: AutomorphismTestModuleFamily<B>,
B: AutomorphismTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(basek * digits);
let mut autokey: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let mut autokey: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -156,13 +154,13 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_ct, &mut source_xa);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
AutomorphismKey::encrypt_sk_scratch_space(module, basek, autokey.k(), rank)
| GLWECiphertext::decrypt_scratch_space(module, basek, ct.k())
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
| GLWECiphertext::automorphism_inplace_scratch_space(module, basek, ct.k(), autokey.k(), digits, rank),
AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, autokey.k(), rank)
| GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k())
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
| GLWECiphertext::automorphism_inplace_scratch_space(module, n, basek, ct.k(), autokey.k(), digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -186,7 +184,8 @@ pub(crate) fn test_automorphism_inplace<B: Backend>(
scratch.borrow(),
);
let mut autokey_exec: AutomorphismKeyExec<Vec<u8>, B> = AutomorphismKeyExec::alloc(module, basek, k_ksk, rows, digits, rank);
let mut autokey_exec: AutomorphismKeyExec<Vec<u8>, B> =
AutomorphismKeyExec::alloc(module, n, basek, k_ksk, rows, digits, rank);
autokey_exec.prepare(module, &autokey, scratch.borrow());
ct.automorphism_inplace(module, &autokey_exec, scratch.borrow());


@@ -1,8 +1,5 @@
use backend::hal::{
api::{
ScalarZnxAlloc, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAlloc, VecZnxCopy, VecZnxDftAlloc, VecZnxFillUniform,
VecZnxStd, VecZnxSubABInplace,
},
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxCopy, VecZnxDftAlloc, VecZnxFillUniform, VecZnxStd, VecZnxSubABInplace},
layouts::{Backend, Module, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeScalarZnxImpl, TakeSvpPPolImpl,
@@ -16,8 +13,7 @@ use crate::{
GLWEPlaintext, GLWEPublicKey, GLWEPublicKeyExec, GLWESecret, GLWESecretExec, GLWESecretFamily, Infos,
};
pub(crate) trait EncryptionTestModuleFamily<B: Backend> =
GLWEDecryptFamily<B> + GLWESecretFamily<B> + VecZnxAlloc + ScalarZnxAlloc + VecZnxStd;
pub(crate) trait EncryptionTestModuleFamily<B: Backend> = GLWEDecryptFamily<B> + GLWESecretFamily<B> + VecZnxStd;
pub(crate) trait EncryptionTestScratchFamily<B: Backend> = TakeVecZnxDftImpl<B>
+ TakeVecZnxBigImpl<B>
@@ -33,20 +29,21 @@ where
Module<B>: EncryptionTestModuleFamily<B> + GLWEEncryptSkFamily<B>,
B: EncryptionTestScratchFamily<B>,
{
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_pt);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_pt);
let n = module.n();
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
| GLWECiphertext::decrypt_scratch_space(module, basek, ct.k()),
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
| GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k()),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -83,21 +80,22 @@ pub(crate) fn test_encrypt_sk_compressed<B: Backend>(
Module<B>: EncryptionTestModuleFamily<B> + GLWEEncryptSkFamily<B> + VecZnxCopy,
B: EncryptionTestScratchFamily<B>,
{
let mut ct_compressed: GLWECiphertextCompressed<Vec<u8>> = GLWECiphertextCompressed::alloc(module, basek, k_ct, rank);
let n = module.n();
let mut ct_compressed: GLWECiphertextCompressed<Vec<u8>> = GLWECiphertextCompressed::alloc(n, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_pt);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_pt);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertextCompressed::encrypt_sk_scratch_space(module, basek, k_ct)
| GLWECiphertext::decrypt_scratch_space(module, basek, k_ct),
GLWECiphertextCompressed::encrypt_sk_scratch_space(module, n, basek, k_ct)
| GLWECiphertext::decrypt_scratch_space(module, n, basek, k_ct),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -115,7 +113,7 @@ pub(crate) fn test_encrypt_sk_compressed<B: Backend>(
scratch.borrow(),
);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
ct.decompress(module, &ct_compressed);
ct.decrypt(module, &mut pt_have, &sk_exec, scratch.borrow());
@@ -138,21 +136,22 @@ where
Module<B>: EncryptionTestModuleFamily<B> + GLWEEncryptSkFamily<B>,
B: EncryptionTestScratchFamily<B>,
{
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let n = module.n();
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([1u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertext::decrypt_scratch_space(module, basek, k_ct)
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, k_ct),
GLWECiphertext::decrypt_scratch_space(module, n, basek, k_ct)
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct),
);
ct.encrypt_zero_sk(
@@ -178,26 +177,27 @@ where
+ VecZnxSubABInplace,
B: EncryptionTestScratchFamily<B>,
{
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let n: usize = module.n();
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut source_xu: Source = Source::new([0u8; 32]);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
let mut pk: GLWEPublicKey<Vec<u8>> = GLWEPublicKey::alloc(module, basek, k_pk, rank);
let mut pk: GLWEPublicKey<Vec<u8>> = GLWEPublicKey::alloc(n, basek, k_pk, rank);
pk.generate_from_sk(module, &sk_exec, &mut source_xa, &mut source_xe, sigma);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
| GLWECiphertext::decrypt_scratch_space(module, basek, ct.k())
| GLWECiphertext::encrypt_pk_scratch_space(module, basek, pk.k()),
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
| GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k())
| GLWECiphertext::encrypt_pk_scratch_space(module, n, basek, pk.k()),
);
module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_ct, &mut source_xa);
@@ -219,7 +219,7 @@ where
pt_want.sub_inplace_ab(module, &pt_have);
let noise_have: f64 = module.vec_znx_std(basek, &pt_want.data, 0).log2();
let noise_want: f64 = ((((rank as f64) + 1.0) * module.n() as f64 * 0.5 * sigma * sigma).sqrt()).log2() - (k_ct as f64);
let noise_want: f64 = ((((rank as f64) + 1.0) * n as f64 * 0.5 * sigma * sigma).sqrt()).log2() - (k_ct as f64);
assert!(
noise_have <= noise_want + 0.2,
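
Aside: the expected-noise bound asserted above, written as a standalone function. It mirrors the noise_want expression in the diff; the parameter values in main are illustrative only.

fn expected_pk_noise_log2(rank: usize, n: usize, sigma: f64, k_ct: usize) -> f64 {
    // log2 of the expected error standard deviation, expressed relative to the scale 2^k_ct.
    ((rank as f64 + 1.0) * n as f64 * 0.5 * sigma * sigma).sqrt().log2() - k_ct as f64
}

fn main() {
    println!("{:.2} bits", expected_pk_noise_log2(1, 1 << 11, 3.2, 54));
}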


@@ -1,7 +1,7 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAlloc,
VecZnxAllocBytes, VecZnxFillUniform, VecZnxRotateInplace, VecZnxStd, ZnxViewMut,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxFillUniform, VecZnxRotateInplace, VecZnxStd,
ZnxViewMut,
},
layouts::{Backend, Module, ScalarZnx, ScratchOwned},
oep::{
@@ -21,10 +21,6 @@ pub(crate) trait ExternalProductTestModuleFamily<B: Backend> = GLWEEncryptSkFami
+ GLWESecretFamily<B>
+ GLWEExternalProductFamily<B>
+ GGSWLayoutFamily<B>
+ MatZnxAlloc
+ VecZnxAlloc
+ ScalarZnxAlloc
+ VecZnxAllocBytes
+ VecZnxAddScalarInplace
+ VecZnxRotateInplace
+ VecZnxStd;
@@ -51,13 +47,14 @@ pub(crate) fn test_external_product<B: Backend>(
Module<B>: ExternalProductTestModuleFamily<B>,
B: ExternalProductTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_in, rank);
let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_out, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_in);
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_in, rank);
let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_out, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_in);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -73,10 +70,11 @@ pub(crate) fn test_external_product<B: Backend>(
pt_rgsw.raw_mut()[k] = 1; // X^{k}
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_glwe_in.k())
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_glwe_in.k())
| GLWECiphertext::external_product_scratch_space(
module,
n,
basek,
ct_glwe_out.k(),
ct_glwe_in.k(),
@@ -86,7 +84,7 @@ pub(crate) fn test_external_product<B: Backend>(
),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -119,12 +117,12 @@ pub(crate) fn test_external_product<B: Backend>(
let var_gct_err_lhs: f64 = sigma * sigma;
let var_gct_err_rhs: f64 = 0f64;
let var_msg: f64 = 1f64 / module.n() as f64; // X^{k}
let var_msg: f64 = 1f64 / n as f64; // X^{k}
let var_a0_err: f64 = sigma * sigma;
let var_a1_err: f64 = 1f64 / 12f64;
let max_noise: f64 = noise_ggsw_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
var_msg,
@@ -152,12 +150,13 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
Module<B>: ExternalProductTestModuleFamily<B>,
B: ExternalProductTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(basek * digits);
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(module, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.scalar_znx_alloc(1);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -173,12 +172,12 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
pt_rgsw.raw_mut()[k] = 1; // X^{k}
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GGSWCiphertext::encrypt_sk_scratch_space(module, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_glwe.k())
| GLWECiphertext::external_product_inplace_scratch_space(module, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, ct_ggsw.k(), rank)
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_glwe.k())
| GLWECiphertext::external_product_inplace_scratch_space(module, n, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
@@ -211,12 +210,12 @@ pub(crate) fn test_external_product_inplace<B: Backend>(
let var_gct_err_lhs: f64 = sigma * sigma;
let var_gct_err_rhs: f64 = 0f64;
let var_msg: f64 = 1f64 / module.n() as f64; // X^{k}
let var_msg: f64 = 1f64 / n as f64; // X^{k}
let var_a0_err: f64 = sigma * sigma;
let var_a1_err: f64 = 1f64 / 12f64;
let max_noise: f64 = noise_ggsw_product(
module.n() as f64,
n as f64,
basek * digits,
0.5,
var_msg,
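
Aside: var_msg = 1/n above reflects that the GGSW plaintext is a single monomial X^k, i.e. a length-n coefficient vector with one entry equal to 1, whose mean-square coefficient value is 1/n. A quick std-only check:

fn mean_square(coeffs: &[i64]) -> f64 {
    coeffs.iter().map(|&c| (c * c) as f64).sum::<f64>() / coeffs.len() as f64
}

fn main() {
    let n = 1usize << 10;
    let mut pt = vec![0i64; n];
    pt[7] = 1; // the monomial X^7, as pt_rgsw is set above
    assert!((mean_square(&pt) - 1.0 / n as f64).abs() < 1e-12);
}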


@@ -1,8 +1,5 @@
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxFillUniform, VecZnxStd, VecZnxSwithcDegree,
},
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxFillUniform, VecZnxStd, VecZnxSwithcDegree},
layouts::{Backend, Module, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeScalarZnxImpl, TakeSvpPPolImpl,
@@ -22,11 +19,6 @@ pub(crate) trait KeySwitchTestModuleFamily<B: Backend> = GLWESecretFamily<B>
+ GLWEKeyswitchFamily<B>
+ GLWEDecryptFamily<B>
+ GGLWEExecLayoutFamily<B>
+ MatZnxAlloc
+ VecZnxAlloc
+ ScalarZnxAlloc
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxStd
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace;
@@ -54,12 +46,13 @@ pub(crate) fn test_keyswitch<B: Backend>(
Module<B>: KeySwitchTestModuleFamily<B>,
B: KeySwitchTestScratchFamily<B>,
{
let n = module.n();
let rows: usize = k_in.div_ceil(basek * digits);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank_in, rank_out);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_in, rank_in);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_out, rank_out);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_in);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank_in, rank_out);
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_in, rank_in);
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_out, rank_out);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_in);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -68,10 +61,11 @@ pub(crate) fn test_keyswitch<B: Backend>(
module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_in, &mut source_xa);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, ksk.k(), rank_in, rank_out)
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_in.k())
GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, ksk.k(), rank_in, rank_out)
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_in.k())
| GLWECiphertext::keyswitch_scratch_space(
module,
n,
basek,
ct_out.k(),
ct_in.k(),
@@ -82,11 +76,11 @@ pub(crate) fn test_keyswitch<B: Backend>(
),
);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_in);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
sk_in.fill_ternary_prob(0.5, &mut source_xs);
let sk_in_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_in);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank_out);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
sk_out.fill_ternary_prob(0.5, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);
@@ -142,11 +136,12 @@ pub(crate) fn test_keyswitch_inplace<B: Backend>(
Module<B>: KeySwitchTestModuleFamily<B>,
B: KeySwitchTestScratchFamily<B>,
{
let n: usize = module.n();
let rows: usize = k_ct.div_ceil(basek * digits);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(module, basek, k_ksk, rows, digits, rank, rank);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank, rank);
let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -155,16 +150,16 @@ pub(crate) fn test_keyswitch_inplace<B: Backend>(
module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_ct, &mut source_xa);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWESwitchingKey::encrypt_sk_scratch_space(module, basek, ksk.k(), rank, rank)
| GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_glwe.k())
| GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, ct_glwe.k(), ksk.k(), digits, rank),
GLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, ksk.k(), rank, rank)
| GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_glwe.k())
| GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, ct_glwe.k(), ksk.k(), digits, rank),
);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_in.fill_ternary_prob(0.5, &mut source_xs);
let sk_in_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_in);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk_out.fill_ternary_prob(0.5, &mut source_xs);
let sk_out_exec: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk_out);


@@ -1,23 +1,15 @@
use backend::hal::{
api::VecZnxAlloc,
layouts::{Backend, Module},
tests::serialization::test_reader_writer_interface,
};
use backend::hal::tests::serialization::test_reader_writer_interface;
use crate::{GLWECiphertext, GLWECiphertextCompressed};
pub(crate) fn test_serialization<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxAlloc,
{
let original: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, 12, 54, 3);
#[test]
fn test_serialization() {
let original: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(1024, 12, 54, 3);
test_reader_writer_interface(original);
}
pub(crate) fn test_serialization_compressed<B: Backend>(module: &Module<B>)
where
Module<B>: VecZnxAlloc,
{
let original: GLWECiphertextCompressed<Vec<u8>> = GLWECiphertextCompressed::alloc(module, 12, 54, 3);
#[test]
fn test_serialization_compressed() {
let original: GLWECiphertextCompressed<Vec<u8>> = GLWECiphertextCompressed::alloc(1024, 12, 54, 3);
test_reader_writer_interface(original);
}
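
Aside: the serialization tests above exercise a write-then-read round trip via test_reader_writer_interface. The same pattern, illustrated on a hypothetical std-only toy type (Toy, write_to, and read_from are placeholders, not the crate's WriterTo/ReaderFrom API):

use std::io::{Cursor, Read, Write};

#[derive(Debug, PartialEq)]
struct Toy {
    basek: u32,
    k: u32,
}

impl Toy {
    fn write_to<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
        w.write_all(&self.basek.to_le_bytes())?;
        w.write_all(&self.k.to_le_bytes())
    }

    fn read_from<R: Read>(r: &mut R) -> std::io::Result<Self> {
        let mut buf = [0u8; 4];
        r.read_exact(&mut buf)?;
        let basek = u32::from_le_bytes(buf);
        r.read_exact(&mut buf)?;
        Ok(Self { basek, k: u32::from_le_bytes(buf) })
    }
}

fn main() -> std::io::Result<()> {
    let original = Toy { basek: 12, k: 54 };
    let mut bytes = Vec::new();
    original.write_to(&mut bytes)?;
    let decoded = Toy::read_from(&mut Cursor::new(bytes))?;
    assert_eq!(original, decoded); // the round trip preserves the value
    Ok(())
}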


@@ -2,9 +2,8 @@ use std::collections::HashMap;
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxAutomorphism, VecZnxBigSubSmallBInplace, VecZnxEncodeVeci64, VecZnxRotateInplace,
VecZnxStd, VecZnxSwithcDegree,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigSubSmallBInplace,
VecZnxEncodeVeci64, VecZnxRotateInplace, VecZnxStd, VecZnxSwithcDegree,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
@@ -25,11 +24,6 @@ pub(crate) trait PackingTestModuleFamily<B: Backend> = GLWEPackingFamily<B>
+ GLWEKeyswitchFamily<B>
+ GLWEDecryptFamily<B>
+ GGLWEExecLayoutFamily<B>
+ MatZnxAlloc
+ VecZnxAlloc
+ ScalarZnxAlloc
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxStd
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace
@@ -56,6 +50,7 @@ where
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let n: usize = module.n();
let basek: usize = 18;
let k_ct: usize = 36;
let pt_k: usize = 18;
@@ -67,17 +62,17 @@ where
let rows: usize = k_ct.div_ceil(basek * digits);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertext::encrypt_sk_scratch_space(module, basek, k_ct)
| AutomorphismKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
| GLWEPacker::scratch_space(module, basek, k_ct, k_ksk, digits, rank),
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct)
| AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
| GLWEPacker::scratch_space(module, n, basek, k_ct, k_ksk, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let mut data: Vec<i64> = vec![0i64; module.n()];
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut data: Vec<i64> = vec![0i64; n];
data.iter_mut().enumerate().for_each(|(i, x)| {
*x = i as i64;
});
@@ -87,7 +82,7 @@ where
let gal_els: Vec<i64> = GLWEPacker::galois_elements(module);
let mut auto_keys: HashMap<i64, AutomorphismKeyExec<Vec<u8>, B>> = HashMap::new();
let mut tmp: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_ksk, rows, digits, rank);
let mut tmp: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_ksk, rows, digits, rank);
gal_els.iter().for_each(|gal_el| {
tmp.encrypt_sk(
module,
@@ -104,9 +99,9 @@ where
let log_batch: usize = 0;
let mut packer: GLWEPacker = GLWEPacker::new(module, log_batch, basek, k_ct, rank);
let mut packer: GLWEPacker = GLWEPacker::new(n, log_batch, basek, k_ct, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
ct.encrypt_sk(
module,
@@ -120,7 +115,7 @@ where
let log_n: usize = module.log_n();
(0..module.n() >> log_batch).for_each(|i| {
(0..n >> log_batch).for_each(|i| {
ct.encrypt_sk(
module,
&pt,
@@ -145,11 +140,11 @@ where
}
});
let mut res = GLWECiphertext::alloc(module, basek, k_ct, rank);
let mut res = GLWECiphertext::alloc(n, basek, k_ct, rank);
packer.flush(module, &mut res);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k_ct);
let mut data: Vec<i64> = vec![0i64; module.n()];
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_ct);
let mut data: Vec<i64> = vec![0i64; n];
data.iter_mut().enumerate().for_each(|(i, x)| {
if i % 5 == 0 {
*x = reverse_bits_msb(i, log_n as u32) as i64;
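
Aside: reverse_bits_msb is used above to build the expected packed plaintext. A plausible std-only definition, under the assumption that it reverses the low `bits` bits of the index:

fn reverse_bits_msb(x: usize, bits: u32) -> usize {
    // Reverse the whole word, then keep the top `bits` bits of the reversal,
    // which correspond to the low `bits` bits of x in reversed order.
    x.reverse_bits() >> (usize::BITS - bits)
}

fn main() {
    let log_n = 4u32;
    assert_eq!(reverse_bits_msb(0b0001, log_n), 0b1000);
    assert_eq!(reverse_bits_msb(0b0110, log_n), 0b0110); // palindromic over 4 bits
}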


@@ -2,10 +2,9 @@ use std::collections::HashMap;
use backend::hal::{
api::{
MatZnxAlloc, ScalarZnxAlloc, ScalarZnxAllocBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace,
VecZnxAlloc, VecZnxAllocBytes, VecZnxAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigSubSmallBInplace, VecZnxCopy,
VecZnxEncodeVeci64, VecZnxFillUniform, VecZnxNormalizeInplace, VecZnxRotateInplace, VecZnxRshInplace, VecZnxStd,
VecZnxSubABInplace, VecZnxSwithcDegree, ZnxView, ZnxViewMut,
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigAutomorphismInplace,
VecZnxBigSubSmallBInplace, VecZnxCopy, VecZnxEncodeVeci64, VecZnxFillUniform, VecZnxNormalizeInplace,
VecZnxRotateInplace, VecZnxRshInplace, VecZnxStd, VecZnxSubABInplace, VecZnxSwithcDegree, ZnxView, ZnxViewMut,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
@@ -26,11 +25,6 @@ pub(crate) trait TraceTestModuleFamily<B: Backend> = GLWESecretFamily<B>
+ GLWEKeyswitchFamily<B>
+ GLWEDecryptFamily<B>
+ GGLWEExecLayoutFamily<B>
+ MatZnxAlloc
+ VecZnxAlloc
+ ScalarZnxAlloc
+ ScalarZnxAllocBytes
+ VecZnxAllocBytes
+ VecZnxStd
+ VecZnxSwithcDegree
+ VecZnxAddScalarInplace
@@ -56,31 +50,32 @@ where
Module<B>: TraceTestModuleFamily<B>,
B: TraceTestScratchFamily<B>,
{
let n: usize = module.n();
let k_autokey: usize = k + basek;
let digits: usize = 1;
let rows: usize = k.div_ceil(basek * digits);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(module, basek, k, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(module, basek, k);
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k, rank);
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k);
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k);
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
| GLWECiphertext::decrypt_scratch_space(module, basek, ct.k())
| AutomorphismKey::encrypt_sk_scratch_space(module, basek, k_autokey, rank)
| GLWECiphertext::trace_inplace_scratch_space(module, basek, ct.k(), k_autokey, digits, rank),
GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
| GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k())
| AutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_autokey, rank)
| GLWECiphertext::trace_inplace_scratch_space(module, n, basek, ct.k(), k_autokey, digits, rank),
);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(module, rank);
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);
let sk_dft: GLWESecretExec<Vec<u8>, B> = GLWESecretExec::from(module, &sk);
let mut data_want: Vec<i64> = vec![0i64; module.n()];
let mut data_want: Vec<i64> = vec![0i64; n];
data_want
.iter_mut()
@@ -100,7 +95,7 @@ where
let mut auto_keys: HashMap<i64, AutomorphismKeyExec<Vec<u8>, B>> = HashMap::new();
let gal_els: Vec<i64> = GLWECiphertext::trace_galois_elements(module);
let mut tmp: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(module, basek, k_autokey, rows, digits, rank);
let mut tmp: AutomorphismKey<Vec<u8>> = AutomorphismKey::alloc(n, basek, k_autokey, rows, digits, rank);
gal_els.iter().for_each(|gal_el| {
tmp.encrypt_sk(
module,
@@ -128,7 +123,7 @@ where
let noise_have: f64 = module.vec_znx_std(basek, &pt_want.data, 0).log2();
let mut noise_want: f64 = var_noise_gglwe_product(
module.n() as f64,
n as f64,
basek,
0.5,
0.5,
@@ -140,7 +135,7 @@ where
k_autokey,
);
noise_want += sigma * sigma * (-2.0 * (k) as f64).exp2();
noise_want += module.n() as f64 * 1.0 / 12.0 * 0.5 * rank as f64 * (-2.0 * (k) as f64).exp2();
noise_want += n as f64 * 1.0 / 12.0 * 0.5 * rank as f64 * (-2.0 * (k) as f64).exp2();
noise_want = noise_want.sqrt().log2();
assert!(
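
Aside: the tail of the noise bound above adds two 2^{-2k}-scaled variance terms before converting to a log2 standard deviation. A standalone sketch; the output of var_noise_gglwe_product is taken as an input here since its definition is not shown in this diff, and the values in main are illustrative only.

fn trace_noise_log2(base_var: f64, sigma: f64, n: usize, rank: usize, k: usize) -> f64 {
    let scale = (-2.0 * k as f64).exp2(); // 2^{-2k}
    let mut var = base_var;               // output of var_noise_gglwe_product in the test
    var += sigma * sigma * scale;
    var += n as f64 / 12.0 * 0.5 * rank as f64 * scale; // 1/12: variance of a uniform rounding error
    var.sqrt().log2()
}

fn main() {
    println!("{:.2}", trace_noise_log2(0.0, 3.2, 1 << 11, 1, 36));
}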

Some files were not shown because too many files have changed in this diff.