mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 05:06:44 +01:00)

Commit: Add Zn type
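Two things happen in this commit: a new `Zn` type (flat vectors over Z_n) is wired in with its own FFI bindings and backend implementations, and the explicit ring-degree argument (`n`, `nn`, `a.n()`, `sk.n()`, ...) is dropped from the scratch-space and allocation helpers, since a `Module` already carries its degree and exposes it as `module.n()`. The standalone sketch below (toy types and names, not the poulpy API) illustrates the shape of that second change.

// Illustrative only: a toy "module" that owns the ring degree, so helper
// functions no longer need an explicit `n` argument. Names are hypothetical.
struct Module {
    n: usize, // ring degree, fixed at construction
}

impl Module {
    fn new(n: usize) -> Self {
        Self { n }
    }

    // Before this commit the caller passed `n` explicitly:
    //     fn normalize_tmp_bytes(&self, n: usize) -> usize { n * size_of::<i64>() }
    // After, the degree is read from the module itself:
    fn normalize_tmp_bytes(&self) -> usize {
        self.n * std::mem::size_of::<i64>()
    }
}

fn main() {
    let module = Module::new(1 << 10);
    // Call sites lose one argument but compute the same size.
    println!("tmp bytes: {}", module.normalize_tmp_bytes());
}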
@@ -18,7 +18,7 @@ fn main() {
     let log_scale: usize = msg_size * basek - 5;
     let module: Module<FFT64> = Module::<FFT64>::new(n as u64);

-    let mut scratch: ScratchOwned<FFT64> = ScratchOwned::<FFT64>::alloc(module.vec_znx_big_normalize_tmp_bytes(n));
+    let mut scratch: ScratchOwned<FFT64> = ScratchOwned::<FFT64>::alloc(module.vec_znx_big_normalize_tmp_bytes());

     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
@@ -28,7 +28,7 @@ fn main() {
     s.fill_ternary_prob(0, 0.5, &mut source);

     // Buffer to store s in the DFT domain
-    let mut s_dft: SvpPPol<Vec<u8>, FFT64> = module.svp_ppol_alloc(n, s.cols());
+    let mut s_dft: SvpPPol<Vec<u8>, FFT64> = module.svp_ppol_alloc(s.cols());

     // s_dft <- DFT(s)
     module.svp_prepare(&mut s_dft, 0, &s, 0);
@@ -43,7 +43,7 @@ fn main() {
     // Fill the second column with random values: ct = (0, a)
     module.vec_znx_fill_uniform(basek, &mut ct, 1, ct_size * basek, &mut source);

-    let mut buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.vec_znx_dft_alloc(n, 1, ct_size);
+    let mut buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.vec_znx_dft_alloc(1, ct_size);

     module.vec_znx_dft_from_vec_znx(1, 0, &mut buf_dft, 0, &ct, 1);

@@ -58,7 +58,7 @@ fn main() {
     // Alias scratch space (VecZnxDft<B> is always at least as big as VecZnxBig<B>)

     // BIG(ct[1] * s) <- IDFT(DFT(ct[1] * s)) (not normalized)
-    let mut buf_big: VecZnxBig<Vec<u8>, FFT64> = module.vec_znx_big_alloc(n, 1, ct_size);
+    let mut buf_big: VecZnxBig<Vec<u8>, FFT64> = module.vec_znx_big_alloc(1, ct_size);
     module.vec_znx_dft_to_vec_znx_big_tmp_a(&mut buf_big, 0, &mut buf_dft, 0);

     // Creates a plaintext: VecZnx with 1 column
@@ -12,4 +12,6 @@ pub mod vec_znx_dft;
 #[allow(non_camel_case_types)]
 pub mod vmp;
 #[allow(non_camel_case_types)]
+pub mod zn64;
+#[allow(non_camel_case_types)]
 pub mod znx;
@@ -103,7 +103,6 @@ unsafe extern "C" {
 unsafe extern "C" {
     pub unsafe fn vec_znx_normalize_base2k(
         module: *const MODULE,
-        n: u64,
         base2k: u64,
         res: *mut i64,
         res_size: u64,
@@ -114,6 +113,7 @@ unsafe extern "C" {
         tmp_space: *mut u8,
     );
 }

 unsafe extern "C" {
-    pub unsafe fn vec_znx_normalize_base2k_tmp_bytes(module: *const MODULE, n: u64) -> u64;
+    pub unsafe fn vec_znx_normalize_base2k_tmp_bytes(module: *const MODULE) -> u64;
 }
@@ -93,13 +93,12 @@ unsafe extern "C" {
 }

 unsafe extern "C" {
-    pub unsafe fn vec_znx_big_normalize_base2k_tmp_bytes(module: *const MODULE, n: u64) -> u64;
+    pub unsafe fn vec_znx_big_normalize_base2k_tmp_bytes(module: *const MODULE) -> u64;
 }

 unsafe extern "C" {
     pub unsafe fn vec_znx_big_normalize_base2k(
         module: *const MODULE,
-        n: u64,
         log2_base2k: u64,
         res: *mut i64,
         res_size: u64,
@@ -113,7 +112,6 @@ unsafe extern "C" {
 unsafe extern "C" {
     pub unsafe fn vec_znx_big_range_normalize_base2k(
         module: *const MODULE,
-        n: u64,
         log2_base2k: u64,
         res: *mut i64,
         res_size: u64,
@@ -127,7 +125,7 @@ unsafe extern "C" {
 }

 unsafe extern "C" {
-    pub unsafe fn vec_znx_big_range_normalize_base2k_tmp_bytes(module: *const MODULE, n: u64) -> u64;
+    pub unsafe fn vec_znx_big_range_normalize_base2k_tmp_bytes(module: *const MODULE) -> u64;
 }

 unsafe extern "C" {
@@ -43,7 +43,7 @@ unsafe extern "C" {
     );
 }
 unsafe extern "C" {
-    pub unsafe fn vec_znx_idft_tmp_bytes(module: *const MODULE, n: u64) -> u64;
+    pub unsafe fn vec_znx_idft_tmp_bytes(module: *const MODULE) -> u64;
 }
 unsafe extern "C" {
     pub unsafe fn vec_znx_idft_tmp_a(
@@ -79,7 +79,6 @@ unsafe extern "C" {
 unsafe extern "C" {
     pub unsafe fn vmp_apply_dft_to_dft_tmp_bytes(
         module: *const MODULE,
-        nn: u64,
         res_size: u64,
         a_size: u64,
         nrows: u64,
@@ -99,5 +98,5 @@ unsafe extern "C" {
 }

 unsafe extern "C" {
-    pub unsafe fn vmp_prepare_tmp_bytes(module: *const MODULE, nn: u64, nrows: u64, ncols: u64) -> u64;
+    pub unsafe fn vmp_prepare_tmp_bytes(module: *const MODULE, nrows: u64, ncols: u64) -> u64;
 }
poulpy-backend/src/cpu_spqlios/ffi/zn64.rs (new file, 13 lines)
@@ -0,0 +1,13 @@
+unsafe extern "C" {
+    pub unsafe fn zn64_normalize_base2k_ref(
+        n: u64,
+        base2k: u64,
+        res: *mut i64,
+        res_size: u64,
+        res_sl: u64,
+        a: *const i64,
+        a_size: u64,
+        a_sl: u64,
+        tmp_space: *mut u8,
+    );
+}
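The binding above declares the reference base-2^k normalization routine for flat Z_n vectors; the safe wrapper that calls it is added further down in poulpy-backend/src/cpu_spqlios/fft64/zn.rs. As a rough mental model only (an illustrative standalone sketch, not the spqlios-arithmetic implementation), base-2^k normalization propagates carries so that every limb ends up centered in [-2^(basek-1), 2^(basek-1)):

// Illustrative carry propagation over base-2^k limbs, most significant limb
// first (limb j is treated as carrying weight 2^(-basek*(j+1))). This is a
// simplified model, not the spqlios-arithmetic routine.
fn normalize_base2k(basek: u32, limbs: &mut [i64]) {
    let half = 1i64 << (basek - 1);
    let full = 1i64 << basek;
    let mut carry = 0i64;
    // Walk from the least significant limb (last) to the most significant (first).
    for limb in limbs.iter_mut().rev() {
        let v = *limb + carry;
        // Centered remainder in [-2^(basek-1), 2^(basek-1)).
        let mut r = v.rem_euclid(full);
        if r >= half {
            r -= full;
        }
        carry = (v - r) >> basek;
        *limb = r;
    }
    // Any carry left over would belong to the next-higher limb; dropped here.
    let _ = carry;
}

fn main() {
    // basek = 8: after normalization every limb lies in [-128, 128).
    let mut limbs = [0i64, 300, -260];
    normalize_base2k(8, &mut limbs);
    println!("{limbs:?}"); // [1, 43, -4], same represented value
}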
@@ -5,6 +5,7 @@ mod vec_znx;
 mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
+mod zn;

 pub use module::FFT64;

@@ -25,8 +25,8 @@ use crate::cpu_spqlios::{
 };

 unsafe impl VecZnxNormalizeTmpBytesImpl<Self> for FFT64 {
-    fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>, n: usize) -> usize {
-        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(module.ptr() as *const module_info_t, n as u64) as usize }
+    fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
+        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(module.ptr() as *const module_info_t) as usize }
     }
 }

@@ -54,12 +54,11 @@ where
        assert_eq!(res.n(), a.n());
    }

-   let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes(a.n()));
+   let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes());

    unsafe {
        vec_znx::vec_znx_normalize_base2k(
            module.ptr() as *const module_info_t,
-           a.n() as u64,
            basek as u64,
            res.at_mut_ptr(res_col, 0),
            res.size() as u64,
@@ -88,12 +87,11 @@ where
 {
    let mut a: VecZnx<&mut [u8]> = a.to_mut();

-   let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes(a.n()));
+   let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes());

    unsafe {
        vec_znx::vec_znx_normalize_base2k(
            module.ptr() as *const module_info_t,
-           a.n() as u64,
            basek as u64,
            a.at_mut_ptr(a_col, 0),
            a.size() as u64,
@@ -569,8 +569,8 @@ unsafe impl VecZnxBigNegateInplaceImpl<Self> for FFT64 {
 }

 unsafe impl VecZnxBigNormalizeTmpBytesImpl<Self> for FFT64 {
-    fn vec_znx_big_normalize_tmp_bytes_impl(module: &Module<Self>, n: usize) -> usize {
-        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(module.ptr(), n as u64) as usize }
+    fn vec_znx_big_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
+        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(module.ptr()) as usize }
     }
 }

@@ -598,11 +598,10 @@ where
        assert_eq!(res.n(), a.n());
    }

-   let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes(a.n()));
+   let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes());
    unsafe {
        vec_znx::vec_znx_normalize_base2k(
            module.ptr(),
-           a.n() as u64,
            basek as u64,
            res.at_mut_ptr(res_col, 0),
            res.size() as u64,
@@ -36,8 +36,8 @@ unsafe impl VecZnxDftAllocImpl<Self> for FFT64 {
 }

 unsafe impl VecZnxDftToVecZnxBigTmpBytesImpl<Self> for FFT64 {
-    fn vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(module: &Module<Self>, n: usize) -> usize {
-        unsafe { vec_znx_dft::vec_znx_idft_tmp_bytes(module.ptr(), n as u64) as usize }
+    fn vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(module: &Module<Self>) -> usize {
+        unsafe { vec_znx_dft::vec_znx_idft_tmp_bytes(module.ptr()) as usize }
     }
 }

@@ -61,7 +61,7 @@ unsafe impl VecZnxDftToVecZnxBigImpl<Self> for FFT64 {
            assert_eq!(res.n(), a.n())
        }

-       let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_dft_to_vec_znx_big_tmp_bytes(a.n()));
+       let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_dft_to_vec_znx_big_tmp_bytes());

        let min_size: usize = res.size().min(a.size());

@@ -41,18 +41,10 @@ unsafe impl VmpPMatAllocImpl<FFT64> for FFT64 {
 }

 unsafe impl VmpPrepareTmpBytesImpl<FFT64> for FFT64 {
-    fn vmp_prepare_tmp_bytes_impl(
-        module: &Module<FFT64>,
-        n: usize,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-    ) -> usize {
+    fn vmp_prepare_tmp_bytes_impl(module: &Module<FFT64>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
        unsafe {
            vmp::vmp_prepare_tmp_bytes(
                module.ptr(),
-               n as u64,
                (rows * cols_in) as u64,
                (cols_out * size) as u64,
            ) as usize
@@ -102,8 +94,7 @@ unsafe impl VmpPMatPrepareImpl<FFT64> for FFT64 {
            );
        }

-       let (tmp_bytes, _) =
-           scratch.take_slice(module.vmp_prepare_tmp_bytes(res.n(), a.rows(), a.cols_in(), a.cols_out(), a.size()));
+       let (tmp_bytes, _) = scratch.take_slice(module.vmp_prepare_tmp_bytes(a.rows(), a.cols_in(), a.cols_out(), a.size()));

        unsafe {
            vmp::vmp_prepare_contiguous(
@@ -121,7 +112,6 @@ unsafe impl VmpPMatPrepareImpl<FFT64> for FFT64 {
 unsafe impl VmpApplyTmpBytesImpl<FFT64> for FFT64 {
     fn vmp_apply_tmp_bytes_impl(
         module: &Module<FFT64>,
-        n: usize,
         res_size: usize,
         a_size: usize,
         b_rows: usize,
@@ -132,7 +122,6 @@ unsafe impl VmpApplyTmpBytesImpl<FFT64> for FFT64 {
        unsafe {
            vmp::vmp_apply_dft_to_dft_tmp_bytes(
                module.ptr(),
-               n as u64,
                (res_size * b_cols_out) as u64,
                (a_size * b_cols_in) as u64,
                (b_rows * b_cols_in) as u64,
@@ -174,7 +163,6 @@ unsafe impl VmpApplyImpl<FFT64> for FFT64 {
        }

        let (tmp_bytes, _) = scratch.take_slice(module.vmp_apply_tmp_bytes(
-           res.n(),
            res.size(),
            a.size(),
            b.rows(),
@@ -201,7 +189,6 @@ unsafe impl VmpApplyImpl<FFT64> for FFT64 {
 unsafe impl VmpApplyAddTmpBytesImpl<FFT64> for FFT64 {
     fn vmp_apply_add_tmp_bytes_impl(
         module: &Module<FFT64>,
-        n: usize,
         res_size: usize,
         a_size: usize,
         b_rows: usize,
@@ -212,7 +199,6 @@ unsafe impl VmpApplyAddTmpBytesImpl<FFT64> for FFT64 {
        unsafe {
            vmp::vmp_apply_dft_to_dft_tmp_bytes(
                module.ptr(),
-               n as u64,
                (res_size * b_cols_out) as u64,
                (a_size * b_cols_in) as u64,
                (b_rows * b_cols_in) as u64,
@@ -254,7 +240,6 @@ unsafe impl VmpApplyAddImpl<FFT64> for FFT64 {
        }

        let (tmp_bytes, _) = scratch.take_slice(module.vmp_apply_tmp_bytes(
-           res.n(),
            res.size(),
            a.size(),
            b.rows(),
poulpy-backend/src/cpu_spqlios/fft64/zn.rs (new file, 201 lines)
@@ -0,0 +1,201 @@
+use poulpy_hal::{
+    api::{TakeSlice, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut},
+    layouts::{Scratch, Zn, ZnToMut},
+    oep::{
+        TakeSliceImpl, ZnAddDistF64Impl, ZnAddNormalImpl, ZnFillDistF64Impl, ZnFillNormalImpl, ZnFillUniformImpl,
+        ZnNormalizeInplaceImpl,
+    },
+    source::Source,
+};
+use rand_distr::Normal;
+
+use crate::cpu_spqlios::{FFT64, ffi::zn64};
+
+unsafe impl ZnNormalizeInplaceImpl<Self> for FFT64
+where
+    Self: TakeSliceImpl<Self>,
+{
+    fn zn_normalize_inplace_impl<A>(n: usize, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<Self>)
+    where
+        A: ZnToMut,
+    {
+        let mut a: Zn<&mut [u8]> = a.to_mut();
+
+        let (tmp_bytes, _) = scratch.take_slice(n * size_of::<i64>());
+
+        unsafe {
+            zn64::zn64_normalize_base2k_ref(
+                n as u64,
+                basek as u64,
+                a.at_mut_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                tmp_bytes.as_mut_ptr(),
+            );
+        }
+    }
+}
+
+unsafe impl ZnFillUniformImpl<Self> for FFT64 {
+    fn zn_fill_uniform_impl<R>(n: usize, basek: usize, res: &mut R, res_col: usize, k: usize, source: &mut Source)
+    where
+        R: ZnToMut,
+    {
+        let mut a: Zn<&mut [u8]> = res.to_mut();
+        let base2k: u64 = 1 << basek;
+        let mask: u64 = base2k - 1;
+        let base2k_half: i64 = (base2k >> 1) as i64;
+        (0..k.div_ceil(basek)).for_each(|j| {
+            a.at_mut(res_col, j)[..n]
+                .iter_mut()
+                .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
+        })
+    }
+}
+
+unsafe impl ZnFillDistF64Impl<Self> for FFT64 {
+    fn zn_fill_dist_f64_impl<R, D: rand::prelude::Distribution<f64>>(
+        n: usize,
+        basek: usize,
+        res: &mut R,
+        res_col: usize,
+        k: usize,
+        source: &mut Source,
+        dist: D,
+        bound: f64,
+    ) where
+        R: ZnToMut,
+    {
+        let mut a: Zn<&mut [u8]> = res.to_mut();
+        assert!(
+            (bound.log2().ceil() as i64) < 64,
+            "invalid bound: ceil(log2(bound))={} > 63",
+            (bound.log2().ceil() as i64)
+        );
+
+        let limb: usize = k.div_ceil(basek) - 1;
+        let basek_rem: usize = (limb + 1) * basek - k;
+
+        if basek_rem != 0 {
+            a.at_mut(res_col, limb)[..n].iter_mut().for_each(|a| {
+                let mut dist_f64: f64 = dist.sample(source);
+                while dist_f64.abs() > bound {
+                    dist_f64 = dist.sample(source)
+                }
+                *a = (dist_f64.round() as i64) << basek_rem;
+            });
+        } else {
+            a.at_mut(res_col, limb)[..n].iter_mut().for_each(|a| {
+                let mut dist_f64: f64 = dist.sample(source);
+                while dist_f64.abs() > bound {
+                    dist_f64 = dist.sample(source)
+                }
+                *a = dist_f64.round() as i64
+            });
+        }
+    }
+}
+
+unsafe impl ZnAddDistF64Impl<Self> for FFT64 {
+    fn zn_add_dist_f64_impl<R, D: rand::prelude::Distribution<f64>>(
+        n: usize,
+        basek: usize,
+        res: &mut R,
+        res_col: usize,
+        k: usize,
+        source: &mut Source,
+        dist: D,
+        bound: f64,
+    ) where
+        R: ZnToMut,
+    {
+        let mut a: Zn<&mut [u8]> = res.to_mut();
+        assert!(
+            (bound.log2().ceil() as i64) < 64,
+            "invalid bound: ceil(log2(bound))={} > 63",
+            (bound.log2().ceil() as i64)
+        );
+
+        let limb: usize = k.div_ceil(basek) - 1;
+        let basek_rem: usize = (limb + 1) * basek - k;
+
+        if basek_rem != 0 {
+            a.at_mut(res_col, limb)[..n].iter_mut().for_each(|a| {
+                let mut dist_f64: f64 = dist.sample(source);
+                while dist_f64.abs() > bound {
+                    dist_f64 = dist.sample(source)
+                }
+                *a += (dist_f64.round() as i64) << basek_rem;
+            });
+        } else {
+            a.at_mut(res_col, limb)[..n].iter_mut().for_each(|a| {
+                let mut dist_f64: f64 = dist.sample(source);
+                while dist_f64.abs() > bound {
+                    dist_f64 = dist.sample(source)
+                }
+                *a += dist_f64.round() as i64
+            });
+        }
+    }
+}
+
+unsafe impl ZnFillNormalImpl<Self> for FFT64
+where
+    Self: ZnFillDistF64Impl<Self>,
+{
+    fn zn_fill_normal_impl<R>(
+        n: usize,
+        basek: usize,
+        res: &mut R,
+        res_col: usize,
+        k: usize,
+        source: &mut Source,
+        sigma: f64,
+        bound: f64,
+    ) where
+        R: ZnToMut,
+    {
+        Self::zn_fill_dist_f64_impl(
+            n,
+            basek,
+            res,
+            res_col,
+            k,
+            source,
+            Normal::new(0.0, sigma).unwrap(),
+            bound,
+        );
+    }
+}
+
+unsafe impl ZnAddNormalImpl<Self> for FFT64
+where
+    Self: ZnAddDistF64Impl<Self>,
+{
+    fn zn_add_normal_impl<R>(
+        n: usize,
+        basek: usize,
+        res: &mut R,
+        res_col: usize,
+        k: usize,
+        source: &mut Source,
+        sigma: f64,
+        bound: f64,
+    ) where
+        R: ZnToMut,
+    {
+        Self::zn_add_dist_f64_impl(
+            n,
+            basek,
+            res,
+            res_col,
+            k,
+            source,
+            Normal::new(0.0, sigma).unwrap(),
+            bound,
+        );
+    }
+}
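In zn_fill_dist_f64_impl and zn_add_dist_f64_impl above, a value sampled at precision k is written into limb k.div_ceil(basek) - 1 and, when k is not a multiple of basek, shifted left by basek_rem so that it lands at the requested bit position. A tiny standalone check of that index arithmetic (illustrative only, not poulpy code):

// Reproduce the limb-index / shift computation used above.
fn noise_placement(basek: usize, k: usize) -> (usize, usize) {
    let limb = k.div_ceil(basek) - 1; // limb that receives the sampled value
    let basek_rem = (limb + 1) * basek - k; // left shift applied when k % basek != 0
    (limb, basek_rem)
}

fn main() {
    // basek = 8, k = 12: the value goes into limb 1, shifted left by 4 bits.
    assert_eq!(noise_placement(8, 12), (1, 4));
    // basek = 8, k = 16: the value goes into limb 1 with no shift.
    assert_eq!(noise_placement(8, 16), (1, 0));
    println!("ok");
}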
Submodule poulpy-backend/src/cpu_spqlios/spqlios-arithmetic updated: de62af3507...708e5d7e86
@@ -44,11 +44,10 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
     let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);

     let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_ggsw.k(), rank)
-            | GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_glwe_in.k())
+        GGSWCiphertext::encrypt_sk_scratch_space(&module, basek, ct_ggsw.k(), rank)
+            | GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_glwe_in.k())
             | GLWECiphertext::external_product_scratch_space(
                 &module,
-                n,
                 basek,
                 ct_glwe_out.k(),
                 ct_glwe_in.k(),
@@ -137,17 +136,9 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
     let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);

     let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_ggsw.k(), rank)
-            | GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_glwe.k())
-            | GLWECiphertext::external_product_inplace_scratch_space(
-                &module,
-                n,
-                basek,
-                ct_glwe.k(),
-                ct_ggsw.k(),
-                digits,
-                rank,
-            ),
+        GGSWCiphertext::encrypt_sk_scratch_space(&module, basek, ct_ggsw.k(), rank)
+            | GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_glwe.k())
+            | GLWECiphertext::external_product_inplace_scratch_space(&module, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
     );

     let mut source_xs = Source::new([0u8; 32]);
|
|||||||
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_rlwe_out, rank_out);
|
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_rlwe_out, rank_out);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
|
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
|
||||||
GGLWESwitchingKey::encrypt_sk_scratch_space(&module, n, basek, ksk.k(), rank_in, rank_out)
|
GGLWESwitchingKey::encrypt_sk_scratch_space(&module, basek, ksk.k(), rank_in, rank_out)
|
||||||
| GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct_in.k())
|
| GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_in.k())
|
||||||
| GLWECiphertext::keyswitch_scratch_space(
|
| GLWECiphertext::keyswitch_scratch_space(
|
||||||
&module,
|
&module,
|
||||||
n,
|
|
||||||
basek,
|
basek,
|
||||||
ct_out.k(),
|
ct_out.k(),
|
||||||
ct_in.k(),
|
ct_in.k(),
|
||||||
@@ -148,9 +147,9 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
|
|||||||
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
|
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
|
let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
|
||||||
GGLWESwitchingKey::encrypt_sk_scratch_space(&module, n, basek, ksk.k(), rank, rank)
|
GGLWESwitchingKey::encrypt_sk_scratch_space(&module, basek, ksk.k(), rank, rank)
|
||||||
| GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct.k())
|
| GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct.k())
|
||||||
| GLWECiphertext::keyswitch_inplace_scratch_space(&module, n, basek, ct.k(), ksk.k(), digits, rank),
|
| GLWECiphertext::keyswitch_inplace_scratch_space(&module, basek, ct.k(), ksk.k(), digits, rank),
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut source_xs: Source = Source::new([0u8; 32]);
|
let mut source_xs: Source = Source::new([0u8; 32]);
|
||||||
|
|||||||
@@ -45,8 +45,8 @@ fn main() {

     // Scratch space
     let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
-        GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct.k())
-            | GLWECiphertext::decrypt_scratch_space(&module, n, basek, ct.k()),
+        GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct.k())
+            | GLWECiphertext::decrypt_scratch_space(&module, basek, ct.k()),
     );

     // Generate secret-key
@@ -13,7 +13,6 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
     #[allow(clippy::too_many_arguments)]
     pub fn automorphism_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_out: usize,
         k_in: usize,
@@ -24,12 +23,11 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
     where
         Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
+        GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
     }

     pub fn automorphism_inplace_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_out: usize,
         k_ksk: usize,
@@ -39,7 +37,7 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
     where
         Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        GGLWEAutomorphismKey::automorphism_scratch_space(module, n, basek, k_out, k_out, k_ksk, digits, rank)
+        GGLWEAutomorphismKey::automorphism_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank)
     }
 }

@@ -17,7 +17,6 @@ impl GGSWCiphertext<Vec<u8>> {
     #[allow(clippy::too_many_arguments)]
     pub fn automorphism_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_out: usize,
         k_in: usize,
@@ -32,17 +31,16 @@ impl GGSWCiphertext<Vec<u8>> {
             VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         let out_size: usize = k_out.div_ceil(basek);
-        let ci_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
+        let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
         let ks_internal: usize =
-            GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
-        let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, n, basek, k_out, k_tsk, digits_tsk, rank);
+            GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
+        let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
         ci_dft + (ks_internal | expand)
     }

     #[allow(clippy::too_many_arguments)]
     pub fn automorphism_inplace_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_out: usize,
         k_ksk: usize,
@@ -56,7 +54,7 @@ impl GGSWCiphertext<Vec<u8>> {
             VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
         GGSWCiphertext::automorphism_scratch_space(
-            module, n, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
+            module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
         )
     }
 }
@@ -117,7 +115,6 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
            scratch.available()
                >= GGSWCiphertext::automorphism_scratch_space(
                    module,
-                   self.n(),
                    self.basek(),
                    self.k(),
                    lhs.k(),
@@ -13,7 +13,6 @@ impl GLWECiphertext<Vec<u8>> {
     #[allow(clippy::too_many_arguments)]
     pub fn automorphism_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_out: usize,
         k_in: usize,
@@ -24,12 +23,11 @@ impl GLWECiphertext<Vec<u8>> {
     where
         Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        Self::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
+        Self::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
     }

     pub fn automorphism_inplace_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_out: usize,
         k_ksk: usize,
@@ -39,7 +37,7 @@ impl GLWECiphertext<Vec<u8>> {
     where
         Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        Self::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
+        Self::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
     }
 }

@@ -15,7 +15,6 @@ use crate::{
 impl LWECiphertext<Vec<u8>> {
     pub fn from_glwe_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_lwe: usize,
         k_glwe: usize,
@@ -25,8 +24,8 @@ impl LWECiphertext<Vec<u8>> {
     where
         Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        GLWECiphertext::bytes_of(n, basek, k_lwe, 1)
-            + GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_lwe, k_glwe, k_ksk, 1, rank, 1)
+        GLWECiphertext::bytes_of(module.n(), basek, k_lwe, 1)
+            + GLWECiphertext::keyswitch_scratch_space(module, basek, k_lwe, k_glwe, k_ksk, 1, rank, 1)
     }
 }

@@ -15,7 +15,6 @@ use crate::{
 impl GLWECiphertext<Vec<u8>> {
     pub fn from_lwe_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k_lwe: usize,
         k_glwe: usize,
@@ -25,8 +24,8 @@ impl GLWECiphertext<Vec<u8>> {
     where
         Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_glwe, k_lwe, k_ksk, 1, 1, rank)
-            + GLWECiphertext::bytes_of(n, basek, k_lwe, 1)
+        GLWECiphertext::keyswitch_scratch_space(module, basek, k_glwe, k_lwe, k_ksk, 1, 1, rank)
+            + GLWECiphertext::bytes_of(module.n(), basek, k_lwe, 1)
     }
 }

@@ -9,13 +9,12 @@ use poulpy_hal::{
 use crate::layouts::{GLWECiphertext, GLWEPlaintext, Infos, prepared::GLWESecretPrepared};

 impl GLWECiphertext<Vec<u8>> {
-    pub fn decrypt_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+    pub fn decrypt_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
     where
         Module<B>: VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
         let size: usize = k.div_ceil(basek);
-        (module.vec_znx_normalize_tmp_bytes(n) | module.vec_znx_dft_alloc_bytes(n, 1, size))
-            + module.vec_znx_dft_alloc_bytes(n, 1, size)
+        (module.vec_znx_normalize_tmp_bytes() | module.vec_znx_dft_alloc_bytes(1, size)) + module.vec_znx_dft_alloc_bytes(1, size)
     }
 }

@@ -1,5 +1,5 @@
 use poulpy_hal::{
-    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxNormalizeInplace, ZnxView, ZnxViewMut},
+    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, ZnNormalizeInplace, ZnxView, ZnxViewMut},
     layouts::{Backend, DataMut, DataRef, Module, ScratchOwned},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };
@@ -14,7 +14,7 @@ where
 where
     DataPt: DataMut,
     DataSk: DataRef,
-    Module<B>: VecZnxNormalizeInplace<B>,
+    Module<B>: ZnNormalizeInplace<B>,
     B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
 {
     #[cfg(debug_assertions)]
@@ -30,7 +30,8 @@ where
            .map(|(x, y)| x * y)
            .sum::<i64>();
    });
-   module.vec_znx_normalize_inplace(
+   module.zn_normalize_inplace(
+       pt.n(),
        self.basek(),
        &mut pt.data,
        0,
@@ -18,11 +18,12 @@ use crate::{
 };

 impl GGLWEAutomorphismKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
     {
-        GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, n, basek, k, rank, rank) + GLWESecret::bytes_of(n, rank)
+        GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, basek, k, rank, rank)
+            + GLWESecret::bytes_of(module.n(), rank)
     }
 }

@@ -66,18 +67,12 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> {
        assert_eq!(sk.rank(), self.rank());
        assert!(
            scratch.available()
-               >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(
-                   module,
-                   sk.n(),
-                   self.basek(),
-                   self.k(),
-                   self.rank()
-               ),
+               >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
            scratch.available(),
            self.rank(),
            self.size(),
-           GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank())
+           GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
        )
    }

@@ -15,11 +15,11 @@ use crate::{
 };

 impl GGLWECiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GGLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
+        GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
     }
 }

@@ -71,13 +71,12 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
        assert_eq!(self.n(), sk.n());
        assert_eq!(pt.n(), sk.n());
        assert!(
-           scratch.available()
-               >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k()),
+           scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k()),
            "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
            scratch.available(),
            self.rank(),
            self.size(),
-           GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k())
+           GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k())
        );
        assert!(
            self.rows() * self.digits() * self.basek() <= self.k(),
@@ -17,7 +17,6 @@ use crate::{
 impl GGLWESwitchingKeyCompressed<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k: usize,
         rank_in: usize,
@@ -26,9 +25,9 @@ impl GGLWESwitchingKeyCompressed<Vec<u8>> {
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
     {
-        (GGLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k) | ScalarZnx::alloc_bytes(n, 1))
-            + ScalarZnx::alloc_bytes(n, rank_in)
-            + GLWESecretPrepared::bytes_of(module, n, rank_out)
+        (GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | ScalarZnx::alloc_bytes(module.n(), 1))
+            + ScalarZnx::alloc_bytes(module.n(), rank_in)
+            + GLWESecretPrepared::bytes_of(module, rank_out)
     }
 }

@@ -72,7 +71,6 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
            scratch.available()
                >= GGLWESwitchingKey::encrypt_sk_scratch_space(
                    module,
-                   sk_out.n(),
                    self.basek(),
                    self.k(),
                    self.rank_in(),
@@ -82,7 +80,6 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
            scratch.available(),
            GGLWESwitchingKey::encrypt_sk_scratch_space(
                module,
-               sk_out.n(),
                self.basek(),
                self.k(),
                self.rank_in(),
@@ -16,12 +16,12 @@ use crate::{
 };

 impl GGLWETensorKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>:
             SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
     {
-        GGLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k, rank)
+        GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k, rank)
     }
 }

@@ -15,11 +15,11 @@ use crate::{
 };

 impl GGSWCiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k, rank)
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k, rank)
     }
 }

@@ -14,11 +14,11 @@ use crate::{
 };

 impl GLWECiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
     }
 }

@@ -15,15 +15,15 @@ use crate::{
 };

 impl GGLWEAutomorphismKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank, rank) + GLWESecret::bytes_of(n, rank)
+        GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank, rank) + GLWESecret::bytes_of(module.n(), rank)
     }

-    pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, _n: usize, _basek: usize, _k: usize, _rank: usize) -> usize {
-        GGLWESwitchingKey::encrypt_pk_scratch_space(module, _n, _basek, _k, _rank, _rank)
+    pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, _basek: usize, _k: usize, _rank: usize) -> usize {
+        GGLWESwitchingKey::encrypt_pk_scratch_space(module, _basek, _k, _rank, _rank)
     }
 }

@@ -67,12 +67,12 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
        assert_eq!(sk.rank(), self.rank());
        assert!(
            scratch.available()
-               >= GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank()),
+               >= GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
            scratch.available(),
            self.rank(),
            self.size(),
-           GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k(), self.rank())
+           GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
        )
    }

@@ -14,15 +14,15 @@ use crate::{
 };

 impl GGLWECiphertext<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
-            + (GLWEPlaintext::byte_of(n, basek, k) | module.vec_znx_normalize_tmp_bytes(n))
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+            + (GLWEPlaintext::byte_of(module.n(), basek, k) | module.vec_znx_normalize_tmp_bytes())
     }

-    pub fn encrypt_pk_scratch_space<B: Backend>(_module: &Module<B>, _n: usize, _basek: usize, _k: usize, _rank: usize) -> usize {
+    pub fn encrypt_pk_scratch_space<B: Backend>(_module: &Module<B>, _basek: usize, _k: usize, _rank: usize) -> usize {
         unimplemented!()
     }
 }
@@ -75,12 +75,12 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
        assert_eq!(self.n(), sk.n());
        assert_eq!(pt.n(), sk.n());
        assert!(
-           scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k()),
+           scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
            "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
            scratch.available(),
            self.rank(),
            self.size(),
-           GGLWECiphertext::encrypt_sk_scratch_space(module, sk.n(), self.basek(), self.k())
+           GGLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
        );
        assert!(
            self.rows() * self.digits() * self.basek() <= self.k(),
@@ -17,7 +17,6 @@ use crate::{
 impl GGLWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend>(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k: usize,
         rank_in: usize,
@@ -26,20 +25,19 @@ impl GGLWESwitchingKey<Vec<u8>> {
     where
         Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        (GGLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k) | ScalarZnx::alloc_bytes(n, 1))
-            + ScalarZnx::alloc_bytes(n, rank_in)
-            + GLWESecretPrepared::bytes_of(module, n, rank_out)
+        (GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | ScalarZnx::alloc_bytes(module.n(), 1))
+            + ScalarZnx::alloc_bytes(module.n(), rank_in)
+            + GLWESecretPrepared::bytes_of(module, rank_out)
     }

     pub fn encrypt_pk_scratch_space<B: Backend>(
         module: &Module<B>,
-        _n: usize,
         _basek: usize,
         _k: usize,
         _rank_in: usize,
         _rank_out: usize,
     ) -> usize {
-        GGLWECiphertext::encrypt_pk_scratch_space(module, _n, _basek, _k, _rank_out)
+        GGLWECiphertext::encrypt_pk_scratch_space(module, _basek, _k, _rank_out)
     }
 }

@@ -83,7 +81,6 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
            scratch.available()
                >= GGLWESwitchingKey::encrypt_sk_scratch_space(
                    module,
-                   sk_out.n(),
                    self.basek(),
                    self.k(),
                    self.rank_in(),
@@ -93,7 +90,6 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
            scratch.available(),
            GGLWESwitchingKey::encrypt_sk_scratch_space(
                module,
-               sk_out.n(),
                self.basek(),
                self.k(),
                self.rank_in(),
@@ -18,17 +18,17 @@ use crate::{
 };

 impl GGLWETensorKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>:
             SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
     {
-        GLWESecretPrepared::bytes_of(module, n, rank)
-            + module.vec_znx_dft_alloc_bytes(n, rank, 1)
-            + module.vec_znx_big_alloc_bytes(n, 1, 1)
-            + module.vec_znx_dft_alloc_bytes(n, 1, 1)
-            + GLWESecret::bytes_of(n, 1)
-            + GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank, rank)
+        GLWESecretPrepared::bytes_of(module, rank)
+            + module.vec_znx_dft_alloc_bytes(rank, 1)
+            + module.vec_znx_big_alloc_bytes(1, 1)
+            + module.vec_znx_dft_alloc_bytes(1, 1)
+            + GLWESecret::bytes_of(module.n(), 1)
+            + GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank, rank)
     }
 }

@@ -14,15 +14,15 @@ use crate::{
 };

 impl GGSWCiphertext<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
         let size = k.div_ceil(basek);
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k)
-            + VecZnx::alloc_bytes(n, rank + 1, size)
-            + VecZnx::alloc_bytes(n, 1, size)
-            + module.vec_znx_dft_alloc_bytes(n, rank + 1, size)
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+            + VecZnx::alloc_bytes(module.n(), rank + 1, size)
+            + VecZnx::alloc_bytes(module.n(), 1, size)
+            + module.vec_znx_dft_alloc_bytes(rank + 1, size)
     }
 }

@@ -19,21 +19,24 @@ use crate::{
 };

 impl GLWECiphertext<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
 where
 Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
 {
 let size: usize = k.div_ceil(basek);
-module.vec_znx_normalize_tmp_bytes(n) + 2 * VecZnx::alloc_bytes(n, 1, size) + module.vec_znx_dft_alloc_bytes(n, 1, size)
+module.vec_znx_normalize_tmp_bytes()
++ 2 * VecZnx::alloc_bytes(module.n(), 1, size)
++ module.vec_znx_dft_alloc_bytes(1, size)
 }
-pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
 where
 Module<B>: VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
 {
 let size: usize = k.div_ceil(basek);
-((module.vec_znx_dft_alloc_bytes(n, 1, size) + module.vec_znx_big_alloc_bytes(n, 1, size)) | ScalarZnx::alloc_bytes(n, 1))
-+ module.svp_ppol_alloc_bytes(n, 1)
-+ module.vec_znx_normalize_tmp_bytes(n)
+((module.vec_znx_dft_alloc_bytes(1, size) + module.vec_znx_big_alloc_bytes(1, size))
+| ScalarZnx::alloc_bytes(module.n(), 1))
++ module.svp_ppol_alloc_bytes(1)
++ module.vec_znx_normalize_tmp_bytes()
 }
 }

@@ -69,10 +72,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 assert_eq!(sk.n(), self.n());
 assert_eq!(pt.n(), self.n());
 assert!(
-scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k()),
+scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
 "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
 scratch.available(),
-GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k())
+GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
 )
 }

@@ -107,10 +110,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 assert_eq!(self.rank(), sk.rank());
 assert_eq!(sk.n(), self.n());
 assert!(
-scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k()),
+scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
 "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
 scratch.available(),
-GLWECiphertext::encrypt_sk_scratch_space(module, self.n(), self.basek(), self.k())
+GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
 )
 }
 self.encrypt_sk_internal(
@@ -54,7 +54,6 @@ impl<D: DataMut> GLWEPublicKey<D> {
 // Its ok to allocate scratch space here since pk is usually generated only once.
 let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::encrypt_sk_scratch_space(
 module,
-self.n(),
 self.basek(),
 self.k(),
 ));
@@ -16,12 +16,13 @@ use crate::{
 };

 impl GLWEToLWESwitchingKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank_in: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_in: usize) -> usize
 where
 Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
 {
-GLWESecretPrepared::bytes_of(module, n, rank_in)
-+ (GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank_in, 1) | GLWESecret::bytes_of(n, rank_in))
+GLWESecretPrepared::bytes_of(module, rank_in)
++ (GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank_in, 1)
+| GLWESecret::bytes_of(module.n(), rank_in))
 }
 }

@@ -1,8 +1,6 @@
 use poulpy_hal::{
-api::{
-ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddNormal, VecZnxFillUniform, VecZnxNormalizeInplace, ZnxView, ZnxViewMut,
-},
-layouts::{Backend, DataMut, DataRef, Module, ScratchOwned, VecZnx},
+api::{ScratchOwnedAlloc, ScratchOwnedBorrow, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace, ZnxView, ZnxViewMut},
+layouts::{Backend, DataMut, DataRef, Module, ScratchOwned, Zn},
 oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 source::Source,
 };
@@ -23,7 +21,7 @@ impl<DataSelf: DataMut> LWECiphertext<DataSelf> {
 ) where
 DataPt: DataRef,
 DataSk: DataRef,
-Module<B>: VecZnxFillUniform + VecZnxAddNormal + VecZnxNormalizeInplace<B>,
+Module<B>: ZnFillUniform + ZnAddNormal + ZnNormalizeInplace<B>,
 B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
 {
 #[cfg(debug_assertions)]
@@ -34,9 +32,9 @@ impl<DataSelf: DataMut> LWECiphertext<DataSelf> {
 let basek: usize = self.basek();
 let k: usize = self.k();

-module.vec_znx_fill_uniform(basek, &mut self.data, 0, k, source_xa);
+module.zn_fill_uniform(self.n() + 1, basek, &mut self.data, 0, k, source_xa);

-let mut tmp_znx: VecZnx<Vec<u8>> = VecZnx::alloc(1, 1, self.size());
+let mut tmp_znx: Zn<Vec<u8>> = Zn::alloc(1, 1, self.size());

 let min_size = self.size().min(pt.size());

@@ -57,9 +55,19 @@ impl<DataSelf: DataMut> LWECiphertext<DataSelf> {
 .sum::<i64>();
 });

-module.vec_znx_add_normal(basek, &mut self.data, 0, k, source_xe, SIGMA, SIGMA_BOUND);
+module.zn_add_normal(
+1,
+basek,
+&mut self.data,
+0,
+k,
+source_xe,
+SIGMA,
+SIGMA_BOUND,
+);

-module.vec_znx_normalize_inplace(
+module.zn_normalize_inplace(
+1,
 basek,
 &mut tmp_znx,
 0,
@@ -15,13 +15,13 @@ use crate::{
 };

 impl LWESwitchingKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
 where
 Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
 {
-GLWESecret::bytes_of(n, 1)
-+ GLWESecretPrepared::bytes_of(module, n, 1)
-+ GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, 1, 1)
+GLWESecret::bytes_of(module.n(), 1)
++ GLWESecretPrepared::bytes_of(module, 1)
++ GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, 1, 1)
 }
 }

@@ -15,11 +15,11 @@ use crate::{
 };

 impl LWEToGLWESwitchingKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank_out: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_out: usize) -> usize
 where
 Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
 {
-GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, 1, rank_out) + GLWESecret::bytes_of(n, 1)
+GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, 1, rank_out) + GLWESecret::bytes_of(module.n(), 1)
 }
 }

@@ -12,7 +12,6 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn external_product_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -23,12 +22,11 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GGLWESwitchingKey::external_product_scratch_space(module, n, basek, k_out, k_in, ggsw_k, digits, rank)
+GGLWESwitchingKey::external_product_scratch_space(module, basek, k_out, k_in, ggsw_k, digits, rank)
 }

 pub fn external_product_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 ggsw_k: usize,
@@ -38,7 +36,7 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GGLWESwitchingKey::external_product_inplace_scratch_space(module, n, basek, k_out, ggsw_k, digits, rank)
+GGLWESwitchingKey::external_product_inplace_scratch_space(module, basek, k_out, ggsw_k, digits, rank)
 }
 }

@@ -12,7 +12,6 @@ impl GGLWESwitchingKey<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn external_product_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -23,12 +22,11 @@ impl GGLWESwitchingKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank)
+GLWECiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank)
 }

 pub fn external_product_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ggsw: usize,
@@ -38,7 +36,7 @@ impl GGLWESwitchingKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_inplace_scratch_space(module, n, basek, k_out, k_ggsw, digits, rank)
+GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_out, k_ggsw, digits, rank)
 }
 }

@@ -12,7 +12,6 @@ impl GGSWCiphertext<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn external_product_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -23,12 +22,11 @@ impl GGSWCiphertext<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank)
+GLWECiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank)
 }

 pub fn external_product_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ggsw: usize,
@@ -38,7 +36,7 @@ impl GGSWCiphertext<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_inplace_scratch_space(module, n, basek, k_out, k_ggsw, digits, rank)
+GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_out, k_ggsw, digits, rank)
 }
 }

@@ -86,7 +84,6 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 scratch.available()
 >= GGSWCiphertext::external_product_scratch_space(
 module,
-self.n(),
 self.basek(),
 self.k(),
 lhs.k(),
@@ -12,7 +12,6 @@ impl GLWECiphertext<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn external_product_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -26,10 +25,9 @@ impl GLWECiphertext<Vec<u8>> {
 let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
 let out_size: usize = k_out.div_ceil(basek);
 let ggsw_size: usize = k_ggsw.div_ceil(basek);
-let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, ggsw_size);
-let a_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, in_size);
+let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, ggsw_size);
+let a_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, in_size);
 let vmp: usize = module.vmp_apply_tmp_bytes(
-n,
 out_size,
 in_size,
 in_size, // rows
@@ -37,13 +35,12 @@ impl GLWECiphertext<Vec<u8>> {
 rank + 1, // cols out
 ggsw_size,
 );
-let normalize: usize = module.vec_znx_normalize_tmp_bytes(n);
+let normalize: usize = module.vec_znx_normalize_tmp_bytes();
 res_dft + a_dft + (vmp | normalize)
 }

 pub fn external_product_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ggsw: usize,
@@ -53,7 +50,7 @@ impl GLWECiphertext<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxNormalizeTmpBytes,
 {
-Self::external_product_scratch_space(module, n, basek, k_out, k_out, k_ggsw, digits, rank)
+Self::external_product_scratch_space(module, basek, k_out, k_out, k_ggsw, digits, rank)
 }
 }

@@ -91,7 +88,6 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 scratch.available()
 >= GLWECiphertext::external_product_scratch_space(
 module,
-self.n(),
 self.basek(),
 self.k(),
 lhs.k(),
@@ -89,7 +89,6 @@ impl GLWEPacker {
 /// Number of scratch space bytes required to call [Self::add].
 pub fn scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 ct_k: usize,
 k_ksk: usize,
@@ -99,7 +98,7 @@ impl GLWEPacker {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-pack_core_scratch_space(module, n, basek, ct_k, k_ksk, digits, rank)
+pack_core_scratch_space(module, basek, ct_k, k_ksk, digits, rank)
 }

 pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
@@ -180,7 +179,6 @@ impl GLWEPacker {

 fn pack_core_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 ct_k: usize,
 k_ksk: usize,
@@ -190,7 +188,7 @@ fn pack_core_scratch_space<B: Backend>(
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-combine_scratch_space(module, n, basek, ct_k, k_ksk, digits, rank)
+combine_scratch_space(module, basek, ct_k, k_ksk, digits, rank)
 }

 fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
@@ -275,7 +273,6 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(

 fn combine_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 ct_k: usize,
 k_ksk: usize,
@@ -285,9 +282,9 @@ fn combine_scratch_space<B: Backend>(
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-GLWECiphertext::bytes_of(n, basek, ct_k, rank)
-+ (GLWECiphertext::rsh_scratch_space(n)
-| GLWECiphertext::automorphism_scratch_space(module, n, basek, ct_k, ct_k, k_ksk, digits, rank))
+GLWECiphertext::bytes_of(module.n(), basek, ct_k, rank)
++ (GLWECiphertext::rsh_scratch_space(module.n())
+| GLWECiphertext::automorphism_scratch_space(module, basek, ct_k, ct_k, k_ksk, digits, rank))
 }

 /// [combine] merges two ciphertexts together.
@@ -30,7 +30,6 @@ impl GLWECiphertext<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn trace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 out_k: usize,
 in_k: usize,
@@ -41,12 +40,11 @@ impl GLWECiphertext<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-Self::automorphism_inplace_scratch_space(module, n, basek, out_k.min(in_k), ksk_k, digits, rank)
+Self::automorphism_inplace_scratch_space(module, basek, out_k.min(in_k), ksk_k, digits, rank)
 }

 pub fn trace_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 out_k: usize,
 ksk_k: usize,
@@ -56,7 +54,7 @@ impl GLWECiphertext<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-Self::automorphism_inplace_scratch_space(module, n, basek, out_k, ksk_k, digits, rank)
+Self::automorphism_inplace_scratch_space(module, basek, out_k, ksk_k, digits, rank)
 }
 }

@@ -15,7 +15,6 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn keyswitch_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -26,12 +25,11 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-GGLWESwitchingKey::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
+GGLWESwitchingKey::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
 }

 pub fn keyswitch_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ksk: usize,
@@ -41,7 +39,7 @@ impl GGLWEAutomorphismKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
+GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
 }
 }

@@ -92,7 +90,6 @@ impl GGLWESwitchingKey<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn keyswitch_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -104,14 +101,11 @@ impl GGLWESwitchingKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-GLWECiphertext::keyswitch_scratch_space(
-module, n, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out,
-)
+GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out)
 }

 pub fn keyswitch_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ksk: usize,
@@ -121,7 +115,7 @@ impl GGLWESwitchingKey<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
+GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
 }
 }

@@ -19,7 +19,6 @@ use crate::{
 impl GGSWCiphertext<Vec<u8>> {
 pub(crate) fn expand_row_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 self_k: usize,
 k_tsk: usize,
@@ -33,10 +32,9 @@ impl GGSWCiphertext<Vec<u8>> {
 let self_size_out: usize = self_k.div_ceil(basek);
 let self_size_in: usize = self_size_out.div_ceil(digits);

-let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, tsk_size);
-let tmp_a: usize = module.vec_znx_dft_alloc_bytes(n, 1, self_size_in);
+let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(rank + 1, tsk_size);
+let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, self_size_in);
 let vmp: usize = module.vmp_apply_tmp_bytes(
-n,
 self_size_out,
 self_size_in,
 self_size_in,
@@ -44,15 +42,14 @@ impl GGSWCiphertext<Vec<u8>> {
 rank,
 tsk_size,
 );
-let tmp_idft: usize = module.vec_znx_big_alloc_bytes(n, 1, tsk_size);
-let norm: usize = module.vec_znx_normalize_tmp_bytes(n);
+let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
+let norm: usize = module.vec_znx_normalize_tmp_bytes();
 tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
 }

 #[allow(clippy::too_many_arguments)]
 pub fn keyswitch_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -67,18 +64,17 @@ impl GGSWCiphertext<Vec<u8>> {
 VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
 let out_size: usize = k_out.div_ceil(basek);
-let res_znx: usize = VecZnx::alloc_bytes(n, rank + 1, out_size);
-let ci_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
-let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
-let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, n, basek, k_out, k_tsk, digits_tsk, rank);
-let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
+let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, out_size);
+let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
+let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
+let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
+let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
 res_znx + ci_dft + (ks | expand_rows | res_dft)
 }

 #[allow(clippy::too_many_arguments)]
 pub fn keyswitch_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ksk: usize,
@@ -92,7 +88,7 @@ impl GGSWCiphertext<Vec<u8>> {
 VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
 GGSWCiphertext::keyswitch_scratch_space(
-module, n, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
+module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
 )
 }
 }
@@ -217,7 +213,6 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 scratch.available()
 >= GGSWCiphertext::expand_row_scratch_space(
 module,
-self.n(),
 self.basek(),
 self.k(),
 tsk.k(),
@@ -12,7 +12,6 @@ impl GLWECiphertext<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
 pub fn keyswitch_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_in: usize,
@@ -27,24 +26,16 @@ impl GLWECiphertext<Vec<u8>> {
 let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
 let out_size: usize = k_out.div_ceil(basek);
 let ksk_size: usize = k_ksk.div_ceil(basek);
-let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank_out + 1, ksk_size); // TODO OPTIMIZE
-let ai_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank_in, in_size);
-let vmp: usize = module.vmp_apply_tmp_bytes(
-n,
-out_size,
-in_size,
-in_size,
-rank_in,
-rank_out + 1,
-ksk_size,
-) + module.vec_znx_dft_alloc_bytes(n, rank_in, in_size);
-let normalize: usize = module.vec_znx_big_normalize_tmp_bytes(n);
+let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank_out + 1, ksk_size); // TODO OPTIMIZE
+let ai_dft: usize = module.vec_znx_dft_alloc_bytes(rank_in, in_size);
+let vmp: usize = module.vmp_apply_tmp_bytes(out_size, in_size, in_size, rank_in, rank_out + 1, ksk_size)
++ module.vec_znx_dft_alloc_bytes(rank_in, in_size);
+let normalize: usize = module.vec_znx_big_normalize_tmp_bytes();
 res_dft + ((ai_dft + vmp) | normalize)
 }

 pub fn keyswitch_inplace_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_out: usize,
 k_ksk: usize,
@@ -54,7 +45,7 @@ impl GLWECiphertext<Vec<u8>> {
 where
 Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
 {
-Self::keyswitch_scratch_space(module, n, basek, k_out, k_out, k_ksk, digits, rank, rank)
+Self::keyswitch_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank, rank)
 }
 }

@@ -95,7 +86,6 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
 scratch.available()
 >= GLWECiphertext::keyswitch_scratch_space(
 module,
-self.n(),
 self.basek(),
 self.k(),
 lhs.k(),
@@ -117,7 +107,6 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
 scratch.available(),
 GLWECiphertext::keyswitch_scratch_space(
 module,
-self.n(),
 self.basek(),
 self.k(),
 lhs.k(),
@@ -15,7 +15,6 @@ use crate::{
 impl LWECiphertext<Vec<u8>> {
 pub fn keyswitch_scratch_space<B: Backend>(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k_lwe_out: usize,
 k_lwe_in: usize,
@@ -33,8 +32,8 @@ impl LWECiphertext<Vec<u8>> {
 + VecZnxBigAddSmallInplace<B>
 + VecZnxBigNormalize<B>,
 {
-GLWECiphertext::bytes_of(n, basek, k_lwe_out.max(k_lwe_in), 1)
-+ GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_lwe_out, k_ksk, 1, 1)
+GLWECiphertext::bytes_of(module.n(), basek, k_lwe_out.max(k_lwe_in), 1)
++ GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_lwe_out, k_ksk, 1, 1)
 }
 }

@@ -106,11 +106,11 @@ impl<D: DataRef> WriterTo for GGLWEAutomorphismKeyCompressed<D> {
 }
 }

-impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWEAutomorphismKeyCompressed<DR>> for GGLWEAutomorphismKey<D> {
-fn decompress(&mut self, module: &Module<B>, other: &GGLWEAutomorphismKeyCompressed<DR>)
+impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWEAutomorphismKeyCompressed<DR>> for GGLWEAutomorphismKey<D>
 where
 Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &GGLWEAutomorphismKeyCompressed<DR>) {
 self.key.decompress(module, &other.key);
 self.p = other.p;
 }
@@ -194,11 +194,11 @@ impl<D: DataRef> WriterTo for GGLWECiphertextCompressed<D> {
 }
 }

-impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGLWECiphertextCompressed<DR>> for GGLWECiphertext<D> {
-fn decompress(&mut self, module: &Module<B>, other: &GGLWECiphertextCompressed<DR>)
+impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGLWECiphertextCompressed<DR>> for GGLWECiphertext<D>
 where
 Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &GGLWECiphertextCompressed<DR>) {
 #[cfg(debug_assertions)]
 {
 use poulpy_hal::api::ZnxInfos;
@@ -115,11 +115,11 @@ impl<D: DataRef> WriterTo for GGLWESwitchingKeyCompressed<D> {
 }
 }

-impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWESwitchingKeyCompressed<DR>> for GGLWESwitchingKey<D> {
-fn decompress(&mut self, module: &Module<B>, other: &GGLWESwitchingKeyCompressed<DR>)
+impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWESwitchingKeyCompressed<DR>> for GGLWESwitchingKey<D>
 where
 Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &GGLWESwitchingKeyCompressed<DR>) {
 self.key.decompress(module, &other.key);
 self.sk_in_n = other.sk_in_n;
 self.sk_out_n = other.sk_out_n;
@@ -139,11 +139,11 @@ impl<D: DataMut> GGLWETensorKeyCompressed<D> {
 }
 }

-impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWETensorKeyCompressed<DR>> for GGLWETensorKey<D> {
-fn decompress(&mut self, module: &Module<B>, other: &GGLWETensorKeyCompressed<DR>)
+impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, GGLWETensorKeyCompressed<DR>> for GGLWETensorKey<D>
 where
 Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &GGLWETensorKeyCompressed<DR>) {
 #[cfg(debug_assertions)]
 {
 assert_eq!(
@@ -185,11 +185,11 @@ impl<D: DataRef> WriterTo for GGSWCiphertextCompressed<D> {
 }
 }

-impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGSWCiphertextCompressed<DR>> for GGSWCiphertext<D> {
-fn decompress(&mut self, module: &Module<B>, other: &GGSWCiphertextCompressed<DR>)
+impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GGSWCiphertextCompressed<DR>> for GGSWCiphertext<D>
 where
 Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &GGSWCiphertextCompressed<DR>) {
 #[cfg(debug_assertions)]
 {
 assert_eq!(self.rank(), other.rank())
@@ -111,11 +111,11 @@ impl<D: DataRef> WriterTo for GLWECiphertextCompressed<D> {
 }
 }

-impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GLWECiphertextCompressed<DR>> for GLWECiphertext<D> {
-fn decompress(&mut self, module: &Module<B>, other: &GLWECiphertextCompressed<DR>)
+impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GLWECiphertextCompressed<DR>> for GLWECiphertext<D>
 where
-Module<B>: VecZnxCopy + VecZnxFillUniform,
+Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &GLWECiphertextCompressed<DR>) {
 #[cfg(debug_assertions)]
 {
 use poulpy_hal::api::ZnxInfos;
@@ -93,7 +93,7 @@ impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
 ))
 }

-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank_in: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_in: usize) -> usize
 where
 Module<B>: VecZnxDftAllocBytes
 + VecZnxBigNormalize<B>
@@ -112,6 +112,6 @@ impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
 + SvpPPolAllocBytes
 + SvpPPolAlloc<B>,
 {
-GLWEToLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank_in)
+GLWEToLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank_in)
 }
 }
@@ -1,7 +1,7 @@
 use std::fmt;

 use poulpy_hal::{
-api::{FillUniform, Reset, VecZnxFillUniform, ZnxInfos, ZnxView, ZnxViewMut},
+api::{FillUniform, Reset, ZnFillUniform, ZnxInfos, ZnxView, ZnxViewMut},
 layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, VecZnx, WriterTo},
 source::Source,
 };
@@ -117,13 +117,20 @@ impl<D: DataRef> WriterTo for LWECiphertextCompressed<D> {
 }
 }

-impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, LWECiphertextCompressed<DR>> for LWECiphertext<D> {
-fn decompress(&mut self, module: &Module<B>, other: &LWECiphertextCompressed<DR>)
+impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, LWECiphertextCompressed<DR>> for LWECiphertext<D>
 where
-Module<B>: VecZnxFillUniform,
+Module<B>: ZnFillUniform,
 {
-let mut source = Source::new(other.seed);
-module.vec_znx_fill_uniform(other.basek(), &mut self.data, 0, other.k(), &mut source);
+fn decompress(&mut self, module: &Module<B>, other: &LWECiphertextCompressed<DR>) {
+let mut source: Source = Source::new(other.seed);
+module.zn_fill_uniform(
+self.n(),
+other.basek(),
+&mut self.data,
+0,
+other.k(),
+&mut source,
+);
 (0..self.size()).for_each(|i| {
 self.data.at_mut(0, i)[0] = other.data.at(0, i)[0];
 });
@@ -94,7 +94,7 @@ impl LWESwitchingKeyCompressed<Vec<u8>> {
 ))
 }

-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
 where
 Module<B>: VecZnxDftAllocBytes
 + VecZnxBigNormalize<B>
@@ -113,15 +113,15 @@ impl LWESwitchingKeyCompressed<Vec<u8>> {
 + SvpPPolAllocBytes
 + SvpPPolAlloc<B>,
 {
-LWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k)
+LWESwitchingKey::encrypt_sk_scratch_space(module, basek, k)
 }
 }

-impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, LWESwitchingKeyCompressed<DR>> for LWESwitchingKey<D> {
-fn decompress(&mut self, module: &Module<B>, other: &LWESwitchingKeyCompressed<DR>)
+impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, LWESwitchingKeyCompressed<DR>> for LWESwitchingKey<D>
 where
-Module<B>: VecZnxCopy + VecZnxFillUniform,
+Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &LWESwitchingKeyCompressed<DR>) {
 self.0.decompress(module, &other.0);
 }
 }
@@ -95,7 +95,7 @@ impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
 ))
 }

-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank_out: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_out: usize) -> usize
 where
 Module<B>: VecZnxDftAllocBytes
 + VecZnxBigNormalize<B>
@@ -114,15 +114,15 @@ impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
 + SvpPPolAllocBytes
 + SvpPPolAlloc<B>,
 {
-LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k, rank_out)
+LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank_out)
 }
 }

-impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, LWEToGLWESwitchingKeyCompressed<DR>> for LWEToGLWESwitchingKey<D> {
-fn decompress(&mut self, module: &Module<B>, other: &LWEToGLWESwitchingKeyCompressed<DR>)
+impl<D: DataMut, DR: DataRef, B: Backend> Decompress<B, LWEToGLWESwitchingKeyCompressed<DR>> for LWEToGLWESwitchingKey<D>
 where
-Module<B>: VecZnxCopy + VecZnxFillUniform,
+Module<B>: VecZnxFillUniform + VecZnxCopy,
 {
+fn decompress(&mut self, module: &Module<B>, other: &LWEToGLWESwitchingKeyCompressed<DR>) {
 self.0.decompress(module, &other.0);
 }
 }
@@ -20,13 +20,8 @@ pub use lwe_ct::*;
 pub use lwe_ksk::*;
 pub use lwe_to_glwe_ksk::*;

-use poulpy_hal::{
-api::{VecZnxCopy, VecZnxFillUniform},
-layouts::{Backend, Module},
-};
+use poulpy_hal::layouts::{Backend, Module};

 pub trait Decompress<B: Backend, C> {
-fn decompress(&mut self, module: &Module<B>, other: &C)
-where
-Module<B>: VecZnxFillUniform + VecZnxCopy;
+fn decompress(&mut self, module: &Module<B>, other: &C);
 }
@@ -2,25 +2,25 @@ use std::fmt;

 use poulpy_hal::{
 api::{FillUniform, Reset, ZnxInfos},
-layouts::{Data, DataMut, DataRef, ReaderFrom, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo},
+layouts::{Data, DataMut, DataRef, ReaderFrom, WriterTo, Zn, ZnToMut, ZnToRef},
 source::Source,
 };

 #[derive(PartialEq, Eq, Clone)]
 pub struct LWECiphertext<D: Data> {
-pub(crate) data: VecZnx<D>,
+pub(crate) data: Zn<D>,
 pub(crate) k: usize,
 pub(crate) basek: usize,
 }

 impl<D: DataRef> LWECiphertext<D> {
-pub fn data(&self) -> &VecZnx<D> {
+pub fn data(&self) -> &Zn<D> {
 &self.data
 }
 }

 impl<D: DataMut> LWECiphertext<D> {
-pub fn data_mut(&mut self) -> &VecZnx<D> {
+pub fn data_mut(&mut self) -> &Zn<D> {
 &mut self.data
 }
 }
@@ -53,7 +53,7 @@ impl<D: DataMut> Reset for LWECiphertext<D> {

 impl<D: DataMut> FillUniform for LWECiphertext<D>
 where
-VecZnx<D>: FillUniform,
+Zn<D>: FillUniform,
 {
 fn fill_uniform(&mut self, source: &mut Source) {
 self.data.fill_uniform(source);
@@ -63,7 +63,7 @@ where
 impl LWECiphertext<Vec<u8>> {
 pub fn alloc(n: usize, basek: usize, k: usize) -> Self {
 Self {
-data: VecZnx::alloc(n + 1, 1, k.div_ceil(basek)),
+data: Zn::alloc(n + 1, 1, k.div_ceil(basek)),
 k,
 basek,
 }
@@ -72,9 +72,9 @@ impl LWECiphertext<Vec<u8>> {

 impl<D: Data> Infos for LWECiphertext<D>
 where
-VecZnx<D>: ZnxInfos,
+Zn<D>: ZnxInfos,
 {
-type Inner = VecZnx<D>;
+type Inner = Zn<D>;

 fn n(&self) -> usize {
 &self.inner().n() - 1
@@ -1,11 +1,11 @@
 use std::fmt;

-use poulpy_hal::layouts::{Data, DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef};
+use poulpy_hal::layouts::{Data, DataMut, DataRef, Zn, ZnToMut, ZnToRef};

 use crate::layouts::{Infos, SetMetaData};

 pub struct LWEPlaintext<D: Data> {
-pub(crate) data: VecZnx<D>,
+pub(crate) data: Zn<D>,
 pub(crate) k: usize,
 pub(crate) basek: usize,
 }
@@ -13,7 +13,7 @@ pub struct LWEPlaintext<D: Data> {
 impl LWEPlaintext<Vec<u8>> {
 pub fn alloc(basek: usize, k: usize) -> Self {
 Self {
-data: VecZnx::alloc(1, 1, k.div_ceil(basek)),
+data: Zn::alloc(1, 1, k.div_ceil(basek)),
 k,
 basek,
 }
@@ -33,7 +33,7 @@ impl<D: DataRef> fmt::Display for LWEPlaintext<D> {
 }

 impl<D: Data> Infos for LWEPlaintext<D> {
-type Inner = VecZnx<D>;
+type Inner = Zn<D>;

 fn inner(&self) -> &Self::Inner {
 &self.data
@@ -89,7 +89,7 @@ impl<D: DataMut> LWEPlaintextToMut for LWEPlaintext<D> {
 }

 impl<D: DataMut> LWEPlaintext<D> {
-pub fn data_mut(&mut self) -> &mut VecZnx<D> {
+pub fn data_mut(&mut self) -> &mut Zn<D> {
 &mut self.data
 }
 }
@@ -15,21 +15,21 @@ pub struct GGLWEAutomorphismKeyPrepared<D: Data, B: Backend> {
 }

 impl<B: Backend> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
-pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
+pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
 where
 Module<B>: VmpPMatAlloc<B>,
 {
 GGLWEAutomorphismKeyPrepared::<Vec<u8>, B> {
-key: GGLWESwitchingKeyPrepared::alloc(module, n, basek, k, rows, digits, rank, rank),
+key: GGLWESwitchingKeyPrepared::alloc(module, basek, k, rows, digits, rank, rank),
 p: 0,
 }
 }

-pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
+pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
 where
 Module<B>: VmpPMatAllocBytes,
 {
-GGLWESwitchingKeyPrepared::bytes_of(module, n, basek, k, rows, digits, rank, rank)
+GGLWESwitchingKeyPrepared::bytes_of(module, basek, k, rows, digits, rank, rank)
 }
 }

@@ -88,7 +88,6 @@ where
 fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
 let mut atk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> = GGLWEAutomorphismKeyPrepared::alloc(
 module,
-self.n(),
 self.basek(),
 self.k(),
 self.rows(),
@@ -18,16 +18,7 @@ pub struct GGLWECiphertextPrepared<D: Data, B: Backend> {

 impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
 #[allow(clippy::too_many_arguments)]
-pub fn alloc(
-module: &Module<B>,
-n: usize,
-basek: usize,
-k: usize,
-rows: usize,
-digits: usize,
-rank_in: usize,
-rank_out: usize,
-) -> Self
+pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
 where
 Module<B>: VmpPMatAlloc<B>,
 {
@@ -48,7 +39,7 @@ impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
 );

 Self {
-data: module.vmp_pmat_alloc(n, rows, rank_in, rank_out + 1, size),
+data: module.vmp_pmat_alloc(rows, rank_in, rank_out + 1, size),
 basek,
 k,
 digits,
@@ -58,7 +49,6 @@ impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
 #[allow(clippy::too_many_arguments)]
 pub fn bytes_of(
 module: &Module<B>,
-n: usize,
 basek: usize,
 k: usize,
 rows: usize,
@@ -85,7 +75,7 @@ impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
 size
 );

-module.vmp_pmat_alloc_bytes(n, rows, rank_in, rank_out + 1, rows)
+module.vmp_pmat_alloc_bytes(rows, rank_in, rank_out + 1, rows)
 }
 }

@@ -142,7 +132,6 @@ where
 fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWECiphertextPrepared<Vec<u8>, B> {
 let mut atk_prepared: GGLWECiphertextPrepared<Vec<u8>, B> = GGLWECiphertextPrepared::alloc(
 module,
-self.n(),
 self.basek(),
 self.k(),
 self.rows(),
@@ -17,21 +17,12 @@ pub struct GGLWESwitchingKeyPrepared<D: Data, B: Backend> {

 impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
     #[allow(clippy::too_many_arguments)]
-    pub fn alloc(
-        module: &Module<B>,
-        n: usize,
-        basek: usize,
-        k: usize,
-        rows: usize,
-        digits: usize,
-        rank_in: usize,
-        rank_out: usize,
-    ) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
         GGLWESwitchingKeyPrepared::<Vec<u8>, B> {
-            key: GGLWECiphertextPrepared::alloc(module, n, basek, k, rows, digits, rank_in, rank_out),
+            key: GGLWECiphertextPrepared::alloc(module, basek, k, rows, digits, rank_in, rank_out),
             sk_in_n: 0,
             sk_out_n: 0,
         }
@@ -40,7 +31,6 @@ impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
     #[allow(clippy::too_many_arguments)]
     pub fn bytes_of(
         module: &Module<B>,
-        n: usize,
         basek: usize,
         k: usize,
         rows: usize,
@@ -51,7 +41,7 @@ impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        GGLWECiphertextPrepared::bytes_of(module, n, basek, k, rows, digits, rank_in, rank_out)
+        GGLWECiphertextPrepared::bytes_of(module, basek, k, rows, digits, rank_in, rank_out)
     }
 }

@@ -115,7 +105,6 @@ where
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
         let mut atk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, B> = GGLWESwitchingKeyPrepared::alloc(
             module,
-            self.n(),
             self.basek(),
             self.k(),
             self.rows(),
@@ -14,7 +14,7 @@ pub struct GGLWETensorKeyPrepared<D: Data, B: Backend> {
 }

 impl<B: Backend> GGLWETensorKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
@@ -22,18 +22,18 @@ impl<B: Backend> GGLWETensorKeyPrepared<Vec<u8>, B> {
         let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
         (0..pairs).for_each(|_| {
             keys.push(GGLWESwitchingKeyPrepared::alloc(
-                module, n, basek, k, rows, digits, 1, rank,
+                module, basek, k, rows, digits, 1, rank,
             ));
         });
         Self { keys }
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
         let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
-        pairs * GGLWESwitchingKeyPrepared::bytes_of(module, n, basek, k, rows, digits, 1, rank)
+        pairs * GGLWESwitchingKeyPrepared::bytes_of(module, basek, k, rows, digits, 1, rank)
     }
 }

@@ -118,7 +118,6 @@ where
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWETensorKeyPrepared<Vec<u8>, B> {
         let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> = GGLWETensorKeyPrepared::alloc(
             module,
-            self.n(),
             self.basek(),
             self.k(),
             self.rows(),
@@ -17,7 +17,7 @@ pub struct GGSWCiphertextPrepared<D: Data, B: Backend> {
 }

 impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
@@ -40,14 +40,14 @@ impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
         );

         Self {
-            data: module.vmp_pmat_alloc(n, rows, rank + 1, rank + 1, k.div_ceil(basek)),
+            data: module.vmp_pmat_alloc(rows, rank + 1, rank + 1, k.div_ceil(basek)),
             basek,
             k,
             digits,
         }
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
@@ -67,7 +67,7 @@ impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
             size
         );

-        module.vmp_pmat_alloc_bytes(n, rows, rank + 1, rank + 1, size)
+        module.vmp_pmat_alloc_bytes(rows, rank + 1, rank + 1, size)
     }
 }

@@ -122,7 +122,6 @@ where
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGSWCiphertextPrepared<Vec<u8>, B> {
         let mut ggsw_prepared: GGSWCiphertextPrepared<Vec<u8>, B> = GGSWCiphertextPrepared::alloc(
             module,
-            self.n(),
             self.basek(),
             self.k(),
             self.rows(),
@@ -42,23 +42,23 @@ impl<D: Data, B: Backend> GLWEPublicKeyPrepared<D, B> {
 }

 impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
     where
         Module<B>: VecZnxDftAlloc<B>,
     {
         Self {
-            data: module.vec_znx_dft_alloc(n, rank + 1, k.div_ceil(basek)),
+            data: module.vec_znx_dft_alloc(rank + 1, k.div_ceil(basek)),
             basek,
             k,
             dist: Distribution::NONE,
         }
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
     where
         Module<B>: VecZnxDftAllocBytes,
     {
-        module.vec_znx_dft_alloc_bytes(n, rank + 1, k.div_ceil(basek))
+        module.vec_znx_dft_alloc_bytes(rank + 1, k.div_ceil(basek))
     }
 }

@@ -68,7 +68,7 @@ where
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GLWEPublicKeyPrepared<Vec<u8>, B> {
         let mut pk_prepared: GLWEPublicKeyPrepared<Vec<u8>, B> =
-            GLWEPublicKeyPrepared::alloc(module, self.n(), self.basek(), self.k(), self.rank());
+            GLWEPublicKeyPrepared::alloc(module, self.basek(), self.k(), self.rank());
         pk_prepared.prepare(module, self, scratch);
         pk_prepared
     }
@@ -17,21 +17,21 @@ pub struct GLWESecretPrepared<D: Data, B: Backend> {
 }

 impl<B: Backend> GLWESecretPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, rank: usize) -> Self
+    pub fn alloc(module: &Module<B>, rank: usize) -> Self
     where
         Module<B>: SvpPPolAlloc<B>,
     {
         Self {
-            data: module.svp_ppol_alloc(n, rank),
+            data: module.svp_ppol_alloc(rank),
             dist: Distribution::NONE,
         }
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, rank: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, rank: usize) -> usize
     where
         Module<B>: SvpPPolAllocBytes,
     {
-        module.svp_ppol_alloc_bytes(n, rank)
+        module.svp_ppol_alloc_bytes(rank)
     }
 }

@@ -54,7 +54,7 @@ where
     Module<B>: SvpPrepare<B> + SvpPPolAlloc<B>,
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut poulpy_hal::layouts::Scratch<B>) -> GLWESecretPrepared<Vec<u8>, B> {
-        let mut sk_dft: GLWESecretPrepared<Vec<u8>, B> = GLWESecretPrepared::alloc(module, self.n(), self.rank());
+        let mut sk_dft: GLWESecretPrepared<Vec<u8>, B> = GLWESecretPrepared::alloc(module, self.rank());
         sk_dft.prepare(module, self, scratch);
         sk_dft
     }
@@ -46,20 +46,20 @@ impl<D: Data, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
 }

 impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, rank_in: usize) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, rank_in: usize) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
         Self(GGLWESwitchingKeyPrepared::alloc(
-            module, n, basek, k, rows, 1, rank_in, 1,
+            module, basek, k, rows, 1, rank_in, 1,
         ))
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, n, basek, k, rows, digits, rank_in, 1)
+        GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, rank_in, 1)
     }
 }

@@ -70,7 +70,6 @@ where
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
         let mut ksk_prepared: GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> = GLWEToLWESwitchingKeyPrepared::alloc(
             module,
-            self.0.n(),
             self.0.basek(),
             self.0.k(),
             self.0.rows(),
@@ -46,20 +46,20 @@ impl<D: Data, B: Backend> LWESwitchingKeyPrepared<D, B> {
 }

 impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
         Self(GGLWESwitchingKeyPrepared::alloc(
-            module, n, basek, k, rows, 1, 1, 1,
+            module, basek, k, rows, 1, 1, 1,
         ))
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, n, basek, k, rows, digits, 1, 1)
+        GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, 1, 1)
     }
 }

@@ -68,13 +68,8 @@ where
     Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>,
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> LWESwitchingKeyPrepared<Vec<u8>, B> {
-        let mut ksk_prepared: LWESwitchingKeyPrepared<Vec<u8>, B> = LWESwitchingKeyPrepared::alloc(
-            module,
-            self.0.n(),
-            self.0.basek(),
-            self.0.k(),
-            self.0.rows(),
-        );
+        let mut ksk_prepared: LWESwitchingKeyPrepared<Vec<u8>, B> =
+            LWESwitchingKeyPrepared::alloc(module, self.0.basek(), self.0.k(), self.0.rows());
         ksk_prepared.prepare(module, self, scratch);
         ksk_prepared
     }
@@ -47,20 +47,20 @@ impl<D: Data, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
 }

 impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, rank_out: usize) -> Self
+    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, rank_out: usize) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
         Self(GGLWESwitchingKeyPrepared::alloc(
-            module, n, basek, k, rows, 1, 1, rank_out,
+            module, basek, k, rows, 1, 1, rank_out,
         ))
     }

-    pub fn bytes_of(module: &Module<B>, n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_out: usize) -> usize
+    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_out: usize) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, n, basek, k, rows, digits, 1, rank_out)
+        GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, 1, rank_out)
     }
 }

@@ -71,7 +71,6 @@ where
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
         let mut ksk_prepared: LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> = LWEToGLWESwitchingKeyPrepared::alloc(
             module,
-            self.0.n(),
             self.0.basek(),
             self.0.k(),
             self.0.rows(),
@@ -36,12 +36,7 @@ impl<D: DataRef> GGLWECiphertext<D> {
         let basek: usize = self.basek();
         let k: usize = self.k();

-        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(
-            module,
-            self.n(),
-            basek,
-            k,
-        ));
+        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(module, basek, k));
         let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);

         (0..self.rank_in()).for_each(|col_i| {
@@ -45,12 +45,11 @@ impl<D: DataRef> GGSWCiphertext<D> {

         let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
         let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
-        let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(self.n(), 1, self.size());
-        let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(self.n(), 1, self.size());
+        let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(1, self.size());
+        let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());

-        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-            GLWECiphertext::decrypt_scratch_space(module, self.n(), basek, k) | module.vec_znx_normalize_tmp_bytes(self.n()),
-        );
+        let mut scratch: ScratchOwned<B> =
+            ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(module, basek, k) | module.vec_znx_normalize_tmp_bytes());

         (0..self.rank() + 1).for_each(|col_j| {
             (0..self.rows()).for_each(|row_i| {
@@ -112,12 +111,11 @@ impl<D: DataRef> GGSWCiphertext<D> {

         let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
         let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(self.n(), basek, k);
-        let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(self.n(), 1, self.size());
-        let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(self.n(), 1, self.size());
+        let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(1, self.size());
+        let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());

-        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-            GLWECiphertext::decrypt_scratch_space(module, self.n(), basek, k) | module.vec_znx_normalize_tmp_bytes(module.n()),
-        );
+        let mut scratch: ScratchOwned<B> =
+            ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(module, basek, k) | module.vec_znx_normalize_tmp_bytes());

         (0..self.rank() + 1).for_each(|col_j| {
             (0..self.rows()).for_each(|row_i| {
@@ -41,7 +41,6 @@ impl<D: DataRef> GLWECiphertext<D> {

         let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::decrypt_scratch_space(
             module,
-            self.n(),
             self.basek(),
             self.k(),
         ));
@@ -92,8 +92,8 @@ pub fn test_gglwe_automorphism_key_automorphism<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_apply, rank)
-            | GGLWEAutomorphismKey::automorphism_scratch_space(module, n, basek, k_out, k_in, k_apply, digits, rank),
+        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, k_apply, rank)
+            | GGLWEAutomorphismKey::automorphism_scratch_space(module, basek, k_out, k_in, k_apply, digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -120,7 +120,7 @@ pub fn test_gglwe_automorphism_key_automorphism<B>(
     );

     let mut auto_key_apply_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> =
-        GGLWEAutomorphismKeyPrepared::alloc(module, n, basek, k_apply, rows_apply, digits, rank);
+        GGLWEAutomorphismKeyPrepared::alloc(module, basek, k_apply, rows_apply, digits, rank);

     auto_key_apply_prepared.prepare(module, &auto_key_apply, scratch.borrow());

@@ -266,8 +266,8 @@ pub fn test_gglwe_automorphism_key_automorphism_inplace<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_apply, rank)
-            | GGLWEAutomorphismKey::automorphism_inplace_scratch_space(module, n, basek, k_in, k_apply, digits, rank),
+        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, k_apply, rank)
+            | GGLWEAutomorphismKey::automorphism_inplace_scratch_space(module, basek, k_in, k_apply, digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -294,7 +294,7 @@ pub fn test_gglwe_automorphism_key_automorphism_inplace<B>(
     );

     let mut auto_key_apply_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> =
-        GGLWEAutomorphismKeyPrepared::alloc(module, n, basek, k_apply, rows_apply, digits, rank);
+        GGLWEAutomorphismKeyPrepared::alloc(module, basek, k_apply, rows_apply, digits, rank);

     auto_key_apply_prepared.prepare(module, &auto_key_apply, scratch.borrow());

@@ -102,11 +102,11 @@ pub fn test_ggsw_automorphism<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_in, rank)
-            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
-            | GGLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_in, rank)
+            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
+            | GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
             | GGSWCiphertext::automorphism_scratch_space(
-                module, n, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
+                module, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
             ),
     );

@@ -144,11 +144,11 @@ pub fn test_ggsw_automorphism<B>(
     );

     let mut auto_key_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> =
-        GGLWEAutomorphismKeyPrepared::alloc(module, n, basek, k_ksk, rows, digits, rank);
+        GGLWEAutomorphismKeyPrepared::alloc(module, basek, k_ksk, rows, digits, rank);
     auto_key_prepared.prepare(module, &auto_key, scratch.borrow());

     let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> =
-        GGLWETensorKeyPrepared::alloc(module, n, basek, k_tsk, rows, digits, rank);
+        GGLWETensorKeyPrepared::alloc(module, basek, k_tsk, rows, digits, rank);
     tsk_prepared.prepare(module, &tensor_key, scratch.borrow());

     ct_out.automorphism(
@@ -255,10 +255,10 @@ pub fn test_ggsw_automorphism_inplace<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct, rank)
-            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
-            | GGLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
-            | GGSWCiphertext::automorphism_inplace_scratch_space(module, n, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ct, rank)
+            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
+            | GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
+            | GGSWCiphertext::automorphism_inplace_scratch_space(module, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
     );

     let var_xs: f64 = 0.5;
@@ -295,11 +295,11 @@ pub fn test_ggsw_automorphism_inplace<B>(
     );

     let mut auto_key_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> =
-        GGLWEAutomorphismKeyPrepared::alloc(module, n, basek, k_ksk, rows, digits, rank);
+        GGLWEAutomorphismKeyPrepared::alloc(module, basek, k_ksk, rows, digits, rank);
     auto_key_prepared.prepare(module, &auto_key, scratch.borrow());

     let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> =
-        GGLWETensorKeyPrepared::alloc(module, n, basek, k_tsk, rows, digits, rank);
+        GGLWETensorKeyPrepared::alloc(module, basek, k_tsk, rows, digits, rank);
     tsk_prepared.prepare(module, &tensor_key, scratch.borrow());

     ct.automorphism_inplace(module, &auto_key_prepared, &tsk_prepared, scratch.borrow());
@@ -89,12 +89,11 @@ pub fn test_glwe_automorphism<B>(
     module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_in, &mut source_xa);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, autokey.k(), rank)
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, ct_out.k())
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_in.k())
+        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, autokey.k(), rank)
+            | GLWECiphertext::decrypt_scratch_space(module, basek, ct_out.k())
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_in.k())
             | GLWECiphertext::automorphism_scratch_space(
                 module,
-                n,
                 basek,
                 ct_out.k(),
                 ct_in.k(),
@@ -127,7 +126,7 @@ pub fn test_glwe_automorphism<B>(
     );

     let mut autokey_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> =
-        GGLWEAutomorphismKeyPrepared::alloc(module, n, basek, k_ksk, rows, digits, rank);
+        GGLWEAutomorphismKeyPrepared::alloc(module, basek, k_ksk, rows, digits, rank);
     autokey_prepared.prepare(module, &autokey, scratch.borrow());

     ct_out.automorphism(module, &ct_in, &autokey_prepared, scratch.borrow());
@@ -213,10 +212,10 @@ pub fn test_glwe_automorphism_inplace<B>(
     module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_ct, &mut source_xa);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, autokey.k(), rank)
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k())
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
-            | GLWECiphertext::automorphism_inplace_scratch_space(module, n, basek, ct.k(), autokey.k(), digits, rank),
+        GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, autokey.k(), rank)
+            | GLWECiphertext::decrypt_scratch_space(module, basek, ct.k())
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
+            | GLWECiphertext::automorphism_inplace_scratch_space(module, basek, ct.k(), autokey.k(), digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -242,7 +241,7 @@ pub fn test_glwe_automorphism_inplace<B>(
     );

     let mut autokey_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> =
-        GGLWEAutomorphismKeyPrepared::alloc(module, n, basek, k_ksk, rows, digits, rank);
+        GGLWEAutomorphismKeyPrepared::alloc(module, basek, k_ksk, rows, digits, rank);
     autokey_prepared.prepare(module, &autokey, scratch.borrow());

     ct.automorphism_inplace(module, &autokey_prepared, scratch.borrow());
@@ -5,7 +5,7 @@ use poulpy_hal::{
         VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftFromVecZnx,
         VecZnxDftToVecZnxBigConsume, VecZnxFillUniform, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
         VecZnxSub, VecZnxSubABInplace, VecZnxSwithcDegree, VmpApply, VmpApplyAdd, VmpApplyTmpBytes, VmpPMatAlloc, VmpPrepare,
-        ZnxView,
+        ZnAddNormal, ZnFillUniform, ZnNormalizeInplace, ZnxView,
     },
     layouts::{Backend, Module, ScratchOwned},
     oep::{
@@ -50,7 +50,10 @@ where
         + VmpApplyAdd<B>
         + VecZnxBigNormalizeTmpBytes
         + VecZnxSwithcDegree
-        + VecZnxAutomorphismInplace,
+        + VecZnxAutomorphismInplace
+        + ZnNormalizeInplace<B>
+        + ZnFillUniform
+        + ZnAddNormal,
     B: Backend
         + TakeVecZnxDftImpl<B>
         + TakeVecZnxBigImpl<B>
@@ -79,9 +82,9 @@ where
     let mut source_xe: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
-            | GLWECiphertext::from_lwe_scratch_space(module, n, basek, k_lwe_ct, k_glwe_ct, k_ksk, rank)
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, k_glwe_ct),
+        LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
+            | GLWECiphertext::from_lwe_scratch_space(module, basek, k_lwe_ct, k_glwe_ct, k_ksk, rank)
+            | GLWECiphertext::decrypt_scratch_space(module, basek, k_glwe_ct),
     );

     let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -152,7 +155,8 @@ where
         + VmpApplyAdd<B>
         + VecZnxBigNormalizeTmpBytes
         + VecZnxSwithcDegree
-        + VecZnxAutomorphismInplace,
+        + VecZnxAutomorphismInplace
+        + ZnNormalizeInplace<B>,
     B: Backend
         + TakeVecZnxDftImpl<B>
         + TakeVecZnxBigImpl<B>
@@ -181,9 +185,9 @@ where
     let mut source_xe: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
-            | LWECiphertext::from_glwe_scratch_space(module, n, basek, k_lwe_ct, k_glwe_ct, k_ksk, rank)
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, k_glwe_ct),
+        LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
+            | LWECiphertext::from_glwe_scratch_space(module, basek, k_lwe_ct, k_glwe_ct, k_ksk, rank)
+            | GLWECiphertext::decrypt_scratch_space(module, basek, k_glwe_ct),
     );

     let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
|||||||
@@ -77,7 +77,7 @@ where
|
|||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWEAutomorphismKey::encrypt_sk_scratch_space(
|
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWEAutomorphismKey::encrypt_sk_scratch_space(
|
||||||
module, n, basek, k_ksk, rank,
|
module, basek, k_ksk, rank,
|
||||||
));
|
));
|
||||||
|
|
||||||
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
|
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
|
||||||
@@ -169,7 +169,7 @@ pub fn test_gglwe_automorphisk_key_compressed_encrypt_sk<B>(
|
|||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWEAutomorphismKey::encrypt_sk_scratch_space(
|
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWEAutomorphismKey::encrypt_sk_scratch_space(
|
||||||
module, n, basek, k_ksk, rank,
|
module, basek, k_ksk, rank,
|
||||||
));
|
));
|
||||||
|
|
||||||
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
|
let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
|
||||||
|
|||||||
@@ -78,7 +78,7 @@ pub fn test_gglwe_switching_key_encrypt_sk<B>(
|
|||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKey::encrypt_sk_scratch_space(
|
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKey::encrypt_sk_scratch_space(
|
||||||
module, n, basek, k_ksk, rank_in, rank_out,
|
module, basek, k_ksk, rank_in, rank_out,
|
||||||
));
|
));
|
||||||
|
|
||||||
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
|
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
|
||||||
@@ -156,7 +156,7 @@ pub fn test_gglwe_switching_key_compressed_encrypt_sk<B>(
|
|||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(
|
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(
|
||||||
module, n, basek, k_ksk, rank_in, rank_out,
|
module, basek, k_ksk, rank_in, rank_out,
|
||||||
));
|
));
|
||||||
|
|
||||||
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
|
let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
|
||||||
|
|||||||
@@ -79,7 +79,7 @@ where
     pt_scalar.fill_ternary_hw(0, n, &mut source_xs);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSWCiphertext::encrypt_sk_scratch_space(
-        module, n, basek, k, rank,
+        module, basek, k, rank,
     ));

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -154,7 +154,7 @@ where
     pt_scalar.fill_ternary_hw(0, n, &mut source_xs);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSWCiphertextCompressed::encrypt_sk_scratch_space(
-        module, n, basek, k, rank,
+        module, basek, k, rank,
     ));

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -81,8 +81,8 @@ where
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k()),
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
+            | GLWECiphertext::decrypt_scratch_space(module, basek, ct.k()),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -169,8 +169,8 @@ where
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GLWECiphertextCompressed::encrypt_sk_scratch_space(module, n, basek, k_ct)
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, k_ct),
+        GLWECiphertextCompressed::encrypt_sk_scratch_space(module, basek, k_ct)
+            | GLWECiphertext::decrypt_scratch_space(module, basek, k_ct),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -263,8 +263,8 @@ where
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GLWECiphertext::decrypt_scratch_space(module, n, basek, k_ct)
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct),
+        GLWECiphertext::decrypt_scratch_space(module, basek, k_ct)
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, k_ct),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -331,9 +331,9 @@ where
     let mut source_xu: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k())
-            | GLWECiphertext::encrypt_pk_scratch_space(module, n, basek, k_pk),
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
+            | GLWECiphertext::decrypt_scratch_space(module, basek, ct.k())
+            | GLWECiphertext::encrypt_pk_scratch_space(module, basek, k_pk),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -75,7 +75,6 @@ where

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWETensorKey::encrypt_sk_scratch_space(
         module,
-        n,
         basek,
         tensor_key.k(),
         rank,
@@ -95,10 +94,10 @@ where

     let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k);

-    let mut sk_ij_dft = module.vec_znx_dft_alloc(n, 1, 1);
-    let mut sk_ij_big = module.vec_znx_big_alloc(n, 1, 1);
+    let mut sk_ij_dft = module.vec_znx_dft_alloc(1, 1);
+    let mut sk_ij_big = module.vec_znx_big_alloc(1, 1);
     let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, 1);
-    let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(n, rank, 1);
+    let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(rank, 1);

     (0..rank).for_each(|i| {
         module.vec_znx_dft_from_vec_znx(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
@@ -185,7 +184,6 @@ where

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGLWETensorKeyCompressed::encrypt_sk_scratch_space(
         module,
-        n,
         basek,
         tensor_key_compressed.k(),
         rank,
@@ -204,10 +202,10 @@ where

     let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k);

-    let mut sk_ij_dft = module.vec_znx_dft_alloc(n, 1, 1);
-    let mut sk_ij_big = module.vec_znx_big_alloc(n, 1, 1);
+    let mut sk_ij_dft = module.vec_znx_dft_alloc(1, 1);
+    let mut sk_ij_big = module.vec_znx_big_alloc(1, 1);
     let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, 1);
-    let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(n, rank, 1);
+    let mut sk_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(rank, 1);

     (0..rank).for_each(|i| {
         module.vec_znx_dft_from_vec_znx(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
@@ -93,9 +93,9 @@ pub fn test_gglwe_switching_key_external_product<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_in, rank_in, rank_out)
-            | GGLWESwitchingKey::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank_out)
-            | GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank_out),
+        GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_in, rank_in, rank_out)
+            | GGLWESwitchingKey::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank_out)
+            | GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank_out),
     );

     let r: usize = 1;
@@ -231,9 +231,9 @@ pub fn test_gglwe_switching_key_external_product_inplace<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ct, rank_in, rank_out)
-            | GGLWESwitchingKey::external_product_inplace_scratch_space(module, n, basek, k_ct, k_ggsw, digits, rank_out)
-            | GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank_out),
+        GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ct, rank_in, rank_out)
+            | GGLWESwitchingKey::external_product_inplace_scratch_space(module, basek, k_ct, k_ggsw, digits, rank_out)
+            | GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank_out),
     );

     let r: usize = 1;
@@ -99,8 +99,8 @@ pub fn test_ggsw_external_product<B>(
     pt_ggsw_rhs.to_mut().raw_mut()[k] = 1; //X^{k}

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank)
-            | GGSWCiphertext::external_product_scratch_space(module, n, basek, k_out, k_in, k_ggsw, digits, rank),
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank)
+            | GGSWCiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -231,8 +231,8 @@ pub fn test_ggsw_external_product_inplace<B>(
     pt_ggsw_rhs.to_mut().raw_mut()[k] = 1; //X^{k}

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ggsw, rank)
-            | GGSWCiphertext::external_product_inplace_scratch_space(module, n, basek, k_ct, k_ggsw, digits, rank),
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ggsw, rank)
+            | GGSWCiphertext::external_product_inplace_scratch_space(module, basek, k_ct, k_ggsw, digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -92,11 +92,10 @@ pub fn test_glwe_external_product<B>(
     pt_rgsw.raw_mut()[k] = 1; // X^{k}

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, ct_ggsw.k(), rank)
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_glwe_in.k())
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, ct_ggsw.k(), rank)
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_glwe_in.k())
             | GLWECiphertext::external_product_scratch_space(
                 module,
-                n,
                 basek,
                 ct_glwe_out.k(),
                 ct_glwe_in.k(),
@@ -225,9 +224,9 @@ pub fn test_glwe_external_product_inplace<B>(
     pt_rgsw.raw_mut()[k] = 1; // X^{k}

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, ct_ggsw.k(), rank)
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_glwe.k())
-            | GLWECiphertext::external_product_inplace_scratch_space(module, n, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, ct_ggsw.k(), rank)
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_glwe.k())
+            | GLWECiphertext::external_product_inplace_scratch_space(module, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -97,7 +97,6 @@ pub fn test_gglwe_switching_key_keyswitch<B>(

     let mut scratch_enc: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKey::encrypt_sk_scratch_space(
         module,
-        n,
         basek,
         k_ksk,
         rank_in_s0s1 | rank_out_s0s1,
@@ -105,7 +104,6 @@ pub fn test_gglwe_switching_key_keyswitch<B>(
     ));
     let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKey::keyswitch_scratch_space(
         module,
-        n,
         basek,
         k_out,
         k_in,
@@ -237,14 +235,13 @@ pub fn test_gglwe_switching_key_keyswitch_inplace<B>(

     let mut scratch_enc: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKey::encrypt_sk_scratch_space(
         module,
-        n,
         basek,
         k_ksk,
         rank_in | rank_out,
         rank_out,
     ));
     let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GGLWESwitchingKey::keyswitch_inplace_scratch_space(
-        module, n, basek, k_ct, k_ksk, digits, rank_out,
+        module, basek, k_ct, k_ksk, digits, rank_out,
     ));

     let var_xs: f64 = 0.5;
@@ -94,11 +94,11 @@ pub fn test_ggsw_keyswitch<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_in, rank)
-            | GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank, rank)
-            | GGLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_in, rank)
+            | GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank, rank)
+            | GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
             | GGSWCiphertext::keyswitch_scratch_space(
-                module, n, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
+                module, basek, k_out, k_in, k_ksk, digits, k_tsk, digits, rank,
             ),
     );

@@ -237,10 +237,10 @@ pub fn test_ggsw_keyswitch_inplace<B>(
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct, rank)
-            | GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank, rank)
-            | GGLWETensorKey::encrypt_sk_scratch_space(module, n, basek, k_tsk, rank)
-            | GGSWCiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
+        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k_ct, rank)
+            | GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank, rank)
+            | GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k_tsk, rank)
+            | GGSWCiphertext::keyswitch_inplace_scratch_space(module, basek, k_ct, k_ksk, digits, k_tsk, digits, rank),
     );

     let var_xs: f64 = 0.5;
@@ -86,11 +86,10 @@ pub fn test_glwe_keyswitch<B>(
     module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_in, &mut source_xa);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, ksk.k(), rank_in, rank_out)
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_in.k())
+        GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, ksk.k(), rank_in, rank_out)
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_in.k())
             | GLWECiphertext::keyswitch_scratch_space(
                 module,
-                n,
                 basek,
                 ct_out.k(),
                 ct_in.k(),
@@ -200,9 +199,9 @@ where
     module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_ct, &mut source_xa);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GGLWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, ksk.k(), rank, rank)
-            | GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct_glwe.k())
-            | GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, ct_glwe.k(), ksk.k(), digits, rank),
+        GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, ksk.k(), rank, rank)
+            | GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct_glwe.k())
+            | GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, ct_glwe.k(), ksk.k(), digits, rank),
     );

     let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -5,7 +5,7 @@ use poulpy_hal::{
         VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftFromVecZnx,
         VecZnxDftToVecZnxBigConsume, VecZnxFillUniform, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
         VecZnxSub, VecZnxSubABInplace, VecZnxSwithcDegree, VmpApply, VmpApplyAdd, VmpApplyTmpBytes, VmpPMatAlloc, VmpPrepare,
-        ZnxView,
+        ZnAddNormal, ZnFillUniform, ZnNormalizeInplace, ZnxView,
     },
     layouts::{Backend, Module, ScratchOwned},
     oep::{
@@ -49,7 +49,10 @@ where
         + VmpApplyAdd<B>
         + VecZnxBigNormalizeTmpBytes
         + VecZnxSwithcDegree
-        + VecZnxAutomorphismInplace,
+        + VecZnxAutomorphismInplace
+        + ZnNormalizeInplace<B>
+        + ZnFillUniform
+        + ZnAddNormal,
     B: Backend
         + TakeVecZnxDftImpl<B>
         + TakeVecZnxBigImpl<B>
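The three added bounds match the Zn* traits pulled into the import list a few hunks earlier: generic code that fills, perturbs, or normalizes Zn vectors now has to state those capabilities explicitly next to the existing VecZnx bounds. A rough sketch of the pattern (the trait names are taken from this diff; the poulpy_hal::api path and the helper function itself are illustrative assumptions):

    use poulpy_hal::{
        api::{ZnAddNormal, ZnFillUniform, ZnNormalizeInplace},
        layouts::{Backend, Module},
    };

    // A helper generic over the backend lists the new Zn bounds on the module,
    // exactly as the where-clause in the hunk above does.
    fn needs_zn_support<B: Backend>(module: &Module<B>)
    where
        Module<B>: ZnFillUniform + ZnAddNormal + ZnNormalizeInplace<B>,
    {
        let _ = module; // body elided: only the bounds are the point of this sketch
    }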
@@ -75,8 +78,8 @@ where
     let mut source_xe: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        LWESwitchingKey::encrypt_sk_scratch_space(module, n, basek, k_ksk)
-            | LWECiphertext::keyswitch_scratch_space(module, n, basek, k_lwe_ct, k_lwe_ct, k_ksk),
+        LWESwitchingKey::encrypt_sk_scratch_space(module, basek, k_ksk)
+            | LWECiphertext::keyswitch_scratch_space(module, basek, k_lwe_ct, k_lwe_ct, k_ksk),
     );

     let mut sk_lwe_in: LWESecret<Vec<u8>> = LWESecret::alloc(n_lwe_in);
@@ -89,9 +89,9 @@ where
     let rows: usize = k_ct.div_ceil(basek * digits);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, k_ct)
-            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_ksk, rank)
-            | GLWEPacker::scratch_space(module, n, basek, k_ct, k_ksk, digits, rank),
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k_ct)
+            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, k_ksk, rank)
+            | GLWEPacker::scratch_space(module, basek, k_ct, k_ksk, digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -87,10 +87,10 @@ where
     let mut source_xa: Source = Source::new([0u8; 32]);

     let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
-        GLWECiphertext::encrypt_sk_scratch_space(module, n, basek, ct.k())
-            | GLWECiphertext::decrypt_scratch_space(module, n, basek, ct.k())
-            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, n, basek, k_autokey, rank)
-            | GLWECiphertext::trace_inplace_scratch_space(module, n, basek, ct.k(), k_autokey, digits, rank),
+        GLWECiphertext::encrypt_sk_scratch_space(module, basek, ct.k())
+            | GLWECiphertext::decrypt_scratch_space(module, basek, ct.k())
+            | GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, basek, k_autokey, rank)
+            | GLWECiphertext::trace_inplace_scratch_space(module, basek, ct.k(), k_autokey, digits, rank),
     );

     let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
@@ -37,19 +37,16 @@ impl<D: DataRef> GLWEPlaintext<D> {
 impl<D: DataMut> LWEPlaintext<D> {
     pub fn encode_i64(&mut self, data: i64, k: usize) {
         let basek: usize = self.basek();
-        self.data
-            .encode_coeff_i64(basek, 0, k, 0, data, i64::BITS as usize);
+        self.data.encode_i64(basek, k, data, i64::BITS as usize);
     }
 }

 impl<D: DataRef> LWEPlaintext<D> {
     pub fn decode_i64(&self, k: usize) -> i64 {
-        self.data.decode_coeff_i64(self.basek(), 0, k, 0)
+        self.data.decode_i64(self.basek(), k)
     }

     pub fn decode_float(&self) -> Float {
-        let mut data: Vec<Float> = vec![Float::new(self.k() as u32)];
-        self.data.decode_vec_float(self.basek(), 0, &mut data);
-        data[0].clone()
+        self.data.decode_float(self.basek())
     }
 }
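LWEPlaintext now delegates to flat encode_i64 / decode_i64 / decode_float methods on its backing data instead of the coefficient-indexed encode_coeff_i64 / decode_coeff_i64 / decode_vec_float calls. A small round-trip sketch against the wrapper's public methods (only the encode_i64 and decode_i64 signatures come from this hunk; the import path and the combined DataMut + DataRef bound are assumptions):

    use poulpy_core::layouts::{DataMut, DataRef, LWEPlaintext}; // path assumed

    // Encodes msg at bit-precision k, then decodes the same k bits back.
    // DataMut is needed for encode_i64, DataRef for decode_i64.
    fn roundtrip_i64<D: DataMut + DataRef>(pt: &mut LWEPlaintext<D>, msg: i64, k: usize) -> i64 {
        pt.encode_i64(msg, k);
        pt.decode_i64(k)
    }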
@@ -5,6 +5,7 @@ mod vec_znx;
 mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
+mod zn;
 mod znx_base;

 pub use module::*;
@@ -14,4 +15,5 @@ pub use vec_znx::*;
 pub use vec_znx_big::*;
 pub use vec_znx_dft::*;
 pub use vmp_pmat::*;
+pub use zn::*;
 pub use znx_base::*;
@@ -2,18 +2,18 @@ use crate::layouts::{Backend, ScalarZnxToRef, SvpPPolOwned, SvpPPolToMut, SvpPPo

 /// Allocates as [crate::layouts::SvpPPol].
 pub trait SvpPPolAlloc<B: Backend> {
-    fn svp_ppol_alloc(&self, n: usize, cols: usize) -> SvpPPolOwned<B>;
+    fn svp_ppol_alloc(&self, cols: usize) -> SvpPPolOwned<B>;
 }

 /// Returns the size in bytes to allocate a [crate::layouts::SvpPPol].
 pub trait SvpPPolAllocBytes {
-    fn svp_ppol_alloc_bytes(&self, n: usize, cols: usize) -> usize;
+    fn svp_ppol_alloc_bytes(&self, cols: usize) -> usize;
 }

 /// Consume a vector of bytes into a [crate::layouts::MatZnx].
 /// User must ensure that bytes is memory aligned and that it length is equal to [SvpPPolAllocBytes].
 pub trait SvpPPolFromBytes<B: Backend> {
-    fn svp_ppol_from_bytes(&self, n: usize, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B>;
+    fn svp_ppol_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B>;
 }

 /// Prepare a [crate::layouts::ScalarZnx] into an [crate::layouts::SvpPPol].
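With the degree argument gone, allocating an SvpPPol only needs the column count; whatever implements SvpPPolAlloc (in practice the module) supplies the degree. A minimal generic sketch built only on the trait signature shown above (the import paths are assumptions):

    use poulpy_hal::api::SvpPPolAlloc;                // trait from this hunk; path assumed
    use poulpy_hal::layouts::{Backend, SvpPPolOwned};

    // Allocates one prepared scalar polynomial per requested column count,
    // without ever naming the ring degree.
    fn alloc_ppols<B: Backend, M: SvpPPolAlloc<B>>(module: &M, col_counts: &[usize]) -> Vec<SvpPPolOwned<B>> {
        col_counts.iter().map(|&cols| module.svp_ppol_alloc(cols)).collect()
    }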
@@ -7,7 +7,7 @@ use crate::{

 pub trait VecZnxNormalizeTmpBytes {
     /// Returns the minimum number of bytes necessary for normalization.
-    fn vec_znx_normalize_tmp_bytes(&self, n: usize) -> usize;
+    fn vec_znx_normalize_tmp_bytes(&self) -> usize;
 }

 pub trait VecZnxNormalize<B: Backend> {
@@ -7,18 +7,18 @@ use crate::{

 /// Allocates as [crate::layouts::VecZnxBig].
 pub trait VecZnxBigAlloc<B: Backend> {
-    fn vec_znx_big_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxBigOwned<B>;
+    fn vec_znx_big_alloc(&self, cols: usize, size: usize) -> VecZnxBigOwned<B>;
 }

 /// Returns the size in bytes to allocate a [crate::layouts::VecZnxBig].
 pub trait VecZnxBigAllocBytes {
-    fn vec_znx_big_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize;
+    fn vec_znx_big_alloc_bytes(&self, cols: usize, size: usize) -> usize;
 }

 /// Consume a vector of bytes into a [crate::layouts::VecZnxBig].
 /// User must ensure that bytes is memory aligned and that it length is equal to [VecZnxBigAllocBytes].
 pub trait VecZnxBigFromBytes<B: Backend> {
-    fn vec_znx_big_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B>;
+    fn vec_znx_big_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B>;
 }

 #[allow(clippy::too_many_arguments)]
@@ -187,7 +187,7 @@ pub trait VecZnxBigNegateInplace<B: Backend> {
 }

 pub trait VecZnxBigNormalizeTmpBytes {
-    fn vec_znx_big_normalize_tmp_bytes(&self, n: usize) -> usize;
+    fn vec_znx_big_normalize_tmp_bytes(&self) -> usize;
 }

 pub trait VecZnxBigNormalize<B: Backend> {
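The same simplification closes out the diff: the normalization scratch queries (vec_znx_big_normalize_tmp_bytes here, vec_znx_normalize_tmp_bytes earlier) become zero-argument methods on the implementer. A tiny sketch that sizes a raw byte buffer from the query (trait path assumed; the tests above feed such sizes into ScratchOwned::alloc instead):

    use poulpy_hal::api::VecZnxBigNormalizeTmpBytes; // path assumed

    // Returns a buffer large enough for big-coefficient normalization,
    // sized without passing the ring degree explicitly.
    fn normalize_scratch<M: VecZnxBigNormalizeTmpBytes>(module: &M) -> Vec<u8> {
        vec![0u8; module.vec_znx_big_normalize_tmp_bytes()]
    }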
Some files were not shown because too many files have changed in this diff.