add test for GLWEBlindRotation

Commit to poulpy (mirror of https://github.com/arnaucube/poulpy.git).
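In short: the commit introduces a `vec_znx_zero` primitive across the HAL (a public `VecZnxZero` trait, a `VecZnxZeroImpl` extension point, a reference implementation, and wiring for the FFT64Avx, FFT64Ref, and FFT64Spqlios backends), uses it to let `glwe_copy` accept mismatched ranks by zero-padding the destination, rewrites `glwe_blind_rotation` to take an explicit test ciphertext and run a ping-pong CMUX ladder, and adds a `test_glwe_blind_rotation` test exercising the new code path.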
@@ -14,7 +14,7 @@ use poulpy_hal::{
         VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
         VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
         VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
-        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
+        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl, VecZnxZeroImpl,
     },
     reference::vec_znx::{
         vec_znx_add, vec_znx_add_inplace, vec_znx_add_normal_ref, vec_znx_add_scalar, vec_znx_add_scalar_inplace,
@@ -25,13 +25,22 @@ use poulpy_hal::{
         vec_znx_normalize_inplace, vec_znx_normalize_tmp_bytes, vec_znx_rotate, vec_znx_rotate_inplace,
         vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
         vec_znx_split_ring_tmp_bytes, vec_znx_sub, vec_znx_sub_inplace, vec_znx_sub_negate_inplace, vec_znx_sub_scalar,
-        vec_znx_sub_scalar_inplace, vec_znx_switch_ring,
+        vec_znx_sub_scalar_inplace, vec_znx_switch_ring, vec_znx_zero,
     },
     source::Source,
 };
 
 use crate::cpu_fft64_avx::FFT64Avx;
 
+unsafe impl VecZnxZeroImpl<Self> for FFT64Avx {
+    fn vec_znx_zero_impl<R>(_module: &Module<Self>, res: &mut R, res_col: usize)
+    where
+        R: VecZnxToMut,
+    {
+        vec_znx_zero::<_, FFT64Avx>(res, res_col);
+    }
+}
+
 unsafe impl VecZnxNormalizeTmpBytesImpl<Self> for FFT64Avx {
     fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
         vec_znx_normalize_tmp_bytes(module.n())
@@ -14,7 +14,7 @@ use poulpy_hal::{
         VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
         VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
         VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
-        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
+        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl, VecZnxZeroImpl,
     },
     reference::vec_znx::{
         vec_znx_add, vec_znx_add_inplace, vec_znx_add_normal_ref, vec_znx_add_scalar, vec_znx_add_scalar_inplace,
@@ -25,13 +25,22 @@ use poulpy_hal::{
         vec_znx_normalize_inplace, vec_znx_normalize_tmp_bytes, vec_znx_rotate, vec_znx_rotate_inplace,
         vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
         vec_znx_split_ring_tmp_bytes, vec_znx_sub, vec_znx_sub_inplace, vec_znx_sub_negate_inplace, vec_znx_sub_scalar,
-        vec_znx_sub_scalar_inplace, vec_znx_switch_ring,
+        vec_znx_sub_scalar_inplace, vec_znx_switch_ring, vec_znx_zero,
     },
     source::Source,
 };
 
 use crate::cpu_fft64_ref::FFT64Ref;
 
+unsafe impl VecZnxZeroImpl<Self> for FFT64Ref {
+    fn vec_znx_zero_impl<R>(_module: &Module<Self>, res: &mut R, res_col: usize)
+    where
+        R: VecZnxToMut,
+    {
+        vec_znx_zero::<_, FFT64Ref>(res, res_col);
+    }
+}
+
 unsafe impl VecZnxNormalizeTmpBytesImpl<Self> for FFT64Ref {
     fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
         vec_znx_normalize_tmp_bytes(module.n())
@@ -15,7 +15,7 @@ use poulpy_hal::{
         VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
         VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
         VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
-        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
+        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl, VecZnxZeroImpl,
     },
     reference::{
         vec_znx::{
@@ -23,7 +23,7 @@ use poulpy_hal::{
             vec_znx_fill_uniform_ref, vec_znx_lsh, vec_znx_lsh_inplace, vec_znx_lsh_tmp_bytes, vec_znx_merge_rings,
             vec_znx_merge_rings_tmp_bytes, vec_znx_mul_xp_minus_one_inplace_tmp_bytes, vec_znx_normalize_tmp_bytes,
             vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
-            vec_znx_split_ring_tmp_bytes, vec_znx_switch_ring,
+            vec_znx_split_ring_tmp_bytes, vec_znx_switch_ring, vec_znx_zero,
         },
         znx::{znx_copy_ref, znx_zero_ref},
     },
@@ -35,6 +35,15 @@ use crate::cpu_spqlios::{
     ffi::{module::module_info_t, vec_znx, znx},
 };
 
+unsafe impl VecZnxZeroImpl<Self> for FFT64Spqlios {
+    fn vec_znx_zero_impl<R>(_module: &Module<Self>, res: &mut R, res_col: usize)
+    where
+        R: VecZnxToMut,
+    {
+        vec_znx_zero::<_, FFT64Spqlios>(res, res_col);
+    }
+}
+
 unsafe impl VecZnxNormalizeTmpBytesImpl<Self> for FFT64Spqlios {
     fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
         vec_znx_normalize_tmp_bytes(module.n())
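All three CPU backends wire the new extension point identically: the unsafe `VecZnxZeroImpl` forwards to the shared reference routine `vec_znx_zero`. For readers new to the HAL, this public-trait-plus-unsafe-impl-trait dispatch reduces to the following minimal sketch (all names here are illustrative, not poulpy's):

use std::marker::PhantomData;

// Public API trait: what callers of the module type use.
trait Zero {
    fn zero(&self, buf: &mut [i64]);
}

// Unsafe extension point: what each backend supplies.
unsafe trait ZeroImpl {
    fn zero_impl(buf: &mut [i64]);
}

struct Mod<B>(PhantomData<B>);

// Blanket impl: Mod<B> exposes the API whenever B implements the extension point.
impl<B: ZeroImpl> Zero for Mod<B> {
    fn zero(&self, buf: &mut [i64]) {
        B::zero_impl(buf);
    }
}

struct RefBackend;

// A backend forwards to a shared reference routine, as the three impls above do.
unsafe impl ZeroImpl for RefBackend {
    fn zero_impl(buf: &mut [i64]) {
        buf.fill(0);
    }
}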
@@ -2,7 +2,7 @@ use poulpy_hal::{
    api::{
        ModuleN, VecZnxAdd, VecZnxAddInplace, VecZnxCopy, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxNegateInplace,
        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
-        VecZnxSubInplace, VecZnxSubNegateInplace,
+        VecZnxSubInplace, VecZnxSubNegateInplace, VecZnxZero,
    },
    layouts::{Backend, Module, Scratch, VecZnx, ZnxZero},
    reference::vec_znx::vec_znx_rotate_inplace_tmp_bytes,
@@ -262,11 +262,11 @@ where
     }
 }
 
-impl<BE: Backend> GLWECopy for Module<BE> where Self: ModuleN + VecZnxCopy {}
+impl<BE: Backend> GLWECopy for Module<BE> where Self: ModuleN + VecZnxCopy + VecZnxZero {}
 
 pub trait GLWECopy
 where
-    Self: ModuleN + VecZnxCopy,
+    Self: ModuleN + VecZnxCopy + VecZnxZero,
 {
     fn glwe_copy<R, A>(&self, res: &mut R, a: &A)
     where
@@ -278,12 +278,17 @@ where
 
         assert_eq!(res.n(), self.n() as u32);
         assert_eq!(a.n(), self.n() as u32);
-        assert_eq!(res.rank(), a.rank());
 
-        for i in 0..res.rank().as_usize() + 1 {
+        let min_rank: usize = res.rank().min(a.rank()).as_usize() + 1;
+
+        for i in 0..min_rank {
             self.vec_znx_copy(res.data_mut(), i, a.data(), i);
         }
 
+        for i in min_rank..(res.rank() + 1).into() {
+            self.vec_znx_zero(res.data_mut(), i);
+        }
+
         res.set_k(a.k().min(res.max_k()));
         res.set_base2k(a.base2k());
     }
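The behavioral change here: `glwe_copy` used to assert `res.rank() == a.rank()`; it now copies the `min(rank) + 1` shared columns and zeroes any extra columns of `res`, which is why `GLWECopy` picks up the `VecZnxZero` bound. A plain-slice model of the new semantics (hypothetical helper; each inner `Vec<i64>` stands in for one `VecZnx` column of equal length):

// Copy the overlapping columns, zero-pad the rest of the destination.
fn copy_with_zero_pad(res: &mut [Vec<i64>], a: &[Vec<i64>]) {
    let min_cols = res.len().min(a.len());
    for i in 0..min_cols {
        // shared columns: straight copy (columns assumed equal-length)
        res[i].copy_from_slice(&a[i]);
    }
    for col in res.iter_mut().skip(min_cols) {
        // surplus destination columns: zeroed rather than left stale
        col.fill(0);
    }
}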
@@ -8,6 +8,12 @@ pub trait VecZnxNormalizeTmpBytes {
     fn vec_znx_normalize_tmp_bytes(&self) -> usize;
 }
 
+pub trait VecZnxZero {
+    fn vec_znx_zero<R>(&self, res: &mut R, res_col: usize)
+    where
+        R: VecZnxToMut;
+}
+
 pub trait VecZnxNormalize<B: Backend> {
     #[allow(clippy::too_many_arguments)]
     /// Normalizes the selected column of `a` and stores the result into the selected column of `res`.
@@ -6,7 +6,7 @@ use crate::{
        VecZnxMulXpMinusOneInplace, VecZnxMulXpMinusOneInplaceTmpBytes, VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize,
        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes,
        VecZnxRsh, VecZnxRshInplace, VecZnxRshTmpBytes, VecZnxSplitRing, VecZnxSplitRingTmpBytes, VecZnxSub, VecZnxSubInplace,
-        VecZnxSubNegateInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
+        VecZnxSubNegateInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing, VecZnxZero,
    },
    layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
    oep::{
@@ -18,11 +18,23 @@ use crate::{
        VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
        VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
        VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
-        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
+        VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl, VecZnxZeroImpl,
    },
    source::Source,
 };
 
+impl<B> VecZnxZero for Module<B>
+where
+    B: Backend + VecZnxZeroImpl<B>,
+{
+    fn vec_znx_zero<R>(&self, res: &mut R, res_col: usize)
+    where
+        R: VecZnxToMut,
+    {
+        B::vec_znx_zero_impl(self, res, res_col);
+    }
+}
+
 impl<B> VecZnxNormalizeTmpBytes for Module<B>
 where
     B: Backend + VecZnxNormalizeTmpBytesImpl<B>,
@@ -3,6 +3,16 @@ use crate::{
    source::Source,
 };
 
+/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
+/// * See TODO for reference implementation.
+/// * See [crate::api::VecZnxZero] for corresponding public API.
+/// # Safety [crate::doc::backend_safety] for safety contract.
+pub unsafe trait VecZnxZeroImpl<B: Backend> {
+    fn vec_znx_zero_impl<R>(module: &Module<B>, res: &mut R, res_col: usize)
+    where
+        R: VecZnxToMut;
+}
+
 /// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
 /// * See [poulpy-backend/src/cpu_fft64_ref/vec_znx.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/vec_znx.rs) for reference implementation.
 /// * See [crate::api::VecZnxNormalizeTmpBytes] for corresponding public API.
@@ -13,6 +13,7 @@ mod split_ring;
 mod sub;
 mod sub_scalar;
 mod switch_ring;
+mod zero;
 
 pub use add::*;
 pub use add_scalar::*;
@@ -29,3 +30,4 @@ pub use split_ring::*;
 pub use sub::*;
 pub use sub_scalar::*;
 pub use switch_ring::*;
+pub use zero::*;
poulpy-hal/src/reference/vec_znx/zero.rs (new file, 16 lines)
@@ -0,0 +1,16 @@
+use crate::{
+    layouts::{VecZnx, VecZnxToMut, ZnxInfos, ZnxViewMut},
+    reference::znx::ZnxZero,
+};
+
+pub fn vec_znx_zero<R, ZNXARI>(res: &mut R, res_col: usize)
+where
+    R: VecZnxToMut,
+    ZNXARI: ZnxZero,
+{
+    let mut res: VecZnx<&mut [u8]> = res.to_mut();
+    let res_size = res.size();
+    for j in 0..res_size {
+        ZNXARI::znx_zero(res.at_mut(res_col, j));
+    }
+}
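The reference routine clears every limb of one column of a `VecZnx`. As a toy model of the semantics only (the flat limb-major layout below is an assumption made for illustration, not poulpy's actual memory layout):

// Toy VecZnx: `size` limbs per column, n coefficients per (col, limb) slot.
struct ToyVecZnx {
    n: usize,
    cols: usize,
    size: usize,
    data: Vec<i64>, // len == n * cols * size (layout assumed limb-major)
}

impl ToyVecZnx {
    fn at_mut(&mut self, col: usize, limb: usize) -> &mut [i64] {
        let start = (limb * self.cols + col) * self.n;
        &mut self.data[start..start + self.n]
    }

    // Mirrors vec_znx_zero: zero limb j of column `res_col` for all j.
    fn zero_col(&mut self, res_col: usize) {
        for j in 0..self.size {
            self.at_mut(res_col, j).fill(0);
        }
    }
}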
@@ -1,11 +1,18 @@
 use poulpy_core::{
     GLWECopy, GLWERotate, ScratchTakeCore,
-    layouts::{GGSW, GGSWInfos, GGSWToMut, GLWE, GLWEInfos, GLWEToMut},
+    layouts::{GGSW, GGSWInfos, GGSWToMut, GGSWToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef},
 };
-use poulpy_hal::layouts::{Backend, Scratch};
+use poulpy_hal::layouts::{Backend, Module, Scratch};
 
 use crate::tfhe::bdd_arithmetic::{Cmux, GetGGSWBit, UnsignedInteger};
 
+impl<T: UnsignedInteger, BE: Backend> GGSWBlindRotation<T, BE> for Module<BE>
+where
+    Self: GLWEBlindRotation<T, BE>,
+    Scratch<BE>: ScratchTakeCore<BE>,
+{
+}
+
 pub trait GGSWBlindRotation<T: UnsignedInteger, BE: Backend>
 where
     Self: GLWEBlindRotation<T, BE>,
@@ -19,9 +26,10 @@ where
         self.glwe_blind_rotation_tmp_bytes(res_infos, k_infos)
     }
 
-    fn ggsw_blind_rotation<R, K>(
+    fn ggsw_blind_rotation<R, G, K>(
         &self,
         res: &mut R,
+        test_ggsw: &G,
         k: &K,
         bit_start: usize,
         bit_size: usize,
@@ -29,15 +37,18 @@ where
         scratch: &mut Scratch<BE>,
     ) where
         R: GGSWToMut,
+        G: GGSWToRef,
         K: GetGGSWBit<T, BE>,
         Scratch<BE>: ScratchTakeCore<BE>,
     {
         let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let test_ggsw: &GGSW<&[u8]> = &test_ggsw.to_ref();
 
         for row in 0..res.dnum().into() {
             for col in 0..(res.rank() + 1).into() {
                 self.glwe_blind_rotation(
                     &mut res.at_mut(row, col),
+                    &test_ggsw.at(row, col),
                     k,
                     bit_start,
                     bit_size,
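Both blind-rotation entry points gain an explicit test-ciphertext argument plus blanket impls on `Module<BE>`. The GGSW variant stays a thin wrapper: it pairs each `(row, col)` GLWE of `res` with the matching GLWE of `test_ggsw` and delegates, so one GGSW blind rotation costs `dnum * (rank + 1)` GLWE blind rotations.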
@@ -49,6 +60,13 @@ where
     }
 }
 
+impl<T: UnsignedInteger, BE: Backend> GLWEBlindRotation<T, BE> for Module<BE>
+where
+    Self: GLWECopy + GLWERotate<BE> + Cmux<BE>,
+    Scratch<BE>: ScratchTakeCore<BE>,
+{
+}
+
 pub trait GLWEBlindRotation<T: UnsignedInteger, BE: Backend>
 where
     Self: GLWECopy + GLWERotate<BE> + Cmux<BE>,
@@ -63,9 +81,10 @@ where
     }
 
     /// Homomorphic multiplication of res by X^{k[bit_start..bit_start + bit_size] * bit_step}.
-    fn glwe_blind_rotation<R, K>(
+    fn glwe_blind_rotation<R, G, K>(
         &self,
         res: &mut R,
+        test_glwe: &G,
         k: &K,
         bit_start: usize,
         bit_size: usize,
@@ -73,21 +92,43 @@ where
         scratch: &mut Scratch<BE>,
     ) where
         R: GLWEToMut,
+        G: GLWEToRef,
         K: GetGGSWBit<T, BE>,
         Scratch<BE>: ScratchTakeCore<BE>,
     {
-        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
+        assert!(bit_start + bit_size <= T::WORD_SIZE);
 
-        let (mut tmp_res, scratch_1) = scratch.take_glwe(res);
+        let mut res: GLWE<&mut [u8]> = res.to_mut();
 
-        self.glwe_copy(&mut tmp_res, res);
+        let (mut tmp_res, scratch_1) = scratch.take_glwe(&res);
 
-        for i in 1..bit_size {
-            // res' = res * X^2^(i * bit_step)
-            self.glwe_rotate(1 << (i + bit_step), &mut tmp_res, res);
-
-            // res = (res - res') * GGSW(b[i]) + res'
-            self.cmux_inplace(res, &tmp_res, &k.get_bit(i + bit_start), scratch_1);
+        // res <- test_glwe
+        self.glwe_copy(&mut res, test_glwe);
+
+        // a_is_res = true  => (a, b) = (&mut res, &mut tmp_res)
+        // a_is_res = false => (a, b) = (&mut tmp_res, &mut res)
+        let mut a_is_res: bool = true;
+
+        for i in 0..bit_size {
+            let (a, b) = if a_is_res {
+                (&mut res, &mut tmp_res)
+            } else {
+                (&mut tmp_res, &mut res)
+            };
+
+            // a <- a ; b <- a * X^{-2^{i + bit_step}}
+            self.glwe_rotate(-1 << (i + bit_step), b, a);
+
+            // b <- (b - a) * GGSW(b[i]) + a
+            self.cmux_inplace(b, a, &k.get_bit(i + bit_start), scratch_1);
+
+            // ping-pong roles for next iter
+            a_is_res = !a_is_res;
+        }
+
+        // Ensure the final value ends up in `res`
+        if !a_is_res {
+            self.glwe_copy(&mut res, &tmp_res);
         }
     }
 }
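The rewritten GLWE routine seeds `res` with `test_glwe` and walks the selected key bits LSB-first: each step rotates the current value by `X^{-2^{i + bit_step}}` into the other buffer and CMUXes the two under `GGSW(k[bit_start + i])`, ping-ponging `res` and `tmp_res` so a step costs one rotation plus one CMUX instead of the old rotate-CMUX-copy. The net effect, as the new test exercises it, is `res = test_glwe * X^{-(window << bit_step)}` with `window = (k >> bit_start) & ((1 << bit_size) - 1)`. A cleartext shadow of the ladder, tracking only the accumulated exponent (hypothetical helper, no encryption):

// Selecting the rotated branch at step i contributes -2^(i + bit_step)
// whenever key bit (bit_start + i) is set.
fn blind_rotation_exponent(k: u32, bit_start: usize, bit_size: usize, bit_step: usize) -> i64 {
    let mut e: i64 = 0;
    for i in 0..bit_size {
        if (k >> (bit_start + i)) & 1 == 1 {
            e -= 1i64 << (i + bit_step);
        }
    }
    // e == -((window as i64) << bit_step), window = (k >> bit_start) & mask
    e
}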
@@ -3,11 +3,16 @@ use poulpy_backend::FFT64Ref;
 use crate::tfhe::{
     bdd_arithmetic::tests::test_suite::{
         test_bdd_add, test_bdd_and, test_bdd_or, test_bdd_prepare, test_bdd_sll, test_bdd_slt, test_bdd_sltu, test_bdd_sra,
-        test_bdd_srl, test_bdd_sub, test_bdd_xor,
+        test_bdd_srl, test_bdd_sub, test_bdd_xor, test_glwe_blind_rotation,
     },
     blind_rotation::CGGI,
 };
 
+#[test]
+fn test_glwe_blind_rotation_fft64_ref() {
+    test_glwe_blind_rotation::<FFT64Ref>()
+}
+
 #[test]
 fn test_bdd_prepare_fft64_ref() {
     test_bdd_prepare::<CGGI, FFT64Ref>()
@@ -0,0 +1,134 @@
+use poulpy_core::{
+    GGSWEncryptSk, GLWEDecrypt, GLWEEncryptSk, ScratchTakeCore,
+    layouts::{
+        Base2K, Degree, Dnum, Dsize, GGSWLayout, GGSWPreparedFactory, GLWE, GLWELayout, GLWEPlaintext, GLWESecret,
+        GLWESecretPrepared, GLWESecretPreparedFactory, LWEInfos, Rank, TorusPrecision,
+    },
+};
+use poulpy_hal::{
+    api::{ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow},
+    layouts::{Backend, Module, Scratch, ScratchOwned},
+    source::Source,
+};
+use rand::RngCore;
+
+use crate::tfhe::bdd_arithmetic::{FheUintBlocksPrepared, GLWEBlindRotation};
+
+pub fn test_glwe_blind_rotation<BE: Backend>()
+where
+    Module<BE>: ModuleNew<BE>
+        + GLWESecretPreparedFactory<BE>
+        + GGSWPreparedFactory<BE>
+        + GGSWEncryptSk<BE>
+        + GLWEBlindRotation<u32, BE>
+        + GLWEDecrypt<BE>
+        + GLWEEncryptSk<BE>,
+    ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
+    Scratch<BE>: ScratchTakeCore<BE>,
+{
+    let n: Degree = Degree(1 << 11);
+    let base2k: Base2K = Base2K(13);
+    let rank: Rank = Rank(1);
+    let k_glwe: TorusPrecision = TorusPrecision(26);
+    let k_ggsw: TorusPrecision = TorusPrecision(39);
+    let dnum: Dnum = Dnum(3);
+
+    let glwe_infos: GLWELayout = GLWELayout {
+        n,
+        base2k,
+        k: k_glwe,
+        rank,
+    };
+    let ggsw_infos: GGSWLayout = GGSWLayout {
+        n,
+        base2k,
+        k: k_ggsw,
+        rank,
+        dnum,
+        dsize: Dsize(1),
+    };
+
+    let n_glwe: usize = glwe_infos.n().into();
+
+    let module: Module<BE> = Module::<BE>::new(n_glwe as u64);
+    let mut source: Source = Source::new([6u8; 32]);
+    let mut source_xs: Source = Source::new([1u8; 32]);
+    let mut source_xa: Source = Source::new([2u8; 32]);
+    let mut source_xe: Source = Source::new([3u8; 32]);
+
+    let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(1 << 22);
+
+    let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);
+    sk_glwe.fill_ternary_prob(0.5, &mut source_xs);
+    let mut sk_glwe_prep: GLWESecretPrepared<Vec<u8>, BE> = GLWESecretPrepared::alloc_from_infos(&module, &glwe_infos);
+    sk_glwe_prep.prepare(&module, &sk_glwe);
+
+    let mut res: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_infos);
+
+    let mut test_glwe: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(&glwe_infos);
+    let mut data: Vec<i64> = vec![0i64; module.n()];
+    data.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
+    test_glwe.encode_vec_i64(&data, base2k.as_usize().into());
+
+    println!("pt: {}", test_glwe);
+
+    let k: u32 = source.next_u32();
+
+    println!("k: {k}");
+
+    let mut k_enc_prep: FheUintBlocksPrepared<Vec<u8>, u32, BE> =
+        FheUintBlocksPrepared::<Vec<u8>, u32, BE>::alloc(&module, &ggsw_infos);
+    k_enc_prep.encrypt_sk(
+        &module,
+        k,
+        &sk_glwe_prep,
+        &mut source_xa,
+        &mut source_xe,
+        scratch.borrow(),
+    );
+
+    let base: [usize; 2] = [6, 5];
+
+    assert_eq!(base.iter().sum::<usize>(), module.log_n());
+
+    // Starting bit
+    let mut bit_start: usize = 0;
+
+    let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(&glwe_infos);
+
+    for _ in 0..32_usize.div_ceil(module.log_n()) {
+        // By how many bits to left shift
+        let mut bit_step: usize = 0;
+
+        for digit in base {
+            let mask: u32 = (1 << digit) - 1;
+
+            // How many bits to take
+            let bit_size: usize = (32 - bit_start).min(digit);
+
+            module.glwe_blind_rotation(
+                &mut res,
+                &test_glwe,
+                &k_enc_prep,
+                bit_start,
+                bit_size,
+                bit_step,
+                scratch.borrow(),
+            );
+
+            res.decrypt(&module, &mut pt, &sk_glwe_prep, scratch.borrow());
+
+            assert_eq!(
+                (((k >> bit_start) & mask) << bit_step) as i64,
+                pt.decode_coeff_i64(base2k.as_usize().into(), 0)
+            );
+
+            bit_step += digit;
+            bit_start += digit;
+
+            if bit_start >= 32 {
+                break;
+            }
+        }
+    }
+}
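The test encodes the ramp `t[i] = i` into the test polynomial, so after `res = t * X^{-(window << bit_step)}` the constant coefficient is exactly `window << bit_step`; the 32-bit key is swept in windows following the digit split `[6, 5]`, which sums to `log_n = 11`. The expected value behind the assertion, pulled out into a standalone helper for clarity (hypothetical, mirrors the test body):

// Decoded constant coefficient == selected key window, shifted left by bit_step.
fn expected_coeff0(k: u32, bit_start: usize, digit: usize, bit_step: usize) -> i64 {
    let mask: u32 = (1 << digit) - 1;
    (((k >> bit_start) & mask) << bit_step) as i64
}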
@@ -1,5 +1,6 @@
 mod add;
 mod and;
+mod glwe_blind_rotation;
 mod or;
 mod prepare;
 mod sll;
@@ -12,6 +13,7 @@ mod xor;
 
 pub use add::*;
 pub use and::*;
+pub use glwe_blind_rotation::*;
 pub use or::*;
 pub use prepare::*;
 pub use sll::*;