Mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 05:06:44 +01:00)
wip all of Encryption
@@ -1,10 +1,11 @@
use poulpy_hal::{
api::{ScratchAvailable, SvpPPolBytesOf, VecZnxAutomorphism, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
api::{ModuleN, ScratchAvailable, ScratchTakeBasic, SvpPPolAlloc, SvpPPolBytesOf, VecZnxAutomorphism, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
source::Source,
};

use crate::{
ScratchTakeCore,
encryption::compressed::gglwe_ksk::GGLWEKeyCompressedEncryptSk,
layouts::{
GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos,

@@ -16,10 +17,10 @@ impl AutomorphismKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
Module<B>: ModuleN + SvpPPolAlloc<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
{
assert_eq!(module.n() as u32, infos.n());
GLWESwitchingKeyCompressed::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
GLWESwitchingKeyCompressed::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(module, infos.rank_out())
}
}

@@ -39,8 +40,14 @@ pub trait GGLWEAutomorphismKeyCompressedEncryptSk<B: Backend> {

impl<B: Backend> GGLWEAutomorphismKeyCompressedEncryptSk<B> for Module<B>
where
Module<B>: GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxAutomorphism,
Scratch<B>: ScratchAvailable,
Module<B>: ModuleN
+ GGLWEKeyCompressedEncryptSk<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ SvpPPolBytesOf
+ VecZnxAutomorphism
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchAvailable + ScratchTakeBasic + ScratchTakeCore<B>,
{
fn gglwe_automorphism_key_compressed_encrypt_sk<R, S>(
&self,

@@ -70,7 +77,7 @@ where
)
}

let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(self, sk.rank());

{
(0..res.rank_out().into()).for_each(|i| {
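Note on the pattern in the hunk above: the scratch-take calls change from passing an explicit ring degree (e.g. `sk.n()`) to passing the module itself (`self`), so the degree is derived from the module handle. A minimal, self-contained sketch of that idea, using made-up types rather than the real poulpy_hal API:

struct Module { n: usize }

struct Scratch<'a> { buf: &'a mut [i64] }

impl<'a> Scratch<'a> {
    // The caller no longer supplies n; it is read from the module handle,
    // which keeps the degree consistent across every temporary taken here.
    fn take_glwe_secret(self, module: &Module, rank: usize) -> (&'a mut [i64], Scratch<'a>) {
        let buf: &'a mut [i64] = self.buf;
        let (secret, rest) = buf.split_at_mut(module.n * rank);
        (secret, Scratch { buf: rest })
    }
}

fn main() {
    let module = Module { n: 8 };
    let mut backing = vec![0i64; 64];
    let scratch = Scratch { buf: &mut backing };
    let (secret, _rest) = scratch.take_glwe_secret(&module, 2);
    assert_eq!(secret.len(), 16);
}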
@@ -1,6 +1,6 @@
use poulpy_hal::{
api::{
ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
ModuleN, ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
ZnNormalizeInplace,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},

@@ -8,6 +8,7 @@ use poulpy_hal::{
};

use crate::{
ScratchTakeCore,
encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
layouts::{
GGLWE, GGLWEInfos, LWEInfos,

@@ -60,13 +61,14 @@ pub trait GGLWECompressedEncryptSk<B: Backend> {

impl<B: Backend> GGLWECompressedEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B>
Module<B>: ModuleN
+ GLWEEncryptSkInternal<B>
+ VecZnxNormalizeInplace<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VecZnxAddScalarInplace
+ ZnNormalizeInplace<B>,
Scratch<B>: ScratchAvailable,
Scratch<B>: ScratchAvailable + ScratchTakeCore<B>,
{
fn gglwe_compressed_encrypt_sk<R, P, S>(
&self,

@@ -130,7 +132,7 @@ where

let mut source_xa = Source::new(seed);

let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(res);
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self, res);
(0..rank_in).for_each(|col_i| {
(0..dnum).for_each(|d_i| {
// Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
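The where-clauses in these hunks grow because each backend primitive is exposed as its own capability trait (`ModuleN`, `VecZnxNormalizeInplace`, `ScratchTakeCore`, and so on) and every impl lists exactly the set it uses. A small self-contained sketch of that style, with invented trait names standing in for the real poulpy_hal ones:

trait ModuleN { fn n(&self) -> usize; }
trait FillUniform { fn fill_uniform(&self, dst: &mut [i64]); }
trait AddNormal { fn add_normal(&self, dst: &mut [i64]); }

// The bound lists only the capabilities this routine actually calls,
// mirroring how the impls in this commit aggregate `ModuleN + ...`.
fn encrypt_zero<M: ModuleN + FillUniform + AddNormal>(module: &M, ct: &mut [i64]) {
    assert_eq!(ct.len(), module.n());
    module.fill_uniform(ct);
    module.add_normal(ct);
}

struct RefBackend { n: usize }
impl ModuleN for RefBackend { fn n(&self) -> usize { self.n } }
impl FillUniform for RefBackend { fn fill_uniform(&self, dst: &mut [i64]) { dst.fill(1); } }
impl AddNormal for RefBackend { fn add_normal(&self, dst: &mut [i64]) { for x in dst.iter_mut() { *x += 1; } } }

fn main() {
    let module = RefBackend { n: 4 };
    let mut ct = vec![0i64; 4];
    encrypt_zero(&module, &mut ct);
    assert_eq!(ct, vec![2; 4]);
}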
@@ -1,10 +1,11 @@
use poulpy_hal::{
api::{ScratchAvailable, SvpPPolBytesOf, SvpPrepare, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing},
api::{ModuleN, ScratchAvailable, ScratchTakeBasic, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
source::Source,
};

use crate::{
ScratchTakeCore,
encryption::compressed::gglwe_ct::GGLWECompressedEncryptSk,
layouts::{
GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos, RingDegree,

@@ -17,7 +18,7 @@ impl GLWESwitchingKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
Module<B>: ModuleN + SvpPPolAlloc<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
{
(GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+ ScalarZnx::bytes_of(module.n(), infos.rank_in().into())

@@ -59,13 +60,15 @@ pub trait GGLWEKeyCompressedEncryptSk<B: Backend> {

impl<B: Backend> GGLWEKeyCompressedEncryptSk<B> for Module<B>
where
Module<B>: GGLWECompressedEncryptSk<B>
Module<B>: ModuleN
+ GGLWECompressedEncryptSk<B>
+ SvpPPolBytesOf
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VecZnxSwitchRing
+ SvpPrepare<B>,
Scratch<B>: ScratchAvailable,
+ SvpPrepare<B>
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchAvailable + ScratchTakeBasic + ScratchTakeCore<B>,
{
fn gglwe_key_compressed_encrypt_sk<R, SI, SO>(
&self,

@@ -100,7 +103,7 @@ where

let n: usize = sk_in.n().max(sk_out.n()).into();

let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into());
let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(self, sk_in.rank().into());
(0..sk_in.rank().into()).for_each(|i| {
self.vec_znx_switch_ring(
&mut sk_in_tmp.as_vec_znx_mut(),

@@ -110,9 +113,9 @@ where
);
});

let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(RingDegree(n as u32), sk_out.rank());
let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(self, sk_out.rank());
{
let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
let (mut tmp, _) = scratch_2.take_scalar_znx(self, 1);
(0..sk_out.rank().into()).for_each(|i| {
self.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
self.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
@@ -1,16 +1,18 @@
use poulpy_hal::{
api::{
SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
ModuleN, ScratchTakeBasic, SvpApplyDftToDft, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
},
oep::{SvpPPolAllocBytesImpl, VecZnxBigAllocBytesImpl, VecZnxDftAllocBytesImpl},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
source::Source,
};

use crate::{
ScratchTakeCore,
encryption::compressed::gglwe_ksk::GGLWEKeyCompressedEncryptSk,
layouts::{
GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos, Rank, TensorKey,
GetDist, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos, Rank, TensorKey,
compressed::{TensorKeyCompressed, TensorKeyCompressedToMut},
},
};

@@ -19,7 +21,7 @@ impl TensorKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
Module<B>: ModuleN + SvpPPolBytesOf + SvpPPolAlloc<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
{
TensorKey::encrypt_sk_tmp_bytes(module, infos)
}

@@ -35,18 +37,25 @@ pub trait GGLWETensorKeyCompressedEncryptSk<B: Backend> {
scratch: &mut Scratch<B>,
) where
R: TensorKeyCompressedToMut,
S: GLWESecretToRef;
S: GLWESecretToRef + GetDist;
}

impl<B: Backend> GGLWETensorKeyCompressedEncryptSk<B> for Module<B>
where
Module<B>: GGLWEKeyCompressedEncryptSk<B>
Module<B>: ModuleN
+ GGLWEKeyCompressedEncryptSk<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDft<B>
+ VecZnxIdftApplyTmpA<B>
+ VecZnxBigNormalize<B>
+ SvpPrepare<B>,
Scratch<B>:,
+ SvpPrepare<B>
+ SvpPPolAllocBytesImpl<B>
+ SvpPPolBytesOf
+ VecZnxDftAllocBytesImpl<B>
+ VecZnxBigAllocBytesImpl<B>
+ VecZnxDftBytesOf
+ VecZnxBigBytesOf,
Scratch<B>: ScratchTakeBasic + ScratchTakeCore<B>,
{
fn gglwe_tensor_key_encrypt_sk<R, S>(
&self,

@@ -57,9 +66,13 @@ where
scratch: &mut Scratch<B>,
) where
R: TensorKeyCompressedToMut,
S: GLWESecretToRef,
S: GLWESecretToRef + GetDist,
{
let res: &mut TensorKeyCompressed<&mut [u8]> = &mut res.to_mut();

let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(self, res.rank_out());
sk_dft_prep.prepare(self, sk);

let sk: &GLWESecret<&[u8]> = &sk.to_ref();

#[cfg(debug_assertions)]

@@ -68,21 +81,18 @@ where
assert_eq!(res.n(), sk.n());
}

let n: usize = sk.n().into();
// let n: usize = sk.n().into();
let rank: usize = res.rank_out().into();

let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(sk.n(), res.rank_out());
sk_dft_prep.prepare(self, sk, scratch_1);

let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n, rank, 1);
let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, rank, 1);

for i in 0..rank {
self.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
}

let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n, 1, 1);
let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(sk.n(), Rank(1));
let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(n, 1, 1);
let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(self, 1, 1);
let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(self, Rank(1));
let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(self, 1, 1);

let mut source_xa: Source = Source::new(seed_xa);

@@ -125,6 +135,7 @@ impl<DataSelf: DataMut> TensorKeyCompressed<DataSelf> {
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
GLWESecret<DataSk>: GetDist,
Module<B>: GGLWETensorKeyCompressedEncryptSk<B>,
{
module.gglwe_tensor_key_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
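For context on the temporaries taken above (sk_dft, sk_ij, sk_ij_dft): a tensor key encrypts every pairwise product s_i * s_j of the secret's components, which the code drives with an outer loop over i and an inner loop over j >= i. A toy, self-contained version of that loop, using a naive negacyclic convolution in place of the DFT-domain svp_apply calls in the diff:

// Naive product in Z[X]/(X^n + 1); the real code does this in the DFT domain.
fn negacyclic_mul(a: &[i64], b: &[i64]) -> Vec<i64> {
    let n = a.len();
    let mut out = vec![0i64; n];
    for i in 0..n {
        for j in 0..n {
            let k = (i + j) % n;
            let sign = if i + j >= n { -1 } else { 1 };
            out[k] += sign * a[i] * b[j];
        }
    }
    out
}

fn main() {
    let rank = 2;
    let sk = vec![vec![1i64, 0, 0, 0], vec![0i64, 1, 0, 0]]; // s_0 = 1, s_1 = X
    for i in 0..rank {
        for j in i..rank {
            // Upper-triangular pairs only, as in the (0..rank) / (i..rank) loops above.
            let prod = negacyclic_mul(&sk[i], &sk[j]);
            println!("s_{i} * s_{j} = {prod:?}");
        }
    }
}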
@@ -1,10 +1,11 @@
use poulpy_hal::{
api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
api::{ModuleN, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
source::Source,
};

use crate::{
ScratchTakeCore,
encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
layouts::{
GGSW, GGSWInfos, GLWEInfos, LWEInfos,

@@ -40,8 +41,8 @@ pub trait GGSWCompressedEncryptSk<B: Backend> {

impl<B: Backend> GGSWCompressedEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>:,
Module<B>: ModuleN + GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>: ScratchTakeCore<B>,
{
fn ggsw_compressed_encrypt_sk<R, P, S>(
&self,

@@ -74,7 +75,7 @@ where
let cols: usize = rank + 1;
let dsize: usize = res.dsize().into();

let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&res.glwe_layout());
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(self, &res.glwe_layout());

let mut source = Source::new(seed_xa);
@@ -1,6 +1,6 @@
use poulpy_hal::{
api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
ModuleN, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxSwitchRing,

@@ -9,15 +9,18 @@ use poulpy_hal::{
source::Source,
};

use crate::layouts::{
use crate::{
ScratchTakeCore,
layouts::{
AutomorphismKey, AutomorphismKeyToMut, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, GLWESwitchingKey, LWEInfos,
},
};

impl AutomorphismKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<BE>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
Module<BE>: ModuleN + SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolAlloc<BE>,
{
assert_eq!(
infos.rank_in(),

@@ -76,7 +79,8 @@ where

impl<BE: Backend> GGLWEAutomorphismKeyEncryptSk<BE> for Module<BE>
where
Module<BE>: VecZnxAddScalarInplace
Module<BE>: ModuleN
+ VecZnxAddScalarInplace
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<BE>
+ VecZnxDftApply<BE>

@@ -93,8 +97,10 @@ where
+ SvpPrepare<BE>
+ VecZnxSwitchRing
+ SvpPPolBytesOf
+ VecZnxAutomorphism,
Scratch<BE>: ScratchAvailable,
+ VecZnxAutomorphism
+ SvpPPolAlloc<BE>
+ SvpPPolBytesOf,
Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
fn gglwe_automorphism_key_encrypt_sk<A, B>(
&self,

@@ -126,7 +132,7 @@ where
)
}

let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(self, sk.rank());

{
(0..res.rank_out().into()).for_each(|i| {
@@ -1,11 +1,11 @@
use poulpy_hal::{
api::{ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
api::{ModuleN, ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
source::Source,
};

use crate::{
encryption::glwe_ct::GLWEEncryptSk,
encryption::glwe_ct::GLWEEncryptSk, layouts::GLWEInfos, ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GGLWEToMut, GLWE, GLWEPlaintext, LWEInfos,
prepared::{GLWESecretPrepared, GLWESecretPreparedToRef},

@@ -47,8 +47,8 @@ pub trait GGLWEEncryptSk<B: Backend> {

impl<B: Backend> GGLWEEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>: ScratchAvailable,
Module<B>: ModuleN + GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>: ScratchAvailable + ScratchTakeCore<B>,
{
fn gglwe_encrypt_sk<R, P, S>(
&self,

@@ -111,7 +111,7 @@ where
let base2k: usize = res.base2k().into();
let rank_in: usize = res.rank_in().into();

let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(res);
let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self, &res.glwe_layout());
// For each input column (i.e. rank) produces a GGLWE ciphertext of rank_out+1 columns
//
// Example for ksk rank 2 to rank 3:
@@ -1,22 +1,26 @@
use poulpy_hal::{
api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
ModuleN, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
ScratchTakeBasic,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
source::Source,
};

use crate::layouts::{
use crate::{
ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, RingDegree, prepared::GLWESecretPrepared,
},
};

impl GLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
Module<B>: ModuleN + SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolAlloc<B>,
{
(GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+ ScalarZnx::bytes_of(module.n(), infos.rank_in().into())

@@ -42,7 +46,8 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxAddScalarInplace
Module<B>: ModuleN
+ VecZnxAddScalarInplace
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>

@@ -58,8 +63,9 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
+ VecZnxSub
+ SvpPrepare<B>
+ VecZnxSwitchRing
+ SvpPPolBytesOf,
Scratch<B>: ScratchAvailable,
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchAvailable + ScratchTakeBasic + ScratchTakeCore<B>,
{
#[cfg(debug_assertions)]
{

@@ -75,7 +81,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {

let n: usize = sk_in.n().max(sk_out.n()).into();

let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into());
let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(module, sk_in.rank().into());
(0..sk_in.rank().into()).for_each(|i| {
module.vec_znx_switch_ring(
&mut sk_in_tmp.as_vec_znx_mut(),

@@ -85,9 +91,9 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
);
});

let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(RingDegree(n as u32), sk_out.rank());
let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(module, sk_out.rank());
{
let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
let (mut tmp, _) = scratch_2.take_scalar_znx(module, 1);
(0..sk_out.rank().into()).for_each(|i| {
module.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
@@ -1,29 +1,33 @@
use poulpy_hal::{
api::{
SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
ModuleN, ScratchTakeBasic, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddScalarInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxSubInplace, VecZnxSwitchRing,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
oep::VecZnxBigAllocBytesImpl,
source::Source,
};

use crate::layouts::{
GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, Rank, RingDegree, TensorKey, prepared::GLWESecretPrepared,
use crate::{
ScratchTakeCore,
layouts::{
GetDist, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, Rank, TensorKey, prepared::GLWESecretPrepared,
},
};

impl TensorKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
Module<B>: ModuleN + SvpPPolBytesOf + SvpPPolAlloc<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
{
GLWESecretPrepared::bytes_of(module, infos.rank_out())
+ module.bytes_of_vec_znx_dft(infos.rank_out().into(), 1)
+ module.bytes_of_vec_znx_big(1, 1)
+ module.bytes_of_vec_znx_dft(1, 1)
+ GLWESecret::bytes_of(RingDegree(module.n() as u32), Rank(1))
+ GLWESecret::bytes_of(module, Rank(1))
+ GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
}
}

@@ -37,7 +41,9 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
source_xe: &mut Source,
scratch: &mut Scratch<B>,
) where
Module<B>: SvpApplyDftToDft<B>
GLWESecret<DataSk>: GetDist,
Module<B>: ModuleN
+ SvpApplyDftToDft<B>
+ VecZnxIdftApplyTmpA<B>
+ VecZnxAddScalarInplace
+ VecZnxDftBytesOf

@@ -55,8 +61,11 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
+ VecZnxSub
+ SvpPrepare<B>
+ VecZnxSwitchRing
+ SvpPPolBytesOf,
Scratch<B>:,
+ SvpPPolBytesOf
+ VecZnxBigAllocBytesImpl<B>
+ VecZnxBigBytesOf
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchTakeBasic + ScratchTakeCore<B>,
{
#[cfg(debug_assertions)]
{

@@ -64,21 +73,21 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
assert_eq!(self.n(), sk.n());
}

let n: RingDegree = sk.n();
// let n: RingDegree = sk.n();
let rank: Rank = self.rank_out();

let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(n, rank);
sk_dft_prep.prepare(module, sk, scratch_1);
let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(module, rank);
sk_dft_prep.prepare(module, sk);

let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n.into(), rank.into(), 1);
let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(module, rank.into(), 1);

(0..rank.into()).for_each(|i| {
module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
});

let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n.into(), 1, 1);
let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(n, Rank(1));
let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(n.into(), 1, 1);
let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(module, 1, 1);
let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(module, Rank(1));
let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(module, 1, 1);

(0..rank.into()).for_each(|i| {
(i..rank.into()).for_each(|j| {
@@ -1,11 +1,11 @@
use poulpy_hal::{
api::{VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
api::{ModuleN, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, ZnxZero},
source::Source,
};

use crate::{
SIGMA,
SIGMA, ScratchTakeCore,
encryption::glwe_ct::GLWEEncryptSkInternal,
layouts::{
GGSW, GGSWInfos, GGSWToMut, GLWE, GLWEInfos, LWEInfos,

@@ -44,8 +44,8 @@ pub trait GGSWEncryptSk<B: Backend> {

impl<B: Backend> GGSWEncryptSk<B> for Module<B>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>:,
Module<B>: ModuleN + GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
Scratch<B>: ScratchTakeCore<B>,
{
fn ggsw_encrypt_sk<R, P, S>(
&self,

@@ -80,7 +80,7 @@ where
let dsize: usize = res.dsize().into();
let cols: usize = (rank + 1).into();

let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&res.glwe_layout());
let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(self, &res.glwe_layout());

for row_i in 0..res.dnum().into() {
tmp_pt.data.zero();
@@ -1,9 +1,9 @@
use poulpy_hal::{
api::{
ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace,
ModuleN, ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace,
VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply,
VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, ScratchTakeBasic,
},
layouts::{Backend, DataMut, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, VecZnxToMut, ZnxInfos, ZnxZero},
source::Source,

@@ -19,19 +19,19 @@ use crate::{
};

impl GLWE<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
where
A: GLWEInfos,
Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Module<BE>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
{
let size: usize = infos.size();
assert_eq!(module.n() as u32, infos.n());
module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.bytes_of_vec_znx_dft(1, size)
}
pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
where
A: GLWEInfos,
Module<B>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
Module<BE>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
{
let size: usize = infos.size();
assert_eq!(module.n() as u32, infos.n());
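The renames from `B: Backend` to `BE: Backend` in this file are consistent with methods that declare their own generic parameter named `B` (see `fn gglwe_automorphism_key_encrypt_sk<A, B>` earlier in this diff); Rust rejects a method generic that reuses the name of an impl-level generic. A minimal, self-contained illustration of the clash the rename avoids, with placeholder types rather than the real poulpy ones:

trait Backend {}
struct Fft64;
impl Backend for Fft64 {}

struct Module<BE: Backend> { n: usize, _be: std::marker::PhantomData<BE> }

impl<BE: Backend> Module<BE> {
    // If the impl parameter were also called `B`, declaring `fn payload_len<B: ...>`
    // here would fail with E0403 (duplicate generic parameter name).
    fn payload_len<B: AsRef<[u8]>>(&self, body: B) -> usize {
        self.n.min(body.as_ref().len())
    }
}

fn main() {
    let module: Module<Fft64> = Module { n: 4, _be: std::marker::PhantomData };
    assert_eq!(module.payload_len([1u8, 2, 3]), 3);
}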
@@ -42,68 +42,68 @@ impl GLWE<Vec<u8>> {
}

impl<D: DataMut> GLWE<D> {
pub fn encrypt_sk<R, P, S, B: Backend>(
pub fn encrypt_sk<R, P, S, BE: Backend>(
&mut self,
module: &Module<B>,
module: &Module<BE>,
pt: &P,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>,
Module<B>: GLWEEncryptSk<B>,
S: GLWESecretPreparedToRef<BE>,
Module<BE>: GLWEEncryptSk<BE>,
{
module.glwe_encrypt_sk(self, pt, sk, source_xa, source_xe, scratch);
}

pub fn encrypt_zero_sk<S, B: Backend>(
pub fn encrypt_zero_sk<S, BE: Backend>(
&mut self,
module: &Module<B>,
module: &Module<BE>,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
S: GLWESecretPreparedToRef<B>,
Module<B>: GLWEEncryptZeroSk<B>,
S: GLWESecretPreparedToRef<BE>,
Module<BE>: GLWEEncryptZeroSk<BE>,
{
module.glwe_encrypt_zero_sk(self, sk, source_xa, source_xe, scratch);
}

pub fn encrypt_pk<P, K, B: Backend>(
pub fn encrypt_pk<P, K, BE: Backend>(
&mut self,
module: &Module<B>,
module: &Module<BE>,
pt: &P,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>,
Module<B>: GLWEEncryptPk<B>,
K: GLWEPublicKeyPreparedToRef<BE>,
Module<BE>: GLWEEncryptPk<BE>,
{
module.glwe_encrypt_pk(self, pt, pk, source_xu, source_xe, scratch);
}

pub fn encrypt_zero_pk<K, B: Backend>(
pub fn encrypt_zero_pk<K, BE: Backend>(
&mut self,
module: &Module<B>,
module: &Module<BE>,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
K: GLWEPublicKeyPreparedToRef<B>,
Module<B>: GLWEEncryptZeroPk<B>,
K: GLWEPublicKeyPreparedToRef<BE>,
Module<BE>: GLWEEncryptZeroPk<BE>,
{
module.glwe_encrypt_zero_pk(self, pk, source_xu, source_xe, scratch);
}
}

pub trait GLWEEncryptSk<B: Backend> {
pub trait GLWEEncryptSk<BE: Backend> {
fn glwe_encrypt_sk<R, P, S>(
&self,
res: &mut R,

@@ -111,17 +111,17 @@ pub trait GLWEEncryptSk<B: Backend> {
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>;
S: GLWESecretPreparedToRef<BE>;
}

impl<B: Backend> GLWEEncryptSk<B> for Module<B>
impl<BE: Backend> GLWEEncryptSk<BE> for Module<BE>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Scratch<B>: ScratchAvailable,
Module<BE>: GLWEEncryptSkInternal<BE> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Scratch<BE>: ScratchAvailable,
{
fn glwe_encrypt_sk<R, P, S>(
&self,

@@ -130,18 +130,18 @@ where
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>,
S: GLWESecretPreparedToRef<BE>,
{
let mut res: GLWE<&mut [u8]> = res.to_mut();
let pt: GLWEPlaintext<&[u8]> = pt.to_ref();

#[cfg(debug_assertions)]
{
let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
let sk: GLWESecretPrepared<&[u8], BE> = sk.to_ref();
assert_eq!(res.rank(), sk.rank());
assert_eq!(res.n(), self.n() as u32);
assert_eq!(sk.n(), self.n() as u32);

@@ -171,23 +171,23 @@ where
}
}

pub trait GLWEEncryptZeroSk<B: Backend> {
pub trait GLWEEncryptZeroSk<BE: Backend> {
fn glwe_encrypt_zero_sk<R, S>(
&self,
res: &mut R,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
S: GLWESecretPreparedToRef<B>;
S: GLWESecretPreparedToRef<BE>;
}

impl<B: Backend> GLWEEncryptZeroSk<B> for Module<B>
impl<BE: Backend> GLWEEncryptZeroSk<BE> for Module<BE>
where
Module<B>: GLWEEncryptSkInternal<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Scratch<B>: ScratchAvailable,
Module<BE>: GLWEEncryptSkInternal<BE> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
Scratch<BE>: ScratchAvailable,
{
fn glwe_encrypt_zero_sk<R, S>(
&self,

@@ -195,16 +195,16 @@ where
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
S: GLWESecretPreparedToRef<B>,
S: GLWESecretPreparedToRef<BE>,
{
let mut res: GLWE<&mut [u8]> = res.to_mut();

#[cfg(debug_assertions)]
{
let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
let sk: GLWESecretPrepared<&[u8], BE> = sk.to_ref();
assert_eq!(res.rank(), sk.rank());
assert_eq!(res.n(), self.n() as u32);
assert_eq!(sk.n(), self.n() as u32);

@@ -233,7 +233,7 @@ where
}
}

pub trait GLWEEncryptPk<B: Backend> {
pub trait GLWEEncryptPk<BE: Backend> {
fn glwe_encrypt_pk<R, P, K>(
&self,
res: &mut R,

@@ -241,16 +241,16 @@ pub trait GLWEEncryptPk<B: Backend> {
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>;
K: GLWEPublicKeyPreparedToRef<BE>;
}

impl<B: Backend> GLWEEncryptPk<B> for Module<B>
impl<BE: Backend> GLWEEncryptPk<BE> for Module<BE>
where
Module<B>: GLWEEncryptPkInternal<B>,
Module<BE>: GLWEEncryptPkInternal<BE>,
{
fn glwe_encrypt_pk<R, P, K>(
&self,

@@ -259,32 +259,32 @@ where
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>,
K: GLWEPublicKeyPreparedToRef<BE>,
{
self.glwe_encrypt_pk_internal(res, Some((pt, 0)), pk, source_xu, source_xe, scratch);
}
}

pub trait GLWEEncryptZeroPk<B: Backend> {
pub trait GLWEEncryptZeroPk<BE: Backend> {
fn glwe_encrypt_zero_pk<R, K>(
&self,
res: &mut R,
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
K: GLWEPublicKeyPreparedToRef<B>;
K: GLWEPublicKeyPreparedToRef<BE>;
}

impl<B: Backend> GLWEEncryptZeroPk<B> for Module<B>
impl<BE: Backend> GLWEEncryptZeroPk<BE> for Module<BE>
where
Module<B>: GLWEEncryptPkInternal<B>,
Module<BE>: GLWEEncryptPkInternal<BE>,
{
fn glwe_encrypt_zero_pk<R, K>(
&self,

@@ -292,10 +292,10 @@ where
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
K: GLWEPublicKeyPreparedToRef<B>,
K: GLWEPublicKeyPreparedToRef<BE>,
{
self.glwe_encrypt_pk_internal(
res,
@@ -308,7 +308,7 @@ where
}
}

pub(crate) trait GLWEEncryptPkInternal<B: Backend> {
pub(crate) trait GLWEEncryptPkInternal<BE: Backend> {
fn glwe_encrypt_pk_internal<R, P, K>(
&self,
res: &mut R,

@@ -316,22 +316,25 @@ pub(crate) trait GLWEEncryptPkInternal<B: Backend> {
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>;
K: GLWEPublicKeyPreparedToRef<BE>;
}

impl<B: Backend> GLWEEncryptPkInternal<B> for Module<B>
impl<BE: Backend> GLWEEncryptPkInternal<BE> for Module<BE>
where
Module<B>: SvpPrepare<B>
+ SvpApplyDftToDft<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddNormal<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>:,
Module<BE>: SvpPrepare<BE>
+ SvpApplyDftToDft<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxBigAddNormal<BE>
+ VecZnxBigAddSmallInplace<BE>
+ VecZnxBigNormalize<BE>
+ SvpPPolBytesOf
+ ModuleN
+ VecZnxDftBytesOf,
Scratch<BE>: ScratchTakeBasic,
{
fn glwe_encrypt_pk_internal<R, P, K>(
&self,

@@ -340,14 +343,14 @@ where
pk: &K,
source_xu: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: GLWEToMut,
P: GLWEPlaintextToRef,
K: GLWEPublicKeyPreparedToRef<B>,
K: GLWEPublicKeyPreparedToRef<BE>,
{
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
let pk: &GLWEPublicKeyPrepared<&[u8], B> = &pk.to_ref();
let pk: &GLWEPublicKeyPrepared<&[u8], BE> = &pk.to_ref();

#[cfg(debug_assertions)]
{

@@ -365,10 +368,10 @@ where
let cols: usize = (res.rank() + 1).into();

// Generates u according to the underlying secret distribution.
let (mut u_dft, scratch_1) = scratch.take_svp_ppol(res.n().into(), 1);
let (mut u_dft, scratch_1) = scratch.take_svp_ppol(self, 1);

{
let (mut u, _) = scratch_1.take_scalar_znx(res.n().into(), 1);
let (mut u, _) = scratch_1.take_scalar_znx(self, 1);
match pk.dist {
Distribution::NONE => panic!(
"invalid public key: SecretDistribution::NONE, ensure it has been correctly intialized through \

@@ -387,7 +390,7 @@ where

// ct[i] = pk[i] * u + ei (+ m if col = i)
(0..cols).for_each(|i| {
let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(res.n().into(), 1, size_pk);
let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, 1, size_pk);
// ci_dft = DFT(u) * DFT(pk[i])
self.svp_apply_dft_to_dft(&mut ci_dft, 0, &u_dft, 0, &pk.data, i);

@@ -418,7 +421,7 @@ where
}
}

pub(crate) trait GLWEEncryptSkInternal<B: Backend> {
pub(crate) trait GLWEEncryptSkInternal<BE: Backend> {
fn glwe_encrypt_sk_internal<R, P, S>(
&self,
base2k: usize,

@@ -431,29 +434,30 @@ pub(crate) trait GLWEEncryptSkInternal<B: Backend> {
source_xa: &mut Source,
source_xe: &mut Source,
sigma: f64,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: VecZnxToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>;
S: GLWESecretPreparedToRef<BE>;
}

impl<B: Backend> GLWEEncryptSkInternal<B> for Module<B>
impl<BE: Backend> GLWEEncryptSkInternal<BE> for Module<BE>
where
Module<B>: VecZnxDftBytesOf
+ VecZnxBigNormalize<B>
+ VecZnxDftApply<B>
+ SvpApplyDftToDftInplace<B>
+ VecZnxIdftApplyConsume<B>
Module<BE>: ModuleN
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<BE>
+ VecZnxDftApply<BE>
+ SvpApplyDftToDftInplace<BE>
+ VecZnxIdftApplyConsume<BE>
+ VecZnxNormalizeTmpBytes
+ VecZnxFillUniform
+ VecZnxSubInplace
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxNormalizeInplace<BE>
+ VecZnxAddNormal
+ VecZnxNormalize<B>
+ VecZnxNormalize<BE>
+ VecZnxSub,
Scratch<B>: ScratchAvailable,
Scratch<BE>: ScratchAvailable + ScratchTakeBasic,
{
fn glwe_encrypt_sk_internal<R, P, S>(
&self,

@@ -467,14 +471,14 @@ where
source_xa: &mut Source,
source_xe: &mut Source,
sigma: f64,
scratch: &mut Scratch<B>,
scratch: &mut Scratch<BE>,
) where
R: VecZnxToMut,
P: GLWEPlaintextToRef,
S: GLWESecretPreparedToRef<B>,
S: GLWESecretPreparedToRef<BE>,
{
let ct: &mut VecZnx<&mut [u8]> = &mut res.to_mut();
let sk: GLWESecretPrepared<&[u8], B> = sk.to_ref();
let sk: GLWESecretPrepared<&[u8], BE> = sk.to_ref();

#[cfg(debug_assertions)]
{

@@ -490,11 +494,11 @@ where

let size: usize = ct.size();

let (mut c0, scratch_1) = scratch.take_vec_znx(ct.n(), 1, size);
let (mut c0, scratch_1) = scratch.take_vec_znx(self, 1, size);
c0.zero();

{
let (mut ci, scratch_2) = scratch_1.take_vec_znx(ct.n(), 1, size);
let (mut ci, scratch_2) = scratch_1.take_vec_znx(self, 1, size);

// ct[i] = uniform
// ct[0] -= c[i] * s[i],

@@ -504,7 +508,7 @@ where
// ct[i] = uniform (+ pt)
self.vec_znx_fill_uniform(base2k, ct, col_ct, source_xa);

let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(ct.n(), 1, size);
let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(self, 1, size);

// ci = ct[i] - pt
// i.e. we act as we sample ct[i] already as uniform + pt

@@ -522,7 +526,7 @@ where
}

self.svp_apply_dft_to_dft_inplace(&mut ci_dft, 0, &sk.data, i - 1);
let ci_big: VecZnxBig<&mut [u8], B> = self.vec_znx_idft_apply_consume(ci_dft);
let ci_big: VecZnxBig<&mut [u8], BE> = self.vec_znx_idft_apply_consume(ci_dft);

// use c[0] as buffer, which is overwritten later by the normalization step
self.vec_znx_big_normalize(base2k, &mut ci, 0, base2k, &ci_big, 0, scratch_3);

@@ -47,7 +47,7 @@ where

// Its ok to allocate scratch space here since pk is usually generated only once.
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::encrypt_sk_tmp_bytes(self, res));

let mut tmp: GLWE<Vec<u8>> = GLWE::alloc_from_infos(res);
let mut tmp: GLWE<Vec<u8>> = GLWE::alloc_from_infos(self, res);

tmp.encrypt_zero_sk(self, sk, source_xa, source_xe, scratch.borrow());
res.dist = sk.dist;
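The comments in `glwe_encrypt_sk_internal` above ("ct[i] = uniform", "ct[0] -= c[i] * s[i]") describe the usual (R)LWE shape of the ciphertext. A scalar toy analogue, ignoring polynomials, base-2^k limbs and normalization, just to make the equation concrete (all names invented, not part of poulpy):

// Body folds in -a*s plus the plaintext and a small error; the mask a is uniform.
fn toy_encrypt(pt: i64, s: i64, a: i64, e: i64, q: i64) -> (i64, i64) {
    let b = (pt + e - a * s).rem_euclid(q);
    (a, b)
}

// Decryption recovers pt + e, to be rounded away in a real scheme.
fn toy_decrypt(ct: (i64, i64), s: i64, q: i64) -> i64 {
    (ct.1 + ct.0 * s).rem_euclid(q)
}

fn main() {
    let (q, s, a, e, pt) = (1i64 << 16, 3, 12345, 2, 42);
    let ct = toy_encrypt(pt, s, a, e, q);
    assert_eq!(toy_decrypt(ct, s, q), pt + e);
}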
@@ -1,6 +1,6 @@
use poulpy_hal::{
api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
ModuleN, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxSubInplace, VecZnxSwitchRing,

@@ -9,18 +9,22 @@ use poulpy_hal::{
source::Source,
};

use crate::layouts::{
GGLWEInfos, GLWESecret, GLWESwitchingKey, GLWEToLWESwitchingKey, LWEInfos, LWESecret, Rank, prepared::GLWESecretPrepared,
use crate::{
ScratchTakeCore,
layouts::{
GGLWEInfos, GLWESecret, GLWESwitchingKey, GLWEToLWESwitchingKey, LWEInfos, LWESecret, Rank,
prepared::GLWESecretPrepared,
},
};

impl GLWEToLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
Module<B>: ModuleN + SvpPPolBytesOf + SvpPPolAlloc<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{
GLWESecretPrepared::bytes_of(module, infos.rank_in())
+ (GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
+ (GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) | GLWESecret::bytes_of(module, infos.rank_in()))
}
}

@@ -37,7 +41,8 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
) where
DLwe: DataRef,
DGlwe: DataRef,
Module<B>: VecZnxAutomorphismInplace<B>
Module<B>: ModuleN
+ VecZnxAutomorphismInplace<B>
+ VecZnxAddScalarInplace
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<B>

@@ -54,15 +59,16 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
+ VecZnxSub
+ SvpPrepare<B>
+ VecZnxSwitchRing
+ SvpPPolBytesOf,
Scratch<B>: ScratchAvailable,
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchAvailable + ScratchTakeCore<B>,
{
#[cfg(debug_assertions)]
{
assert!(sk_lwe.n().0 <= module.n() as u32);
}

let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(sk_glwe.n(), Rank(1));
let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(module, Rank(1));
sk_lwe_as_glwe.data.zero();
sk_lwe_as_glwe.data.at_mut(0, 0)[..sk_lwe.n().into()].copy_from_slice(sk_lwe.data.at(0, 0));
module.vec_znx_automorphism_inplace(-1, &mut sk_lwe_as_glwe.data.as_vec_znx_mut(), 0, scratch_1);
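The `vec_znx_automorphism_inplace(-1, ...)` call above applies what the -1 argument suggests is the ring automorphism X -> X^{-1} to the LWE secret embedded into a GLWE secret. A self-contained toy version of that map on Z[X]/(X^n + 1), kept separate from the real vec_znx layout and offered only as an illustration of the index/sign pattern:

// X^i -> X^{-i} = -X^{n-i} modulo X^n + 1 (for i > 0); the constant term is fixed.
fn automorphism_inv(poly: &[i64]) -> Vec<i64> {
    let n = poly.len();
    let mut out = vec![0i64; n];
    out[0] = poly[0];
    for i in 1..n {
        out[n - i] = -poly[i];
    }
    out
}

fn main() {
    // 1 + 2X + 3X^2 + 4X^3 maps to 1 - 4X - 3X^2 - 2X^3.
    assert_eq!(automorphism_inv(&[1, 2, 3, 4]), vec![1, -4, -3, -2]);
}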
@@ -1,64 +1,79 @@
use poulpy_hal::{
api::{ScratchOwnedAlloc, ScratchOwnedBorrow, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace},
layouts::{Backend, DataMut, DataRef, Module, ScratchOwned, Zn, ZnxView, ZnxViewMut},
layouts::{Backend, DataMut, Module, ScratchOwned, Zn, ZnxView, ZnxViewMut},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
source::Source,
};

use crate::{
encryption::{SIGMA, SIGMA_BOUND},
layouts::{LWE, LWEInfos, LWEPlaintext, LWESecret},
layouts::{LWE, LWEInfos, LWEPlaintext, LWESecret, LWEToMut, LWEPlaintextToRef, LWESecretToRef},
};

impl<DataSelf: DataMut> LWE<DataSelf> {
pub fn encrypt_sk<DataPt, DataSk, B>(
&mut self,
module: &Module<B>,
pt: &LWEPlaintext<DataPt>,
sk: &LWESecret<DataSk>,
source_xa: &mut Source,
source_xe: &mut Source,
) where
DataPt: DataRef,
DataSk: DataRef,
Module<B>: ZnFillUniform + ZnAddNormal + ZnNormalizeInplace<B>,
B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,

pub fn encrypt_sk<P, S, M, BE: Backend>(&mut self, module: &M, pt: &P, sk: &S, source_xa: &mut Source, source_xe: &mut Source)
where
P: LWEPlaintextToRef,
S: LWESecretToRef,
M: LWEEncryptSk<BE>,
BE: Backend + ScratchOwnedAllocImpl<BE> + ScratchOwnedBorrowImpl<BE>,
{
module.lwe_encrypt_sk(self, pt, sk, source_xa, source_xe);
}
}

pub trait LWEEncryptSk<BE: Backend>
where
Self: Sized + ZnFillUniform + ZnAddNormal + ZnNormalizeInplace<BE>,
{
fn lwe_encrypt_sk<R, P, S>(&self, res: &mut R, pt: &P, sk: &S, source_xa: &mut Source, source_xe: &mut Source)
where
R: LWEToMut,
P: LWEPlaintextToRef,
S: LWESecretToRef,
BE: Backend + ScratchOwnedAllocImpl<BE> + ScratchOwnedBorrowImpl<BE>,
{
let res: &mut LWE<&mut [u8]> = &mut res.to_mut();
let pt: &LWEPlaintext<&[u8]> = &pt.to_ref();
let sk: &LWESecret<&[u8]> = &sk.to_ref();

#[cfg(debug_assertions)]
{
assert_eq!(self.n(), sk.n())
assert_eq!(res.n(), sk.n())
}

let base2k: usize = self.base2k().into();
let k: usize = self.k().into();
let base2k: usize = res.base2k().into();
let k: usize = res.k().into();

module.zn_fill_uniform((self.n() + 1).into(), base2k, &mut self.data, 0, source_xa);
self.zn_fill_uniform((res.n() + 1).into(), base2k, &mut res.data, 0, source_xa);

let mut tmp_znx: Zn<Vec<u8>> = Zn::alloc(1, 1, self.size());
let mut tmp_znx: Zn<Vec<u8>> = Zn::alloc(1, 1, res.size());

let min_size = self.size().min(pt.size());
let min_size = res.size().min(pt.size());

(0..min_size).for_each(|i| {
tmp_znx.at_mut(0, i)[0] = pt.data.at(0, i)[0]
- self.data.at(0, i)[1..]
- res.data.at(0, i)[1..]
.iter()
.zip(sk.data.at(0, 0))
.map(|(x, y)| x * y)
.sum::<i64>();
});

(min_size..self.size()).for_each(|i| {
tmp_znx.at_mut(0, i)[0] -= self.data.at(0, i)[1..]
(min_size..res.size()).for_each(|i| {
tmp_znx.at_mut(0, i)[0] -= res.data.at(0, i)[1..]
.iter()
.zip(sk.data.at(0, 0))
.map(|(x, y)| x * y)
.sum::<i64>();
});

module.zn_add_normal(
self.zn_add_normal(
1,
base2k,
&mut self.data,
&mut res.data,
0,
k,
source_xe,

@@ -66,7 +81,7 @@ impl<DataSelf: DataMut> LWE<DataSelf> {
SIGMA_BOUND,
);

module.zn_normalize_inplace(
self.zn_normalize_inplace(
1,
base2k,
&mut tmp_znx,

@@ -74,8 +89,13 @@ impl<DataSelf: DataMut> LWE<DataSelf> {
ScratchOwned::alloc(size_of::<i64>()).borrow(),
);

(0..self.size()).for_each(|i| {
self.data.at_mut(0, i)[0] = tmp_znx.at(0, i)[0];
(0..res.size()).for_each(|i| {
res.data.at_mut(0, i)[0] = tmp_znx.at(0, i)[0];
});
}
}

impl<BE: Backend> LWEEncryptSk<BE> for Module<BE> where
Self: Sized + ZnFillUniform + ZnAddNormal + ZnNormalizeInplace<BE>,
{
}
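The LWE change above moves the encryption body out of the inherent `LWE::encrypt_sk` into a `LWEEncryptSk` trait with a default method, so `Module<BE>` only needs an empty impl. A toy, self-contained version of that refactor pattern (names invented, not the real API):

trait Normalize {
    fn normalize(&self, x: i64) -> i64;
}

trait LweEncryptLike: Normalize {
    // Default body: callers only need `self` to provide the primitive ops.
    fn lwe_encrypt(&self, pt: i64, mask: i64) -> i64 {
        self.normalize(pt.wrapping_add(mask))
    }
}

struct Module { q: i64 }
impl Normalize for Module {
    fn normalize(&self, x: i64) -> i64 { x.rem_euclid(self.q) }
}
// Empty impl: the default method body is reused as-is, mirroring
// `impl<BE: Backend> LWEEncryptSk<BE> for Module<BE> { }` in the diff.
impl LweEncryptLike for Module {}

fn main() {
    let module = Module { q: 17 };
    assert_eq!(module.lwe_encrypt(20, 5), 8);
}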
@@ -1,6 +1,6 @@
use poulpy_hal::{
api::{
SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
ModuleN, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
VecZnxSwitchRing,

@@ -9,16 +9,19 @@ use poulpy_hal::{
source::Source,
};

use crate::layouts::{
GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWESwitchingKey, Rank, RingDegree,
use crate::{
ScratchTakeCore,
layouts::{
GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWESwitchingKey, Rank,
prepared::GLWESecretPrepared,
},
};

impl LWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
Module<B>: ModuleN + SvpPPolBytesOf + SvpPPolAlloc<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
{
debug_assert_eq!(
infos.dsize().0,

@@ -35,7 +38,7 @@ impl LWESwitchingKey<Vec<u8>> {
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
GLWESecret::bytes_of(RingDegree(module.n() as u32), Rank(1))
GLWESecret::bytes_of(module, Rank(1))
+ GLWESecretPrepared::bytes_of(module, Rank(1))
+ GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
}

@@ -54,7 +57,8 @@ impl<D: DataMut> LWESwitchingKey<D> {
) where
DIn: DataRef,
DOut: DataRef,
Module<B>: VecZnxAutomorphismInplace<B>
Module<B>: ModuleN
+ VecZnxAutomorphismInplace<B>
+ VecZnxAddScalarInplace
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<B>

@@ -71,8 +75,9 @@ impl<D: DataMut> LWESwitchingKey<D> {
+ VecZnxSub
+ SvpPrepare<B>
+ VecZnxSwitchRing
+ SvpPPolBytesOf,
Scratch<B>:,
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchTakeCore<B>,
{
#[cfg(debug_assertions)]
{

@@ -81,8 +86,8 @@ impl<D: DataMut> LWESwitchingKey<D> {
assert!(self.n().0 <= module.n() as u32);
}

let (mut sk_in_glwe, scratch_1) = scratch.take_glwe_secret(self.n(), Rank(1));
let (mut sk_out_glwe, scratch_2) = scratch_1.take_glwe_secret(self.n(), Rank(1));
let (mut sk_in_glwe, scratch_1) = scratch.take_glwe_secret(module, Rank(1));
let (mut sk_out_glwe, scratch_2) = scratch_1.take_glwe_secret(module, Rank(1));

sk_out_glwe.data.at_mut(0, 0)[..sk_lwe_out.n().into()].copy_from_slice(sk_lwe_out.data.at(0, 0));
sk_out_glwe.data.at_mut(0, 0)[sk_lwe_out.n().into()..].fill(0);
@@ -1,6 +1,6 @@
use poulpy_hal::{
api::{
ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
ModuleN, ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
VecZnxSubInplace, VecZnxSwitchRing,

@@ -9,13 +9,16 @@ use poulpy_hal::{
source::Source,
};

use crate::layouts::{GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank, RingDegree};
use crate::{
ScratchTakeCore,
layouts::{GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank},
};

impl LWEToGLWESwitchingKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWEInfos,
Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
Module<B>: ModuleN + SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolAlloc<B>,
{
debug_assert_eq!(
infos.rank_in(),

@@ -23,7 +26,7 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
"rank_in != 1 is not supported for LWEToGLWESwitchingKey"
);
GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
+ GLWESecret::bytes_of(RingDegree(module.n() as u32), infos.rank_in())
+ GLWESecret::bytes_of(module, infos.rank_in())
}
}

@@ -40,7 +43,8 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
) where
DLwe: DataRef,
DGlwe: DataRef,
Module<B>: VecZnxAutomorphismInplace<B>
Module<B>: ModuleN
+ VecZnxAutomorphismInplace<B>
+ VecZnxAddScalarInplace
+ VecZnxDftBytesOf
+ VecZnxBigNormalize<B>

@@ -57,8 +61,9 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
+ VecZnxSub
+ SvpPrepare<B>
+ VecZnxSwitchRing
+ SvpPPolBytesOf,
Scratch<B>: ScratchAvailable,
+ SvpPPolBytesOf
+ SvpPPolAlloc<B>,
Scratch<B>: ScratchAvailable + ScratchTakeCore<B>,
{
#[cfg(debug_assertions)]
{

@@ -67,7 +72,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
assert!(sk_lwe.n().0 <= module.n() as u32);
}

let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(sk_glwe.n(), Rank(1));
let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(module, Rank(1));
sk_lwe_as_glwe.data.at_mut(0, 0)[..sk_lwe.n().into()].copy_from_slice(sk_lwe.data.at(0, 0));
sk_lwe_as_glwe.data.at_mut(0, 0)[sk_lwe.n().into()..].fill(0);
module.vec_znx_automorphism_inplace(-1, &mut sk_lwe_as_glwe.data.as_vec_znx_mut(), 0, scratch_1);