mirror of https://github.com/arnaucube/poulpy.git

Distinguish between gglwe_to_ggsw key and tensor_key + update key representation
@@ -15,6 +15,7 @@ poulpy-hal = {workspace = true}
poulpy-backend = {workspace = true}
itertools = {workspace = true}
byteorder = {workspace = true}
bytemuck = {workspace = true}
once_cell = {workspace = true}

[[bench]]
@@ -1,11 +1,10 @@
use poulpy_hal::{
api::VecZnxAutomorphism,
layouts::{Backend, DataMut, GaloisElement, Module, Scratch},
api::{VecZnxAutomorphism, VecZnxAutomorphismInplace},
layouts::{Backend, CyclotomicOrder, DataMut, GaloisElement, Module, Scratch},
};

use crate::{
ScratchTakeCore,
automorphism::glwe_ct::GLWEAutomorphism,
GLWEKeyswitch, ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GGLWEPreparedToRef, GGLWEToMut, GGLWEToRef, GLWE, GLWEAutomorphismKey, GetGaloisElement,
SetGaloisElement,
@@ -45,14 +44,10 @@ impl<DataSelf: DataMut> GLWEAutomorphismKey<DataSelf> {
}
}

impl<BE: Backend> GLWEAutomorphismKeyAutomorphism<BE> for Module<BE> where
Self: GaloisElement + GLWEAutomorphism<BE> + VecZnxAutomorphism
{
}

pub trait GLWEAutomorphismKeyAutomorphism<BE: Backend>
impl<BE: Backend> GLWEAutomorphismKeyAutomorphism<BE> for Module<BE>
where
Self: GaloisElement + GLWEAutomorphism<BE> + VecZnxAutomorphism,
Self: GaloisElement + GLWEKeyswitch<BE> + VecZnxAutomorphism + VecZnxAutomorphismInplace<BE> + CyclotomicOrder,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn glwe_automorphism_key_automorphism_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
@@ -68,7 +63,6 @@ where
R: GGLWEToMut + SetGaloisElement + GGLWEInfos,
A: GGLWEToRef + GetGaloisElement + GGLWEInfos,
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
Scratch<BE>: ScratchTakeCore<BE>,
{
assert!(
res.dnum().as_u32() <= a.dnum().as_u32(),
@@ -163,3 +157,22 @@ where
res.set_p((res.p() * key.p()) % self.cyclotomic_order());
}
}

pub trait GLWEAutomorphismKeyAutomorphism<BE: Backend> {
fn glwe_automorphism_key_automorphism_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
R: GGLWEInfos,
A: GGLWEInfos,
K: GGLWEInfos;

fn glwe_automorphism_key_automorphism<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: GGLWEToMut + SetGaloisElement + GGLWEInfos,
A: GGLWEToRef + GetGaloisElement + GGLWEInfos,
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos;

fn glwe_automorphism_key_automorphism_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
where
R: GGLWEToMut + SetGaloisElement + GetGaloisElement + GGLWEInfos,
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos;
}
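Side note (not part of the diff): the `res.set_p((res.p() * key.p()) % self.cyclotomic_order())` line above is plain exponent bookkeeping — applying the key's automorphism X -> X^p_key on top of an already-recorded X -> X^p_res composes multiplicatively modulo the cyclotomic order. A minimal standalone sketch of that arithmetic, with illustrative names and an assumed power-of-two ring (cyclotomic order 2N):

```rust
// Composing Galois elements: X -> X^p_res followed by X -> X^p_key is X -> X^(p_res * p_key),
// reduced modulo the cyclotomic order. rem_euclid picks the non-negative representative
// for this sketch; the diff itself uses `%`.
fn compose_galois_elements(p_res: i64, p_key: i64, cyclotomic_order: i64) -> i64 {
    (p_res * p_key).rem_euclid(cyclotomic_order)
}

fn main() {
    let two_n = 16; // cyclotomic order for N = 8 (assumption for the example)
    assert_eq!(compose_galois_elements(3, 5, two_n), 15); // 3 * 5 = 15 mod 16
    assert_eq!(compose_galois_elements(5, 13, two_n), 1); // 5 and 13 are inverses mod 16
    println!("composed: {}", compose_galois_elements(3, 5, two_n));
}
```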
@@ -7,8 +7,8 @@ use crate::{
GGSWExpandRows, ScratchTakeCore,
automorphism::glwe_ct::GLWEAutomorphism,
layouts::{
GGLWEInfos, GGLWEPreparedToRef, GGSW, GGSWInfos, GGSWToMut, GGSWToRef, GetGaloisElement,
prepared::{GLWETensorKeyPrepared, GLWETensorKeyPreparedToRef},
GGLWEInfos, GGLWEPreparedToRef, GGLWEToGGSWKeyPrepared, GGLWEToGGSWKeyPreparedToRef, GGSW, GGSWInfos, GGSWToMut,
GGSWToRef, GetGaloisElement,
},
};

@@ -36,7 +36,7 @@ impl<D: DataMut> GGSW<D> {
where
A: GGSWToRef,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
M: GGSWAutomorphism<BE>,
{
@@ -46,7 +46,7 @@ impl<D: DataMut> GGSW<D> {
pub fn automorphism_inplace<K, T, M, BE: Backend>(&mut self, module: &M, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
where
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
M: GGSWAutomorphism<BE>,
{
@@ -67,11 +67,8 @@ where
K: GGLWEInfos,
T: GGLWEInfos,
{
let out_size: usize = res_infos.size();
let ci_dft: usize = self.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
let ks_internal: usize = self.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos);
let expand: usize = self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos);
ci_dft + (ks_internal.max(expand))
self.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos)
.max(self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos))
}

fn ggsw_automorphism<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
@@ -79,12 +76,12 @@ where
R: GGSWToMut,
A: GGSWToRef,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let a: &GGSW<&[u8]> = &a.to_ref();
let tsk: &GLWETensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
let tsk: &GGLWEToGGSWKeyPrepared<&[u8], BE> = &tsk.to_ref();

assert_eq!(res.dsize(), a.dsize());
assert!(res.dnum() <= a.dnum());
@@ -104,11 +101,11 @@ where
where
R: GGSWToMut,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let tsk: &GLWETensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
let tsk: &GGLWEToGGSWKeyPrepared<&[u8], BE> = &tsk.to_ref();

// Keyswitch the j-th row of the col 0
for row in 0..res.dnum().as_usize() {
@@ -1,13 +1,13 @@
use poulpy_hal::{
api::{
ScratchTakeBasic, VecZnxAutomorphismInplace, VecZnxBigAutomorphismInplace, VecZnxBigSubSmallInplace,
VecZnxBigSubSmallNegateInplace,
ScratchTakeBasic, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace, VecZnxNormalize,
},
layouts::{Backend, DataMut, Module, Scratch, VecZnxBig},
};

use crate::{
GLWEKeyswitch, ScratchTakeCore, keyswitch_internal,
GLWEKeySwitchInternal, GLWEKeyswitch, ScratchTakeCore,
layouts::{GGLWEInfos, GGLWEPreparedToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetGaloisElement, LWEInfos},
};

@@ -101,13 +101,71 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
}
}

pub trait GLWEAutomorphism<BE: Backend>
pub trait GLWEAutomorphism<BE: Backend> {
fn glwe_automorphism_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
R: GLWEInfos,
A: GLWEInfos,
K: GGLWEInfos;

fn glwe_automorphism<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
A: GLWEToRef,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_add<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
A: GLWEToRef,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_add_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_sub<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
A: GLWEToRef,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_sub_negate<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
A: GLWEToRef,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_sub_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;

fn glwe_automorphism_sub_negate_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
K: GetGaloisElement + GGLWEPreparedToRef<BE> + GGLWEInfos;
}

impl<BE: Backend> GLWEAutomorphism<BE> for Module<BE>
where
Self: GLWEKeyswitch<BE>
Self: Sized
+ GLWEKeyswitch<BE>
+ GLWEKeySwitchInternal<BE>
+ VecZnxNormalize<BE>
+ VecZnxAutomorphismInplace<BE>
+ VecZnxBigAutomorphismInplace<BE>
+ VecZnxBigSubSmallInplace<BE>
+ VecZnxBigSubSmallNegateInplace<BE>,
+ VecZnxBigSubSmallNegateInplace<BE>
+ VecZnxBigAddSmallInplace<BE>
+ VecZnxBigNormalize<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn glwe_automorphism_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
@@ -160,7 +218,7 @@ where
let a: &GLWE<&[u8]> = &a.to_ref();

let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, a, key, scratch_1);
let mut res_big: VecZnxBig<_, BE> = self.glwe_keyswitch_internal(res_dft, a, key, scratch_1);

for i in 0..res.rank().as_usize() + 1 {
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
@@ -186,7 +244,7 @@ where
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();

let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, res, key, scratch_1);
let mut res_big: VecZnxBig<_, BE> = self.glwe_keyswitch_internal(res_dft, res, key, scratch_1);

for i in 0..res.rank().as_usize() + 1 {
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
@@ -214,7 +272,7 @@ where
let a: &GLWE<&[u8]> = &a.to_ref();

let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, a, key, scratch_1);
let mut res_big: VecZnxBig<_, BE> = self.glwe_keyswitch_internal(res_dft, a, key, scratch_1);

for i in 0..res.rank().as_usize() + 1 {
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
@@ -242,7 +300,7 @@ where
let a: &GLWE<&[u8]> = &a.to_ref();

let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, a, key, scratch_1);
let mut res_big: VecZnxBig<_, BE> = self.glwe_keyswitch_internal(res_dft, a, key, scratch_1);

for i in 0..res.rank().as_usize() + 1 {
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
@@ -268,7 +326,7 @@ where
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();

let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, res, key, scratch_1);
let mut res_big: VecZnxBig<_, BE> = self.glwe_keyswitch_internal(res_dft, res, key, scratch_1);

for i in 0..res.rank().as_usize() + 1 {
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
@@ -294,7 +352,7 @@ where
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();

let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, res, key, scratch_1);
let mut res_big: VecZnxBig<_, BE> = self.glwe_keyswitch_internal(res_dft, res, key, scratch_1);

for i in 0..res.rank().as_usize() + 1 {
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
@@ -311,12 +369,3 @@ where
}
}
}

impl<BE: Backend> GLWEAutomorphism<BE> for Module<BE> where
Self: GLWEKeyswitch<BE>
+ VecZnxAutomorphismInplace<BE>
+ VecZnxBigAutomorphismInplace<BE>
+ VecZnxBigSubSmallInplace<BE>
+ VecZnxBigSubSmallNegateInplace<BE>
{
}
@@ -1,17 +1,16 @@
use poulpy_hal::{
api::{
ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftAddInplace, VecZnxDftApply,
VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
ScratchAvailable, ScratchTakeBasic, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
},
layouts::{Backend, DataMut, Module, Scratch, VmpPMat, ZnxInfos},
layouts::{Backend, DataMut, Module, Scratch, VecZnxBig},
};

use crate::{
GLWECopy, ScratchTakeCore,
GGLWEProduct, GLWECopy, ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GGLWEToRef, GGSW, GGSWInfos, GGSWToMut, GLWEInfos, LWEInfos,
prepared::{GLWETensorKeyPrepared, GLWETensorKeyPreparedToRef},
GGLWE, GGLWEInfos, GGLWEToGGSWKeyPrepared, GGLWEToGGSWKeyPreparedToRef, GGLWEToRef, GGSW, GGSWInfos, GGSWToMut, GLWE,
GLWEInfos, LWEInfos,
},
};

@@ -31,7 +30,7 @@ impl<D: DataMut> GGSW<D> {
where
M: GGSWFromGGLWE<BE>,
G: GGLWEToRef,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.ggsw_from_gglwe(self, gglwe, tsk, scratch);
@@ -54,12 +53,12 @@ where
where
R: GGSWToMut,
A: GGLWEToRef,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let a: &GGLWE<&[u8]> = &a.to_ref();
let tsk: &GLWETensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
let tsk: &GGLWEToGGSWKeyPrepared<&[u8], BE> = &tsk.to_ref();

assert_eq!(res.rank(), a.rank_out());
assert_eq!(res.dnum(), a.dnum());
@@ -85,177 +84,140 @@ pub trait GGSWFromGGLWE<BE: Backend> {
where
R: GGSWToMut,
A: GGLWEToRef,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>;
}

impl<BE: Backend> GGSWExpandRows<BE> for Module<BE> where
Self: Sized
+ ModuleN
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigBytesOf
+ VecZnxNormalizeTmpBytes
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigBytesOf
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<BE>
+ VecZnxDftCopy<BE>
+ VmpApplyDftToDft<BE>
+ VmpApplyDftToDftAdd<BE>
+ VecZnxDftAddInplace<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxIdftApplyTmpA<BE>
+ VecZnxNormalize<BE>
{
pub trait GGSWExpandRows<BE: Backend> {
fn ggsw_expand_rows_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
where
R: GGSWInfos,
A: GGLWEInfos;

fn ggsw_expand_row<R, T>(&self, res: &mut R, tsk: &T, scratch: &mut Scratch<BE>)
where
R: GGSWToMut,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>;
}

pub trait GGSWExpandRows<BE: Backend>
impl<BE: Backend> GGSWExpandRows<BE> for Module<BE>
where
Self: Sized
+ ModuleN
+ VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigBytesOf
+ VecZnxNormalizeTmpBytes
+ VecZnxDftApply<BE>
+ VecZnxDftCopy<BE>
+ VmpApplyDftToDft<BE>
+ VmpApplyDftToDftAdd<BE>
+ VecZnxDftAddInplace<BE>
Self: GGLWEProduct<BE>
+ VecZnxBigNormalize<BE>
+ VecZnxIdftApplyTmpA<BE>
+ VecZnxNormalize<BE>,
+ VecZnxBigNormalizeTmpBytes
+ VecZnxBigBytesOf
+ VecZnxDftBytesOf
+ VecZnxDftApply<BE>
+ VecZnxNormalize<BE>
+ VecZnxBigAddSmallInplace<BE>
+ VecZnxIdftApplyConsume<BE>,
{
fn ggsw_expand_rows_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
where
R: GGSWInfos,
A: GGLWEInfos,
{
let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
let size_in: usize = res_infos
.k()
.div_ceil(tsk_infos.base2k())
.div_ceil(tsk_infos.dsize().into()) as usize;
let base2k_in: usize = res_infos.base2k().into();
let base2k_tsk: usize = tsk_infos.base2k().into();

let tmp_dft_i: usize = self.bytes_of_vec_znx_dft((tsk_infos.rank_out() + 1).into(), tsk_size);
let tmp_a: usize = self.bytes_of_vec_znx_dft(1, size_in);
let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
tsk_size,
size_in,
size_in,
(tsk_infos.rank_in()).into(), // Verify if rank+1
(tsk_infos.rank_out()).into(), // Verify if rank+1
tsk_size,
);
let tmp_idft: usize = self.bytes_of_vec_znx_big(1, tsk_size);
let norm: usize = self.vec_znx_normalize_tmp_bytes();
let rank: usize = res_infos.rank().into();
let cols: usize = rank + 1;

tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
let res_size = res_infos.size();
let a_size: usize = (res_infos.size() * base2k_in).div_ceil(base2k_tsk);

let a_dft = self.bytes_of_vec_znx_dft(cols - 1, a_size);
let res_dft = self.bytes_of_vec_znx_dft(cols, a_size);
let gglwe_prod: usize = self.gglwe_product_dft_tmp_bytes(res_size, a_size, tsk_infos);
let normalize = self.vec_znx_big_normalize_tmp_bytes();

(a_dft + res_dft + gglwe_prod).max(normalize)
}
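Side note (not part of the diff): the `a_size` computation above converts a limb count from the ciphertext's base 2^base2k_in to the key's base 2^base2k_tsk while keeping at least the same precision. A minimal standalone sketch of just that arithmetic (names are illustrative):

```rust
// A value carried on `size_in` limbs of `base2k_in` bits each needs
// ceil(size_in * base2k_in / base2k_tsk) limbs of `base2k_tsk` bits
// to represent the same precision.
fn limbs_in_target_base(size_in: usize, base2k_in: usize, base2k_tsk: usize) -> usize {
    (size_in * base2k_in).div_ceil(base2k_tsk)
}

fn main() {
    // 4 limbs of 19 bits (76 bits of precision) re-expressed with 22-bit limbs.
    assert_eq!(limbs_in_target_base(4, 19, 22), 4); // ceil(76 / 22) = 4
    // 6 limbs of 12 bits (72 bits of precision) with 17-bit limbs.
    assert_eq!(limbs_in_target_base(6, 12, 17), 5); // ceil(72 / 17) = 5
    println!("ok");
}
```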
fn ggsw_expand_row<R, T>(&self, res: &mut R, tsk: &T, scratch: &mut Scratch<BE>)
where
R: GGSWToMut,
T: GLWETensorKeyPreparedToRef<BE>,
T: GGLWEToGGSWKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
let tsk: &GLWETensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
let tsk: &GGLWEToGGSWKeyPrepared<&[u8], BE> = &tsk.to_ref();

let basek_in: usize = res.base2k().into();
let basek_tsk: usize = tsk.base2k().into();
let base2k_in: usize = res.base2k().into();
let base2k_tsk: usize = tsk.base2k().into();

assert!(scratch.available() >= self.ggsw_expand_rows_tmp_bytes(res, tsk));

let rank: usize = res.rank().into();
let cols: usize = rank + 1;

let a_size: usize = (res.size() * basek_in).div_ceil(basek_tsk);
let a_size: usize = (res.size() * base2k_in).div_ceil(base2k_tsk);

// Keyswitch the j-th row of the col 0
for row_i in 0..res.dnum().into() {
let a = &res.at(row_i, 0).data;
for row in 0..res.dnum().as_usize() {
let (mut a_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols - 1, a_size);

// Pre-compute DFT of (a0, a1, a2)
let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, a_size);
{
let glwe_mi_1: &GLWE<&[u8]> = &res.at(row, 0);

if basek_in == basek_tsk {
for i in 0..cols {
self.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
}
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(self.n(), 1, a_size);
for i in 0..cols {
self.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
self.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
if base2k_in == base2k_tsk {
for col_i in 0..cols - 1 {
self.vec_znx_dft_apply(1, 0, &mut a_dft, col_i, glwe_mi_1.data(), col_i + 1);
}
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(self.n(), 1, a_size);
for i in 0..cols - 1 {
self.vec_znx_normalize(
base2k_tsk,
&mut a_conv,
0,
base2k_in,
glwe_mi_1.data(),
i + 1,
scratch_2,
);
self.vec_znx_dft_apply(1, 0, &mut a_dft, i, &a_conv, 0);
}
}
}

for col_j in 1..cols {
// Example for rank 3:
// Example for rank 3:
//
// Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
// actually composed of that many dnum and we focus on a specific row here
// implicitly given ci_dft.
//
// # Input
//
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
// col 1: (0, 0, 0, 0)
// col 2: (0, 0, 0, 0)
// col 3: (0, 0, 0, 0)
//
// # Output
//
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
// col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
for col in 1..cols {
let (mut res_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, tsk.size()); // Todo optimise

// Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
//
// Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
// actually composed of that many dnum and we focus on a specific row here
// implicitly given ci_dft.
// # Example for col=1
//
// # Input
//
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
// col 1: (0, 0, 0, 0)
// col 2: (0, 0, 0, 0)
// col 3: (0, 0, 0, 0)
//
// # Output
//
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
// col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
// a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
// +
// a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
// +
// a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
// =
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
self.gglwe_product_dft(&mut res_dft, &a_dft, tsk.at(col - 1), scratch_2);

let dsize: usize = tsk.dsize().into();

let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, tsk.size());
let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(self, 1, ci_dft.size().div_ceil(dsize));

{
// Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
//
// # Example for col=1
//
// a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
// +
// a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
// +
// a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
// =
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
for col_i in 1..cols {
let pmat: &VmpPMat<&[u8], BE> = &tsk.at(col_i - 1, col_j - 1).data; // Selects Enc(s[i]s[j])

// Extracts a[i] and multiplies with Enc(s[i]s[j])
for di in 0..dsize {
tmp_a.set_size((ci_dft.size() + di) / dsize);

// Small optimization for dsize > 2
// VMP produces some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduces
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
tmp_dft_i.set_size(tsk.size() - ((dsize - di) as isize - 2).max(0) as usize);

self.vec_znx_dft_copy(dsize, dsize - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
if di == 0 && col_i == 1 {
self.vmp_apply_dft_to_dft(&mut tmp_dft_i, &tmp_a, pmat, scratch_3);
} else {
self.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
}
}
}
}
let mut res_big: VecZnxBig<&mut [u8], BE> = self.vec_znx_idft_apply_consume(res_dft);

// Adds -(sum a[i] * s[i]) + m) on the i-th column of tmp_idft_i
//
@@ -266,18 +228,17 @@ where
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 -(a0s0 + a1s1 + a2s2) + M[i], x1, x2)
// =
// (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
self.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(self, 1, tsk.size());
for i in 0..cols {
self.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
self.vec_znx_big_add_small_inplace(&mut res_big, col, res.at(row, 0).data(), 0);

for j in 0..cols {
self.vec_znx_big_normalize(
basek_in,
&mut res.at_mut(row_i, col_j).data,
i,
basek_tsk,
&tmp_idft,
0,
scratch_3,
res.base2k().as_usize(),
res.at_mut(row, col).data_mut(),
j,
tsk.base2k().as_usize(),
&res_big,
j,
scratch_2,
);
}
}
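Side note (not part of the diff): the `set_size` call inside the `dsize` loop above implements the commented noise optimization — the trailing limbs that would only carry aggregated VMP error are skipped. A standalone sketch of the size arithmetic only (names are illustrative):

```rust
// For digit di (weight 2^{di * B}) the last (dsize - di - 2) limbs of the aggregated
// product are dominated by noise, so the working size is reduced accordingly,
// never going below the full size for the last two digits.
fn truncated_vmp_size(tsk_size: usize, dsize: usize, di: usize) -> usize {
    tsk_size - ((dsize - di) as isize - 2).max(0) as usize
}

fn main() {
    let (tsk_size, dsize) = (8, 4);
    // di = 0 drops dsize - 2 = 2 limbs; the last digits drop none.
    let sizes: Vec<usize> = (0..dsize)
        .map(|di| truncated_vmp_size(tsk_size, dsize, di))
        .collect();
    assert_eq!(sizes, vec![6, 7, 8, 8]);
    println!("{:?}", sizes);
}
```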
@@ -1,5 +1,5 @@
use poulpy_hal::{
api::ScratchTakeBasic,
api::{ScratchTakeBasic, VecZnxNormalize, VecZnxNormalizeTmpBytes},
layouts::{Backend, DataMut, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
};

@@ -8,11 +8,10 @@ use crate::{
layouts::{GGLWEInfos, GGLWEPreparedToRef, GLWE, GLWEInfos, GLWELayout, GLWEToMut, LWE, LWEInfos, LWEToRef},
};

impl<BE: Backend> GLWEFromLWE<BE> for Module<BE> where Self: GLWEKeyswitch<BE> {}

pub trait GLWEFromLWE<BE: Backend>
impl<BE: Backend> GLWEFromLWE<BE> for Module<BE>
where
Self: GLWEKeyswitch<BE>,
Self: GLWEKeyswitch<BE> + VecZnxNormalizeTmpBytes + VecZnxNormalize<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn glwe_from_lwe_tmp_bytes<R, A, K>(&self, glwe_infos: &R, lwe_infos: &A, key_infos: &K) -> usize
where
@@ -41,7 +40,6 @@ where
R: GLWEToMut,
A: LWEToRef,
K: GGLWEPreparedToRef<BE> + GGLWEInfos,
Scratch<BE>: ScratchTakeCore<BE>,
{
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
let lwe: &LWE<&[u8]> = &lwe.to_ref();
@@ -105,6 +103,23 @@ where
}
}

pub trait GLWEFromLWE<BE: Backend>
where
Self: GLWEKeyswitch<BE>,
{
fn glwe_from_lwe_tmp_bytes<R, A, K>(&self, glwe_infos: &R, lwe_infos: &A, key_infos: &K) -> usize
where
R: GLWEInfos,
A: LWEInfos,
K: GGLWEInfos;

fn glwe_from_lwe<R, A, K>(&self, res: &mut R, lwe: &A, ksk: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
A: LWEToRef,
K: GGLWEPreparedToRef<BE> + GGLWEInfos;
}

impl GLWE<Vec<u8>> {
pub fn from_lwe_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, glwe_infos: &R, lwe_infos: &A, key_infos: &K) -> usize
where
poulpy-core/src/encryption/compressed/gglwe_to_ggsw_key.rs (new file, 124 lines)
@@ -0,0 +1,124 @@
use poulpy_hal::{
api::{ModuleN, ScratchTakeBasic, VecZnxCopy},
layouts::{Backend, DataMut, Module, Scratch},
source::Source,
};

use crate::{
GGLWECompressedEncryptSk, GetDistribution, ScratchTakeCore,
layouts::{
GGLWEInfos, GGLWEToGGSWKeyCompressed, GGLWEToGGSWKeyCompressedToMut, GLWEInfos, GLWESecret, GLWESecretTensor,
GLWESecretTensorFactory, GLWESecretToRef,
prepared::{GLWESecretPrepared, GLWESecretPreparedFactory},
},
};

impl GGLWEToGGSWKeyCompressed<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<M, A, BE: Backend>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GGLWEToGGSWKeyCompressedEncryptSk<BE>,
{
module.gglwe_to_ggsw_key_encrypt_sk_tmp_bytes(infos)
}
}

impl<DataSelf: DataMut> GGLWEToGGSWKeyCompressed<DataSelf> {
pub fn encrypt_sk<M, S, BE: Backend>(
&mut self,
module: &M,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
M: GGLWEToGGSWKeyCompressedEncryptSk<BE>,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.gglwe_to_ggsw_key_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
}
}

pub trait GGLWEToGGSWKeyCompressedEncryptSk<BE: Backend> {
fn gglwe_to_ggsw_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos;

fn gglwe_to_ggsw_key_encrypt_sk<R, S>(
&self,
res: &mut R,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
R: GGLWEToGGSWKeyCompressedToMut + GGLWEInfos,
S: GLWESecretToRef + GetDistribution + GLWEInfos;
}

impl<BE: Backend> GGLWEToGGSWKeyCompressedEncryptSk<BE> for Module<BE>
where
Self: ModuleN + GGLWECompressedEncryptSk<BE> + GLWESecretTensorFactory<BE> + GLWESecretPreparedFactory<BE> + VecZnxCopy,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn gglwe_to_ggsw_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
let sk_prepared: usize = GLWESecretPrepared::bytes_of(self, infos.rank());
let sk_tensor: usize = GLWESecretTensor::bytes_of_from_infos(infos);
let gglwe_encrypt: usize = self.gglwe_compressed_encrypt_sk_tmp_bytes(infos);
let sk_ij = GLWESecret::bytes_of(self.n().into(), infos.rank());
(sk_prepared + sk_tensor + sk_ij) + gglwe_encrypt.max(self.glwe_secret_tensor_prepare_tmp_bytes(infos.rank()))
}

fn gglwe_to_ggsw_key_encrypt_sk<R, S>(
&self,
res: &mut R,
sk: &S,
seed_xa: [u8; 32],
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
R: GGLWEToGGSWKeyCompressedToMut + GGLWEInfos,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
{
assert_eq!(res.rank(), sk.rank());
assert_eq!(res.n(), sk.n());

let res: &mut GGLWEToGGSWKeyCompressed<&mut [u8]> = &mut res.to_mut();
let rank: usize = res.rank_out().as_usize();

let (mut sk_prepared, scratch_1) = scratch.take_glwe_secret_prepared(self, res.rank());
let (mut sk_tensor, scratch_2) = scratch_1.take_glwe_secret_tensor(self.n().into(), res.rank());
sk_prepared.prepare(self, sk);
sk_tensor.prepare(self, sk, scratch_2);

let (mut sk_ij, scratch_3) = scratch_2.take_scalar_znx(self.n(), rank);

let mut source_xa = Source::new(seed_xa);

for i in 0..rank {
for j in 0..rank {
self.vec_znx_copy(
&mut sk_ij.as_vec_znx_mut(),
j,
&sk_tensor.at(i, j).as_vec_znx(),
0,
);
}

let (seed_xa_tmp, _) = source_xa.branch();

res.at_mut(i).encrypt_sk(
self,
&sk_ij,
&sk_prepared,
seed_xa_tmp,
source_xe,
scratch_3,
);
}
}
}
@@ -1,17 +1,15 @@
use poulpy_hal::{
api::{
ModuleN, ScratchTakeBasic, SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, VecZnxBigBytesOf, VecZnxBigNormalize,
VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyTmpA,
},
api::ScratchTakeBasic,
layouts::{Backend, DataMut, Module, Scratch},
source::Source,
};

use crate::{
GGLWECompressedEncryptSk, GLWETensorKeyEncryptSk, GetDistribution, ScratchTakeCore,
GGLWECompressedEncryptSk, GetDistribution, ScratchTakeCore,
layouts::{
GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretPrepared, GLWESecretPreparedFactory, GLWESecretToRef,
GLWETensorKeyCompressedAtMut, LWEInfos, Rank, compressed::GLWETensorKeyCompressed,
GGLWECompressedSeedMut, GGLWECompressedToMut, GGLWEInfos, GGLWELayout, GLWEInfos, GLWESecretPrepared,
GLWESecretPreparedFactory, GLWESecretTensor, GLWESecretTensorFactory, GLWESecretToRef,
compressed::GLWETensorKeyCompressed,
},
};

@@ -34,7 +32,7 @@ impl<DataSelf: DataMut> GLWETensorKeyCompressed<DataSelf> {
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
S: GLWESecretToRef + GetDistribution,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
M: GLWETensorKeyCompressedEncryptSk<BE>,
{
module.glwe_tensor_key_compressed_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
@@ -46,7 +44,7 @@ pub trait GLWETensorKeyCompressedEncryptSk<BE: Backend> {
where
A: GGLWEInfos;

fn glwe_tensor_key_compressed_encrypt_sk<R, S, D>(
fn glwe_tensor_key_compressed_encrypt_sk<R, S>(
&self,
res: &mut R,
sk: &S,
@@ -54,40 +52,38 @@ pub trait GLWETensorKeyCompressedEncryptSk<BE: Backend> {
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
D: DataMut,
R: GLWETensorKeyCompressedAtMut<D> + GGLWEInfos,
S: GLWESecretToRef + GetDistribution;
R: GGLWECompressedToMut + GGLWEInfos + GGLWECompressedSeedMut,
S: GLWESecretToRef + GetDistribution + GLWEInfos;
}

impl<BE: Backend> GLWETensorKeyCompressedEncryptSk<BE> for Module<BE>
where
Self: ModuleN
+ GGLWECompressedEncryptSk<BE>
+ GLWETensorKeyEncryptSk<BE>
+ VecZnxDftApply<BE>
+ SvpApplyDftToDft<BE>
+ VecZnxIdftApplyTmpA<BE>
+ VecZnxBigNormalize<BE>
+ SvpPrepare<BE>
+ SvpPPolBytesOf
+ VecZnxDftBytesOf
+ VecZnxBigBytesOf
+ GLWESecretPreparedFactory<BE>,
Self: GGLWECompressedEncryptSk<BE> + GLWESecretPreparedFactory<BE> + GLWESecretTensorFactory<BE>,
Scratch<BE>: ScratchTakeBasic + ScratchTakeCore<BE>,
{
fn glwe_tensor_key_compressed_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
GLWESecretPrepared::bytes_of(self, infos.rank_out())
+ self.bytes_of_vec_znx_dft(infos.rank_out().into(), 1)
+ self.bytes_of_vec_znx_big(1, 1)
+ self.bytes_of_vec_znx_dft(1, 1)
+ GLWESecret::bytes_of(self.n().into(), Rank(1))
+ self.gglwe_compressed_encrypt_sk_tmp_bytes(infos)
let sk_prepared: usize = GLWESecretPrepared::bytes_of(self, infos.rank_out());
let sk_tensor: usize = GLWESecretTensor::bytes_of_from_infos(infos);

let tensor_infos: GGLWELayout = GGLWELayout {
n: infos.n(),
base2k: infos.base2k(),
k: infos.k(),
rank_in: GLWESecretTensor::pairs(infos.rank().into()).into(),
rank_out: infos.rank_out(),
dnum: infos.dnum(),
dsize: infos.dsize(),
};

let gglwe_encrypt: usize = self.gglwe_compressed_encrypt_sk_tmp_bytes(&tensor_infos);

(sk_prepared + sk_tensor) + gglwe_encrypt.max(self.glwe_secret_tensor_prepare_tmp_bytes(infos.rank()))
}

fn glwe_tensor_key_compressed_encrypt_sk<R, S, D>(
fn glwe_tensor_key_compressed_encrypt_sk<R, S>(
&self,
res: &mut R,
sk: &S,
@@ -95,62 +91,24 @@ where
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
D: DataMut,
R: GGLWEInfos + GLWETensorKeyCompressedAtMut<D>,
S: GLWESecretToRef + GetDistribution,
R: GGLWEInfos + GGLWECompressedToMut + GGLWECompressedSeedMut,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
{
let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(self, res.rank());
sk_dft_prep.prepare(self, sk);
assert_eq!(res.rank_out(), sk.rank());
assert_eq!(res.n(), sk.n());

let sk: &GLWESecret<&[u8]> = &sk.to_ref();
let (mut sk_prepared, scratch_1) = scratch.take_glwe_secret_prepared(self, res.rank());
let (mut sk_tensor, scratch_2) = scratch_1.take_glwe_secret_tensor(self.n().into(), res.rank());
sk_prepared.prepare(self, sk);
sk_tensor.prepare(self, sk, scratch_2);

#[cfg(debug_assertions)]
{
assert_eq!(res.rank_out(), sk.rank());
assert_eq!(res.n(), sk.n());
}

// let n: usize = sk.n().into();
let rank: usize = res.rank_out().into();

let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, rank, 1);

for i in 0..rank {
self.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
}

let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(self, 1, 1);
let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(self.n().into(), Rank(1));
let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(self, 1, 1);

let mut source_xa: Source = Source::new(seed_xa);

for i in 0..rank {
for j in i..rank {
self.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_dft_prep.data, j, &sk_dft, i);

self.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
self.vec_znx_big_normalize(
res.base2k().into(),
&mut sk_ij.data.as_vec_znx_mut(),
0,
res.base2k().into(),
&sk_ij_big,
0,
scratch_5,
);

let (seed_xa_tmp, _) = source_xa.branch();

self.gglwe_compressed_encrypt_sk(
res.at_mut(i, j),
&sk_ij.data,
&sk_dft_prep,
seed_xa_tmp,
source_xe,
scratch_5,
);
}
}
self.gglwe_compressed_encrypt_sk(
res,
&sk_tensor.data,
&sk_prepared,
seed_xa,
source_xe,
scratch_2,
);
}
}
@@ -1,4 +1,5 @@
mod gglwe;
mod gglwe_to_ggsw_key;
mod ggsw;
mod glwe_automorphism_key;
mod glwe_ct;
@@ -6,6 +7,7 @@ mod glwe_switching_key;
mod glwe_tensor_key;

pub use gglwe::*;
pub use gglwe_to_ggsw_key::*;
pub use ggsw::*;
pub use glwe_automorphism_key::*;
pub use glwe_ct::*;

@@ -148,7 +148,7 @@ where
// Example for ksk rank 2 to rank 3:
//
// (-(a0*s0 + a1*s1 + a2*s2) + s0', a0, a1, a2)
// (-(b0*s0 + b1*s1 + b2*s2) + s0', b0, b1, b2)
// (-(b0*s0 + b1*s1 + b2*s2) + s1', b0, b1, b2)
//
// Example ksk rank 2 to rank 1
//
poulpy-core/src/encryption/gglwe_to_ggsw_key.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
use poulpy_hal::{
api::{ModuleN, ScratchTakeBasic, VecZnxCopy},
layouts::{Backend, DataMut, Module, Scratch},
source::Source,
};

use crate::{
GGLWEEncryptSk, GetDistribution, ScratchTakeCore,
layouts::{
GGLWEInfos, GGLWEToGGSWKey, GGLWEToGGSWKeyToMut, GLWEInfos, GLWESecret, GLWESecretTensor, GLWESecretTensorFactory,
GLWESecretToRef,
prepared::{GLWESecretPrepared, GLWESecretPreparedFactory},
},
};

impl GGLWEToGGSWKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<M, A, BE: Backend>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GGLWEToGGSWKeyEncryptSk<BE>,
{
module.gglwe_to_ggsw_key_encrypt_sk_tmp_bytes(infos)
}
}

impl<DataSelf: DataMut> GGLWEToGGSWKey<DataSelf> {
pub fn encrypt_sk<M, S, BE: Backend>(
&mut self,
module: &M,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
M: GGLWEToGGSWKeyEncryptSk<BE>,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.gglwe_to_ggsw_key_encrypt_sk(self, sk, source_xa, source_xe, scratch);
}
}

pub trait GGLWEToGGSWKeyEncryptSk<BE: Backend> {
fn gglwe_to_ggsw_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos;

fn gglwe_to_ggsw_key_encrypt_sk<R, S>(
&self,
res: &mut R,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
R: GGLWEToGGSWKeyToMut,
S: GLWESecretToRef + GetDistribution + GLWEInfos;
}

impl<BE: Backend> GGLWEToGGSWKeyEncryptSk<BE> for Module<BE>
where
Self: ModuleN + GGLWEEncryptSk<BE> + GLWESecretTensorFactory<BE> + GLWESecretPreparedFactory<BE> + VecZnxCopy,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn gglwe_to_ggsw_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
let sk_prepared: usize = GLWESecretPrepared::bytes_of(self, infos.rank());
let sk_tensor: usize = GLWESecretTensor::bytes_of_from_infos(infos);
let gglwe_encrypt: usize = self.gglwe_encrypt_sk_tmp_bytes(infos);
let sk_ij = GLWESecret::bytes_of(self.n().into(), infos.rank());
(sk_prepared + sk_tensor + sk_ij) + gglwe_encrypt.max(self.glwe_secret_tensor_prepare_tmp_bytes(infos.rank()))
}

fn gglwe_to_ggsw_key_encrypt_sk<R, S>(
&self,
res: &mut R,
sk: &S,
source_xa: &mut Source,
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
R: GGLWEToGGSWKeyToMut,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
{
let res: &mut GGLWEToGGSWKey<&mut [u8]> = &mut res.to_mut();

let rank: usize = res.rank_out().as_usize();

let (mut sk_prepared, scratch_1) = scratch.take_glwe_secret_prepared(self, res.rank());
let (mut sk_tensor, scratch_2) = scratch_1.take_glwe_secret_tensor(self.n().into(), res.rank());
sk_prepared.prepare(self, sk);
sk_tensor.prepare(self, sk, scratch_2);

let (mut sk_ij, scratch_3) = scratch_2.take_scalar_znx(self.n(), rank);

for i in 0..rank {
for j in 0..rank {
self.vec_znx_copy(
&mut sk_ij.as_vec_znx_mut(),
j,
&sk_tensor.at(i, j).as_vec_znx(),
0,
);
}

res.at_mut(i)
.encrypt_sk(self, &sk_ij, &sk_prepared, source_xa, source_xe, scratch_3);
}
}
}
@@ -1,8 +1,5 @@
use poulpy_hal::{
api::{
ModuleN, ScratchTakeBasic, SvpApplyDftToDft, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxIdftApplyTmpA,
},
api::ModuleN,
layouts::{Backend, DataMut, Module, Scratch},
source::Source,
};
@@ -10,7 +7,8 @@ use poulpy_hal::{
use crate::{
GGLWEEncryptSk, GetDistribution, ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, GLWETensorKey, GLWETensorKeyToMut, LWEInfos, Rank,
GGLWEInfos, GGLWELayout, GGLWEToMut, GLWEInfos, GLWESecretTensor, GLWESecretTensorFactory, GLWESecretToRef,
GLWETensorKey,
prepared::{GLWESecretPrepared, GLWESecretPreparedFactory},
},
};
@@ -55,33 +53,35 @@ pub trait GLWETensorKeyEncryptSk<BE: Backend> {
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
R: GLWETensorKeyToMut,
R: GGLWEToMut + GGLWEInfos,
S: GLWESecretToRef + GetDistribution + GLWEInfos;
}

impl<BE: Backend> GLWETensorKeyEncryptSk<BE> for Module<BE>
where
Self: ModuleN
+ GGLWEEncryptSk<BE>
+ VecZnxDftBytesOf
+ VecZnxBigBytesOf
+ GLWESecretPreparedFactory<BE>
+ VecZnxDftApply<BE>
+ SvpApplyDftToDft<BE>
+ VecZnxIdftApplyTmpA<BE>
+ VecZnxBigNormalize<BE>,
Self: ModuleN + GGLWEEncryptSk<BE> + GLWESecretPreparedFactory<BE> + GLWESecretTensorFactory<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn glwe_tensor_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
GLWESecretPrepared::bytes_of(self, infos.rank_out())
+ self.bytes_of_vec_znx_dft(infos.rank_out().into(), 1)
+ self.bytes_of_vec_znx_big(1, 1)
+ self.bytes_of_vec_znx_dft(1, 1)
+ GLWESecret::bytes_of(self.n().into(), Rank(1))
+ GGLWE::encrypt_sk_tmp_bytes(self, infos)
let sk_prepared: usize = GLWESecretPrepared::bytes_of(self, infos.rank_out());
let sk_tensor: usize = GLWESecretTensor::bytes_of_from_infos(infos);

let tensor_infos: GGLWELayout = GGLWELayout {
n: infos.n(),
base2k: infos.base2k(),
k: infos.k(),
rank_in: GLWESecretTensor::pairs(infos.rank().into()).into(),
rank_out: infos.rank_out(),
dnum: infos.dnum(),
dsize: infos.dsize(),
};

let gglwe_encrypt: usize = self.gglwe_encrypt_sk_tmp_bytes(&tensor_infos);

(sk_prepared + sk_tensor) + gglwe_encrypt.max(self.glwe_secret_tensor_prepare_tmp_bytes(infos.rank()))
}

fn glwe_tensor_key_encrypt_sk<R, S>(
@@ -92,56 +92,24 @@ where
source_xe: &mut Source,
scratch: &mut Scratch<BE>,
) where
R: GLWETensorKeyToMut,
R: GGLWEToMut + GGLWEInfos,
S: GLWESecretToRef + GetDistribution + GLWEInfos,
{
let res: &mut GLWETensorKey<&mut [u8]> = &mut res.to_mut();

// let n: RingDegree = sk.n();
let rank: Rank = res.rank_out();

let (mut sk_prepared, scratch_1) = scratch.take_glwe_secret_prepared(self, sk.rank());
sk_prepared.prepare(self, sk);

let sk: &GLWESecret<&[u8]> = &sk.to_ref();

assert_eq!(res.rank_out(), sk.rank());
assert_eq!(res.n(), sk.n());

let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, rank.into(), 1);
let (mut sk_prepared, scratch_1) = scratch.take_glwe_secret_prepared(self, res.rank());
let (mut sk_tensor, scratch_2) = scratch_1.take_glwe_secret_tensor(self.n().into(), res.rank());
sk_prepared.prepare(self, sk);
sk_tensor.prepare(self, sk, scratch_2);

(0..rank.into()).for_each(|i| {
self.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
});

let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(self, 1, 1);
let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(self.n().into(), Rank(1));
let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(self, 1, 1);

(0..rank.into()).for_each(|i| {
(i..rank.into()).for_each(|j| {
self.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_prepared.data, j, &sk_dft, i);

self.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
self.vec_znx_big_normalize(
res.base2k().into(),
&mut sk_ij.data.as_vec_znx_mut(),
0,
res.base2k().into(),
&sk_ij_big,
0,
scratch_5,
);

res.at_mut(i, j).encrypt_sk(
self,
&sk_ij.data,
&sk_prepared,
source_xa,
source_xe,
scratch_5,
);
});
})
self.gglwe_encrypt_sk(
res,
&sk_tensor.data,
&sk_prepared,
source_xa,
source_xe,
scratch_2,
);
}
}
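Side note (not part of the diff): both tensor-key encryption paths above iterate `for i in 0..rank { for j in i..rank { ... } }`, i.e. one GGLWE per unordered secret product s_i * s_j, which is rank * (rank + 1) / 2 entries — presumably what `GLWESecretTensor::pairs` accounts for in the new `rank_in` computation. A standalone sketch checking that count (names are illustrative):

```rust
// Closed form for the number of upper-triangular (i, j) pairs with 0 <= i <= j < rank.
fn tensor_pairs(rank: usize) -> usize {
    rank * (rank + 1) / 2
}

fn main() {
    // Count the loop iterations directly and compare with the closed form.
    for rank in 0..6 {
        let mut visited = 0;
        for i in 0..rank {
            for j in i..rank {
                let _ = (i, j);
                visited += 1;
            }
        }
        assert_eq!(visited, tensor_pairs(rank));
    }
    assert_eq!(tensor_pairs(3), 6); // rank 3: s0s0, s0s1, s0s2, s1s1, s1s2, s2s2
    println!("ok");
}
```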
@@ -7,23 +7,22 @@ use poulpy_hal::{
use crate::{
GGLWEEncryptSk, ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GGLWEToMut, GLWESecret, GLWESecretToRef, GLWEToLWESwitchingKey, LWEInfos, LWESecret, LWESecretToRef,
Rank,
GGLWE, GGLWEInfos, GGLWEToMut, GLWESecret, GLWESecretToRef, GLWEToLWEKey, LWEInfos, LWESecret, LWESecretToRef, Rank,
prepared::{GLWESecretPrepared, GLWESecretPreparedFactory},
},
};

impl GLWEToLWESwitchingKey<Vec<u8>> {
impl GLWEToLWEKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<M, A, BE: Backend>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyEncryptSk<BE>,
{
module.glwe_to_lwe_switching_key_encrypt_sk_tmp_bytes(infos)
module.glwe_to_lwe_key_encrypt_sk_tmp_bytes(infos)
}
}

impl<D: DataMut> GLWEToLWESwitchingKey<D> {
impl<D: DataMut> GLWEToLWEKey<D> {
pub fn encrypt_sk<M, S1, S2, BE: Backend>(
&mut self,
module: &M,
@@ -38,16 +37,16 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
S2: GLWESecretToRef,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.glwe_to_lwe_switching_key_encrypt_sk(self, sk_lwe, sk_glwe, source_xa, source_xe, scratch);
module.glwe_to_lwe_key_encrypt_sk(self, sk_lwe, sk_glwe, source_xa, source_xe, scratch);
}
}

pub trait GLWEToLWESwitchingKeyEncryptSk<BE: Backend> {
fn glwe_to_lwe_switching_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
fn glwe_to_lwe_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos;

fn glwe_to_lwe_switching_key_encrypt_sk<R, S1, S2>(
fn glwe_to_lwe_key_encrypt_sk<R, S1, S2>(
&self,
res: &mut R,
sk_lwe: &S1,
@@ -70,7 +69,7 @@ where
+ VecZnxAutomorphismInplaceTmpBytes,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn glwe_to_lwe_switching_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
fn glwe_to_lwe_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
@@ -79,7 +78,7 @@ where
.max(GLWESecret::bytes_of(self.n().into(), infos.rank_in()) + self.vec_znx_automorphism_inplace_tmp_bytes())
}

fn glwe_to_lwe_switching_key_encrypt_sk<R, S1, S2>(
fn glwe_to_lwe_key_encrypt_sk<R, S1, S2>(
&self,
res: &mut R,
sk_lwe: &S1,
@@ -8,21 +8,21 @@ use crate::{
GGLWEEncryptSk, ScratchTakeCore,
layouts::{
GGLWE, GGLWEInfos, GGLWEToMut, GLWESecret, GLWESecretPreparedFactory, GLWESecretPreparedToRef, LWEInfos, LWESecret,
LWESecretToRef, LWEToGLWESwitchingKey, Rank,
LWESecretToRef, LWEToGLWEKey, Rank,
},
};

impl LWEToGLWESwitchingKey<Vec<u8>> {
impl LWEToGLWEKey<Vec<u8>> {
pub fn encrypt_sk_tmp_bytes<M, A, BE: Backend>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyEncryptSk<BE>,
{
module.lwe_to_glwe_switching_key_encrypt_sk_tmp_bytes(infos)
module.lwe_to_glwe_key_encrypt_sk_tmp_bytes(infos)
}
}

impl<D: DataMut> LWEToGLWESwitchingKey<D> {
impl<D: DataMut> LWEToGLWEKey<D> {
pub fn encrypt_sk<S1, S2, M, BE: Backend>(
&mut self,
module: &M,
@@ -37,16 +37,16 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
M: LWEToGLWESwitchingKeyEncryptSk<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.lwe_to_glwe_switching_key_encrypt_sk(self, sk_lwe, sk_glwe, source_xa, source_xe, scratch);
module.lwe_to_glwe_key_encrypt_sk(self, sk_lwe, sk_glwe, source_xa, source_xe, scratch);
}
}

pub trait LWEToGLWESwitchingKeyEncryptSk<BE: Backend> {
fn lwe_to_glwe_switching_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
fn lwe_to_glwe_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos;

fn lwe_to_glwe_switching_key_encrypt_sk<R, S1, S2>(
fn lwe_to_glwe_key_encrypt_sk<R, S1, S2>(
&self,
res: &mut R,
sk_lwe: &S1,
@@ -69,20 +69,20 @@ where
+ VecZnxAutomorphismInplaceTmpBytes,
Scratch<BE>: ScratchTakeCore<BE>,
{
fn lwe_to_glwe_switching_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
fn lwe_to_glwe_key_encrypt_sk_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_in(),
Rank(1),
"rank_in != 1 is not supported for LWEToGLWESwitchingKey"
"rank_in != 1 is not supported for LWEToGLWEKeyPrepared"
);
GLWESecret::bytes_of(self.n().into(), infos.rank_in())
+ GGLWE::encrypt_sk_tmp_bytes(self, infos).max(self.vec_znx_automorphism_inplace_tmp_bytes())
}

fn lwe_to_glwe_switching_key_encrypt_sk<R, S1, S2>(
fn lwe_to_glwe_key_encrypt_sk<R, S1, S2>(
&self,
res: &mut R,
sk_lwe: &S1,
@@ -1,28 +1,30 @@
mod compressed;
mod gglwe;
mod gglwe_to_ggsw_key;
mod ggsw;
mod glwe;
mod glwe_automorphism_key;
mod glwe_public_key;
mod glwe_switching_key;
mod glwe_tensor_key;
mod glwe_to_lwe_switching_key;
mod glwe_to_lwe_key;
mod lwe;
mod lwe_switching_key;
mod lwe_to_glwe_switching_key;
mod lwe_to_glwe_key;

pub use compressed::*;
pub use gglwe::*;
pub use gglwe_to_ggsw_key::*;
pub use ggsw::*;
pub use glwe::*;
pub use glwe_automorphism_key::*;
pub use glwe_public_key::*;
pub use glwe_switching_key::*;
pub use glwe_tensor_key::*;
pub use glwe_to_lwe_switching_key::*;
pub use glwe_to_lwe_key::*;
pub use lwe::*;
pub use lwe_switching_key::*;
pub use lwe_to_glwe_switching_key::*;
pub use lwe_to_glwe_key::*;

pub const SIGMA: f64 = 3.2;
pub(crate) const SIGMA_BOUND: f64 = 6.0 * SIGMA;
388
poulpy-core/src/glwe_packer.rs
Normal file
388
poulpy-core/src/glwe_packer.rs
Normal file
@@ -0,0 +1,388 @@
use std::collections::HashMap;

use poulpy_hal::{
api::ModuleLogN,
layouts::{Backend, GaloisElement, Module, Scratch},
};

use crate::{
GLWEAdd, GLWEAutomorphism, GLWECopy, GLWENormalize, GLWERotate, GLWEShift, GLWESub, ScratchTakeCore,
glwe_trace::GLWETrace,
layouts::{GGLWEInfos, GGLWEPreparedToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetGaloisElement, LWEInfos},
};

/// [GLWEPacker] enables on-the-fly GLWE packing
/// with constant memory of Log(N) ciphertexts.
/// The main difference with usual GLWE packing is that
/// the output is bit-reversed.
pub struct GLWEPacker {
accumulators: Vec<Accumulator>,
log_batch: usize,
counter: usize,
}

/// [Accumulator] stores an intermediate packing result.
/// There are Log(N) such accumulators in a [GLWEPacker].
struct Accumulator {
data: GLWE<Vec<u8>>,
value: bool, // Implicit flag for zero ciphertext
control: bool, // Can be combined with incoming value
}

impl Accumulator {
/// Allocates a new [Accumulator].
///
/// # Arguments
///
/// * `module`: static backend FFT tables.
/// * `base2k`: base 2 logarithm of the GLWE ciphertext in-memory digit representation.
/// * `k`: base 2 logarithm of the GLWE ciphertext precision over the Torus.
/// * `rank`: rank of the GLWE ciphertext.
pub fn alloc<A>(infos: &A) -> Self
where
A: GLWEInfos,
{
Self {
data: GLWE::alloc_from_infos(infos),
value: false,
control: false,
}
}
}

impl GLWEPacker {
/// Instantiates a new [GLWEPacker].
///
/// # Arguments
///
/// * `log_batch`: packs coefficients which are multiples of X^{N/2^log_batch},
/// i.e. with `log_batch=0` only the constant coefficient is packed
/// and N GLWE ciphertexts can be packed, while with `log_batch=2` all coefficients
/// which are multiples of X^{N/4} are packed, meaning that N/4 ciphertexts
/// can be packed.
pub fn alloc<A>(infos: &A, log_batch: usize) -> Self
where
A: GLWEInfos,
{
let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
let log_n: usize = infos.n().log2();
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(infos)));
GLWEPacker {
accumulators,
log_batch,
counter: 0,
}
}

/// Implicit reset of the internal state (to be called before a new packing procedure).
fn reset(&mut self) {
for i in 0..self.accumulators.len() {
self.accumulators[i].value = false;
self.accumulators[i].control = false;
}
self.counter = 0;
}

/// Number of scratch space bytes required to call [Self::add].
pub fn tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
where
R: GLWEInfos,
K: GGLWEInfos,
M: GLWEPackerOps<BE>,
{
GLWE::bytes_of_from_infos(res_infos)
+ module
.glwe_rsh_tmp_byte()
.max(module.glwe_automorphism_tmp_bytes(res_infos, res_infos, key_infos))
}

pub fn galois_elements<M, BE: Backend>(module: &M) -> Vec<i64>
where
M: GLWETrace<BE>,
{
module.glwe_trace_galois_elements()
}

/// Adds a GLWE ciphertext to the [GLWEPacker].
///
/// # Arguments
///
/// * `module`: static backend FFT tables.
/// * `res`: space to append the fully packed ciphertext. Only when the number
/// of packed ciphertexts reaches N/2^log_batch is a result written.
/// * `a`: ciphertext to pack. Can optionally be None to pack a 0 ciphertext.
/// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s.
/// * `scratch`: scratch space of size at least [Self::tmp_bytes].
pub fn add<A, K, M, BE: Backend>(&mut self, module: &M, a: Option<&A>, auto_keys: &HashMap<i64, K>, scratch: &mut Scratch<BE>)
where
A: GLWEToRef + GLWEInfos,
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
M: GLWEPackerOps<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
assert!(
(self.counter as u32) < self.accumulators[0].data.n(),
"Packing limit of {} reached",
self.accumulators[0].data.n().0 as usize >> self.log_batch
);

module.packer_add(self, a, self.log_batch, auto_keys, scratch);
self.counter += 1 << self.log_batch;
}

/// Flushes the result to `res`.
pub fn flush<R, M, BE: Backend>(&mut self, module: &M, res: &mut R)
where
R: GLWEToMut,
M: GLWEPackerOps<BE>,
{
assert!(self.counter as u32 == self.accumulators[0].data.n());
// Copy result GLWE into res GLWE
module.glwe_copy(
res,
&self.accumulators[module.log_n() - self.log_batch - 1].data,
);

self.reset();
}
}
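To make the intended call pattern of the struct above concrete, here is a minimal usage sketch. It is not part of the commit: the chosen bounds, the `log_batch = 0` setting and the way `module`, `auto_keys`, `scratch` and the input ciphertexts are obtained are assumptions, and only the methods defined in this file (`alloc`, `add`, `flush`) are exercised. It assumes the same imports as the file above.

    // Hedged sketch: packs `n` GLWE ciphertexts into a single one, assuming the
    // caller already prepared `module`, `auto_keys` (one prepared key per Galois
    // element returned by GLWEPacker::galois_elements) and a large enough `scratch`.
    fn pack_all<M, K, BE: Backend>(
        module: &M,
        cts: &[GLWE<Vec<u8>>],
        auto_keys: &HashMap<i64, K>,
        scratch: &mut Scratch<BE>,
    ) -> GLWE<Vec<u8>>
    where
        M: GLWEPackerOps<BE>,
        K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        let infos = &cts[0];
        let n: usize = infos.n().0 as usize;
        let mut packer = GLWEPacker::alloc(infos, 0); // log_batch = 0: one coefficient per input
        let mut res = GLWE::alloc_from_infos(infos);
        for i in 0..n {
            // Passing `None` instead would pack an implicit zero ciphertext.
            packer.add(module, cts.get(i), auto_keys, scratch);
        }
        packer.flush(module, &mut res); // output coefficients are in bit-reversed order
        res
    }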
|
||||
impl<BE: Backend> GLWEPackerOps<BE> for Module<BE> where
|
||||
Self: Sized
|
||||
+ ModuleLogN
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
{
|
||||
}
|
||||
|
||||
pub trait GLWEPackerOps<BE: Backend>
|
||||
where
|
||||
Self: Sized
|
||||
+ ModuleLogN
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>,
|
||||
{
|
||||
fn packer_add<A, K>(
|
||||
&self,
|
||||
packer: &mut GLWEPacker,
|
||||
a: Option<&A>,
|
||||
i: usize,
|
||||
auto_keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
A: GLWEToRef + GLWEInfos,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
pack_core(self, a, &mut packer.accumulators, i, auto_keys, scratch)
|
||||
}
|
||||
}
|
||||
|
||||
fn pack_core<A, K, M, BE: Backend>(
|
||||
module: &M,
|
||||
a: Option<&A>,
|
||||
accumulators: &mut [Accumulator],
|
||||
i: usize,
|
||||
auto_keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
A: GLWEToRef + GLWEInfos,
|
||||
M: ModuleLogN
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let log_n: usize = module.log_n();
|
||||
|
||||
if i == log_n {
|
||||
return;
|
||||
}
|
||||
|
||||
// Isolate the first accumulator
|
||||
let (acc_prev, acc_next) = accumulators.split_at_mut(1);
|
||||
|
||||
// control == false: the accumulator is empty and free to be overridden by the incoming value
|
||||
if !acc_prev[0].control {
|
||||
let acc_mut_ref: &mut Accumulator = &mut acc_prev[0]; // from split_at_mut
|
||||
|
||||
// No previous value -> copies and sets flags accordingly
|
||||
if let Some(a_ref) = a {
|
||||
module.glwe_copy(&mut acc_mut_ref.data, a_ref);
|
||||
acc_mut_ref.value = true
|
||||
} else {
|
||||
acc_mut_ref.value = false
|
||||
}
|
||||
acc_mut_ref.control = true; // Able to be combined on next call
|
||||
} else {
|
||||
// Compresses acc_prev <- combine(acc_prev, a).
|
||||
combine(module, &mut acc_prev[0], a, i, auto_keys, scratch);
|
||||
acc_prev[0].control = false;
|
||||
|
||||
// Propagates to next accumulator
|
||||
if acc_prev[0].value {
|
||||
pack_core(
|
||||
module,
|
||||
Some(&acc_prev[0].data),
|
||||
acc_next,
|
||||
i + 1,
|
||||
auto_keys,
|
||||
scratch,
|
||||
);
|
||||
} else {
|
||||
pack_core(
|
||||
module,
|
||||
None::<&GLWE<Vec<u8>>>,
|
||||
acc_next,
|
||||
i + 1,
|
||||
auto_keys,
|
||||
scratch,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
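The recursion above behaves like incrementing a binary counter over the accumulators (my summary of the code, not text from the commit): accumulator 0 absorbs each incoming ciphertext; if it already holds one (`control == true`), the two are merged by `combine` and the merged value is pushed to the next accumulator, which applies the same rule. For example with log_n = 3 and log_batch = 0, adding c0..c7 proceeds as:

    add(c0): acc0 <- c0
    add(c1): combine(acc0, c1) -> acc1
    add(c2): acc0 <- c2
    add(c3): combine(acc0, c3) -> combine with acc1 -> acc2
    add(c4): acc0 <- c4
    add(c5): combine(acc0, c5) -> acc1
    add(c6): acc0 <- c6
    add(c7): combine(acc0, c7) -> acc1 -> acc2, which then holds the full packing

and `flush` reads the result from accumulator log_n - log_batch - 1 = 2.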
|
||||
fn combine<B, K, M, BE: Backend>(
|
||||
module: &M,
|
||||
acc: &mut Accumulator,
|
||||
b: Option<&B>,
|
||||
i: usize,
|
||||
auto_keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
B: GLWEToRef + GLWEInfos,
|
||||
B: GLWEToRef + GLWEInfos,
|
||||
M: ModuleLogN
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let log_n: usize = acc.data.n().log2();
|
||||
let a: &mut GLWE<Vec<u8>> = &mut acc.data;
|
||||
|
||||
let gal_el: i64 = if i == 0 {
|
||||
-1
|
||||
} else {
|
||||
module.galois_element(1 << (i - 1))
|
||||
};
|
||||
|
||||
let t: i64 = 1 << (log_n - i - 1);
|
||||
|
||||
// Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t)
// We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
// where t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}.
// The branches below handle the cases where a and/or b are zero.
//
// The implicit RSH without modulus switch introduces an extra I(X) * Q/2 term on decryption.
// It is necessary so that the scaling of the plaintext remains constant.
// It is however fine to do so here because each coefficient is eventually
// either mapped to garbage or doubled, which makes I(X) vanish
// since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
if acc.value {
|
||||
if let Some(b) = b {
|
||||
let (mut tmp_b, scratch_1) = scratch.take_glwe(a);
|
||||
|
||||
// a = a * X^-t
|
||||
module.glwe_rotate_inplace(-t, a, scratch_1);
|
||||
|
||||
// tmp_b = a * X^-t - b
|
||||
module.glwe_sub(&mut tmp_b, a, b);
|
||||
module.glwe_rsh(1, &mut tmp_b, scratch_1);
|
||||
|
||||
// a = a * X^-t + b
|
||||
module.glwe_add_inplace(a, b);
|
||||
module.glwe_rsh(1, a, scratch_1);
|
||||
|
||||
module.glwe_normalize_inplace(&mut tmp_b, scratch_1);
|
||||
|
||||
// tmp_b = phi(a * X^-t - b)
|
||||
if let Some(auto_key) = auto_keys.get(&gal_el) {
|
||||
module.glwe_automorphism_inplace(&mut tmp_b, auto_key, scratch_1);
|
||||
} else {
|
||||
panic!("auto_key[{gal_el}] not found");
|
||||
}
|
||||
|
||||
// a = a * X^-t + b - phi(a * X^-t - b)
|
||||
module.glwe_sub_inplace(a, &tmp_b);
|
||||
module.glwe_normalize_inplace(a, scratch_1);
|
||||
|
||||
// a = a + b * X^t - phi(a * X^-t - b) * X^t
|
||||
// = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
|
||||
// = a + b * X^t + phi(a - b * X^t)
|
||||
module.glwe_rotate_inplace(t, a, scratch_1);
|
||||
} else {
|
||||
module.glwe_rsh(1, a, scratch);
|
||||
// a = a + phi(a)
|
||||
if let Some(auto_key) = auto_keys.get(&gal_el) {
|
||||
module.glwe_automorphism_add_inplace(a, auto_key, scratch);
|
||||
} else {
|
||||
panic!("auto_key[{gal_el}] not found");
|
||||
}
|
||||
}
|
||||
} else if let Some(b) = b {
|
||||
let (mut tmp_b, scratch_1) = scratch.take_glwe(a);
|
||||
module.glwe_rotate(t, &mut tmp_b, b);
|
||||
module.glwe_rsh(1, &mut tmp_b, scratch_1);
|
||||
|
||||
// a = (b* X^t - phi(b* X^t))
|
||||
if let Some(auto_key) = auto_keys.get(&gal_el) {
|
||||
module.glwe_automorphism_sub_negate(a, &tmp_b, auto_key, scratch_1);
|
||||
} else {
|
||||
panic!("auto_key[{gal_el}] not found");
|
||||
}
|
||||
|
||||
acc.value = true;
|
||||
}
|
||||
}
|
||||
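For reference, the sequence of operations in `combine` can be checked against the stated goal; this derivation is added here for readability and is not part of the commit. Writing phi for the automorphism X -> X^g used at step i, one has phi(X^t) = -X^t for t = 2^(log_n - i - 1), hence phi(y * X^t) = -X^t * phi(y). Starting from a and b:

    a <- a * X^-t
    u  = a * X^-t + b              (stored back into a)
    v  = phi(a * X^-t - b)         (stored into tmp_b)
    a <- (u - v) * X^t
       = a + b * X^t - X^t * phi(a * X^-t - b)
       = a + b * X^t + phi((a * X^-t - b) * X^t)
       = a + b * X^t + phi(a - b * X^t)

which is exactly the formula in the comment at the top of the function, up to the implicit halving performed by the two `glwe_rsh(1, ...)` calls.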
@@ -7,166 +7,23 @@ use poulpy_hal::{
|
||||
|
||||
use crate::{
|
||||
GLWEAdd, GLWEAutomorphism, GLWECopy, GLWENormalize, GLWERotate, GLWEShift, GLWESub, ScratchTakeCore,
|
||||
glwe_trace::GLWETrace,
|
||||
layouts::{GGLWEInfos, GGLWEPreparedToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetGaloisElement, LWEInfos},
|
||||
layouts::{GGLWEInfos, GGLWEPreparedToRef, GLWEInfos, GLWEToMut, GLWEToRef, GetGaloisElement},
|
||||
};
|
||||
|
||||
/// [GLWEPacker] enables only the fly GLWE packing
|
||||
/// with constant memory of Log(N) ciphertexts.
|
||||
/// Main difference with usual GLWE packing is that
|
||||
/// the output is bit-reversed.
|
||||
pub struct GLWEPacker {
|
||||
accumulators: Vec<Accumulator>,
|
||||
log_batch: usize,
|
||||
counter: usize,
|
||||
pub trait GLWEPacking<BE: Backend> {
|
||||
/// Packs [x_0: GLWE(m_0), x_1: GLWE(m_1), ..., x_i: GLWE(m_i)]
|
||||
/// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)]
|
||||
fn glwe_pack<R, K>(
|
||||
&self,
|
||||
cts: &mut HashMap<usize, &mut R>,
|
||||
log_gap_out: usize,
|
||||
keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
R: GLWEToMut + GLWEToRef + GLWEInfos,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos;
|
||||
}
|
||||
|
||||
/// [Accumulator] stores intermediate packing result.
|
||||
/// There are Log(N) such accumulators in a [GLWEPacker].
|
||||
struct Accumulator {
|
||||
data: GLWE<Vec<u8>>,
|
||||
value: bool, // Implicit flag for zero ciphertext
|
||||
control: bool, // Can be combined with incoming value
|
||||
}
|
||||
|
||||
impl Accumulator {
|
||||
/// Allocates a new [Accumulator].
|
||||
///
|
||||
/// #Arguments
|
||||
///
|
||||
/// * `module`: static backend FFT tables.
|
||||
/// * `base2k`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
|
||||
/// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus.
|
||||
/// * `rank`: rank of the GLWE ciphertext.
|
||||
pub fn alloc<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GLWEInfos,
|
||||
{
|
||||
Self {
|
||||
data: GLWE::alloc_from_infos(infos),
|
||||
value: false,
|
||||
control: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GLWEPacker {
|
||||
/// Instantiates a new [GLWEPacker].
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `log_batch`: packs coefficients which are multiples of X^{N/2^log_batch}.
|
||||
/// i.e. with `log_batch=0` only the constant coefficient is packed
|
||||
/// and N GLWE ciphertext can be packed. With `log_batch=2` all coefficients
|
||||
/// which are multiples of X^{N/4} are packed. Meaning that N/4 ciphertexts
|
||||
/// can be packed.
|
||||
pub fn alloc<A>(infos: &A, log_batch: usize) -> Self
|
||||
where
|
||||
A: GLWEInfos,
|
||||
{
|
||||
let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
|
||||
let log_n: usize = infos.n().log2();
|
||||
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(infos)));
|
||||
GLWEPacker {
|
||||
accumulators,
|
||||
log_batch,
|
||||
counter: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Implicit reset of the internal state (to be called before a new packing procedure).
|
||||
fn reset(&mut self) {
|
||||
for i in 0..self.accumulators.len() {
|
||||
self.accumulators[i].value = false;
|
||||
self.accumulators[i].control = false;
|
||||
}
|
||||
self.counter = 0;
|
||||
}
|
||||
|
||||
/// Number of scratch space bytes required to call [Self::add].
|
||||
pub fn tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
|
||||
where
|
||||
R: GLWEInfos,
|
||||
K: GGLWEInfos,
|
||||
M: GLWEPacking<BE>,
|
||||
{
|
||||
GLWE::bytes_of_from_infos(res_infos)
|
||||
+ module
|
||||
.glwe_rsh_tmp_byte()
|
||||
.max(module.glwe_automorphism_tmp_bytes(res_infos, res_infos, key_infos))
|
||||
}
|
||||
|
||||
pub fn galois_elements<M, BE: Backend>(module: &M) -> Vec<i64>
|
||||
where
|
||||
M: GLWETrace<BE>,
|
||||
{
|
||||
module.glwe_trace_galois_elements()
|
||||
}
|
||||
|
||||
/// Adds a GLWE ciphertext to the [GLWEPacker].
|
||||
/// #Arguments
|
||||
///
|
||||
/// * `module`: static backend FFT tables.
|
||||
/// * `res`: space to append fully packed ciphertext. Only when the number
|
||||
/// of packed ciphertexts reaches N/2^log_batch is a result written.
|
||||
/// * `a`: ciphertext to pack. Can optionally give None to pack a 0 ciphertext.
|
||||
/// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s.
|
||||
/// * `scratch`: scratch space of size at least [Self::tmp_bytes].
|
||||
pub fn add<A, K, M, BE: Backend>(&mut self, module: &M, a: Option<&A>, auto_keys: &HashMap<i64, K>, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
A: GLWEToRef + GLWEInfos,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
M: GLWEPacking<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
assert!(
|
||||
(self.counter as u32) < self.accumulators[0].data.n(),
|
||||
"Packing limit of {} reached",
|
||||
self.accumulators[0].data.n().0 as usize >> self.log_batch
|
||||
);
|
||||
|
||||
pack_core(
|
||||
module,
|
||||
a,
|
||||
&mut self.accumulators,
|
||||
self.log_batch,
|
||||
auto_keys,
|
||||
scratch,
|
||||
);
|
||||
self.counter += 1 << self.log_batch;
|
||||
}
|
||||
|
||||
/// Flush result to`res`.
|
||||
pub fn flush<R, M, BE: Backend>(&mut self, module: &M, res: &mut R)
|
||||
where
|
||||
R: GLWEToMut,
|
||||
M: GLWEPacking<BE>,
|
||||
{
|
||||
assert!(self.counter as u32 == self.accumulators[0].data.n());
|
||||
// Copy result GLWE into res GLWE
|
||||
module.glwe_copy(
|
||||
res,
|
||||
&self.accumulators[module.log_n() - self.log_batch - 1].data,
|
||||
);
|
||||
|
||||
self.reset();
|
||||
}
|
||||
}
|
||||
|
||||
impl<BE: Backend> GLWEPacking<BE> for Module<BE> where
|
||||
Self: GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ ModuleLogN
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy
|
||||
{
|
||||
}
|
||||
|
||||
pub trait GLWEPacking<BE: Backend>
|
||||
impl<BE: Backend> GLWEPacking<BE> for Module<BE>
|
||||
where
|
||||
Self: GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
@@ -177,6 +34,7 @@ where
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
/// Packs [x_0: GLWE(m_0), x_1: GLWE(m_1), ..., x_i: GLWE(m_i)]
|
||||
/// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)]
|
||||
@@ -189,7 +47,6 @@ where
|
||||
) where
|
||||
R: GLWEToMut + GLWEToRef + GLWEInfos,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
@@ -223,169 +80,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
fn pack_core<A, K, M, BE: Backend>(
|
||||
module: &M,
|
||||
a: Option<&A>,
|
||||
accumulators: &mut [Accumulator],
|
||||
i: usize,
|
||||
auto_keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
A: GLWEToRef + GLWEInfos,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
M: ModuleLogN
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GaloisElement
|
||||
+ GLWERotate<BE>
|
||||
+ GLWESub
|
||||
+ GLWEShift<BE>
|
||||
+ GLWEAdd
|
||||
+ GLWENormalize<BE>
|
||||
+ GLWECopy,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let log_n: usize = module.log_n();
|
||||
|
||||
if i == log_n {
|
||||
return;
|
||||
}
|
||||
|
||||
// Isolate the first accumulator
|
||||
let (acc_prev, acc_next) = accumulators.split_at_mut(1);
|
||||
|
||||
// Control = true accumlator is free to overide
|
||||
if !acc_prev[0].control {
|
||||
let acc_mut_ref: &mut Accumulator = &mut acc_prev[0]; // from split_at_mut
|
||||
|
||||
// No previous value -> copies and sets flags accordingly
|
||||
if let Some(a_ref) = a {
|
||||
module.glwe_copy(&mut acc_mut_ref.data, a_ref);
|
||||
acc_mut_ref.value = true
|
||||
} else {
|
||||
acc_mut_ref.value = false
|
||||
}
|
||||
acc_mut_ref.control = true; // Able to be combined on next call
|
||||
} else {
|
||||
// Compresses acc_prev <- combine(acc_prev, a).
|
||||
combine(module, &mut acc_prev[0], a, i, auto_keys, scratch);
|
||||
acc_prev[0].control = false;
|
||||
|
||||
// Propagates to next accumulator
|
||||
if acc_prev[0].value {
|
||||
pack_core(
|
||||
module,
|
||||
Some(&acc_prev[0].data),
|
||||
acc_next,
|
||||
i + 1,
|
||||
auto_keys,
|
||||
scratch,
|
||||
);
|
||||
} else {
|
||||
pack_core(
|
||||
module,
|
||||
None::<&GLWE<Vec<u8>>>,
|
||||
acc_next,
|
||||
i + 1,
|
||||
auto_keys,
|
||||
scratch,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// [combine] merges two ciphertexts together.
|
||||
fn combine<B, M, K, BE: Backend>(
|
||||
module: &M,
|
||||
acc: &mut Accumulator,
|
||||
b: Option<&B>,
|
||||
i: usize,
|
||||
auto_keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
B: GLWEToRef + GLWEInfos,
|
||||
M: GLWEAutomorphism<BE> + GaloisElement + GLWERotate<BE> + GLWESub + GLWEShift<BE> + GLWEAdd + GLWENormalize<BE>,
|
||||
B: GLWEToRef + GLWEInfos,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let log_n: usize = acc.data.n().log2();
|
||||
let a: &mut GLWE<Vec<u8>> = &mut acc.data;
|
||||
|
||||
let gal_el: i64 = if i == 0 {
|
||||
-1
|
||||
} else {
|
||||
module.galois_element(1 << (i - 1))
|
||||
};
|
||||
|
||||
let t: i64 = 1 << (log_n - i - 1);
|
||||
|
||||
// Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t))
|
||||
// We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
|
||||
// where t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}
|
||||
// Different cases for wether a and/or b are zero.
|
||||
//
|
||||
// Implicite RSH without modulus switch, introduces extra I(X) * Q/2 on decryption.
|
||||
// Necessary so that the scaling of the plaintext remains constant.
|
||||
// It however is ok to do so here because coefficients are eventually
|
||||
// either mapped to garbage or twice their value which vanishes I(X)
|
||||
// since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
|
||||
if acc.value {
|
||||
if let Some(b) = b {
|
||||
let (mut tmp_b, scratch_1) = scratch.take_glwe(a);
|
||||
|
||||
// a = a * X^-t
|
||||
module.glwe_rotate_inplace(-t, a, scratch_1);
|
||||
|
||||
// tmp_b = a * X^-t - b
|
||||
module.glwe_sub(&mut tmp_b, a, b);
|
||||
module.glwe_rsh(1, &mut tmp_b, scratch_1);
|
||||
|
||||
// a = a * X^-t + b
|
||||
module.glwe_add_inplace(a, b);
|
||||
module.glwe_rsh(1, a, scratch_1);
|
||||
|
||||
module.glwe_normalize_inplace(&mut tmp_b, scratch_1);
|
||||
|
||||
// tmp_b = phi(a * X^-t - b)
|
||||
if let Some(auto_key) = auto_keys.get(&gal_el) {
|
||||
module.glwe_automorphism_inplace(&mut tmp_b, auto_key, scratch_1);
|
||||
} else {
|
||||
panic!("auto_key[{gal_el}] not found");
|
||||
}
|
||||
|
||||
// a = a * X^-t + b - phi(a * X^-t - b)
|
||||
module.glwe_sub_inplace(a, &tmp_b);
|
||||
module.glwe_normalize_inplace(a, scratch_1);
|
||||
|
||||
// a = a + b * X^t - phi(a * X^-t - b) * X^t
|
||||
// = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
|
||||
// = a + b * X^t + phi(a - b * X^t)
|
||||
module.glwe_rotate_inplace(t, a, scratch_1);
|
||||
} else {
|
||||
module.glwe_rsh(1, a, scratch);
|
||||
// a = a + phi(a)
|
||||
if let Some(auto_key) = auto_keys.get(&gal_el) {
|
||||
module.glwe_automorphism_add_inplace(a, auto_key, scratch);
|
||||
} else {
|
||||
panic!("auto_key[{gal_el}] not found");
|
||||
}
|
||||
}
|
||||
} else if let Some(b) = b {
|
||||
let (mut tmp_b, scratch_1) = scratch.take_glwe(a);
|
||||
module.glwe_rotate(t, &mut tmp_b, b);
|
||||
module.glwe_rsh(1, &mut tmp_b, scratch_1);
|
||||
|
||||
// a = (b* X^t - phi(b* X^t))
|
||||
if let Some(auto_key) = auto_keys.get(&gal_el) {
|
||||
module.glwe_automorphism_sub_negate(a, &tmp_b, auto_key, scratch_1);
|
||||
} else {
|
||||
panic!("auto_key[{gal_el}] not found");
|
||||
}
|
||||
|
||||
acc.value = true;
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn pack_internal<M, A, B, K, BE: Backend>(
|
||||
module: &M,
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use poulpy_hal::{
|
||||
api::ModuleLogN,
|
||||
layouts::{Backend, DataMut, GaloisElement, Module, Scratch, VecZnx, galois_element},
|
||||
api::{ModuleLogN, VecZnxNormalize, VecZnxNormalizeTmpBytes},
|
||||
layouts::{Backend, CyclotomicOrder, DataMut, GaloisElement, Module, Scratch, VecZnx, galois_element},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
@@ -27,7 +27,7 @@ impl GLWE<Vec<u8>> {
|
||||
K: GGLWEInfos,
|
||||
M: GLWETrace<BE>,
|
||||
{
|
||||
module.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos)
|
||||
module.glwe_trace_tmp_bytes(res_infos, a_infos, key_infos)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,11 +65,6 @@ impl<D: DataMut> GLWE<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<BE: Backend> GLWETrace<BE> for Module<BE> where
|
||||
Self: ModuleLogN + GaloisElement + GLWEAutomorphism<BE> + GLWEShift<BE> + GLWECopy
|
||||
{
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn trace_galois_elements(log_n: usize, cyclotomic_order: i64) -> Vec<i64> {
|
||||
(0..log_n)
|
||||
@@ -83,9 +78,17 @@ pub fn trace_galois_elements(log_n: usize, cyclotomic_order: i64) -> Vec<i64> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub trait GLWETrace<BE: Backend>
|
||||
impl<BE: Backend> GLWETrace<BE> for Module<BE>
|
||||
where
|
||||
Self: ModuleLogN + GaloisElement + GLWEAutomorphism<BE> + GLWEShift<BE> + GLWECopy,
|
||||
Self: ModuleLogN
|
||||
+ GaloisElement
|
||||
+ GLWEAutomorphism<BE>
|
||||
+ GLWEShift<BE>
|
||||
+ GLWECopy
|
||||
+ CyclotomicOrder
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxNormalize<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
fn glwe_trace_galois_elements(&self) -> Vec<i64> {
|
||||
trace_galois_elements(self.log_n(), self.cyclotomic_order())
|
||||
@@ -115,7 +118,6 @@ where
|
||||
R: GLWEToMut,
|
||||
A: GLWEToRef,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
self.glwe_copy(res, a);
|
||||
self.glwe_trace_inplace(res, start, end, keys, scratch);
|
||||
@@ -125,7 +127,6 @@ where
|
||||
where
|
||||
R: GLWEToMut,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||
|
||||
@@ -212,3 +213,31 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETrace<BE: Backend> {
|
||||
fn glwe_trace_galois_elements(&self) -> Vec<i64>;
|
||||
|
||||
fn glwe_trace_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
|
||||
where
|
||||
R: GLWEInfos,
|
||||
A: GLWEInfos,
|
||||
K: GGLWEInfos;
|
||||
|
||||
fn glwe_trace<R, A, K>(
|
||||
&self,
|
||||
res: &mut R,
|
||||
start: usize,
|
||||
end: usize,
|
||||
a: &A,
|
||||
keys: &HashMap<i64, K>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) where
|
||||
R: GLWEToMut,
|
||||
A: GLWEToRef,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos;
|
||||
|
||||
fn glwe_trace_inplace<R, K>(&self, res: &mut R, start: usize, end: usize, keys: &HashMap<i64, K>, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GLWEToMut,
|
||||
K: GGLWEPreparedToRef<BE> + GetGaloisElement + GGLWEInfos;
|
||||
}
|
||||
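As a reminder of what these trait methods compute (my paraphrase, not text from the commit): the Galois elements returned by `glwe_trace_galois_elements` walk the automorphism group of Z[X]/(X^N+1), and folding a ciphertext with a <- a + phi_g(a) over all of them evaluates the algebraic trace, since the sum of phi_g(X^j) over the whole group is N for j = 0 and 0 otherwise. Each of the log N folding steps typically also halves the ciphertext (as is done with `glwe_rsh` in the packer above), so the output encrypts the constant coefficient m_0 rather than N * m_0; the `start`/`end` arguments of `glwe_trace` presumably select a partial trace that only zeroes a subset of the coefficient classes.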
|
||||
@@ -1,9 +1,9 @@
|
||||
use poulpy_hal::layouts::{Backend, DataMut, Module, Scratch, VecZnx};
|
||||
use poulpy_hal::layouts::{Backend, DataMut, Module, Scratch};
|
||||
|
||||
use crate::{
|
||||
GGSWExpandRows, ScratchTakeCore,
|
||||
keyswitching::GLWEKeyswitch,
|
||||
layouts::{GGLWEInfos, GGLWEPreparedToRef, GGSW, GGSWInfos, GGSWToMut, GGSWToRef, prepared::GLWETensorKeyPreparedToRef},
|
||||
layouts::{GGLWEInfos, GGLWEPreparedToRef, GGLWEToGGSWKeyPreparedToRef, GGSW, GGSWInfos, GGSWToMut, GGSWToRef},
|
||||
};
|
||||
|
||||
impl GGSW<Vec<u8>> {
|
||||
@@ -30,7 +30,7 @@ impl<D: DataMut> GGSW<D> {
|
||||
where
|
||||
A: GGSWToRef,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
T: GLWETensorKeyPreparedToRef<BE>,
|
||||
T: GGLWEToGGSWKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
M: GGSWKeyswitch<BE>,
|
||||
{
|
||||
@@ -40,7 +40,7 @@ impl<D: DataMut> GGSW<D> {
|
||||
pub fn keyswitch_inplace<M, K, T, BE: Backend>(&mut self, module: &M, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
T: GLWETensorKeyPreparedToRef<BE>,
|
||||
T: GGLWEToGGSWKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
M: GGSWKeyswitch<BE>,
|
||||
{
|
||||
@@ -48,9 +48,7 @@ impl<D: DataMut> GGSW<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<BE: Backend> GGSWKeyswitch<BE> for Module<BE> where Self: GLWEKeyswitch<BE> + GGSWExpandRows<BE> {}
|
||||
|
||||
pub trait GGSWKeyswitch<BE: Backend>
|
||||
impl<BE: Backend> GGSWKeyswitch<BE> for Module<BE>
|
||||
where
|
||||
Self: GLWEKeyswitch<BE> + GGSWExpandRows<BE>,
|
||||
{
|
||||
@@ -65,25 +63,26 @@ where
|
||||
assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
|
||||
assert_eq!(key_infos.rank_in(), tsk_infos.rank_in());
|
||||
|
||||
let rank: usize = key_infos.rank_out().into();
|
||||
self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
|
||||
.max(self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos))
|
||||
}
|
||||
|
||||
let size_out: usize = res_infos.k().div_ceil(res_infos.base2k()) as usize;
|
||||
let res_znx: usize = VecZnx::bytes_of(self.n(), rank + 1, size_out);
|
||||
let ci_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
|
||||
let ks: usize = self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos);
|
||||
let expand_rows: usize = self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos);
|
||||
let res_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
|
||||
fn ggsw_keyswitch_inplace<R, K, T>(&self, res: &mut R, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GGSWToMut,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
T: GGLWEToGGSWKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
|
||||
|
||||
if a_infos.base2k() == tsk_infos.base2k() {
|
||||
res_znx + ci_dft + (ks | expand_rows | res_dft)
|
||||
} else {
|
||||
let a_conv: usize = VecZnx::bytes_of(
|
||||
self.n(),
|
||||
1,
|
||||
res_infos.k().div_ceil(tsk_infos.base2k()) as usize,
|
||||
) + self.vec_znx_normalize_tmp_bytes();
|
||||
res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
|
||||
for row in 0..res.dnum().into() {
|
||||
// Key-switch column 0, i.e.
|
||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||
self.glwe_keyswitch_inplace(&mut res.at_mut(row, 0), key, scratch);
|
||||
}
|
||||
|
||||
self.ggsw_expand_row(res, tsk, scratch);
|
||||
}
|
||||
|
||||
fn ggsw_keyswitch<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||
@@ -91,7 +90,7 @@ where
|
||||
R: GGSWToMut,
|
||||
A: GGSWToRef,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
T: GLWETensorKeyPreparedToRef<BE>,
|
||||
T: GGLWEToGGSWKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
|
||||
@@ -108,22 +107,31 @@ where
|
||||
|
||||
self.ggsw_expand_row(res, tsk, scratch);
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGSWKeyswitch<BE: Backend>
|
||||
where
|
||||
Self: GLWEKeyswitch<BE> + GGSWExpandRows<BE>,
|
||||
{
|
||||
fn ggsw_keyswitch_tmp_bytes<R, A, K, T>(&self, res_infos: &R, a_infos: &A, key_infos: &K, tsk_infos: &T) -> usize
|
||||
where
|
||||
R: GGSWInfos,
|
||||
A: GGSWInfos,
|
||||
K: GGLWEInfos,
|
||||
T: GGLWEInfos;
|
||||
|
||||
fn ggsw_keyswitch<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GGSWToMut,
|
||||
A: GGSWToRef,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
T: GGLWEToGGSWKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>;
|
||||
|
||||
fn ggsw_keyswitch_inplace<R, K, T>(&self, res: &mut R, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GGSWToMut,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
T: GLWETensorKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
|
||||
|
||||
for row in 0..res.dnum().into() {
|
||||
// Key-switch column 0, i.e.
|
||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||
self.glwe_keyswitch_inplace(&mut res.at_mut(row, 0), key, scratch);
|
||||
}
|
||||
|
||||
self.ggsw_expand_row(res, tsk, scratch);
|
||||
}
|
||||
T: GGLWEToGGSWKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>;
|
||||
}
|
||||
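A brief summary of the strategy in `ggsw_keyswitch` and `ggsw_keyswitch_inplace` above (my reading of the code, not text from the commit): column 0 of every GGSW row encrypts the message digit, so key-switching column 0 of each row with the regular GGLWE key moves the message under the new secret s'. The remaining columns, which must encrypt the message digit times s'_j, are then regenerated by `ggsw_expand_row` from those switched columns together with the GGLWEToGGSW key, which (per its layout) packs GGLWE encryptions of the pairwise products s'[i] * s'[j] of the new secret.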
|
||||
@@ -1,10 +1,10 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
|
||||
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
},
|
||||
layouts::{Backend, DataMut, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
|
||||
layouts::{Backend, DataMut, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VecZnxDftToRef, VmpPMat, ZnxInfos},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
@@ -45,46 +45,10 @@ impl<D: DataMut> GLWE<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<BE: Backend> GLWEKeyswitch<BE> for Module<BE> where
|
||||
Self: Sized
|
||||
+ ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
{
|
||||
}
|
||||
|
||||
pub trait GLWEKeyswitch<BE: Backend>
|
||||
impl<BE: Backend> GLWEKeyswitch<BE> for Module<BE>
|
||||
where
|
||||
Self: Sized
|
||||
+ ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Self: Sized + GLWEKeySwitchInternal<BE> + VecZnxBigNormalizeTmpBytes + VecZnxBigNormalize<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, key_infos: &B) -> usize
|
||||
where
|
||||
@@ -92,34 +56,10 @@ where
|
||||
A: GLWEInfos,
|
||||
B: GGLWEInfos,
|
||||
{
|
||||
let in_size: usize = a_infos
|
||||
.k()
|
||||
.div_ceil(key_infos.base2k())
|
||||
.div_ceil(key_infos.dsize().into()) as usize;
|
||||
let out_size: usize = res_infos.size();
|
||||
let ksk_size: usize = key_infos.size();
|
||||
let res_dft: usize = self.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
|
||||
let ai_dft: usize = self.bytes_of_vec_znx_dft((key_infos.rank_in()).into(), in_size);
|
||||
let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
out_size,
|
||||
in_size,
|
||||
in_size,
|
||||
(key_infos.rank_in()).into(),
|
||||
(key_infos.rank_out() + 1).into(),
|
||||
ksk_size,
|
||||
) + self.bytes_of_vec_znx_dft((key_infos.rank_in()).into(), in_size);
|
||||
let normalize_big: usize = self.vec_znx_big_normalize_tmp_bytes();
|
||||
if a_infos.base2k() == key_infos.base2k() {
|
||||
res_dft + ((ai_dft + vmp) | normalize_big)
|
||||
} else if key_infos.dsize() == 1 {
|
||||
// In this case, we only need one column, temporary, that we can drop once a_dft is computed.
|
||||
let normalize_conv: usize = VecZnx::bytes_of(self.n(), 1, in_size) + self.vec_znx_normalize_tmp_bytes();
|
||||
res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
|
||||
} else {
|
||||
// Since we stride over a to get a_dft when dsize > 1, we need to store the full columns of a with in the base conversion.
|
||||
let normalize_conv: usize = VecZnx::bytes_of(self.n(), (key_infos.rank_in()).into(), in_size);
|
||||
res_dft + ((ai_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
|
||||
}
|
||||
let cols: usize = res_infos.rank().as_usize() + 1;
|
||||
self.glwe_keyswitch_internal_tmp_bytes(res_infos, a_infos, key_infos)
|
||||
.max(self.vec_znx_big_normalize_tmp_bytes())
|
||||
+ self.bytes_of_vec_znx_dft(cols, key_infos.size())
|
||||
}
|
||||
|
||||
fn glwe_keyswitch<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||
@@ -127,7 +67,6 @@ where
|
||||
R: GLWEToMut,
|
||||
A: GLWEToRef,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||
@@ -164,8 +103,8 @@ where
|
||||
let base2k_out: usize = b.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), b.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, a, b, scratch_1);
|
||||
(0..(res.rank() + 1).into()).for_each(|i| {
|
||||
let res_big: VecZnxBig<&mut [u8], BE> = self.glwe_keyswitch_internal(res_dft, a, b, scratch_1);
|
||||
for i in 0..(res.rank() + 1).into() {
|
||||
self.vec_znx_big_normalize(
|
||||
basek_out,
|
||||
&mut res.data,
|
||||
@@ -175,37 +114,36 @@ where
|
||||
i,
|
||||
scratch_1,
|
||||
);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn glwe_keyswitch_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GLWEToMut,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||
let a: &GGLWEPrepared<&[u8], BE> = &key.to_ref();
|
||||
let key: &GGLWEPrepared<&[u8], BE> = &key.to_ref();
|
||||
|
||||
assert_eq!(
|
||||
res.rank(),
|
||||
a.rank_in(),
|
||||
key.rank_in(),
|
||||
"res.rank(): {} != a.rank_in(): {}",
|
||||
res.rank(),
|
||||
a.rank_in()
|
||||
key.rank_in()
|
||||
);
|
||||
assert_eq!(
|
||||
res.rank(),
|
||||
a.rank_out(),
|
||||
key.rank_out(),
|
||||
"res.rank(): {} != b.rank_out(): {}",
|
||||
res.rank(),
|
||||
a.rank_out()
|
||||
key.rank_out()
|
||||
);
|
||||
|
||||
assert_eq!(res.n(), self.n() as u32);
|
||||
assert_eq!(a.n(), self.n() as u32);
|
||||
assert_eq!(key.n(), self.n() as u32);
|
||||
|
||||
let scrach_needed: usize = self.glwe_keyswitch_tmp_bytes(res, res, a);
|
||||
let scrach_needed: usize = self.glwe_keyswitch_tmp_bytes(res, res, key);
|
||||
|
||||
assert!(
|
||||
scratch.available() >= scrach_needed,
|
||||
@@ -214,11 +152,11 @@ where
|
||||
);
|
||||
|
||||
let base2k_in: usize = res.base2k().into();
|
||||
let base2k_out: usize = a.base2k().into();
|
||||
let base2k_out: usize = key.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), a.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, res, a, scratch_1);
|
||||
(0..(res.rank() + 1).into()).for_each(|i| {
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<&mut [u8], BE> = self.glwe_keyswitch_internal(res_dft, res, key, scratch_1);
|
||||
for i in 0..(res.rank() + 1).into() {
|
||||
self.vec_znx_big_normalize(
|
||||
base2k_in,
|
||||
&mut res.data,
|
||||
@@ -228,143 +166,235 @@ where
|
||||
i,
|
||||
scratch_1,
|
||||
);
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GLWE<Vec<u8>> {}
|
||||
pub trait GLWEKeyswitch<BE: Backend> {
|
||||
fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, key_infos: &B) -> usize
|
||||
where
|
||||
R: GLWEInfos,
|
||||
A: GLWEInfos,
|
||||
B: GGLWEInfos;
|
||||
|
||||
impl<DataSelf: DataMut> GLWE<DataSelf> {}
|
||||
fn glwe_keyswitch<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GLWEToMut,
|
||||
A: GLWEToRef,
|
||||
K: GGLWEPreparedToRef<BE>;
|
||||
|
||||
pub(crate) fn keyswitch_internal<BE: Backend, M, DR, A, K>(
|
||||
module: &M,
|
||||
mut res: VecZnxDft<DR, BE>,
|
||||
a: &A,
|
||||
key: &K,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) -> VecZnxBig<DR, BE>
|
||||
where
|
||||
DR: DataMut,
|
||||
A: GLWEToRef,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
M: ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
fn glwe_keyswitch_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GLWEToMut,
|
||||
K: GGLWEPreparedToRef<BE>;
|
||||
}
|
||||
|
||||
impl<BE: Backend> GLWEKeySwitchInternal<BE> for Module<BE> where
|
||||
Self: GGLWEProduct<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
{
|
||||
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||
let key: &GGLWEPrepared<&[u8], BE> = &key.to_ref();
|
||||
}
|
||||
|
||||
let base2k_in: usize = a.base2k().into();
|
||||
let base2k_out: usize = key.base2k().into();
|
||||
let cols: usize = (a.rank() + 1).into();
|
||||
let a_size: usize = (a.size() * base2k_in).div_ceil(base2k_out);
|
||||
let pmat: &VmpPMat<&[u8], BE> = &key.data;
|
||||
pub(crate) trait GLWEKeySwitchInternal<BE: Backend>
|
||||
where
|
||||
Self: GGLWEProduct<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
fn glwe_keyswitch_internal_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
|
||||
where
|
||||
R: GLWEInfos,
|
||||
A: GLWEInfos,
|
||||
K: GGLWEInfos,
|
||||
{
|
||||
let cols: usize = (a_infos.rank() + 1).into();
|
||||
let a_size: usize = a_infos.size();
|
||||
|
||||
if key.dsize() == 1 {
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a.size());
|
||||
let a_conv = if a_infos.base2k() == key_infos.base2k() {
|
||||
0
|
||||
} else {
|
||||
VecZnx::bytes_of(self.n(), 1, a_size) + self.vec_znx_normalize_tmp_bytes()
|
||||
};
|
||||
|
||||
self.gglwe_product_dft_tmp_bytes(res_infos.size(), a_size, key_infos) + self.bytes_of_vec_znx_dft(cols, a_size) + a_conv
|
||||
}
|
||||
|
||||
fn glwe_keyswitch_internal<DR, A, K>(
|
||||
&self,
|
||||
mut res: VecZnxDft<DR, BE>,
|
||||
a: &A,
|
||||
key: &K,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) -> VecZnxBig<DR, BE>
|
||||
where
|
||||
DR: DataMut,
|
||||
A: GLWEToRef,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||
let key: &GGLWEPrepared<&[u8], BE> = &key.to_ref();
|
||||
|
||||
let base2k_in: usize = a.base2k().into();
|
||||
let base2k_out: usize = key.base2k().into();
|
||||
let cols: usize = (a.rank() + 1).into();
|
||||
let a_size: usize = (a.size() * base2k_in).div_ceil(base2k_out);
|
||||
|
||||
let (mut a_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols - 1, a_size);
|
||||
|
||||
if base2k_in == base2k_out {
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a.data(), col_i + 1);
|
||||
});
|
||||
for col_i in 0..cols - 1 {
|
||||
self.vec_znx_dft_apply(1, 0, &mut a_dft, col_i, a.data(), col_i + 1);
|
||||
}
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module.n(), 1, a_size);
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_normalize(
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(self.n(), 1, a_size);
|
||||
for i in 0..cols - 1 {
|
||||
self.vec_znx_normalize(
|
||||
base2k_out,
|
||||
&mut a_conv,
|
||||
0,
|
||||
base2k_in,
|
||||
a.data(),
|
||||
col_i + 1,
|
||||
i + 1,
|
||||
scratch_2,
|
||||
);
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
|
||||
});
|
||||
self.vec_znx_dft_apply(1, 0, &mut a_dft, i, &a_conv, 0);
|
||||
}
|
||||
}
|
||||
|
||||
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
|
||||
} else {
|
||||
let dsize: usize = key.dsize().into();
|
||||
self.gglwe_product_dft(&mut res, &a_dft, key, scratch_1);
|
||||
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a_size.div_ceil(dsize));
|
||||
ai_dft.data_mut().fill(0);
|
||||
let mut res_big: VecZnxBig<DR, BE> = self.vec_znx_idft_apply_consume(res);
|
||||
self.vec_znx_big_add_small_inplace(&mut res_big, 0, a.data(), 0);
|
||||
res_big
|
||||
}
|
||||
}
|
||||
|
||||
if base2k_in == base2k_out {
|
||||
for di in 0..dsize {
|
||||
ai_dft.set_size((a_size + di) / dsize);
|
||||
impl<BE: Backend> GGLWEProduct<BE> for Module<BE> where
|
||||
Self: Sized
|
||||
+ ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftCopy<BE>
|
||||
{
|
||||
}
|
||||
|
||||
// Small optimization for dsize > 2
|
||||
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
|
||||
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
|
||||
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
|
||||
// It is possible to further ignore the last dsize-1 limbs, but this introduce
|
||||
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
|
||||
// noise is kept with respect to the ideal functionality.
|
||||
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
|
||||
pub(crate) trait GGLWEProduct<BE: Backend>
|
||||
where
|
||||
Self: Sized
|
||||
+ ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftCopy<BE>,
|
||||
{
|
||||
fn gglwe_product_dft_tmp_bytes<K>(&self, res_size: usize, a_size: usize, key_infos: &K) -> usize
|
||||
where
|
||||
K: GGLWEInfos,
|
||||
{
|
||||
let dsize: usize = key_infos.dsize().as_usize();
|
||||
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a.data(), j + 1);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_1);
|
||||
}
|
||||
}
|
||||
if dsize == 1 {
|
||||
self.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
res_size,
|
||||
a_size,
|
||||
key_infos.dnum().into(),
|
||||
(key_infos.rank_in()).into(),
|
||||
(key_infos.rank_out() + 1).into(),
|
||||
key_infos.size(),
|
||||
)
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module.n(), cols - 1, a_size);
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_normalize(
|
||||
base2k_out,
|
||||
&mut a_conv,
|
||||
j,
|
||||
base2k_in,
|
||||
a.data(),
|
||||
j + 1,
|
||||
scratch_2,
|
||||
);
|
||||
}
|
||||
let dnum: usize = key_infos.dnum().into();
|
||||
let a_size: usize = a_size.div_ceil(dsize).min(dnum);
|
||||
let ai_dft: usize = self.bytes_of_vec_znx_dft(key_infos.rank_in().into(), a_size);
|
||||
|
||||
for di in 0..dsize {
|
||||
ai_dft.set_size((a_size + di) / dsize);
|
||||
let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
res_size,
|
||||
a_size,
|
||||
dnum,
|
||||
(key_infos.rank_in()).into(),
|
||||
(key_infos.rank_out() + 1).into(),
|
||||
key_infos.size(),
|
||||
);
|
||||
|
||||
// Small optimization for dsize > 2
|
||||
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
|
||||
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
|
||||
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
|
||||
// It is possible to further ignore the last dsize-1 limbs, but this introduce
|
||||
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
|
||||
// noise is kept with respect to the ideal functionality.
|
||||
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
|
||||
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_2);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_2);
|
||||
}
|
||||
}
|
||||
ai_dft + vmp
|
||||
}
|
||||
|
||||
res.set_size(res.max_size());
|
||||
}
|
||||
|
||||
let mut res_big: VecZnxBig<DR, BE> = module.vec_znx_idft_apply_consume(res);
|
||||
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a.data(), 0);
|
||||
res_big
|
||||
fn gglwe_product_dft<DR, A, K>(&self, res: &mut VecZnxDft<DR, BE>, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
DR: DataMut,
|
||||
A: VecZnxDftToRef<BE>,
|
||||
K: GGLWEPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let a: &VecZnxDft<&[u8], BE> = &a.to_ref();
|
||||
let key: &GGLWEPrepared<&[u8], BE> = &key.to_ref();
|
||||
|
||||
let cols: usize = a.cols();
|
||||
let a_size: usize = a.size();
|
||||
let pmat: &VmpPMat<&[u8], BE> = &key.data;
|
||||
|
||||
// If dsize == 1, then the digit decomposition is equal to Base2K and we can simply
// call the vmp API.
if key.dsize() == 1 {
|
||||
self.vmp_apply_dft_to_dft(res, a, pmat, scratch);
|
||||
// If dsize != 1, then the digit decomposition is k * Base2K with k > 1.
// As such we need to perform a bivariate polynomial convolution in (X, Y) / (X^{N}+1) with Y = 2^-K
// (instead of a univariate one in X).
//
// Since the basis in Y is small (in practice degree 6-7 max), we perform it naively.
// To do so, we group the different limbs of ai_dft by their respective degree in Y,
// which are multiples of the current digit.
// For example if dsize = 3, with ai_dft = [a0, a1, a2, a3, a4, a5, a6],
// we group them as [[a0, a3, a6], [a1, a4, 0], [a2, a5, 0]]
// and evaluate sum(a_di * pmat * 2^{di*Base2k}).
} else {
|
||||
let dsize: usize = key.dsize().into();
|
||||
let dnum: usize = key.dnum().into();
|
||||
|
||||
// We bound ai_dft size by the number of rows of the matrix
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, a_size.div_ceil(dsize).min(dnum));
|
||||
ai_dft.data_mut().fill(0);
|
||||
|
||||
for di in 0..dsize {
|
||||
// Sets ai_dft size according to the current digit (if dsize does not divides a_size),
|
||||
// bounded by the number of rows (digits) in the prepared matrix.
|
||||
ai_dft.set_size(((a_size + di) / dsize).min(dnum));
|
||||
|
||||
// Small optimization for dsize > 2:
// VMP products produce some error e, and since we aggregate vmp * 2^{di * Base2k}, then
// we also aggregate ei * 2^{di * Base2k}, with the largest error being ei * 2^{(dsize-1) * Base2k}.
// As such we can safely ignore the last dsize-2 limbs of the sum of vmp products.
// It is possible to further ignore the last dsize-1 limbs, but this introduces
// ~0.5 to 1 bit of additional noise, and thus it is not done here, so that the same
// noise is kept with respect to the ideal functionality.
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
|
||||
|
||||
for j in 0..cols {
|
||||
self.vec_znx_dft_copy(dsize, dsize - di - 1, &mut ai_dft, j, a, j);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
// res = pmat * ai_dft
|
||||
self.vmp_apply_dft_to_dft(res, &ai_dft, pmat, scratch_1);
|
||||
} else {
|
||||
// res = (pmat * ai_dft) * 2^{di * Base2k}
|
||||
self.vmp_apply_dft_to_dft_add(res, &ai_dft, pmat, di, scratch_1);
|
||||
}
|
||||
}
|
||||
|
||||
res.set_size(res.max_size());
|
||||
}
|
||||
}
|
||||
}
|
||||
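To restate the loop above as a formula (added for readability; the notation is mine): with Base2k-sized limbs a_0, a_1, ..., the stride-`dsize` copy with offset `dsize - di - 1` gathers the sub-vector a^(di) of limbs whose index is congruent to dsize - di - 1 modulo dsize, and the `vmp_apply_dft_to_dft_add(..., di, ...)` calls accumulate

    res = sum_{di = 0}^{dsize - 1} (pmat * a^(di)) * 2^{di * Base2k},

which is the `sum(a_di * pmat * 2^{di*Base2k})` of the comment. For instance with dsize = 2 and limbs [a0, a1, a2, a3], the two partial products use a^(0) = [a1, a3] and a^(1) = [a0, a2].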
|
||||
237 poulpy-core/src/layouts/compressed/gglwe_to_ggsw_key.rs Normal file
@@ -0,0 +1,237 @@
|
||||
use poulpy_hal::{
|
||||
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWECompressed, GGLWECompressedToMut, GGLWECompressedToRef, GGLWEDecompress, GGLWEInfos,
|
||||
GGLWEToGGSWKey, GGLWEToGGSWKeyToMut, GLWEInfos, LWEInfos, Rank, TorusPrecision,
|
||||
};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
|
||||
use std::fmt;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct GGLWEToGGSWKeyCompressed<D: Data> {
|
||||
pub(crate) keys: Vec<GGLWECompressed<D>>,
|
||||
}
|
||||
|
||||
impl<D: Data> LWEInfos for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn n(&self) -> Degree {
|
||||
self.keys[0].n()
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.keys[0].base2k()
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.keys[0].k()
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.keys[0].size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GLWEInfos for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GGLWEInfos for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn rank_in(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.keys[0].dsize()
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.keys[0].dnum()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Debug for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{self}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> FillUniform for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.keys
|
||||
.iter_mut()
|
||||
.for_each(|key: &mut GGLWECompressed<D>| key.fill_uniform(log_bound, source))
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
writeln!(f, "(GGLWEToGGSWKeyCompressed)",)?;
|
||||
for (i, key) in self.keys.iter().enumerate() {
|
||||
write!(f, "{i}: {key}")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl GGLWEToGGSWKeyCompressed<Vec<u8>> {
|
||||
pub fn alloc_from_infos<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
assert_eq!(
|
||||
infos.rank_in(),
|
||||
infos.rank_out(),
|
||||
"rank_in != rank_out is not supported for GGLWEToGGSWKeyCompressed"
|
||||
);
|
||||
Self::alloc(
|
||||
infos.n(),
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank(),
|
||||
infos.dnum(),
|
||||
infos.dsize(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self {
|
||||
GGLWEToGGSWKeyCompressed {
|
||||
keys: (0..rank.as_usize())
|
||||
.map(|_| GGLWECompressed::alloc(n, base2k, k, rank, rank, dnum, dsize))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bytes_of_from_infos<A>(infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
assert_eq!(
|
||||
infos.rank_in(),
|
||||
infos.rank_out(),
|
||||
"rank_in != rank_out is not supported for GGLWEToGGSWKeyCompressed"
|
||||
);
|
||||
Self::bytes_of(
|
||||
infos.n(),
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank(),
|
||||
infos.dnum(),
|
||||
infos.dsize(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bytes_of(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
|
||||
rank.as_usize() * GGLWECompressed::bytes_of(n, base2k, k, rank, dnum, dsize)
|
||||
}
|
||||
}
|
||||
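Since the new compressed key derives `PartialEq` and implements `WriterTo`/`ReaderFrom`, a serialization round-trip is a natural smoke test. The sketch below is not from the commit: the concrete parameter values, the tuple-struct constructors for the layout newtypes, the `log_bound` value and the `Source::new` seed constructor are all assumptions made for illustration.

    // Hedged sketch of a write/read round-trip for GGLWEToGGSWKeyCompressed.
    let mut key = GGLWEToGGSWKeyCompressed::alloc(
        Degree(1 << 10), Base2K(17), TorusPrecision(54), Rank(2), Dnum(3), Dsize(1),
    );
    let mut source = Source::new([0u8; 32]); // seeded PRNG from poulpy_hal; constructor assumed
    key.fill_uniform(17, &mut source);       // log_bound chosen arbitrarily for the test

    let mut bytes: Vec<u8> = Vec::new();
    key.write_to(&mut bytes).unwrap();

    let mut key2 = GGLWEToGGSWKeyCompressed::alloc(
        Degree(1 << 10), Base2K(17), TorusPrecision(54), Rank(2), Dnum(3), Dsize(1),
    );
    key2.read_from(&mut bytes.as_slice()).unwrap();
    assert_eq!(key, key2);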
|
||||
impl<D: DataMut> GGLWEToGGSWKeyCompressed<D> {
|
||||
// Returns a mutable reference to GGLWE_{s}([s[i]*s[0], s[i]*s[1], ..., s[i]*s[rank]])
|
||||
pub fn at_mut(&mut self, i: usize) -> &mut GGLWECompressed<D> {
|
||||
assert!((i as u32) < self.rank());
|
||||
&mut self.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWEToGGSWKeyCompressed<D> {
|
||||
// Returns a reference to GGLWE_{s}(s[i] * s[j])
|
||||
pub fn at(&self, i: usize) -> &GGLWECompressed<D> {
|
||||
assert!((i as u32) < self.rank());
|
||||
&self.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> ReaderFrom for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
|
||||
if self.keys.len() != len {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
format!("self.keys.len()={} != read len={}", self.keys.len(), len),
|
||||
));
|
||||
}
|
||||
for key in &mut self.keys {
|
||||
key.read_from(reader)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for GGLWEToGGSWKeyCompressed<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
|
||||
for key in &self.keys {
|
||||
key.write_to(writer)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGLWEToGGSWKeyDecompress
|
||||
where
|
||||
Self: GGLWEDecompress,
|
||||
{
|
||||
fn decompress_gglwe_to_ggsw_key<R, O>(&self, res: &mut R, other: &O)
|
||||
where
|
||||
R: GGLWEToGGSWKeyToMut,
|
||||
O: GGLWEToGGSWKeyCompressedToRef,
|
||||
{
|
||||
let res: &mut GGLWEToGGSWKey<&mut [u8]> = &mut res.to_mut();
|
||||
let other: &GGLWEToGGSWKeyCompressed<&[u8]> = &other.to_ref();
|
||||
|
||||
assert_eq!(res.keys.len(), other.keys.len());
|
||||
|
||||
for (a, b) in res.keys.iter_mut().zip(other.keys.iter()) {
|
||||
self.decompress_gglwe(a, b);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWEToGGSWKey<D> {
|
||||
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||
where
|
||||
M: GGLWEToGGSWKeyDecompress,
|
||||
O: GGLWEToGGSWKeyCompressedToRef,
|
||||
{
|
||||
module.decompress_gglwe_to_ggsw_key(self, other);
|
||||
}
|
||||
}
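A minimal sketch (not part of this commit) of how the decompression entry point above could be driven generically; the module value and an already-allocated destination key are assumed to come from the caller.

fn decompress_conversion_key<M, O>(module: &M, compressed: &O, out: &mut GGLWEToGGSWKey<Vec<u8>>)
where
    M: GGLWEToGGSWKeyDecompress,
    O: GGLWEToGGSWKeyCompressedToRef,
{
    // Delegates to the module implementation, which expands each compressed GGLWE in turn.
    out.decompress(module, compressed);
}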
|
||||
|
||||
pub trait GGLWEToGGSWKeyCompressedToRef {
|
||||
fn to_ref(&self) -> GGLWEToGGSWKeyCompressed<&[u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWEToGGSWKeyCompressedToRef for GGLWEToGGSWKeyCompressed<D>
|
||||
where
|
||||
GGLWECompressed<D>: GGLWECompressedToRef,
|
||||
{
|
||||
fn to_ref(&self) -> GGLWEToGGSWKeyCompressed<&[u8]> {
|
||||
GGLWEToGGSWKeyCompressed {
|
||||
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGLWEToGGSWKeyCompressedToMut {
|
||||
fn to_mut(&mut self) -> GGLWEToGGSWKeyCompressed<&mut [u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWEToGGSWKeyCompressedToMut for GGLWEToGGSWKeyCompressed<D>
|
||||
where
|
||||
GGLWECompressed<D>: GGLWECompressedToMut,
|
||||
{
|
||||
fn to_mut(&mut self) -> GGLWEToGGSWKeyCompressed<&mut [u8]> {
|
||||
GGLWEToGGSWKeyCompressed {
|
||||
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,31 +4,34 @@ use poulpy_hal::{
|
||||
};
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWECompressed, GGLWECompressedToMut, GGLWECompressedToRef, GGLWEDecompress, GGLWEInfos,
|
||||
GLWEInfos, GLWETensorKey, GLWETensorKeyToMut, LWEInfos, Rank, TorusPrecision,
|
||||
Base2K, Degree, Dnum, Dsize, GGLWECompressed, GGLWECompressedSeedMut, GGLWECompressedToMut, GGLWECompressedToRef,
|
||||
GGLWEDecompress, GGLWEInfos, GGLWEToMut, GLWEInfos, GLWETensorKey, LWEInfos, Rank, TorusPrecision,
|
||||
};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
use std::fmt;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct GLWETensorKeyCompressed<D: Data> {
|
||||
pub(crate) keys: Vec<GGLWECompressed<D>>,
|
||||
pub struct GLWETensorKeyCompressed<D: Data>(pub(crate) GGLWECompressed<D>);
|
||||
|
||||
impl<D: DataMut> GGLWECompressedSeedMut for GLWETensorKeyCompressed<D> {
|
||||
fn seed_mut(&mut self) -> &mut Vec<[u8; 32]> {
|
||||
&mut self.0.seed
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> LWEInfos for GLWETensorKeyCompressed<D> {
|
||||
fn n(&self) -> Degree {
|
||||
self.keys[0].n()
|
||||
self.0.n()
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.keys[0].base2k()
|
||||
self.0.base2k()
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.keys[0].k()
|
||||
self.0.k()
|
||||
}
|
||||
fn size(&self) -> usize {
|
||||
self.keys[0].size()
|
||||
self.0.size()
|
||||
}
|
||||
}
|
||||
impl<D: Data> GLWEInfos for GLWETensorKeyCompressed<D> {
|
||||
@@ -43,15 +46,15 @@ impl<D: Data> GGLWEInfos for GLWETensorKeyCompressed<D> {
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
self.0.rank_out()
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.keys[0].dsize()
|
||||
self.0.dsize()
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.keys[0].dnum()
|
||||
self.0.dnum()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,18 +66,14 @@ impl<D: DataRef> fmt::Debug for GLWETensorKeyCompressed<D> {
|
||||
|
||||
impl<D: DataMut> FillUniform for GLWETensorKeyCompressed<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.keys
|
||||
.iter_mut()
|
||||
.for_each(|key: &mut GGLWECompressed<D>| key.fill_uniform(log_bound, source))
|
||||
self.0.fill_uniform(log_bound, source);
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for GLWETensorKeyCompressed<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
writeln!(f, "(GLWETensorKeyCompressed)",)?;
|
||||
for (i, key) in self.keys.iter().enumerate() {
|
||||
write!(f, "{i}: {key}")?;
|
||||
}
|
||||
write!(f, "{}", self.0)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -96,11 +95,15 @@ impl GLWETensorKeyCompressed<Vec<u8>> {
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self {
|
||||
let pairs: u32 = (((rank.as_u32() + 1) * rank.as_u32()) >> 1).max(1);
|
||||
GLWETensorKeyCompressed {
|
||||
keys: (0..pairs)
|
||||
.map(|_| GGLWECompressed::alloc(n, base2k, k, Rank(1), rank, dnum, dsize))
|
||||
.collect(),
|
||||
}
|
||||
GLWETensorKeyCompressed(GGLWECompressed::alloc(
|
||||
n,
|
||||
base2k,
|
||||
k,
|
||||
Rank(pairs),
|
||||
rank,
|
||||
dnum,
|
||||
dsize,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn bytes_of_from_infos<A>(infos: &A) -> usize
|
||||
@@ -118,88 +121,35 @@ impl GLWETensorKeyCompressed<Vec<u8>> {
|
||||
}
|
||||
|
||||
pub fn bytes_of(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
|
||||
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
|
||||
pairs * GGLWECompressed::bytes_of(n, base2k, k, Rank(1), dnum, dsize)
|
||||
let pairs: u32 = (((rank.as_u32() + 1) * rank.as_u32()) >> 1).max(1);
|
||||
GGLWECompressed::bytes_of(n, base2k, k, Rank(pairs), dnum, dsize)
|
||||
}
|
||||
}
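For reference, a standalone sketch (not from the patch) of the pair count behind the new single-GGLWE layout above: the compressed tensor key now has input rank pairs(rank) instead of holding pairs separate rank-1 ciphertexts.

fn tensor_key_pairs(rank: u32) -> u32 {
    // Number of unordered pairs (i, j) with i <= j, clamped to at least 1.
    (((rank + 1) * rank) >> 1).max(1)
}

#[test]
fn tensor_key_pairs_small_ranks() {
    assert_eq!(tensor_key_pairs(1), 1);
    assert_eq!(tensor_key_pairs(2), 3);
    assert_eq!(tensor_key_pairs(3), 6);
}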
|
||||
|
||||
impl<D: DataMut> ReaderFrom for GLWETensorKeyCompressed<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
|
||||
if self.keys.len() != len {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
format!("self.keys.len()={} != read len={}", self.keys.len(), len),
|
||||
));
|
||||
}
|
||||
for key in &mut self.keys {
|
||||
key.read_from(reader)?;
|
||||
}
|
||||
self.0.read_from(reader)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for GLWETensorKeyCompressed<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
|
||||
for key in &self.keys {
|
||||
key.write_to(writer)?;
|
||||
}
|
||||
self.0.write_to(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyCompressedAtRef<D: DataRef> {
|
||||
fn at(&self, i: usize, j: usize) -> &GGLWECompressed<D>;
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWETensorKeyCompressedAtRef<D> for GLWETensorKeyCompressed<D> {
|
||||
fn at(&self, mut i: usize, mut j: usize) -> &GGLWECompressed<D> {
|
||||
if i > j {
|
||||
std::mem::swap(&mut i, &mut j);
|
||||
};
|
||||
let rank: usize = self.rank_out().into();
|
||||
&self.keys[i * rank + j - (i * (i + 1) / 2)]
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyCompressedAtMut<D: DataMut> {
|
||||
fn at_mut(&mut self, i: usize, j: usize) -> &mut GGLWECompressed<D>;
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWETensorKeyCompressedAtMut<D> for GLWETensorKeyCompressed<D> {
|
||||
fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GGLWECompressed<D> {
|
||||
if i > j {
|
||||
std::mem::swap(&mut i, &mut j);
|
||||
};
|
||||
let rank: usize = self.rank_out().into();
|
||||
&mut self.keys[i * rank + j - (i * (i + 1) / 2)]
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyDecompress
|
||||
where
|
||||
Self: GGLWEDecompress,
|
||||
{
|
||||
fn decompress_tensor_key<R, O>(&self, res: &mut R, other: &O)
|
||||
where
|
||||
R: GLWETensorKeyToMut,
|
||||
O: GLWETensorKeyCompressedToRef,
|
||||
R: GGLWEToMut,
|
||||
O: GGLWECompressedToRef,
|
||||
{
|
||||
let res: &mut GLWETensorKey<&mut [u8]> = &mut res.to_mut();
|
||||
let other: &GLWETensorKeyCompressed<&[u8]> = &other.to_ref();
|
||||
|
||||
assert_eq!(
|
||||
res.keys.len(),
|
||||
other.keys.len(),
|
||||
"invalid receiver: res.keys.len()={} != other.keys.len()={}",
|
||||
res.keys.len(),
|
||||
other.keys.len()
|
||||
);
|
||||
|
||||
for (a, b) in res.keys.iter_mut().zip(other.keys.iter()) {
|
||||
self.decompress_gglwe(a, b);
|
||||
}
|
||||
self.decompress_gglwe(res, other);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -208,39 +158,27 @@ impl<B: Backend> GLWETensorKeyDecompress for Module<B> where Self: GGLWEDecompre
|
||||
impl<D: DataMut> GLWETensorKey<D> {
|
||||
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||
where
|
||||
O: GLWETensorKeyCompressedToRef,
|
||||
O: GGLWECompressedToRef,
|
||||
M: GLWETensorKeyDecompress,
|
||||
{
|
||||
module.decompress_tensor_key(self, other);
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyCompressedToMut {
|
||||
fn to_mut(&mut self) -> GLWETensorKeyCompressed<&mut [u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWETensorKeyCompressedToMut for GLWETensorKeyCompressed<D>
|
||||
impl<D: DataMut> GGLWECompressedToMut for GLWETensorKeyCompressed<D>
|
||||
where
|
||||
GGLWECompressed<D>: GGLWECompressedToMut,
|
||||
{
|
||||
fn to_mut(&mut self) -> GLWETensorKeyCompressed<&mut [u8]> {
|
||||
GLWETensorKeyCompressed {
|
||||
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
|
||||
}
|
||||
fn to_mut(&mut self) -> GGLWECompressed<&mut [u8]> {
|
||||
self.0.to_mut()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyCompressedToRef {
|
||||
fn to_ref(&self) -> GLWETensorKeyCompressed<&[u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWETensorKeyCompressedToRef for GLWETensorKeyCompressed<D>
|
||||
impl<D: DataRef> GGLWECompressedToRef for GLWETensorKeyCompressed<D>
|
||||
where
|
||||
GGLWECompressed<D>: GGLWECompressedToRef,
|
||||
{
|
||||
fn to_ref(&self) -> GLWETensorKeyCompressed<&[u8]> {
|
||||
GLWETensorKeyCompressed {
|
||||
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
|
||||
}
|
||||
fn to_ref(&self) -> GGLWECompressed<&[u8]> {
|
||||
self.0.to_ref()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ use poulpy_hal::{
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWECompressed, GGLWECompressedToMut, GGLWECompressedToRef, GGLWEInfos, GGLWEToMut, GLWEInfos,
|
||||
GLWESwitchingKeyDegrees, GLWESwitchingKeyDegreesMut, GLWEToLWESwitchingKey, LWEInfos, Rank, TorusPrecision,
|
||||
GLWESwitchingKeyDegrees, GLWESwitchingKeyDegreesMut, GLWEToLWEKey, LWEInfos, Rank, TorusPrecision,
|
||||
compressed::{GLWESwitchingKeyCompressed, GLWESwitchingKeyDecompress},
|
||||
};
|
||||
|
||||
@@ -147,7 +147,7 @@ pub trait GLWEToLWESwitchingKeyDecompress
|
||||
where
|
||||
Self: GLWESwitchingKeyDecompress,
|
||||
{
|
||||
fn decompress_glwe_to_lwe_switching_key<R, O>(&self, res: &mut R, other: &O)
|
||||
fn decompress_glwe_to_lwe_key<R, O>(&self, res: &mut R, other: &O)
|
||||
where
|
||||
R: GGLWEToMut + GLWESwitchingKeyDegreesMut,
|
||||
O: GGLWECompressedToRef + GLWESwitchingKeyDegrees,
|
||||
@@ -158,13 +158,13 @@ where
|
||||
|
||||
impl<B: Backend> GLWEToLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||
|
||||
impl<D: DataMut> GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataMut> GLWEToLWEKey<D> {
|
||||
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||
where
|
||||
O: GGLWECompressedToRef + GLWESwitchingKeyDegrees,
|
||||
M: GLWEToLWESwitchingKeyDecompress,
|
||||
{
|
||||
module.decompress_glwe_to_lwe_switching_key(self, other);
|
||||
module.decompress_glwe_to_lwe_key(self, other);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,15 +5,15 @@ use poulpy_hal::{
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWECompressed, GGLWECompressedToMut, GGLWECompressedToRef, GGLWEInfos, GGLWEToMut, GLWEInfos,
|
||||
GLWESwitchingKeyDegrees, GLWESwitchingKeyDegreesMut, LWEInfos, LWEToGLWESwitchingKey, Rank, TorusPrecision,
|
||||
GLWESwitchingKeyDegrees, GLWESwitchingKeyDegreesMut, LWEInfos, LWEToGLWEKey, Rank, TorusPrecision,
|
||||
compressed::{GLWESwitchingKeyCompressed, GLWESwitchingKeyDecompress},
|
||||
};
|
||||
use std::fmt;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct LWEToGLWESwitchingKeyCompressed<D: Data>(pub(crate) GLWESwitchingKeyCompressed<D>);
|
||||
pub struct LWEToGLWEKeyCompressed<D: Data>(pub(crate) GLWESwitchingKeyCompressed<D>);
|
||||
|
||||
impl<D: Data> LWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: Data> LWEInfos for LWEToGLWEKeyCompressed<D> {
|
||||
fn n(&self) -> Degree {
|
||||
self.0.n()
|
||||
}
|
||||
@@ -29,13 +29,13 @@ impl<D: Data> LWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
self.0.size()
|
||||
}
|
||||
}
|
||||
impl<D: Data> GLWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: Data> GLWEInfos for LWEToGLWEKeyCompressed<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GGLWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: Data> GGLWEInfos for LWEToGLWEKeyCompressed<D> {
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.0.dsize()
|
||||
}
|
||||
@@ -53,37 +53,37 @@ impl<D: Data> GGLWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Debug for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataRef> fmt::Debug for LWEToGLWEKeyCompressed<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{self}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> FillUniform for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataMut> FillUniform for LWEToGLWEKeyCompressed<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.0.fill_uniform(log_bound, source);
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataRef> fmt::Display for LWEToGLWEKeyCompressed<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "(LWEToGLWESwitchingKeyCompressed) {}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> ReaderFrom for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataMut> ReaderFrom for LWEToGLWEKeyCompressed<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
self.0.read_from(reader)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataRef> WriterTo for LWEToGLWEKeyCompressed<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.0.write_to(writer)
|
||||
}
|
||||
}
|
||||
|
||||
impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
|
||||
impl LWEToGLWEKeyCompressed<Vec<u8>> {
|
||||
pub fn alloc_from_infos<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
@@ -108,7 +108,7 @@ impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
|
||||
}
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self {
|
||||
LWEToGLWESwitchingKeyCompressed(GLWESwitchingKeyCompressed::alloc(
|
||||
LWEToGLWEKeyCompressed(GLWESwitchingKeyCompressed::alloc(
|
||||
n,
|
||||
base2k,
|
||||
k,
|
||||
@@ -141,11 +141,11 @@ impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
|
||||
}
|
||||
}
|
||||
|
||||
pub trait LWEToGLWESwitchingKeyDecompress
|
||||
pub trait LWEToGLWEKeyDecompress
|
||||
where
|
||||
Self: GLWESwitchingKeyDecompress,
|
||||
{
|
||||
fn decompress_lwe_to_glwe_switching_key<R, O>(&self, res: &mut R, other: &O)
|
||||
fn decompress_lwe_to_glwe_key<R, O>(&self, res: &mut R, other: &O)
|
||||
where
|
||||
R: GGLWEToMut + GLWESwitchingKeyDegreesMut,
|
||||
O: GGLWECompressedToRef + GLWESwitchingKeyDegrees,
|
||||
@@ -154,25 +154,25 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: Backend> LWEToGLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||
impl<B: Backend> LWEToGLWEKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||
|
||||
impl<D: DataMut> LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataMut> LWEToGLWEKey<D> {
|
||||
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||
where
|
||||
O: GGLWECompressedToRef + GLWESwitchingKeyDegrees,
|
||||
M: LWEToGLWESwitchingKeyDecompress,
|
||||
M: LWEToGLWEKeyDecompress,
|
||||
{
|
||||
module.decompress_lwe_to_glwe_switching_key(self, other);
|
||||
module.decompress_lwe_to_glwe_key(self, other);
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWECompressedToRef for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataRef> GGLWECompressedToRef for LWEToGLWEKeyCompressed<D> {
|
||||
fn to_ref(&self) -> GGLWECompressed<&[u8]> {
|
||||
self.0.to_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWECompressedToMut for LWEToGLWESwitchingKeyCompressed<D> {
|
||||
impl<D: DataMut> GGLWECompressedToMut for LWEToGLWEKeyCompressed<D> {
|
||||
fn to_mut(&mut self) -> GGLWECompressed<&mut [u8]> {
|
||||
self.0.to_mut()
|
||||
}
|
||||
@@ -1,21 +1,23 @@
mod gglwe;
mod gglwe_to_ggsw_key;
mod ggsw;
mod glwe;
mod glwe_automorphism_key;
mod glwe_switching_key;
mod glwe_tensor_key;
mod glwe_to_lwe_switching_key;
mod glwe_to_lwe_key;
mod lwe;
mod lwe_switching_key;
mod lwe_to_glwe_switching_key;
mod lwe_to_glwe_key;

pub use gglwe::*;
pub use gglwe_to_ggsw_key::*;
pub use ggsw::*;
pub use glwe::*;
pub use glwe_automorphism_key::*;
pub use glwe_switching_key::*;
pub use glwe_tensor_key::*;
pub use glwe_to_lwe_switching_key::*;
pub use glwe_to_lwe_key::*;
pub use lwe::*;
pub use lwe_switching_key::*;
pub use lwe_to_glwe_switching_key::*;
pub use lwe_to_glwe_key::*;

poulpy-core/src/layouts/gglwe_to_ggsw_key.rs (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
use poulpy_hal::{
|
||||
layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWE, GGLWEInfos, GGLWEToMut, GGLWEToRef, GLWEInfos, LWEInfos, Rank, TorusPrecision,
|
||||
};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
|
||||
use std::fmt;
|
||||
|
||||
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
|
||||
pub struct GGLWEToGGSWKeyLayout {
|
||||
pub n: Degree,
|
||||
pub base2k: Base2K,
|
||||
pub k: TorusPrecision,
|
||||
pub rank: Rank,
|
||||
pub dnum: Dnum,
|
||||
pub dsize: Dsize,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct GGLWEToGGSWKey<D: Data> {
|
||||
pub(crate) keys: Vec<GGLWE<D>>,
|
||||
}
|
||||
|
||||
impl<D: Data> LWEInfos for GGLWEToGGSWKey<D> {
|
||||
fn n(&self) -> Degree {
|
||||
self.keys[0].n()
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.keys[0].base2k()
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.keys[0].k()
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.keys[0].size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GLWEInfos for GGLWEToGGSWKey<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GGLWEInfos for GGLWEToGGSWKey<D> {
|
||||
fn rank_in(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.keys[0].dsize()
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.keys[0].dnum()
|
||||
}
|
||||
}
|
||||
|
||||
impl LWEInfos for GGLWEToGGSWKeyLayout {
|
||||
fn n(&self) -> Degree {
|
||||
self.n
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.base2k
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.k
|
||||
}
|
||||
}
|
||||
|
||||
impl GLWEInfos for GGLWEToGGSWKeyLayout {
|
||||
fn rank(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
impl GGLWEInfos for GGLWEToGGSWKeyLayout {
|
||||
fn rank_in(&self) -> Rank {
|
||||
self.rank
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.dsize
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.rank
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.dnum
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Debug for GGLWEToGGSWKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{self}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> FillUniform for GGLWEToGGSWKey<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.keys
|
||||
.iter_mut()
|
||||
.for_each(|key: &mut GGLWE<D>| key.fill_uniform(log_bound, source))
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for GGLWEToGGSWKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
writeln!(f, "(GGLWEToGGSWKey)",)?;
|
||||
for (i, key) in self.keys.iter().enumerate() {
|
||||
write!(f, "{i}: {key}")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl GGLWEToGGSWKey<Vec<u8>> {
|
||||
pub fn alloc_from_infos<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
assert_eq!(
|
||||
infos.rank_in(),
|
||||
infos.rank_out(),
|
||||
"rank_in != rank_out is not supported for GGLWEToGGSWKey"
|
||||
);
|
||||
Self::alloc(
|
||||
infos.n(),
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank(),
|
||||
infos.dnum(),
|
||||
infos.dsize(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self {
|
||||
GGLWEToGGSWKey {
|
||||
keys: (0..rank.as_usize())
|
||||
.map(|_| GGLWE::alloc(n, base2k, k, rank, rank, dnum, dsize))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bytes_of_from_infos<A>(infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
assert_eq!(
|
||||
infos.rank_in(),
|
||||
infos.rank_out(),
|
||||
"rank_in != rank_out is not supported for GGLWEToGGSWKey"
|
||||
);
|
||||
Self::bytes_of(
|
||||
infos.n(),
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank(),
|
||||
infos.dnum(),
|
||||
infos.dsize(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bytes_of(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
|
||||
rank.as_usize() * GGLWE::bytes_of(n, base2k, k, rank, rank, dnum, dsize)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWEToGGSWKey<D> {
|
||||
// Returns a mutable reference to GGLWE_{s}([s[i]*s[0], s[i]*s[1], ..., s[i]*s[rank]])
|
||||
pub fn at_mut(&mut self, i: usize) -> &mut GGLWE<D> {
|
||||
assert!((i as u32) < self.rank());
|
||||
&mut self.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWEToGGSWKey<D> {
|
||||
// Returns a reference to GGLWE_{s}([s[i]*s[0], s[i]*s[1], ..., s[i]*s[rank]])
|
||||
pub fn at(&self, i: usize) -> &GGLWE<D> {
|
||||
assert!((i as u32) < self.rank());
|
||||
&self.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> ReaderFrom for GGLWEToGGSWKey<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
|
||||
if self.keys.len() != len {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
format!("self.keys.len()={} != read len={}", self.keys.len(), len),
|
||||
));
|
||||
}
|
||||
for key in &mut self.keys {
|
||||
key.read_from(reader)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for GGLWEToGGSWKey<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
|
||||
for key in &self.keys {
|
||||
key.write_to(writer)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
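A small reader-side sketch (an assumed helper, not in the patch): the container serializes as a little-endian u64 count followed by each GGLWE, so the count can be inspected before the stream is handed to read_from.

fn peek_key_count(bytes: &[u8]) -> std::io::Result<u64> {
    use byteorder::{LittleEndian, ReadBytesExt};
    // Only the 8-byte length prefix is consumed here.
    let mut cursor = std::io::Cursor::new(bytes);
    cursor.read_u64::<LittleEndian>()
}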
|
||||
|
||||
pub trait GGLWEToGGSWKeyToRef {
|
||||
fn to_ref(&self) -> GGLWEToGGSWKey<&[u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWEToGGSWKeyToRef for GGLWEToGGSWKey<D>
|
||||
where
|
||||
GGLWE<D>: GGLWEToRef,
|
||||
{
|
||||
fn to_ref(&self) -> GGLWEToGGSWKey<&[u8]> {
|
||||
GGLWEToGGSWKey {
|
||||
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGLWEToGGSWKeyToMut {
|
||||
fn to_mut(&mut self) -> GGLWEToGGSWKey<&mut [u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWEToGGSWKeyToMut for GGLWEToGGSWKey<D>
|
||||
where
|
||||
GGLWE<D>: GGLWEToMut,
|
||||
{
|
||||
fn to_mut(&mut self) -> GGLWEToGGSWKey<&mut [u8]> {
|
||||
GGLWEToGGSWKey {
|
||||
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
poulpy-core/src/layouts/glwe_secret_tensor.rs (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ModuleN, ScratchTakeBasic, SvpApplyDftToDft, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyTmpA,
|
||||
},
|
||||
layouts::{
|
||||
Backend, Data, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToMut, ScalarZnxToRef, Scratch, ZnxInfos, ZnxView,
|
||||
ZnxViewMut,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
ScratchTakeCore,
|
||||
dist::Distribution,
|
||||
layouts::{
|
||||
Base2K, Degree, GLWEInfos, GLWESecret, GLWESecretPreparedFactory, GLWESecretToMut, GLWESecretToRef, LWEInfos, Rank,
|
||||
TorusPrecision,
|
||||
},
|
||||
};
|
||||
|
||||
pub struct GLWESecretTensor<D: Data> {
|
||||
pub(crate) data: ScalarZnx<D>,
|
||||
pub(crate) rank: Rank,
|
||||
pub(crate) dist: Distribution,
|
||||
}
|
||||
|
||||
impl GLWESecretTensor<Vec<u8>> {
|
||||
pub(crate) fn pairs(rank: usize) -> usize {
|
||||
(((rank + 1) * rank) >> 1).max(1)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> LWEInfos for GLWESecretTensor<D> {
|
||||
fn base2k(&self) -> Base2K {
|
||||
Base2K(0)
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
TorusPrecision(0)
|
||||
}
|
||||
|
||||
fn n(&self) -> Degree {
|
||||
Degree(self.data.n() as u32)
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWESecretTensor<D> {
|
||||
pub fn at(&self, mut i: usize, mut j: usize) -> ScalarZnx<&[u8]> {
|
||||
if i > j {
|
||||
std::mem::swap(&mut i, &mut j);
|
||||
};
|
||||
let rank: usize = self.rank().into();
|
||||
ScalarZnx {
|
||||
data: bytemuck::cast_slice(self.data.at(i * rank + j - (i * (i + 1) / 2), 0)),
|
||||
n: self.n().into(),
|
||||
cols: 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWESecretTensor<D> {
|
||||
pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> ScalarZnx<&mut [u8]> {
|
||||
if i > j {
|
||||
std::mem::swap(&mut i, &mut j);
|
||||
};
|
||||
let rank: usize = self.rank().into();
|
||||
ScalarZnx {
|
||||
n: self.n().into(),
|
||||
data: bytemuck::cast_slice_mut(self.data.at_mut(i * rank + j - (i * (i + 1) / 2), 0)),
|
||||
cols: 1,
|
||||
}
|
||||
}
|
||||
}
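The upper-triangular index used by at/at_mut above (and by the tensor keys elsewhere in this commit) can be checked in isolation; the sketch below is illustrative only.

fn triangular_index(mut i: usize, mut j: usize, rank: usize) -> usize {
    // Same formula as at/at_mut: enumerate the pairs (i, j) with i <= j row by row.
    if i > j {
        std::mem::swap(&mut i, &mut j);
    }
    i * rank + j - (i * (i + 1) / 2)
}

#[test]
fn triangular_index_is_a_bijection() {
    for rank in 1..=6 {
        let pairs = ((rank + 1) * rank) / 2;
        let mut seen = vec![false; pairs];
        for i in 0..rank {
            for j in i..rank {
                let idx = triangular_index(i, j, rank);
                assert!(idx < pairs && !seen[idx]);
                seen[idx] = true;
            }
        }
        assert!(seen.iter().all(|&hit| hit));
    }
}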
|
||||
|
||||
impl<D: Data> GLWEInfos for GLWESecretTensor<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.rank
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWESecretToRef for GLWESecretTensor<D> {
|
||||
fn to_ref(&self) -> GLWESecret<&[u8]> {
|
||||
GLWESecret {
|
||||
data: self.data.to_ref(),
|
||||
dist: self.dist,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWESecretToMut for GLWESecretTensor<D> {
|
||||
fn to_mut(&mut self) -> GLWESecret<&mut [u8]> {
|
||||
GLWESecret {
|
||||
dist: self.dist,
|
||||
data: self.data.to_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GLWESecretTensor<Vec<u8>> {
|
||||
pub fn alloc_from_infos<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GLWEInfos,
|
||||
{
|
||||
Self::alloc(infos.n(), infos.rank())
|
||||
}
|
||||
|
||||
pub fn alloc(n: Degree, rank: Rank) -> Self {
|
||||
GLWESecretTensor {
|
||||
data: ScalarZnx::alloc(n.into(), Self::pairs(rank.into())),
|
||||
rank,
|
||||
dist: Distribution::NONE,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bytes_of_from_infos<A>(infos: &A) -> usize
|
||||
where
|
||||
A: GLWEInfos,
|
||||
{
|
||||
Self::bytes_of(infos.n(), infos.rank())
|
||||
}
|
||||
|
||||
pub fn bytes_of(n: Degree, rank: Rank) -> usize {
|
||||
ScalarZnx::bytes_of(n.into(), Self::pairs(rank.into()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWESecretTensor<D> {
|
||||
pub fn prepare<M, S, BE: Backend>(&mut self, module: &M, other: &S, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
M: GLWESecretTensorFactory<BE>,
|
||||
S: GLWESecretToRef + GLWEInfos,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
module.glwe_secret_tensor_prepare(self, other, scratch);
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWESecretTensorFactory<BE: Backend> {
|
||||
fn glwe_secret_tensor_prepare_tmp_bytes(&self, rank: Rank) -> usize;
|
||||
|
||||
fn glwe_secret_tensor_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GLWESecretToMut + GLWEInfos,
|
||||
O: GLWESecretToRef + GLWEInfos;
|
||||
}
|
||||
|
||||
impl<BE: Backend> GLWESecretTensorFactory<BE> for Module<BE>
|
||||
where
|
||||
Self: ModuleN
|
||||
+ GLWESecretPreparedFactory<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ SvpApplyDftToDft<BE>
|
||||
+ VecZnxIdftApplyTmpA<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxBigNormalizeTmpBytes,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
fn glwe_secret_tensor_prepare_tmp_bytes(&self, rank: Rank) -> usize {
|
||||
self.bytes_of_glwe_secret_prepared(rank)
|
||||
+ self.bytes_of_vec_znx_dft(rank.into(), 1)
|
||||
+ self.bytes_of_vec_znx_dft(1, 1)
|
||||
+ self.bytes_of_vec_znx_big(1, 1)
|
||||
+ self.vec_znx_big_normalize_tmp_bytes()
|
||||
}
|
||||
|
||||
fn glwe_secret_tensor_prepare<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GLWESecretToMut + GLWEInfos,
|
||||
A: GLWESecretToRef + GLWEInfos,
|
||||
{
|
||||
let res: &mut GLWESecret<&mut [u8]> = &mut res.to_mut();
|
||||
let a: &GLWESecret<&[u8]> = &a.to_ref();
|
||||
|
||||
println!("res.rank: {} a.rank: {}", res.rank(), a.rank());
|
||||
|
||||
assert_eq!(res.rank(), GLWESecretTensor::pairs(a.rank().into()) as u32);
|
||||
assert_eq!(res.n(), self.n() as u32);
|
||||
assert_eq!(a.n(), self.n() as u32);
|
||||
|
||||
let rank: usize = a.rank().into();
|
||||
|
||||
let (mut a_prepared, scratch_1) = scratch.take_glwe_secret_prepared(self, rank.into());
|
||||
a_prepared.prepare(self, a);
|
||||
|
||||
// NOTE: hardcoded base-2^k parameter, used only for the normalization of the tensored secret below.
let base2k: usize = 17;
|
||||
|
||||
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, rank, 1);
|
||||
for i in 0..rank {
|
||||
self.vec_znx_dft_apply(1, 0, &mut a_dft, i, &a.data.as_vec_znx(), i);
|
||||
}
|
||||
|
||||
let (mut a_ij_big, scratch_3) = scratch_2.take_vec_znx_big(self, 1, 1);
|
||||
let (mut a_ij_dft, scratch_4) = scratch_3.take_vec_znx_dft(self, 1, 1);
|
||||
|
||||
// sk_tensor = sk (x) sk
|
||||
// For example: (s0, s1) (x) (s0, s1) = (s0^2, s0s1, s1^2)
|
||||
for i in 0..rank {
|
||||
for j in i..rank {
|
||||
let idx: usize = i * rank + j - (i * (i + 1) / 2);
|
||||
self.svp_apply_dft_to_dft(&mut a_ij_dft, 0, &a_prepared.data, j, &a_dft, i);
|
||||
self.vec_znx_idft_apply_tmpa(&mut a_ij_big, 0, &mut a_ij_dft, 0);
|
||||
self.vec_znx_big_normalize(
|
||||
base2k,
|
||||
&mut res.data.as_vec_znx_mut(),
|
||||
idx,
|
||||
base2k,
|
||||
&a_ij_big,
|
||||
0,
|
||||
scratch_4,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
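A plain-integer analogue (not the polynomial arithmetic performed by the backend) of the double loop above, confirming the component order documented in the comment: for rank 2 the traversal yields (s0^2, s0*s1, s1^2).

fn tensor_components(sk: &[i64]) -> Vec<i64> {
    let rank = sk.len();
    let mut out = Vec::with_capacity(((rank + 1) * rank) / 2);
    for i in 0..rank {
        for j in i..rank {
            // Mirrors the (i, j) order of the svp_apply / normalize loop.
            out.push(sk[i] * sk[j]);
        }
    }
    out
}

#[test]
fn rank_two_tensor_component_order() {
    // (s0, s1) = (2, 3) -> (s0^2, s0*s1, s1^2) = (4, 6, 9)
    assert_eq!(tensor_components(&[2, 3]), vec![4, 6, 9]);
}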
|
||||
@@ -6,7 +6,6 @@ use poulpy_hal::{
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWE, GGLWEInfos, GGLWEToMut, GGLWEToRef, GLWEInfos, LWEInfos, Rank, TorusPrecision,
|
||||
};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
|
||||
use std::fmt;
|
||||
|
||||
@@ -21,31 +20,29 @@ pub struct GLWETensorKeyLayout {
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct GLWETensorKey<D: Data> {
|
||||
pub(crate) keys: Vec<GGLWE<D>>,
|
||||
}
|
||||
pub struct GLWETensorKey<D: Data>(pub(crate) GGLWE<D>);
|
||||
|
||||
impl<D: Data> LWEInfos for GLWETensorKey<D> {
|
||||
fn n(&self) -> Degree {
|
||||
self.keys[0].n()
|
||||
self.0.n()
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.keys[0].base2k()
|
||||
self.0.base2k()
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.keys[0].k()
|
||||
self.0.k()
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.keys[0].size()
|
||||
self.0.size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GLWEInfos for GLWETensorKey<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
self.0.rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,15 +52,15 @@ impl<D: Data> GGLWEInfos for GLWETensorKey<D> {
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
self.0.rank_out()
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.keys[0].dsize()
|
||||
self.0.dsize()
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.keys[0].dnum()
|
||||
self.0.dnum()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,18 +110,14 @@ impl<D: DataRef> fmt::Debug for GLWETensorKey<D> {
|
||||
|
||||
impl<D: DataMut> FillUniform for GLWETensorKey<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.keys
|
||||
.iter_mut()
|
||||
.for_each(|key: &mut GGLWE<D>| key.fill_uniform(log_bound, source))
|
||||
self.0.fill_uniform(log_bound, source)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for GLWETensorKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
writeln!(f, "(GLWETensorKey)",)?;
|
||||
for (i, key) in self.keys.iter().enumerate() {
|
||||
write!(f, "{i}: {key}")?;
|
||||
}
|
||||
write!(f, "{}", self.0)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -151,11 +144,7 @@ impl GLWETensorKey<Vec<u8>> {
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self {
|
||||
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
|
||||
GLWETensorKey {
|
||||
keys: (0..pairs)
|
||||
.map(|_| GGLWE::alloc(n, base2k, k, Rank(1), rank, dnum, dsize))
|
||||
.collect(),
|
||||
}
|
||||
GLWETensorKey(GGLWE::alloc(n, base2k, k, Rank(pairs), rank, dnum, dsize))
|
||||
}
|
||||
|
||||
pub fn bytes_of_from_infos<A>(infos: &A) -> usize
|
||||
@@ -178,85 +167,39 @@ impl GLWETensorKey<Vec<u8>> {
|
||||
}
|
||||
|
||||
pub fn bytes_of(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
|
||||
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
|
||||
pairs * GGLWE::bytes_of(n, base2k, k, Rank(1), rank, dnum, dsize)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWETensorKey<D> {
|
||||
// Returns a mutable reference to GGLWE_{s}(s[i] * s[j])
|
||||
pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GGLWE<D> {
|
||||
if i > j {
|
||||
std::mem::swap(&mut i, &mut j);
|
||||
};
|
||||
let rank: usize = self.rank_out().into();
|
||||
&mut self.keys[i * rank + j - (i * (i + 1) / 2)]
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWETensorKey<D> {
|
||||
// Returns a reference to GGLWE_{s}(s[i] * s[j])
|
||||
pub fn at(&self, mut i: usize, mut j: usize) -> &GGLWE<D> {
|
||||
if i > j {
|
||||
std::mem::swap(&mut i, &mut j);
|
||||
};
|
||||
let rank: usize = self.rank_out().into();
|
||||
&self.keys[i * rank + j - (i * (i + 1) / 2)]
|
||||
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
|
||||
GGLWE::bytes_of(n, base2k, k, Rank(pairs), rank, dnum, dsize)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> ReaderFrom for GLWETensorKey<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
|
||||
if self.keys.len() != len {
|
||||
return Err(std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidData,
|
||||
format!("self.keys.len()={} != read len={}", self.keys.len(), len),
|
||||
));
|
||||
}
|
||||
for key in &mut self.keys {
|
||||
key.read_from(reader)?;
|
||||
}
|
||||
self.0.read_from(reader)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for GLWETensorKey<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
|
||||
for key in &self.keys {
|
||||
key.write_to(writer)?;
|
||||
}
|
||||
self.0.write_to(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyToRef {
|
||||
fn to_ref(&self) -> GLWETensorKey<&[u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWETensorKeyToRef for GLWETensorKey<D>
|
||||
impl<D: DataRef> GGLWEToRef for GLWETensorKey<D>
|
||||
where
|
||||
GGLWE<D>: GGLWEToRef,
|
||||
{
|
||||
fn to_ref(&self) -> GLWETensorKey<&[u8]> {
|
||||
GLWETensorKey {
|
||||
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
|
||||
}
|
||||
fn to_ref(&self) -> GGLWE<&[u8]> {
|
||||
self.0.to_ref()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GLWETensorKeyToMut {
|
||||
fn to_mut(&mut self) -> GLWETensorKey<&mut [u8]>;
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWETensorKeyToMut for GLWETensorKey<D>
|
||||
impl<D: DataMut> GGLWEToMut for GLWETensorKey<D>
|
||||
where
|
||||
GGLWE<D>: GGLWEToMut,
|
||||
{
|
||||
fn to_mut(&mut self) -> GLWETensorKey<&mut [u8]> {
|
||||
GLWETensorKey {
|
||||
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
|
||||
}
|
||||
fn to_mut(&mut self) -> GGLWE<&mut [u8]> {
|
||||
self.0.to_mut()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,9 +59,9 @@ impl GGLWEInfos for GLWEToLWEKeyLayout {
|
||||
|
||||
/// A special [GLWESwitchingKey] required for the conversion from [GLWE] to [LWE].
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct GLWEToLWESwitchingKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
|
||||
pub struct GLWEToLWEKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
|
||||
|
||||
impl<D: Data> LWEInfos for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: Data> LWEInfos for GLWEToLWEKey<D> {
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.0.base2k()
|
||||
}
|
||||
@@ -79,12 +79,12 @@ impl<D: Data> LWEInfos for GLWEToLWESwitchingKey<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GLWEInfos for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: Data> GLWEInfos for GLWEToLWEKey<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
}
|
||||
impl<D: Data> GGLWEInfos for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: Data> GGLWEInfos for GLWEToLWEKey<D> {
|
||||
fn rank_in(&self) -> Rank {
|
||||
self.0.rank_in()
|
||||
}
|
||||
@@ -102,37 +102,37 @@ impl<D: Data> GGLWEInfos for GLWEToLWESwitchingKey<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Debug for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataRef> fmt::Debug for GLWEToLWEKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{self}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> FillUniform for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataMut> FillUniform for GLWEToLWEKey<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.0.fill_uniform(log_bound, source);
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataRef> fmt::Display for GLWEToLWEKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "(GLWEToLWESwitchingKey) {}", self.0)
|
||||
write!(f, "(GLWEToLWEKey) {}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> ReaderFrom for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataMut> ReaderFrom for GLWEToLWEKey<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
self.0.read_from(reader)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataRef> WriterTo for GLWEToLWEKey<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.0.write_to(writer)
|
||||
}
|
||||
}
|
||||
|
||||
impl GLWEToLWESwitchingKey<Vec<u8>> {
|
||||
impl GLWEToLWEKey<Vec<u8>> {
|
||||
pub fn alloc_from_infos<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
@@ -140,12 +140,12 @@ impl GLWEToLWESwitchingKey<Vec<u8>> {
|
||||
assert_eq!(
|
||||
infos.rank_out().0,
|
||||
1,
|
||||
"rank_out > 1 is not supported for GLWEToLWESwitchingKey"
|
||||
"rank_out > 1 is not supported for GLWEToLWEKey"
|
||||
);
|
||||
assert_eq!(
|
||||
infos.dsize().0,
|
||||
1,
|
||||
"dsize > 1 is not supported for GLWEToLWESwitchingKey"
|
||||
"dsize > 1 is not supported for GLWEToLWEKey"
|
||||
);
|
||||
Self::alloc(
|
||||
infos.n(),
|
||||
@@ -157,7 +157,7 @@ impl GLWEToLWESwitchingKey<Vec<u8>> {
|
||||
}
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self {
|
||||
GLWEToLWESwitchingKey(GLWESwitchingKey::alloc(
|
||||
GLWEToLWEKey(GLWESwitchingKey::alloc(
|
||||
n,
|
||||
base2k,
|
||||
k,
|
||||
@@ -196,19 +196,19 @@ impl GLWEToLWESwitchingKey<Vec<u8>> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWEToRef for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataRef> GGLWEToRef for GLWEToLWEKey<D> {
|
||||
fn to_ref(&self) -> GGLWE<&[u8]> {
|
||||
self.0.to_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWEToMut for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataMut> GGLWEToMut for GLWEToLWEKey<D> {
|
||||
fn to_mut(&mut self) -> GGLWE<&mut [u8]> {
|
||||
self.0.to_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWESwitchingKeyDegreesMut for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataMut> GLWESwitchingKeyDegreesMut for GLWEToLWEKey<D> {
|
||||
fn input_degree(&mut self) -> &mut Degree {
|
||||
&mut self.0.input_degree
|
||||
}
|
||||
@@ -218,7 +218,7 @@ impl<D: DataMut> GLWESwitchingKeyDegreesMut for GLWEToLWESwitchingKey<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWESwitchingKeyDegrees for GLWEToLWESwitchingKey<D> {
|
||||
impl<D: DataRef> GLWESwitchingKeyDegrees for GLWEToLWEKey<D> {
|
||||
fn input_degree(&self) -> &Degree {
|
||||
&self.0.input_degree
|
||||
}
|
||||
@@ -11,7 +11,7 @@ use crate::layouts::{
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
|
||||
pub struct LWEToGLWESwitchingKeyLayout {
|
||||
pub struct LWEToGLWEKeyLayout {
|
||||
pub n: Degree,
|
||||
pub base2k: Base2K,
|
||||
pub k: TorusPrecision,
|
||||
@@ -19,7 +19,7 @@ pub struct LWEToGLWESwitchingKeyLayout {
|
||||
pub dnum: Dnum,
|
||||
}
|
||||
|
||||
impl LWEInfos for LWEToGLWESwitchingKeyLayout {
|
||||
impl LWEInfos for LWEToGLWEKeyLayout {
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.base2k
|
||||
}
|
||||
@@ -33,13 +33,13 @@ impl LWEInfos for LWEToGLWESwitchingKeyLayout {
|
||||
}
|
||||
}
|
||||
|
||||
impl GLWEInfos for LWEToGLWESwitchingKeyLayout {
|
||||
impl GLWEInfos for LWEToGLWEKeyLayout {
|
||||
fn rank(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
impl GGLWEInfos for LWEToGLWESwitchingKeyLayout {
|
||||
impl GGLWEInfos for LWEToGLWEKeyLayout {
|
||||
fn rank_in(&self) -> Rank {
|
||||
Rank(1)
|
||||
}
|
||||
@@ -58,9 +58,9 @@ impl GGLWEInfos for LWEToGLWESwitchingKeyLayout {
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct LWEToGLWESwitchingKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
|
||||
pub struct LWEToGLWEKey<D: Data>(pub(crate) GLWESwitchingKey<D>);
|
||||
|
||||
impl<D: Data> LWEInfos for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: Data> LWEInfos for LWEToGLWEKey<D> {
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.0.base2k()
|
||||
}
|
||||
@@ -78,12 +78,12 @@ impl<D: Data> LWEInfos for LWEToGLWESwitchingKey<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data> GLWEInfos for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: Data> GLWEInfos for LWEToGLWEKey<D> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
}
|
||||
impl<D: Data> GGLWEInfos for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: Data> GGLWEInfos for LWEToGLWEKey<D> {
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.0.dsize()
|
||||
}
|
||||
@@ -101,37 +101,37 @@ impl<D: Data> GGLWEInfos for LWEToGLWESwitchingKey<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Debug for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataRef> fmt::Debug for LWEToGLWEKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{self}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> FillUniform for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataMut> FillUniform for LWEToGLWEKey<D> {
|
||||
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
|
||||
self.0.fill_uniform(log_bound, source);
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> fmt::Display for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataRef> fmt::Display for LWEToGLWEKey<D> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "(LWEToGLWESwitchingKey) {}", self.0)
|
||||
write!(f, "(LWEToGLWEKey) {}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> ReaderFrom for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataMut> ReaderFrom for LWEToGLWEKey<D> {
|
||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||
self.0.read_from(reader)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> WriterTo for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataRef> WriterTo for LWEToGLWEKey<D> {
|
||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.0.write_to(writer)
|
||||
}
|
||||
}
|
||||
|
||||
impl LWEToGLWESwitchingKey<Vec<u8>> {
|
||||
impl LWEToGLWEKey<Vec<u8>> {
|
||||
pub fn alloc_from_infos<A>(infos: &A) -> Self
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
@@ -139,12 +139,12 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
|
||||
assert_eq!(
|
||||
infos.rank_in().0,
|
||||
1,
|
||||
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
|
||||
"rank_in > 1 is not supported for LWEToGLWEKey"
|
||||
);
|
||||
assert_eq!(
|
||||
infos.dsize().0,
|
||||
1,
|
||||
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
|
||||
"dsize > 1 is not supported for LWEToGLWEKey"
|
||||
);
|
||||
|
||||
Self::alloc(
|
||||
@@ -157,7 +157,7 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
|
||||
}
|
||||
|
||||
pub fn alloc(n: Degree, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self {
|
||||
LWEToGLWESwitchingKey(GLWESwitchingKey::alloc(
|
||||
LWEToGLWEKey(GLWESwitchingKey::alloc(
|
||||
n,
|
||||
base2k,
|
||||
k,
|
||||
@@ -175,12 +175,12 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
|
||||
assert_eq!(
|
||||
infos.rank_in().0,
|
||||
1,
|
||||
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
|
||||
"rank_in > 1 is not supported for LWEToGLWEKey"
|
||||
);
|
||||
assert_eq!(
|
||||
infos.dsize().0,
|
||||
1,
|
||||
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
|
||||
"dsize > 1 is not supported for LWEToGLWEKey"
|
||||
);
|
||||
Self::bytes_of(
|
||||
infos.n(),
|
||||
@@ -196,19 +196,19 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GGLWEToRef for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataRef> GGLWEToRef for LWEToGLWEKey<D> {
|
||||
fn to_ref(&self) -> GGLWE<&[u8]> {
|
||||
self.0.to_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GGLWEToMut for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataMut> GGLWEToMut for LWEToGLWEKey<D> {
|
||||
fn to_mut(&mut self) -> GGLWE<&mut [u8]> {
|
||||
self.0.to_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWESwitchingKeyDegreesMut for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataMut> GLWESwitchingKeyDegreesMut for LWEToGLWEKey<D> {
|
||||
fn input_degree(&mut self) -> &mut Degree {
|
||||
&mut self.0.input_degree
|
||||
}
|
||||
@@ -218,7 +218,7 @@ impl<D: DataMut> GLWESwitchingKeyDegreesMut for LWEToGLWESwitchingKey<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWESwitchingKeyDegrees for LWEToGLWESwitchingKey<D> {
|
||||
impl<D: DataRef> GLWESwitchingKeyDegrees for LWEToGLWEKey<D> {
|
||||
fn input_degree(&self) -> &Degree {
|
||||
&self.0.input_degree
|
||||
}
|
||||
@@ -1,40 +1,44 @@
mod gglwe;
mod gglwe_to_ggsw_key;
mod ggsw;
mod glwe;
mod glwe_automorphism_key;
mod glwe_plaintext;
mod glwe_public_key;
mod glwe_secret;
mod glwe_secret_tensor;
mod glwe_switching_key;
mod glwe_tensor;
mod glwe_tensor_key;
mod glwe_to_lwe_switching_key;
mod glwe_to_lwe_key;
mod lwe;
mod lwe_plaintext;
mod lwe_secret;
mod lwe_switching_key;
mod lwe_to_glwe_switching_key;
mod lwe_to_glwe_key;

pub mod compressed;
pub mod prepared;

pub use compressed::*;
pub use gglwe::*;
pub use gglwe_to_ggsw_key::*;
pub use ggsw::*;
pub use glwe::*;
pub use glwe_automorphism_key::*;
pub use glwe_plaintext::*;
pub use glwe_public_key::*;
pub use glwe_secret::*;
pub use glwe_secret_tensor::*;
pub use glwe_switching_key::*;
pub use glwe_tensor::*;
pub use glwe_tensor_key::*;
pub use glwe_to_lwe_switching_key::*;
pub use glwe_to_lwe_key::*;
pub use lwe::*;
pub use lwe_plaintext::*;
pub use lwe_secret::*;
pub use lwe_switching_key::*;
pub use lwe_to_glwe_switching_key::*;
pub use lwe_to_glwe_key::*;
pub use prepared::*;

use poulpy_hal::layouts::{Backend, Module};

poulpy-core/src/layouts/prepared/gglwe_to_ggsw_key.rs (new file, 252 lines)
@@ -0,0 +1,252 @@
|
||||
use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWEPrepared, GGLWEPreparedFactory, GGLWEPreparedToMut, GGLWEPreparedToRef,
|
||||
GGLWEToGGSWKey, GGLWEToGGSWKeyToRef, GLWEInfos, LWEInfos, Rank, TorusPrecision,
|
||||
};
|
||||
|
||||
pub struct GGLWEToGGSWKeyPrepared<D: Data, BE: Backend> {
|
||||
pub(crate) keys: Vec<GGLWEPrepared<D, BE>>,
|
||||
}
|
||||
|
||||
impl<D: Data, BE: Backend> LWEInfos for GGLWEToGGSWKeyPrepared<D, BE> {
|
||||
fn n(&self) -> Degree {
|
||||
self.keys[0].n()
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.keys[0].base2k()
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.keys[0].k()
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.keys[0].size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data, BE: Backend> GLWEInfos for GGLWEToGGSWKeyPrepared<D, BE> {
|
||||
fn rank(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Data, BE: Backend> GGLWEInfos for GGLWEToGGSWKeyPrepared<D, BE> {
|
||||
fn rank_in(&self) -> Rank {
|
||||
self.rank_out()
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.keys[0].dsize()
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.keys[0].dnum()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGLWEToGGSWKeyPreparedFactory<BE: Backend> {
|
||||
fn alloc_gglwe_to_ggsw_key_prepared_from_infos<A>(&self, infos: &A) -> GGLWEToGGSWKeyPrepared<Vec<u8>, BE>
|
||||
where
|
||||
A: GGLWEInfos;
|
||||
|
||||
fn alloc_gglwe_to_ggsw_key_prepared(
|
||||
&self,
|
||||
base2k: Base2K,
|
||||
k: TorusPrecision,
|
||||
rank: Rank,
|
||||
dnum: Dnum,
|
||||
dsize: Dsize,
|
||||
) -> GGLWEToGGSWKeyPrepared<Vec<u8>, BE>;
|
||||
|
||||
fn bytes_of_gglwe_to_ggsw_from_infos<A>(&self, infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos;
|
||||
|
||||
fn bytes_of_gglwe_to_ggsw(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize;
|
||||
|
||||
fn prepare_gglwe_to_ggsw_key_tmp_bytes<A>(&self, infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos;
|
||||
|
||||
fn prepare_gglwe_to_ggsw_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GGLWEToGGSWKeyPreparedToMut<BE>,
|
||||
O: GGLWEToGGSWKeyToRef;
|
||||
}
|
||||
|
||||
impl<BE: Backend> GGLWEToGGSWKeyPreparedFactory<BE> for Module<BE>
|
||||
where
|
||||
Self: GGLWEPreparedFactory<BE>,
|
||||
{
|
||||
fn alloc_gglwe_to_ggsw_key_prepared_from_infos<A>(&self, infos: &A) -> GGLWEToGGSWKeyPrepared<Vec<u8>, BE>
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
assert_eq!(
|
||||
infos.rank_in(),
|
||||
infos.rank_out(),
|
||||
"rank_in != rank_out is not supported for GGLWEToGGSWKeyPrepared"
|
||||
);
|
||||
self.alloc_gglwe_to_ggsw_key_prepared(
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank(),
|
||||
infos.dnum(),
|
||||
infos.dsize(),
|
||||
)
|
||||
}
|
||||
|
||||
fn alloc_gglwe_to_ggsw_key_prepared(
|
||||
&self,
|
||||
base2k: Base2K,
|
||||
k: TorusPrecision,
|
||||
rank: Rank,
|
||||
dnum: Dnum,
|
||||
dsize: Dsize,
|
||||
) -> GGLWEToGGSWKeyPrepared<Vec<u8>, BE> {
|
||||
GGLWEToGGSWKeyPrepared {
|
||||
keys: (0..rank.as_usize())
|
||||
.map(|_| self.alloc_gglwe_prepared(base2k, k, rank, rank, dnum, dsize))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
fn bytes_of_gglwe_to_ggsw_from_infos<A>(&self, infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
assert_eq!(
|
||||
infos.rank_in(),
|
||||
infos.rank_out(),
|
||||
"rank_in != rank_out is not supported for GGLWEToGGSWKeyPrepared"
|
||||
);
|
||||
self.bytes_of_gglwe_to_ggsw(
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank(),
|
||||
infos.dnum(),
|
||||
infos.dsize(),
|
||||
)
|
||||
}
|
||||
|
||||
fn bytes_of_gglwe_to_ggsw(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
|
||||
rank.as_usize() * self.bytes_of_gglwe_prepared(base2k, k, rank, rank, dnum, dsize)
|
||||
}
|
||||
|
||||
fn prepare_gglwe_to_ggsw_key_tmp_bytes<A>(&self, infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
self.prepare_gglwe_tmp_bytes(infos)
|
||||
}
|
||||
|
||||
fn prepare_gglwe_to_ggsw_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
R: GGLWEToGGSWKeyPreparedToMut<BE>,
|
||||
O: GGLWEToGGSWKeyToRef,
|
||||
{
|
||||
let res: &mut GGLWEToGGSWKeyPrepared<&mut [u8], BE> = &mut res.to_mut();
|
||||
let other: &GGLWEToGGSWKey<&[u8]> = &other.to_ref();
|
||||
|
||||
assert_eq!(res.keys.len(), other.keys.len());
|
||||
|
||||
for (a, b) in res.keys.iter_mut().zip(other.keys.iter()) {
|
||||
self.prepare_gglwe(a, b, scratch);
|
||||
}
|
||||
}
|
||||
}
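A hypothetical convenience wrapper (not part of the patch) tying the factory methods above together; the caller is assumed to provide a scratch buffer of at least prepare_gglwe_to_ggsw_key_tmp_bytes bytes.

fn prepare_conversion_key<BE, M, O>(
    module: &M,
    key: &O,
    scratch: &mut Scratch<BE>,
) -> GGLWEToGGSWKeyPrepared<Vec<u8>, BE>
where
    BE: Backend,
    M: GGLWEToGGSWKeyPreparedFactory<BE>,
    O: GGLWEToGGSWKeyToRef + GGLWEInfos,
    GGLWEToGGSWKeyPrepared<Vec<u8>, BE>: GGLWEToGGSWKeyPreparedToMut<BE>,
{
    // Allocate a prepared container with the same layout, then prepare each GGLWE into it.
    let mut prepared = module.alloc_gglwe_to_ggsw_key_prepared_from_infos(key);
    module.prepare_gglwe_to_ggsw_key(&mut prepared, key, scratch);
    prepared
}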
|
||||
|
||||
impl<BE: Backend> GGLWEToGGSWKeyPrepared<Vec<u8>, BE> {
|
||||
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
M: GGLWEToGGSWKeyPreparedFactory<BE>,
|
||||
{
|
||||
module.alloc_gglwe_to_ggsw_key_prepared_from_infos(infos)
|
||||
}
|
||||
|
||||
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
|
||||
where
|
||||
M: GGLWEToGGSWKeyPreparedFactory<BE>,
|
||||
{
|
||||
module.alloc_gglwe_to_ggsw_key_prepared(base2k, k, rank, dnum, dsize)
|
||||
}
|
||||
|
||||
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
M: GGLWEToGGSWKeyPreparedFactory<BE>,
|
||||
{
|
||||
module.bytes_of_gglwe_to_ggsw_from_infos(infos)
|
||||
}
|
||||
|
||||
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
|
||||
where
|
||||
M: GGLWEToGGSWKeyPreparedFactory<BE>,
|
||||
{
|
||||
module.bytes_of_gglwe_to_ggsw(base2k, k, rank, dnum, dsize)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut, BE: Backend> GGLWEToGGSWKeyPrepared<D, BE> {
|
||||
pub fn prepare<M, O>(&mut self, module: &M, other: &O, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
M: GGLWEToGGSWKeyPreparedFactory<BE>,
|
||||
O: GGLWEToGGSWKeyToRef,
|
||||
{
|
||||
module.prepare_gglwe_to_ggsw_key(self, other, scratch);
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut, BE: Backend> GGLWEToGGSWKeyPrepared<D, BE> {
|
||||
// Returns a mutable reference to GGLWEPrepared_{s}([s[i]*s[0], s[i]*s[1], ..., s[i]*s[rank]])
|
||||
pub fn at_mut(&mut self, i: usize) -> &mut GGLWEPrepared<D, BE> {
|
||||
assert!((i as u32) < self.rank());
|
||||
&mut self.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef, BE: Backend> GGLWEToGGSWKeyPrepared<D, BE> {
|
||||
// Returns a reference to GGLWEPrepared_{s}([s[i]*s[0], s[i]*s[1], ..., s[i]*s[rank]])
|
||||
pub fn at(&self, i: usize) -> &GGLWEPrepared<D, BE> {
|
||||
assert!((i as u32) < self.rank());
|
||||
&self.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGLWEToGGSWKeyPreparedToRef<BE: Backend> {
|
||||
fn to_ref(&self) -> GGLWEToGGSWKeyPrepared<&[u8], BE>;
|
||||
}
|
||||
|
||||
impl<D: DataRef, BE: Backend> GGLWEToGGSWKeyPreparedToRef<BE> for GGLWEToGGSWKeyPrepared<D, BE>
|
||||
where
|
||||
GGLWEPrepared<D, BE>: GGLWEPreparedToRef<BE>,
|
||||
{
|
||||
fn to_ref(&self) -> GGLWEToGGSWKeyPrepared<&[u8], BE> {
|
||||
GGLWEToGGSWKeyPrepared {
|
||||
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait GGLWEToGGSWKeyPreparedToMut<BE: Backend> {
|
||||
fn to_mut(&mut self) -> GGLWEToGGSWKeyPrepared<&mut [u8], BE>;
|
||||
}
|
||||
|
||||
impl<D: DataMut, BE: Backend> GGLWEToGGSWKeyPreparedToMut<BE> for GGLWEToGGSWKeyPrepared<D, BE>
|
||||
where
|
||||
GGLWEPrepared<D, BE>: GGLWEPreparedToMut<BE>,
|
||||
{
|
||||
fn to_mut(&mut self) -> GGLWEToGGSWKeyPrepared<&mut [u8], BE> {
|
||||
GGLWEToGGSWKeyPrepared {
|
||||
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -109,7 +109,7 @@ where
|
||||
)
|
||||
}
|
||||
|
||||
fn bytes_of_glwe_switching_key_prepared(
|
||||
fn bytes_of_glwe_key_prepared(
|
||||
&self,
|
||||
base2k: Base2K,
|
||||
k: TorusPrecision,
|
||||
@@ -125,7 +125,7 @@ where
|
||||
where
|
||||
A: GGLWEInfos,
|
||||
{
|
||||
self.bytes_of_glwe_switching_key_prepared(
|
||||
self.bytes_of_glwe_key_prepared(
|
||||
infos.base2k(),
|
||||
infos.k(),
|
||||
infos.rank_in(),
|
||||
@@ -199,7 +199,7 @@ impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B> {
|
||||
where
|
||||
M: GLWESwitchingKeyPreparedFactory<B>,
|
||||
{
|
||||
module.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
|
||||
module.bytes_of_glwe_key_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,29 +2,27 @@ use poulpy_hal::layouts::{Backend, Data, DataMut, DataRef, Module, Scratch};
|
||||
|
||||
use crate::layouts::{
|
||||
Base2K, Degree, Dnum, Dsize, GGLWEInfos, GGLWEPrepared, GGLWEPreparedFactory, GGLWEPreparedToMut, GGLWEPreparedToRef,
|
||||
GLWEInfos, GLWETensorKey, GLWETensorKeyToRef, LWEInfos, Rank, TorusPrecision,
|
||||
GGLWEToRef, GLWEInfos, LWEInfos, Rank, TorusPrecision,
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Eq)]
|
||||
pub struct GLWETensorKeyPrepared<D: Data, B: Backend> {
|
||||
pub(crate) keys: Vec<GGLWEPrepared<D, B>>,
|
||||
}
|
||||
pub struct GLWETensorKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWEPrepared<D, B>);
|
||||
|
||||
impl<D: Data, B: Backend> LWEInfos for GLWETensorKeyPrepared<D, B> {
|
||||
fn n(&self) -> Degree {
|
||||
self.keys[0].n()
|
||||
self.0.n()
|
||||
}
|
||||
|
||||
fn base2k(&self) -> Base2K {
|
||||
self.keys[0].base2k()
|
||||
self.0.base2k()
|
||||
}
|
||||
|
||||
fn k(&self) -> TorusPrecision {
|
||||
self.keys[0].k()
|
||||
self.0.k()
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.keys[0].size()
|
||||
self.0.size()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,15 +38,15 @@ impl<D: Data, B: Backend> GGLWEInfos for GLWETensorKeyPrepared<D, B> {
|
||||
}
|
||||
|
||||
fn rank_out(&self) -> Rank {
|
||||
self.keys[0].rank_out()
|
||||
self.0.rank_out()
|
||||
}
|
||||
|
||||
fn dsize(&self) -> Dsize {
|
||||
self.keys[0].dsize()
|
||||
self.0.dsize()
|
||||
}
|
||||
|
||||
fn dnum(&self) -> Dnum {
|
||||
self.keys[0].dnum()
|
||||
self.0.dnum()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,11 +63,7 @@ where
|
||||
rank: Rank,
|
||||
) -> GLWETensorKeyPrepared<Vec<u8>, B> {
|
||||
let pairs: u32 = (((rank.as_u32() + 1) * rank.as_u32()) >> 1).max(1);
|
||||
GLWETensorKeyPrepared {
|
||||
keys: (0..pairs)
|
||||
.map(|_| self.alloc_gglwe_prepared(base2k, k, Rank(1), rank, dnum, dsize))
|
||||
.collect(),
|
||||
}
|
||||
GLWETensorKeyPrepared(self.alloc_gglwe_prepared(base2k, k, Rank(pairs), rank, dnum, dsize))
|
||||
}
|
||||
|
||||
fn alloc_tensor_key_prepared_from_infos<A>(&self, infos: &A) -> GLWETensorKeyPrepared<Vec<u8>, B>
|
||||
@@ -91,8 +85,8 @@ where
|
||||
}
|
||||
|
||||
fn bytes_of_tensor_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
|
||||
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
|
||||
pairs * self.bytes_of_gglwe_prepared(base2k, k, Rank(1), rank, dnum, dsize)
|
||||
let pairs: u32 = (((rank.as_u32() + 1) * rank.as_u32()) >> 1).max(1);
|
||||
self.bytes_of_gglwe_prepared(base2k, k, Rank(pairs), rank, dnum, dsize)
|
||||
}
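// pairs = rank * (rank + 1) / 2 counts the distinct products s[i] * s[j] with i <= j
// (e.g. rank = 2 -> 3 pairs, rank = 3 -> 6 pairs). With the new representation the
// prepared tensor key is a single GGLWE with rank_in = Rank(pairs) instead of a Vec
// of `pairs` rank-1 GGLWE keys.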

fn bytes_of_tensor_key_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -117,17 +111,10 @@ where

fn prepare_tensor_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GLWETensorKeyPreparedToMut<B>,
O: GLWETensorKeyToRef,
R: GGLWEPreparedToMut<B>,
O: GGLWEToRef,
{
let mut res: GLWETensorKeyPrepared<&mut [u8], B> = res.to_mut();
let other: GLWETensorKey<&[u8]> = other.to_ref();

assert_eq!(res.keys.len(), other.keys.len());

for (a, b) in res.keys.iter_mut().zip(other.keys.iter()) {
self.prepare_gglwe(a, b, scratch);
}
self.prepare_gglwe(res, other, scratch);
}
}

@@ -165,28 +152,6 @@ impl<B: Backend> GLWETensorKeyPrepared<Vec<u8>, B> {
}
}

impl<D: DataMut, B: Backend> GLWETensorKeyPrepared<D, B> {
// Returns a mutable reference to GGLWE_{s}(s[i] * s[j])
pub fn at_mut(&mut self, mut i: usize, mut j: usize) -> &mut GGLWEPrepared<D, B> {
if i > j {
std::mem::swap(&mut i, &mut j);
};
let rank: usize = self.rank_out().into();
&mut self.keys[i * rank + j - (i * (i + 1) / 2)]
}
}

impl<D: DataRef, B: Backend> GLWETensorKeyPrepared<D, B> {
// Returns a reference to GGLWE_{s}(s[i] * s[j])
pub fn at(&self, mut i: usize, mut j: usize) -> &GGLWEPrepared<D, B> {
if i > j {
std::mem::swap(&mut i, &mut j);
};
let rank: usize = self.rank_out().into();
&self.keys[i * rank + j - (i * (i + 1) / 2)]
}
}
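// The removed Vec-based layout stored the keys in upper-triangular row-major order:
// index(i, j) = i * rank + j - i * (i + 1) / 2 for i <= j. For rank = 3 this gives
// (0,0) -> 0, (0,1) -> 1, (0,2) -> 2, (1,1) -> 3, (1,2) -> 4, (2,2) -> 5.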

impl<B: Backend> GLWETensorKeyPrepared<Vec<u8>, B> {
pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A) -> usize
where
@@ -200,39 +165,27 @@ impl<B: Backend> GLWETensorKeyPrepared<Vec<u8>, B> {
impl<D: DataMut, B: Backend> GLWETensorKeyPrepared<D, B> {
pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
where
O: GLWETensorKeyToRef,
O: GGLWEToRef,
M: GLWETensorKeyPreparedFactory<B>,
{
module.prepare_tensor_key(self, other, scratch);
}
}

pub trait GLWETensorKeyPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GLWETensorKeyPrepared<&mut [u8], B>;
}

impl<D: DataMut, B: Backend> GLWETensorKeyPreparedToMut<B> for GLWETensorKeyPrepared<D, B>
impl<D: DataMut, B: Backend> GGLWEPreparedToMut<B> for GLWETensorKeyPrepared<D, B>
where
GGLWEPrepared<D, B>: GGLWEPreparedToMut<B>,
{
fn to_mut(&mut self) -> GLWETensorKeyPrepared<&mut [u8], B> {
GLWETensorKeyPrepared {
keys: self.keys.iter_mut().map(|c| c.to_mut()).collect(),
}
fn to_mut(&mut self) -> GGLWEPrepared<&mut [u8], B> {
self.0.to_mut()
}
}

pub trait GLWETensorKeyPreparedToRef<B: Backend> {
fn to_ref(&self) -> GLWETensorKeyPrepared<&[u8], B>;
}

impl<D: DataRef, B: Backend> GLWETensorKeyPreparedToRef<B> for GLWETensorKeyPrepared<D, B>
impl<D: DataRef, B: Backend> GGLWEPreparedToRef<B> for GLWETensorKeyPrepared<D, B>
where
GGLWEPrepared<D, B>: GGLWEPreparedToRef<B>,
{
fn to_ref(&self) -> GLWETensorKeyPrepared<&[u8], B> {
GLWETensorKeyPrepared {
keys: self.keys.iter().map(|c| c.to_ref()).collect(),
}
fn to_ref(&self) -> GGLWEPrepared<&[u8], B> {
self.0.to_ref()
}
}
@@ -7,9 +7,9 @@ use crate::layouts::{
};

#[derive(PartialEq, Eq)]
pub struct GLWEToLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);
pub struct GLWEToLWEKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);

impl<D: Data, B: Backend> LWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
impl<D: Data, B: Backend> LWEInfos for GLWEToLWEKeyPrepared<D, B> {
fn base2k(&self) -> Base2K {
self.0.base2k()
}
@@ -27,13 +27,13 @@ impl<D: Data, B: Backend> LWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
}
}

impl<D: Data, B: Backend> GLWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
impl<D: Data, B: Backend> GLWEInfos for GLWEToLWEKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}

impl<D: Data, B: Backend> GGLWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
impl<D: Data, B: Backend> GGLWEInfos for GLWEToLWEKeyPrepared<D, B> {
fn rank_in(&self) -> Rank {
self.0.rank_in()
}
@@ -51,65 +51,65 @@ impl<D: Data, B: Backend> GGLWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
}
}

pub trait GLWEToLWESwitchingKeyPreparedFactory<B: Backend>
pub trait GLWEToLWEKeyPreparedFactory<B: Backend>
where
Self: GLWESwitchingKeyPreparedFactory<B>,
{
fn alloc_glwe_to_lwe_switching_key_prepared(
fn alloc_glwe_to_lwe_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_in: Rank,
dnum: Dnum,
) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
GLWEToLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
) -> GLWEToLWEKeyPrepared<Vec<u8>, B> {
GLWEToLWEKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1)))
}
fn alloc_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
fn alloc_glwe_to_lwe_key_prepared_from_infos<A>(&self, infos: &A) -> GLWEToLWEKeyPrepared<Vec<u8>, B>
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
"rank_out > 1 is not supported for GLWEToLWEKeyPrepared"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
"dsize > 1 is not supported for GLWEToLWEKeyPrepared"
);
self.alloc_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
self.alloc_glwe_to_lwe_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
}

fn bytes_of_glwe_to_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1))
fn bytes_of_glwe_to_lwe_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
self.bytes_of_glwe_key_prepared(base2k, k, rank_in, Rank(1), dnum, Dsize(1))
}

fn bytes_of_glwe_to_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
fn bytes_of_glwe_to_lwe_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
"rank_out > 1 is not supported for GLWEToLWEKeyPrepared"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
"dsize > 1 is not supported for GLWEToLWEKeyPrepared"
);
self.bytes_of_glwe_to_lwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
self.bytes_of_glwe_to_lwe_key_prepared(infos.base2k(), infos.k(), infos.rank_in(), infos.dnum())
}

fn prepare_glwe_to_lwe_switching_key_tmp_bytes<A>(&self, infos: &A) -> usize
fn prepare_glwe_to_lwe_key_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos)
}

fn prepare_glwe_to_lwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
fn prepare_glwe_to_lwe_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GGLWEPreparedToMut<B> + GLWESwitchingKeyDegreesMut,
O: GGLWEToRef + GLWESwitchingKeyDegrees,
@@ -118,61 +118,61 @@ where
}
}

impl<B: Backend> GLWEToLWESwitchingKeyPreparedFactory<B> for Module<B> where Self: GLWESwitchingKeyPreparedFactory<B> {}
impl<B: Backend> GLWEToLWEKeyPreparedFactory<B> for Module<B> where Self: GLWESwitchingKeyPreparedFactory<B> {}

impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
impl<B: Backend> GLWEToLWEKeyPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyPreparedFactory<B>,
M: GLWEToLWEKeyPreparedFactory<B>,
{
module.alloc_glwe_to_lwe_switching_key_prepared_from_infos(infos)
module.alloc_glwe_to_lwe_key_prepared_from_infos(infos)
}

pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
where
M: GLWEToLWESwitchingKeyPreparedFactory<B>,
M: GLWEToLWEKeyPreparedFactory<B>,
{
module.alloc_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
module.alloc_glwe_to_lwe_key_prepared(base2k, k, rank_in, dnum)
}

pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyPreparedFactory<B>,
M: GLWEToLWEKeyPreparedFactory<B>,
{
module.bytes_of_glwe_to_lwe_switching_key_prepared_from_infos(infos)
module.bytes_of_glwe_to_lwe_key_prepared_from_infos(infos)
}

pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize
where
M: GLWEToLWESwitchingKeyPreparedFactory<B>,
M: GLWEToLWEKeyPreparedFactory<B>,
{
module.bytes_of_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
module.bytes_of_glwe_to_lwe_key_prepared(base2k, k, rank_in, dnum)
}
}

impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
impl<B: Backend> GLWEToLWEKeyPrepared<Vec<u8>, B> {
pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
where
A: GGLWEInfos,
M: GLWEToLWESwitchingKeyPreparedFactory<B>,
M: GLWEToLWEKeyPreparedFactory<B>,
{
module.prepare_glwe_to_lwe_switching_key_tmp_bytes(infos);
module.prepare_glwe_to_lwe_key_tmp_bytes(infos);
}
}

impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
impl<D: DataMut, B: Backend> GLWEToLWEKeyPrepared<D, B> {
pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
where
O: GGLWEToRef + GLWESwitchingKeyDegrees,
M: GLWEToLWESwitchingKeyPreparedFactory<B>,
M: GLWEToLWEKeyPreparedFactory<B>,
{
module.prepare_glwe_to_lwe_switching_key(self, other, scratch);
module.prepare_glwe_to_lwe_key(self, other, scratch);
}
}

impl<D: DataRef, B: Backend> GGLWEPreparedToRef<B> for GLWEToLWESwitchingKeyPrepared<D, B>
impl<D: DataRef, B: Backend> GGLWEPreparedToRef<B> for GLWEToLWEKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GGLWEPreparedToRef<B>,
{
@@ -181,7 +181,7 @@ where
}
}

impl<D: DataMut, B: Backend> GGLWEPreparedToMut<B> for GLWEToLWESwitchingKeyPrepared<D, B>
impl<D: DataMut, B: Backend> GGLWEPreparedToMut<B> for GLWEToLWEKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GGLWEPreparedToRef<B>,
{
@@ -190,7 +190,7 @@ where
}
}

impl<D: DataMut, B: Backend> GLWESwitchingKeyDegreesMut for GLWEToLWESwitchingKeyPrepared<D, B> {
impl<D: DataMut, B: Backend> GLWESwitchingKeyDegreesMut for GLWEToLWEKeyPrepared<D, B> {
fn input_degree(&mut self) -> &mut Degree {
&mut self.0.input_degree
}
@@ -200,7 +200,7 @@ impl<D: DataMut, B: Backend> GLWESwitchingKeyDegreesMut for GLWEToLWESwitchingKe
}
}

impl<D: DataRef, B: Backend> GLWESwitchingKeyDegrees for GLWEToLWESwitchingKeyPrepared<D, B> {
impl<D: DataRef, B: Backend> GLWESwitchingKeyDegrees for GLWEToLWEKeyPrepared<D, B> {
fn input_degree(&self) -> &Degree {
&self.0.input_degree
}
@@ -86,7 +86,7 @@ where
}

fn bytes_of_lwe_switching_key_prepared(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1))
self.bytes_of_glwe_key_prepared(base2k, k, Rank(1), Rank(1), dnum, Dsize(1))
}

fn bytes_of_lwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
@@ -8,9 +8,9 @@ use crate::layouts::{

/// A special [GLWESwitchingKey] required to for the conversion from [LWE] to [GLWE].
#[derive(PartialEq, Eq)]
pub struct LWEToGLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);
pub struct LWEToGLWEKeyPrepared<D: Data, B: Backend>(pub(crate) GLWESwitchingKeyPrepared<D, B>);

impl<D: Data, B: Backend> LWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
impl<D: Data, B: Backend> LWEInfos for LWEToGLWEKeyPrepared<D, B> {
fn base2k(&self) -> Base2K {
self.0.base2k()
}
@@ -28,13 +28,13 @@ impl<D: Data, B: Backend> LWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
}
}

impl<D: Data, B: Backend> GLWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
impl<D: Data, B: Backend> GLWEInfos for LWEToGLWEKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}

impl<D: Data, B: Backend> GGLWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
impl<D: Data, B: Backend> GGLWEInfos for LWEToGLWEKeyPrepared<D, B> {
fn dsize(&self) -> Dsize {
self.0.dsize()
}
@@ -52,71 +52,65 @@ impl<D: Data, B: Backend> GGLWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
}
}

pub trait LWEToGLWESwitchingKeyPreparedFactory<B: Backend>
pub trait LWEToGLWEKeyPreparedFactory<B: Backend>
where
Self: GLWESwitchingKeyPreparedFactory<B>,
{
fn alloc_lwe_to_glwe_switching_key_prepared(
fn alloc_lwe_to_glwe_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_out: Rank,
dnum: Dnum,
) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
LWEToGLWESwitchingKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
) -> LWEToGLWEKeyPrepared<Vec<u8>, B> {
LWEToGLWEKeyPrepared(self.alloc_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1)))
}
fn alloc_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
fn alloc_lwe_to_glwe_key_prepared_from_infos<A>(&self, infos: &A) -> LWEToGLWEKeyPrepared<Vec<u8>, B>
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
"rank_in > 1 is not supported for LWEToGLWEKey"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
"dsize > 1 is not supported for LWEToGLWEKey"
);
self.alloc_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
self.alloc_lwe_to_glwe_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
}

fn bytes_of_lwe_to_glwe_switching_key_prepared(
&self,
base2k: Base2K,
k: TorusPrecision,
rank_out: Rank,
dnum: Dnum,
) -> usize {
self.bytes_of_glwe_switching_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1))
fn bytes_of_lwe_to_glwe_key_prepared(&self, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize {
self.bytes_of_glwe_key_prepared(base2k, k, Rank(1), rank_out, dnum, Dsize(1))
}

fn bytes_of_lwe_to_glwe_switching_key_prepared_from_infos<A>(&self, infos: &A) -> usize
fn bytes_of_lwe_to_glwe_key_prepared_from_infos<A>(&self, infos: &A) -> usize
where
A: GGLWEInfos,
{
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
"rank_in > 1 is not supported for LWEToGLWEKey"
);
debug_assert_eq!(
infos.dsize().0,
1,
"dsize > 1 is not supported for LWEToGLWESwitchingKey"
"dsize > 1 is not supported for LWEToGLWEKey"
);
self.bytes_of_lwe_to_glwe_switching_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
self.bytes_of_lwe_to_glwe_key_prepared(infos.base2k(), infos.k(), infos.rank_out(), infos.dnum())
}

fn prepare_lwe_to_glwe_switching_key_tmp_bytes<A>(&self, infos: &A)
fn prepare_lwe_to_glwe_key_tmp_bytes<A>(&self, infos: &A)
where
A: GGLWEInfos,
{
self.prepare_glwe_switching_key_tmp_bytes(infos);
}

fn prepare_lwe_to_glwe_switching_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
fn prepare_lwe_to_glwe_key<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GGLWEPreparedToMut<B> + GLWESwitchingKeyDegreesMut,
O: GGLWEToRef + GLWESwitchingKeyDegrees,
@@ -125,61 +119,61 @@ where
}
}

impl<B: Backend> LWEToGLWESwitchingKeyPreparedFactory<B> for Module<B> where Self: GLWESwitchingKeyPreparedFactory<B> {}
impl<B: Backend> LWEToGLWEKeyPreparedFactory<B> for Module<B> where Self: GLWESwitchingKeyPreparedFactory<B> {}

impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
impl<B: Backend> LWEToGLWEKeyPrepared<Vec<u8>, B> {
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyPreparedFactory<B>,
M: LWEToGLWEKeyPreparedFactory<B>,
{
module.alloc_lwe_to_glwe_switching_key_prepared_from_infos(infos)
module.alloc_lwe_to_glwe_key_prepared_from_infos(infos)
}

pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
where
M: LWEToGLWESwitchingKeyPreparedFactory<B>,
M: LWEToGLWEKeyPreparedFactory<B>,
{
module.alloc_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
module.alloc_lwe_to_glwe_key_prepared(base2k, k, rank_out, dnum)
}

pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyPreparedFactory<B>,
M: LWEToGLWEKeyPreparedFactory<B>,
{
module.bytes_of_lwe_to_glwe_switching_key_prepared_from_infos(infos)
module.bytes_of_lwe_to_glwe_key_prepared_from_infos(infos)
}

pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize
where
M: LWEToGLWESwitchingKeyPreparedFactory<B>,
M: LWEToGLWEKeyPreparedFactory<B>,
{
module.bytes_of_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
module.bytes_of_lwe_to_glwe_key_prepared(base2k, k, rank_out, dnum)
}
}

impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
impl<B: Backend> LWEToGLWEKeyPrepared<Vec<u8>, B> {
pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
where
A: GGLWEInfos,
M: LWEToGLWESwitchingKeyPreparedFactory<B>,
M: LWEToGLWEKeyPreparedFactory<B>,
{
module.prepare_lwe_to_glwe_switching_key_tmp_bytes(infos);
module.prepare_lwe_to_glwe_key_tmp_bytes(infos);
}
}

impl<D: DataMut, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
impl<D: DataMut, B: Backend> LWEToGLWEKeyPrepared<D, B> {
pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
where
O: GGLWEToRef + GLWESwitchingKeyDegrees,
M: LWEToGLWESwitchingKeyPreparedFactory<B>,
M: LWEToGLWEKeyPreparedFactory<B>,
{
module.prepare_lwe_to_glwe_switching_key(self, other, scratch);
module.prepare_lwe_to_glwe_key(self, other, scratch);
}
}

impl<D: DataRef, B: Backend> GGLWEPreparedToRef<B> for LWEToGLWESwitchingKeyPrepared<D, B>
impl<D: DataRef, B: Backend> GGLWEPreparedToRef<B> for LWEToGLWEKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GGLWEPreparedToRef<B>,
{
@@ -188,7 +182,7 @@ where
}
}

impl<D: DataMut, B: Backend> GGLWEPreparedToMut<B> for LWEToGLWESwitchingKeyPrepared<D, B>
impl<D: DataMut, B: Backend> GGLWEPreparedToMut<B> for LWEToGLWEKeyPrepared<D, B>
where
GLWESwitchingKeyPrepared<D, B>: GGLWEPreparedToMut<B>,
{
@@ -197,7 +191,7 @@ where
}
}

impl<D: DataMut, B: Backend> GLWESwitchingKeyDegreesMut for LWEToGLWESwitchingKeyPrepared<D, B> {
impl<D: DataMut, B: Backend> GLWESwitchingKeyDegreesMut for LWEToGLWEKeyPrepared<D, B> {
fn input_degree(&mut self) -> &mut Degree {
&mut self.0.input_degree
}
@@ -1,4 +1,5 @@
mod gglwe;
mod gglwe_to_ggsw_key;
mod ggsw;
mod glwe;
mod glwe_automorphism_key;
@@ -6,11 +7,12 @@ mod glwe_public_key;
mod glwe_secret;
mod glwe_switching_key;
mod glwe_tensor_key;
mod glwe_to_lwe_switching_key;
mod glwe_to_lwe_key;
mod lwe_switching_key;
mod lwe_to_glwe_switching_key;
mod lwe_to_glwe_key;

pub use gglwe::*;
pub use gglwe_to_ggsw_key::*;
pub use ggsw::*;
pub use glwe::*;
pub use glwe_automorphism_key::*;
@@ -18,6 +20,6 @@ pub use glwe_public_key::*;
pub use glwe_secret::*;
pub use glwe_switching_key::*;
pub use glwe_tensor_key::*;
pub use glwe_to_lwe_switching_key::*;
pub use glwe_to_lwe_key::*;
pub use lwe_switching_key::*;
pub use lwe_to_glwe_switching_key::*;
pub use lwe_to_glwe_key::*;
@@ -4,6 +4,7 @@ mod decryption;
mod dist;
mod encryption;
mod external_product;
mod glwe_packer;
mod glwe_packing;
mod glwe_trace;
mod keyswitching;
@@ -20,6 +21,7 @@ pub use decryption::*;
pub use dist::*;
pub use encryption::*;
pub use external_product::*;
pub use glwe_packer::*;
pub use glwe_packing::*;
pub use glwe_trace::*;
pub use keyswitching::*;

@@ -62,7 +62,7 @@ where

let noise_have: f64 = pt.data.std(base2k, 0).log2();

// println!("noise_have: {noise_have}");
println!("noise_have: {noise_have}");

assert!(
noise_have <= max_noise,

@@ -67,6 +67,14 @@ where
);
}
}

// fn glwe_relinearize<R, A, T>(&self, res: &mut R, a: &A, tsk: &T, scratch: &mut Scratch<BE>)
// where
// R: GLWEToRef,
// A: GLWETensorToRef,
// T: GLWETensorKeyPreparedToRef<BE>,
// {
// }
}

pub trait GLWEAdd
@@ -7,7 +7,7 @@ use crate::{
dist::Distribution,
layouts::{
Degree, GGLWE, GGLWEInfos, GGLWELayout, GGSW, GGSWInfos, GLWE, GLWEAutomorphismKey, GLWEInfos, GLWEPlaintext,
GLWEPrepared, GLWEPublicKey, GLWESecret, GLWESwitchingKey, GLWETensorKey, Rank,
GLWEPrepared, GLWEPublicKey, GLWESecret, GLWESecretTensor, GLWESwitchingKey, GLWETensorKey, Rank,
prepared::{
GGLWEPrepared, GGSWPrepared, GLWEAutomorphismKeyPrepared, GLWEPublicKeyPrepared, GLWESecretPrepared,
GLWESwitchingKeyPrepared, GLWETensorKeyPrepared,
@@ -232,6 +232,18 @@ where
)
}

fn take_glwe_secret_tensor(&mut self, n: Degree, rank: Rank) -> (GLWESecretTensor<&mut [u8]>, &mut Self) {
let (data, scratch) = self.take_scalar_znx(n.into(), GLWESecretTensor::pairs(rank.into()));
(
GLWESecretTensor {
data,
rank,
dist: Distribution::NONE,
},
scratch,
)
}
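// Note (assumption): GLWESecretTensor::pairs(rank) is presumably the number of distinct
// products s[i] * s[j] with i <= j, i.e. rank * (rank + 1) / 2, matching the pairs count
// used for GLWETensorKey; the helper itself is defined elsewhere in the crate.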

fn take_glwe_secret_prepared<M>(&mut self, module: &M, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self)
where
M: ModuleN + SvpPPolBytesOf,
@@ -313,25 +325,12 @@ where
infos.rank_out(),
"rank_in != rank_out is not supported for GLWETensorKey"
);
let mut keys: Vec<GGLWE<&mut [u8]>> = Vec::new();
let pairs: usize = (((infos.rank_out().0 + 1) * infos.rank_out().0) >> 1).max(1) as usize;

let mut scratch: &mut Self = self;

let pairs: u32 = (((infos.rank_out().0 + 1) * infos.rank_out().0) >> 1).max(1);
let mut ksk_infos: GGLWELayout = infos.gglwe_layout();
ksk_infos.rank_in = Rank(1);

if pairs != 0 {
let (gglwe, s) = scratch.take_gglwe(&ksk_infos);
scratch = s;
keys.push(gglwe);
}
for _ in 1..pairs {
let (gglwe, s) = scratch.take_gglwe(&ksk_infos);
scratch = s;
keys.push(gglwe);
}
(GLWETensorKey { keys }, scratch)
ksk_infos.rank_in = Rank(pairs);
let (data, scratch) = self.take_gglwe(infos);
(GLWETensorKey(data), scratch)
}

fn take_glwe_tensor_key_prepared<A, M>(&mut self, module: &M, infos: &A) -> (GLWETensorKeyPrepared<&mut [u8], B>, &mut Self)
@@ -346,25 +345,11 @@ where
"rank_in != rank_out is not supported for GGLWETensorKeyPrepared"
);

let mut keys: Vec<GGLWEPrepared<&mut [u8], B>> = Vec::new();
let pairs: usize = (((infos.rank_out().0 + 1) * infos.rank_out().0) >> 1).max(1) as usize;

let mut scratch: &mut Self = self;

let pairs: u32 = (((infos.rank_out().0 + 1) * infos.rank_out().0) >> 1).max(1);
let mut ksk_infos: GGLWELayout = infos.gglwe_layout();
ksk_infos.rank_in = Rank(1);

if pairs != 0 {
let (gglwe, s) = scratch.take_gglwe_prepared(module, &ksk_infos);
scratch = s;
keys.push(gglwe);
}
for _ in 1..pairs {
let (gglwe, s) = scratch.take_gglwe_prepared(module, &ksk_infos);
scratch = s;
keys.push(gglwe);
}
(GLWETensorKeyPrepared { keys }, scratch)
ksk_infos.rank_in = Rank(pairs);
let (data, scratch) = self.take_gglwe_prepared(module, infos);
(GLWETensorKeyPrepared(data), scratch)
}
}
@@ -36,6 +36,7 @@ gglwe_automorphism_key_encrypt_sk => crate::tests::test_suite::encryption::test_
gglwe_automorphism_key_compressed_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_automorphism_key_compressed_encrypt_sk,
gglwe_tensor_key_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_tensor_key_encrypt_sk,
gglwe_tensor_key_compressed_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_tensor_key_compressed_encrypt_sk,
gglwe_to_ggsw_key_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_to_ggsw_key_encrypt_sk,
// GGLWE Keyswitching
gglwe_switching_key_keyswitch => crate::tests::test_suite::keyswitch::test_gglwe_switching_key_keyswitch,
gglwe_switching_key_keyswitch_inplace => crate::tests::test_suite::keyswitch::test_gglwe_switching_key_keyswitch_inplace,
@@ -75,7 +76,7 @@ backend_test_suite!(
glwe_encrypt_zero_sk => crate::tests::test_suite::encryption::test_glwe_encrypt_zero_sk,
glwe_encrypt_pk => crate::tests::test_suite::encryption::test_glwe_encrypt_pk,
// GLWE Keyswitch
glwe_keyswitch => crate::tests::test_suite::keyswitch::test_glwe_keyswitch,
glwe_keyswitch => crate::tests::test_suite::keyswitch::test_glwe_keyswitch,
glwe_keyswitch_inplace => crate::tests::test_suite::keyswitch::test_glwe_keyswitch_inplace,
// GLWE Automorphism
glwe_automorphism => crate::tests::test_suite::automorphism::test_glwe_automorphism,
@@ -93,6 +94,7 @@ gglwe_automorphism_key_encrypt_sk => crate::tests::test_suite::encryption::test_
gglwe_automorphism_key_compressed_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_automorphism_key_compressed_encrypt_sk,
gglwe_tensor_key_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_tensor_key_encrypt_sk,
gglwe_tensor_key_compressed_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_tensor_key_compressed_encrypt_sk,
gglwe_to_ggsw_key_encrypt_sk => crate::tests::test_suite::encryption::test_gglwe_to_ggsw_key_encrypt_sk,
// GGLWE Keyswitching
gglwe_switching_key_keyswitch => crate::tests::test_suite::keyswitch::test_gglwe_switching_key_keyswitch,
gglwe_switching_key_keyswitch_inplace => crate::tests::test_suite::keyswitch::test_gglwe_switching_key_keyswitch_inplace,

@@ -1,12 +1,12 @@
use poulpy_hal::test_suite::serialization::test_reader_writer_interface;

use crate::layouts::{
Base2K, Degree, Dnum, Dsize, GGLWE, GGSW, GLWE, GLWEAutomorphismKey, GLWESwitchingKey, GLWETensorKey, GLWEToLWESwitchingKey,
LWE, LWESwitchingKey, LWEToGLWESwitchingKey, Rank, TorusPrecision,
Base2K, Degree, Dnum, Dsize, GGLWE, GGSW, GLWE, GLWEAutomorphismKey, GLWESwitchingKey, GLWETensorKey, GLWEToLWEKey, LWE,
LWESwitchingKey, LWEToGLWEKey, Rank, TorusPrecision,
compressed::{
GGLWECompressed, GGSWCompressed, GLWEAutomorphismKeyCompressed, GLWECompressed, GLWESwitchingKeyCompressed,
GLWETensorKeyCompressed, GLWEToLWESwitchingKeyCompressed, LWECompressed, LWESwitchingKeyCompressed,
LWEToGLWESwitchingKeyCompressed,
LWEToGLWEKeyCompressed,
},
};

@@ -93,28 +93,27 @@ fn test_tensor_key_compressed_serialization() {
}

#[test]
fn glwe_to_lwe_switching_key_serialization() {
let original: GLWEToLWESwitchingKey<Vec<u8>> = GLWEToLWESwitchingKey::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
fn glwe_to_lwe_key_serialization() {
let original: GLWEToLWEKey<Vec<u8>> = GLWEToLWEKey::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
test_reader_writer_interface(original);
}

#[test]
fn glwe_to_lwe_switching_key_compressed_serialization() {
fn glwe_to_lwe_key_compressed_serialization() {
let original: GLWEToLWESwitchingKeyCompressed<Vec<u8>> =
GLWEToLWESwitchingKeyCompressed::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
test_reader_writer_interface(original);
}

#[test]
fn lwe_to_glwe_switching_key_serialization() {
let original: LWEToGLWESwitchingKey<Vec<u8>> = LWEToGLWESwitchingKey::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
fn lwe_to_glwe_key_serialization() {
let original: LWEToGLWEKey<Vec<u8>> = LWEToGLWEKey::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
test_reader_writer_interface(original);
}

#[test]
fn lwe_to_glwe_switching_key_compressed_serialization() {
let original: LWEToGLWESwitchingKeyCompressed<Vec<u8>> =
LWEToGLWESwitchingKeyCompressed::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
fn lwe_to_glwe_key_compressed_serialization() {
let original: LWEToGLWEKeyCompressed<Vec<u8>> = LWEToGLWEKeyCompressed::alloc(N_GLWE, BASE2K, K, RANK, DNUM);
test_reader_writer_interface(original);
}
@@ -5,12 +5,12 @@ use poulpy_hal::{
};

use crate::{
GGSWAutomorphism, GGSWEncryptSk, GGSWNoise, GLWEAutomorphismKeyEncryptSk, GLWETensorKeyEncryptSk, ScratchTakeCore,
GGLWEToGGSWKeyEncryptSk, GGSWAutomorphism, GGSWEncryptSk, GGSWNoise, GLWEAutomorphismKeyEncryptSk, ScratchTakeCore,
encryption::SIGMA,
layouts::{
GGSW, GGSWLayout, GLWEAutomorphismKey, GLWEAutomorphismKeyPreparedFactory, GLWESecret, GLWESecretPreparedFactory,
GLWETensorKey, GLWETensorKeyLayout, GLWETensorKeyPreparedFactory,
prepared::{GLWEAutomorphismKeyPrepared, GLWESecretPrepared, GLWETensorKeyPrepared},
GGLWEToGGSWKey, GGLWEToGGSWKeyLayout, GGLWEToGGSWKeyPreparedFactory, GGSW, GGSWLayout, GLWEAutomorphismKey,
GLWEAutomorphismKeyPreparedFactory, GLWESecret, GLWESecretPreparedFactory,
prepared::{GGLWEToGGSWKeyPrepared, GLWEAutomorphismKeyPrepared, GLWESecretPrepared},
},
noise::noise_ggsw_keyswitch,
};
@@ -21,8 +21,8 @@ where
+ GLWEAutomorphismKeyEncryptSk<BE>
+ GLWEAutomorphismKeyPreparedFactory<BE>
+ GGSWAutomorphism<BE>
+ GLWETensorKeyPreparedFactory<BE>
+ GLWETensorKeyEncryptSk<BE>
+ GGLWEToGGSWKeyPreparedFactory<BE>
+ GGLWEToGGSWKeyEncryptSk<BE>
+ GLWESecretPreparedFactory<BE>
+ VecZnxAutomorphismInplace<BE>
+ GGSWNoise<BE>,
@@ -64,7 +64,7 @@ where
rank: rank.into(),
};

let tensor_key_layout: GLWETensorKeyLayout = GLWETensorKeyLayout {
let tsk_layout: GGLWEToGGSWKeyLayout = GGLWEToGGSWKeyLayout {
n: n.into(),
base2k: base2k.into(),
k: k_tsk.into(),
@@ -73,7 +73,7 @@ where
rank: rank.into(),
};

let auto_key_layout: GLWETensorKeyLayout = GLWETensorKeyLayout {
let auto_key_layout: GGLWEToGGSWKeyLayout = GGLWEToGGSWKeyLayout {
n: n.into(),
base2k: base2k.into(),
k: k_ksk.into(),
@@ -84,7 +84,7 @@ where

let mut ct_in: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_in_layout);
let mut ct_out: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_out_layout);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc_from_infos(&tensor_key_layout);
let mut tsk: GGLWEToGGSWKey<Vec<u8>> = GGLWEToGGSWKey::alloc_from_infos(&tsk_layout);
let mut auto_key: GLWEAutomorphismKey<Vec<u8>> = GLWEAutomorphismKey::alloc_from_infos(&auto_key_layout);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);

@@ -95,8 +95,8 @@ where
let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(
GGSW::encrypt_sk_tmp_bytes(module, &ct_in)
| GLWEAutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key)
| GLWETensorKey::encrypt_sk_tmp_bytes(module, &tensor_key)
| GGSW::automorphism_tmp_bytes(module, &ct_out, &ct_in, &auto_key, &tensor_key),
| GGLWEToGGSWKey::encrypt_sk_tmp_bytes(module, &tsk)
| GGSW::automorphism_tmp_bytes(module, &ct_out, &ct_in, &auto_key, &tsk),
);

let var_xs: f64 = 0.5;
@@ -115,7 +115,7 @@ where
&mut source_xe,
scratch.borrow(),
);
tensor_key.encrypt_sk(
tsk.encrypt_sk(
module,
&sk,
&mut source_xa,
@@ -138,9 +138,8 @@ where
GLWEAutomorphismKeyPrepared::alloc_from_infos(module, &auto_key_layout);
auto_key_prepared.prepare(module, &auto_key, scratch.borrow());

let mut tsk_prepared: GLWETensorKeyPrepared<Vec<u8>, BE> =
GLWETensorKeyPrepared::alloc_from_infos(module, &tensor_key_layout);
tsk_prepared.prepare(module, &tensor_key, scratch.borrow());
let mut tsk_prepared: GGLWEToGGSWKeyPrepared<Vec<u8>, BE> = GGLWEToGGSWKeyPrepared::alloc_from_infos(module, &tsk);
tsk_prepared.prepare(module, &tsk, scratch.borrow());

ct_out.automorphism(
module,
@@ -180,8 +179,8 @@ where
+ GLWEAutomorphismKeyEncryptSk<BE>
+ GLWEAutomorphismKeyPreparedFactory<BE>
+ GGSWAutomorphism<BE>
+ GLWETensorKeyPreparedFactory<BE>
+ GLWETensorKeyEncryptSk<BE>
+ GGLWEToGGSWKeyPreparedFactory<BE>
+ GGLWEToGGSWKeyEncryptSk<BE>
+ GLWESecretPreparedFactory<BE>
+ VecZnxAutomorphismInplace<BE>
+ GGSWNoise<BE>,
@@ -211,7 +210,7 @@ where
rank: rank.into(),
};

let tensor_key_layout: GLWETensorKeyLayout = GLWETensorKeyLayout {
let tsk_layout: GGLWEToGGSWKeyLayout = GGLWEToGGSWKeyLayout {
n: n.into(),
base2k: base2k.into(),
k: k_tsk.into(),
@@ -220,7 +219,7 @@ where
rank: rank.into(),
};

let auto_key_layout: GLWETensorKeyLayout = GLWETensorKeyLayout {
let auto_key_layout: GGLWEToGGSWKeyLayout = GGLWEToGGSWKeyLayout {
n: n.into(),
base2k: base2k.into(),
k: k_ksk.into(),
@@ -230,7 +229,7 @@ where
};

let mut ct: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_out_layout);
let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc_from_infos(&tensor_key_layout);
let mut tsk: GGLWEToGGSWKey<Vec<u8>> = GGLWEToGGSWKey::alloc_from_infos(&tsk_layout);
let mut auto_key: GLWEAutomorphismKey<Vec<u8>> = GLWEAutomorphismKey::alloc_from_infos(&auto_key_layout);
let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);

@@ -241,8 +240,8 @@ where
let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(
GGSW::encrypt_sk_tmp_bytes(module, &ct)
| GLWEAutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key)
| GLWETensorKey::encrypt_sk_tmp_bytes(module, &tensor_key)
| GGSW::automorphism_tmp_bytes(module, &ct, &ct, &auto_key, &tensor_key),
| GGLWEToGGSWKey::encrypt_sk_tmp_bytes(module, &tsk)
| GGSW::automorphism_tmp_bytes(module, &ct, &ct, &auto_key, &tsk),
);

let var_xs: f64 = 0.5;
@@ -261,7 +260,7 @@ where
&mut source_xe,
scratch.borrow(),
);
tensor_key.encrypt_sk(
tsk.encrypt_sk(
module,
&sk,
&mut source_xa,
@@ -284,9 +283,8 @@ where
GLWEAutomorphismKeyPrepared::alloc_from_infos(module, &auto_key_layout);
auto_key_prepared.prepare(module, &auto_key, scratch.borrow());

let mut tsk_prepared: GLWETensorKeyPrepared<Vec<u8>, BE> =
GLWETensorKeyPrepared::alloc_from_infos(module, &tensor_key_layout);
tsk_prepared.prepare(module, &tensor_key, scratch.borrow());
let mut tsk_prepared: GGLWEToGGSWKeyPrepared<Vec<u8>, BE> = GGLWEToGGSWKeyPrepared::alloc_from_infos(module, &tsk);
tsk_prepared.prepare(module, &tsk, scratch.borrow());

ct.automorphism_inplace(module, &auto_key_prepared, &tsk_prepared, scratch.borrow());
@@ -8,10 +8,10 @@ use crate::{
GLWEDecrypt, GLWEEncryptSk, GLWEFromLWE, GLWEToLWESwitchingKeyEncryptSk, LWEDecrypt, LWEEncryptSk,
LWEToGLWESwitchingKeyEncryptSk, ScratchTakeCore,
layouts::{
Base2K, Degree, Dnum, GLWE, GLWELayout, GLWEPlaintext, GLWESecret, GLWESecretPreparedFactory, GLWEToLWEKeyLayout,
GLWEToLWESwitchingKey, GLWEToLWESwitchingKeyPreparedFactory, LWE, LWELayout, LWEPlaintext, LWESecret,
LWEToGLWESwitchingKey, LWEToGLWESwitchingKeyLayout, LWEToGLWESwitchingKeyPreparedFactory, Rank, TorusPrecision,
prepared::{GLWESecretPrepared, GLWEToLWESwitchingKeyPrepared, LWEToGLWESwitchingKeyPrepared},
Base2K, Degree, Dnum, GLWE, GLWELayout, GLWEPlaintext, GLWESecret, GLWESecretPreparedFactory, GLWEToLWEKey,
GLWEToLWEKeyLayout, GLWEToLWEKeyPrepared, GLWEToLWEKeyPreparedFactory, LWE, LWELayout, LWEPlaintext, LWESecret,
LWEToGLWEKey, LWEToGLWEKeyLayout, LWEToGLWEKeyPrepared, LWEToGLWEKeyPreparedFactory, Rank, TorusPrecision,
prepared::GLWESecretPrepared,
},
};

@@ -22,7 +22,7 @@ where
+ GLWEDecrypt<BE>
+ GLWESecretPreparedFactory<BE>
+ LWEEncryptSk<BE>
+ LWEToGLWESwitchingKeyPreparedFactory<BE>,
+ LWEToGLWEKeyPreparedFactory<BE>,
ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
@@ -36,7 +36,7 @@ where
let mut source_xa: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);

let lwe_to_glwe_infos: LWEToGLWESwitchingKeyLayout = LWEToGLWESwitchingKeyLayout {
let lwe_to_glwe_infos: LWEToGLWEKeyLayout = LWEToGLWEKeyLayout {
n: n_glwe,
base2k: Base2K(17),
k: TorusPrecision(51),
@@ -58,7 +58,7 @@ where
};

let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(
LWEToGLWESwitchingKey::encrypt_sk_tmp_bytes(module, &lwe_to_glwe_infos)
LWEToGLWEKey::encrypt_sk_tmp_bytes(module, &lwe_to_glwe_infos)
| GLWE::from_lwe_tmp_bytes(module, &glwe_infos, &lwe_infos, &lwe_to_glwe_infos)
| GLWE::decrypt_tmp_bytes(module, &glwe_infos),
);
@@ -80,7 +80,7 @@ where
let mut lwe_ct: LWE<Vec<u8>> = LWE::alloc_from_infos(&lwe_infos);
lwe_ct.encrypt_sk(module, &lwe_pt, &sk_lwe, &mut source_xa, &mut source_xe);

let mut ksk: LWEToGLWESwitchingKey<Vec<u8>> = LWEToGLWESwitchingKey::alloc_from_infos(&lwe_to_glwe_infos);
let mut ksk: LWEToGLWEKey<Vec<u8>> = LWEToGLWEKey::alloc_from_infos(&lwe_to_glwe_infos);

ksk.encrypt_sk(
module,
@@ -93,8 +93,7 @@ where

let mut glwe_ct: GLWE<Vec<u8>> = GLWE::alloc_from_infos(&glwe_infos);

let mut ksk_prepared: LWEToGLWESwitchingKeyPrepared<Vec<u8>, BE> =
LWEToGLWESwitchingKeyPrepared::alloc_from_infos(module, &ksk);
let mut ksk_prepared: LWEToGLWEKeyPrepared<Vec<u8>, BE> = LWEToGLWEKeyPrepared::alloc_from_infos(module, &ksk);
ksk_prepared.prepare(module, &ksk, scratch.borrow());

glwe_ct.from_lwe(module, &lwe_ct, &ksk_prepared, scratch.borrow());
@@ -114,7 +113,7 @@ where
+ GLWEDecrypt<BE>
+ GLWESecretPreparedFactory<BE>
+ GLWEToLWESwitchingKeyEncryptSk<BE>
+ GLWEToLWESwitchingKeyPreparedFactory<BE>,
+ GLWEToLWEKeyPreparedFactory<BE>,
ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
@@ -150,7 +149,7 @@ where
let mut source_xe: Source = Source::new([0u8; 32]);

let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(
GLWEToLWESwitchingKey::encrypt_sk_tmp_bytes(module, &glwe_to_lwe_infos)
GLWEToLWEKey::encrypt_sk_tmp_bytes(module, &glwe_to_lwe_infos)
| LWE::from_glwe_tmp_bytes(module, &lwe_infos, &glwe_infos, &glwe_to_lwe_infos)
| GLWE::decrypt_tmp_bytes(module, &glwe_infos),
);
@@ -178,7 +177,7 @@ where
scratch.borrow(),
);

let mut ksk: GLWEToLWESwitchingKey<Vec<u8>> = GLWEToLWESwitchingKey::alloc_from_infos(&glwe_to_lwe_infos);
let mut ksk: GLWEToLWEKey<Vec<u8>> = GLWEToLWEKey::alloc_from_infos(&glwe_to_lwe_infos);

ksk.encrypt_sk(
module,
@@ -191,8 +190,7 @@ where

let mut lwe_ct: LWE<Vec<u8>> = LWE::alloc_from_infos(&lwe_infos);

let mut ksk_prepared: GLWEToLWESwitchingKeyPrepared<Vec<u8>, BE> =
GLWEToLWESwitchingKeyPrepared::alloc_from_infos(module, &ksk);
let mut ksk_prepared: GLWEToLWEKeyPrepared<Vec<u8>, BE> = GLWEToLWEKeyPrepared::alloc_from_infos(module, &ksk);
ksk_prepared.prepare(module, &ksk, scratch.borrow());

lwe_ct.from_glwe(module, &glwe_ct, &ksk_prepared, scratch.borrow());
144 poulpy-core/src/tests/test_suite/encryption/gglwe_to_ggsw_key.rs Normal file
@@ -0,0 +1,144 @@
use poulpy_hal::{
api::{ScratchAvailable, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxCopy},
layouts::{Backend, Module, ScalarZnx, Scratch, ScratchOwned},
source::Source,
};

use crate::{
GGLWENoise, GGLWEToGGSWKeyCompressedEncryptSk, GGLWEToGGSWKeyEncryptSk, ScratchTakeCore,
decryption::GLWEDecrypt,
encryption::SIGMA,
layouts::{
Dsize, GGLWEDecompress, GGLWEToGGSWKey, GGLWEToGGSWKeyCompressed, GGLWEToGGSWKeyDecompress, GGLWEToGGSWKeyLayout,
GLWESecret, GLWESecretPreparedFactory, GLWESecretTensor, GLWESecretTensorFactory, LWEInfos, prepared::GLWESecretPrepared,
},
};

pub fn test_gglwe_to_ggsw_key_encrypt_sk<BE: Backend>(module: &Module<BE>)
where
Module<BE>: GGLWEToGGSWKeyEncryptSk<BE>
+ GLWESecretTensorFactory<BE>
+ GLWESecretPreparedFactory<BE>
+ GLWEDecrypt<BE>
+ GGLWENoise<BE>
+ VecZnxCopy,
ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
let base2k: usize = 8;
let k: usize = 54;

for rank in 2_usize..3 {
let n: usize = module.n();
let dnum: usize = k / base2k;

let key_infos: GGLWEToGGSWKeyLayout = GGLWEToGGSWKeyLayout {
n: n.into(),
base2k: base2k.into(),
k: k.into(),
dnum: dnum.into(),
dsize: Dsize(1),
rank: rank.into(),
};

let mut key: GGLWEToGGSWKey<Vec<u8>> = GGLWEToGGSWKey::alloc_from_infos(&key_infos);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);

let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(GGLWEToGGSWKey::encrypt_sk_tmp_bytes(module, &key_infos));

let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&key_infos);
sk.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_prepared: GLWESecretPrepared<Vec<u8>, BE> = GLWESecretPrepared::alloc(module, rank.into());
sk_prepared.prepare(module, &sk);

key.encrypt_sk(
module,
&sk,
&mut source_xa,
&mut source_xe,
scratch.borrow(),
);

let mut sk_tensor: GLWESecretTensor<Vec<u8>> = GLWESecretTensor::alloc_from_infos(&sk);
sk_tensor.prepare(module, &sk, scratch.borrow());

let max_noise = SIGMA.log2() + 0.5 - (key.k().as_u32() as f64);

let mut pt_want: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(module.n(), rank);

for i in 0..rank {
for j in 0..rank {
module.vec_znx_copy(
&mut pt_want.as_vec_znx_mut(),
j,
&sk_tensor.at(i, j).as_vec_znx(),
0,
);
}

println!("pt_want: {}", pt_want.as_vec_znx());

module.gglwe_assert_noise(key.at(i), &sk_prepared, &pt_want, max_noise);
}
}
}
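// The check above builds, for each i, the plaintext vector [s[i]*s[0], ..., s[i]*s[rank-1]]
// from sk_tensor and asserts that key.at(i) decrypts to it under sk_prepared within max_noise.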

pub fn test_gglwe_to_ggsw_compressed_encrypt_sk<BE: Backend>(module: &Module<BE>)
where
Module<BE>: GGLWEToGGSWKeyCompressedEncryptSk<BE>
+ GLWESecretPreparedFactory<BE>
+ GLWEDecrypt<BE>
+ GLWESecretTensorFactory<BE>
+ GGLWENoise<BE>
+ GGLWEDecompress
+ GGLWEToGGSWKeyDecompress,
ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
let base2k = 8;
let k = 54;
for rank in 1_usize..3 {
let n: usize = module.n();
let dnum: usize = k / base2k;

let key_infos: GGLWEToGGSWKeyLayout = GGLWEToGGSWKeyLayout {
n: n.into(),
base2k: base2k.into(),
k: k.into(),
dnum: dnum.into(),
dsize: Dsize(1),
rank: rank.into(),
};

let mut key_compressed: GGLWEToGGSWKeyCompressed<Vec<u8>> = GGLWEToGGSWKeyCompressed::alloc_from_infos(&key_infos);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);

let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(GGLWEToGGSWKeyCompressed::encrypt_sk_tmp_bytes(
module, &key_infos,
));

let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&key_infos);
sk.fill_ternary_prob(0.5, &mut source_xs);
let mut sk_prepared: GLWESecretPrepared<Vec<u8>, BE> = GLWESecretPrepared::alloc(module, rank.into());
sk_prepared.prepare(module, &sk);

let seed_xa: [u8; 32] = [1u8; 32];

key_compressed.encrypt_sk(module, &sk, seed_xa, &mut source_xe, scratch.borrow());

let mut key: GGLWEToGGSWKey<Vec<u8>> = GGLWEToGGSWKey::alloc_from_infos(&key_infos);
key.decompress(module, &key_compressed);

let mut sk_tensor: GLWESecretTensor<Vec<u8>> = GLWESecretTensor::alloc_from_infos(&sk);
sk_tensor.prepare(module, &sk, scratch.borrow());

for i in 0..rank {
module.gglwe_assert_noise(key.at(i), &sk_prepared, &sk_tensor.data, SIGMA + 0.5);
}
}
}
@@ -1,20 +1,16 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, VecZnxBigAlloc, VecZnxBigNormalize,
|
||||
VecZnxCopy, VecZnxDftAlloc, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyTmpA, VecZnxSubScalarInplace,
|
||||
VecZnxSwitchRing,
|
||||
},
|
||||
layouts::{Backend, Module, Scratch, ScratchOwned, VecZnxBig, VecZnxDft},
|
||||
api::{ScratchAvailable, ScratchOwnedAlloc, ScratchOwnedBorrow},
|
||||
layouts::{Backend, Module, Scratch, ScratchOwned},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
GLWETensorKeyCompressedEncryptSk, GLWETensorKeyEncryptSk, ScratchTakeCore,
|
||||
GGLWENoise, GLWETensorKeyCompressedEncryptSk, GLWETensorKeyEncryptSk, ScratchTakeCore,
|
||||
decryption::GLWEDecrypt,
|
||||
encryption::SIGMA,
|
||||
layouts::{
|
||||
Dsize, GLWEPlaintext, GLWESecret, GLWESecretPreparedFactory, GLWETensorKey, GLWETensorKeyCompressed, GLWETensorKeyLayout,
|
||||
prepared::GLWESecretPrepared,
|
||||
Dsize, GGLWEDecompress, GLWESecret, GLWESecretPreparedFactory, GLWESecretTensor, GLWESecretTensorFactory, GLWETensorKey,
|
||||
GLWETensorKeyCompressed, GLWETensorKeyLayout, prepared::GLWESecretPrepared,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -23,20 +19,15 @@ where
    Module<BE>: GLWETensorKeyEncryptSk<BE>
        + GLWESecretPreparedFactory<BE>
        + GLWEDecrypt<BE>
        + VecZnxDftAlloc<BE>
        + VecZnxBigAlloc<BE>
        + VecZnxDftApply<BE>
        + SvpApplyDftToDft<BE>
        + VecZnxIdftApplyTmpA<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxSubScalarInplace,
        + GLWESecretTensorFactory<BE>
        + GGLWENoise<BE>,
    ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
    Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
    let base2k: usize = 8;
    let k: usize = 54;

    for rank in 1_usize..3 {
    for rank in 2_usize..3 {
        let n: usize = module.n();
        let dnum: usize = k / base2k;

@@ -73,42 +64,10 @@ where
            scratch.borrow(),
        );

        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(&tensor_key_infos);
        let mut sk_tensor: GLWESecretTensor<Vec<u8>> = GLWESecretTensor::alloc_from_infos(&sk);
        sk_tensor.prepare(module, &sk, scratch.borrow());

        let mut sk_ij_dft: VecZnxDft<Vec<u8>, BE> = module.vec_znx_dft_alloc(1, 1);
        let mut sk_ij_big: VecZnxBig<Vec<u8>, BE> = module.vec_znx_big_alloc(1, 1);
        let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), 1_u32.into());
        let mut sk_dft: VecZnxDft<Vec<u8>, BE> = module.vec_znx_dft_alloc(rank, 1);

        for i in 0..rank {
            module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
        }

        for i in 0..rank {
            for j in 0..rank {
                module.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_prepared.data, j, &sk_dft, i);
                module.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
                module.vec_znx_big_normalize(
                    base2k,
                    &mut sk_ij.data.as_vec_znx_mut(),
                    0,
                    base2k,
                    &sk_ij_big,
                    0,
                    scratch.borrow(),
                );
                for row_i in 0..dnum {
                    let ct = tensor_key.at(i, j).at(row_i, 0);

                    ct.decrypt(module, &mut pt, &sk_prepared, scratch.borrow());

                    module.vec_znx_sub_scalar_inplace(&mut pt.data, 0, row_i, &sk_ij.data, 0);

                    let std_pt: f64 = pt.data.std(base2k, 0) * (k as f64).exp2();
                    assert!((SIGMA - std_pt).abs() <= 0.5, "{SIGMA} {std_pt}");
                }
            }
        }
        module.gglwe_assert_noise(&tensor_key, &sk_prepared, &sk_tensor.data, SIGMA + 0.5);
    }
}

@@ -118,15 +77,9 @@ where
        + GLWESecretPreparedFactory<BE>
        + GLWETensorKeyCompressedEncryptSk<BE>
        + GLWEDecrypt<BE>
        + VecZnxDftAlloc<BE>
        + VecZnxBigAlloc<BE>
        + VecZnxDftApply<BE>
        + SvpApplyDftToDft<BE>
        + VecZnxIdftApplyTmpA<BE>
        + VecZnxSubScalarInplace
        + VecZnxFillUniform
        + VecZnxCopy
        + VecZnxSwitchRing,
        + GLWESecretTensorFactory<BE>
        + GGLWENoise<BE>
        + GGLWEDecompress,
    ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
    Scratch<BE>: ScratchAvailable + ScratchTakeCore<BE>,
{
@@ -168,42 +121,9 @@ where
        let mut tensor_key: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc_from_infos(&tensor_key_infos);
        tensor_key.decompress(module, &tensor_key_compressed);

        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(&tensor_key_infos);
        let mut sk_tensor: GLWESecretTensor<Vec<u8>> = GLWESecretTensor::alloc_from_infos(&sk);
        sk_tensor.prepare(module, &sk, scratch.borrow());

        let mut sk_ij_dft: VecZnxDft<Vec<u8>, BE> = module.vec_znx_dft_alloc(1, 1);
        let mut sk_ij_big: VecZnxBig<Vec<u8>, BE> = module.vec_znx_big_alloc(1, 1);
        let mut sk_ij: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), 1_u32.into());
        let mut sk_dft: VecZnxDft<Vec<u8>, BE> = module.vec_znx_dft_alloc(rank, 1);

        for i in 0..rank {
            module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
        }

        for i in 0..rank {
            for j in 0..rank {
                module.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_prepared.data, j, &sk_dft, i);
                module.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
                module.vec_znx_big_normalize(
                    base2k,
                    &mut sk_ij.data.as_vec_znx_mut(),
                    0,
                    base2k,
                    &sk_ij_big,
                    0,
                    scratch.borrow(),
                );
                for row_i in 0..dnum {
                    tensor_key
                        .at(i, j)
                        .at(row_i, 0)
                        .decrypt(module, &mut pt, &sk_prepared, scratch.borrow());

                    module.vec_znx_sub_scalar_inplace(&mut pt.data, 0, row_i, &sk_ij.data, 0);

                    let std_pt: f64 = pt.data.std(base2k, 0) * (k as f64).exp2();
                    assert!((SIGMA - std_pt).abs() <= 0.5, "{SIGMA} {std_pt}");
                }
            }
        }
        module.gglwe_assert_noise(&tensor_key, &sk_prepared, &sk_tensor.data, SIGMA + 0.5);
    }
}

@@ -1,11 +1,13 @@
mod gglwe_atk;
mod gglwe_ct;
mod gglwe_to_ggsw_key;
mod ggsw_ct;
mod glwe_ct;
mod glwe_tsk;

pub use gglwe_atk::*;
pub use gglwe_ct::*;
pub use gglwe_to_ggsw_key::*;
pub use ggsw_ct::*;
pub use glwe_ct::*;
pub use glwe_tsk::*;

@@ -5,12 +5,13 @@ use poulpy_hal::{
};

use crate::{
    GGSWEncryptSk, GGSWKeyswitch, GGSWNoise, GLWESwitchingKeyEncryptSk, GLWETensorKeyEncryptSk, ScratchTakeCore,
    GGLWEToGGSWKeyEncryptSk, GGSWEncryptSk, GGSWKeyswitch, GGSWNoise, GLWESwitchingKeyEncryptSk, ScratchTakeCore,
    encryption::SIGMA,
    layouts::{
        GGSW, GGSWLayout, GLWESecret, GLWESecretPreparedFactory, GLWESwitchingKey, GLWESwitchingKeyLayout,
        GLWESwitchingKeyPreparedFactory, GLWETensorKey, GLWETensorKeyLayout, GLWETensorKeyPreparedFactory,
        prepared::{GLWESecretPrepared, GLWESwitchingKeyPrepared, GLWETensorKeyPrepared},
        GGLWEToGGSWKey, GGLWEToGGSWKeyPrepared, GGLWEToGGSWKeyPreparedFactory, GGSW, GGSWLayout, GLWESecret,
        GLWESecretPreparedFactory, GLWESwitchingKey, GLWESwitchingKeyLayout, GLWESwitchingKeyPreparedFactory,
        GLWETensorKeyLayout,
        prepared::{GLWESecretPrepared, GLWESwitchingKeyPrepared},
    },
    noise::noise_ggsw_keyswitch,
};
@@ -20,10 +21,10 @@ pub fn test_ggsw_keyswitch<BE: Backend>(module: &Module<BE>)
where
    Module<BE>: GGSWEncryptSk<BE>
        + GLWESwitchingKeyEncryptSk<BE>
        + GLWETensorKeyEncryptSk<BE>
        + GGLWEToGGSWKeyEncryptSk<BE>
        + GGSWKeyswitch<BE>
        + GLWESecretPreparedFactory<BE>
        + GLWETensorKeyPreparedFactory<BE>
        + GGLWEToGGSWKeyPreparedFactory<BE>
        + GLWESwitchingKeyPreparedFactory<BE>
        + GGSWNoise<BE>,
    ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
@@ -82,7 +83,7 @@ where

    let mut ggsw_in: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_in_infos);
    let mut ggsw_out: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_out_infos);
    let mut tsk: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc_from_infos(&tsk_infos);
    let mut tsk: GGLWEToGGSWKey<Vec<u8>> = GGLWEToGGSWKey::alloc_from_infos(&tsk_infos);
    let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc_from_infos(&ksk_apply_infos);
    let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);

@@ -93,7 +94,7 @@ where
    let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_in_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &ksk_apply_infos)
            | GLWETensorKey::encrypt_sk_tmp_bytes(module, &tsk_infos)
            | GGLWEToGGSWKey::encrypt_sk_tmp_bytes(module, &tsk_infos)
            | GGSW::keyswitch_tmp_bytes(
                module,
                &ggsw_out_infos,
@@ -148,7 +149,7 @@ where
        GLWESwitchingKeyPrepared::alloc_from_infos(module, &ksk);
    ksk_prepared.prepare(module, &ksk, scratch.borrow());

    let mut tsk_prepared: GLWETensorKeyPrepared<Vec<u8>, BE> = GLWETensorKeyPrepared::alloc_from_infos(module, &tsk);
    let mut tsk_prepared: GGLWEToGGSWKeyPrepared<Vec<u8>, BE> = GGLWEToGGSWKeyPrepared::alloc_from_infos(module, &tsk);
    tsk_prepared.prepare(module, &tsk, scratch.borrow());

    ggsw_out.keyswitch(
@@ -185,10 +186,10 @@ pub fn test_ggsw_keyswitch_inplace<BE: Backend>(module: &Module<BE>)
where
    Module<BE>: GGSWEncryptSk<BE>
        + GLWESwitchingKeyEncryptSk<BE>
        + GLWETensorKeyEncryptSk<BE>
        + GGLWEToGGSWKeyEncryptSk<BE>
        + GGSWKeyswitch<BE>
        + GLWESecretPreparedFactory<BE>
        + GLWETensorKeyPreparedFactory<BE>
        + GGLWEToGGSWKeyPreparedFactory<BE>
        + GLWESwitchingKeyPreparedFactory<BE>
        + GGSWNoise<BE>,
    ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
@@ -236,7 +237,7 @@ where
    };

    let mut ggsw_out: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_out_infos);
    let mut tsk: GLWETensorKey<Vec<u8>> = GLWETensorKey::alloc_from_infos(&tsk_infos);
    let mut tsk: GGLWEToGGSWKey<Vec<u8>> = GGLWEToGGSWKey::alloc_from_infos(&tsk_infos);
    let mut ksk: GLWESwitchingKey<Vec<u8>> = GLWESwitchingKey::alloc_from_infos(&ksk_apply_infos);
    let mut pt_scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);

@@ -247,7 +248,7 @@ where
    let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_out_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &ksk_apply_infos)
            | GLWETensorKey::encrypt_sk_tmp_bytes(module, &tsk_infos)
            | GGLWEToGGSWKey::encrypt_sk_tmp_bytes(module, &tsk_infos)
            | GGSW::keyswitch_tmp_bytes(
                module,
                &ggsw_out_infos,
@@ -302,7 +303,7 @@ where
        GLWESwitchingKeyPrepared::alloc_from_infos(module, &ksk);
    ksk_prepared.prepare(module, &ksk, scratch.borrow());

    let mut tsk_prepared: GLWETensorKeyPrepared<Vec<u8>, BE> = GLWETensorKeyPrepared::alloc_from_infos(module, &tsk);
    let mut tsk_prepared: GGLWEToGGSWKeyPrepared<Vec<u8>, BE> = GGLWEToGGSWKeyPrepared::alloc_from_infos(module, &tsk);
    tsk_prepared.prepare(module, &tsk, scratch.borrow());

    ggsw_out.keyswitch_inplace(module, &ksk_prepared, &tsk_prepared, scratch.borrow());

@@ -7,7 +7,7 @@ use poulpy_hal::{
};

use crate::{
    GLWEAutomorphismKeyEncryptSk, GLWEDecrypt, GLWEEncryptSk, GLWEPacker, GLWEPacking, GLWERotate, GLWESub, ScratchTakeCore,
    GLWEAutomorphismKeyEncryptSk, GLWEDecrypt, GLWEEncryptSk, GLWEPacker, GLWEPackerOps, GLWERotate, GLWESub, ScratchTakeCore,
    layouts::{
        GLWE, GLWEAutomorphismKey, GLWEAutomorphismKeyLayout, GLWEAutomorphismKeyPreparedFactory, GLWELayout, GLWEPlaintext,
        GLWESecret, GLWESecretPreparedFactory,
@@ -20,7 +20,7 @@ where
    Module<BE>: GLWEEncryptSk<BE>
        + GLWEAutomorphismKeyEncryptSk<BE>
        + GLWEAutomorphismKeyPreparedFactory<BE>
        + GLWEPacking<BE>
        + GLWEPackerOps<BE>
        + GLWESecretPreparedFactory<BE>
        + GLWESub
        + GLWEDecrypt<BE>