mirror of https://github.com/arnaucube/poulpy.git
Merge branch 'dev_trait' into dev_trait_practice
@@ -1,198 +1,169 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::VecZnxAutomorphism,
|
||||||
ScratchAvailable, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize,
|
layouts::{Backend, DataMut, GaloisElement, Module, Scratch},
|
||||||
VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
|
|
||||||
VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
|
||||||
},
|
|
||||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{AutomorphismKey, GGLWEInfos, GLWE, prepared::AutomorphismKeyPrepared};
|
use crate::{
|
||||||
|
ScratchTakeCore,
|
||||||
|
automorphism::glwe_ct::GLWEAutomorphism,
|
||||||
|
layouts::{
|
||||||
|
AutomorphismKey, AutomorphismKeyToMut, AutomorphismKeyToRef, GGLWEInfos, GLWE, GLWEInfos,
|
||||||
|
prepared::{
|
||||||
|
AutomorphismKeyPrepared, AutomorphismKeyPreparedToRef, GetAutomorphismGaloisElement, SetAutomorphismGaloisElement,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
impl AutomorphismKey<Vec<u8>> {
|
impl AutomorphismKey<Vec<u8>> {
|
||||||
pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY>(
|
pub fn automorphism_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
|
||||||
module: &Module<B>,
|
|
||||||
out_infos: &OUT,
|
|
||||||
in_infos: &IN,
|
|
||||||
key_infos: &KEY,
|
|
||||||
) -> usize
|
|
||||||
where
|
where
|
||||||
OUT: GGLWEInfos,
|
R: GGLWEInfos,
|
||||||
IN: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
KEY: GGLWEInfos,
|
K: GGLWEInfos,
|
||||||
Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
M: AutomorphismKeyAutomorphism<BE>,
|
||||||
{
|
{
|
||||||
GLWE::keyswitch_tmp_bytes(
|
module.automorphism_key_automorphism_tmp_bytes(res_infos, a_infos, key_infos)
|
||||||
module,
|
|
||||||
&out_infos.glwe_layout(),
|
|
||||||
&in_infos.glwe_layout(),
|
|
||||||
key_infos,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
|
|
||||||
where
|
|
||||||
OUT: GGLWEInfos,
|
|
||||||
KEY: GGLWEInfos,
|
|
||||||
Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
|
||||||
{
|
|
||||||
AutomorphismKey::automorphism_tmp_bytes(module, out_infos, out_infos, key_infos)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
|
impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
|
||||||
pub fn automorphism<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
pub fn automorphism<A, K, M, BE: Backend>(&mut self, module: &M, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
A: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
|
||||||
lhs: &AutomorphismKey<DataLhs>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
scratch: &mut Scratch<B>,
|
M: AutomorphismKeyAutomorphism<BE>,
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxAutomorphism
|
|
||||||
+ VecZnxAutomorphismInplace<B>
|
|
||||||
+ VecZnxNormalize<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
module.automorphism_key_automorphism(self, a, key, scratch);
|
||||||
{
|
}
|
||||||
use crate::layouts::LWEInfos;
|
|
||||||
|
pub fn automorphism_inplace<K, M, BE: Backend>(&mut self, module: &M, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
M: AutomorphismKeyAutomorphism<BE>,
|
||||||
|
{
|
||||||
|
module.automorphism_key_automorphism_inplace(self, key, scratch);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<BE: Backend> AutomorphismKeyAutomorphism<BE> for Module<BE> where
|
||||||
|
Self: GaloisElement + GLWEAutomorphism<BE> + VecZnxAutomorphism
|
||||||
|
{
|
||||||
|
}
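// Design note: this blanket impl gives AutomorphismKeyAutomorphism<BE> to every Module<BE>
// that already provides GaloisElement, GLWEAutomorphism<BE> and VecZnxAutomorphism, so the
// public wrappers on AutomorphismKey only need `M: AutomorphismKeyAutomorphism<BE>` instead
// of the long per-method VecZnx*/Vmp* bound lists the previous version carried.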
|
||||||
|
|
||||||
|
pub trait AutomorphismKeyAutomorphism<BE: Backend>
|
||||||
|
where
|
||||||
|
Self: GaloisElement + GLWEAutomorphism<BE> + VecZnxAutomorphism,
|
||||||
|
{
|
||||||
|
fn automorphism_key_automorphism_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
|
||||||
|
where
|
||||||
|
R: GGLWEInfos,
|
||||||
|
A: GGLWEInfos,
|
||||||
|
K: GGLWEInfos,
|
||||||
|
{
|
||||||
|
self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
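// Note: per row, the automorphism of an AutomorphismKey is a GLWE key switch followed by
// in-place vec_znx automorphisms that run on the same scratch afterwards, which is why the
// GLWE key-switch bound is reported as sufficient here.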
|
||||||
|
}
|
||||||
|
|
||||||
|
fn automorphism_key_automorphism<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
R: AutomorphismKeyToMut + SetAutomorphismGaloisElement,
|
||||||
|
A: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
{
|
||||||
|
let res: &mut AutomorphismKey<&mut [u8]> = &mut res.to_mut();
|
||||||
|
let a: &AutomorphismKey<&[u8]> = &a.to_ref();
|
||||||
|
let key: &AutomorphismKeyPrepared<&[u8], _> = &key.to_ref();
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
self.rank_in(),
|
|
||||||
lhs.rank_in(),
|
|
||||||
"ksk_out input rank: {} != ksk_in input rank: {}",
|
|
||||||
self.rank_in(),
|
|
||||||
lhs.rank_in()
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
"ksk_in output rank: {} != ksk_apply input rank: {}",
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_in()
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
"ksk_out output rank: {} != ksk_apply output rank: {}",
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_out()
|
|
||||||
);
|
|
||||||
assert!(
|
assert!(
|
||||||
self.k() <= lhs.k(),
|
res.dnum().as_u32() <= a.dnum().as_u32(),
|
||||||
"output k={} cannot be greater than input k={}",
|
"res dnum: {} > a dnum: {}",
|
||||||
self.k(),
|
res.dnum(),
|
||||||
lhs.k()
|
a.dnum()
|
||||||
)
|
);
|
||||||
}
|
|
||||||
|
|
||||||
let cols_out: usize = (rhs.rank_out() + 1).into();
|
assert_eq!(
|
||||||
|
res.dsize(),
|
||||||
|
a.dsize(),
|
||||||
|
"res dnum: {} != a dnum: {}",
|
||||||
|
res.dsize(),
|
||||||
|
a.dsize()
|
||||||
|
);
|
||||||
|
|
||||||
let p: i64 = lhs.p();
|
let cols_out: usize = (key.rank_out() + 1).into();
|
||||||
let p_inv: i64 = module.galois_element_inv(p);
|
|
||||||
|
|
||||||
(0..self.rank_in().into()).for_each(|col_i| {
|
let p: i64 = a.p();
|
||||||
(0..self.dnum().into()).for_each(|row_j| {
|
let p_inv: i64 = self.galois_element_inv(p);
|
||||||
let mut res_ct: GLWE<&mut [u8]> = self.at_mut(row_j, col_i);
|
|
||||||
let lhs_ct: GLWE<&[u8]> = lhs.at(row_j, col_i);
|
for row in 0..res.dnum().as_usize() {
|
||||||
|
for col in 0..cols_out {
|
||||||
|
let mut res_tmp: GLWE<&mut [u8]> = res.at_mut(row, col);
|
||||||
|
let a_ct: GLWE<&[u8]> = a.at(row, col);
|
||||||
|
|
||||||
// Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
|
// Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
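// (pi_k here denotes the ring automorphism X -> X^k acting on the secret; the three steps
// below conjugate the key switch by that automorphism: apply X -> X^p, key-switch under the
// prepared key, then apply X -> X^{p^{-1}} to every column.)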
|
||||||
(0..cols_out).for_each(|i| {
|
for i in 0..cols_out {
|
||||||
module.vec_znx_automorphism(lhs.p(), &mut res_ct.data, i, &lhs_ct.data, i);
|
self.vec_znx_automorphism(a.p(), res_tmp.data_mut(), i, &a_ct.data, i);
|
||||||
});
|
}
|
||||||
|
|
||||||
// Key-switch (-sa + pi_{k}(s), a) to (-pi^{-1}_{k'}(s)a + pi_{k}(s), a)
|
// Key-switch (-sa + pi_{k}(s), a) to (-pi^{-1}_{k'}(s)a + pi_{k}(s), a)
|
||||||
res_ct.keyswitch_inplace(module, &rhs.key, scratch);
|
self.glwe_keyswitch_inplace(&mut res_tmp, &key.key, scratch);
|
||||||
|
|
||||||
// Applies back the automorphism X^{-k}: (-pi^{-1}_{k'}(s)a + pi_{k}(s), a) to (-pi^{-1}_{k'+k}(s)a + s, a)
|
// Applies back the automorphism X^{-k}: (-pi^{-1}_{k'}(s)a + pi_{k}(s), a) to (-pi^{-1}_{k'+k}(s)a + s, a)
|
||||||
(0..cols_out).for_each(|i| {
|
(0..cols_out).for_each(|i| {
|
||||||
module.vec_znx_automorphism_inplace(p_inv, &mut res_ct.data, i, scratch);
|
self.vec_znx_automorphism_inplace(p_inv, res_tmp.data_mut(), i, scratch);
|
||||||
});
|
});
|
||||||
});
|
}
|
||||||
});
|
}
|
||||||
|
|
||||||
(self.dnum().min(lhs.dnum()).into()..self.dnum().into()).for_each(|row_i| {
|
|
||||||
(0..self.rank_in().into()).for_each(|col_j| {
|
|
||||||
self.at_mut(row_i, col_j).data.zero();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
self.p = (lhs.p * rhs.p) % (module.cyclotomic_order() as i64);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_inplace<DataRhs: DataRef, B: Backend>(
|
res.set_p((a.p() * key.p()) % (self.cyclotomic_order() as i64));
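// Galois elements compose multiplicatively: applying a key for X -> X^{p_key} to an
// AutomorphismKey for X -> X^{p_a} yields a key for X -> X^{p_a * p_key}, reduced modulo the
// cyclotomic order, hence the multiplicative update of p here.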
|
||||||
&mut self,
|
|
||||||
module: &Module<B>,
|
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
|
||||||
scratch: &mut Scratch<B>,
|
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxAutomorphism
|
|
||||||
+ VecZnxAutomorphismInplace<B>
|
|
||||||
+ VecZnxNormalize<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
|
||||||
#[cfg(debug_assertions)]
|
|
||||||
{
|
|
||||||
assert_eq!(
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
"ksk_in output rank: {} != ksk_apply input rank: {}",
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_in()
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
"ksk_out output rank: {} != ksk_apply output rank: {}",
|
|
||||||
self.rank_out(),
|
|
||||||
rhs.rank_out()
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let cols_out: usize = (rhs.rank_out() + 1).into();
|
fn automorphism_key_automorphism_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
R: AutomorphismKeyToMut + SetAutomorphismGaloisElement + GetAutomorphismGaloisElement,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
{
|
||||||
|
let res: &mut AutomorphismKey<&mut [u8]> = &mut res.to_mut();
|
||||||
|
let key: &AutomorphismKeyPrepared<&[u8], _> = &key.to_ref();
|
||||||
|
|
||||||
let p: i64 = self.p();
|
assert_eq!(
|
||||||
let p_inv = module.galois_element_inv(p);
|
res.rank(),
|
||||||
|
key.rank(),
|
||||||
|
"key rank: {} != key rank: {}",
|
||||||
|
res.rank(),
|
||||||
|
key.rank()
|
||||||
|
);
|
||||||
|
|
||||||
(0..self.rank_in().into()).for_each(|col_i| {
|
let cols_out: usize = (key.rank_out() + 1).into();
|
||||||
(0..self.dnum().into()).for_each(|row_j| {
|
|
||||||
let mut res_ct: GLWE<&mut [u8]> = self.at_mut(row_j, col_i);
|
let p: i64 = res.p();
|
||||||
|
let p_inv: i64 = self.galois_element_inv(p);
|
||||||
|
|
||||||
|
for row in 0..res.dnum().as_usize() {
|
||||||
|
for col in 0..cols_out {
|
||||||
|
let mut res_tmp: GLWE<&mut [u8]> = res.at_mut(row, col);
|
||||||
|
|
||||||
// Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
|
// Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
|
||||||
(0..cols_out).for_each(|i| {
|
for i in 0..cols_out {
|
||||||
module.vec_znx_automorphism_inplace(p_inv, &mut res_ct.data, i, scratch);
|
self.vec_znx_automorphism_inplace(p_inv, res_tmp.data_mut(), i, scratch);
|
||||||
});
|
}
|
||||||
|
|
||||||
// Key-switch (-sa + pi_{k}(s), a) to (-pi^{-1}_{k'}(s)a + pi_{k}(s), a)
|
// Key-switch (-sa + pi_{k}(s), a) to (-pi^{-1}_{k'}(s)a + pi_{k}(s), a)
|
||||||
res_ct.keyswitch_inplace(module, &rhs.key, scratch);
|
self.glwe_keyswitch_inplace(&mut res_tmp, &key.key, scratch);
|
||||||
|
|
||||||
// Applies back the automorphism X^{-k}: (-pi^{-1}_{k'}(s)a + pi_{k}(s), a) to (-pi^{-1}_{k'+k}(s)a + s, a)
|
// Applies back the automorphism X^{-k}: (-pi^{-1}_{k'}(s)a + pi_{k}(s), a) to (-pi^{-1}_{k'+k}(s)a + s, a)
|
||||||
(0..cols_out).for_each(|i| {
|
for i in 0..cols_out {
|
||||||
module.vec_znx_automorphism_inplace(p_inv, &mut res_ct.data, i, scratch);
|
self.vec_znx_automorphism_inplace(p_inv, res_tmp.data_mut(), i, scratch);
|
||||||
});
|
}
|
||||||
});
|
}
|
||||||
});
|
}
|
||||||
|
}
|
||||||
|
|
||||||
self.p = (self.p * rhs.p) % (module.cyclotomic_order() as i64);
|
res.set_p((res.p() * key.p()) % (self.cyclotomic_order() as i64));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,165 +1,127 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::ScratchAvailable,
|
||||||
ScratchAvailable, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
|
layouts::{Backend, DataMut, Module, Scratch},
|
||||||
VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume,
|
|
||||||
VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
|
||||||
VmpApplyDftToDftTmpBytes,
|
|
||||||
},
|
|
||||||
layouts::{Backend, DataMut, DataRef, Module, Scratch},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{
|
use crate::{
|
||||||
GGLWEInfos, GGSW, GGSWInfos, GLWE,
|
GGSWExpandRows, ScratchTakeCore,
|
||||||
prepared::{AutomorphismKeyPrepared, TensorKeyPrepared},
|
automorphism::glwe_ct::GLWEAutomorphism,
|
||||||
|
layouts::{
|
||||||
|
GGLWEInfos, GGSW, GGSWInfos, GGSWToMut, GGSWToRef, GLWEInfos, LWEInfos,
|
||||||
|
prepared::{AutomorphismKeyPrepared, AutomorphismKeyPreparedToRef, TensorKeyPrepared, TensorKeyPreparedToRef},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
impl GGSW<Vec<u8>> {
|
impl GGSW<Vec<u8>> {
|
||||||
pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY, TSK>(
|
pub fn automorphism_tmp_bytes<R, A, K, T, M, BE: Backend>(
|
||||||
module: &Module<B>,
|
module: &M,
|
||||||
out_infos: &OUT,
|
res_infos: &R,
|
||||||
in_infos: &IN,
|
a_infos: &A,
|
||||||
key_infos: &KEY,
|
key_infos: &K,
|
||||||
tsk_infos: &TSK,
|
tsk_infos: &T,
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
OUT: GGSWInfos,
|
R: GGSWInfos,
|
||||||
IN: GGSWInfos,
|
A: GGSWInfos,
|
||||||
KEY: GGLWEInfos,
|
K: GGLWEInfos,
|
||||||
TSK: GGLWEInfos,
|
T: GGLWEInfos,
|
||||||
Module<B>:
|
M: GGSWAutomorphism<BE>,
|
||||||
VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
|
|
||||||
{
|
{
|
||||||
let out_size: usize = out_infos.size();
|
module.ggsw_automorphism_tmp_bytes(res_infos, a_infos, key_infos, tsk_infos)
|
||||||
let ci_dft: usize = module.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
|
}
|
||||||
let ks_internal: usize = GLWE::keyswitch_tmp_bytes(
|
|
||||||
module,
|
|
||||||
&out_infos.glwe_layout(),
|
|
||||||
&in_infos.glwe_layout(),
|
|
||||||
key_infos,
|
|
||||||
);
|
|
||||||
let expand: usize = GGSW::expand_row_tmp_bytes(module, out_infos, tsk_infos);
|
|
||||||
ci_dft + (ks_internal | expand)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY, TSK>(
|
impl<D: DataMut> GGSW<D> {
|
||||||
module: &Module<B>,
|
pub fn automorphism<A, K, T, M, BE: Backend>(&mut self, module: &M, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||||
out_infos: &OUT,
|
|
||||||
key_infos: &KEY,
|
|
||||||
tsk_infos: &TSK,
|
|
||||||
) -> usize
|
|
||||||
where
|
where
|
||||||
OUT: GGSWInfos,
|
A: GGSWToRef,
|
||||||
KEY: GGLWEInfos,
|
K: AutomorphismKeyPreparedToRef<BE>,
|
||||||
TSK: GGLWEInfos,
|
T: TensorKeyPreparedToRef<BE>,
|
||||||
Module<B>:
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
|
M: GGSWAutomorphism<BE>,
|
||||||
{
|
{
|
||||||
GGSW::automorphism_tmp_bytes(module, out_infos, out_infos, key_infos, tsk_infos)
|
module.ggsw_automorphism(self, a, key, tsk, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn automorphism_inplace<K, T, M, BE: Backend>(&mut self, module: &M, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE>,
|
||||||
|
T: TensorKeyPreparedToRef<BE>,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
M: GGSWAutomorphism<BE>,
|
||||||
|
{
|
||||||
|
module.ggsw_automorphism_inplace(self, key, tsk, scratch);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<DataSelf: DataMut> GGSW<DataSelf> {
|
impl<BE: Backend> GGSWAutomorphism<BE> for Module<BE> where Self: GLWEAutomorphism<BE> + GGSWExpandRows<BE> {}
|
||||||
pub fn automorphism<DataLhs: DataRef, DataAk: DataRef, DataTsk: DataRef, B: Backend>(
|
|
||||||
&mut self,
|
|
||||||
module: &Module<B>,
|
|
||||||
lhs: &GGSW<DataLhs>,
|
|
||||||
auto_key: &AutomorphismKeyPrepared<DataAk, B>,
|
|
||||||
tensor_key: &TensorKeyPrepared<DataTsk, B>,
|
|
||||||
scratch: &mut Scratch<B>,
|
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxAutomorphismInplace<B>
|
|
||||||
+ VecZnxBigBytesOf
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxDftCopy<B>
|
|
||||||
+ VecZnxDftAddInplace<B>
|
|
||||||
+ VecZnxIdftApplyTmpA<B>
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
|
||||||
#[cfg(debug_assertions)]
|
|
||||||
{
|
|
||||||
use crate::layouts::{GLWEInfos, LWEInfos};
|
|
||||||
|
|
||||||
assert_eq!(self.n(), module.n() as u32);
|
pub trait GGSWAutomorphism<BE: Backend>
|
||||||
assert_eq!(lhs.n(), module.n() as u32);
|
where
|
||||||
assert_eq!(auto_key.n(), module.n() as u32);
|
Self: GLWEAutomorphism<BE> + GGSWExpandRows<BE>,
|
||||||
assert_eq!(tensor_key.n(), module.n() as u32);
|
{
|
||||||
|
fn ggsw_automorphism_tmp_bytes<R, A, K, T>(&self, res_infos: &R, a_infos: &A, key_infos: &K, tsk_infos: &T) -> usize
|
||||||
|
where
|
||||||
|
R: GGSWInfos,
|
||||||
|
A: GGSWInfos,
|
||||||
|
K: GGLWEInfos,
|
||||||
|
T: GGLWEInfos,
|
||||||
|
{
|
||||||
|
let out_size: usize = res_infos.size();
|
||||||
|
let ci_dft: usize = self.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
|
||||||
|
let ks_internal: usize = self.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos);
|
||||||
|
let expand: usize = self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos);
|
||||||
|
ci_dft + (ks_internal.max(expand))
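// The DFT buffer (ci_dft) is reserved on top of whichever of the two phases needs more
// scratch; the key switch and the row expansion run one after the other and can reuse the
// same region, so `.max()` replaces the old bitwise-or `ks_internal | expand`, which only
// happened to over-approximate the larger of the two sizes.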
|
||||||
|
}
|
||||||
|
|
||||||
assert_eq!(
|
fn ggsw_automorphism<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||||
self.rank(),
|
where
|
||||||
lhs.rank(),
|
R: GGSWToMut,
|
||||||
"ggsw_out rank: {} != ggsw_in rank: {}",
|
A: GGSWToRef,
|
||||||
self.rank(),
|
K: AutomorphismKeyPreparedToRef<BE>,
|
||||||
lhs.rank()
|
T: TensorKeyPreparedToRef<BE>,
|
||||||
);
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
assert_eq!(
|
{
|
||||||
self.rank(),
|
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
|
||||||
auto_key.rank_out(),
|
let a: &GGSW<&[u8]> = &a.to_ref();
|
||||||
"ggsw_in rank: {} != auto_key rank: {}",
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
self.rank(),
|
let tsk: &TensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
|
||||||
auto_key.rank_out()
|
|
||||||
);
|
assert_eq!(res.ggsw_layout(), a.ggsw_layout());
|
||||||
assert_eq!(
|
assert_eq!(res.glwe_layout(), a.glwe_layout());
|
||||||
self.rank(),
|
assert_eq!(res.lwe_layout(), a.lwe_layout());
|
||||||
tensor_key.rank_out(),
|
assert!(scratch.available() >= self.ggsw_automorphism_tmp_bytes(res, a, key, tsk));
|
||||||
"ggsw_in rank: {} != tensor_key rank: {}",
|
|
||||||
self.rank(),
|
|
||||||
tensor_key.rank_out()
|
|
||||||
);
|
|
||||||
assert!(scratch.available() >= GGSW::automorphism_tmp_bytes(module, self, lhs, auto_key, tensor_key))
|
|
||||||
};
|
|
||||||
|
|
||||||
// Keyswitch the j-th row of the col 0
|
// Key-switch the j-th row of column 0
|
||||||
(0..lhs.dnum().into()).for_each(|row_i| {
|
for row in 0..res.dnum().as_usize() {
|
||||||
// Key-switch column 0, i.e.
|
// Key-switch column 0, i.e.
|
||||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0pi^-1(s0) + a1pi^-1(s1) + a2pi^-1(s2)) + M[i], a0, a1, a2)
|
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0pi^-1(s0) + a1pi^-1(s1) + a2pi^-1(s2)) + M[i], a0, a1, a2)
|
||||||
self.at_mut(row_i, 0)
|
self.glwe_automorphism(&mut res.at_mut(row, 0), &a.at(row, 0), key, scratch);
|
||||||
.automorphism(module, &lhs.at(row_i, 0), auto_key, scratch);
|
|
||||||
});
|
|
||||||
self.expand_row(module, tensor_key, scratch);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
|
self.ggsw_expand_row(res, tsk, scratch);
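// Only column 0 of each row is key-switched under the automorphism key; the remaining GGSW
// columns are then reconstructed from column 0 with the tensor key via ggsw_expand_row, so a
// per-row GLWE automorphism on column 0 is all that is needed before the expansion.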
|
||||||
&mut self,
|
}
|
||||||
module: &Module<B>,
|
|
||||||
auto_key: &AutomorphismKeyPrepared<DataKsk, B>,
|
fn ggsw_automorphism_inplace<R, K, T>(&self, res: &mut R, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
|
||||||
tensor_key: &TensorKeyPrepared<DataTsk, B>,
|
where
|
||||||
scratch: &mut Scratch<B>,
|
R: GGSWToMut,
|
||||||
) where
|
K: AutomorphismKeyPreparedToRef<BE>,
|
||||||
Module<B>: VecZnxDftBytesOf
|
T: TensorKeyPreparedToRef<BE>,
|
||||||
+ VmpApplyDftToDftTmpBytes
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxAutomorphismInplace<B>
|
|
||||||
+ VecZnxBigBytesOf
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxDftCopy<B>
|
|
||||||
+ VecZnxDftAddInplace<B>
|
|
||||||
+ VecZnxIdftApplyTmpA<B>
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
|
let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
|
||||||
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
|
let tsk: &TensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
|
||||||
|
|
||||||
// Keyswitch the j-th row of the col 0
|
// Key-switch the j-th row of column 0
|
||||||
(0..self.dnum().into()).for_each(|row_i| {
|
for row in 0..res.dnum().as_usize() {
|
||||||
// Key-switch column 0, i.e.
|
// Key-switch column 0, i.e.
|
||||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0pi^-1(s0) + a1pi^-1(s1) + a2pi^-1(s2)) + M[i], a0, a1, a2)
|
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0pi^-1(s0) + a1pi^-1(s1) + a2pi^-1(s2)) + M[i], a0, a1, a2)
|
||||||
self.at_mut(row_i, 0)
|
self.glwe_automorphism_inplace(&mut res.at_mut(row, 0), key, scratch);
|
||||||
.automorphism_inplace(module, auto_key, scratch);
|
}
|
||||||
});
|
|
||||||
self.expand_row(module, tensor_key, scratch);
|
self.ggsw_expand_row(res, tsk, scratch);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<DataSelf: DataMut> GGSW<DataSelf> {}
|
||||||
|
|||||||
@@ -1,345 +1,331 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ScratchAvailable, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
|
ScratchTakeBasic, VecZnxAutomorphismInplace, VecZnxBigAutomorphismInplace, VecZnxBigSubSmallInplace,
|
||||||
VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace, VecZnxDftApply, VecZnxDftBytesOf,
|
VecZnxBigSubSmallNegateInplace,
|
||||||
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
|
||||||
VmpApplyDftToDftTmpBytes,
|
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig},
|
layouts::{Backend, DataMut, Module, Scratch, VecZnxBig},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared};
|
use crate::{
|
||||||
|
GLWEKeyswitch, ScratchTakeCore, keyswitch_internal,
|
||||||
|
layouts::{
|
||||||
|
GGLWEInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
|
||||||
|
prepared::{AutomorphismKeyPrepared, AutomorphismKeyPreparedToRef, GetAutomorphismGaloisElement},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
impl GLWE<Vec<u8>> {
|
impl GLWE<Vec<u8>> {
|
||||||
pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY>(
|
pub fn automorphism_tmp_bytes<M, R, A, K, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
|
||||||
module: &Module<B>,
|
|
||||||
out_infos: &OUT,
|
|
||||||
in_infos: &IN,
|
|
||||||
key_infos: &KEY,
|
|
||||||
) -> usize
|
|
||||||
where
|
where
|
||||||
OUT: GLWEInfos,
|
R: GLWEInfos,
|
||||||
IN: GLWEInfos,
|
A: GLWEInfos,
|
||||||
KEY: GGLWEInfos,
|
K: GGLWEInfos,
|
||||||
Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
M: GLWEAutomorphism<BE>,
|
||||||
{
|
{
|
||||||
Self::keyswitch_tmp_bytes(module, out_infos, in_infos, key_infos)
|
module.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos)
|
||||||
}
|
|
||||||
|
|
||||||
pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
|
|
||||||
where
|
|
||||||
OUT: GLWEInfos,
|
|
||||||
KEY: GGLWEInfos,
|
|
||||||
Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
|
||||||
{
|
|
||||||
Self::keyswitch_inplace_tmp_bytes(module, out_infos, key_infos)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<DataSelf: DataMut> GLWE<DataSelf> {
|
impl<DataSelf: DataMut> GLWE<DataSelf> {
|
||||||
pub fn automorphism<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
pub fn automorphism<M, A, K, BE: Backend>(&mut self, module: &M, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
M: GLWEAutomorphism<BE>,
|
||||||
lhs: &GLWE<DataLhs>,
|
A: GLWEToRef,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
scratch: &mut Scratch<B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxAutomorphismInplace<B>
|
|
||||||
+ VecZnxNormalize<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
self.keyswitch(module, lhs, &rhs.key, scratch);
|
module.glwe_automorphism(self, a, key, scratch);
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
|
||||||
module.vec_znx_automorphism_inplace(rhs.p(), &mut self.data, i, scratch);
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_inplace<DataRhs: DataRef, B: Backend>(
|
pub fn automorphism_add<M, A, K, BE: Backend>(&mut self, module: &M, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
M: GLWEAutomorphism<BE>,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
A: GLWEToRef,
|
||||||
scratch: &mut Scratch<B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
) where
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxAutomorphismInplace<B>
|
|
||||||
+ VecZnxNormalize<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
self.keyswitch_inplace(module, &rhs.key, scratch);
|
module.glwe_automorphism_add(self, a, key, scratch);
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
|
||||||
module.vec_znx_automorphism_inplace(rhs.p(), &mut self.data, i, scratch);
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_add<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
pub fn automorphism_sub<M, A, K, BE: Backend>(&mut self, module: &M, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
M: GLWEAutomorphism<BE>,
|
||||||
lhs: &GLWE<DataLhs>,
|
A: GLWEToRef,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
scratch: &mut Scratch<B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
module.glwe_automorphism_sub(self, a, key, scratch);
|
||||||
{
|
|
||||||
self.assert_keyswitch(module, lhs, &rhs.key, scratch);
|
|
||||||
}
|
}
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
|
|
||||||
let mut res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
|
pub fn automorphism_sub_negate<M, A, K, BE: Backend>(&mut self, module: &M, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
where
|
||||||
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
|
M: GLWEAutomorphism<BE>,
|
||||||
module.vec_znx_big_add_small_inplace(&mut res_big, i, &lhs.data, i);
|
A: GLWEToRef,
|
||||||
module.vec_znx_big_normalize(
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
self.base2k().into(),
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
&mut self.data,
|
{
|
||||||
|
module.glwe_automorphism_sub_negate(self, a, key, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn automorphism_inplace<M, K, BE: Backend>(&mut self, module: &M, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
M: GLWEAutomorphism<BE>,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
module.glwe_automorphism_inplace(self, key, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn automorphism_add_inplace<M, K, BE: Backend>(&mut self, module: &M, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
M: GLWEAutomorphism<BE>,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
module.glwe_automorphism_add_inplace(self, key, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn automorphism_sub_inplace<M, K, BE: Backend>(&mut self, module: &M, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
M: GLWEAutomorphism<BE>,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
module.glwe_automorphism_sub_inplace(self, key, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn automorphism_sub_negate_inplace<M, K, BE: Backend>(&mut self, module: &M, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
M: GLWEAutomorphism<BE>,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
module.glwe_automorphism_sub_negate_inplace(self, key, scratch);
|
||||||
|
}
|
||||||
|
}
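// Usage sketch (illustrative; the variable names below are not from this diff). With the
// trait-based API the module drives the operation, so given `module: Module<BE>`, a prepared
// key `atk: AutomorphismKeyPrepared<Vec<u8>, BE>`, ciphertexts `ct_out`/`ct_in: GLWE<Vec<u8>>`
// and a large enough `scratch`, the wrapper and the trait method are interchangeable:
//
//     ct_out.automorphism(&module, &ct_in, &atk, &mut scratch);
//     module.glwe_automorphism(&mut ct_out, &ct_in, &atk, &mut scratch);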
|
||||||
|
|
||||||
|
pub trait GLWEAutomorphism<BE: Backend>
|
||||||
|
where
|
||||||
|
Self: GLWEKeyswitch<BE>
|
||||||
|
+ VecZnxAutomorphismInplace<BE>
|
||||||
|
+ VecZnxBigAutomorphismInplace<BE>
|
||||||
|
+ VecZnxBigSubSmallInplace<BE>
|
||||||
|
+ VecZnxBigSubSmallNegateInplace<BE>,
|
||||||
|
{
|
||||||
|
fn glwe_automorphism_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
|
||||||
|
where
|
||||||
|
R: GLWEInfos,
|
||||||
|
A: GLWEInfos,
|
||||||
|
K: GGLWEInfos,
|
||||||
|
{
|
||||||
|
self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn glwe_automorphism<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
R: GLWEToMut,
|
||||||
|
A: GLWEToRef,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
self.glwe_keyswitch(res, a, &key.to_ref().key, scratch);
|
||||||
|
|
||||||
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
|
|
||||||
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
|
self.vec_znx_automorphism_inplace(key.p(), res.data_mut(), i, scratch);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn glwe_automorphism_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
R: GLWEToMut,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
self.glwe_keyswitch_inplace(res, &key.to_ref().key, scratch);
|
||||||
|
|
||||||
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
|
|
||||||
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
|
self.vec_znx_automorphism_inplace(key.p(), res.data_mut(), i, scratch);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn glwe_automorphism_add<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
|
where
|
||||||
|
R: GLWEToMut,
|
||||||
|
A: GLWEToRef,
|
||||||
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
|
{
|
||||||
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
|
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||||
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
|
|
||||||
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
|
||||||
|
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, a, &key.key, scratch_1);
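// The key-switched columns stay in the big (unnormalized) domain: the automorphism
// X -> X^{p} is applied there, `a`'s columns are folded in with vec_znx_big_add_small_inplace,
// and a single vec_znx_big_normalize per column writes the result into `res`.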
|
||||||
|
|
||||||
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
|
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
|
||||||
|
self.vec_znx_big_add_small_inplace(&mut res_big, i, a.data(), i);
|
||||||
|
self.vec_znx_big_normalize(
|
||||||
|
res.base2k().into(),
|
||||||
|
res.data_mut(),
|
||||||
i,
|
i,
|
||||||
rhs.base2k().into(),
|
key.base2k().into(),
|
||||||
&res_big,
|
&res_big,
|
||||||
i,
|
i,
|
||||||
scratch_1,
|
scratch_1,
|
||||||
);
|
);
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_add_inplace<DataRhs: DataRef, B: Backend>(
|
fn glwe_automorphism_add_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
R: GLWEToMut,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
scratch: &mut Scratch<B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
{
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
self.assert_keyswitch_inplace(module, &rhs.key, scratch);
|
|
||||||
}
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
|
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, res, &key.key, scratch_1);
|
||||||
let mut res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
|
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
|
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
|
||||||
module.vec_znx_big_add_small_inplace(&mut res_big, i, &self.data, i);
|
self.vec_znx_big_add_small_inplace(&mut res_big, i, res.data(), i);
|
||||||
module.vec_znx_big_normalize(
|
self.vec_znx_big_normalize(
|
||||||
self.base2k().into(),
|
res.base2k().into(),
|
||||||
&mut self.data,
|
res.data_mut(),
|
||||||
i,
|
i,
|
||||||
rhs.base2k().into(),
|
key.base2k().into(),
|
||||||
&res_big,
|
&res_big,
|
||||||
i,
|
i,
|
||||||
scratch_1,
|
scratch_1,
|
||||||
);
|
);
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_sub_ab<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
fn glwe_automorphism_sub<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
R: GLWEToMut,
|
||||||
lhs: &GLWE<DataLhs>,
|
A: GLWEToRef,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
scratch: &mut Scratch<B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
|
||||||
+ VecZnxBigSubSmallInplace<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
{
|
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||||
self.assert_keyswitch(module, lhs, &rhs.key, scratch);
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
}
|
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
|
||||||
let mut res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
|
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, a, &key.key, scratch_1);
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
|
||||||
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
module.vec_znx_big_sub_small_inplace(&mut res_big, i, &lhs.data, i);
|
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
|
||||||
module.vec_znx_big_normalize(
|
self.vec_znx_big_sub_small_inplace(&mut res_big, i, a.data(), i);
|
||||||
self.base2k().into(),
|
self.vec_znx_big_normalize(
|
||||||
&mut self.data,
|
res.base2k().into(),
|
||||||
|
res.data_mut(),
|
||||||
i,
|
i,
|
||||||
rhs.base2k().into(),
|
key.base2k().into(),
|
||||||
&res_big,
|
&res_big,
|
||||||
i,
|
i,
|
||||||
scratch_1,
|
scratch_1,
|
||||||
);
|
);
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_sub_inplace<DataRhs: DataRef, B: Backend>(
|
fn glwe_automorphism_sub_negate<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
R: GLWEToMut,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
A: GLWEToRef,
|
||||||
scratch: &mut Scratch<B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
) where
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
|
||||||
+ VecZnxBigSubSmallInplace<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
{
|
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||||
self.assert_keyswitch_inplace(module, &rhs.key, scratch);
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
}
|
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
|
||||||
let mut res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
|
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, a, &key.key, scratch_1);
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
|
||||||
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
module.vec_znx_big_sub_small_inplace(&mut res_big, i, &self.data, i);
|
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
|
||||||
module.vec_znx_big_normalize(
|
self.vec_znx_big_sub_small_negate_inplace(&mut res_big, i, a.data(), i);
|
||||||
self.base2k().into(),
|
self.vec_znx_big_normalize(
|
||||||
&mut self.data,
|
res.base2k().into(),
|
||||||
|
res.data_mut(),
|
||||||
i,
|
i,
|
||||||
rhs.base2k().into(),
|
key.base2k().into(),
|
||||||
&res_big,
|
&res_big,
|
||||||
i,
|
i,
|
||||||
scratch_1,
|
scratch_1,
|
||||||
);
|
);
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_sub_negate<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
fn glwe_automorphism_sub_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
R: GLWEToMut,
|
||||||
lhs: &GLWE<DataLhs>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
scratch: &mut Scratch<B>,
|
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
|
||||||
+ VecZnxBigSubSmallNegateInplace<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
{
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
self.assert_keyswitch(module, lhs, &rhs.key, scratch);
|
|
||||||
}
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
|
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, res, &key.key, scratch_1);
|
||||||
let mut res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
|
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
|
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
|
||||||
module.vec_znx_big_sub_small_negate_inplace(&mut res_big, i, &lhs.data, i);
|
self.vec_znx_big_sub_small_inplace(&mut res_big, i, res.data(), i);
|
||||||
module.vec_znx_big_normalize(
|
self.vec_znx_big_normalize(
|
||||||
self.base2k().into(),
|
res.base2k().into(),
|
||||||
&mut self.data,
|
res.data_mut(),
|
||||||
i,
|
i,
|
||||||
rhs.base2k().into(),
|
key.base2k().into(),
|
||||||
&res_big,
|
&res_big,
|
||||||
i,
|
i,
|
||||||
scratch_1,
|
scratch_1,
|
||||||
);
|
);
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn automorphism_sub_negate_inplace<DataRhs: DataRef, B: Backend>(
|
fn glwe_automorphism_sub_negate_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
|
||||||
&mut self,
|
where
|
||||||
module: &Module<B>,
|
R: GLWEToMut,
|
||||||
rhs: &AutomorphismKeyPrepared<DataRhs, B>,
|
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
|
||||||
scratch: &mut Scratch<B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
) where
|
|
||||||
Module<B>: VecZnxDftBytesOf
|
|
||||||
+ VmpApplyDftToDftTmpBytes
|
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
|
||||||
+ VmpApplyDftToDft<B>
|
|
||||||
+ VmpApplyDftToDftAdd<B>
|
|
||||||
+ VecZnxDftApply<B>
|
|
||||||
+ VecZnxIdftApplyConsume<B>
|
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
|
||||||
+ VecZnxBigNormalize<B>
|
|
||||||
+ VecZnxBigAutomorphismInplace<B>
|
|
||||||
+ VecZnxBigSubSmallNegateInplace<B>
|
|
||||||
+ VecZnxNormalizeTmpBytes
|
|
||||||
+ VecZnxNormalize<B>,
|
|
||||||
Scratch<B>: ScratchAvailable,
|
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||||
{
|
let key: &AutomorphismKeyPrepared<&[u8], BE> = &key.to_ref();
|
||||||
self.assert_keyswitch_inplace(module, &rhs.key, scratch);
|
|
||||||
}
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), key.size()); // TODO: optimise size
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
|
let mut res_big: VecZnxBig<_, BE> = keyswitch_internal(self, res_dft, res, &key.key, scratch_1);
|
||||||
let mut res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
|
|
||||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
for i in 0..res.rank().as_usize() + 1 {
|
||||||
module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
|
self.vec_znx_big_automorphism_inplace(key.p(), &mut res_big, i, scratch_1);
|
||||||
module.vec_znx_big_sub_small_negate_inplace(&mut res_big, i, &self.data, i);
|
self.vec_znx_big_sub_small_negate_inplace(&mut res_big, i, res.data(), i);
|
||||||
module.vec_znx_big_normalize(
|
self.vec_znx_big_normalize(
|
||||||
self.base2k().into(),
|
res.base2k().into(),
|
||||||
&mut self.data,
|
res.data_mut(),
|
||||||
i,
|
i,
|
||||||
rhs.base2k().into(),
|
key.base2k().into(),
|
||||||
&res_big,
|
&res_big,
|
||||||
i,
|
i,
|
||||||
scratch_1,
|
scratch_1,
|
||||||
);
|
);
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<BE: Backend> GLWEAutomorphism<BE> for Module<BE> where
|
||||||
|
Self: GLWEKeyswitch<BE>
|
||||||
|
+ VecZnxAutomorphismInplace<BE>
|
||||||
|
+ VecZnxBigAutomorphismInplace<BE>
|
||||||
|
+ VecZnxBigSubSmallInplace<BE>
|
||||||
|
+ VecZnxBigSubSmallNegateInplace<BE>
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,3 +1,7 @@
|
|||||||
mod gglwe_atk;
|
mod gglwe_atk;
|
||||||
mod ggsw_ct;
|
mod ggsw_ct;
|
||||||
mod glwe_ct;
|
mod glwe_ct;
|
||||||
|
|
||||||
|
pub use gglwe_atk::*;
|
||||||
|
pub use ggsw_ct::*;
|
||||||
|
pub use glwe_ct::*;
|
||||||
|
|||||||
@@ -1,19 +1,18 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAddInplace,
|
ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftAddInplace, VecZnxDftApply,
|
||||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes,
|
VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||||
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, Module, Scratch, VmpPMat, ZnxInfos},
|
layouts::{Backend, DataMut, Module, Scratch, VmpPMat, ZnxInfos},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
ScratchTakeCore,
|
GLWECopy, ScratchTakeCore,
|
||||||
layouts::{
|
layouts::{
|
||||||
GGLWE, GGLWEInfos, GGLWEToRef, GGSW, GGSWInfos, GGSWToMut, GLWEInfos, LWEInfos,
|
GGLWE, GGLWEInfos, GGLWEToRef, GGSW, GGSWInfos, GGSWToMut, GLWEInfos, LWEInfos,
|
||||||
prepared::{TensorKeyPrepared, TensorKeyPreparedToRef},
|
prepared::{TensorKeyPrepared, TensorKeyPreparedToRef},
|
||||||
},
|
},
|
||||||
operations::GLWEOperations,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
impl GGLWE<Vec<u8>> {
|
impl GGLWE<Vec<u8>> {
|
||||||
@@ -39,11 +38,11 @@ impl<D: DataMut> GGSW<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<BE: Backend> GGSWFromGGLWE<BE> for Module<BE> where Self: GGSWExpandRows<BE> + VecZnxCopy {}
|
impl<BE: Backend> GGSWFromGGLWE<BE> for Module<BE> where Self: GGSWExpandRows<BE> + GLWECopy {}
|
||||||
|
|
||||||
pub trait GGSWFromGGLWE<BE: Backend>
|
pub trait GGSWFromGGLWE<BE: Backend>
|
||||||
where
|
where
|
||||||
Self: GGSWExpandRows<BE> + VecZnxCopy,
|
Self: GGSWExpandRows<BE> + GLWECopy,
|
||||||
{
|
{
|
||||||
fn ggsw_from_gglwe_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
|
fn ggsw_from_gglwe_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
|
||||||
where
|
where
|
||||||
@@ -71,7 +70,7 @@ where
|
|||||||
assert_eq!(tsk.n(), self.n() as u32);
|
assert_eq!(tsk.n(), self.n() as u32);
|
||||||
|
|
||||||
for row in 0..res.dnum().into() {
|
for row in 0..res.dnum().into() {
|
||||||
res.at_mut(row, 0).copy(self, &a.at(row, 0));
|
self.glwe_copy(&mut res.at_mut(row, 0), &a.at(row, 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
self.ggsw_expand_row(res, tsk, scratch);
|
self.ggsw_expand_row(res, tsk, scratch);
|
||||||
|
|||||||
@@ -141,3 +141,18 @@ impl<DataSelf: DataMut> TensorKeyCompressed<DataSelf> {
|
|||||||
module.gglwe_tensor_key_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
|
module.gglwe_tensor_key_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<DataSelf: DataMut> TensorKeyCompressed<DataSelf> {
|
||||||
|
pub fn encrypt_sk<DataSk: DataRef, B: Backend>(
|
||||||
|
&mut self,
|
||||||
|
module: &Module<B>,
|
||||||
|
sk: &GLWESecret<DataSk>,
|
||||||
|
seed_xa: [u8; 32],
|
||||||
|
source_xe: &mut Source,
|
||||||
|
scratch: &mut Scratch<B>,
|
||||||
|
) where
|
||||||
|
Module<B>: GGLWETensorKeyCompressedEncryptSk<B>,
|
||||||
|
{
|
||||||
|
module.gglwe_tensor_key_encrypt_sk(self, sk, seed_xa, source_xe, scratch);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ use poulpy_hal::{
|
|||||||
VecZnxSwitchRing,
|
VecZnxSwitchRing,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, Module, Scratch},
|
layouts::{Backend, DataMut, Module, Scratch},
|
||||||
|
layouts::{Backend, DataMut, Module, Scratch},
|
||||||
source::Source,
|
source::Source,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -16,6 +17,8 @@ use crate::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
impl AutomorphismKey<Vec<u8>> {
|
||||||
|
pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
|
||||||
impl AutomorphismKey<Vec<u8>> {
|
impl AutomorphismKey<Vec<u8>> {
|
||||||
pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
|
pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
@@ -28,8 +31,10 @@ impl AutomorphismKey<Vec<u8>> {
|
|||||||
"rank_in != rank_out is not supported for GGLWEAutomorphismKey"
|
"rank_in != rank_out is not supported for GGLWEAutomorphismKey"
|
||||||
);
|
);
|
||||||
GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
|
GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
|
||||||
|
GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
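// Scratch for the key encryption covers the switching-key encryption itself plus one
// temporary GLWE secret: encrypt_sk below materialises pi_{p^{-1}}(s) in a secret taken from
// scratch (see the take_glwe_secret / vec_znx_automorphism(galois_element_inv(p), ...) calls
// further down) before encrypting.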
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
|
||||||
pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
|
pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
@@ -40,6 +45,7 @@ impl AutomorphismKey<Vec<u8>> {
|
|||||||
"rank_in != rank_out is not supported for GGLWEAutomorphismKey"
|
"rank_in != rank_out is not supported for GGLWEAutomorphismKey"
|
||||||
);
|
);
|
||||||
GLWESwitchingKey::encrypt_pk_tmp_bytes(module, _infos)
|
GLWESwitchingKey::encrypt_pk_tmp_bytes(module, _infos)
|
||||||
|
GLWESwitchingKey::encrypt_pk_tmp_bytes(module, _infos)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,6 +63,25 @@ pub trait GGLWEAutomorphismKeyEncryptSk<BE: Backend> {
|
|||||||
B: GLWESecretToRef;
|
B: GLWESecretToRef;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<DM: DataMut> AutomorphismKey<DM>
|
||||||
|
where
|
||||||
|
Self: AutomorphismKeyToMut,
|
||||||
|
{
|
||||||
|
pub fn encrypt_sk<S, BE: Backend>(
|
||||||
|
pub trait GGLWEAutomorphismKeyEncryptSk<BE: Backend> {
|
||||||
|
fn gglwe_automorphism_key_encrypt_sk<A, B>(
|
||||||
|
&self,
|
||||||
|
res: &mut A,
|
||||||
|
p: i64,
|
||||||
|
sk: &B,
|
||||||
|
source_xa: &mut Source,
|
||||||
|
source_xe: &mut Source,
|
||||||
|
scratch: &mut Scratch<BE>,
|
||||||
|
) where
|
||||||
|
A: AutomorphismKeyToMut,
|
||||||
|
B: GLWESecretToRef;
|
||||||
|
}
|
||||||
|
|
||||||
impl<DM: DataMut> AutomorphismKey<DM>
|
impl<DM: DataMut> AutomorphismKey<DM>
|
||||||
where
|
where
|
||||||
Self: AutomorphismKeyToMut,
|
Self: AutomorphismKeyToMut,
|
||||||
@@ -64,11 +89,14 @@ where
|
|||||||
pub fn encrypt_sk<S, BE: Backend>(
|
pub fn encrypt_sk<S, BE: Backend>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<BE>,
|
module: &Module<BE>,
|
||||||
|
module: &Module<BE>,
|
||||||
p: i64,
|
p: i64,
|
||||||
sk: &S,
|
sk: &S,
|
||||||
|
sk: &S,
|
||||||
source_xa: &mut Source,
|
source_xa: &mut Source,
|
||||||
source_xe: &mut Source,
|
source_xe: &mut Source,
|
||||||
scratch: &mut Scratch<BE>,
|
scratch: &mut Scratch<BE>,
|
||||||
|
scratch: &mut Scratch<BE>,
|
||||||
) where
|
) where
|
||||||
S: GLWESecretToRef,
|
S: GLWESecretToRef,
|
||||||
Module<BE>: GGLWEAutomorphismKeyEncryptSk<BE>,
|
Module<BE>: GGLWEAutomorphismKeyEncryptSk<BE>,
|
||||||
@@ -121,20 +149,29 @@ where
|
|||||||
{
|
{
|
||||||
use crate::layouts::{GLWEInfos, LWEInfos};
|
use crate::layouts::{GLWEInfos, LWEInfos};
|
||||||
|
|
||||||
|
assert_eq!(res.n(), sk.n());
|
||||||
|
assert_eq!(res.rank_out(), res.rank_in());
|
||||||
|
assert_eq!(sk.rank(), res.rank_out());
|
||||||
assert_eq!(res.n(), sk.n());
|
assert_eq!(res.n(), sk.n());
|
||||||
assert_eq!(res.rank_out(), res.rank_in());
|
assert_eq!(res.rank_out(), res.rank_in());
|
||||||
assert_eq!(sk.rank(), res.rank_out());
|
assert_eq!(sk.rank(), res.rank_out());
|
||||||
assert!(
|
assert!(
|
||||||
|
scratch.available() >= AutomorphismKey::encrypt_sk_tmp_bytes(self, res),
|
||||||
|
"scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {:?}",
|
||||||
scratch.available() >= AutomorphismKey::encrypt_sk_tmp_bytes(self, res),
|
scratch.available() >= AutomorphismKey::encrypt_sk_tmp_bytes(self, res),
|
||||||
"scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {:?}",
|
"scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {:?}",
|
||||||
scratch.available(),
|
scratch.available(),
|
||||||
AutomorphismKey::encrypt_sk_tmp_bytes(self, res)
|
AutomorphismKey::encrypt_sk_tmp_bytes(self, res)
|
||||||
|
AutomorphismKey::encrypt_sk_tmp_bytes(self, res)
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(self, sk.rank());
|
let (mut sk_out, scratch_1) = scratch.take_glwe_secret(self, sk.rank());
|
||||||
|
|
||||||
{
|
{
|
||||||
|
(0..res.rank_out().into()).for_each(|i| {
|
||||||
|
self.vec_znx_automorphism(
|
||||||
|
self.galois_element_inv(p),
|
||||||
(0..res.rank_out().into()).for_each(|i| {
|
(0..res.rank_out().into()).for_each(|i| {
|
||||||
self.vec_znx_automorphism(
|
self.vec_znx_automorphism(
|
||||||
self.galois_element_inv(p),
|
self.galois_element_inv(p),
|
||||||
@@ -146,9 +183,12 @@ where
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
res.key
|
||||||
|
.encrypt_sk(self, sk, &sk_out, source_xa, source_xe, scratch_1);
|
||||||
res.key
|
res.key
|
||||||
.encrypt_sk(self, sk, &sk_out, source_xa, source_xe, scratch_1);
|
.encrypt_sk(self, sk, &sk_out, source_xa, source_xe, scratch_1);
|
||||||
|
|
||||||
res.p = p;
|
res.p = p;
|
||||||
|
res.p = p;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
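A reading of the hunk above, not established by the diff itself: `encrypt_sk` first maps the secret through the inverse Galois automorphism (`vec_znx_automorphism` with `galois_element_inv(p)`) and then encrypts a switching key between `sk` and that mapped secret. Writing φ_p for the ring automorphism X ↦ X^p of Z[X]/(X^N + 1), this appears to correspond to:

```latex
\mathrm{sk}_{\mathrm{out}} \;=\; \varphi_{p^{-1}}(\mathrm{sk}),
\qquad
\mathrm{AutomorphismKey}_p \;\approx\; \mathrm{GGLWE\ switching\ key}\big(\mathrm{sk} \to \mathrm{sk}_{\mathrm{out}}\big),
\qquad
\varphi_p(a)(X) \;=\; a(X^p) \bmod (X^N + 1).
```

Which secret counts as the "input" and which as the "output" of the switching key is inferred only from the argument order of `res.key.encrypt_sk(self, sk, &sk_out, ...)` and may follow the opposite convention elsewhere in the library.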
@@ -1,19 +1,16 @@
 use std::collections::HashMap;
 
 use poulpy_hal::{
-api::{
+api::{ModuleLogN, VecZnxCopy, VecZnxRotateInplace},
-ScratchAvailable, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace,
+layouts::{Backend, DataMut, DataRef, GaloisElement, Module, Scratch},
-VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply,
-VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize,
-VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
-VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
-},
-layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
 
 use crate::{
-GLWEOperations,
+GLWEAdd, GLWEAutomorphism, GLWENormalize, GLWERotate, GLWEShift, GLWESub, ScratchTakeCore,
-layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared},
+layouts::{
+GGLWEInfos, GLWE, GLWEAlloc, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
+prepared::{AutomorphismKeyPreparedToRef, GetAutomorphismGaloisElement},
+},
 };
 
 /// [GLWEPacker] enables only the fly GLWE packing
@@ -43,9 +40,10 @@ impl Accumulator {
 /// * `base2k`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
 /// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus.
 /// * `rank`: rank of the GLWE ciphertext.
-pub fn alloc<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+pub fn alloc<A, M>(module: &M, infos: &A) -> Self
 where
 A: GLWEInfos,
+M: GLWEAlloc,
 {
 Self {
 data: GLWE::alloc_from_infos(module, infos),
@@ -66,9 +64,10 @@ impl GLWEPacker {
 /// and N GLWE ciphertext can be packed. With `log_batch=2` all coefficients
 /// which are multiples of X^{N/4} are packed. Meaning that N/4 ciphertexts
 /// can be packed.
-pub fn new<A, B: Backend>(module: Module<B>, infos: &A, log_batch: usize) -> Self
+pub fn new<A, M>(module: &M, infos: &A, log_batch: usize) -> Self
 where
 A: GLWEInfos,
+M: GLWEAlloc,
 {
 let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
 let log_n: usize = infos.n().log2();
@@ -90,13 +89,13 @@ impl GLWEPacker {
 }
 
 /// Number of scratch space bytes required to call [Self::add].
-pub fn tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+pub fn tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
 where
-OUT: GLWEInfos,
+R: GLWEInfos,
-KEY: GGLWEInfos,
+K: GGLWEInfos,
-Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+M: GLWEAlloc + GLWEAutomorphism<BE>,
 {
-pack_core_tmp_bytes(module, out_infos, key_infos)
+pack_core_tmp_bytes(module, res_infos, key_infos)
 }
 
 pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
@@ -112,37 +111,12 @@ impl GLWEPacker {
 /// * `a`: ciphertext to pack. Can optionally give None to pack a 0 ciphertext.
 /// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s.
 /// * `scratch`: scratch space of size at least [Self::tmp_bytes].
-pub fn add<DataA: DataRef, DataAK: DataRef, B: Backend>(
+pub fn add<A, K, M, BE: Backend>(&mut self, module: &M, a: Option<&A>, auto_keys: &HashMap<i64, K>, scratch: &mut Scratch<B>)
-&mut self,
+where
-module: &Module<B>,
+A: GLWEToRef,
-a: Option<&GLWE<DataA>>,
+K: AutomorphismKeyPreparedToRef<BE>,
-auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
+M: GLWEAutomorphism<BE>,
-scratch: &mut Scratch<B>,
+Scratch<BE>: ScratchTakeCore<BE>,
-) where
-Module<B>: VecZnxDftBytesOf
-+ VmpApplyDftToDftTmpBytes
-+ VecZnxBigNormalizeTmpBytes
-+ VmpApplyDftToDft<B>
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxCopy
-+ VecZnxRotateInplace<B>
-+ VecZnxSub
-+ VecZnxNegateInplace
-+ VecZnxRshInplace<B>
-+ VecZnxAddInplace
-+ VecZnxNormalizeInplace<B>
-+ VecZnxSubInplace
-+ VecZnxRotate
-+ VecZnxAutomorphismInplace<B>
-+ VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxBigAutomorphismInplace<B>
-+ VecZnxNormalize<B>
-+ VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable,
 {
 assert!(
 (self.counter as u32) < self.accumulators[0].data.n(),
@@ -177,47 +151,27 @@ impl GLWEPacker {
 }
 }
 
-fn pack_core_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+fn pack_core_tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
 where
-OUT: GLWEInfos,
+R: GLWEInfos,
-KEY: GGLWEInfos,
+K: GGLWEInfos,
-Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+M: GLWEAlloc + GLWEAutomorphism<BE>,
 {
-combine_tmp_bytes(module, out_infos, key_infos)
+combine_tmp_bytes(module, res_infos, key_infos)
 }
 
-fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
+fn pack_core<A, K, M, BE: Backend>(
-module: &Module<B>,
+module: &M,
-a: Option<&GLWE<D>>,
+a: Option<&A>,
 accumulators: &mut [Accumulator],
 i: usize,
-auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
+auto_keys: &HashMap<i64, K>,
-scratch: &mut Scratch<B>,
+scratch: &mut Scratch<BE>,
 ) where
-Module<B>: VecZnxDftBytesOf
+A: GLWEToRef + GLWEInfos,
-+ VmpApplyDftToDftTmpBytes
+K: AutomorphismKeyPreparedToRef<BE>,
-+ VecZnxBigNormalizeTmpBytes
+M: GLWEAutomorphism<BE> + ModuleLogN + VecZnxCopy,
-+ VmpApplyDftToDft<B>
+Scratch<BE>: ScratchTakeCore<BE>,
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxCopy
-+ VecZnxRotateInplace<B>
-+ VecZnxSub
-+ VecZnxNegateInplace
-+ VecZnxRshInplace<B>
-+ VecZnxAddInplace
-+ VecZnxNormalizeInplace<B>
-+ VecZnxSubInplace
-+ VecZnxRotate
-+ VecZnxAutomorphismInplace<B>
-+ VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxBigAutomorphismInplace<B>
-+ VecZnxNormalize<B>
-+ VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable,
 {
 let log_n: usize = module.log_n();
 
@@ -268,49 +222,29 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
 }
 }
 
-fn combine_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+fn combine_tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
 where
-OUT: GLWEInfos,
+R: GLWEInfos,
-KEY: GGLWEInfos,
+K: GGLWEInfos,
-Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+M: GLWEAlloc + GLWEAutomorphism<BE>,
 {
-GLWE::bytes_of_from_infos(module, out_infos)
+GLWE::bytes_of_from_infos(module, res_infos)
-+ (GLWE::rsh_tmp_bytes(module.n()) | GLWE::automorphism_inplace_tmp_bytes(module, out_infos, key_infos))
++ (GLWE::rsh_tmp_bytes(module.n()) | module.glwe_automorphism_tmp_bytes(res_infos, res_infos, key_infos))
 }
 
 /// [combine] merges two ciphertexts together.
-fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
+fn combine<B, M, K, BE: Backend>(
-module: &Module<B>,
+module: &M,
 acc: &mut Accumulator,
-b: Option<&GLWE<D>>,
+b: Option<&B>,
 i: usize,
-auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
+auto_keys: &HashMap<i64, K>,
-scratch: &mut Scratch<B>,
+scratch: &mut Scratch<BE>,
 ) where
-Module<B>: VecZnxDftBytesOf
+B: GLWEToRef + GLWEInfos,
-+ VmpApplyDftToDftTmpBytes
+K: AutomorphismKeyPreparedToRef<BE>,
-+ VecZnxBigNormalizeTmpBytes
+M: GLWEAutomorphism<BE> + GaloisElement + VecZnxRotateInplace<BE>,
-+ VmpApplyDftToDft<B>
+Scratch<BE>: ScratchTakeCore<BE>,
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxCopy
-+ VecZnxRotateInplace<B>
-+ VecZnxSub
-+ VecZnxNegateInplace
-+ VecZnxRshInplace<B>
-+ VecZnxAddInplace
-+ VecZnxNormalizeInplace<B>
-+ VecZnxSubInplace
-+ VecZnxRotate
-+ VecZnxAutomorphismInplace<B>
-+ VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxBigAutomorphismInplace<B>
-+ VecZnxNormalize<B>
-+ VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable,
 {
 let log_n: usize = acc.data.n().log2();
 let a: &mut GLWE<Vec<u8>> = &mut acc.data;
@@ -335,7 +269,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
 // since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
 if acc.value {
 if let Some(b) = b {
-let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(a);
+let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, a);
 
 // a = a * X^-t
 a.rotate_inplace(module, -t, scratch_1);
@@ -390,110 +324,76 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
 }
 }
 
+pub trait GLWEPacking<BE: Backend>
+where
+Self: GLWEAutomorphism<BE>
++ GaloisElement
++ ModuleLogN
++ GLWERotate<BE>
++ GLWESub
++ GLWEShift<BE>
++ GLWEAdd
++ GLWENormalize<BE>,
+{
 /// Packs [x_0: GLWE(m_0), x_1: GLWE(m_1), ..., x_i: GLWE(m_i)]
 /// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)]
-pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
+fn glwe_pack<R, K>(
-module: &Module<B>,
+&self,
-cts: &mut HashMap<usize, &mut GLWE<D>>,
+cts: &mut HashMap<usize, &mut R>,
 log_gap_out: usize,
-auto_keys: &HashMap<i64, AutomorphismKeyPrepared<ATK, B>>,
+keys: &HashMap<i64, K>,
-scratch: &mut Scratch<B>,
+scratch: &mut Scratch<BE>,
 ) where
-ATK: DataRef,
+R: GLWEToMut + GLWEToRef + GLWEInfos,
-Module<B>: VecZnxRotateInplace<B>
+K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
-+ VecZnxNormalizeInplace<B>
+Scratch<BE>: ScratchTakeCore<BE>,
-+ VecZnxNormalizeTmpBytes
-+ VecZnxSwitchRing
-+ VecZnxBigAutomorphismInplace<B>
-+ VecZnxRshInplace<B>
-+ VecZnxDftCopy<B>
-+ VecZnxIdftApplyTmpA<B>
-+ VecZnxSub
-+ VecZnxAddInplace
-+ VecZnxNegateInplace
-+ VecZnxCopy
-+ VecZnxSubInplace
-+ VecZnxDftBytesOf
-+ VmpApplyDftToDftTmpBytes
-+ VecZnxBigNormalizeTmpBytes
-+ VmpApplyDftToDft<B>
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxAutomorphismInplace<B>
-+ VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxRotate
-+ VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {
-assert!(*cts.keys().max().unwrap() < module.n())
+assert!(*cts.keys().max().unwrap() < self.n())
 }
 
-let log_n: usize = module.log_n();
+let log_n: usize = self.log_n();
 
-(0..log_n - log_gap_out).for_each(|i| {
+for i in 0..(log_n - log_gap_out){
 let t: usize = (1 << log_n).min(1 << (log_n - 1 - i));
 
-let auto_key: &AutomorphismKeyPrepared<ATK, B> = if i == 0 {
+let key: &K = if i == 0 {
-auto_keys.get(&-1).unwrap()
+keys.get(&-1).unwrap()
 } else {
-auto_keys.get(&module.galois_element(1 << (i - 1))).unwrap()
+keys.get(&self.galois_element(1 << (i - 1))).unwrap()
 };
 
-(0..t).for_each(|j| {
+for j in 0..t{
-let mut a: Option<&mut GLWE<D>> = cts.remove(&j);
+let mut a: Option<&mut R> = cts.remove(&j);
-let mut b: Option<&mut GLWE<D>> = cts.remove(&(j + t));
+let mut b: Option<&mut R> = cts.remove(&(j + t));
 
-pack_internal(module, &mut a, &mut b, i, auto_key, scratch);
+pack_internal(self, &mut a, &mut b, i, key, scratch);
 
 if let Some(a) = a {
 cts.insert(j, a);
 } else if let Some(b) = b {
 cts.insert(j, b);
 }
-});
+};
-});
+};
+}
 }
 
 #[allow(clippy::too_many_arguments)]
-fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
+fn pack_internal<M, A, B, K, BE: Backend>(
-module: &Module<B>,
+module: &M,
-a: &mut Option<&mut GLWE<A>>,
+a: &mut Option<&mut A>,
-b: &mut Option<&mut GLWE<D>>,
+b: &mut Option<&mut B>,
 i: usize,
-auto_key: &AutomorphismKeyPrepared<DataAK, B>,
+auto_key: &K,
-scratch: &mut Scratch<B>,
+scratch: &mut Scratch<BE>,
 ) where
-Module<B>: VecZnxRotateInplace<B>
+M: GLWEAutomorphism<BE> + GLWERotate<BE> + GLWESub + GLWEShift<BE> + GLWEAdd + GLWENormalize<BE>,
-+ VecZnxNormalizeInplace<B>
+A: GLWEToMut + GLWEToRef + GLWEInfos,
-+ VecZnxNormalizeTmpBytes
+B: GLWEToMut + GLWEToRef + GLWEInfos,
-+ VecZnxBigAutomorphismInplace<B>
+K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
-+ VecZnxRshInplace<B>
+Scratch<BE>: ScratchTakeCore<BE>,
-+ VecZnxDftCopy<B>
-+ VecZnxIdftApplyTmpA<B>
-+ VecZnxSub
-+ VecZnxAddInplace
-+ VecZnxNegateInplace
-+ VecZnxCopy
-+ VecZnxSubInplace
-+ VecZnxDftBytesOf
-+ VmpApplyDftToDftTmpBytes
-+ VecZnxBigNormalizeTmpBytes
-+ VmpApplyDftToDft<B>
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxAutomorphismInplace<B>
-+ VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxRotate
-+ VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable,
 {
 // Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t))
 // We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
@@ -509,45 +409,45 @@ fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
 let t: i64 = 1 << (a.n().log2() - i - 1);
 
 if let Some(b) = b.as_deref_mut() {
-let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(a);
+let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, a);
 
 // a = a * X^-t
-a.rotate_inplace(module, -t, scratch_1);
+module.glwe_rotate_inplace(-t, a, scratch_1);
 
 // tmp_b = a * X^-t - b
-tmp_b.sub(module, a, b);
+module.glwe_sub(&mut tmp_b, a, b);
-tmp_b.rsh(module, 1, scratch_1);
+module.glwe_rsh(1, &mut tmp_b, scratch_1);
 
 // a = a * X^-t + b
-a.add_inplace(module, b);
+module.glwe_add_inplace(a, b);
-a.rsh(module, 1, scratch_1);
+module.glwe_rsh(1, a, scratch_1);
 
-tmp_b.normalize_inplace(module, scratch_1);
+module.glwe_normalize_inplace(&mut tmp_b, scratch_1);
 
 // tmp_b = phi(a * X^-t - b)
-tmp_b.automorphism_inplace(module, auto_key, scratch_1);
+module.glwe_automorphism_inplace(&mut tmp_b, auto_key, scratch_1);
 
 // a = a * X^-t + b - phi(a * X^-t - b)
-a.sub_inplace_ab(module, &tmp_b);
+module.glwe_sub_inplace(a, &tmp_b);
-a.normalize_inplace(module, scratch_1);
+module.glwe_normalize_inplace(a, scratch_1);
 
 // a = a + b * X^t - phi(a * X^-t - b) * X^t
 // = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
 // = a + b * X^t + phi(a - b * X^t)
-a.rotate_inplace(module, t, scratch_1);
+module.glwe_rotate_inplace(t, a, scratch_1);
 } else {
-a.rsh(module, 1, scratch);
+module.glwe_rsh(1, a, scratch);
 // a = a + phi(a)
-a.automorphism_add_inplace(module, auto_key, scratch);
+module.glwe_automorphism_add_inplace(a, auto_key, scratch);
 }
 } else if let Some(b) = b.as_deref_mut() {
 let t: i64 = 1 << (b.n().log2() - i - 1);
 
-let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(b);
+let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, b);
-tmp_b.rotate(module, t, b);
+module.glwe_rotate(t, &mut tmp_b, b);
-tmp_b.rsh(module, 1, scratch_1);
+module.glwe_rsh(1, &mut tmp_b, scratch_1);
 
 // a = (b* X^t - phi(b* X^t))
-b.automorphism_sub_negate(module, &tmp_b, auto_key, scratch_1);
+module.glwe_automorphism_sub_negate(b, &tmp_b, auto_key, scratch_1);
 }
 }
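The `pack_internal` comments above rely on the identity AUTO(a * X^t, g) = -X^t * AUTO(a, g). The standalone sketch below checks that identity on plain integer polynomials for the first packing step (g = -1, t = N/2). All names are local to the sketch and nothing here uses the poulpy API, so it only illustrates the ring arithmetic in Z[X]/(X^N + 1), not the library's exact conventions.

```rust
fn rotate(a: &[i64], k: i64) -> Vec<i64> {
    // Multiply by X^k in Z[X]/(X^N + 1) (negacyclic rotation).
    let n = a.len() as i64;
    let mut out = vec![0i64; a.len()];
    for (i, &c) in a.iter().enumerate() {
        let mut j = (i as i64 + k).rem_euclid(2 * n);
        let mut v = c;
        if j >= n {
            j -= n;
            v = -v;
        }
        out[j as usize] = v;
    }
    out
}

fn automorphism(a: &[i64], g: i64) -> Vec<i64> {
    // Apply X -> X^g: the coefficient of X^i moves to X^(i*g mod 2N),
    // with a sign flip whenever the exponent lands in [N, 2N).
    let n = a.len() as i64;
    let mut out = vec![0i64; a.len()];
    for (i, &c) in a.iter().enumerate() {
        let mut j = (i as i64 * g).rem_euclid(2 * n);
        let mut v = c;
        if j >= n {
            j -= n;
            v = -v;
        }
        out[j as usize] = v;
    }
    out
}

fn main() {
    let a: Vec<i64> = vec![3, -1, 4, 1, -5, 9, 2, -6]; // N = 8
    let t = (a.len() / 2) as i64; // t = N/2, the first step of the packer
    let g = -1; // Galois element used when i == 0

    // AUTO(a * X^t, g)
    let lhs = automorphism(&rotate(&a, t), g);
    // -X^t * AUTO(a, g): rotate by t, then negate every coefficient.
    let rhs: Vec<i64> = rotate(&automorphism(&a, g), t).iter().map(|&c| -c).collect();
    assert_eq!(lhs, rhs);
    println!("AUTO(a * X^t, -1) == -X^t * AUTO(a, -1) for N = {}", a.len());
}
```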
@@ -1,173 +1,188 @@
 use std::collections::HashMap;
 
 use poulpy_hal::{
-api::{
+api::ModuleLogN,
-ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+layouts::{Backend, DataMut, GaloisElement, Module, Scratch, VecZnx},
-VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
-VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
-},
-layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx},
 };
 
 use crate::{
-layouts::{Base2K, GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWEInfos, prepared::AutomorphismKeyPrepared},
+GLWEAutomorphism, GLWECopy, GLWEShift, ScratchTakeCore,
-operations::GLWEOperations,
+layouts::{
+Base2K, GGLWEInfos, GLWE, GLWEInfos, GLWELayout, GLWEToMut, GLWEToRef, LWEInfos,
+prepared::{AutomorphismKeyPreparedToRef, GetAutomorphismGaloisElement},
+},
 };
 
 impl GLWE<Vec<u8>> {
-pub fn trace_galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
+pub fn trace_galois_elements<M, BE: Backend>(module: &M) -> Vec<i64>
-let mut gal_els: Vec<i64> = Vec::new();
+where
-(0..module.log_n()).for_each(|i| {
+M: GLWETrace<BE>,
-if i == 0 {
+{
-gal_els.push(-1);
+module.glwe_trace_galois_elements()
-} else {
-gal_els.push(module.galois_element(1 << (i - 1)));
-}
-});
-gal_els
 }
 
-pub fn trace_tmp_bytes<B: Backend, OUT, IN, KEY>(module: &Module<B>, out_infos: &OUT, in_infos: &IN, key_infos: &KEY) -> usize
+pub fn trace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
 where
-OUT: GLWEInfos,
+R: GLWEInfos,
-IN: GLWEInfos,
+A: GLWEInfos,
-KEY: GGLWEInfos,
+K: GGLWEInfos,
-Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+M: GLWETrace<BE>,
 {
-let trace: usize = Self::automorphism_inplace_tmp_bytes(module, out_infos, key_infos);
+module.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos)
-if in_infos.base2k() != key_infos.base2k() {
+}
+}
 
+impl<D: DataMut> GLWE<D> {
+pub fn trace<A, K, M, BE: Backend>(
+&mut self,
+module: &M,
+start: usize,
+end: usize,
+a: &A,
+keys: &HashMap<i64, K>,
+scratch: &mut Scratch<BE>,
+) where
+A: GLWEToRef,
+K: AutomorphismKeyPreparedToRef<BE> + GGLWEInfos + GetAutomorphismGaloisElement,
+Scratch<BE>: ScratchTakeCore<BE>,
+M: GLWETrace<BE>,
+{
+module.glwe_trace(self, start, end, a, keys, scratch);
+}
+
+pub fn trace_inplace<K, M, BE: Backend>(
+&mut self,
+module: &M,
+start: usize,
+end: usize,
+keys: &HashMap<i64, K>,
+scratch: &mut Scratch<BE>,
+) where
+K: AutomorphismKeyPreparedToRef<BE> + GGLWEInfos + GetAutomorphismGaloisElement,
+Scratch<BE>: ScratchTakeCore<BE>,
+M: GLWETrace<BE>,
+{
+module.glwe_trace_inplace(self, start, end, keys, scratch);
+}
+}
+
+impl<BE: Backend> GLWETrace<BE> for Module<BE> where
+Self: ModuleLogN + GaloisElement + GLWEAutomorphism<BE> + GLWEShift<BE> + GLWECopy
+{
+}
+
+pub trait GLWETrace<BE: Backend>
+where
+Self: ModuleLogN + GaloisElement + GLWEAutomorphism<BE> + GLWEShift<BE> + GLWECopy,
+{
+fn glwe_trace_galois_elements(&self) -> Vec<i64> {
+(0..self.log_n())
+.map(|i| {
+if i == 0 {
+-1
+} else {
+self.galois_element(1 << (i - 1))
+}
+})
+.collect()
+}
+
+fn glwe_trace_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
+where
+R: GLWEInfos,
+A: GLWEInfos,
+K: GGLWEInfos,
+{
+let trace: usize = self.glwe_automorphism_tmp_bytes(res_infos, a_infos, key_infos);
+if a_infos.base2k() != key_infos.base2k() {
 let glwe_conv: usize = VecZnx::bytes_of(
-module.n(),
+self.n(),
 (key_infos.rank_out() + 1).into(),
-out_infos.k().min(in_infos.k()).div_ceil(key_infos.base2k()) as usize,
+res_infos.k().min(a_infos.k()).div_ceil(key_infos.base2k()) as usize,
-) + module.vec_znx_normalize_tmp_bytes();
+) + self.vec_znx_normalize_tmp_bytes();
 return glwe_conv + trace;
 }
 
 trace
 }
 
-pub fn trace_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+fn glwe_trace<R, A, K>(&self, res: &mut R, start: usize, end: usize, a: &A, keys: &HashMap<i64, K>, scratch: &mut Scratch<BE>)
 where
-OUT: GLWEInfos,
+R: GLWEToMut,
-KEY: GGLWEInfos,
+A: GLWEToRef,
-Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
+K: AutomorphismKeyPreparedToRef<BE> + GGLWEInfos + GetAutomorphismGaloisElement,
+Scratch<BE>: ScratchTakeCore<BE>,
 {
-Self::trace_tmp_bytes(module, out_infos, out_infos, key_infos)
+self.glwe_copy(res, a);
-}
+self.glwe_trace_inplace(res, start, end, keys, scratch);
 }
 
-impl<DataSelf: DataMut> GLWE<DataSelf> {
+fn glwe_trace_inplace<R, K>(&self, res: &mut R, start: usize, end: usize, keys: &HashMap<i64, K>, scratch: &mut Scratch<BE>)
-pub fn trace<DataLhs: DataRef, DataAK: DataRef, B: Backend>(
+where
-&mut self,
+R: GLWEToMut,
-module: &Module<B>,
+K: AutomorphismKeyPreparedToRef<BE> + GGLWEInfos + GetAutomorphismGaloisElement,
-start: usize,
+Scratch<BE>: ScratchTakeCore<BE>,
-end: usize,
-lhs: &GLWE<DataLhs>,
-auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
-scratch: &mut Scratch<B>,
-) where
-Module<B>: VecZnxDftBytesOf
-+ VmpApplyDftToDftTmpBytes
-+ VecZnxBigNormalizeTmpBytes
-+ VmpApplyDftToDft<B>
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxBigAutomorphismInplace<B>
-+ VecZnxRshInplace<B>
-+ VecZnxCopy
-+ VecZnxNormalizeTmpBytes
-+ VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable,
 {
-self.copy(module, lhs);
+let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-self.trace_inplace(module, start, end, auto_keys, scratch);
-}
 
-pub fn trace_inplace<DataAK: DataRef, B: Backend>(
+let basek_ksk: Base2K = keys.get(keys.keys().next().unwrap()).unwrap().base2k();
-&mut self,
-module: &Module<B>,
-start: usize,
-end: usize,
-auto_keys: &HashMap<i64, AutomorphismKeyPrepared<DataAK, B>>,
-scratch: &mut Scratch<B>,
-) where
-Module<B>: VecZnxDftBytesOf
-+ VmpApplyDftToDftTmpBytes
-+ VecZnxBigNormalizeTmpBytes
-+ VmpApplyDftToDft<B>
-+ VmpApplyDftToDftAdd<B>
-+ VecZnxDftApply<B>
-+ VecZnxIdftApplyConsume<B>
-+ VecZnxBigAddSmallInplace<B>
-+ VecZnxBigNormalize<B>
-+ VecZnxBigAutomorphismInplace<B>
-+ VecZnxRshInplace<B>
-+ VecZnxNormalizeTmpBytes
-+ VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable,
-{
-let basek_ksk: Base2K = auto_keys
-.get(auto_keys.keys().next().unwrap())
-.unwrap()
-.base2k();
 
 #[cfg(debug_assertions)]
 {
-assert_eq!(self.n(), module.n() as u32);
+assert_eq!(res.n(), self.n() as u32);
 assert!(start < end);
-assert!(end <= module.log_n());
+assert!(end <= self.log_n());
-for key in auto_keys.values() {
+for key in keys.values() {
-assert_eq!(key.n(), module.n() as u32);
+assert_eq!(key.n(), self.n() as u32);
 assert_eq!(key.base2k(), basek_ksk);
-assert_eq!(key.rank_in(), self.rank());
+assert_eq!(key.rank_in(), res.rank());
-assert_eq!(key.rank_out(), self.rank());
+assert_eq!(key.rank_out(), res.rank());
 }
 }
 
-if self.base2k() != basek_ksk {
+if res.base2k() != basek_ksk {
-let (mut self_conv, scratch_1) = scratch.take_glwe_ct(&GLWELayout {
+let (mut self_conv, scratch_1) = scratch.take_glwe_ct(
-n: module.n().into(),
+self,
+&GLWELayout {
+n: self.n().into(),
 base2k: basek_ksk,
-k: self.k(),
+k: res.k(),
-rank: self.rank(),
+rank: res.rank(),
-});
+},
+);
 
-for j in 0..(self.rank() + 1).into() {
+for j in 0..(res.rank() + 1).into() {
-module.vec_znx_normalize(
+self.vec_znx_normalize(
 basek_ksk.into(),
 &mut self_conv.data,
 j,
 basek_ksk.into(),
-&self.data,
+res.data(),
 j,
 scratch_1,
 );
 }
 
 for i in start..end {
-self_conv.rsh(module, 1, scratch_1);
+self.glwe_rsh(1, &mut self_conv, scratch_1);
 
 let p: i64 = if i == 0 {
 -1
 } else {
-module.galois_element(1 << (i - 1))
+self.galois_element(1 << (i - 1))
 };
 
-if let Some(key) = auto_keys.get(&p) {
+if let Some(key) = keys.get(&p) {
-self_conv.automorphism_add_inplace(module, key, scratch_1);
+self.glwe_automorphism_add_inplace(&mut self_conv, key, scratch_1);
 } else {
-panic!("auto_keys[{p}] is empty")
+panic!("keys[{p}] is empty")
 }
 }
 
-for j in 0..(self.rank() + 1).into() {
+for j in 0..(res.rank() + 1).into() {
-module.vec_znx_normalize(
+self.vec_znx_normalize(
-self.base2k().into(),
+res.base2k().into(),
-&mut self.data,
+res.data_mut(),
 j,
 basek_ksk.into(),
 &self_conv.data,
@@ -177,18 +192,18 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
 }
 } else {
 for i in start..end {
-self.rsh(module, 1, scratch);
+self.glwe_rsh(1, res, scratch);
 
 let p: i64 = if i == 0 {
 -1
 } else {
-module.galois_element(1 << (i - 1))
+self.galois_element(1 << (i - 1))
 };
 
-if let Some(key) = auto_keys.get(&p) {
+if let Some(key) = keys.get(&p) {
-self.automorphism_add_inplace(module, key, scratch);
+self.glwe_automorphism_add_inplace(res, key, scratch);
 } else {
-panic!("auto_keys[{p}] is empty")
+panic!("keys[{p}] is empty")
 }
 }
 }
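For orientation, and not stated in the diff itself: each loop iteration above computes res ← (res + φ_{p_i}(res)) / 2, via `glwe_rsh(1, ...)` followed by `glwe_automorphism_add_inplace`, where p_0 = -1 and p_i = `galois_element(1 << (i - 1))` for i ≥ 1. Running the full range 0..log_n() composes to the normalized trace of Z[X]/(X^N + 1), which keeps only the constant coefficient of the underlying plaintext; a partial range [start, end) only annihilates part of the coefficients. The standard identity being realized is:

```latex
\frac{1}{N} \sum_{\sigma \,\in\, \mathrm{Gal}\left(\mathbb{Z}[X]/(X^N + 1)\right)} \sigma\big(a(X)\big) \;=\; a_0 .
```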
@@ -2,7 +2,7 @@ use poulpy_hal::layouts::{Backend, DataMut, Module, Scratch};
 
 use crate::{
 ScratchTakeCore,
-keyswitching::glwe_ct::GLWEKeySwitch,
+keyswitching::glwe_ct::GLWEKeyswitch,
 layouts::{
 AutomorphismKey, AutomorphismKeyToRef, GGLWE, GGLWEInfos, GGLWEToMut, GGLWEToRef, GLWESwitchingKey,
 GLWESwitchingKeyToRef,
@@ -16,7 +16,7 @@ impl AutomorphismKey<Vec<u8>> {
 R: GGLWEInfos,
 A: GGLWEInfos,
 K: GGLWEInfos,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
 }
@@ -28,7 +28,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
 A: AutomorphismKeyToRef,
 B: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.gglwe_keyswitch(&mut self.key.key, &a.to_ref().key.key, b, scratch);
 }
@@ -37,7 +37,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
 where
 A: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.gglwe_keyswitch_inplace(&mut self.key.key, a, scratch);
 }
@@ -49,7 +49,7 @@ impl GLWESwitchingKey<Vec<u8>> {
 R: GGLWEInfos,
 A: GGLWEInfos,
 K: GGLWEInfos,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
 }
@@ -61,7 +61,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
 A: GLWESwitchingKeyToRef,
 B: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.gglwe_keyswitch(&mut self.key, &a.to_ref().key, b, scratch);
 }
@@ -70,7 +70,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
 where
 A: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.gglwe_keyswitch_inplace(&mut self.key, a, scratch);
 }
@@ -82,7 +82,7 @@ impl GGLWE<Vec<u8>> {
 R: GGLWEInfos,
 A: GGLWEInfos,
 K: GGLWEInfos,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
 }
@@ -94,7 +94,7 @@ impl<DataSelf: DataMut> GGLWE<DataSelf> {
 A: GGLWEToRef,
 B: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.gglwe_keyswitch(self, a, b, scratch);
 }
@@ -103,17 +103,17 @@ impl<DataSelf: DataMut> GGLWE<DataSelf> {
 where
 A: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGLWEKeySwitch<BE>,
+M: GGLWEKeyswitch<BE>,
 {
 module.gglwe_keyswitch_inplace(self, a, scratch);
 }
 }
 
-impl<BE: Backend> GGLWEKeySwitch<BE> for Module<BE> where Self: GLWEKeySwitch<BE> {}
+impl<BE: Backend> GGLWEKeyswitch<BE> for Module<BE> where Self: GLWEKeyswitch<BE> {}
 
-pub trait GGLWEKeySwitch<BE: Backend>
+pub trait GGLWEKeyswitch<BE: Backend>
 where
-Self: GLWEKeySwitch<BE>,
+Self: GLWEKeyswitch<BE>,
 {
 fn gglwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
 where
@@ -2,7 +2,7 @@ use poulpy_hal::layouts::{Backend, DataMut, Scratch, VecZnx};
 
 use crate::{
 GGSWExpandRows, ScratchTakeCore,
-keyswitching::glwe_ct::GLWEKeySwitch,
+keyswitching::glwe_ct::GLWEKeyswitch,
 layouts::{
 GGLWEInfos, GGSW, GGSWInfos, GGSWToMut, GGSWToRef,
 prepared::{GLWESwitchingKeyPreparedToRef, TensorKeyPreparedToRef},
@@ -22,7 +22,7 @@ impl GGSW<Vec<u8>> {
 A: GGSWInfos,
 K: GGLWEInfos,
 T: GGLWEInfos,
-M: GGSWKeySwitch<BE>,
+M: GGSWKeyswitch<BE>,
 {
 module.ggsw_keyswitch_tmp_bytes(res_infos, a_infos, key_infos, tsk_infos)
 }
@@ -35,7 +35,7 @@ impl<D: DataMut> GGSW<D> {
 K: GLWESwitchingKeyPreparedToRef<BE>,
 T: TensorKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGSWKeySwitch<BE>,
+M: GGSWKeyswitch<BE>,
 {
 module.ggsw_keyswitch(self, a, key, tsk, scratch);
 }
@@ -45,15 +45,15 @@ impl<D: DataMut> GGSW<D> {
 K: GLWESwitchingKeyPreparedToRef<BE>,
 T: TensorKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
-M: GGSWKeySwitch<BE>,
+M: GGSWKeyswitch<BE>,
 {
 module.ggsw_keyswitch_inplace(self, key, tsk, scratch);
 }
 }
 
-pub trait GGSWKeySwitch<BE: Backend>
+pub trait GGSWKeyswitch<BE: Backend>
 where
-Self: GLWEKeySwitch<BE> + GGSWExpandRows<BE>,
+Self: GLWEKeyswitch<BE> + GGSWExpandRows<BE>,
 {
 fn ggsw_keyswitch_tmp_bytes<R, A, K, T>(&self, res_infos: &R, a_infos: &A, key_infos: &K, tsk_infos: &T) -> usize
 where
@@ -127,5 +127,3 @@ where
 self.ggsw_expand_row(res, tsk, scratch);
 }
 }
-
-impl<DataSelf: DataMut> GGSW<DataSelf> {}
@@ -16,14 +16,14 @@ use crate::{
 };
 
 impl GLWE<Vec<u8>> {
-pub fn keyswitch_tmp_bytes<M, R, A, B, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
+pub fn keyswitch_tmp_bytes<M, R, A, B, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &B) -> usize
 where
 R: GLWEInfos,
 A: GLWEInfos,
 B: GGLWEInfos,
-M: GLWEKeySwitch<BE>,
+M: GLWEKeyswitch<BE>,
 {
-module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, b_infos)
+module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
 }
 }
 
@@ -32,7 +32,7 @@ impl<D: DataMut> GLWE<D> {
 where
 A: GLWEToRef,
 B: GLWESwitchingKeyPreparedToRef<BE>,
-M: GLWEKeySwitch<BE>,
+M: GLWEKeyswitch<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 module.glwe_keyswitch(self, a, b, scratch);
@@ -41,14 +41,14 @@ impl<D: DataMut> GLWE<D> {
 pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
 where
 A: GLWESwitchingKeyPreparedToRef<BE>,
-M: GLWEKeySwitch<BE>,
+M: GLWEKeyswitch<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 module.glwe_keyswitch_inplace(self, a, scratch);
 }
 }
 
-impl<BE: Backend> GLWEKeySwitch<BE> for Module<BE> where
+impl<BE: Backend> GLWEKeyswitch<BE> for Module<BE> where
 Self: Sized
 + ModuleN
 + VecZnxDftBytesOf
@@ -69,7 +69,7 @@ impl<BE: Backend> GLWEKeySwitch<BE> for Module<BE> where
 {
 }
 
-pub trait GLWEKeySwitch<BE: Backend>
+pub trait GLWEKeyswitch<BE: Backend>
 where
 Self: Sized
 + ModuleN
@@ -89,7 +89,7 @@ where
 + VecZnxNormalize<BE>
 + VecZnxNormalizeTmpBytes,
 {
-fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
+fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, key_infos: &B) -> usize
 where
 R: GLWEInfos,
 A: GLWEInfos,
@@ -97,44 +97,44 @@ where
 {
 let in_size: usize = a_infos
 .k()
-.div_ceil(b_infos.base2k())
+.div_ceil(key_infos.base2k())
-.div_ceil(b_infos.dsize().into()) as usize;
+.div_ceil(key_infos.dsize().into()) as usize;
 let out_size: usize = res_infos.size();
-let ksk_size: usize = b_infos.size();
+let ksk_size: usize = key_infos.size();
-let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
+let res_dft: usize = self.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
-let ai_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
+let ai_dft: usize = self.bytes_of_vec_znx_dft((key_infos.rank_in()).into(), in_size);
 let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
 out_size,
 in_size,
 in_size,
-(b_infos.rank_in()).into(),
+(key_infos.rank_in()).into(),
-(b_infos.rank_out() + 1).into(),
+(key_infos.rank_out() + 1).into(),
 ksk_size,
-) + self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
+) + self.bytes_of_vec_znx_dft((key_infos.rank_in()).into(), in_size);
 let normalize_big: usize = self.vec_znx_big_normalize_tmp_bytes();
-if a_infos.base2k() == b_infos.base2k() {
+if a_infos.base2k() == key_infos.base2k() {
 res_dft + ((ai_dft + vmp) | normalize_big)
-} else if b_infos.dsize() == 1 {
+} else if key_infos.dsize() == 1 {
 // In this case, we only need one column, temporary, that we can drop once a_dft is computed.
 let normalize_conv: usize = VecZnx::bytes_of(self.n(), 1, in_size) + self.vec_znx_normalize_tmp_bytes();
 res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
 } else {
 // Since we stride over a to get a_dft when dsize > 1, we need to store the full columns of a with in the base conversion.
-let normalize_conv: usize = VecZnx::bytes_of(self.n(), (b_infos.rank_in()).into(), in_size);
+let normalize_conv: usize = VecZnx::bytes_of(self.n(), (key_infos.rank_in()).into(), in_size);
 res_dft + ((ai_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
 }
 }
 
-fn glwe_keyswitch<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
+fn glwe_keyswitch<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
 where
 R: GLWEToMut,
 A: GLWEToRef,
-B: GLWESwitchingKeyPreparedToRef<BE>,
+K: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
 let a: &GLWE<&[u8]> = &a.to_ref();
-let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &b.to_ref();
+let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &key.to_ref();
 
 assert_eq!(
 a.rank(),
@@ -181,14 +181,14 @@ where
 })
 }
 
-fn glwe_keyswitch_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
+fn glwe_keyswitch_inplace<R, K>(&self, res: &mut R, key: &K, scratch: &mut Scratch<BE>)
 where
 R: GLWEToMut,
-A: GLWESwitchingKeyPreparedToRef<BE>,
+K: GLWESwitchingKeyPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &a.to_ref();
+let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &key.to_ref();
 
 assert_eq!(
 res.rank(),
@@ -239,11 +239,11 @@ impl GLWE<Vec<u8>> {}
 
 impl<DataSelf: DataMut> GLWE<DataSelf> {}
 
-fn keyswitch_internal<BE: Backend, M, DR, DA, DB>(
+pub(crate) fn keyswitch_internal<BE: Backend, M, DR, DA, DB>(
 module: &M,
 mut res: VecZnxDft<DR, BE>,
 a: &GLWE<DA>,
-b: &GLWESwitchingKeyPrepared<DB, BE>,
+key: &GLWESwitchingKeyPrepared<DB, BE>,
 scratch: &mut Scratch<BE>,
 ) -> VecZnxBig<DR, BE>
 where
@@ -265,12 +265,12 @@ where
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 let base2k_in: usize = a.base2k().into();
-let base2k_out: usize = b.base2k().into();
+let base2k_out: usize = key.base2k().into();
 let cols: usize = (a.rank() + 1).into();
 let a_size: usize = (a.size() * base2k_in).div_ceil(base2k_out);
-let pmat: &VmpPMat<DB, BE> = &b.key.data;
+let pmat: &VmpPMat<DB, BE> = &key.key.data;
 
-if b.dsize() == 1 {
+if key.dsize() == 1 {
 let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a.size());
 
 if base2k_in == base2k_out {
@@ -295,7 +295,7 @@ where
 
 module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
 } else {
-let dsize: usize = b.dsize().into();
+let dsize: usize = key.dsize().into();
 
 let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a_size.div_ceil(dsize));
 ai_dft.data_mut().fill(0);
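In `glwe_keyswitch_tmp_bytes` above, scratch sizes are combined with `+` where two buffers must coexist and with the bitwise-or `|` where the expression appears to stand in for the larger of two alternatives: for unsigned integers, `a | b` always satisfies `max(a, b) <= a | b <= a + b`, so it is a cheap upper bound. That reading of the convention is inferred from the code, not documented here; the standalone check below only demonstrates the inequality itself.

```rust
fn main() {
    // For any usize a, b: max(a, b) <= (a | b) <= a + b.
    let cases: [(usize, usize); 4] = [(0, 0), (1024, 4096), (3000, 5000), (65_536, 65_535)];
    for (a, b) in cases {
        let or = a | b;
        assert!(or >= a.max(b), "bitwise-or is at least the max");
        assert!(or <= a + b, "bitwise-or never exceeds the sum");
        println!("a = {a}, b = {b}: max = {}, a|b = {or}, a+b = {}", a.max(b), a + b);
    }
}
```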
@@ -5,7 +5,7 @@ use poulpy_hal::{
 
 use crate::{
 ScratchTakeCore,
-keyswitching::glwe_ct::GLWEKeySwitch,
+keyswitching::glwe_ct::GLWEKeyswitch,
 layouts::{
 GGLWEInfos, GLWE, GLWEAlloc, GLWELayout, LWE, LWEInfos, LWEToMut, LWEToRef, Rank, TorusPrecision,
 prepared::{LWESwitchingKeyPrepared, LWESwitchingKeyPreparedToRef},
@@ -40,7 +40,7 @@ impl<BE: Backend> LWEKeySwitch<BE> for Module<BE> where Self: LWEKeySwitch<BE> {
 
 pub trait LWEKeySwitch<BE: Backend>
 where
-Self: GLWEKeySwitch<BE> + GLWEAlloc,
+Self: GLWEKeyswitch<BE> + GLWEAlloc,
 {
 fn lwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
 where
@@ -2,3 +2,8 @@ mod gglwe_ct;
 mod ggsw_ct;
 mod glwe_ct;
 mod lwe_ct;
+
+pub use gglwe_ct::*;
+// pub use gglwe_ct::*;
+pub use glwe_ct::*;
+pub use lwe_ct::*;
@@ -6,6 +6,7 @@ use poulpy_hal::{
 use crate::layouts::{
 Base2K, Dnum, Dsize, GGLWEInfos, GLWE, GLWEInfos, GLWESwitchingKey, GLWESwitchingKeyAlloc, GLWESwitchingKeyToMut,
 GLWESwitchingKeyToRef, LWEInfos, Rank, RingDegree, TorusPrecision,
+prepared::{GetAutomorphismGaloisElement, SetAutomorphismGaloisElement},
 };
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 
@@ -27,6 +28,18 @@ pub struct AutomorphismKey<D: Data> {
 pub(crate) p: i64,
 }
 
+impl<D: DataMut> SetAutomorphismGaloisElement for AutomorphismKey<D> {
+fn set_p(&mut self, p: i64) {
+self.p = p
+}
+}
+
+impl<D: DataRef> GetAutomorphismGaloisElement for AutomorphismKey<D> {
+fn p(&self) -> i64 {
+self.p
+}
+}
+
 impl<D: Data> AutomorphismKey<D> {
 pub fn p(&self) -> i64 {
 self.p
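The two impls added above split read and write access to the Galois element `p` into separate traits, so generic code can require only the capability it needs. A minimal standalone sketch of that pattern, with purely local type and trait names (not the library's):

```rust
trait GetP {
    fn p(&self) -> i64;
}

trait SetP {
    fn set_p(&mut self, p: i64);
}

struct Key {
    p: i64,
}

impl GetP for Key {
    fn p(&self) -> i64 {
        self.p
    }
}

impl SetP for Key {
    fn set_p(&mut self, p: i64) {
        self.p = p
    }
}

// Generic code asks only for the read capability.
fn describe<K: GetP>(k: &K) -> String {
    format!("galois element p = {}", k.p())
}

fn main() {
    let mut k = Key { p: 0 };
    k.set_p(-1);
    println!("{}", describe(&k));
}
```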
@@ -14,10 +14,12 @@ mod utils;
|
|||||||
|
|
||||||
pub use operations::*;
|
pub use operations::*;
|
||||||
pub mod layouts;
|
pub mod layouts;
|
||||||
|
pub use automorphism::*;
|
||||||
pub use conversion::*;
|
pub use conversion::*;
|
||||||
pub use dist::*;
|
pub use dist::*;
|
||||||
pub use external_product::*;
|
pub use external_product::*;
|
||||||
pub use glwe_packing::*;
|
pub use glwe_packing::*;
|
||||||
|
pub use keyswitching::*;
|
||||||
|
|
||||||
pub use encryption::SIGMA;
|
pub use encryption::SIGMA;
|
||||||
|
|
||||||
@@ -1,320 +1,292 @@
 use poulpy_hal::{
     api::{
-        VecZnxAdd, VecZnxAddInplace, VecZnxCopy, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxNegateInplace,
+        ModuleN, VecZnxAdd, VecZnxAddInplace, VecZnxCopy, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxNegateInplace,
         VecZnxNormalize, VecZnxNormalizeInplace, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
         VecZnxSubInplace, VecZnxSubNegateInplace,
     },
-    layouts::{Backend, DataMut, Module, Scratch, VecZnx, ZnxZero},
+    layouts::{Backend, Module, Scratch, VecZnx, ZnxZero},
 };

-use crate::layouts::{GLWE, GLWEInfos, GLWEPlaintext, GLWEToMut, GLWEToRef, LWEInfos, SetGLWEInfos, TorusPrecision};
+use crate::{
+    ScratchTakeCore,
+    layouts::{GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos, SetGLWEInfos, TorusPrecision},
+};

-impl<D> GLWEOperations for GLWEPlaintext<D>
+pub trait GLWEAdd
 where
-    D: DataMut,
+    Self: ModuleN + VecZnxAdd + VecZnxCopy + VecZnxAddInplace,
-    GLWEPlaintext<D>: GLWEToMut + GLWEInfos,
 {
-}
+    fn glwe_add<R, A, B>(&self, res: &mut R, a: &A, b: &B)

-impl<D: DataMut> GLWEOperations for GLWE<D> where GLWE<D>: GLWEToMut + GLWEInfos {}

-pub trait GLWEOperations: GLWEToMut + GLWEInfos + SetGLWEInfos + Sized {
-    fn add<A, B, BACKEND: Backend>(&mut self, module: &Module<BACKEND>, a: &A, b: &B)
     where
-        A: GLWEToRef + GLWEInfos,
+        R: GLWEToMut,
-        B: GLWEToRef + GLWEInfos,
+        A: GLWEToRef,
-        Module<BACKEND>: VecZnxAdd + VecZnxCopy,
+        B: GLWEToRef,
     {
-        #[cfg(debug_assertions)]
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-        {
+        let a: &mut GLWE<&[u8]> = &mut a.to_ref();
-            assert_eq!(a.n(), self.n());
+        let b: &GLWE<&[u8]> = &b.to_ref();
-            assert_eq!(b.n(), self.n());
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(b.n(), self.n() as u32);
+        assert_eq!(res.n(), self.n() as u32);
         assert_eq!(a.base2k(), b.base2k());
-            assert!(self.rank() >= a.rank().max(b.rank()));
+        assert!(res.rank() >= a.rank().max(b.rank()));
-        }

         let min_col: usize = (a.rank().min(b.rank()) + 1).into();
         let max_col: usize = (a.rank().max(b.rank() + 1)).into();
-        let self_col: usize = (self.rank() + 1).into();
+        let self_col: usize = (res.rank() + 1).into();

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
-        let b_ref: &GLWE<&[u8]> = &b.to_ref();

         (0..min_col).for_each(|i| {
-            module.vec_znx_add(&mut self_mut.data, i, &a_ref.data, i, &b_ref.data, i);
+            self.vec_znx_add(res.data_mut(), i, a.data(), i, b.data(), i);
         });

         if a.rank() > b.rank() {
             (min_col..max_col).for_each(|i| {
-                module.vec_znx_copy(&mut self_mut.data, i, &a_ref.data, i);
+                self.vec_znx_copy(res.data_mut(), i, a.data(), i);
             });
         } else {
             (min_col..max_col).for_each(|i| {
-                module.vec_znx_copy(&mut self_mut.data, i, &b_ref.data, i);
+                self.vec_znx_copy(res.data_mut(), i, b.data(), i);
             });
         }

-        let size: usize = self_mut.size();
+        let size: usize = res.size();
         (max_col..self_col).for_each(|i| {
             (0..size).for_each(|j| {
-                self_mut.data.zero_at(i, j);
+                res.data.zero_at(i, j);
             });
         });

-        self.set_base2k(a.base2k());
+        res.set_base2k(a.base2k());
-        self.set_k(set_k_binary(self, a, b));
+        res.set_k(set_k_binary(res, a, b));
     }

-    fn add_inplace<A, BACKEND: Backend>(&mut self, module: &Module<BACKEND>, a: &A)
+    fn glwe_add_inplace<R, A>(&self, res: &mut R, a: &A)
     where
-        A: GLWEToRef + GLWEInfos,
+        R: GLWEToMut,
-        Module<BACKEND>: VecZnxAddInplace,
+        A: GLWEToRef,
     {
-        #[cfg(debug_assertions)]
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-        {
+        let a: &GLWE<&[u8]> = &a.to_ref();
-            assert_eq!(a.n(), self.n());
-            assert_eq!(self.base2k(), a.base2k());
-            assert!(self.rank() >= a.rank())
-        }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        assert_eq!(res.n(), self.n() as u32);
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(res.base2k(), a.base2k());
+        assert!(res.rank() >= a.rank());

         (0..(a.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_add_inplace(&mut self_mut.data, i, &a_ref.data, i);
+            self.vec_znx_add_inplace(res.data_mut(), i, a.data(), i);
         });

-        self.set_k(set_k_unary(self, a))
+        res.set_k(set_k_unary(res, a))
+    }
 }

-    fn sub<A, B, BACKEND: Backend>(&mut self, module: &Module<BACKEND>, a: &A, b: &B)
+impl<BE: Backend> GLWEAdd for Module<BE> where Self: ModuleN + VecZnxAdd + VecZnxCopy + VecZnxAddInplace {}

+pub trait GLWESub
 where
-        A: GLWEToRef + GLWEInfos,
+    Self: ModuleN + VecZnxSub + VecZnxCopy + VecZnxNegateInplace + VecZnxSubInplace + VecZnxSubNegateInplace,
-        B: GLWEToRef + GLWEInfos,
-        Module<BACKEND>: VecZnxSub + VecZnxCopy + VecZnxNegateInplace,
 {
-        #[cfg(debug_assertions)]
+    fn glwe_sub<R, A, B>(&self, res: &mut R, a: &A, b: &B)
+    where
+        R: GLWEToMut,
+        A: GLWEToRef,
+        B: GLWEToRef,
     {
-            assert_eq!(a.n(), self.n());
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-            assert_eq!(b.n(), self.n());
+        let a: &GLWE<&[u8]> = &a.to_ref();
+        let b: &GLWE<&[u8]> = &b.to_ref();

+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(b.n(), self.n() as u32);
         assert_eq!(a.base2k(), b.base2k());
-            assert!(self.rank() >= a.rank().max(b.rank()));
+        assert!(res.rank() >= a.rank().max(b.rank()));
-        }

         let min_col: usize = (a.rank().min(b.rank()) + 1).into();
         let max_col: usize = (a.rank().max(b.rank() + 1)).into();
-        let self_col: usize = (self.rank() + 1).into();
+        let self_col: usize = (res.rank() + 1).into();

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
-        let b_ref: &GLWE<&[u8]> = &b.to_ref();

         (0..min_col).for_each(|i| {
-            module.vec_znx_sub(&mut self_mut.data, i, &a_ref.data, i, &b_ref.data, i);
+            self.vec_znx_sub(res.data_mut(), i, a.data(), i, b.data(), i);
         });

         if a.rank() > b.rank() {
             (min_col..max_col).for_each(|i| {
-                module.vec_znx_copy(&mut self_mut.data, i, &a_ref.data, i);
+                self.vec_znx_copy(res.data_mut(), i, a.data(), i);
             });
         } else {
             (min_col..max_col).for_each(|i| {
-                module.vec_znx_copy(&mut self_mut.data, i, &b_ref.data, i);
+                self.vec_znx_copy(res.data_mut(), i, b.data(), i);
-                module.vec_znx_negate_inplace(&mut self_mut.data, i);
+                self.vec_znx_negate_inplace(res.data_mut(), i);
             });
         }

-        let size: usize = self_mut.size();
+        let size: usize = res.size();
         (max_col..self_col).for_each(|i| {
             (0..size).for_each(|j| {
-                self_mut.data.zero_at(i, j);
+                res.data.zero_at(i, j);
             });
         });

-        self.set_base2k(a.base2k());
+        res.set_base2k(a.base2k());
-        self.set_k(set_k_binary(self, a, b));
+        res.set_k(set_k_binary(res, a, b));
     }

-    fn sub_inplace_ab<A, BACKEND: Backend>(&mut self, module: &Module<BACKEND>, a: &A)
+    fn glwe_sub_inplace<R, A>(&self, res: &mut R, a: &A)
     where
-        A: GLWEToRef + GLWEInfos,
+        R: GLWEToMut,
-        Module<BACKEND>: VecZnxSubInplace,
+        A: GLWEToRef,
     {
-        #[cfg(debug_assertions)]
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-        {
+        let a: &GLWE<&[u8]> = &a.to_ref();
-            assert_eq!(a.n(), self.n());
-            assert_eq!(self.base2k(), a.base2k());
-            assert!(self.rank() >= a.rank())
-        }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        assert_eq!(res.n(), self.n() as u32);
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(res.base2k(), a.base2k());
+        assert!(res.rank() >= a.rank());

         (0..(a.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_sub_inplace(&mut self_mut.data, i, &a_ref.data, i);
+            self.vec_znx_sub_inplace(res.data_mut(), i, a.data(), i);
         });

-        self.set_k(set_k_unary(self, a))
+        res.set_k(set_k_unary(res, a))
     }

-    fn sub_inplace_ba<A, BACKEND: Backend>(&mut self, module: &Module<BACKEND>, a: &A)
+    fn glwe_sub_negate_inplace<R, A>(&self, res: &mut R, a: &A)
     where
-        A: GLWEToRef + GLWEInfos,
+        R: GLWEToMut,
-        Module<BACKEND>: VecZnxSubNegateInplace,
+        A: GLWEToRef,
     {
-        #[cfg(debug_assertions)]
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-        {
+        let a: &GLWE<&[u8]> = &a.to_ref();
-            assert_eq!(a.n(), self.n());
-            assert_eq!(self.base2k(), a.base2k());
-            assert!(self.rank() >= a.rank())
-        }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        assert_eq!(res.n(), self.n() as u32);
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(res.base2k(), a.base2k());
+        assert!(res.rank() >= a.rank());

         (0..(a.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_sub_negate_inplace(&mut self_mut.data, i, &a_ref.data, i);
+            self.vec_znx_sub_negate_inplace(res.data_mut(), i, a.data(), i);
         });

-        self.set_k(set_k_unary(self, a))
+        res.set_k(set_k_unary(res, a))
+    }
 }

-    fn rotate<A, B: Backend>(&mut self, module: &Module<B>, k: i64, a: &A)
+pub trait GLWERotate<BE: Backend>
 where
-        A: GLWEToRef + GLWEInfos,
+    Self: ModuleN + VecZnxRotate + VecZnxRotateInplace<BE>,
-        Module<B>: VecZnxRotate,
 {
-        #[cfg(debug_assertions)]
+    fn glwe_rotate<R, A>(&self, k: i64, res: &mut R, a: &A)
+    where
+        R: GLWEToMut,
+        A: GLWEToRef,
     {
-            assert_eq!(a.n(), self.n());
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-            assert_eq!(self.rank(), a.rank())
+        let a: &GLWE<&[u8]> = &a.to_ref();
-        }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        assert_eq!(a.n(), self.n() as u32);
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
+        assert_eq!(res.rank(), a.rank());

         (0..(a.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_rotate(k, &mut self_mut.data, i, &a_ref.data, i);
+            self.vec_znx_rotate(k, res.data_mut(), i, a.data(), i);
         });

-        self.set_base2k(a.base2k());
+        res.set_base2k(a.base2k());
-        self.set_k(set_k_unary(self, a))
+        res.set_k(set_k_unary(res, a))
     }

-    fn rotate_inplace<B: Backend>(&mut self, module: &Module<B>, k: i64, scratch: &mut Scratch<B>)
+    fn glwe_rotate_inplace<R>(&self, k: i64, res: &mut R, scratch: &mut Scratch<BE>)
     where
-        Module<B>: VecZnxRotateInplace<B>,
+        R: GLWEToMut,
+        Scratch<BE>: ScratchTakeCore<BE>,
     {
-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();

-        (0..(self_mut.rank() + 1).into()).for_each(|i| {
+        (0..(res.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_rotate_inplace(k, &mut self_mut.data, i, scratch);
+            self.vec_znx_rotate_inplace(k, res.data_mut(), i, scratch);
         });
     }
+}

-    fn mul_xp_minus_one<A, B: Backend>(&mut self, module: &Module<B>, k: i64, a: &A)
+pub trait GLWEMulXpMinusOne<BE: Backend>
 where
-        A: GLWEToRef + GLWEInfos,
+    Self: ModuleN + VecZnxMulXpMinusOne + VecZnxMulXpMinusOneInplace<BE>,
-        Module<B>: VecZnxMulXpMinusOne,
 {
-        #[cfg(debug_assertions)]
+    fn glwe_mul_xp_minus_one<R, A>(&self, k: i64, res: &mut R, a: &A)
-        {
-            assert_eq!(a.n(), self.n());
-            assert_eq!(self.rank(), a.rank())
-        }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();

-        (0..(a.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_mul_xp_minus_one(k, &mut self_mut.data, i, &a_ref.data, i);
-        });

-        self.set_base2k(a.base2k());
-        self.set_k(set_k_unary(self, a))
-    }

-    fn mul_xp_minus_one_inplace<B: Backend>(&mut self, module: &Module<B>, k: i64, scratch: &mut Scratch<B>)
     where
-        Module<B>: VecZnxMulXpMinusOneInplace<B>,
+        R: GLWEToMut,
+        A: GLWEToRef,
     {
-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
+        let a: &GLWE<&[u8]> = &a.to_ref();

-        (0..(self_mut.rank() + 1).into()).for_each(|i| {
+        assert_eq!(res.n(), self.n() as u32);
-            module.vec_znx_mul_xp_minus_one_inplace(k, &mut self_mut.data, i, scratch);
+        assert_eq!(a.n(), self.n() as u32);
-        });
+        assert_eq!(res.rank(), a.rank());

+        for i in 0..res.rank().as_usize() + 1 {
+            self.vec_znx_mul_xp_minus_one(k, res.data_mut(), i, a.data(), i);
         }

-    fn copy<A, M>(&mut self, module: &M, a: &A)
+        res.set_base2k(a.base2k());
+        res.set_k(set_k_unary(res, a))
+    }

+    fn glwe_mul_xp_minus_one_inplace<R>(&self, k: i64, res: &mut R, scratch: &mut Scratch<BE>)
     where
-        A: GLWEToRef + GLWEInfos,
+        R: GLWEToMut,
-        M: VecZnxCopy,
     {
-        #[cfg(debug_assertions)]
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-        {
-            assert_eq!(self.n(), a.n());
+        assert_eq!(res.n(), self.n() as u32);
-            assert_eq!(self.rank(), a.rank());
+        for i in 0..res.rank().as_usize() + 1 {
+            self.vec_znx_mul_xp_minus_one_inplace(k, res.data_mut(), i, scratch);
+        }
+    }
 }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+pub trait GLWECopy
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();

-        (0..(self_mut.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_copy(&mut self_mut.data, i, &a_ref.data, i);
-        });

-        self.set_k(a.k().min(self.max_k()));
-        self.set_base2k(a.base2k());
-    }

-    fn rsh<B: Backend>(&mut self, module: &Module<B>, k: usize, scratch: &mut Scratch<B>)
 where
-        Module<B>: VecZnxRshInplace<B>,
+    Self: ModuleN + VecZnxCopy,
 {
-        let base2k: usize = self.base2k().into();
+    fn glwe_copy<R, A>(&self, res: &mut R, a: &A)
-        (0..(self.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_rsh_inplace(base2k, k, &mut self.to_mut().data, i, scratch);
-        })
-    }

-    fn normalize<A, B: Backend>(&mut self, module: &Module<B>, a: &A, scratch: &mut Scratch<B>)
     where
-        A: GLWEToRef + GLWEInfos,
+        R: GLWEToMut,
-        Module<B>: VecZnxNormalize<B>,
+        A: GLWEToRef,
     {
-        #[cfg(debug_assertions)]
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-        {
+        let a: &GLWE<&[u8]> = &a.to_ref();
-            assert_eq!(self.n(), a.n());
-            assert_eq!(self.rank(), a.rank());
+        assert_eq!(res.n(), self.n() as u32);
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(res.rank(), a.rank());

+        for i in 0..res.rank().as_usize() + 1 {
+            self.vec_znx_copy(res.data_mut(), i, a.data(), i);
         }

-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+        res.set_k(a.k().min(res.max_k()));
-        let a_ref: &GLWE<&[u8]> = &a.to_ref();
+        res.set_base2k(a.base2k());
+    }
-        (0..(self_mut.rank() + 1).into()).for_each(|i| {
-            module.vec_znx_normalize(
-                a.base2k().into(),
-                &mut self_mut.data,
-                i,
-                a.base2k().into(),
-                &a_ref.data,
-                i,
-                scratch,
-            );
-        });
-        self.set_base2k(a.base2k());
-        self.set_k(a.k().min(self.k()));
     }

-    fn normalize_inplace<B: Backend>(&mut self, module: &Module<B>, scratch: &mut Scratch<B>)
+pub trait GLWEShift<BE: Backend>
 where
-        Module<B>: VecZnxNormalizeInplace<B>,
+    Self: ModuleN + VecZnxRshInplace<BE>,
 {
-        let self_mut: &mut GLWE<&mut [u8]> = &mut self.to_mut();
+    fn glwe_rsh<R>(&self, k: usize, res: &mut R, scratch: &mut Scratch<BE>)
-        (0..(self_mut.rank() + 1).into()).for_each(|i| {
+    where
-            module.vec_znx_normalize_inplace(self_mut.base2k().into(), &mut self_mut.data, i, scratch);
+        R: GLWEToMut,
-        });
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
+        let base2k: usize = res.base2k().into();
+        for i in 0..res.rank().as_usize() + 1 {
+            self.vec_znx_rsh_inplace(base2k, k, res.data_mut(), i, scratch);
+        }
     }
 }

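The hunk above replaces the ciphertext-side `GLWEOperations` trait with per-operation traits (`GLWEAdd`, `GLWESub`, `GLWERotate`, `GLWEMulXpMinusOne`, `GLWECopy`, `GLWEShift`) implemented by `Module<BE>`. A hedged sketch of how call sites change, assuming `module`, `res`, `a` and `b` are GLWE values with matching degree, base2k and rank (names are illustrative, not taken from the diff):

// Old call shape: the operation hangs off the ciphertext and borrows the module.
//     res.add(&module, &a, &b);
//     res.add_inplace(&module, &a);
//
// New call shape: the operation hangs off the module and borrows the ciphertexts.
//     module.glwe_add(&mut res, &a, &b);
//     module.glwe_add_inplace(&mut res, &a);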
@@ -324,6 +296,50 @@ impl GLWE<Vec<u8>> {
     }
 }

+pub trait GLWENormalize<BE: Backend>
+where
+    Self: ModuleN + VecZnxNormalize<BE> + VecZnxNormalizeInplace<BE>,
+{
+    fn glwe_normalize<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
+    where
+        R: GLWEToMut,
+        A: GLWEToRef,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
+        let a: &GLWE<&[u8]> = &a.to_ref();
+
+        assert_eq!(res.n(), self.n() as u32);
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(res.rank(), a.rank());
+
+        for i in 0..res.rank().as_usize() + 1 {
+            self.vec_znx_normalize(
+                res.base2k().into(),
+                res.data_mut(),
+                i,
+                a.base2k().into(),
+                a.data(),
+                i,
+                scratch,
+            );
+        }
+
+        res.set_k(a.k().min(res.k()));
+    }
+
+    fn glwe_normalize_inplace<R>(&self, res: &mut R, scratch: &mut Scratch<BE>)
+    where
+        R: GLWEToMut,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
+        for i in 0..res.rank().as_usize() + 1 {
+            self.vec_znx_normalize_inplace(res.base2k().into(), res.data_mut(), i, scratch);
+        }
+    }
+}

 // c = op(a, b)
 fn set_k_binary(c: &impl GLWEInfos, a: &impl GLWEInfos, b: &impl GLWEInfos) -> TorusPrecision {
     // If either operands is a ciphertext
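The new `GLWENormalize` entry points follow the same module-side pattern and additionally thread a scratch buffer: `glwe_normalize` re-encodes `a` from its own base2k into the base2k of `res` and caps the result's precision, while `glwe_normalize_inplace` renormalizes `res` in place. A sketch of the call shape, assuming `module`, `res`, `a` and `scratch` already exist with compatible layouts:

//     module.glwe_normalize(&mut res, &a, &mut scratch);
//     module.glwe_normalize_inplace(&mut res, &mut scratch);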
@@ -8,3 +8,12 @@ pub trait ModuleNew<B: Backend> {
 pub trait ModuleN {
     fn n(&self) -> usize;
 }
+
+pub trait ModuleLogN
+where
+    Self: ModuleN,
+{
+    fn log_n(&self) -> usize {
+        (u64::BITS - (self.n() as u64 - 1).leading_zeros()) as usize
+    }
+}
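The default `log_n` above relies on a leading-zeros bit trick. A standalone check of the formula, independent of the poulpy traits (function name and test values are illustrative):

// For a power-of-two ring degree n, u64::BITS - (n - 1).leading_zeros() equals log2(n).
fn log2_of_pow2(n: u64) -> usize {
    (u64::BITS - (n - 1).leading_zeros()) as usize
}

fn main() {
    assert_eq!(log2_of_pow2(1024), 10);
    assert_eq!(log2_of_pow2(2048), 11);
    assert_eq!(log2_of_pow2(65536), 16);
}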
@@ -2,7 +2,10 @@ use std::{fmt::Display, marker::PhantomData, ptr::NonNull};

 use rand_distr::num_traits::Zero;

-use crate::GALOISGENERATOR;
+use crate::{
+    GALOISGENERATOR,
+    api::{ModuleLogN, ModuleN},
+};

 #[allow(clippy::missing_safety_doc)]
 pub trait Backend: Sized {
@@ -75,36 +78,49 @@ impl<B: Backend> Module<B> {
     pub fn log_n(&self) -> usize {
         (usize::BITS - (self.n() - 1).leading_zeros()) as _
     }

-    #[inline]
-    pub fn cyclotomic_order(&self) -> u64 {
-        (self.n() << 1) as _
     }

+pub trait CyclotomicOrder
+where
+    Self: ModuleN,
+{
+    fn cyclotomic_order(&self) -> i64 {
+        (self.n() << 1) as _
+    }
+}
+
+impl<BE: Backend> ModuleLogN for Module<BE> where Self: ModuleN {}
+
+impl<BE: Backend> CyclotomicOrder for Module<BE> where Self: ModuleN {}
+
+pub trait GaloisElement
+where
+    Self: CyclotomicOrder,
+{
     // Returns GALOISGENERATOR^|generator| * sign(generator)
-    #[inline]
+    fn galois_element(&self, generator: i64) -> i64 {
-    pub fn galois_element(&self, generator: i64) -> i64 {
         if generator == 0 {
             return 1;
         }
-        ((mod_exp_u64(GALOISGENERATOR, generator.unsigned_abs() as usize) & (self.cyclotomic_order() - 1)) as i64)
-            * generator.signum()
+        let g_exp: u64 = mod_exp_u64(GALOISGENERATOR, generator.unsigned_abs() as usize) & (self.cyclotomic_order() - 1) as u64;
+        g_exp as i64 * generator.signum()
     }

     // Returns gen^-1
-    #[inline]
+    fn galois_element_inv(&self, gal_el: i64) -> i64 {
-    pub fn galois_element_inv(&self, gal_el: i64) -> i64 {
         if gal_el == 0 {
             panic!("cannot invert 0")
         }
-        ((mod_exp_u64(
-            gal_el.unsigned_abs(),
+        let g_exp: u64 =
-            (self.cyclotomic_order() - 1) as usize,
+            mod_exp_u64(GALOISGENERATOR, (self.cyclotomic_order() - 1) as usize) & (self.cyclotomic_order() - 1) as u64;
-        ) & (self.cyclotomic_order() - 1)) as i64)
+        g_exp as i64 * gal_el.signum()
-            * gal_el.signum()
     }
 }

+impl<BE: Backend> GaloisElement for Module<BE> where Self: CyclotomicOrder {}

 impl<B: Backend> Drop for Module<B> {
     fn drop(&mut self) {
         unsafe { B::destroy(self.ptr) }
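The `galois_element` default method above computes GALOISGENERATOR^|generator| reduced modulo the cyclotomic order 2n (the `& (2n - 1)` mask is exactly that reduction, since 2n is a power of two), with the sign of the generator carried separately. A standalone sketch of the arithmetic; the generator value 5 and the explicit-modulus `mod_exp` below are illustrative assumptions, not the crate's `GALOISGENERATOR` or `mod_exp_u64`:

// Square-and-multiply exponentiation with an explicit modulus (illustrative only).
fn mod_exp(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    let mut acc: u64 = 1;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    acc
}

fn main() {
    let n: u64 = 1024; // ring degree; the cyclotomic order is 2n
    let two_n: u64 = n << 1;
    let generator: i64 = 3;
    // 5^|generator| mod 2n; masking with (2n - 1) in the diff performs the same
    // reduction because 2n is a power of two.
    let g_exp = mod_exp(5, generator.unsigned_abs(), two_n);
    let gal_el = g_exp as i64 * generator.signum();
    assert_eq!(gal_el, 125); // 5^3 = 125 < 2048, so no wrap-around occurs here
    println!("galois element for generator {generator}: {gal_el}");
}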