mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 13:16:44 +01:00)

Commit message: wip
@@ -13,13 +13,13 @@ use crate::{
 };
 
 impl AutomorphismKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
         assert_eq!(module.n() as u32, infos.n());
-        GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
+        GLWESwitchingKeyCompressed::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
     }
 }
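Every hunk in this commit applies the same mechanical rename: the sizing helpers `encrypt_sk_scratch_space` / `encrypt_pk_scratch_space` become `encrypt_sk_tmp_bytes` / `encrypt_pk_tmp_bytes`, and the panic messages that mention them are updated to match. The helpers keep returning a byte count composed from sub-components, as in the hunk above (the switching-key requirement plus one extra secret). The sketch below only models that size-then-allocate pattern; every name in it is a hypothetical stand-in, not part of poulpy's API.

// Minimal, self-contained model of the sizing pattern above. Every identifier here
// (sub_key_tmp_bytes, secret_bytes_of, encrypt_sk_tmp_bytes) is a hypothetical
// stand-in chosen for illustration; none of this is the poulpy API.

const WORD: usize = core::mem::size_of::<i64>();

// Hypothetical temporary-byte requirement of the inner key-switching step.
fn sub_key_tmp_bytes(n: usize, rank_out: usize) -> usize {
    n * rank_out * WORD
}

// Hypothetical byte size of one secret of degree n and the given rank.
fn secret_bytes_of(n: usize, rank_out: usize) -> usize {
    n * rank_out * WORD
}

// Mirrors the shape of the helper in the hunk: inner requirement + one extra secret.
fn encrypt_sk_tmp_bytes(n: usize, rank_out: usize) -> usize {
    sub_key_tmp_bytes(n, rank_out) + secret_bytes_of(n, rank_out)
}

fn main() {
    let (n, rank_out) = (1usize << 10, 2usize);
    // The caller sizes one temporary buffer up front from the helper's return value.
    let scratch = vec![0u8; encrypt_sk_tmp_bytes(n, rank_out)];
    println!("temporary buffer: {} bytes", scratch.len());
}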
@@ -63,10 +63,10 @@ where
         assert_eq!(res.rank_out(), res.rank_in());
         assert_eq!(sk.rank(), res.rank_out());
         assert!(
-            scratch.available() >= AutomorphismKeyCompressed::encrypt_sk_scratch_space(self, res),
-            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {}",
+            scratch.available() >= AutomorphismKeyCompressed::encrypt_sk_tmp_bytes(self, res),
+            "scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {}",
             scratch.available(),
-            AutomorphismKeyCompressed::encrypt_sk_scratch_space(self, res)
+            AutomorphismKeyCompressed::encrypt_sk_tmp_bytes(self, res)
         )
     }
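The caller-side guard above recurs in every encryption path touched by the commit: before encrypting, the available scratch is compared against the value returned by the renamed helper, and the panic message names the helper so an undersized buffer is easy to diagnose. Below is a self-contained sketch of that guard; `Scratch` and `required_tmp_bytes` are hypothetical stand-ins rather than poulpy types.

// Self-contained illustration of the guard pattern in the hunk above: the caller's
// scratch must be at least as large as the value reported by the sizing helper.

struct Scratch {
    buf: Vec<u8>,
    used: usize,
}

impl Scratch {
    fn available(&self) -> usize {
        self.buf.len() - self.used
    }
}

// Hypothetical fixed requirement standing in for encrypt_sk_tmp_bytes(...).
fn required_tmp_bytes() -> usize {
    4096
}

fn encrypt_checked(scratch: &Scratch) {
    assert!(
        scratch.available() >= required_tmp_bytes(),
        "scratch.available(): {} < encrypt_sk_tmp_bytes: {}",
        scratch.available(),
        required_tmp_bytes()
    );
    // ... encryption would proceed here, working inside the caller's buffer ...
}

fn main() {
    let scratch = Scratch { buf: vec![0u8; 8192], used: 0 };
    encrypt_checked(&scratch);
    println!("guard passed");
}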
@@ -34,12 +34,12 @@ impl<D: DataMut> GGLWECompressed<D> {
 }
 
 impl GGLWECompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
-        GGLWE::encrypt_sk_scratch_space(module, infos)
+        GGLWE::encrypt_sk_tmp_bytes(module, infos)
     }
 }
@@ -106,10 +106,10 @@ where
         assert_eq!(res.n(), sk.n());
         assert_eq!(pt.n() as u32, sk.n());
         assert!(
-            scratch.available() >= GGLWECompressed::encrypt_sk_scratch_space(self, res),
-            "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space: {}",
+            scratch.available() >= GGLWECompressed::encrypt_sk_tmp_bytes(self, res),
+            "scratch.available: {} < GGLWECiphertext::encrypt_sk_tmp_bytes: {}",
             scratch.available(),
-            GGLWECompressed::encrypt_sk_scratch_space(self, res)
+            GGLWECompressed::encrypt_sk_tmp_bytes(self, res)
         );
         assert!(
             res.dnum().0 * res.dsize().0 * res.base2k().0 <= res.k().0,
@@ -14,12 +14,12 @@ use crate::{
 };
 
 impl GLWESwitchingKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
-        (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+        (GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
             + GLWESecretPrepared::bytes_of(module, infos.rank_out())
     }
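A note on the `|` in the sizing expression above (it also appears in the GGLWE, GLWESwitchingKey, and GLWEToLWESwitchingKey hunks further down): between two `usize` byte counts it is plain bitwise OR, which always yields at least the larger operand and at most their sum. Whether the intent is a cheap over-approximation of the maximum for two temporaries that never coexist cannot be read from the diff alone; the snippet below only checks the arithmetic bounds.

// Bitwise OR on unsigned byte counts: max(a, b) <= (a | b) <= a + b.
// This only demonstrates the arithmetic; it does not claim anything about how the
// operator is meant to be read in the sizing formulas above.
fn main() {
    for &(a, b) in &[(4096usize, 1536usize), (1 << 20, 3 << 10), (7, 7)] {
        let or = a | b;
        assert!(or >= a.max(b), "OR is at least the larger operand");
        assert!(or <= a + b, "OR never exceeds the sum");
        println!("a = {a}, b = {b}, a | b = {or}");
    }
}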
@@ -91,10 +91,10 @@ where
         assert!(sk_in.n().0 <= self.n() as u32);
         assert!(sk_out.n().0 <= self.n() as u32);
         assert!(
-            scratch.available() >= GLWESwitchingKey::encrypt_sk_scratch_space(self, res),
-            "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
+            scratch.available() >= GLWESwitchingKey::encrypt_sk_tmp_bytes(self, res),
+            "scratch.available()={} < GLWESwitchingKey::encrypt_sk_tmp_bytes={}",
             scratch.available(),
-            GLWESwitchingKey::encrypt_sk_scratch_space(self, res)
+            GLWESwitchingKey::encrypt_sk_tmp_bytes(self, res)
         )
     }
@@ -16,12 +16,12 @@ use crate::{
 };
 
 impl TensorKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
     {
-        TensorKey::encrypt_sk_scratch_space(module, infos)
+        TensorKey::encrypt_sk_tmp_bytes(module, infos)
     }
 }
@@ -14,12 +14,12 @@ use crate::{
 };
 
 impl GGSWCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
-        GGSW::encrypt_sk_scratch_space(module, infos)
+        GGSW::encrypt_sk_tmp_bytes(module, infos)
     }
 }
@@ -14,12 +14,12 @@ use crate::{
 };
 
 impl GLWECompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
-        GLWE::encrypt_sk_scratch_space(module, infos)
+        GLWE::encrypt_sk_tmp_bytes(module, infos)
     }
 }
@@ -14,7 +14,7 @@ use crate::layouts::{
 };
 
 impl AutomorphismKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<BE>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
@@ -24,10 +24,10 @@ impl AutomorphismKey<Vec<u8>> {
             infos.rank_out(),
             "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
         );
-        GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
+        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
     }
 
-    pub fn encrypt_pk_scratch_space<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -36,7 +36,7 @@ impl AutomorphismKey<Vec<u8>> {
             _infos.rank_out(),
             "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
         );
-        GLWESwitchingKey::encrypt_pk_scratch_space(module, _infos)
+        GLWESwitchingKey::encrypt_pk_tmp_bytes(module, _infos)
     }
 }
@@ -119,10 +119,10 @@ where
         assert_eq!(res.rank_out(), res.rank_in());
         assert_eq!(sk.rank(), res.rank_out());
         assert!(
-            scratch.available() >= AutomorphismKey::encrypt_sk_scratch_space(self, res),
-            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {:?}",
+            scratch.available() >= AutomorphismKey::encrypt_sk_tmp_bytes(self, res),
+            "scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {:?}",
             scratch.available(),
-            AutomorphismKey::encrypt_sk_scratch_space(self, res)
+            AutomorphismKey::encrypt_sk_tmp_bytes(self, res)
         )
     }
@@ -13,16 +13,16 @@ use crate::{
 };
 
 impl GGLWE<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
-        GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
+        GLWE::encrypt_sk_tmp_bytes(module, &infos.glwe_layout())
             + (GLWEPlaintext::bytes_of_from_infos(module, &infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
     }
 
-    pub fn encrypt_pk_scratch_space<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -88,12 +88,12 @@ where
         assert_eq!(res.n(), sk.n());
         assert_eq!(pt.n() as u32, sk.n());
         assert!(
-            scratch.available() >= GGLWE::encrypt_sk_scratch_space(self, res),
-            "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(self, res.rank()={}, res.size()={}): {}",
+            scratch.available() >= GGLWE::encrypt_sk_tmp_bytes(self, res),
+            "scratch.available: {} < GGLWECiphertext::encrypt_sk_tmp_bytes(self, res.rank()={}, res.size()={}): {}",
             scratch.available(),
             res.rank_out(),
             res.size(),
-            GGLWE::encrypt_sk_scratch_space(self, res)
+            GGLWE::encrypt_sk_tmp_bytes(self, res)
         );
         assert!(
             res.dnum().0 * res.dsize().0 * res.base2k().0 <= res.k().0,
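The second assertion above bounds the product of the three decomposition parameters by `k`. The diff does not show what `dnum`, `dsize`, and `base2k` denote individually, so no interpretation is attempted here; the snippet below simply lifts the inequality into a standalone check, with plain integers standing in for the wrapped parameter types.

// Standalone restatement of the invariant asserted above. The parameter names are
// hypothetical stand-ins for the wrapped types (Dnum, Dsize, Base2K, K) in the diff.
fn decomposition_fits(dnum: u32, dsize: u32, base2k: u32, k: u32) -> bool {
    dnum * dsize * base2k <= k
}

fn main() {
    // Example values chosen purely for illustration.
    assert!(decomposition_fits(3, 1, 17, 54));
    assert!(!decomposition_fits(4, 1, 17, 54));
    println!("decomposition bound checks passed");
}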
@@ -13,21 +13,21 @@ use crate::layouts::{
 };
 
 impl GLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
-        (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+        (GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
             + GLWESecretPrepared::bytes_of_from_infos(module, &infos.glwe_layout())
     }
 
-    pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        GGLWE::encrypt_pk_scratch_space(module, _infos)
+        GGLWE::encrypt_pk_tmp_bytes(module, _infos)
     }
 }
@@ -66,10 +66,10 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
         assert!(sk_in.n().0 <= module.n() as u32);
         assert!(sk_out.n().0 <= module.n() as u32);
         assert!(
-            scratch.available() >= GLWESwitchingKey::encrypt_sk_scratch_space(module, self),
-            "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
+            scratch.available() >= GLWESwitchingKey::encrypt_sk_tmp_bytes(module, self),
+            "scratch.available()={} < GLWESwitchingKey::encrypt_sk_tmp_bytes={}",
             scratch.available(),
-            GLWESwitchingKey::encrypt_sk_scratch_space(module, self)
+            GLWESwitchingKey::encrypt_sk_tmp_bytes(module, self)
         )
     }
@@ -14,7 +14,7 @@ use crate::layouts::{
 };
 
 impl TensorKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
@@ -24,7 +24,7 @@ impl TensorKey<Vec<u8>> {
             + module.bytes_of_vec_znx_big(1, 1)
             + module.bytes_of_vec_znx_dft(1, 1)
             + GLWESecret::bytes_of(Degree(module.n() as u32), Rank(1))
-            + GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
+            + GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
     }
 }
@@ -14,13 +14,13 @@ use crate::{
 };
 
 impl GGSW<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size = infos.size();
-        GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
+        GLWE::encrypt_sk_tmp_bytes(module, &infos.glwe_layout())
             + VecZnx::bytes_of(module.n(), (infos.rank() + 1).into(), size)
             + VecZnx::bytes_of(module.n(), 1, size)
             + module.bytes_of_vec_znx_dft((infos.rank() + 1).into(), size)
@@ -19,7 +19,7 @@ use crate::{
 };
 
 impl GLWE<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
@@ -28,7 +28,7 @@ impl GLWE<Vec<u8>> {
         assert_eq!(module.n() as u32, infos.n());
         module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.bytes_of_vec_znx_dft(1, size)
     }
 
-    pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
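Taken together with the GGSW hunk a little further up, the GLWE helper above is the base case that the higher-level helpers build on: GGSW (and likewise GGLWE and the switching keys) delegate to the GLWE requirement and then add their own buffers. A self-contained sketch of that layering follows, with all sizes reduced to hypothetical word counts; none of these functions are poulpy's.

// Hypothetical stand-ins: byte sizes reduced to n * columns * limbs * 8 bytes. The
// point is only the layering: the GGSW requirement is the GLWE requirement plus extras.
const WORD: usize = 8;

fn vec_znx_bytes(n: usize, cols: usize, size: usize) -> usize {
    n * cols * size * WORD
}

// Base case, shaped like the GLWE formula above: normalize temp + two single-column
// buffers + one DFT buffer (all modelled identically here).
fn glwe_encrypt_sk_tmp_bytes(n: usize, size: usize) -> usize {
    vec_znx_bytes(n, 1, size) + 2 * vec_znx_bytes(n, 1, size) + vec_znx_bytes(n, 1, size)
}

// Derived case, shaped like the GGSW formula: delegate to GLWE, then add wider buffers.
fn ggsw_encrypt_sk_tmp_bytes(n: usize, rank: usize, size: usize) -> usize {
    glwe_encrypt_sk_tmp_bytes(n, size)
        + vec_znx_bytes(n, rank + 1, size)
        + vec_znx_bytes(n, 1, size)
        + vec_znx_bytes(n, rank + 1, size)
}

fn main() {
    let (n, rank, size) = (1usize << 11, 1usize, 6usize);
    assert!(ggsw_encrypt_sk_tmp_bytes(n, rank, size) >= glwe_encrypt_sk_tmp_bytes(n, size));
    println!(
        "GLWE: {} bytes, GGSW: {} bytes",
        glwe_encrypt_sk_tmp_bytes(n, size),
        ggsw_encrypt_sk_tmp_bytes(n, rank, size)
    );
}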
@@ -147,10 +147,10 @@ where
         assert_eq!(sk.n(), self.n() as u32);
         assert_eq!(pt.n(), self.n() as u32);
         assert!(
-            scratch.available() >= GLWE::encrypt_sk_scratch_space(self, &res),
-            "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
+            scratch.available() >= GLWE::encrypt_sk_tmp_bytes(self, &res),
+            "scratch.available(): {} < GLWECiphertext::encrypt_sk_tmp_bytes: {}",
             scratch.available(),
-            GLWE::encrypt_sk_scratch_space(self, &res)
+            GLWE::encrypt_sk_tmp_bytes(self, &res)
         )
     }
@@ -209,10 +209,10 @@ where
         assert_eq!(res.n(), self.n() as u32);
         assert_eq!(sk.n(), self.n() as u32);
         assert!(
-            scratch.available() >= GLWE::encrypt_sk_scratch_space(self, &res),
-            "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
+            scratch.available() >= GLWE::encrypt_sk_tmp_bytes(self, &res),
+            "scratch.available(): {} < GLWECiphertext::encrypt_sk_tmp_bytes: {}",
             scratch.available(),
-            GLWE::encrypt_sk_scratch_space(self, &res)
+            GLWE::encrypt_sk_tmp_bytes(self, &res)
         )
     }
@@ -45,7 +45,7 @@ where
         }
 
         // Its ok to allocate scratch space here since pk is usually generated only once.
-        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::encrypt_sk_scratch_space(self, res));
+        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::encrypt_sk_tmp_bytes(self, res));
 
         let mut tmp: GLWE<Vec<u8>> = GLWE::alloc_from_infos(res);
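The comment in the hunk above records the design choice behind the two call patterns in this commit: hot encryption paths take a caller-provided scratch and only assert that it is large enough, while the public-key path, which typically runs once, allocates its own buffer sized by `encrypt_sk_tmp_bytes`. A self-contained sketch of the two conventions follows; every type in it is a hypothetical stand-in, not a poulpy type.

// Hypothetical model of the two scratch conventions: a reusable, caller-owned buffer
// for repeated encryptions, and a one-off local allocation for rare operations.

struct Scratch {
    buf: Vec<u8>,
}

impl Scratch {
    fn alloc(bytes: usize) -> Self {
        Scratch { buf: vec![0u8; bytes] }
    }
    fn available(&self) -> usize {
        self.buf.len()
    }
}

// Hypothetical fixed requirement standing in for the sizing helper.
fn encrypt_sk_tmp_bytes() -> usize {
    4096
}

// Hot path: the caller owns the scratch and reuses it across many calls; real code
// would borrow it mutably while encrypting.
fn encrypt_hot_path(scratch: &Scratch) {
    assert!(scratch.available() >= encrypt_sk_tmp_bytes());
    // ... work inside the caller's buffer ...
}

// One-off path (e.g. pk generation): allocate locally, sized exactly by the helper.
fn encrypt_one_off() {
    let scratch = Scratch::alloc(encrypt_sk_tmp_bytes());
    encrypt_hot_path(&scratch);
}

fn main() {
    let reused = Scratch::alloc(encrypt_sk_tmp_bytes());
    for _ in 0..3 {
        encrypt_hot_path(&reused); // buffer allocated once, reused
    }
    encrypt_one_off();
    println!("both scratch conventions exercised");
}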
@@ -14,13 +14,13 @@ use crate::layouts::{
 };
 
 impl GLWEToLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
        A: GGLWEInfos,
        Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GLWESecretPrepared::bytes_of(module, infos.rank_in())
-            + (GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
+            + (GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
     }
 }
@@ -14,7 +14,7 @@ use crate::layouts::{
 };
 
 impl LWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
@@ -36,7 +36,7 @@ impl LWESwitchingKey<Vec<u8>> {
         );
         GLWESecret::bytes_of(Degree(module.n() as u32), Rank(1))
             + GLWESecretPrepared::bytes_of(module, Rank(1))
-            + GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
+            + GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
     }
 }
@@ -12,7 +12,7 @@ use poulpy_hal::{
 use crate::layouts::{Degree, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank};
 
 impl LWEToGLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
@@ -22,8 +22,7 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
             Rank(1),
             "rank_in != 1 is not supported for LWEToGLWESwitchingKey"
         );
-        GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
-            + GLWESecret::bytes_of(Degree(module.n() as u32), infos.rank_in())
+        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(Degree(module.n() as u32), infos.rank_in())
     }
 }