mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 13:16:44 +01:00)
Add cross-basek normalization (#90)
* added cross_basek_normalization
* updated method signatures to take layouts
* fixed cross-base normalization

fix #91
fix #93
committed by GitHub
parent 4da790ea6a
commit 37e13b965c
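The commit title refers to normalization across two limb bases: the same value can be stored as base-2^k limbs for one k on the input side and a different k on the output side (visible below where `vec_znx_big_normalize` now receives a base-2^k parameter for the output operand and another for the input operand). As a minimal, self-contained sketch of the underlying digit arithmetic (plain `i64` helpers invented for illustration, not the `poulpy_hal` API), re-expressing a value in a new base amounts to recomposing it and decomposing it again:

```rust
// Illustrative only: standalone signed-digit arithmetic, not the poulpy_hal API.
// A value is stored as `limbs` signed digits of `k` bits each (balanced
// representation), least-significant digit first:
//     x == sum_i digits[i] * 2^(i*k),  digits[i] in [-2^(k-1), 2^(k-1)).
fn decompose(mut x: i64, k: u32, limbs: usize) -> Vec<i64> {
    let base = 1i64 << k;
    let half = base >> 1;
    let mut digits = vec![0i64; limbs];
    for d in digits.iter_mut() {
        let mut r = x % base; // remainder in (-base, base)
        if r >= half {
            r -= base; // fold into the balanced range
        } else if r < -half {
            r += base;
        }
        *d = r;
        x = (x - r) / base; // exact division after centering
    }
    digits
}

// Recompose the integer from its signed digits.
fn recompose(digits: &[i64], k: u32) -> i64 {
    digits.iter().rev().fold(0i64, |acc, &d| (acc << k) + d)
}

fn main() {
    let x: i64 = 123_456_789;
    // The same value expressed with 12-bit limbs and with 17-bit limbs.
    let a = decompose(x, 12, 6);
    let b = decompose(x, 17, 4);
    assert_eq!(recompose(&a, 12), x);
    assert_eq!(recompose(&b, 17), x);
    // Cross-base normalization: move from one limb base to the other by
    // recomposing and decomposing again.
    assert_eq!(decompose(recompose(&a, 12), 17, 4), b);
}
```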
@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes,
         VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
@@ -12,18 +12,20 @@ use poulpy_hal::{
 use crate::{
     TakeGLWESecret, TakeGLWESecretPrepared,
     layouts::{
-        GLWESecret,
+        GGLWELayoutInfos, GLWEInfos, GLWESecret, LWEInfos,
         compressed::{GGLWEAutomorphismKeyCompressed, GGLWESwitchingKeyCompressed},
     },
 };
 
 impl GGLWEAutomorphismKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
     {
-        GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, basek, k, rank, rank)
-            + GLWESecret::bytes_of(module.n(), rank)
+        assert_eq!(module.n() as u32, infos.n());
+        GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos)
+            + GLWESecret::alloc_bytes_with(infos.n(), infos.rank_out())
     }
 }
 
@@ -49,7 +51,7 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxFillUniform
-            + VecZnxSubABInplace
+            + VecZnxSubInplace
             + VecZnxAddInplace
             + VecZnxNormalizeInplace<B>
             + VecZnxAddNormal
@@ -60,26 +62,21 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> {
     {
         #[cfg(debug_assertions)]
         {
-            use crate::layouts::Infos;
-
             assert_eq!(self.n(), sk.n());
             assert_eq!(self.rank_out(), self.rank_in());
-            assert_eq!(sk.rank(), self.rank());
+            assert_eq!(sk.rank(), self.rank_out());
             assert!(
-                scratch.available()
-                    >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
-                "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
+                scratch.available() >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self),
+                "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {}",
                 scratch.available(),
-                self.rank(),
-                self.size(),
-                GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
+                GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self)
             )
         }
 
         let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());
 
         {
-            (0..self.rank()).for_each(|i| {
+            (0..self.rank_out().into()).for_each(|i| {
                 module.vec_znx_automorphism(
                     module.galois_element_inv(p),
                     &mut sk_out.data.as_vec_znx_mut(),

@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
     source::Source,
@@ -11,15 +11,16 @@ use poulpy_hal::{
 use crate::{
     TakeGLWEPt,
     encryption::{SIGMA, glwe_encrypt_sk_internal},
-    layouts::{GGLWECiphertext, Infos, compressed::GGLWECiphertextCompressed, prepared::GLWESecretPrepared},
+    layouts::{GGLWECiphertext, GGLWELayoutInfos, LWEInfos, compressed::GGLWECiphertextCompressed, prepared::GLWESecretPrepared},
 };
 
 impl GGLWECiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+        GGLWECiphertext::encrypt_sk_scratch_space(module, infos)
     }
 }
 
@@ -42,7 +43,7 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
-           + VecZnxSubABInplace
+           + VecZnxSubInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
@@ -56,7 +57,7 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
 
             assert_eq!(
                 self.rank_in(),
-                pt.cols(),
+                pt.cols() as u32,
                 "self.rank_in(): {} != pt.cols(): {}",
                 self.rank_in(),
                 pt.cols()
@@ -69,36 +70,33 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
                 sk.rank()
             );
             assert_eq!(self.n(), sk.n());
-            assert_eq!(pt.n(), sk.n());
+            assert_eq!(pt.n() as u32, sk.n());
             assert!(
-                scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k()),
-                "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
+                scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self),
+                "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space: {}",
                 scratch.available(),
-                self.rank(),
-                self.size(),
-                GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k())
+                GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self)
             );
             assert!(
-                self.rows() * self.digits() * self.basek() <= self.k(),
-                "self.rows() : {} * self.digits() : {} * self.basek() : {} = {} >= self.k() = {}",
+                self.rows().0 * self.digits().0 * self.base2k().0 <= self.k().0,
+                "self.rows() : {} * self.digits() : {} * self.base2k() : {} = {} >= self.k() = {}",
                 self.rows(),
                 self.digits(),
-                self.basek(),
-                self.rows() * self.digits() * self.basek(),
+                self.base2k(),
+                self.rows().0 * self.digits().0 * self.base2k().0,
                 self.k()
             );
         }
 
-        let rows: usize = self.rows();
-        let digits: usize = self.digits();
-        let basek: usize = self.basek();
-        let k: usize = self.k();
-        let rank_in: usize = self.rank_in();
-        let cols: usize = self.rank_out() + 1;
+        let rows: usize = self.rows().into();
+        let digits: usize = self.digits().into();
+        let base2k: usize = self.base2k().into();
+        let rank_in: usize = self.rank_in().into();
+        let cols: usize = (self.rank_out() + 1).into();
 
         let mut source_xa = Source::new(seed);
 
-        let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(sk.n(), basek, k);
+        let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self);
         (0..rank_in).for_each(|col_i| {
             (0..rows).for_each(|row_i| {
                 // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
@@ -110,15 +108,15 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
                     pt,
                     col_i,
                 );
-                module.vec_znx_normalize_inplace(basek, &mut tmp_pt.data, 0, scrach_1);
+                module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1);
 
                 let (seed, mut source_xa_tmp) = source_xa.branch();
                 self.seed[col_i * rows + row_i] = seed;
 
                 glwe_encrypt_sk_internal(
                     module,
-                    self.basek(),
-                    self.k(),
+                    self.base2k().into(),
+                    self.k().into(),
                     &mut self.at_mut(row_i, col_i).data,
                     cols,
                     true,

@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,
@@ -11,23 +11,21 @@ use poulpy_hal::{
 
 use crate::{
     TakeGLWESecretPrepared,
-    layouts::{GGLWECiphertext, GLWESecret, compressed::GGLWESwitchingKeyCompressed, prepared::GLWESecretPrepared},
+    layouts::{
+        Degree, GGLWECiphertext, GGLWELayoutInfos, GLWEInfos, GLWESecret, LWEInfos, compressed::GGLWESwitchingKeyCompressed,
+        prepared::GLWESecretPrepared,
+    },
 };
 
 impl GGLWESwitchingKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(
-        module: &Module<B>,
-        basek: usize,
-        k: usize,
-        rank_in: usize,
-        rank_out: usize,
-    ) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
     {
-        (GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | ScalarZnx::alloc_bytes(module.n(), 1))
-            + ScalarZnx::alloc_bytes(module.n(), rank_in)
-            + GLWESecretPrepared::bytes_of(module, rank_out)
+        (GGLWECiphertext::encrypt_sk_scratch_space(module, infos) | ScalarZnx::alloc_bytes(module.n(), 1))
+            + ScalarZnx::alloc_bytes(module.n(), infos.rank_in().into())
+            + GLWESecretPrepared::alloc_bytes_with(module, infos.rank_out())
     }
 }
 
@@ -52,7 +50,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
-           + VecZnxSubABInplace
+           + VecZnxSubInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
@@ -63,35 +61,22 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
     {
         #[cfg(debug_assertions)]
        {
-            use crate::layouts::{GGLWESwitchingKey, Infos};
+            use crate::layouts::GGLWESwitchingKey;
 
-            assert!(sk_in.n() <= module.n());
-            assert!(sk_out.n() <= module.n());
+            assert!(sk_in.n().0 <= module.n() as u32);
+            assert!(sk_out.n().0 <= module.n() as u32);
             assert!(
-                scratch.available()
-                    >= GGLWESwitchingKey::encrypt_sk_scratch_space(
-                        module,
-                        self.basek(),
-                        self.k(),
-                        self.rank_in(),
-                        self.rank_out()
-                    ),
+                scratch.available() >= GGLWESwitchingKey::encrypt_sk_scratch_space(module, self),
                 "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
                 scratch.available(),
-                GGLWESwitchingKey::encrypt_sk_scratch_space(
-                    module,
-                    self.basek(),
-                    self.k(),
-                    self.rank_in(),
-                    self.rank_out()
-                )
+                GGLWESwitchingKey::encrypt_sk_scratch_space(module, self)
             )
         }
 
-        let n: usize = sk_in.n().max(sk_out.n());
+        let n: usize = sk_in.n().max(sk_out.n()).into();
 
-        let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank());
-        (0..sk_in.rank()).for_each(|i| {
+        let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into());
+        (0..sk_in.rank().into()).for_each(|i| {
             module.vec_znx_switch_ring(
                 &mut sk_in_tmp.as_vec_znx_mut(),
                 i,
@@ -100,10 +85,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
             );
         });
 
-        let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(n, sk_out.rank());
+        let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(Degree(n as u32), sk_out.rank());
         {
             let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
-            (0..sk_out.rank()).for_each(|i| {
+            (0..sk_out.rank().into()).for_each(|i| {
                 module.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
                 module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
             });
@@ -117,7 +102,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
             source_xe,
             scratch_2,
         );
-        self.sk_in_n = sk_in.n();
-        self.sk_out_n = sk_out.n();
+        self.sk_in_n = sk_in.n().into();
+        self.sk_out_n = sk_out.n().into();
     }
 }

@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
         TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes,
         VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
@@ -11,16 +11,20 @@ use poulpy_hal::{
 
 use crate::{
     TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{GGLWETensorKey, GLWESecret, Infos, compressed::GGLWETensorKeyCompressed, prepared::Prepare},
+    layouts::{
+        GGLWELayoutInfos, GGLWETensorKey, GLWEInfos, GLWESecret, LWEInfos, Rank, compressed::GGLWETensorKeyCompressed,
+        prepared::Prepare,
+    },
 };
 
 impl GGLWETensorKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>:
             SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
     {
-        GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k, rank)
+        GGLWETensorKey::encrypt_sk_scratch_space(module, infos)
     }
 }
 
@@ -42,7 +46,7 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
-           + VecZnxSubABInplace
+           + VecZnxSubInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
@@ -63,37 +67,38 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
     {
         #[cfg(debug_assertions)]
         {
-            assert_eq!(self.rank(), sk.rank());
+            assert_eq!(self.rank_out(), sk.rank());
             assert_eq!(self.n(), sk.n());
         }
 
-        let n: usize = sk.n();
-        let rank: usize = self.rank();
+        let n: usize = sk.n().into();
+        let rank: usize = self.rank_out().into();
 
-        let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(n, rank);
+        let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(sk.n(), self.rank_out());
         sk_dft_prep.prepare(module, sk, scratch_1);
 
         let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n, rank, 1);
 
-        (0..rank).for_each(|i| {
+        for i in 0..rank {
             module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
-        });
+        }
 
         let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n, 1, 1);
-        let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(n, 1);
+        let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(sk.n(), Rank(1));
         let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(n, 1, 1);
 
         let mut source_xa: Source = Source::new(seed_xa);
 
-        (0..rank).for_each(|i| {
-            (i..rank).for_each(|j| {
+        for i in 0..rank {
+            for j in i..rank {
                 module.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_dft_prep.data, j, &sk_dft, i);
 
                 module.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
                 module.vec_znx_big_normalize(
-                    self.basek(),
+                    self.base2k().into(),
                     &mut sk_ij.data.as_vec_znx_mut(),
                     0,
+                    self.base2k().into(),
                     &sk_ij_big,
                     0,
                     scratch_5,
@@ -103,7 +108,7 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
 
                 self.at_mut(i, j)
                     .encrypt_sk(module, &sk_ij, sk, seed_xa_tmp, source_xe, scratch_5);
-            });
-        })
+            }
+        }
     }
 }

@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
     source::Source,
@@ -11,15 +11,18 @@ use poulpy_hal::{
 use crate::{
     TakeGLWEPt,
     encryption::{SIGMA, glwe_encrypt_sk_internal},
-    layouts::{GGSWCiphertext, Infos, compressed::GGSWCiphertextCompressed, prepared::GLWESecretPrepared},
+    layouts::{
+        GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, compressed::GGSWCiphertextCompressed, prepared::GLWESecretPrepared,
+    },
 };
 
 impl GGSWCiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGSWInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k, rank)
+        GGSWCiphertext::encrypt_sk_scratch_space(module, infos)
     }
 }
 
@@ -42,7 +45,7 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
-           + VecZnxSubABInplace
+           + VecZnxSubInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
@@ -56,27 +59,26 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
 
             assert_eq!(self.rank(), sk.rank());
             assert_eq!(self.n(), sk.n());
-            assert_eq!(pt.n(), sk.n());
+            assert_eq!(pt.n() as u32, sk.n());
         }
 
-        let basek: usize = self.basek();
-        let k: usize = self.k();
-        let rank: usize = self.rank();
+        let base2k: usize = self.base2k().into();
+        let rank: usize = self.rank().into();
         let cols: usize = rank + 1;
-        let digits: usize = self.digits();
+        let digits: usize = self.digits().into();
 
-        let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(self.n(), basek, k);
+        let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&self.glwe_layout());
 
         let mut source = Source::new(seed_xa);
 
-        self.seed = vec![[0u8; 32]; self.rows() * cols];
+        self.seed = vec![[0u8; 32]; self.rows().0 as usize * cols];
 
-        (0..self.rows()).for_each(|row_i| {
+        (0..self.rows().into()).for_each(|row_i| {
            tmp_pt.data.zero();
 
            // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
            module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (digits - 1) + row_i * digits, pt, 0);
-            module.vec_znx_normalize_inplace(basek, &mut tmp_pt.data, 0, scratch_1);
+            module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);
 
            (0..rank + 1).for_each(|col_j| {
                // rlwe encrypt of vec_znx_pt into vec_znx_ct
@@ -87,8 +89,8 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
 
                glwe_encrypt_sk_internal(
                    module,
-                    self.basek(),
-                    self.k(),
+                    self.base2k().into(),
+                    self.k().into(),
                    &mut self.at_mut(row_i, col_j).data,
                    cols,
                    true,

@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
-        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
@@ -10,15 +10,18 @@ use poulpy_hal::{
 
 use crate::{
     encryption::{SIGMA, glwe_ct::glwe_encrypt_sk_internal},
-    layouts::{GLWECiphertext, GLWEPlaintext, Infos, compressed::GLWECiphertextCompressed, prepared::GLWESecretPrepared},
+    layouts::{
+        GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos, compressed::GLWECiphertextCompressed, prepared::GLWESecretPrepared,
+    },
 };
 
 impl GLWECiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+        GLWECiphertext::encrypt_sk_scratch_space(module, infos)
     }
 }
 
@@ -40,7 +43,7 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
-           + VecZnxSubABInplace
+           + VecZnxSubInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
@@ -68,7 +71,7 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
-           + VecZnxSubABInplace
+           + VecZnxSubInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
@@ -77,11 +80,11 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
         Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
     {
         let mut source_xa = Source::new(seed_xa);
-        let cols: usize = self.rank() + 1;
+        let cols: usize = (self.rank() + 1).into();
         glwe_encrypt_sk_internal(
             module,
-            self.basek(),
-            self.k(),
+            self.base2k().into(),
+            self.k().into(),
             &mut self.data,
             cols,
             true,
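A recurring change throughout this diff is that `encrypt_sk_scratch_space` and related helpers stop taking loose `basek`/`k`/`rank` integers and instead take one `infos` argument bounded by a layout trait (`LWEInfos`, `GLWEInfos`, `GGLWELayoutInfos`, `GGSWInfos`). A minimal sketch of that signature shape, with hypothetical stand-in types and an arbitrary byte formula (nothing here is the actual poulpy API):

```rust
use std::mem::size_of;

// Hypothetical layout traits, standing in for poulpy's LWEInfos / GLWEInfos.
trait LweInfos {
    fn base2k(&self) -> usize;
    fn k(&self) -> usize;
}
trait GlweInfos: LweInfos {
    fn rank(&self) -> usize;
}

struct GlweLayout {
    base2k: usize,
    k: usize,
    rank: usize,
}

impl LweInfos for GlweLayout {
    fn base2k(&self) -> usize {
        self.base2k
    }
    fn k(&self) -> usize {
        self.k
    }
}
impl GlweInfos for GlweLayout {
    fn rank(&self) -> usize {
        self.rank
    }
}

// Before: every call site threads the raw parameters individually.
fn scratch_bytes_old(n: usize, base2k: usize, k: usize, rank: usize) -> usize {
    n * k.div_ceil(base2k) * (rank + 1) * size_of::<i64>()
}

// After: the call site passes one layout value and the helper pulls what it
// needs, so adding a layout field does not ripple through every signature.
fn scratch_bytes_new<A: GlweInfos>(n: usize, infos: &A) -> usize {
    n * infos.k().div_ceil(infos.base2k()) * (infos.rank() + 1) * size_of::<i64>()
}

fn main() {
    let infos = GlweLayout { base2k: 17, k: 54, rank: 2 };
    assert_eq!(
        scratch_bytes_old(1 << 10, 17, 54, 2),
        scratch_bytes_new(1 << 10, &infos),
    );
}
```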