Mirror of https://github.com/arnaucube/poulpy.git
Add cross-basek normalization (#90)

* added cross_basek_normalization
* updated method signatures to take layouts
* fixed cross-base normalization (fix #91, fix #93)

Committed by GitHub
Parent: 4da790ea6a
Commit: 37e13b965c
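Note: "cross-basek" here means the two sides of a key-switch may use different base-2^k limb decompositions (base2k). Before the DFT/VMP steps the input limbs are re-normalized into the key's base, and the result is normalized back into the output's base. The sketch below is a plain-integer analogue of that re-normalization, not the crate's API: it re-expresses the same precision with limbs of a different width (unsigned digits, simplified handling; the real vec_znx_normalize works per coefficient on signed, balanced VecZnx limbs).

/// Illustrative sketch only: re-express the fractional value
/// sum_i d_in[i] * 2^{-(i+1)*k_in} with limbs of width k_out bits.
fn change_base(d_in: &[u64], k_in: u32, k_out: u32, limbs_out: usize) -> Vec<u64> {
    // Total precision carried by the input limbs, in bits.
    let prec = d_in.len() as u32 * k_in;
    // Recompose into one big unsigned integer v; the value represented is v / 2^prec.
    let mut v: u128 = 0;
    for &limb in d_in {
        v = (v << k_in) | limb as u128;
    }
    // Re-decompose into base-2^k_out limbs, most significant first,
    // truncating or zero-padding to the requested output precision.
    let mask = (1u128 << k_out) - 1;
    (0..limbs_out)
        .map(|i| {
            let shift = (i as u32 + 1) * k_out;
            if shift <= prec {
                ((v >> (prec - shift)) & mask) as u64
            } else {
                ((v << (shift - prec)) & mask) as u64
            }
        })
        .collect()
}

fn main() {
    // 54 bits of precision stored as 3 limbs of 18 bits each ...
    let (k_in, d_in) = (18u32, [0x2ABCD_u64, 0x11111, 0x22222]);
    // ... re-expressed as ceil(54 / 13) = 5 limbs of 13 bits.
    let k_out = 13u32;
    let limbs_out = (d_in.len() as u32 * k_in).div_ceil(k_out) as usize;
    println!("{:?}", change_base(&d_in, k_in, k_out, limbs_out));
}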
@@ -1,46 +1,40 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
||||
VmpApplyDftToDftTmpBytes,
|
||||
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
},
|
||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
|
||||
};
|
||||
|
||||
use crate::layouts::{
|
||||
GGLWEAutomorphismKey, GGLWESwitchingKey, GLWECiphertext, Infos,
|
||||
GGLWEAutomorphismKey, GGLWELayoutInfos, GGLWESwitchingKey, GLWECiphertext, GLWEInfos,
|
||||
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared},
|
||||
};
|
||||
|
||||
impl GGLWEAutomorphismKey<Vec<u8>> {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn keyswitch_scratch_space<B: Backend>(
|
||||
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_in: usize,
|
||||
k_ksk: usize,
|
||||
digits: usize,
|
||||
rank: usize,
|
||||
out_infos: &OUT,
|
||||
in_infos: &IN,
|
||||
key_infos: &KEY,
|
||||
) -> usize
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
OUT: GGLWELayoutInfos,
|
||||
IN: GGLWELayoutInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
GGLWESwitchingKey::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
|
||||
GGLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
|
||||
}
|
||||
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_ksk: usize,
|
||||
digits: usize,
|
||||
rank: usize,
|
||||
) -> usize
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
OUT: GGLWELayoutInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
|
||||
GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
|
||||
}
|
||||
}
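Note: the recurring pattern in this commit is that scratch-space helpers no longer take unpacked (basek, k_out, k_in, k_ksk, digits, rank) scalars; they accept anything implementing the layout-info traits (GGLWELayoutInfos, GLWEInfos, ...) and read the parameters themselves, so call sites just forward the objects they already hold. A minimal standalone sketch of that style follows; the trait, struct, and byte model here are hypothetical stand-ins, not the crate's actual types.

// Hypothetical stand-ins for the crate's layout-info traits (sketch only).
trait LayoutInfos {
    fn base2k(&self) -> u32;
    fn k(&self) -> u32;
    fn rank(&self) -> u32;
}

struct Layout { base2k: u32, k: u32, rank: u32 }

impl LayoutInfos for Layout {
    fn base2k(&self) -> u32 { self.base2k }
    fn k(&self) -> u32 { self.k }
    fn rank(&self) -> u32 { self.rank }
}

// The helper derives every size from the layouts instead of taking scalars.
fn scratch_limbs<OUT: LayoutInfos, KEY: LayoutInfos>(out: &OUT, key: &KEY) -> u32 {
    out.k().div_ceil(out.base2k()) + key.k().div_ceil(key.base2k()) * (key.rank() + 1)
}

fn main() {
    let out = Layout { base2k: 18, k: 54, rank: 1 };
    let key = Layout { base2k: 22, k: 110, rank: 1 };
    println!("limbs (toy model): {}", scratch_limbs(&out, &key));
}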
|
||||
|
||||
@@ -60,8 +54,10 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
|
||||
{
|
||||
self.key.keyswitch(module, &lhs.key, rhs, scratch);
|
||||
}
|
||||
@@ -80,43 +76,38 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
|
||||
{
|
||||
self.key.keyswitch_inplace(module, &rhs.key, scratch);
|
||||
}
|
||||
}
|
||||
|
||||
impl GGLWESwitchingKey<Vec<u8>> {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn keyswitch_scratch_space<B: Backend>(
|
||||
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_in: usize,
|
||||
k_ksk: usize,
|
||||
digits: usize,
|
||||
rank_in: usize,
|
||||
rank_out: usize,
|
||||
out_infos: &OUT,
|
||||
in_infos: &IN,
|
||||
key_apply: &KEY,
|
||||
) -> usize
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
OUT: GGLWELayoutInfos,
|
||||
IN: GGLWELayoutInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out)
|
||||
GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, key_apply)
|
||||
}
|
||||
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_ksk: usize,
|
||||
digits: usize,
|
||||
rank: usize,
|
||||
) -> usize
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
OUT: GGLWELayoutInfos + GLWEInfos,
|
||||
KEY: GGLWELayoutInfos + GLWEInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
|
||||
GLWECiphertext::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,8 +127,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
@@ -168,17 +161,24 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
|
||||
self.rows(),
|
||||
lhs.rows()
|
||||
);
|
||||
assert_eq!(
|
||||
self.digits(),
|
||||
lhs.digits(),
|
||||
"ksk_out digits: {} != ksk_in digits: {}",
|
||||
self.digits(),
|
||||
lhs.digits()
|
||||
)
|
||||
}
|
||||
|
||||
(0..self.rank_in()).for_each(|col_i| {
|
||||
(0..self.rows()).for_each(|row_j| {
|
||||
(0..self.rank_in().into()).for_each(|col_i| {
|
||||
(0..self.rows().into()).for_each(|row_j| {
|
||||
self.at_mut(row_j, col_i)
|
||||
.keyswitch(module, &lhs.at(row_j, col_i), rhs, scratch);
|
||||
});
|
||||
});
|
||||
|
||||
(self.rows().min(lhs.rows())..self.rows()).for_each(|row_i| {
|
||||
(0..self.rank_in()).for_each(|col_j| {
|
||||
(self.rows().min(lhs.rows()).into()..self.rows().into()).for_each(|row_i| {
|
||||
(0..self.rank_in().into()).for_each(|col_j| {
|
||||
self.at_mut(row_i, col_j).data.zero();
|
||||
});
|
||||
});
|
||||
@@ -198,8 +198,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
@@ -212,8 +214,8 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
|
||||
);
|
||||
}
|
||||
|
||||
(0..self.rank_in()).for_each(|col_i| {
|
||||
(0..self.rows()).for_each(|row_j| {
|
||||
(0..self.rank_in().into()).for_each(|col_i| {
|
||||
(0..self.rows().into()).for_each(|row_j| {
|
||||
self.at_mut(row_j, col_i)
|
||||
.keyswitch_inplace(module, rhs, scratch)
|
||||
});
|
||||
|
||||
@@ -1,101 +1,115 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
|
||||
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy,
|
||||
VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
||||
VmpApplyDftToDftTmpBytes,
|
||||
ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
|
||||
VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
|
||||
VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
},
|
||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
layouts::{
|
||||
GGLWECiphertext, GGSWCiphertext, GLWECiphertext, Infos,
|
||||
GGLWECiphertext, GGLWELayoutInfos, GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos,
|
||||
prepared::{GGLWESwitchingKeyPrepared, GGLWETensorKeyPrepared},
|
||||
},
|
||||
operations::GLWEOperations,
|
||||
};
|
||||
|
||||
impl GGSWCiphertext<Vec<u8>> {
|
||||
pub(crate) fn expand_row_scratch_space<B: Backend>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
self_k: usize,
|
||||
k_tsk: usize,
|
||||
digits: usize,
|
||||
rank: usize,
|
||||
) -> usize
|
||||
pub(crate) fn expand_row_scratch_space<B: Backend, OUT, TSK>(module: &Module<B>, out_infos: &OUT, tsk_infos: &TSK) -> usize
|
||||
where
|
||||
OUT: GGSWInfos,
|
||||
TSK: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
let tsk_size: usize = k_tsk.div_ceil(basek);
|
||||
let self_size_out: usize = self_k.div_ceil(basek);
|
||||
let self_size_in: usize = self_size_out.div_ceil(digits);
|
||||
let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
|
||||
let size_in: usize = out_infos
|
||||
.k()
|
||||
.div_ceil(tsk_infos.base2k())
|
||||
.div_ceil(tsk_infos.digits().into()) as usize;
|
||||
|
||||
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(rank + 1, tsk_size);
|
||||
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, self_size_in);
|
||||
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes((tsk_infos.rank_out() + 1).into(), tsk_size);
|
||||
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, size_in);
|
||||
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
self_size_out,
|
||||
self_size_in,
|
||||
self_size_in,
|
||||
rank,
|
||||
rank,
|
||||
tsk_size,
|
||||
size_in,
|
||||
size_in,
|
||||
(tsk_infos.rank_in()).into(), // Verify if rank+1
|
||||
(tsk_infos.rank_out()).into(), // Verify if rank+1
|
||||
tsk_size,
|
||||
);
|
||||
let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
|
||||
let norm: usize = module.vec_znx_normalize_tmp_bytes();
|
||||
|
||||
tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
|
||||
}
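Note: the rewritten scratch computation derives every limb count from the layouts: the tensor key contributes ceil(k_tsk / base2k_tsk) limbs in its own base, and the input rows are first re-expressed in the tensor key's base and then split across its digits. A quick numeric check of that arithmetic, with hypothetical parameters:

fn main() {
    // Hypothetical parameters: output GGSW with k = 54, tensor key with
    // k = 110 over base 2^22 and 2 digits.
    let k_out = 54u32;
    let (k_tsk, base2k_tsk, digits) = (110u32, 22u32, 2u32);

    // Limbs of the tensor key in its own base: ceil(110 / 22) = 5.
    let tsk_size = k_tsk.div_ceil(base2k_tsk);
    // Input rows re-expressed in the tensor key's base, then split across
    // the digits: ceil(ceil(54 / 22) / 2) = ceil(3 / 2) = 2.
    let size_in = k_out.div_ceil(base2k_tsk).div_ceil(digits);

    assert_eq!((tsk_size, size_in), (5, 2));
    println!("tsk_size = {tsk_size}, size_in = {size_in}");
}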
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn keyswitch_scratch_space<B: Backend>(
|
||||
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_in: usize,
|
||||
k_ksk: usize,
|
||||
digits_ksk: usize,
|
||||
k_tsk: usize,
|
||||
digits_tsk: usize,
|
||||
rank: usize,
|
||||
out_infos: &OUT,
|
||||
in_infos: &IN,
|
||||
apply_infos: &KEY,
|
||||
tsk_infos: &TSK,
|
||||
) -> usize
|
||||
where
|
||||
OUT: GGSWInfos,
|
||||
IN: GGSWInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
TSK: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes,
|
||||
{
|
||||
let out_size: usize = k_out.div_ceil(basek);
|
||||
let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, out_size);
|
||||
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
|
||||
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
|
||||
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
|
||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
|
||||
res_znx + ci_dft + (ks | expand_rows | res_dft)
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(apply_infos.rank_in(), apply_infos.rank_out());
|
||||
assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
|
||||
assert_eq!(apply_infos.rank_in(), tsk_infos.rank_in());
|
||||
}
|
||||
|
||||
let rank: usize = apply_infos.rank_out().into();
|
||||
|
||||
let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize;
|
||||
let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, size_out);
|
||||
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out);
|
||||
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos);
|
||||
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, out_infos, tsk_infos);
|
||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out);
|
||||
|
||||
if in_infos.base2k() == tsk_infos.base2k() {
|
||||
res_znx + ci_dft + (ks | expand_rows | res_dft)
|
||||
} else {
|
||||
let a_conv: usize = VecZnx::alloc_bytes(
|
||||
module.n(),
|
||||
1,
|
||||
out_infos.k().div_ceil(tsk_infos.base2k()) as usize,
|
||||
) + module.vec_znx_normalize_tmp_bytes();
|
||||
res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
|
||||
}
|
||||
}
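Note: in these size formulas, `+` combines buffers that are alive at the same time, while `|` appears between alternatives that (reading the callers) are not needed simultaneously. `|` is a plain bitwise OR of byte counts, and for non-negative integers it always lies between the maximum and the sum of its operands, so it covers the larger of two mutually exclusive buffers at the cost of over-allocating by at most the smaller one. A tiny self-contained check of that inequality, with arbitrary sizes:

fn main() {
    // Bitwise OR of two byte counts is always >= their max and <= their sum
    // (arbitrary example sizes below), so it can stand in for "max of the two
    // phases" at the cost of some over-allocation.
    let (ks, expand_rows): (usize, usize) = (13_312, 20_480);
    assert!((ks | expand_rows) >= ks.max(expand_rows));
    assert!((ks | expand_rows) <= ks + expand_rows);
    println!(
        "max = {}, or = {}, sum = {}",
        ks.max(expand_rows),
        ks | expand_rows,
        ks + expand_rows
    );
}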
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend>(
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_ksk: usize,
|
||||
digits_ksk: usize,
|
||||
k_tsk: usize,
|
||||
digits_tsk: usize,
|
||||
rank: usize,
|
||||
out_infos: &OUT,
|
||||
apply_infos: &KEY,
|
||||
tsk_infos: &TSK,
|
||||
) -> usize
|
||||
where
|
||||
OUT: GGSWInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
TSK: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigAllocBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes,
|
||||
{
|
||||
GGSWCiphertext::keyswitch_scratch_space(
|
||||
module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
|
||||
)
|
||||
GGSWCiphertext::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,18 +134,21 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxIdftApplyTmpA<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(self.rank(), a.rank());
|
||||
use crate::layouts::{GLWEInfos, LWEInfos};
|
||||
|
||||
assert_eq!(self.rank(), a.rank_out());
|
||||
assert_eq!(self.rows(), a.rows());
|
||||
assert_eq!(self.n(), module.n());
|
||||
assert_eq!(a.n(), module.n());
|
||||
assert_eq!(tsk.n(), module.n());
|
||||
assert_eq!(self.n(), module.n() as u32);
|
||||
assert_eq!(a.n(), module.n() as u32);
|
||||
assert_eq!(tsk.n(), module.n() as u32);
|
||||
}
|
||||
(0..self.rows()).for_each(|row_i| {
|
||||
(0..self.rows().into()).for_each(|row_i| {
|
||||
self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
|
||||
});
|
||||
self.expand_row(module, tsk, scratch);
|
||||
@@ -159,10 +176,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftCopy<B>
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxIdftApplyTmpA<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||
{
|
||||
(0..lhs.rows()).for_each(|row_i| {
|
||||
(0..lhs.rows().into()).for_each(|row_i| {
|
||||
// Key-switch column 0, i.e.
|
||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||
self.at_mut(row_i, 0)
|
||||
@@ -192,10 +210,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftCopy<B>
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxIdftApplyTmpA<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||
{
|
||||
(0..self.rows()).for_each(|row_i| {
|
||||
(0..self.rows().into()).for_each(|row_i| {
|
||||
// Key-switch column 0, i.e.
|
||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||
self.at_mut(row_i, 0)
|
||||
@@ -220,34 +239,41 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxIdftApplyTmpA<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||
{
|
||||
assert!(
|
||||
scratch.available()
|
||||
>= GGSWCiphertext::expand_row_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.k(),
|
||||
tsk.k(),
|
||||
tsk.digits(),
|
||||
tsk.rank()
|
||||
)
|
||||
);
|
||||
let basek_in: usize = self.base2k().into();
|
||||
let basek_tsk: usize = tsk.base2k().into();
|
||||
|
||||
let n: usize = self.n();
|
||||
let rank: usize = self.rank();
|
||||
assert!(scratch.available() >= GGSWCiphertext::expand_row_scratch_space(module, self, tsk));
|
||||
|
||||
let n: usize = self.n().into();
|
||||
let rank: usize = self.rank().into();
|
||||
let cols: usize = rank + 1;
|
||||
|
||||
// Keyswitch the j-th row of the col 0
|
||||
(0..self.rows()).for_each(|row_i| {
|
||||
// Pre-compute DFT of (a0, a1, a2)
|
||||
let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, self.size());
|
||||
(0..cols).for_each(|i| {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &self.at(row_i, 0).data, i);
|
||||
});
|
||||
let a_size: usize = (self.size() * basek_in).div_ceil(basek_tsk);
|
||||
|
||||
(1..cols).for_each(|col_j| {
|
||||
// Keyswitch the j-th row of the col 0
|
||||
for row_i in 0..self.rows().into() {
|
||||
let a = &self.at(row_i, 0).data;
|
||||
|
||||
// Pre-compute DFT of (a0, a1, a2)
|
||||
let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, a_size);
|
||||
|
||||
if basek_in == basek_tsk {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
|
||||
}
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(n, 1, a_size);
|
||||
for i in 0..cols {
|
||||
module.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
|
||||
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
|
||||
}
|
||||
}
|
||||
|
||||
for col_j in 1..cols {
|
||||
// Example for rank 3:
|
||||
//
|
||||
// Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
|
||||
@@ -268,7 +294,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
|
||||
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
|
||||
|
||||
let digits: usize = tsk.digits();
|
||||
let digits: usize = tsk.digits().into();
|
||||
|
||||
let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(n, cols, tsk.size());
|
||||
let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(digits));
|
||||
@@ -285,11 +311,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
// a2 * (-(h0s0 + h1s1 + h2s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h2s2) + a2s0s2, a2h0, a2h1, a2h2)
|
||||
// =
|
||||
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
|
||||
(1..cols).for_each(|col_i| {
|
||||
for col_i in 1..cols {
|
||||
let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
|
||||
|
||||
// Extracts a[i] and multiplies it with Enc(s[i]s[j])
|
||||
(0..digits).for_each(|di| {
|
||||
for di in 0..digits {
|
||||
tmp_a.set_size((ci_dft.size() + di) / digits);
|
||||
|
||||
// Small optimization for digits > 2
|
||||
@@ -307,8 +333,8 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Adds (-(sum a[i] * s[i]) + m) on the i-th column of tmp_idft_i
|
||||
@@ -322,18 +348,19 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
||||
// (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
|
||||
module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
|
||||
let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(n, 1, tsk.size());
|
||||
(0..cols).for_each(|i| {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
|
||||
module.vec_znx_big_normalize(
|
||||
self.basek(),
|
||||
basek_in,
|
||||
&mut self.at_mut(row_i, col_j).data,
|
||||
i,
|
||||
basek_tsk,
|
||||
&tmp_idft,
|
||||
0,
|
||||
scratch_3,
|
||||
);
|
||||
});
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
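Note: when digits > 1, each pass di works on roughly a 1/digits fraction of the input limbs; the exact count used with set_size is (size + di) / digits, which equals the number of limb indices j < size with j congruent to digits - di - 1 (mod digits). The snippet below only checks that counting identity with hypothetical numbers; it does not reproduce the library's limb-selection semantics.

fn main() {
    // Hypothetical limb count and digit count.
    let (size, digits) = (7usize, 3usize);

    for di in 0..digits {
        // Count used by the code: (size + di) / digits.
        let stride_len = (size + di) / digits;
        // Same count, written as "indices congruent to digits - di - 1 mod digits".
        let by_congruence = (0..size).filter(|j| j % digits == digits - di - 1).count();
        assert_eq!(stride_len, by_congruence);
        println!("di = {di}: {stride_len} limbs");
    }
}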
|
||||
|
||||
@@ -1,52 +1,64 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
||||
VmpApplyDftToDftTmpBytes,
|
||||
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
},
|
||||
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
|
||||
};
|
||||
|
||||
use crate::layouts::{GLWECiphertext, Infos, prepared::GGLWESwitchingKeyPrepared};
|
||||
use crate::layouts::{GGLWELayoutInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWESwitchingKeyPrepared};
|
||||
|
||||
impl GLWECiphertext<Vec<u8>> {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn keyswitch_scratch_space<B: Backend>(
|
||||
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_in: usize,
|
||||
k_ksk: usize,
|
||||
digits: usize,
|
||||
rank_in: usize,
|
||||
rank_out: usize,
|
||||
out_infos: &OUT,
|
||||
in_infos: &IN,
|
||||
key_apply: &KEY,
|
||||
) -> usize
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
OUT: GLWEInfos,
|
||||
IN: GLWEInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
|
||||
let out_size: usize = k_out.div_ceil(basek);
|
||||
let ksk_size: usize = k_ksk.div_ceil(basek);
|
||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank_out + 1, ksk_size); // TODO OPTIMIZE
|
||||
let ai_dft: usize = module.vec_znx_dft_alloc_bytes(rank_in, in_size);
|
||||
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(out_size, in_size, in_size, rank_in, rank_out + 1, ksk_size)
|
||||
+ module.vec_znx_dft_alloc_bytes(rank_in, in_size);
|
||||
let normalize: usize = module.vec_znx_big_normalize_tmp_bytes();
|
||||
res_dft + ((ai_dft + vmp) | normalize)
|
||||
let in_size: usize = in_infos
|
||||
.k()
|
||||
.div_ceil(key_apply.base2k())
|
||||
.div_ceil(key_apply.digits().into()) as usize;
|
||||
let out_size: usize = out_infos.size();
|
||||
let ksk_size: usize = key_apply.size();
|
||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
|
||||
let ai_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size);
|
||||
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
out_size,
|
||||
in_size,
|
||||
in_size,
|
||||
(key_apply.rank_in()).into(),
|
||||
(key_apply.rank_out() + 1).into(),
|
||||
ksk_size,
|
||||
) + module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size);
|
||||
let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes();
|
||||
if in_infos.base2k() == key_apply.base2k() {
|
||||
res_dft + ((ai_dft + vmp) | normalize_big)
|
||||
} else if key_apply.digits() == 1 {
|
||||
// In this case, we only need one temporary column, which we can drop once a_dft is computed.
|
||||
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), 1, in_size) + module.vec_znx_normalize_tmp_bytes();
|
||||
res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
|
||||
} else {
|
||||
// Since we stride over a to get a_dft when digits > 1, we need to store the full columns of a for the base conversion.
|
||||
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), (key_apply.rank_in()).into(), in_size);
|
||||
res_dft + ((ai_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
|
||||
}
|
||||
}
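Note: the GLWE scratch helper now distinguishes three cases: same base (no conversion buffer), cross-base with digits == 1 (one temporary column, dropped once a_dft is computed), and cross-base with digits > 1 (all rank_in columns kept, because the digit passes stride over the converted data). In every case the input limb count is taken in the key's base and divided across the key's digits, as in this hypothetical check:

fn main() {
    // Hypothetical cross-base setting: input GLWE with k = 64, switching key
    // over base 2^20 with 2 digits.
    let (k_in, base2k_ksk, digits) = (64u32, 20u32, 2u32);

    // Limbs per digit pass, in the key's base: ceil(ceil(64 / 20) / 2) = 2.
    let in_size = k_in.div_ceil(base2k_ksk).div_ceil(digits);

    assert_eq!(in_size, 2);
    println!("in_size = {in_size} limbs per digit pass");
}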
|
||||
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_out: usize,
|
||||
k_ksk: usize,
|
||||
digits: usize,
|
||||
rank: usize,
|
||||
) -> usize
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
|
||||
where
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
OUT: GLWEInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
Self::keyswitch_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank, rank)
|
||||
Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,10 +73,9 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
||||
) where
|
||||
DataLhs: DataRef,
|
||||
DataRhs: DataRef,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable,
|
||||
{
|
||||
let basek: usize = self.basek();
|
||||
assert_eq!(
|
||||
lhs.rank(),
|
||||
rhs.rank_in(),
|
||||
@@ -79,43 +90,26 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
||||
self.rank(),
|
||||
rhs.rank_out()
|
||||
);
|
||||
assert_eq!(self.basek(), basek);
|
||||
assert_eq!(lhs.basek(), basek);
|
||||
assert_eq!(rhs.n(), self.n());
|
||||
assert_eq!(lhs.n(), self.n());
|
||||
|
||||
let scratch_needed: usize = GLWECiphertext::keyswitch_scratch_space(module, self, lhs, rhs);
|
||||
|
||||
assert!(
|
||||
scratch.available()
|
||||
>= GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.k(),
|
||||
lhs.k(),
|
||||
rhs.k(),
|
||||
rhs.digits(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
),
|
||||
scratch.available() >= scratch_needed,
|
||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.base2k(),
|
||||
self.k(),
|
||||
lhs.base2k(),
|
||||
lhs.k(),
|
||||
rhs.base2k(),
|
||||
rhs.k(),
|
||||
rhs.digits(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
)={}",
|
||||
)={scratch_needed}",
|
||||
scratch.available(),
|
||||
GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.k(),
|
||||
lhs.k(),
|
||||
rhs.k(),
|
||||
rhs.digits(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -127,10 +121,9 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
||||
scratch: &Scratch<B>,
|
||||
) where
|
||||
DataRhs: DataRef,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable,
|
||||
{
|
||||
let basek: usize = self.basek();
|
||||
assert_eq!(
|
||||
self.rank(),
|
||||
rhs.rank_out(),
|
||||
@@ -138,41 +131,15 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
||||
self.rank(),
|
||||
rhs.rank_out()
|
||||
);
|
||||
assert_eq!(self.basek(), basek);
|
||||
|
||||
assert_eq!(rhs.n(), self.n());
|
||||
|
||||
let scratch_needed: usize = GLWECiphertext::keyswitch_inplace_scratch_space(module, self, rhs);
|
||||
|
||||
assert!(
|
||||
scratch.available()
|
||||
>= GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.k(),
|
||||
self.k(),
|
||||
rhs.k(),
|
||||
rhs.digits(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
),
|
||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.k(),
|
||||
self.k(),
|
||||
rhs.k(),
|
||||
rhs.digits(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
)={}",
|
||||
scratch.available() >= scratch_needed,
|
||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space()={scrach_needed}",
|
||||
scratch.available(),
|
||||
GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.basek(),
|
||||
self.k(),
|
||||
self.k(),
|
||||
rhs.k(),
|
||||
rhs.digits(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -181,7 +148,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
||||
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
||||
&mut self,
|
||||
module: &Module<B>,
|
||||
lhs: &GLWECiphertext<DataLhs>,
|
||||
glwe_in: &GLWECiphertext<DataLhs>,
|
||||
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) where
|
||||
@@ -193,17 +160,31 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
self.assert_keyswitch(module, lhs, rhs, scratch);
|
||||
self.assert_keyswitch(module, glwe_in, rhs, scratch);
|
||||
}
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||
(0..self.cols()).for_each(|i| {
|
||||
module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
|
||||
|
||||
let basek_out: usize = self.base2k().into();
|
||||
let basek_ksk: usize = rhs.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<_, B> = glwe_in.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
||||
module.vec_znx_big_normalize(
|
||||
basek_out,
|
||||
&mut self.data,
|
||||
i,
|
||||
basek_ksk,
|
||||
&res_big,
|
||||
i,
|
||||
scratch_1,
|
||||
);
|
||||
})
|
||||
}
|
||||
|
||||
@@ -222,17 +203,31 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
self.assert_keyswitch_inplace(module, rhs, scratch);
|
||||
}
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // Todo optimise
|
||||
|
||||
let basek_in: usize = self.base2k().into();
|
||||
let basek_ksk: usize = rhs.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||
(0..self.cols()).for_each(|i| {
|
||||
module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
|
||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
||||
module.vec_znx_big_normalize(
|
||||
basek_in,
|
||||
&mut self.data,
|
||||
i,
|
||||
basek_ksk,
|
||||
&res_big,
|
||||
i,
|
||||
scratch_1,
|
||||
);
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -257,19 +252,30 @@ impl<D: DataRef> GLWECiphertext<D> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
if rhs.digits() == 1 {
|
||||
return keyswitch_vmp_one_digit(module, res_dft, &self.data, &rhs.key.data, scratch);
|
||||
return keyswitch_vmp_one_digit(
|
||||
module,
|
||||
self.base2k().into(),
|
||||
rhs.base2k().into(),
|
||||
res_dft,
|
||||
&self.data,
|
||||
&rhs.key.data,
|
||||
scratch,
|
||||
);
|
||||
}
|
||||
|
||||
keyswitch_vmp_multiple_digits(
|
||||
module,
|
||||
self.base2k().into(),
|
||||
rhs.base2k().into(),
|
||||
res_dft,
|
||||
&self.data,
|
||||
&rhs.key.data,
|
||||
rhs.digits(),
|
||||
rhs.digits().into(),
|
||||
scratch,
|
||||
)
|
||||
}
|
||||
@@ -277,6 +283,8 @@ impl<D: DataRef> GLWECiphertext<D> {
|
||||
|
||||
fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>(
|
||||
module: &Module<B>,
|
||||
basek_in: usize,
|
||||
basek_ksk: usize,
|
||||
mut res_dft: VecZnxDft<DataRes, B>,
|
||||
a: &VecZnx<DataIn>,
|
||||
mat: &VmpPMat<DataVmp, B>,
|
||||
@@ -286,23 +294,42 @@ where
|
||||
DataRes: DataMut,
|
||||
DataIn: DataRef,
|
||||
DataVmp: DataRef,
|
||||
Module<B>:
|
||||
VecZnxDftAllocBytes + VecZnxDftApply<B> + VmpApplyDftToDft<B> + VecZnxIdftApplyConsume<B> + VecZnxBigAddSmallInplace<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B>,
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VecZnxDftApply<B>
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
let cols: usize = a.cols();
|
||||
|
||||
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a.size());
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1);
|
||||
});
|
||||
|
||||
if basek_in == basek_ksk {
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1);
|
||||
});
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), 1, a_size);
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_normalize(basek_ksk, &mut a_conv, 0, basek_in, a, col_i + 1, scratch_2);
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
|
||||
});
|
||||
}
|
||||
|
||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
||||
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
||||
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
|
||||
res_big
|
||||
}
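Note: the only cross-base work in the one-digit path is sizing and filling a_conv: the input's a.size() limbs of basek_in bits carry a.size() * basek_in bits of precision, so expressing the same precision in the key's base takes ceil(a.size() * basek_in / basek_ksk) limbs, which is the a_size used above. A standalone check with hypothetical widths:

fn main() {
    // Hypothetical: 4 input limbs of 16 bits, switching key over base 2^20.
    let (size_in, basek_in, basek_ksk) = (4u32, 16u32, 20u32);

    // Same 64 bits of precision, expressed in 20-bit limbs: ceil(64 / 20) = 4.
    let a_size = (size_in * basek_in).div_ceil(basek_ksk);

    assert_eq!(a_size, 4);
    println!("{size_in} limbs x {basek_in} bits -> {a_size} limbs x {basek_ksk} bits");
}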
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
|
||||
module: &Module<B>,
|
||||
basek_in: usize,
|
||||
basek_ksk: usize,
|
||||
mut res_dft: VecZnxDft<DataRes, B>,
|
||||
a: &VecZnx<DataIn>,
|
||||
mat: &VmpPMat<DataVmp, B>,
|
||||
@@ -318,37 +345,67 @@ where
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B>,
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
let cols: usize = a.cols();
|
||||
let size: usize = a.size();
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, size.div_ceil(digits));
|
||||
|
||||
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a_size.div_ceil(digits));
|
||||
ai_dft.data_mut().fill(0);
|
||||
|
||||
(0..digits).for_each(|di| {
|
||||
ai_dft.set_size((size + di) / digits);
|
||||
if basek_in == basek_ksk {
|
||||
for di in 0..digits {
|
||||
ai_dft.set_size((a_size + di) / digits);
|
||||
|
||||
// Small optimization for digits > 2
// VMP produces some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
// As such we can safely ignore the last digits-2 limbs of the sum of vmp products.
// It is possible to further ignore the last digits-1 limbs, but this introduces
// ~0.5 to 1 bit of additional noise, and is thus not chosen here, to ensure that the same
// noise is kept with respect to the ideal functionality.
|
||||
res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
|
||||
// Small optimization for digits > 2
// VMP produces some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
// As such we can safely ignore the last digits-2 limbs of the sum of vmp products.
// It is possible to further ignore the last digits-1 limbs, but this introduces
// ~0.5 to 1 bit of additional noise, and is thus not chosen here, to ensure that the same
// noise is kept with respect to the ideal functionality.
|
||||
res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
|
||||
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_dft_apply(digits, digits - di - 1, &mut ai_dft, col_i, a, col_i + 1);
|
||||
});
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_dft_apply(digits, digits - di - 1, &mut ai_dft, j, a, j + 1);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
|
||||
}
|
||||
}
|
||||
});
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), cols - 1, a_size);
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_normalize(basek_ksk, &mut a_conv, j, basek_in, a, j + 1, scratch_2);
|
||||
}
|
||||
|
||||
for di in 0..digits {
|
||||
ai_dft.set_size((a_size + di) / digits);
|
||||
|
||||
// Small optimization for digits > 2
// VMP produces some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
// As such we can safely ignore the last digits-2 limbs of the sum of vmp products.
// It is possible to further ignore the last digits-1 limbs, but this introduces
// ~0.5 to 1 bit of additional noise, and is thus not chosen here, to ensure that the same
// noise is kept with respect to the ideal functionality.
|
||||
res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
|
||||
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_dft_apply(digits, digits - di - 1, &mut ai_dft, j, &a_conv, j);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_2);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res_dft.set_size(res_dft.max_size());
|
||||
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
||||
|
||||
@@ -1,26 +1,31 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
||||
VmpApplyDftToDftTmpBytes,
|
||||
VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
|
||||
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
},
|
||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
TakeGLWECt,
|
||||
layouts::{GLWECiphertext, Infos, LWECiphertext, prepared::LWESwitchingKeyPrepared},
|
||||
layouts::{
|
||||
GGLWELayoutInfos, GLWECiphertext, GLWECiphertextLayout, LWECiphertext, LWEInfos, Rank, TorusPrecision,
|
||||
prepared::LWESwitchingKeyPrepared,
|
||||
},
|
||||
};
|
||||
|
||||
impl LWECiphertext<Vec<u8>> {
|
||||
pub fn keyswitch_scratch_space<B: Backend>(
|
||||
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||
module: &Module<B>,
|
||||
basek: usize,
|
||||
k_lwe_out: usize,
|
||||
k_lwe_in: usize,
|
||||
k_ksk: usize,
|
||||
out_infos: &OUT,
|
||||
in_infos: &IN,
|
||||
key_infos: &KEY,
|
||||
) -> usize
|
||||
where
|
||||
OUT: LWEInfos,
|
||||
IN: LWEInfos,
|
||||
KEY: GGLWELayoutInfos,
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
@@ -30,10 +35,30 @@ impl LWECiphertext<Vec<u8>> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
GLWECiphertext::bytes_of(module.n(), basek, k_lwe_out.max(k_lwe_in), 1)
|
||||
+ GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_lwe_out, k_ksk, 1, 1)
|
||||
let max_k: TorusPrecision = in_infos.k().max(out_infos.k());
|
||||
|
||||
let glwe_in_infos: GLWECiphertextLayout = GLWECiphertextLayout {
|
||||
n: module.n().into(),
|
||||
base2k: in_infos.base2k(),
|
||||
k: max_k,
|
||||
rank: Rank(1),
|
||||
};
|
||||
|
||||
let glwe_out_infos: GLWECiphertextLayout = GLWECiphertextLayout {
|
||||
n: module.n().into(),
|
||||
base2k: out_infos.base2k(),
|
||||
k: max_k,
|
||||
rank: Rank(1),
|
||||
};
|
||||
|
||||
let glwe_in: usize = GLWECiphertext::alloc_bytes(&glwe_in_infos);
|
||||
let glwe_out: usize = GLWECiphertext::alloc_bytes(&glwe_out_infos);
|
||||
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, key_infos);
|
||||
|
||||
glwe_in + glwe_out + ks
|
||||
}
|
||||
}
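Note: for the LWE path, each limb of the LWE ciphertext (b, a_1, ..., a_m) is embedded into a rank-1 GLWE limb of degree n >= m: b goes into coefficient 0 of column 0, and a_1..a_m go into the first m coefficients of column 1, with the rest zeroed. The GLWE key-switch then runs in whichever bases the input, output, and key declare, and the result is sample-extracted back to an LWE. The sketch below mirrors that copy with plain vectors and hypothetical sizes; it is not the crate's data-layout API.

fn main() {
    let n = 8usize;                              // GLWE ring degree (hypothetical)
    let lwe_limb: Vec<i64> = vec![7, 1, -2, 3];  // one limb: [b, a_1, a_2, a_3]

    let mut col_b = vec![0i64; n]; // GLWE column 0 ("body")
    let mut col_a = vec![0i64; n]; // GLWE column 1 ("mask"), zero-padded

    col_b[0] = lwe_limb[0];
    col_a[..lwe_limb.len() - 1].copy_from_slice(&lwe_limb[1..]);

    assert_eq!(col_b, vec![7, 0, 0, 0, 0, 0, 0, 0]);
    assert_eq!(col_a, vec![1, -2, 3, 0, 0, 0, 0, 0]);
    println!("embedded one LWE limb into a rank-1 GLWE limb of degree {n}");
}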
|
||||
|
||||
@@ -55,32 +80,47 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>,
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxCopy,
|
||||
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(self.n() <= module.n());
|
||||
assert!(a.n() <= module.n());
|
||||
assert_eq!(self.basek(), a.basek());
|
||||
assert!(self.n() <= module.n() as u32);
|
||||
assert!(a.n() <= module.n() as u32);
|
||||
assert!(scratch.available() >= LWECiphertext::keyswitch_scratch_space(module, self, a, ksk));
|
||||
}
|
||||
|
||||
let max_k: usize = self.k().max(a.k());
|
||||
let basek: usize = self.basek();
|
||||
let max_k: TorusPrecision = self.k().max(a.k());
|
||||
|
||||
let (mut glwe, scratch_1) = scratch.take_glwe_ct(ksk.n(), basek, max_k, 1);
|
||||
glwe.data.zero();
|
||||
let a_size: usize = a.k().div_ceil(ksk.base2k()) as usize;
|
||||
|
||||
let n_lwe: usize = a.n();
|
||||
let (mut glwe_in, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout {
|
||||
n: ksk.n(),
|
||||
base2k: a.base2k(),
|
||||
k: max_k,
|
||||
rank: Rank(1),
|
||||
});
|
||||
glwe_in.data.zero();
|
||||
|
||||
(0..a.size()).for_each(|i| {
|
||||
let data_lwe: &[i64] = a.data.at(0, i);
|
||||
glwe.data.at_mut(0, i)[0] = data_lwe[0];
|
||||
glwe.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
|
||||
let (mut glwe_out, scratch_1) = scratch_1.take_glwe_ct(&GLWECiphertextLayout {
|
||||
n: ksk.n(),
|
||||
base2k: self.base2k(),
|
||||
k: max_k,
|
||||
rank: Rank(1),
|
||||
});
|
||||
|
||||
glwe.keyswitch_inplace(module, &ksk.0, scratch_1);
|
||||
let n_lwe: usize = a.n().into();
|
||||
|
||||
self.sample_extract(&glwe);
|
||||
for i in 0..a_size {
|
||||
let data_lwe: &[i64] = a.data.at(0, i);
|
||||
glwe_in.data.at_mut(0, i)[0] = data_lwe[0];
|
||||
glwe_in.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
|
||||
}
|
||||
|
||||
glwe_out.keyswitch(module, &glwe_in, &ksk.0, scratch_1);
|
||||
self.sample_extract(&glwe_out);
|
||||
}
|
||||
}
|
||||