Mirror of https://github.com/arnaucube/poulpy.git, synced 2026-02-10 13:16:44 +01:00

commit 2b2b994f7d
parent f72363cc4b
committed by Jean-Philippe Bossuat

    wip
@@ -1,224 +1,205 @@
-use poulpy_hal::{
-    api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
-        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
-    },
-    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
-};
-
-use crate::layouts::{
-    GGLWEAutomorphismKey, GGLWEInfos, GGLWESwitchingKey, GLWECiphertext, GLWEInfos,
-    prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared},
-};
+use poulpy_hal::layouts::{Backend, DataMut, Module, Scratch};
+
+use crate::{
+    ScratchTakeCore,
+    keyswitching::glwe_ct::GLWEKeySwitch,
+    layouts::{
+        AutomorphismKey, AutomorphismKeyToRef, GGLWE, GGLWEInfos, GGLWEToMut, GGLWEToRef, GLWESwitchingKey,
+        GLWESwitchingKeyToRef,
+        prepared::{GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedToRef},
+    },
+};

-impl GGLWEAutomorphismKey<Vec<u8>> {
-    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        in_infos: &IN,
-        key_infos: &KEY,
-    ) -> usize
-    where
-        OUT: GGLWEInfos,
-        IN: GGLWEInfos,
-        KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
-    {
-        GGLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
-    }
-
-    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
-    where
-        OUT: GGLWEInfos,
-        KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
-    {
-        GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
-    }
-}
+impl AutomorphismKey<Vec<u8>> {
+    pub fn keyswitch_inplace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
+    where
+        R: GGLWEInfos,
+        A: GGLWEInfos,
+        K: GGLWEInfos,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
+    }
+}

-impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
-    pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        lhs: &GGLWEAutomorphismKey<DataLhs>,
-        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigNormalizeTmpBytes
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftApply<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigAddSmallInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxNormalize<B>
-            + VecZnxNormalizeTmpBytes,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
-    {
-        self.key.keyswitch(module, &lhs.key, rhs, scratch);
-    }
-
-    pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigNormalizeTmpBytes
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftApply<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigAddSmallInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxNormalize<B>
-            + VecZnxNormalizeTmpBytes,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
-    {
-        self.key.keyswitch_inplace(module, &rhs.key, scratch);
-    }
-}
+impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
+    pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
+    where
+        A: AutomorphismKeyToRef,
+        B: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.gglwe_keyswitch(&mut self.key.key, &a.to_ref().key.key, b, scratch);
+    }
+
+    pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
+    where
+        A: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.gglwe_keyswitch_inplace(&mut self.key.key, a, scratch);
+    }
+}

-impl GGLWESwitchingKey<Vec<u8>> {
-    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        in_infos: &IN,
-        key_apply: &KEY,
-    ) -> usize
-    where
-        OUT: GGLWEInfos,
-        IN: GGLWEInfos,
-        KEY: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
-    {
-        GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, key_apply)
-    }
-
-    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
-    where
-        OUT: GGLWEInfos + GLWEInfos,
-        KEY: GGLWEInfos + GLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
-    {
-        GLWECiphertext::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
-    }
-}
+impl GLWESwitchingKey<Vec<u8>> {
+    pub fn keyswitch_inplace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
+    where
+        R: GGLWEInfos,
+        A: GGLWEInfos,
+        K: GGLWEInfos,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
+    }
+}

-impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
-    pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        lhs: &GGLWESwitchingKey<DataLhs>,
-        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigNormalizeTmpBytes
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftApply<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigAddSmallInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxNormalize<B>
-            + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
-    {
-        #[cfg(debug_assertions)]
-        {
-            assert_eq!(
-                self.rank_in(),
-                lhs.rank_in(),
-                "ksk_out input rank: {} != ksk_in input rank: {}",
-                self.rank_in(),
-                lhs.rank_in()
-            );
-            assert_eq!(
-                lhs.rank_out(),
-                rhs.rank_in(),
-                "ksk_in output rank: {} != ksk_apply input rank: {}",
-                self.rank_out(),
-                rhs.rank_in()
-            );
-            assert_eq!(
-                self.rank_out(),
-                rhs.rank_out(),
-                "ksk_out output rank: {} != ksk_apply output rank: {}",
-                self.rank_out(),
-                rhs.rank_out()
-            );
-            assert!(
-                self.dnum() <= lhs.dnum(),
-                "self.dnum()={} > lhs.dnum()={}",
-                self.dnum(),
-                lhs.dnum()
-            );
-            assert_eq!(
-                self.dsize(),
-                lhs.dsize(),
-                "ksk_out dsize: {} != ksk_in dsize: {}",
-                self.dsize(),
-                lhs.dsize()
-            )
-        }
-
-        (0..self.rank_in().into()).for_each(|col_i| {
-            (0..self.dnum().into()).for_each(|row_j| {
-                self.at_mut(row_j, col_i)
-                    .keyswitch(module, &lhs.at(row_j, col_i), rhs, scratch);
-            });
-        });
-
-        (self.dnum().min(lhs.dnum()).into()..self.dnum().into()).for_each(|row_i| {
-            (0..self.rank_in().into()).for_each(|col_j| {
-                self.at_mut(row_i, col_j).data.zero();
-            });
-        });
-    }
-
-    pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigNormalizeTmpBytes
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftApply<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigAddSmallInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxNormalize<B>
-            + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
-    {
-        #[cfg(debug_assertions)]
-        {
-            assert_eq!(
-                self.rank_out(),
-                rhs.rank_out(),
-                "ksk_out output rank: {} != ksk_apply output rank: {}",
-                self.rank_out(),
-                rhs.rank_out()
-            );
-        }
-
-        (0..self.rank_in().into()).for_each(|col_i| {
-            (0..self.dnum().into()).for_each(|row_j| {
-                self.at_mut(row_j, col_i)
-                    .keyswitch_inplace(module, rhs, scratch)
-            });
-        });
-    }
-}
+impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
+    pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
+    where
+        A: GLWESwitchingKeyToRef,
+        B: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.gglwe_keyswitch(&mut self.key, &a.to_ref().key, b, scratch);
+    }
+
+    pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
+    where
+        A: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.gglwe_keyswitch_inplace(&mut self.key, a, scratch);
+    }
+}
+
+impl GGLWE<Vec<u8>> {
+    pub fn keyswitch_inplace_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
+    where
+        R: GGLWEInfos,
+        A: GGLWEInfos,
+        K: GGLWEInfos,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
+    }
+}
+
+impl<DataSelf: DataMut> GGLWE<DataSelf> {
+    pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
+    where
+        A: GGLWEToRef,
+        B: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.gglwe_keyswitch(self, a, b, scratch);
+    }
+
+    pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
+    where
+        A: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGLWEKeySwitch<BE>,
+    {
+        module.gglwe_keyswitch_inplace(self, a, scratch);
+    }
+}
+
+impl<BE: Backend> GGLWEKeySwitch<BE> for Module<BE> where Self: GLWEKeySwitch<BE> {}
+
+pub trait GGLWEKeySwitch<BE: Backend>
+where
+    Self: GLWEKeySwitch<BE>,
+{
+    fn gglwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
+    where
+        R: GGLWEInfos,
+        A: GGLWEInfos,
+        K: GGLWEInfos,
+    {
+        self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
+    }
+
+    fn gglwe_keyswitch<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
+    where
+        R: GGLWEToMut,
+        A: GGLWEToRef,
+        B: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
+        let a: &GGLWE<&[u8]> = &a.to_ref();
+        let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &b.to_ref();
+
+        assert_eq!(
+            res.rank_in(),
+            a.rank_in(),
+            "res input rank: {} != a input rank: {}",
+            res.rank_in(),
+            a.rank_in()
+        );
+        assert_eq!(
+            a.rank_out(),
+            b.rank_in(),
+            "res output rank: {} != b input rank: {}",
+            a.rank_out(),
+            b.rank_in()
+        );
+        assert_eq!(
+            res.rank_out(),
+            b.rank_out(),
+            "res output rank: {} != b output rank: {}",
+            res.rank_out(),
+            b.rank_out()
+        );
+        assert!(
+            res.dnum() <= a.dnum(),
+            "res.dnum()={} > a.dnum()={}",
+            res.dnum(),
+            a.dnum()
+        );
+        assert_eq!(
+            res.dsize(),
+            a.dsize(),
+            "res dsize: {} != a dsize: {}",
+            res.dsize(),
+            a.dsize()
+        );
+
+        for row in 0..res.dnum().into() {
+            for col in 0..res.rank_in().into() {
+                self.glwe_keyswitch(&mut res.at_mut(row, col), &a.at(row, col), b, scratch);
+            }
+        }
+    }
+
+    fn gglwe_keyswitch_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
+    where
+        R: GGLWEToMut,
+        A: GLWESwitchingKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGLWE<&mut [u8]> = &mut res.to_mut();
+        let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &a.to_ref();
+
+        assert_eq!(
+            res.rank_out(),
+            a.rank_out(),
+            "res output rank: {} != a output rank: {}",
+            res.rank_out(),
+            a.rank_out()
+        );
+
+        for row in 0..res.dnum().into() {
+            for col in 0..res.rank_in().into() {
+                self.glwe_keyswitch_inplace(&mut res.at_mut(row, col), a, scratch);
+            }
+        }
+    }
+}
+
+impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {}
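The new API above hangs together through a blanket implementation: `GGLWEKeySwitch` only requires `GLWEKeySwitch`, so every `Module<BE>` that can key-switch a single GLWE ciphertext automatically gains the GGLWE entry points, with the `*_tmp_bytes` default method delegating downward. A minimal self-contained sketch of that pattern (toy trait and type names, not the poulpy API):

// Toy reduction of the trait layering above; names are illustrative only.
trait GlweKs {
    fn glwe_tmp_bytes(&self) -> usize;
}

trait GglweKs: GlweKs {
    // Default method delegates to the lower-level trait, mirroring
    // gglwe_keyswitch_tmp_bytes -> glwe_keyswitch_tmp_bytes above.
    fn gglwe_tmp_bytes(&self) -> usize {
        self.glwe_tmp_bytes()
    }
}

struct ToyModule;

impl GlweKs for ToyModule {
    fn glwe_tmp_bytes(&self) -> usize {
        64
    }
}

// Blanket impl: anything that can key-switch a GLWE can key-switch a GGLWE.
impl<T: GlweKs> GglweKs for T {}

fn main() {
    assert_eq!(ToyModule.gglwe_tmp_bytes(), 64);
}

The same shape recurs in the next two hunks for `GGSWKeySwitch` and `GLWEKeySwitch`.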
@@ -1,366 +1,131 @@
-use poulpy_hal::{
-    api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
-        VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
-        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
-    },
-    layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
-};
-
-use crate::{
-    layouts::{
-        GGLWECiphertext, GGLWEInfos, GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos,
-        prepared::{GGLWESwitchingKeyPrepared, GGLWETensorKeyPrepared},
-    },
-    operations::GLWEOperations,
-};
+use poulpy_hal::layouts::{Backend, DataMut, Scratch, VecZnx};
+
+use crate::{
+    GGSWExpandRows, ScratchTakeCore,
+    keyswitching::glwe_ct::GLWEKeySwitch,
+    layouts::{
+        GGLWEInfos, GGSW, GGSWInfos, GGSWToMut, GGSWToRef,
+        prepared::{GLWESwitchingKeyPreparedToRef, TensorKeyPreparedToRef},
+    },
+};

-impl GGSWCiphertext<Vec<u8>> {
-    pub(crate) fn expand_row_scratch_space<B: Backend, OUT, TSK>(module: &Module<B>, out_infos: &OUT, tsk_infos: &TSK) -> usize
-    where
-        OUT: GGSWInfos,
-        TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
-    {
-        let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
-        let size_in: usize = out_infos
-            .k()
-            .div_ceil(tsk_infos.base2k())
-            .div_ceil(tsk_infos.dsize().into()) as usize;
-
-        let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes((tsk_infos.rank_out() + 1).into(), tsk_size);
-        let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, size_in);
-        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
-            tsk_size,
-            size_in,
-            size_in,
-            (tsk_infos.rank_in()).into(), // Verify if rank+1
-            (tsk_infos.rank_out()).into(), // Verify if rank+1
-            tsk_size,
-        );
-        let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
-        let norm: usize = module.vec_znx_normalize_tmp_bytes();
-
-        tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
-    }
-
-    #[allow(clippy::too_many_arguments)]
-    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        in_infos: &IN,
-        apply_infos: &KEY,
-        tsk_infos: &TSK,
-    ) -> usize
-    where
-        OUT: GGSWInfos,
-        IN: GGSWInfos,
-        KEY: GGLWEInfos,
-        TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
-    {
-        #[cfg(debug_assertions)]
-        {
-            assert_eq!(apply_infos.rank_in(), apply_infos.rank_out());
-            assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
-            assert_eq!(apply_infos.rank_in(), tsk_infos.rank_in());
-        }
-
-        let rank: usize = apply_infos.rank_out().into();
-
-        let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize;
-        let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, size_out);
-        let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out);
-        let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos);
-        let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, out_infos, tsk_infos);
-        let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out);
-
-        if in_infos.base2k() == tsk_infos.base2k() {
-            res_znx + ci_dft + (ks | expand_rows | res_dft)
-        } else {
-            let a_conv: usize = VecZnx::alloc_bytes(
-                module.n(),
-                1,
-                out_infos.k().div_ceil(tsk_infos.base2k()) as usize,
-            ) + module.vec_znx_normalize_tmp_bytes();
-            res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
-        }
-    }
-
-    #[allow(clippy::too_many_arguments)]
-    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        apply_infos: &KEY,
-        tsk_infos: &TSK,
-    ) -> usize
-    where
-        OUT: GGSWInfos,
-        KEY: GGLWEInfos,
-        TSK: GGLWEInfos,
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxBigNormalizeTmpBytes,
-    {
-        GGSWCiphertext::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos)
-    }
-}
+impl GGSW<Vec<u8>> {
+    pub fn keyswitch_tmp_bytes<R, A, K, T, M, BE: Backend>(
+        module: &M,
+        res_infos: &R,
+        a_infos: &A,
+        key_infos: &K,
+        tsk_infos: &T,
+    ) -> usize
+    where
+        R: GGSWInfos,
+        A: GGSWInfos,
+        K: GGLWEInfos,
+        T: GGLWEInfos,
+        M: GGSWKeySwitch<BE>,
+    {
+        module.ggsw_keyswitch_tmp_bytes(res_infos, a_infos, key_infos, tsk_infos)
+    }
+}
+
+impl<D: DataMut> GGSW<D> {
+    pub fn keyswitch<M, A, K, T, BE: Backend>(&mut self, module: &M, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        A: GGSWToRef,
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGSWKeySwitch<BE>,
+    {
+        module.ggsw_keyswitch(self, a, key, tsk, scratch);
+    }
+
+    pub fn keyswitch_inplace<M, K, T, BE: Backend>(&mut self, module: &M, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGSWKeySwitch<BE>,
+    {
+        module.ggsw_keyswitch_inplace(self, key, tsk, scratch);
+    }
+}
+
+pub trait GGSWKeySwitch<BE: Backend>
+where
+    Self: GLWEKeySwitch<BE> + GGSWExpandRows<BE>,
+{
+    fn ggsw_keyswitch_tmp_bytes<R, A, K, T>(&self, res_infos: &R, a_infos: &A, key_infos: &K, tsk_infos: &T) -> usize
+    where
+        R: GGSWInfos,
+        A: GGSWInfos,
+        K: GGLWEInfos,
+        T: GGLWEInfos,
+    {
+        assert_eq!(key_infos.rank_in(), key_infos.rank_out());
+        assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
+        assert_eq!(key_infos.rank_in(), tsk_infos.rank_in());
+
+        let rank: usize = key_infos.rank_out().into();
+
+        let size_out: usize = res_infos.k().div_ceil(res_infos.base2k()) as usize;
+        let res_znx: usize = VecZnx::bytes_of(self.n(), rank + 1, size_out);
+        let ci_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
+        let ks: usize = self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos);
+        let expand_rows: usize = self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos);
+        let res_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
+
+        if a_infos.base2k() == tsk_infos.base2k() {
+            res_znx + ci_dft + (ks | expand_rows | res_dft)
+        } else {
+            let a_conv: usize = VecZnx::bytes_of(
+                self.n(),
+                1,
+                res_infos.k().div_ceil(tsk_infos.base2k()) as usize,
+            ) + self.vec_znx_normalize_tmp_bytes();
+            res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
+        }
+    }
+
+    fn ggsw_keyswitch<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        A: GGSWToRef,
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let a: &GGSW<&[u8]> = &a.to_ref();
+
+        assert_eq!(res.ggsw_layout(), a.ggsw_layout());
+
+        for row in 0..a.dnum().into() {
+            // Key-switch column 0, i.e.
+            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
+            self.glwe_keyswitch(&mut res.at_mut(row, 0), &a.at(row, 0), key, scratch);
+        }
+
+        self.ggsw_expand_row(res, tsk, scratch);
+    }
+
+    fn ggsw_keyswitch_inplace<R, K, T>(&self, res: &mut R, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+
+        for row in 0..res.dnum().into() {
+            // Key-switch column 0, i.e.
+            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
+            self.glwe_keyswitch_inplace(&mut res.at_mut(row, 0), key, scratch);
+        }
+
+        self.ggsw_expand_row(res, tsk, scratch);
+    }
+}

-impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
-    pub fn from_gglwe<DataA, DataTsk, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        a: &GGLWECiphertext<DataA>,
-        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        DataA: DataRef,
-        DataTsk: DataRef,
-        Module<B>: VecZnxCopy
-            + VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxDftApply<B>
-            + VecZnxDftCopy<B>
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftAddInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxIdftApplyTmpA<B>
-            + VecZnxNormalize<B>,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
-    {
-        #[cfg(debug_assertions)]
-        {
-            use crate::layouts::{GLWEInfos, LWEInfos};
-
-            assert_eq!(self.rank(), a.rank_out());
-            assert_eq!(self.dnum(), a.dnum());
-            assert_eq!(self.n(), module.n() as u32);
-            assert_eq!(a.n(), module.n() as u32);
-            assert_eq!(tsk.n(), module.n() as u32);
-        }
-        (0..self.dnum().into()).for_each(|row_i| {
-            self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
-        });
-        self.expand_row(module, tsk, scratch);
-    }
-
-    pub fn keyswitch<DataLhs: DataRef, DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        lhs: &GGSWCiphertext<DataLhs>,
-        ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
-        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigNormalizeTmpBytes
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftApply<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigAddSmallInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxDftCopy<B>
-            + VecZnxDftAddInplace<B>
-            + VecZnxIdftApplyTmpA<B>
-            + VecZnxNormalize<B>,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
-    {
-        (0..lhs.dnum().into()).for_each(|row_i| {
-            // Key-switch column 0, i.e.
-            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
-            self.at_mut(row_i, 0)
-                .keyswitch(module, &lhs.at(row_i, 0), ksk, scratch);
-        });
-        self.expand_row(module, tsk, scratch);
-    }
-
-    pub fn keyswitch_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
-        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigNormalizeTmpBytes
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftApply<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigAddSmallInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxDftAllocBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxDftCopy<B>
-            + VecZnxDftAddInplace<B>
-            + VecZnxIdftApplyTmpA<B>
-            + VecZnxNormalize<B>,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
-    {
-        (0..self.dnum().into()).for_each(|row_i| {
-            // Key-switch column 0, i.e.
-            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
-            self.at_mut(row_i, 0)
-                .keyswitch_inplace(module, ksk, scratch);
-        });
-        self.expand_row(module, tsk, scratch);
-    }
-
-    pub fn expand_row<DataTsk: DataRef, B: Backend>(
-        &mut self,
-        module: &Module<B>,
-        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
-        scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftAllocBytes
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxBigAllocBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxDftApply<B>
-            + VecZnxDftCopy<B>
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxDftAddInplace<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxIdftApplyTmpA<B>
-            + VecZnxNormalize<B>,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
-    {
-        let basek_in: usize = self.base2k().into();
-        let basek_tsk: usize = tsk.base2k().into();
-
-        assert!(scratch.available() >= GGSWCiphertext::expand_row_scratch_space(module, self, tsk));
-
-        let n: usize = self.n().into();
-        let rank: usize = self.rank().into();
-        let cols: usize = rank + 1;
-
-        let a_size: usize = (self.size() * basek_in).div_ceil(basek_tsk);
-
-        // Key-switch the j-th row of column 0
-        for row_i in 0..self.dnum().into() {
-            let a = &self.at(row_i, 0).data;
-
-            // Pre-compute DFT of (a0, a1, a2)
-            let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, a_size);
-
-            if basek_in == basek_tsk {
-                for i in 0..cols {
-                    module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
-                }
-            } else {
-                let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(n, 1, a_size);
-                for i in 0..cols {
-                    module.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
-                    module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
-                }
-            }
-
-            for col_j in 1..cols {
-                // Example for rank 3:
-                //
-                // Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column actually
-                // comprises dnum such rows; we focus here on a specific row, implicitly
-                // given by ci_dft.
-                //
-                // # Input
-                //
-                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
-                // col 1: (0, 0, 0, 0)
-                // col 2: (0, 0, 0, 0)
-                // col 3: (0, 0, 0, 0)
-                //
-                // # Output
-                //
-                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
-                // col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
-                // col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
-                // col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
-
-                let dsize: usize = tsk.dsize().into();
-
-                let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(n, cols, tsk.size());
-                let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(dsize));
-
-                {
-                    // Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
-                    //
-                    // # Example for col=1
-                    //
-                    // a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
-                    // +
-                    // a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
-                    // +
-                    // a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
-                    // =
-                    // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
-                    for col_i in 1..cols {
-                        let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
-
-                        // Extracts a[i] and multiplies it with Enc(s[i]s[j])
-                        for di in 0..dsize {
-                            tmp_a.set_size((ci_dft.size() + di) / dsize);
-
-                            // Small optimization for dsize > 2
-                            // VMP produces some error e, and since we aggregate vmp * 2^{di * B}, we
-                            // also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
-                            // As such we can safely ignore the last dsize-2 limbs of the sum of VMP products.
-                            // It is possible to further ignore the last dsize-1 limbs, but this introduces
-                            // ~0.5 to 1 bit of additional noise, and is thus not done here, so that the
-                            // noise matches the ideal functionality.
-                            tmp_dft_i.set_size(tsk.size() - ((dsize - di) as isize - 2).max(0) as usize);
-
-                            module.vec_znx_dft_copy(dsize, dsize - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
-                            if di == 0 && col_i == 1 {
-                                module.vmp_apply_dft_to_dft(&mut tmp_dft_i, &tmp_a, pmat, scratch_3);
-                            } else {
-                                module.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
-                            }
-                        }
-                    }
-                }
-
-                // Adds (-(sum a[i] * s[i]) + M[i]) on the col_j-th column of tmp_dft_i
-                //
-                // (-(x0s0 + x1s1 + x2s2) + a0s0s0 + a1s0s1 + a2s0s2, x0, x1, x2)
-                // +
-                // (0, -(a0s0 + a1s1 + a2s2) + M[i], 0, 0)
-                // =
-                // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 -(a0s0 + a1s1 + a2s2) + M[i], x1, x2)
-                // =
-                // (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
-                module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
-                let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(n, 1, tsk.size());
-                for i in 0..cols {
-                    module.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
-                    module.vec_znx_big_normalize(
-                        basek_in,
-                        &mut self.at_mut(row_i, col_j).data,
-                        i,
-                        basek_tsk,
-                        &tmp_idft,
-                        0,
-                        scratch_3,
-                    );
-                }
-            }
-        }
-    }
-}
+impl<DataSelf: DataMut> GGSW<DataSelf> {}
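The expand-row comments in this hunk compress a fair amount of algebra. Written out for column 1 of a rank-3 GGSW, exactly as the comments sketch it (notation introduced here; Enc denotes a GGLWE encryption under the output key, and the tensor key supplies Enc(s_i s_j)):

\[
\sum_{i=0}^{2} a_i \,\mathrm{Enc}(s_0 s_i)
  \;=\; \Bigl(-\textstyle\sum_{t=0}^{2} x_t s_t \;+\; s_0 \textstyle\sum_{i=0}^{2} a_i s_i,\; x_0,\, x_1,\, x_2\Bigr),
\]

and adding the column-0 entry \((0,\; -\sum_i a_i s_i + M,\; 0,\; 0)\) into slot 1 cancels the \(s_0 \sum_i a_i s_i\) term:

\[
\Bigl(-\textstyle\sum_{t} x_t s_t,\; x_0 + M,\; x_1,\; x_2\Bigr),
\]

which is precisely the "col 1" row of the expected output table above. Columns 2 and 3 follow by replacing \(s_0\) with \(s_1\), \(s_2\).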
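Throughout the scratch-size formulas in these hunks (`(tmp_a + vmp) | (tmp_idft + norm)` above, and the `ks | expand_rows | res_dft` variants), `|` is plain bitwise OR on `usize`. For any non-negative integers it satisfies max(a, b) <= (a | b) <= a + b, since OR keeps every bit of both operands but never sets a bit absent from both. It is therefore a cheap, slightly padded stand-in for the maximum of two scratch regions that are never live at the same time. A standalone check of that bound (illustrative only, not poulpy code):

fn main() {
    for (a, b) in [(12usize, 40), (100, 100), (7, 9), (0, 5)] {
        let or = a | b;
        // Lower bound: OR preserves all bits of each operand.
        // Upper bound: a + b = (a ^ b) + 2 * (a & b) >= (a ^ b) + (a & b) = a | b.
        assert!(or >= a.max(b) && or <= a + b);
    }
    println!("bitwise OR upper-bounds max for all samples");
}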
@@ -1,186 +1,179 @@
|
||||
use poulpy_hal::{
|
||||
api::{
|
||||
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||
ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||
VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||
},
|
||||
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
|
||||
};
|
||||
|
||||
use crate::layouts::{GGLWEInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWESwitchingKeyPrepared};
|
||||
use crate::{
|
||||
ScratchTakeCore,
|
||||
layouts::{
|
||||
GGLWEInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
|
||||
prepared::{GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedToRef},
|
||||
},
|
||||
};
|
||||
|
||||
impl GLWECiphertext<Vec<u8>> {
|
||||
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||
module: &Module<B>,
|
||||
out_infos: &OUT,
|
||||
in_infos: &IN,
|
||||
key_apply: &KEY,
|
||||
) -> usize
|
||||
impl GLWE<Vec<u8>> {
|
||||
pub fn keyswitch_tmp_bytes<M, R, A, B, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
|
||||
where
|
||||
OUT: GLWEInfos,
|
||||
IN: GLWEInfos,
|
||||
KEY: GGLWEInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
R: GLWEInfos,
|
||||
A: GLWEInfos,
|
||||
B: GGLWEInfos,
|
||||
M: GLWEKeySwitch<BE>,
|
||||
{
|
||||
let in_size: usize = in_infos
|
||||
module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, b_infos)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataMut> GLWE<D> {
|
||||
pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
A: GLWEToRef,
|
||||
B: GLWESwitchingKeyPreparedToRef<BE>,
|
||||
M: GLWEKeySwitch<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
module.glwe_keyswitch(self, a, b, scratch);
|
||||
}
|
||||
|
||||
pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
A: GLWESwitchingKeyPreparedToRef<BE>,
|
||||
M: GLWEKeySwitch<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
module.glwe_keyswitch_inplace(self, a, scratch);
|
||||
}
|
||||
}
|
||||
|
||||
impl<BE: Backend> GLWEKeySwitch<BE> for Module<BE> where
|
||||
Self: Sized
|
||||
+ ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
{
|
||||
}
|
||||
|
||||
pub trait GLWEKeySwitch<BE: Backend>
|
||||
where
|
||||
Self: Sized
|
||||
+ ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
{
|
||||
fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
|
||||
where
|
||||
R: GLWEInfos,
|
||||
A: GLWEInfos,
|
||||
B: GGLWEInfos,
|
||||
{
|
||||
let in_size: usize = a_infos
|
||||
.k()
|
||||
.div_ceil(key_apply.base2k())
|
||||
.div_ceil(key_apply.dsize().into()) as usize;
|
||||
let out_size: usize = out_infos.size();
|
||||
let ksk_size: usize = key_apply.size();
|
||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
|
||||
let ai_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size);
|
||||
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
.div_ceil(b_infos.base2k())
|
||||
.div_ceil(b_infos.dsize().into()) as usize;
|
||||
let out_size: usize = res_infos.size();
|
||||
let ksk_size: usize = b_infos.size();
|
||||
let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
|
||||
let ai_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
|
||||
let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
|
||||
out_size,
|
||||
in_size,
|
||||
in_size,
|
||||
(key_apply.rank_in()).into(),
|
||||
(key_apply.rank_out() + 1).into(),
|
||||
(b_infos.rank_in()).into(),
|
||||
(b_infos.rank_out() + 1).into(),
|
||||
ksk_size,
|
||||
) + module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size);
|
||||
let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes();
|
||||
if in_infos.base2k() == key_apply.base2k() {
|
||||
) + self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
|
||||
let normalize_big: usize = self.vec_znx_big_normalize_tmp_bytes();
|
||||
if a_infos.base2k() == b_infos.base2k() {
|
||||
res_dft + ((ai_dft + vmp) | normalize_big)
|
||||
} else if key_apply.dsize() == 1 {
|
||||
} else if b_infos.dsize() == 1 {
|
||||
// In this case, we only need one column, temporary, that we can drop once a_dft is computed.
|
||||
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), 1, in_size) + module.vec_znx_normalize_tmp_bytes();
|
||||
let normalize_conv: usize = VecZnx::bytes_of(self.n(), 1, in_size) + self.vec_znx_normalize_tmp_bytes();
|
||||
res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
|
||||
} else {
|
||||
// Since we stride over a to get a_dft when dsize > 1, we need to store the full columns of a with in the base conversion.
|
||||
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), (key_apply.rank_in()).into(), in_size);
|
||||
res_dft + ((ai_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
|
||||
let normalize_conv: usize = VecZnx::bytes_of(self.n(), (b_infos.rank_in()).into(), in_size);
|
||||
res_dft + ((ai_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
|
||||
fn glwe_keyswitch<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
OUT: GLWEInfos,
|
||||
KEY: GGLWEInfos,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
R: GLWEToMut,
|
||||
A: GLWEToRef,
|
||||
B: GLWESwitchingKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply)
|
||||
}
|
||||
}
|
||||
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||
let a: &GLWE<&[u8]> = &a.to_ref();
|
||||
let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &b.to_ref();
|
||||
|
||||
impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn assert_keyswitch<B: Backend, DataLhs, DataRhs>(
|
||||
&self,
|
||||
module: &Module<B>,
|
||||
lhs: &GLWECiphertext<DataLhs>,
|
||||
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
||||
scratch: &Scratch<B>,
|
||||
) where
|
||||
DataLhs: DataRef,
|
||||
DataRhs: DataRef,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable,
|
||||
{
|
||||
assert_eq!(
|
||||
lhs.rank(),
|
||||
rhs.rank_in(),
|
||||
"lhs.rank(): {} != rhs.rank_in(): {}",
|
||||
lhs.rank(),
|
||||
rhs.rank_in()
|
||||
a.rank(),
|
||||
b.rank_in(),
|
||||
"a.rank(): {} != b.rank_in(): {}",
|
||||
a.rank(),
|
||||
b.rank_in()
|
||||
);
|
||||
assert_eq!(
|
||||
self.rank(),
|
||||
rhs.rank_out(),
|
||||
"self.rank(): {} != rhs.rank_out(): {}",
|
||||
self.rank(),
|
||||
rhs.rank_out()
|
||||
res.rank(),
|
||||
b.rank_out(),
|
||||
"res.rank(): {} != b.rank_out(): {}",
|
||||
res.rank(),
|
||||
b.rank_out()
|
||||
);
|
||||
assert_eq!(rhs.n(), self.n());
|
||||
assert_eq!(lhs.n(), self.n());
|
||||
|
||||
let scrach_needed: usize = GLWECiphertext::keyswitch_scratch_space(module, self, lhs, rhs);
|
||||
assert_eq!(res.n(), self.n() as u32);
|
||||
assert_eq!(a.n(), self.n() as u32);
|
||||
assert_eq!(b.n(), self.n() as u32);
|
||||
|
||||
let scrach_needed: usize = self.glwe_keyswitch_tmp_bytes(res, a, b);
|
||||
|
||||
assert!(
|
||||
scratch.available() >= scrach_needed,
|
||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
|
||||
module,
|
||||
self.base2k(),
|
||||
self.k(),
|
||||
lhs.base2k(),
|
||||
lhs.k(),
|
||||
rhs.base2k(),
|
||||
rhs.k(),
|
||||
rhs.dsize(),
|
||||
rhs.rank_in(),
|
||||
rhs.rank_out(),
|
||||
)={scrach_needed}",
|
||||
"scratch.available()={} < glwe_keyswitch_tmp_bytes={scrach_needed}",
|
||||
scratch.available(),
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn assert_keyswitch_inplace<B: Backend, DataRhs>(
|
||||
&self,
|
||||
module: &Module<B>,
|
||||
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
||||
scratch: &Scratch<B>,
|
||||
) where
|
||||
DataRhs: DataRef,
|
||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable,
|
||||
{
|
||||
assert_eq!(
|
||||
self.rank(),
|
||||
rhs.rank_out(),
|
||||
"self.rank(): {} != rhs.rank_out(): {}",
|
||||
self.rank(),
|
||||
rhs.rank_out()
|
||||
);
|
||||
let basek_out: usize = res.base2k().into();
|
||||
let base2k_out: usize = b.base2k().into();
|
||||
|
||||
assert_eq!(rhs.n(), self.n());
|
||||
|
||||
let scrach_needed: usize = GLWECiphertext::keyswitch_inplace_scratch_space(module, self, rhs);
|
||||
|
||||
assert!(
|
||||
scratch.available() >= scrach_needed,
|
||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space()={scrach_needed}",
|
||||
scratch.available(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
||||
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
||||
&mut self,
|
||||
module: &Module<B>,
|
||||
glwe_in: &GLWECiphertext<DataLhs>,
|
||||
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
self.assert_keyswitch(module, glwe_in, rhs, scratch);
|
||||
}
|
||||
|
||||
let basek_out: usize = self.base2k().into();
|
||||
let basek_ksk: usize = rhs.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<_, B> = glwe_in.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
||||
module.vec_znx_big_normalize(
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), b.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, a, b, scratch_1);
|
||||
(0..(res.rank() + 1).into()).for_each(|i| {
|
||||
self.vec_znx_big_normalize(
|
||||
basek_out,
|
||||
&mut self.data,
|
||||
&mut res.data,
|
||||
i,
|
||||
basek_ksk,
|
||||
base2k_out,
|
||||
&res_big,
|
||||
i,
|
||||
scratch_1,
|
||||
@@ -188,227 +181,190 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
|
||||
&mut self,
|
||||
module: &Module<B>,
|
||||
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) where
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>
|
||||
+ VecZnxNormalizeTmpBytes,
|
||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
self.assert_keyswitch_inplace(module, rhs, scratch);
|
||||
}
|
||||
|
||||
let basek_in: usize = self.base2k().into();
|
||||
let basek_ksk: usize = rhs.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||
(0..(self.rank() + 1).into()).for_each(|i| {
|
||||
module.vec_znx_big_normalize(
|
||||
basek_in,
|
||||
&mut self.data,
|
||||
i,
|
||||
basek_ksk,
|
||||
&res_big,
|
||||
i,
|
||||
scratch_1,
|
||||
);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: DataRef> GLWECiphertext<D> {
|
||||
pub(crate) fn keyswitch_internal<B: Backend, DataRes, DataKey>(
|
||||
&self,
|
||||
module: &Module<B>,
|
||||
res_dft: VecZnxDft<DataRes, B>,
|
||||
rhs: &GGLWESwitchingKeyPrepared<DataKey, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) -> VecZnxBig<DataRes, B>
|
||||
fn glwe_keyswitch_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
|
||||
where
|
||||
DataRes: DataMut,
|
||||
DataKey: DataRef,
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||
R: GLWEToMut,
|
||||
A: GLWESwitchingKeyPreparedToRef<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
if rhs.dsize() == 1 {
|
||||
return keyswitch_vmp_one_digit(
|
||||
module,
|
||||
self.base2k().into(),
|
||||
rhs.base2k().into(),
|
||||
res_dft,
|
||||
&self.data,
|
||||
&rhs.key.data,
|
||||
scratch,
|
||||
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
|
||||
let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &a.to_ref();
|
||||
|
||||
assert_eq!(
|
||||
res.rank(),
|
||||
a.rank_in(),
|
||||
"res.rank(): {} != a.rank_in(): {}",
|
||||
res.rank(),
|
||||
a.rank_in()
|
||||
);
|
||||
assert_eq!(
|
||||
res.rank(),
|
||||
a.rank_out(),
|
||||
"res.rank(): {} != b.rank_out(): {}",
|
||||
res.rank(),
|
||||
a.rank_out()
|
||||
);
|
||||
|
||||
assert_eq!(res.n(), self.n() as u32);
|
||||
assert_eq!(a.n(), self.n() as u32);
|
||||
|
||||
let scrach_needed: usize = self.glwe_keyswitch_tmp_bytes(res, res, a);
|
||||
|
||||
assert!(
|
||||
scratch.available() >= scrach_needed,
|
||||
"scratch.available()={} < glwe_keyswitch_tmp_bytes={scrach_needed}",
|
||||
scratch.available(),
|
||||
);
|
||||
|
||||
let base2k_in: usize = res.base2k().into();
|
||||
let base2k_out: usize = a.base2k().into();
|
||||
|
||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), a.size()); // Todo optimise
|
||||
let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, res, a, scratch_1);
|
||||
(0..(res.rank() + 1).into()).for_each(|i| {
|
||||
self.vec_znx_big_normalize(
|
||||
base2k_in,
|
||||
&mut res.data,
|
||||
i,
|
||||
base2k_out,
|
||||
&res_big,
|
||||
i,
|
||||
scratch_1,
|
||||
);
|
||||
}
|
||||
|
||||
keyswitch_vmp_multiple_digits(
|
||||
module,
|
||||
self.base2k().into(),
|
||||
rhs.base2k().into(),
|
||||
res_dft,
|
||||
&self.data,
|
||||
&rhs.key.data,
|
||||
rhs.dsize().into(),
|
||||
scratch,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>(
|
||||
module: &Module<B>,
|
||||
basek_in: usize,
|
||||
basek_ksk: usize,
|
||||
mut res_dft: VecZnxDft<DataRes, B>,
|
||||
a: &VecZnx<DataIn>,
|
||||
mat: &VmpPMat<DataVmp, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) -> VecZnxBig<DataRes, B>
|
||||
impl GLWE<Vec<u8>> {}
|
||||
|
||||
impl<DataSelf: DataMut> GLWE<DataSelf> {}
|
||||
|
||||
fn keyswitch_internal<BE: Backend, M, DR, DA, DB>(
|
||||
module: &M,
|
||||
mut res: VecZnxDft<DR, BE>,
|
||||
a: &GLWE<DA>,
|
||||
b: &GLWESwitchingKeyPrepared<DB, BE>,
|
||||
scratch: &mut Scratch<BE>,
|
||||
) -> VecZnxBig<DR, BE>
|
||||
where
|
||||
DataRes: DataMut,
|
||||
DataIn: DataRef,
|
||||
DataVmp: DataRef,
|
||||
Module<B>: VecZnxDftAllocBytes
|
||||
+ VecZnxDftApply<B>
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||
DR: DataMut,
|
||||
DA: DataRef,
|
||||
DB: DataRef,
|
||||
M: ModuleN
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VmpApplyDftToDft<BE>
|
||||
+ VmpApplyDftToDftAdd<BE>
|
||||
+ VecZnxDftApply<BE>
|
||||
+ VecZnxIdftApplyConsume<BE>
|
||||
+ VecZnxBigAddSmallInplace<BE>
|
||||
+ VecZnxBigNormalize<BE>
|
||||
+ VecZnxNormalize<BE>,
|
||||
Scratch<BE>: ScratchTakeCore<BE>,
|
||||
{
|
||||
let cols: usize = a.cols();
|
||||
let base2k_in: usize = a.base2k().into();
|
||||
let base2k_out: usize = b.base2k().into();
|
||||
let cols: usize = (a.rank() + 1).into();
|
||||
let a_size: usize = (a.size() * base2k_in).div_ceil(base2k_out);
|
||||
let pmat: &VmpPMat<DB, BE> = &b.key.data;
|
||||
|
||||
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a.size());
|
||||
if b.dsize() == 1 {
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a.size());
|
||||
|
||||
if basek_in == basek_ksk {
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1);
|
||||
});
|
||||
if base2k_in == base2k_out {
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a.data(), col_i + 1);
|
||||
});
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module, 1, a_size);
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_normalize(
|
||||
base2k_out,
|
||||
&mut a_conv,
|
||||
0,
|
||||
base2k_in,
|
||||
a.data(),
|
||||
col_i + 1,
|
||||
scratch_2,
|
||||
);
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
|
||||
});
|
||||
}
|
||||
|
||||
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), 1, a_size);
|
||||
(0..cols - 1).for_each(|col_i| {
|
||||
module.vec_znx_normalize(basek_ksk, &mut a_conv, 0, basek_in, a, col_i + 1, scratch_2);
|
||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
|
||||
});
|
||||
let dsize: usize = b.dsize().into();
|
||||
|
||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a_size.div_ceil(dsize));
|
||||
ai_dft.data_mut().fill(0);
|
||||
|
||||
if base2k_in == base2k_out {
|
||||
for di in 0..dsize {
|
||||
ai_dft.set_size((a_size + di) / dsize);
|
||||
|
||||
// Small optimization for dsize > 2
|
||||
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
|
||||
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
|
||||
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
|
||||
// It is possible to further ignore the last dsize-1 limbs, but this introduce
|
||||
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
|
||||
// noise is kept with respect to the ideal functionality.
|
||||
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
|
||||
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a.data(), j + 1);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_1);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module, cols - 1, a_size);
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_normalize(
|
||||
base2k_out,
|
||||
&mut a_conv,
|
||||
j,
|
||||
base2k_in,
|
||||
a.data(),
|
||||
j + 1,
|
||||
scratch_2,
|
||||
);
|
||||
}
|
||||
|
||||
for di in 0..dsize {
|
||||
ai_dft.set_size((a_size + di) / dsize);
|
||||
|
||||
// Small optimization for dsize > 2
|
||||
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
|
||||
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
|
||||
// As such we can ignore the last dsize-2 limbs safely of the sum of vmp products.
|
||||
// It is possible to further ignore the last dsize-1 limbs, but this introduce
|
||||
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
|
||||
// noise is kept with respect to the ideal functionality.
|
||||
res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);
|
||||
|
||||
for j in 0..cols - 1 {
|
||||
module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
|
||||
}
|
||||
|
||||
if di == 0 {
|
||||
module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_2);
|
||||
} else {
|
||||
module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.set_size(res.max_size());
|
||||
}
|
||||
|
||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
||||
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
||||
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
|
||||
res_big
|
||||
}

#[allow(clippy::too_many_arguments)]
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
    module: &Module<B>,
    basek_in: usize,
    basek_ksk: usize,
    mut res_dft: VecZnxDft<DataRes, B>,
    a: &VecZnx<DataIn>,
    mat: &VmpPMat<DataVmp, B>,
    dsize: usize,
    scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
where
    DataRes: DataMut,
    DataIn: DataRef,
    DataVmp: DataRef,
    Module<B>: VecZnxDftAllocBytes
        + VecZnxDftApply<B>
        + VmpApplyDftToDft<B>
        + VmpApplyDftToDftAdd<B>
        + VecZnxIdftApplyConsume<B>
        + VecZnxBigAddSmallInplace<B>
        + VecZnxNormalize<B>,
    Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
{
    let cols: usize = a.cols();
    let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
    let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a_size.div_ceil(dsize));
    ai_dft.data_mut().fill(0);

    if basek_in == basek_ksk {
        for di in 0..dsize {
            ai_dft.set_size((a_size + di) / dsize);

            // Small optimization for dsize > 2: each VMP product introduces some error e_i,
            // and since we aggregate vmp * 2^{di * B}, we also aggregate e_i * 2^{di * B},
            // the largest error being e_i * 2^{(dsize-1) * B}. As such, we can safely
            // ignore the last dsize-2 limbs of the sum of VMP products. It would be
            // possible to further ignore the last dsize-1 limbs, but this introduces
            // ~0.5 to 1 bit of additional noise, so it is not done here, ensuring the
            // noise matches that of the ideal functionality.
            res_dft.set_size(mat.size() - ((dsize - di) as isize - 2).max(0) as usize);

            for j in 0..cols - 1 {
                module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a, j + 1);
            }

            if di == 0 {
                module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
            } else {
                module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
            }
        }
    } else {
        let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), cols - 1, a_size);
        for j in 0..cols - 1 {
            module.vec_znx_normalize(basek_ksk, &mut a_conv, j, basek_in, a, j + 1, scratch_2);
        }

        for di in 0..dsize {
            ai_dft.set_size((a_size + di) / dsize);

            // Small optimization for dsize > 2: each VMP product introduces some error e_i,
            // and since we aggregate vmp * 2^{di * B}, we also aggregate e_i * 2^{di * B},
            // the largest error being e_i * 2^{(dsize-1) * B}. As such, we can safely
            // ignore the last dsize-2 limbs of the sum of VMP products. It would be
            // possible to further ignore the last dsize-1 limbs, but this introduces
            // ~0.5 to 1 bit of additional noise, so it is not done here, ensuring the
            // noise matches that of the ideal functionality.
            res_dft.set_size(mat.size() - ((dsize - di) as isize - 2).max(0) as usize);

            for j in 0..cols - 1 {
                module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
            }

            if di == 0 {
                module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_2);
            } else {
                module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_2);
            }
        }
    }

    res_dft.set_size(res_dft.max_size());
    let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
    module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
    let mut res_big: VecZnxBig<DR, BE> = module.vec_znx_idft_apply_consume(res);
    module.vec_znx_big_add_small_inplace(&mut res_big, 0, a.data(), 0);
    res_big
}
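
// Informal sketch of the identity behind the digit loops above (with B = basek_ksk,
// and assuming the `di` argument of vmp_apply_dft_to_dft_add is the limb offset of
// the accumulation): writing the input as a = sum_{di=0}^{dsize-1} a_di * 2^{di * B},
// each iteration extracts the limbs of digit di into ai_dft, multiplies them by the
// prepared matrix in the DFT domain, and accumulates the product shifted by di
// limbs, so the loop computes sum_di VMP(a_di, mat) * 2^{di * B}.
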
@@ -1,116 +1,116 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
    api::ScratchAvailable,
    layouts::{Backend, DataMut, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
};

use crate::{
    TakeGLWECt,
    ScratchTakeCore,
    keyswitching::glwe_ct::GLWEKeySwitch,
    layouts::{
        GGLWEInfos, GLWECiphertext, GLWECiphertextLayout, LWECiphertext, LWEInfos, Rank, TorusPrecision,
        prepared::LWESwitchingKeyPrepared,
        GGLWEInfos, GLWE, GLWEAlloc, GLWELayout, LWE, LWEInfos, LWEToMut, LWEToRef, Rank, TorusPrecision,
        prepared::{LWESwitchingKeyPrepared, LWESwitchingKeyPreparedToRef},
    },
};

impl LWECiphertext<Vec<u8>> {
    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
        module: &Module<B>,
        out_infos: &OUT,
        in_infos: &IN,
        key_infos: &KEY,
    ) -> usize
impl LWE<Vec<u8>> {
    pub fn keyswitch_tmp_bytes<M, R, A, K, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
    where
        OUT: LWEInfos,
        IN: LWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftApply<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxNormalizeTmpBytes,
        R: LWEInfos,
        A: LWEInfos,
        K: GGLWEInfos,
        M: LWEKeySwitch<BE>,
    {
        let max_k: TorusPrecision = in_infos.k().max(out_infos.k());

        let glwe_in_infos: GLWECiphertextLayout = GLWECiphertextLayout {
            n: module.n().into(),
            base2k: in_infos.base2k(),
            k: max_k,
            rank: Rank(1),
        };

        let glwe_out_infos: GLWECiphertextLayout = GLWECiphertextLayout {
            n: module.n().into(),
            base2k: out_infos.base2k(),
            k: max_k,
            rank: Rank(1),
        };

        let glwe_in: usize = GLWECiphertext::alloc_bytes(&glwe_in_infos);
        let glwe_out: usize = GLWECiphertext::alloc_bytes(&glwe_out_infos);
        let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, key_infos);

        glwe_in + glwe_out + ks
        module.lwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos)
    }
}
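
// Note: the value returned here is the scratch budget expected by the keyswitch
// call below; compare the debug assertion on scratch.available() in LWE::keyswitch.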

impl<DLwe: DataMut> LWECiphertext<DLwe> {
    pub fn keyswitch<A, DKs, B: Backend>(
        &mut self,
        module: &Module<B>,
        a: &LWECiphertext<A>,
        ksk: &LWESwitchingKeyPrepared<DKs, B>,
        scratch: &mut Scratch<B>,
    ) where
        A: DataRef,
        DKs: DataRef,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftApply<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxNormalize<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxCopy,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
impl<D: DataMut> LWE<D> {
    pub fn keyswitch<M, A, K, BE: Backend>(&mut self, module: &M, a: &A, ksk: &K, scratch: &mut Scratch<BE>)
    where
        A: LWEToRef,
        K: LWESwitchingKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
        M: LWEKeySwitch<BE>,
    {
        #[cfg(debug_assertions)]
        {
            assert!(self.n() <= module.n() as u32);
            assert!(a.n() <= module.n() as u32);
            assert!(scratch.available() >= LWECiphertext::keyswitch_scratch_space(module, self, a, ksk));
        }
        module.lwe_keyswitch(self, a, ksk, scratch);
    }
}
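
// Hypothetical usage sketch (the module, input ciphertext, prepared switching key
// and scratch buffer are assumed to be constructed elsewhere; the names are
// illustrative only, not part of this diff):
//
//     let mut res: LWE<Vec<u8>> = /* allocate with the output parameters */;
//     res.keyswitch(&module, &lwe_in, &ksk_prepared, &mut scratch);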

        let max_k: TorusPrecision = self.k().max(a.k());
impl<BE: Backend> LWEKeySwitch<BE> for Module<BE> where Self: GLWEKeySwitch<BE> + GLWEAlloc {}

pub trait LWEKeySwitch<BE: Backend>
where
    Self: GLWEKeySwitch<BE> + GLWEAlloc,
{
    fn lwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
    where
        R: LWEInfos,
        A: LWEInfos,
        K: GGLWEInfos,
    {
        let max_k: TorusPrecision = a_infos.k().max(res_infos.k());

        let glwe_a_infos: GLWELayout = GLWELayout {
            n: self.ring_degree(),
            base2k: a_infos.base2k(),
            k: max_k,
            rank: Rank(1),
        };

        let glwe_res_infos: GLWELayout = GLWELayout {
            n: self.ring_degree(),
            base2k: res_infos.base2k(),
            k: max_k,
            rank: Rank(1),
        };

        let glwe_in: usize = GLWE::bytes_of_from_infos(self, &glwe_a_infos);
        let glwe_out: usize = GLWE::bytes_of_from_infos(self, &glwe_res_infos);
        let ks: usize = self.glwe_keyswitch_tmp_bytes(&glwe_res_infos, &glwe_a_infos, key_infos);

        glwe_in + glwe_out + ks
    }
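
    // The sum above mirrors the allocations performed by lwe_keyswitch below:
    // scratch must hold the two temporary rank-1 GLWE ciphertexts (glwe_in and
    // glwe_out) plus whatever the inner GLWE keyswitch itself consumes.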

    fn lwe_keyswitch<R, A, K>(&self, res: &mut R, a: &A, ksk: &K, scratch: &mut Scratch<BE>)
    where
        R: LWEToMut,
        A: LWEToRef,
        K: LWESwitchingKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        let res: &mut LWE<&mut [u8]> = &mut res.to_mut();
        let a: &LWE<&[u8]> = &a.to_ref();
        let ksk: &LWESwitchingKeyPrepared<&[u8], BE> = &ksk.to_ref();

        assert!(res.n().as_usize() <= self.n());
        assert!(a.n().as_usize() <= self.n());
        assert_eq!(ksk.n(), self.n() as u32);
        assert!(scratch.available() >= self.lwe_keyswitch_tmp_bytes(res, a, ksk));

        let max_k: TorusPrecision = res.k().max(a.k());

        let a_size: usize = a.k().div_ceil(ksk.base2k()) as usize;

        let (mut glwe_in, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout {
            n: ksk.n(),
            base2k: a.base2k(),
            k: max_k,
            rank: Rank(1),
        });
        let (mut glwe_in, scratch_1) = scratch.take_glwe_ct(
            self,
            &GLWELayout {
                n: ksk.n(),
                base2k: a.base2k(),
                k: max_k,
                rank: Rank(1),
            },
        );
        glwe_in.data.zero();

        let (mut glwe_out, scratch_1) = scratch_1.take_glwe_ct(&GLWECiphertextLayout {
            n: ksk.n(),
            base2k: self.base2k(),
            k: max_k,
            rank: Rank(1),
        });
        let (mut glwe_out, scratch_1) = scratch_1.take_glwe_ct(
            self,
            &GLWELayout {
                n: ksk.n(),
                base2k: res.base2k(),
                k: max_k,
                rank: Rank(1),
            },
        );

        let n_lwe: usize = a.n().into();

@@ -120,7 +120,7 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
            glwe_in.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
        }

        glwe_out.keyswitch(module, &glwe_in, &ksk.0, scratch_1);
        self.sample_extract(&glwe_out);
        self.glwe_keyswitch(&mut glwe_out, &glwe_in, &ksk.0, scratch_1);
        res.sample_extract(&glwe_out);
    }
}
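
// Informal summary of lwe_keyswitch (derived from the code above, part of which is
// elided by the hunk): the LWE coefficients are embedded into a fresh rank-1 GLWE
// ciphertext, a GLWE keyswitch is applied with the wrapped key ksk.0, and the
// result is brought back to an LWE ciphertext via sample_extract.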