updated repo for publishing (#74)

This commit is contained in:
Jean-Philippe Bossuat
2025-08-17 14:57:39 +02:00
committed by GitHub
parent 0be569eca0
commit 62eb87cc07
244 changed files with 374 additions and 539 deletions

View File

@@ -0,0 +1,221 @@
use poulpy_backend::hal::{
api::{
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigConsume, VmpApply, VmpApplyAdd, VmpApplyTmpBytes, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
GGLWEAutomorphismKey, GGLWESwitchingKey, GLWECiphertext, Infos,
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared},
};
impl GGLWEAutomorphismKey<Vec<u8>> {
    /// Returns the scratch-space size (in bytes) required by
    /// [`GGLWEAutomorphismKey::keyswitch`].
    ///
    /// Thin delegation to [`GGLWESwitchingKey::keyswitch_scratch_space`],
    /// using the same `rank` for both the input and output rank (an
    /// automorphism key maps a secret back onto itself, so ranks match).
    #[allow(clippy::too_many_arguments)]
    pub fn keyswitch_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_in: usize,
        k_ksk: usize,
        digits: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GGLWESwitchingKey::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits, rank, rank)
    }

    /// Returns the scratch-space size (in bytes) required by
    /// [`GGLWEAutomorphismKey::keyswitch_inplace`].
    ///
    /// Delegates to [`GGLWESwitchingKey::keyswitch_inplace_scratch_space`].
    pub fn keyswitch_inplace_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_ksk: usize,
        digits: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
    }
}
impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
    /// Key-switches the automorphism key `lhs` into `self` under the prepared
    /// switching key `rhs`.
    ///
    /// Thin wrapper: forwards to [`GGLWESwitchingKey::keyswitch`] on the
    /// inner `self.key` / `lhs.key`.
    pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        lhs: &GGLWEAutomorphismKey<DataLhs>,
        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
    {
        self.key.keyswitch(module, &lhs.key, rhs, scratch);
    }

    /// In-place variant of [`GGLWEAutomorphismKey::keyswitch`]: `self` is both
    /// input and output. Forwards to the inner key's `keyswitch_inplace`.
    pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
    {
        self.key.keyswitch_inplace(module, &rhs.key, scratch);
    }
}
impl GGLWESwitchingKey<Vec<u8>> {
    /// Returns the scratch-space size (in bytes) required by
    /// [`GGLWESwitchingKey::keyswitch`].
    ///
    /// A GGLWE switching key is key-switched row by row as GLWE ciphertexts,
    /// so this delegates directly to [`GLWECiphertext::keyswitch_scratch_space`].
    #[allow(clippy::too_many_arguments)]
    pub fn keyswitch_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_in: usize,
        k_ksk: usize,
        digits: usize,
        rank_in: usize,
        rank_out: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GLWECiphertext::keyswitch_scratch_space(
            module, n, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out,
        )
    }

    /// Returns the scratch-space size (in bytes) required by
    /// [`GGLWESwitchingKey::keyswitch_inplace`].
    ///
    /// Delegates to [`GLWECiphertext::keyswitch_inplace_scratch_space`].
    pub fn keyswitch_inplace_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_ksk: usize,
        digits: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_out, k_ksk, digits, rank)
    }
}
impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
    /// Key-switches the switching key `lhs` into `self` under the prepared
    /// switching key `rhs`.
    ///
    /// Every row of every input column of `lhs` is key-switched independently
    /// as a GLWE ciphertext. Rows of `self` that have no counterpart in `lhs`
    /// (when `self.rows() > lhs.rows()`) carry no information and are zeroed.
    ///
    /// # Panics (debug builds)
    /// Asserts that the input/output ranks of `self`, `lhs` and `rhs` are
    /// mutually consistent.
    pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        lhs: &GGLWESwitchingKey<DataLhs>,
        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(
                self.rank_in(),
                lhs.rank_in(),
                "ksk_out input rank: {} != ksk_in input rank: {}",
                self.rank_in(),
                lhs.rank_in()
            );
            assert_eq!(
                lhs.rank_out(),
                rhs.rank_in(),
                // BUGFIX: this message previously printed self.rank_out()
                // even though the comparison is on lhs.rank_out().
                "ksk_in output rank: {} != ksk_apply input rank: {}",
                lhs.rank_out(),
                rhs.rank_in()
            );
            assert_eq!(
                self.rank_out(),
                rhs.rank_out(),
                "ksk_out output rank: {} != ksk_apply output rank: {}",
                self.rank_out(),
                rhs.rank_out()
            );
        }
        // BUGFIX: only the rows present in BOTH `self` and `lhs` can be
        // key-switched; the previous bound of `0..self.rows()` indexed out of
        // bounds on `lhs` whenever self.rows() > lhs.rows() (the trailing
        // zeroing loop below shows those surplus rows were meant to be zeroed,
        // not key-switched).
        let common_rows: usize = self.rows().min(lhs.rows());
        (0..self.rank_in()).for_each(|col_i| {
            (0..common_rows).for_each(|row_j| {
                self.at_mut(row_j, col_i)
                    .keyswitch(module, &lhs.at(row_j, col_i), rhs, scratch);
            });
        });
        // Rows of `self` beyond lhs.rows() have no source data: zero them.
        (common_rows..self.rows()).for_each(|row_i| {
            (0..self.rank_in()).for_each(|col_j| {
                self.at_mut(row_i, col_j).data.zero();
            });
        });
    }

    /// In-place variant of [`GGLWESwitchingKey::keyswitch`]: every row of
    /// every input column of `self` is key-switched in place under `rhs`.
    ///
    /// # Panics (debug builds)
    /// Asserts that the output ranks of `self` and `rhs` match.
    pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(
                self.rank_out(),
                rhs.rank_out(),
                "ksk_out output rank: {} != ksk_apply output rank: {}",
                self.rank_out(),
                rhs.rank_out()
            );
        }
        (0..self.rank_in()).for_each(|col_i| {
            (0..self.rows()).for_each(|row_j| {
                self.at_mut(row_j, col_i)
                    .keyswitch_inplace(module, rhs, scratch)
            });
        });
    }
}

View File

@@ -0,0 +1,357 @@
use poulpy_backend::hal::{
api::{
ScratchAvailable, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftCopy, VecZnxDftFromVecZnx,
VecZnxDftToVecZnxBigConsume, VecZnxDftToVecZnxBigTmpA, VecZnxNormalizeTmpBytes, VmpApply, VmpApplyAdd, VmpApplyTmpBytes,
ZnxInfos,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat},
};
use crate::{
layouts::{
GGLWECiphertext, GGSWCiphertext, GLWECiphertext, Infos,
prepared::{GGLWESwitchingKeyPrepared, GGLWETensorKeyPrepared},
},
operations::GLWEOperations,
};
impl GGSWCiphertext<Vec<u8>> {
    /// Returns the scratch-space size (in bytes) required by
    /// [`GGSWCiphertext::expand_row`].
    ///
    /// Sized as: the per-row DFT accumulator (`tmp_dft_i`) plus whichever of
    /// the two sequential phases is larger — the VMP phase (`tmp_a` + VMP
    /// temporaries) or the inverse-DFT/normalization phase (`tmp_idft` + norm).
    pub(crate) fn expand_row_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        self_k: usize,
        k_tsk: usize,
        digits: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
    {
        // Limb counts: tensor key, output ciphertext, and the digit-packed input.
        let tsk_size: usize = k_tsk.div_ceil(basek);
        let self_size_out: usize = self_k.div_ceil(basek);
        let self_size_in: usize = self_size_out.div_ceil(digits);
        let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, tsk_size);
        let tmp_a: usize = module.vec_znx_dft_alloc_bytes(n, 1, self_size_in);
        let vmp: usize = module.vmp_apply_tmp_bytes(
            n,
            self_size_out,
            self_size_in,
            self_size_in,
            rank,
            rank,
            tsk_size,
        );
        let tmp_idft: usize = module.vec_znx_big_alloc_bytes(n, 1, tsk_size);
        let norm: usize = module.vec_znx_normalize_tmp_bytes(n);
        // NOTE(review): `|` here is a bitwise OR on byte counts, presumably an
        // over-approximation of max() for two phases that never coexist —
        // confirm against the scratch allocator's accounting convention.
        tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
    }

    /// Returns the scratch-space size (in bytes) required by
    /// [`GGSWCiphertext::keyswitch`]: the temporary result/DFT buffers plus
    /// the larger of the GLWE key-switch and row-expansion phases.
    #[allow(clippy::too_many_arguments)]
    pub fn keyswitch_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_in: usize,
        k_ksk: usize,
        digits_ksk: usize,
        k_tsk: usize,
        digits_tsk: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>:
            VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        let out_size: usize = k_out.div_ceil(basek);
        let res_znx: usize = VecZnx::alloc_bytes(n, rank + 1, out_size);
        let ci_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
        let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, n, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
        let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, n, basek, k_out, k_tsk, digits_tsk, rank);
        let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank + 1, out_size);
        // NOTE(review): `|` is a bitwise OR used as an upper bound of max();
        // see expand_row_scratch_space.
        res_znx + ci_dft + (ks | expand_rows | res_dft)
    }

    /// Returns the scratch-space size (in bytes) required by
    /// [`GGSWCiphertext::keyswitch_inplace`] (input size == output size).
    #[allow(clippy::too_many_arguments)]
    pub fn keyswitch_inplace_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_ksk: usize,
        digits_ksk: usize,
        k_tsk: usize,
        digits_tsk: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>:
            VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GGSWCiphertext::keyswitch_scratch_space(
            module, n, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
        )
    }
}
impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
    /// Builds a GGSW ciphertext from the GGLWE ciphertext `a`: column 0 is
    /// copied verbatim from `a`, then the remaining columns are regenerated
    /// from column 0 with the tensor key via [`GGSWCiphertext::expand_row`].
    ///
    /// # Panics (debug builds)
    /// Asserts matching rank/rows between `self` and `a`, and matching ring
    /// degree between `self`, `a`, `tsk` and `module`.
    pub fn from_gglwe<DataA, DataTsk, B: Backend>(
        &mut self,
        module: &Module<B>,
        a: &GGLWECiphertext<DataA>,
        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        DataA: DataRef,
        DataTsk: DataRef,
        Module<B>: VecZnxCopy
            + VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftCopy<B>
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftAddInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxDftToVecZnxBigTmpA<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(self.rank(), a.rank());
            assert_eq!(self.rows(), a.rows());
            assert_eq!(self.n(), module.n());
            assert_eq!(a.n(), module.n());
            assert_eq!(tsk.n(), module.n());
        }
        // Copy column 0 of every row; expand_row reconstructs columns 1..=rank.
        (0..self.rows()).for_each(|row_i| {
            self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
        });
        self.expand_row(module, tsk, scratch);
    }

    /// Key-switches the GGSW ciphertext `lhs` into `self`: column 0 of every
    /// row is key-switched under `ksk`, then the remaining columns are
    /// regenerated with the tensor key `tsk` via [`GGSWCiphertext::expand_row`].
    pub fn keyswitch<DataLhs: DataRef, DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        lhs: &GGSWCiphertext<DataLhs>,
        ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        // BUGFIX(cleanup): `VecZnxDftAllocBytes` was listed twice in this bound.
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
            + VecZnxDftToVecZnxBigTmpA<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
    {
        self.keyswitch_internal(module, lhs, ksk, scratch);
        self.expand_row(module, tsk, scratch);
    }

    /// In-place variant of [`GGSWCiphertext::keyswitch`]: `self` is both the
    /// input and the output ciphertext.
    pub fn keyswitch_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        // BUGFIX(cleanup): `VecZnxDftAllocBytes` was listed twice in this bound.
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
            + VecZnxDftToVecZnxBigTmpA<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
    {
        // NOTE(review): this materializes a shared reference that aliases the
        // live `&mut self` borrow for the duration of `keyswitch`. Overlapping
        // `&mut`/`&` references to the same object violate Rust's aliasing
        // rules (undefined behavior under Stacked/Tree Borrows) regardless of
        // runtime access patterns — consider routing through raw pointers for
        // the whole call, or a dedicated in-place kernel.
        unsafe {
            let self_ptr: *mut GGSWCiphertext<DataSelf> = self as *mut GGSWCiphertext<DataSelf>;
            self.keyswitch(module, &*self_ptr, ksk, tsk, scratch);
        }
    }

    /// Regenerates columns `1..=rank` of every row of `self` from column 0,
    /// using the prepared tensor key `tsk` (pairwise key-switching keys
    /// Enc(s[i]*s[j])).
    ///
    /// # Panics
    /// Always (not just in debug builds) if `scratch` is smaller than
    /// [`GGSWCiphertext::expand_row_scratch_space`].
    pub fn expand_row<DataTsk: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        tsk: &GGLWETensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftCopy<B>
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftAddInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxDftToVecZnxBigTmpA<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
    {
        assert!(
            scratch.available()
                >= GGSWCiphertext::expand_row_scratch_space(
                    module,
                    self.n(),
                    self.basek(),
                    self.k(),
                    tsk.k(),
                    tsk.digits(),
                    tsk.rank()
                )
        );
        let n: usize = self.n();
        let rank: usize = self.rank();
        let cols: usize = rank + 1;
        // Keyswitch the j-th row of the col 0
        (0..self.rows()).for_each(|row_i| {
            // Pre-compute DFT of (a0, a1, a2)
            let (mut ci_dft, scratch1) = scratch.take_vec_znx_dft(n, cols, self.size());
            (0..cols).for_each(|i| {
                module.vec_znx_dft_from_vec_znx(1, 0, &mut ci_dft, i, &self.at(row_i, 0).data, i);
            });
            (1..cols).for_each(|col_j| {
                // Example for rank 3:
                //
                // Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
                // actually composed of that many rows and we focus on a specific row here
                // implicitely given ci_dft.
                //
                // # Input
                //
                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
                // col 1: (0, 0, 0, 0)
                // col 2: (0, 0, 0, 0)
                // col 3: (0, 0, 0, 0)
                //
                // # Output
                //
                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
                // col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
                // col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
                // col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
                let digits: usize = tsk.digits();
                let (mut tmp_dft_i, scratch2) = scratch1.take_vec_znx_dft(n, cols, tsk.size());
                let (mut tmp_a, scratch3) = scratch2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(digits));
                {
                    // Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
                    //
                    // # Example for col=1
                    //
                    // a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
                    // +
                    // a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
                    // +
                    // a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
                    // =
                    // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
                    (1..cols).for_each(|col_i| {
                        let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
                        // Extracts a[i] and multipies with Enc(s[i]s[j])
                        (0..digits).for_each(|di| {
                            tmp_a.set_size((ci_dft.size() + di) / digits);
                            // Small optimization for digits > 2
                            // VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
                            // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
                            // As such we can ignore the last digits-2 limbs safely of the sum of vmp products.
                            // It is possible to further ignore the last digits-1 limbs, but this introduce
                            // ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
                            // noise is kept with respect to the ideal functionality.
                            tmp_dft_i.set_size(tsk.size() - ((digits - di) as isize - 2).max(0) as usize);
                            module.vec_znx_dft_copy(digits, digits - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
                            // First (digit, column) pair initializes the accumulator;
                            // all subsequent pairs accumulate into it.
                            if di == 0 && col_i == 1 {
                                module.vmp_apply(&mut tmp_dft_i, &tmp_a, pmat, scratch3);
                            } else {
                                module.vmp_apply_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch3);
                            }
                        });
                    });
                }
                // Adds -(sum a[i] * s[i]) + m) on the i-th column of tmp_idft_i
                //
                // (-(x0s0 + x1s1 + x2s2) + a0s0s0 + a1s0s1 + a2s0s2, x0, x1, x2)
                // +
                // (0, -(a0s0 + a1s1 + a2s2) + M[i], 0, 0)
                // =
                // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 -(a0s0 + a1s1 + a2s2) + M[i], x1, x2)
                // =
                // (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
                module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
                // Inverse DFT (one column at a time) and normalize into the
                // destination column col_j of row row_i.
                let (mut tmp_idft, scratch3) = scratch2.take_vec_znx_big(n, 1, tsk.size());
                (0..cols).for_each(|i| {
                    module.vec_znx_dft_to_vec_znx_big_tmp_a(&mut tmp_idft, 0, &mut tmp_dft_i, i);
                    module.vec_znx_big_normalize(
                        self.basek(),
                        &mut self.at_mut(row_i, col_j).data,
                        i,
                        &tmp_idft,
                        0,
                        scratch3,
                    );
                });
            })
        })
    }

    /// Key-switches column 0 of every row of `lhs` into `self` under `ksk`.
    /// The other columns are left untouched (expand_row rebuilds them).
    fn keyswitch_internal<DataLhs: DataRef, DataKsk: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        lhs: &GGSWCiphertext<DataLhs>,
        ksk: &GGLWESwitchingKeyPrepared<DataKsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
    {
        // Keyswitch the j-th row of the col 0
        (0..lhs.rows()).for_each(|row_i| {
            // Key-switch column 0, i.e.
            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
            self.at_mut(row_i, 0)
                .keyswitch(module, &lhs.at(row_i, 0), ksk, scratch);
        })
    }
}

View File

@@ -0,0 +1,306 @@
use poulpy_backend::hal::{
api::{
DataViewMut, ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigConsume, VmpApply, VmpApplyAdd, VmpApplyTmpBytes, ZnxInfos,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
};
use crate::layouts::{GLWECiphertext, Infos, prepared::GGLWESwitchingKeyPrepared};
impl GLWECiphertext<Vec<u8>> {
    /// Returns the scratch-space size (in bytes) required by
    /// [`GLWECiphertext::keyswitch`].
    ///
    /// Sized as: the DFT result buffer plus the larger of the two sequential
    /// phases — (mask DFT + VMP temporaries) or the final big-normalize.
    #[allow(clippy::too_many_arguments)]
    pub fn keyswitch_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_in: usize,
        k_ksk: usize,
        digits: usize,
        rank_in: usize,
        rank_out: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        // Limb counts: digit-packed input, output, and switching key.
        let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
        let out_size: usize = k_out.div_ceil(basek);
        let ksk_size: usize = k_ksk.div_ceil(basek);
        let res_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank_out + 1, ksk_size); // TODO OPTIMIZE
        let ai_dft: usize = module.vec_znx_dft_alloc_bytes(n, rank_in, in_size);
        let vmp: usize = module.vmp_apply_tmp_bytes(
            n,
            out_size,
            in_size,
            in_size,
            rank_in,
            rank_out + 1,
            ksk_size,
        ) + module.vec_znx_dft_alloc_bytes(n, rank_in, in_size);
        let normalize: usize = module.vec_znx_big_normalize_tmp_bytes(n);
        // NOTE(review): `|` is a bitwise OR on byte counts, presumably an
        // over-approximation of max() for two phases that never coexist —
        // confirm against the scratch allocator's accounting convention.
        res_dft + ((ai_dft + vmp) | normalize)
    }

    /// Returns the scratch-space size (in bytes) required by
    /// [`GLWECiphertext::keyswitch_inplace`] (input size == output size,
    /// input rank == output rank).
    pub fn keyswitch_inplace_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_out: usize,
        k_ksk: usize,
        digits: usize,
        rank: usize,
    ) -> usize
    where
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        Self::keyswitch_scratch_space(module, n, basek, k_out, k_out, k_ksk, digits, rank, rank)
    }
}
impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
    /// Debug-time precondition checks for [`GLWECiphertext::keyswitch`]:
    /// validates rank compatibility, base-2k consistency, matching ring
    /// degrees, and that `scratch` is large enough.
    ///
    /// # Panics
    /// On any violated precondition.
    #[allow(dead_code)]
    pub(crate) fn assert_keyswitch<B: Backend, DataLhs, DataRhs>(
        &self,
        module: &Module<B>,
        lhs: &GLWECiphertext<DataLhs>,
        rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &Scratch<B>,
    ) where
        DataLhs: DataRef,
        DataRhs: DataRef,
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        let basek: usize = self.basek();
        assert_eq!(
            lhs.rank(),
            rhs.rank_in(),
            "lhs.rank(): {} != rhs.rank_in(): {}",
            lhs.rank(),
            rhs.rank_in()
        );
        assert_eq!(
            self.rank(),
            rhs.rank_out(),
            "self.rank(): {} != rhs.rank_out(): {}",
            self.rank(),
            rhs.rank_out()
        );
        assert_eq!(self.basek(), basek);
        assert_eq!(lhs.basek(), basek);
        assert_eq!(rhs.n(), self.n());
        assert_eq!(lhs.n(), self.n());
        assert!(
            scratch.available()
                >= GLWECiphertext::keyswitch_scratch_space(
                    module,
                    self.n(),
                    self.basek(),
                    self.k(),
                    lhs.k(),
                    rhs.k(),
                    rhs.digits(),
                    rhs.rank_in(),
                    rhs.rank_out(),
                ),
            // BUGFIX: the panic message previously omitted the `self.n()`
            // argument that the actual call above passes.
            "scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
module,
self.n(),
self.basek(),
self.k(),
lhs.k(),
rhs.k(),
rhs.digits(),
rhs.rank_in(),
rhs.rank_out(),
)={}",
            scratch.available(),
            GLWECiphertext::keyswitch_scratch_space(
                module,
                self.n(),
                self.basek(),
                self.k(),
                lhs.k(),
                rhs.k(),
                rhs.digits(),
                rhs.rank_in(),
                rhs.rank_out(),
            )
        );
    }
}
impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
lhs: &GLWECiphertext<DataLhs>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyTmpBytes
+ VmpApply<B>
+ VmpApplyAdd<B>
+ VecZnxDftFromVecZnx<B>
+ VecZnxDftToVecZnxBigConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
{
#[cfg(debug_assertions)]
{
self.assert_keyswitch(module, lhs, rhs, scratch);
}
let (res_dft, scratch1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // Todo optimise
let res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, rhs, scratch1);
(0..self.cols()).for_each(|i| {
module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch1);
})
}
pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
&mut self,
module: &Module<B>,
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
scratch: &mut Scratch<B>,
) where
Module<B>: VecZnxDftAllocBytes
+ VmpApplyTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyTmpBytes
+ VmpApply<B>
+ VmpApplyAdd<B>
+ VecZnxDftFromVecZnx<B>
+ VecZnxDftToVecZnxBigConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
{
unsafe {
let self_ptr: *mut GLWECiphertext<DataSelf> = self as *mut GLWECiphertext<DataSelf>;
self.keyswitch(module, &*self_ptr, rhs, scratch);
}
}
}
impl<D: DataRef> GLWECiphertext<D> {
    /// Core key-switch: applies the prepared matrix of `rhs` to the mask of
    /// `self` in the DFT domain and returns the un-normalized big-coefficient
    /// accumulator (the caller normalizes it into the destination).
    ///
    /// Dispatches on `rhs.digits()`: the single-digit path skips the
    /// digit-decomposition loop entirely.
    pub(crate) fn keyswitch_internal<B: Backend, DataRes, DataKey>(
        &self,
        module: &Module<B>,
        res_dft: VecZnxDft<DataRes, B>,
        rhs: &GGLWESwitchingKeyPrepared<DataKey, B>,
        scratch: &mut Scratch<B>,
    ) -> VecZnxBig<DataRes, B>
    where
        DataRes: DataMut,
        DataKey: DataRef,
        // BUGFIX(cleanup): `VmpApplyTmpBytes` was listed twice in this bound.
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: TakeVecZnxDft<B>,
    {
        if rhs.digits() == 1 {
            return keyswitch_vmp_one_digit(module, res_dft, &self.data, &rhs.key.data, scratch);
        }
        keyswitch_vmp_multiple_digits(
            module,
            res_dft,
            &self.data,
            &rhs.key.data,
            rhs.digits(),
            scratch,
        )
    }
}
/// Single-digit key-switch kernel: transforms the mask columns of `a`
/// (columns `1..cols`) to the DFT domain, multiplies them with the prepared
/// matrix `mat`, converts the accumulator back to the big-coefficient domain,
/// and adds column 0 of `a` back onto column 0 of the result.
fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>(
    module: &Module<B>,
    mut res_dft: VecZnxDft<DataRes, B>,
    a: &VecZnx<DataIn>,
    mat: &VmpPMat<DataVmp, B>,
    scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
where
    DataRes: DataMut,
    DataIn: DataRef,
    DataVmp: DataRef,
    Module<B>:
        VecZnxDftAllocBytes + VecZnxDftFromVecZnx<B> + VmpApply<B> + VecZnxDftToVecZnxBigConsume<B> + VecZnxBigAddSmallInplace<B>,
    Scratch<B>: TakeVecZnxDft<B>,
{
    let ncols = a.cols();
    // One DFT buffer for the mask columns (everything except column 0).
    let (mut a_dft, vmp_scratch) = scratch.take_vec_znx_dft(a.n(), ncols - 1, a.size());
    for j in 0..ncols - 1 {
        // DFT column j receives input column j + 1.
        module.vec_znx_dft_from_vec_znx(1, 0, &mut a_dft, j, a, j + 1);
    }
    module.vmp_apply(&mut res_dft, &a_dft, mat, vmp_scratch);
    let mut acc: VecZnxBig<DataRes, B> = module.vec_znx_dft_to_vec_znx_big_consume(res_dft);
    // Re-add column 0 of `a` onto column 0 of the accumulator.
    module.vec_znx_big_add_small_inplace(&mut acc, 0, a, 0);
    acc
}
/// Multi-digit key-switch kernel: for each digit `di`, loads the matching
/// decomposition limbs of the mask columns of `a` into the DFT domain and
/// accumulates the VMP product into `res_dft`, then converts to the
/// big-coefficient domain and re-adds column 0 of `a`.
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
    module: &Module<B>,
    mut res_dft: VecZnxDft<DataRes, B>,
    a: &VecZnx<DataIn>,
    mat: &VmpPMat<DataVmp, B>,
    digits: usize,
    scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
where
    DataRes: DataMut,
    DataIn: DataRef,
    DataVmp: DataRef,
    Module<B>: VecZnxDftAllocBytes
        + VecZnxDftFromVecZnx<B>
        + VmpApply<B>
        + VmpApplyAdd<B>
        + VecZnxDftToVecZnxBigConsume<B>
        + VecZnxBigAddSmallInplace<B>,
    Scratch<B>: TakeVecZnxDft<B>,
{
    let cols: usize = a.cols();
    let size: usize = a.size();
    // DFT buffer sized for one digit's worth of limbs per mask column.
    let (mut ai_dft, scratch1) = scratch.take_vec_znx_dft(a.n(), cols - 1, size.div_ceil(digits));
    // Clear once: later iterations shrink/grow the logical size via set_size,
    // so stale data must not leak into the product.
    ai_dft.data_mut().fill(0);
    (0..digits).for_each(|di| {
        // Number of limbs of `a` that belong to digit `di`.
        ai_dft.set_size((size + di) / digits);
        // Small optimization for digits > 2
        // VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
        // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
        // As such we can ignore the last digits-2 limbs safely of the sum of vmp products.
        // It is possible to further ignore the last digits-1 limbs, but this introduce
        // ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
        // noise is kept with respect to the ideal functionality.
        res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
        (0..cols - 1).for_each(|col_i| {
            module.vec_znx_dft_from_vec_znx(digits, digits - di - 1, &mut ai_dft, col_i, a, col_i + 1);
        });
        // First digit initializes the accumulator; subsequent digits add with
        // the appropriate limb offset `di`.
        if di == 0 {
            module.vmp_apply(&mut res_dft, &ai_dft, mat, scratch1);
        } else {
            module.vmp_apply_add(&mut res_dft, &ai_dft, mat, di, scratch1);
        }
    });
    // Restore the full logical size before leaving the DFT domain.
    res_dft.set_size(res_dft.max_size());
    let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_dft_to_vec_znx_big_consume(res_dft);
    module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
    res_big
}

View File

@@ -0,0 +1,87 @@
use poulpy_backend::hal::{
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigConsume, VmpApply, VmpApplyAdd, VmpApplyTmpBytes, ZnxView,
ZnxViewMut, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
};
use crate::{
TakeGLWECt,
layouts::{GLWECiphertext, Infos, LWECiphertext, prepared::LWESwitchingKeyPrepared},
};
impl LWECiphertext<Vec<u8>> {
    /// Returns the scratch-space size (in bytes) required by
    /// [`LWECiphertext::keyswitch`]: a temporary rank-1 GLWE ciphertext large
    /// enough for both the input and output moduli, plus the scratch of the
    /// inner GLWE in-place key-switch.
    pub fn keyswitch_scratch_space<B: Backend>(
        module: &Module<B>,
        n: usize,
        basek: usize,
        k_lwe_out: usize,
        k_lwe_in: usize,
        k_ksk: usize,
    ) -> usize
    where
        // BUGFIX(cleanup): the bound previously listed `VmpApplyTmpBytes`
        // twice and additionally required the full set of key-switch kernels
        // (VmpApply, VecZnxDftFromVecZnx, ...) even though this function only
        // *sizes* scratch. Only the byte-count providers used by the callees
        // below are needed; the bound is strictly weaker, so all existing
        // callers keep compiling.
        Module<B>: VecZnxDftAllocBytes + VmpApplyTmpBytes + VecZnxBigNormalizeTmpBytes,
    {
        GLWECiphertext::bytes_of(n, basek, k_lwe_out.max(k_lwe_in), 1)
            + GLWECiphertext::keyswitch_inplace_scratch_space(module, n, basek, k_lwe_out, k_ksk, 1, 1)
    }
}
impl<DLwe: DataMut> LWECiphertext<DLwe> {
    /// Key-switches the LWE ciphertext `a` into `self` under `ksk`.
    ///
    /// Strategy: embed the LWE sample into a zeroed rank-1 GLWE ciphertext
    /// (first coefficient of column 0 receives the LWE body, the first
    /// `a.n()` coefficients of column 1 receive the LWE mask), run the GLWE
    /// in-place key-switch, then sample-extract the result back into `self`.
    pub fn keyswitch<A, DKs, B: Backend>(
        &mut self,
        module: &Module<B>,
        a: &LWECiphertext<A>,
        ksk: &LWESwitchingKeyPrepared<DKs, B>,
        scratch: &mut Scratch<B>,
    ) where
        A: DataRef,
        DKs: DataRef,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApply<B>
            + VmpApplyAdd<B>
            + VecZnxDftFromVecZnx<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
    {
        #[cfg(debug_assertions)]
        {
            // Both LWE dimensions must fit into the module's ring degree.
            assert!(self.n() <= module.n());
            assert!(a.n() <= module.n());
            assert_eq!(self.basek(), a.basek());
        }
        let max_k: usize = self.k().max(a.k());
        let basek: usize = self.basek();
        // Temporary rank-1 GLWE carrier sized for the larger of the two moduli.
        let (mut glwe, scratch1) = scratch.take_glwe_ct(ksk.n(), basek, max_k, 1);
        glwe.data.zero();
        let n_lwe: usize = a.n();
        (0..a.size()).for_each(|i| {
            // Limb i of the LWE ciphertext: element 0 goes to GLWE column 0
            // (coefficient 0), the remaining n_lwe elements to GLWE column 1.
            let data_lwe: &[i64] = a.data.at(0, i);
            glwe.data.at_mut(0, i)[0] = data_lwe[0];
            glwe.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
        });
        glwe.keyswitch_inplace(module, &ksk.0, scratch1);
        self.sample_extract(&glwe);
    }
}

View File

@@ -0,0 +1,4 @@
// Key-switching implementations, one submodule per ciphertext layout.
mod gglwe_ct;
mod ggsw_ct;
mod glwe_ct;
mod lwe_ct;