mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 13:16:44 +01:00)

wip
@@ -10,7 +10,7 @@ use poulpy_hal::{
 use crate::layouts::{AutomorphismKey, GGLWEInfos, GLWE, prepared::AutomorphismKeyPrepared};

 impl AutomorphismKey<Vec<u8>> {
-    pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY>(
+    pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
         out_infos: &OUT,
         in_infos: &IN,
@@ -22,7 +22,7 @@ impl AutomorphismKey<Vec<u8>> {
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWE::keyswitch_scratch_space(
+        GLWE::keyswitch_tmp_bytes(
             module,
             &out_infos.glwe_layout(),
             &in_infos.glwe_layout(),
@@ -30,13 +30,13 @@ impl AutomorphismKey<Vec<u8>> {
         )
     }

-    pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+    pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GGLWEInfos,
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        AutomorphismKey::automorphism_scratch_space(module, out_infos, out_infos, key_infos)
+        AutomorphismKey::automorphism_tmp_bytes(module, out_infos, out_infos, key_infos)
     }
 }

@@ -14,7 +14,7 @@ use crate::layouts::{
 };

 impl GGSW<Vec<u8>> {
-    pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
+    pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY, TSK>(
         module: &Module<B>,
         out_infos: &OUT,
         in_infos: &IN,
@@ -31,17 +31,17 @@ impl GGSW<Vec<u8>> {
     {
         let out_size: usize = out_infos.size();
         let ci_dft: usize = module.bytes_of_vec_znx_dft((key_infos.rank_out() + 1).into(), out_size);
-        let ks_internal: usize = GLWE::keyswitch_scratch_space(
+        let ks_internal: usize = GLWE::keyswitch_tmp_bytes(
             module,
             &out_infos.glwe_layout(),
             &in_infos.glwe_layout(),
             key_infos,
         );
-        let expand: usize = GGSW::expand_row_scratch_space(module, out_infos, tsk_infos);
+        let expand: usize = GGSW::expand_row_tmp_bytes(module, out_infos, tsk_infos);
         ci_dft + (ks_internal | expand)
     }

-    pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
+    pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY, TSK>(
         module: &Module<B>,
         out_infos: &OUT,
         key_infos: &KEY,
@@ -54,7 +54,7 @@ impl GGSW<Vec<u8>> {
         Module<B>:
             VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
     {
-        GGSW::automorphism_scratch_space(module, out_infos, out_infos, key_infos, tsk_infos)
+        GGSW::automorphism_tmp_bytes(module, out_infos, out_infos, key_infos, tsk_infos)
     }
 }

@@ -115,7 +115,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             self.rank(),
             tensor_key.rank_out()
         );
-        assert!(scratch.available() >= GGSW::automorphism_scratch_space(module, self, lhs, auto_key, tensor_key))
+        assert!(scratch.available() >= GGSW::automorphism_tmp_bytes(module, self, lhs, auto_key, tensor_key))
     };

     // Key-switch the j-th row of column 0

@@ -11,7 +11,7 @@ use poulpy_hal::{
 use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared};

 impl GLWE<Vec<u8>> {
-    pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY>(
+    pub fn automorphism_tmp_bytes<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
         out_infos: &OUT,
         in_infos: &IN,
@@ -23,16 +23,16 @@ impl GLWE<Vec<u8>> {
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        Self::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
+        Self::keyswitch_tmp_bytes(module, out_infos, in_infos, key_infos)
     }

-    pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+    pub fn automorphism_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        Self::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
+        Self::keyswitch_inplace_tmp_bytes(module, out_infos, key_infos)
     }
 }

poulpy-core/src/conversion/gglwe_to_ggsw.rs (new file, 279 lines)
@@ -0,0 +1,279 @@
+use poulpy_hal::{
+    api::{
+        ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxCopy, VecZnxDftAddInplace,
+        VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes,
+        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+    },
+    layouts::{Backend, DataMut, Module, Scratch, VmpPMat, ZnxInfos},
+};
+
+use crate::{
+    ScratchTakeCore,
+    layouts::{
+        GGLWE, GGLWEInfos, GGLWEToRef, GGSW, GGSWInfos, GGSWToMut, GLWEInfos, LWEInfos,
+        prepared::{TensorKeyPrepared, TensorKeyPreparedToRef},
+    },
+    operations::GLWEOperations,
+};
+
+impl GGLWE<Vec<u8>> {
+    pub fn from_gglwe_tmp_bytes<R, A, M, BE: Backend>(module: &M, res_infos: &R, tsk_infos: &A) -> usize
+    where
+        M: GGSWFromGGLWE<BE>,
+        R: GGSWInfos,
+        A: GGLWEInfos,
+    {
+        module.ggsw_from_gglwe_tmp_bytes(res_infos, tsk_infos)
+    }
+}
+
+impl<D: DataMut> GGSW<D> {
+    pub fn from_gglwe<G, M, T, BE: Backend>(&mut self, module: &M, gglwe: &G, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        M: GGSWFromGGLWE<BE>,
+        G: GGLWEToRef,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        module.ggsw_from_gglwe(self, gglwe, tsk, scratch);
+    }
+}
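
In use, the conversion is a single call on a mutable GGSW. A minimal sketch (the allocation helpers and the origin of `gglwe` and `tsk` are assumptions, not part of this file):

    // Sketch: re-encrypt a GGLWE as a GGSW using a prepared tensor key.
    // `alloc_from_infos` and `ScratchOwned::alloc` appear elsewhere in poulpy;
    // `scratch.borrow()` is assumed to yield a `&mut Scratch<BE>`.
    let mut ggsw: GGSW<Vec<u8>> = GGSW::alloc_from_infos(&ggsw_infos);
    let mut scratch = ScratchOwned::alloc(GGLWE::from_gglwe_tmp_bytes(&module, &ggsw_infos, &tsk_infos));
    ggsw.from_gglwe(&module, &gglwe, &tsk, scratch.borrow());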
+
+impl<BE: Backend> GGSWFromGGLWE<BE> for Module<BE> where Self: GGSWExpandRows<BE> + VecZnxCopy {}
+
+pub trait GGSWFromGGLWE<BE: Backend>
+where
+    Self: GGSWExpandRows<BE> + VecZnxCopy,
+{
+    fn ggsw_from_gglwe_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
+    where
+        R: GGSWInfos,
+        A: GGLWEInfos,
+    {
+        self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos)
+    }
+
+    fn ggsw_from_gglwe<R, A, T>(&self, res: &mut R, a: &A, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        A: GGLWEToRef,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let a: &GGLWE<&[u8]> = &a.to_ref();
+        let tsk: &TensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
+
+        assert_eq!(res.rank(), a.rank_out());
+        assert_eq!(res.dnum(), a.dnum());
+        assert_eq!(res.n(), self.n() as u32);
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(tsk.n(), self.n() as u32);
+
+        for row in 0..res.dnum().into() {
+            res.at_mut(row, 0).copy(self, &a.at(row, 0));
+        }
+
+        self.ggsw_expand_row(res, tsk, scratch);
+    }
+}
+
+impl<BE: Backend> GGSWExpandRows<BE> for Module<BE> where
+    Self: Sized
+        + ModuleN
+        + VecZnxDftBytesOf
+        + VmpApplyDftToDftTmpBytes
+        + VecZnxBigBytesOf
+        + VecZnxNormalizeTmpBytes
+        + VecZnxDftApply<BE>
+        + VecZnxDftCopy<BE>
+        + VmpApplyDftToDft<BE>
+        + VmpApplyDftToDftAdd<BE>
+        + VecZnxDftAddInplace<BE>
+        + VecZnxBigNormalize<BE>
+        + VecZnxIdftApplyTmpA<BE>
+        + VecZnxNormalize<BE>
+{
+}
+
+pub(crate) trait GGSWExpandRows<BE: Backend>
+where
+    Self: Sized
+        + ModuleN
+        + VecZnxDftBytesOf
+        + VmpApplyDftToDftTmpBytes
+        + VecZnxBigBytesOf
+        + VecZnxNormalizeTmpBytes
+        + VecZnxDftApply<BE>
+        + VecZnxDftCopy<BE>
+        + VmpApplyDftToDft<BE>
+        + VmpApplyDftToDftAdd<BE>
+        + VecZnxDftAddInplace<BE>
+        + VecZnxBigNormalize<BE>
+        + VecZnxIdftApplyTmpA<BE>
+        + VecZnxNormalize<BE>,
+{
+    fn ggsw_expand_rows_tmp_bytes<R, A>(&self, res_infos: &R, tsk_infos: &A) -> usize
+    where
+        R: GGSWInfos,
+        A: GGLWEInfos,
+    {
+        let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
+        let size_in: usize = res_infos
+            .k()
+            .div_ceil(tsk_infos.base2k())
+            .div_ceil(tsk_infos.dsize().into()) as usize;
+
+        let tmp_dft_i: usize = self.bytes_of_vec_znx_dft((tsk_infos.rank_out() + 1).into(), tsk_size);
+        let tmp_a: usize = self.bytes_of_vec_znx_dft(1, size_in);
+        let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
+            tsk_size,
+            size_in,
+            size_in,
+            (tsk_infos.rank_in()).into(),  // Verify if rank+1
+            (tsk_infos.rank_out()).into(), // Verify if rank+1
+            tsk_size,
+        );
+        let tmp_idft: usize = self.bytes_of_vec_znx_big(1, tsk_size);
+        let norm: usize = self.vec_znx_normalize_tmp_bytes();
+
+        tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
+    }
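+
+    // Note: in the size formula above, `|` is bitwise OR on usize. For two
+    // scratch regions that are never live at the same time (the DFT/VMP branch
+    // vs. the IDFT/normalize branch), it acts as a cheap upper bound on
+    // max(a, b): max(a, b) <= a | b <= a + b always holds, e.g. (160 | 72) == 232.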
+
+    fn ggsw_expand_row<R, T>(&self, res: &mut R, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let tsk: &TensorKeyPrepared<&[u8], BE> = &tsk.to_ref();
+
+        let basek_in: usize = res.base2k().into();
+        let basek_tsk: usize = tsk.base2k().into();
+
+        assert!(scratch.available() >= self.ggsw_expand_rows_tmp_bytes(res, tsk));
+
+        let rank: usize = res.rank().into();
+        let cols: usize = rank + 1;
+
+        let a_size: usize = (res.size() * basek_in).div_ceil(basek_tsk);
+
+        // Key-switch the j-th row of column 0
+        for row_i in 0..res.dnum().into() {
+            let a = &res.at(row_i, 0).data;
+
+            // Pre-compute DFT of (a0, a1, a2)
+            let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, a_size);
+
+            if basek_in == basek_tsk {
+                for i in 0..cols {
+                    self.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
+                }
+            } else {
+                let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(self, 1, a_size);
+                for i in 0..cols {
+                    self.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
+                    self.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
+                }
+            }
+
+            for col_j in 1..cols {
+                // Example for rank 3:
+                //
+                // Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
+                // actually composed of that many dnum rows, and we focus on a
+                // specific row here, implicitly given by ci_dft.
+                //
+                // # Input
+                //
+                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
+                // col 1: (0, 0, 0, 0)
+                // col 2: (0, 0, 0, 0)
+                // col 3: (0, 0, 0, 0)
+                //
+                // # Output
+                //
+                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0 , a1 , a2 )
+                // col 1: (-(b0s0 + b1s1 + b2s2) , b0 + M[i], b1 , b2 )
+                // col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
+                // col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
+
+                let dsize: usize = tsk.dsize().into();
+
+                let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, tsk.size());
+                let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(self, 1, ci_dft.size().div_ceil(dsize));
+
+                {
+                    // Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
+                    //
+                    // # Example for col=1
+                    //
+                    // a0 * (-(f0s0 + f1s1 + f1s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f1s2) + a0s0^2, a0f0, a0f1, a0f2)
+                    // +
+                    // a1 * (-(g0s0 + g1s1 + g1s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g1s2) + a1s0s1, a1g0, a1g1, a1g2)
+                    // +
+                    // a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
+                    // =
+                    // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
+                    for col_i in 1..cols {
+                        let pmat: &VmpPMat<&[u8], BE> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
+
+                        // Extracts a[i] and multiplies with Enc(s[i]s[j])
+                        for di in 0..dsize {
+                            tmp_a.set_size((ci_dft.size() + di) / dsize);
+
+                            // Small optimization for dsize > 2:
+                            // the VMP produces some error e, and since we aggregate vmp * 2^{di * B},
+                            // we also aggregate ei * 2^{di * B}, with the largest error being
+                            // ei * 2^{(dsize-1) * B}. As such we can safely ignore the last dsize-2
+                            // limbs of the sum of vmp products. It is possible to further ignore the
+                            // last dsize-1 limbs, but this introduces ~0.5 to 1 bit of additional
+                            // noise, and is thus not done here, so that the noise stays identical to
+                            // the ideal functionality.
+                            tmp_dft_i.set_size(tsk.size() - ((dsize - di) as isize - 2).max(0) as usize);
+
+                            self.vec_znx_dft_copy(dsize, dsize - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
+                            if di == 0 && col_i == 1 {
+                                self.vmp_apply_dft_to_dft(&mut tmp_dft_i, &tmp_a, pmat, scratch_3);
+                            } else {
+                                self.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
+                            }
+                        }
+                    }
+                }
+
+                // Adds (-(sum a[i] * s[i]) + m) on the col_j-th column of tmp_dft_i
+                //
+                // (-(x0s0 + x1s1 + x2s2) + a0s0s0 + a1s0s1 + a2s0s2, x0, x1, x2)
+                // +
+                // (0, -(a0s0 + a1s1 + a2s2) + M[i], 0, 0)
+                // =
+                // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 -(a0s0 + a1s1 + a2s2) + M[i], x1, x2)
+                // =
+                // (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
+                self.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
+                let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(self, 1, tsk.size());
+                for i in 0..cols {
+                    self.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
+                    self.vec_znx_big_normalize(
+                        basek_in,
+                        &mut res.at_mut(row_i, col_j).data,
+                        i,
+                        basek_tsk,
+                        &tmp_idft,
+                        0,
+                        scratch_3,
+                    );
+                }
+            }
+        }
+    }
+}
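
In equation form, the row expansion above computes, for column 1 of a rank-3 GGSW (this is the algebra spelled out in the comments; the other columns are analogous):

    a0 * Enc(s0^2) + a1 * Enc(s0*s1) + a2 * Enc(s0*s2)
        = (-(x0*s0 + x1*s1 + x2*s2) + s0*(a0*s0 + a1*s1 + a2*s2), x0, x1, x2)

Adding the column-0 body -(a0*s0 + a1*s1 + a2*s2) + M[i] into the x0 slot cancels the s0*(a0*s0 + a1*s1 + a2*s2) term, leaving (-(x0*s0 + x1*s1 + x2*s2), x0 + M[i], x1, x2), a GLWE ciphertext that decrypts to M[i] * s0, which is exactly what the corresponding GGSW column must hold.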

@@ -10,7 +10,7 @@ use poulpy_hal::{
 use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared};

 impl LWE<Vec<u8>> {
-    pub fn from_glwe_scratch_space<B: Backend, OUT, IN, KEY>(
+    pub fn from_glwe_tmp_bytes<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
         lwe_infos: &OUT,
         glwe_infos: &IN,
@@ -34,7 +34,7 @@ impl LWE<Vec<u8>> {
             lwe_infos.base2k(),
             lwe_infos.k(),
             1u32.into(),
-        ) + GLWE::keyswitch_scratch_space(module, &glwe_layout, glwe_infos, key_infos)
+        ) + GLWE::keyswitch_tmp_bytes(module, &glwe_layout, glwe_infos, key_infos)
     }
 }

@@ -10,7 +10,7 @@ use poulpy_hal::{
 use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared};

 impl GLWE<Vec<u8>> {
-    pub fn from_lwe_scratch_space<B: Backend, OUT, IN, KEY>(
+    pub fn from_lwe_tmp_bytes<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
         glwe_infos: &OUT,
         lwe_infos: &IN,
@@ -28,7 +28,7 @@ impl GLWE<Vec<u8>> {
             lwe_infos.k().max(glwe_infos.k()),
             1u32.into(),
         );
-        let ks: usize = GLWE::keyswitch_inplace_scratch_space(module, glwe_infos, key_infos);
+        let ks: usize = GLWE::keyswitch_inplace_tmp_bytes(module, glwe_infos, key_infos);
         if lwe_infos.base2k() == key_infos.base2k() {
             ct + ks
         } else {

@@ -1,2 +1,5 @@
+mod gglwe_to_ggsw;
 mod glwe_to_lwe;
 mod lwe_to_glwe;
+
+pub use gglwe_to_ggsw::*;

@@ -9,7 +9,7 @@ use poulpy_hal::{
 use crate::layouts::{GLWE, GLWEInfos, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};

 impl GLWE<Vec<u8>> {
-    pub fn decrypt_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn decrypt_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,

@@ -13,13 +13,13 @@ use crate::{
 };

 impl AutomorphismKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
         assert_eq!(module.n() as u32, infos.n());
-        GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
+        GLWESwitchingKeyCompressed::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(infos.n(), infos.rank_out())
     }
 }

@@ -63,10 +63,10 @@ where
     assert_eq!(res.rank_out(), res.rank_in());
     assert_eq!(sk.rank(), res.rank_out());
     assert!(
-        scratch.available() >= AutomorphismKeyCompressed::encrypt_sk_scratch_space(self, res),
-        "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {}",
+        scratch.available() >= AutomorphismKeyCompressed::encrypt_sk_tmp_bytes(self, res),
+        "scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {}",
         scratch.available(),
-        AutomorphismKeyCompressed::encrypt_sk_scratch_space(self, res)
+        AutomorphismKeyCompressed::encrypt_sk_tmp_bytes(self, res)
     )
 }

@@ -34,12 +34,12 @@ impl<D: DataMut> GGLWECompressed<D> {
 }

 impl GGLWECompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
-        GGLWE::encrypt_sk_scratch_space(module, infos)
+        GGLWE::encrypt_sk_tmp_bytes(module, infos)
     }
 }

@@ -106,10 +106,10 @@ where
     assert_eq!(res.n(), sk.n());
     assert_eq!(pt.n() as u32, sk.n());
     assert!(
-        scratch.available() >= GGLWECompressed::encrypt_sk_scratch_space(self, res),
-        "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space: {}",
+        scratch.available() >= GGLWECompressed::encrypt_sk_tmp_bytes(self, res),
+        "scratch.available: {} < GGLWECiphertext::encrypt_sk_tmp_bytes: {}",
         scratch.available(),
-        GGLWECompressed::encrypt_sk_scratch_space(self, res)
+        GGLWECompressed::encrypt_sk_tmp_bytes(self, res)
     );
     assert!(
         res.dnum().0 * res.dsize().0 * res.base2k().0 <= res.k().0,

@@ -14,12 +14,12 @@ use crate::{
 };

 impl GLWESwitchingKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + SvpPPolBytesOf,
     {
-        (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+        (GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
             + GLWESecretPrepared::bytes_of(module, infos.rank_out())
     }
@@ -91,10 +91,10 @@ where
     assert!(sk_in.n().0 <= self.n() as u32);
     assert!(sk_out.n().0 <= self.n() as u32);
     assert!(
-        scratch.available() >= GLWESwitchingKey::encrypt_sk_scratch_space(self, res),
-        "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
+        scratch.available() >= GLWESwitchingKey::encrypt_sk_tmp_bytes(self, res),
+        "scratch.available()={} < GLWESwitchingKey::encrypt_sk_tmp_bytes={}",
         scratch.available(),
-        GLWESwitchingKey::encrypt_sk_scratch_space(self, res)
+        GLWESwitchingKey::encrypt_sk_tmp_bytes(self, res)
     )
 }

@@ -16,12 +16,12 @@ use crate::{
 };

 impl TensorKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
     {
-        TensorKey::encrypt_sk_scratch_space(module, infos)
+        TensorKey::encrypt_sk_tmp_bytes(module, infos)
     }
 }

@@ -14,12 +14,12 @@ use crate::{
 };

 impl GGSWCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
-        GGSW::encrypt_sk_scratch_space(module, infos)
+        GGSW::encrypt_sk_tmp_bytes(module, infos)
     }
 }

@@ -14,12 +14,12 @@ use crate::{
 };

 impl GLWECompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
-        GLWE::encrypt_sk_scratch_space(module, infos)
+        GLWE::encrypt_sk_tmp_bytes(module, infos)
     }
 }

@@ -14,7 +14,7 @@ use crate::layouts::{
 };

 impl AutomorphismKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<BE>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
@@ -24,10 +24,10 @@ impl AutomorphismKey<Vec<u8>> {
             infos.rank_out(),
             "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
         );
-        GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
+        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
     }

-    pub fn encrypt_pk_scratch_space<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -36,7 +36,7 @@ impl AutomorphismKey<Vec<u8>> {
             _infos.rank_out(),
             "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
         );
-        GLWESwitchingKey::encrypt_pk_scratch_space(module, _infos)
+        GLWESwitchingKey::encrypt_pk_tmp_bytes(module, _infos)
     }
 }

@@ -119,10 +119,10 @@ where
     assert_eq!(res.rank_out(), res.rank_in());
     assert_eq!(sk.rank(), res.rank_out());
     assert!(
-        scratch.available() >= AutomorphismKey::encrypt_sk_scratch_space(self, res),
-        "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {:?}",
+        scratch.available() >= AutomorphismKey::encrypt_sk_tmp_bytes(self, res),
+        "scratch.available(): {} < AutomorphismKey::encrypt_sk_tmp_bytes: {:?}",
         scratch.available(),
-        AutomorphismKey::encrypt_sk_scratch_space(self, res)
+        AutomorphismKey::encrypt_sk_tmp_bytes(self, res)
     )
 }

@@ -13,16 +13,16 @@ use crate::{
 };

 impl GGLWE<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
-        GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
+        GLWE::encrypt_sk_tmp_bytes(module, &infos.glwe_layout())
             + (GLWEPlaintext::bytes_of_from_infos(module, &infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
     }

-    pub fn encrypt_pk_scratch_space<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
@@ -88,12 +88,12 @@ where
     assert_eq!(res.n(), sk.n());
     assert_eq!(pt.n() as u32, sk.n());
     assert!(
-        scratch.available() >= GGLWE::encrypt_sk_scratch_space(self, res),
-        "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(self, res.rank()={}, res.size()={}): {}",
+        scratch.available() >= GGLWE::encrypt_sk_tmp_bytes(self, res),
+        "scratch.available: {} < GGLWECiphertext::encrypt_sk_tmp_bytes(self, res.rank()={}, res.size()={}): {}",
         scratch.available(),
         res.rank_out(),
         res.size(),
-        GGLWE::encrypt_sk_scratch_space(self, res)
+        GGLWE::encrypt_sk_tmp_bytes(self, res)
     );
     assert!(
         res.dnum().0 * res.dsize().0 * res.base2k().0 <= res.k().0,

@@ -13,21 +13,21 @@ use crate::layouts::{
 };

 impl GLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
-        (GGLWE::encrypt_sk_scratch_space(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
+        (GGLWE::encrypt_sk_tmp_bytes(module, infos) | ScalarZnx::bytes_of(module.n(), 1))
             + ScalarZnx::bytes_of(module.n(), infos.rank_in().into())
             + GLWESecretPrepared::bytes_of_from_infos(module, &infos.glwe_layout())
     }

-    pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
     where
         A: GGLWEInfos,
     {
-        GGLWE::encrypt_pk_scratch_space(module, _infos)
+        GGLWE::encrypt_pk_tmp_bytes(module, _infos)
     }
 }

@@ -66,10 +66,10 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
     assert!(sk_in.n().0 <= module.n() as u32);
     assert!(sk_out.n().0 <= module.n() as u32);
     assert!(
-        scratch.available() >= GLWESwitchingKey::encrypt_sk_scratch_space(module, self),
-        "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
+        scratch.available() >= GLWESwitchingKey::encrypt_sk_tmp_bytes(module, self),
+        "scratch.available()={} < GLWESwitchingKey::encrypt_sk_tmp_bytes={}",
         scratch.available(),
-        GLWESwitchingKey::encrypt_sk_scratch_space(module, self)
+        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, self)
     )
 }

@@ -14,7 +14,7 @@ use crate::layouts::{
 };

 impl TensorKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigBytesOf,
@@ -24,7 +24,7 @@ impl TensorKey<Vec<u8>> {
             + module.bytes_of_vec_znx_big(1, 1)
             + module.bytes_of_vec_znx_dft(1, 1)
             + GLWESecret::bytes_of(Degree(module.n() as u32), Rank(1))
-            + GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
+            + GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
     }
 }

@@ -14,13 +14,13 @@ use crate::{
 };

 impl GGSW<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGSWInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
     {
         let size = infos.size();
-        GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
+        GLWE::encrypt_sk_tmp_bytes(module, &infos.glwe_layout())
             + VecZnx::bytes_of(module.n(), (infos.rank() + 1).into(), size)
             + VecZnx::bytes_of(module.n(), 1, size)
             + module.bytes_of_vec_znx_dft((infos.rank() + 1).into(), size)

@@ -19,7 +19,7 @@ use crate::{
 };

 impl GLWE<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
@@ -28,7 +28,7 @@ impl GLWE<Vec<u8>> {
         assert_eq!(module.n() as u32, infos.n());
         module.vec_znx_normalize_tmp_bytes() + 2 * VecZnx::bytes_of(module.n(), 1, size) + module.bytes_of_vec_znx_dft(1, size)
     }

-    pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_pk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GLWEInfos,
         Module<B>: VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
@@ -147,10 +147,10 @@ where
     assert_eq!(sk.n(), self.n() as u32);
     assert_eq!(pt.n(), self.n() as u32);
     assert!(
-        scratch.available() >= GLWE::encrypt_sk_scratch_space(self, &res),
-        "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
+        scratch.available() >= GLWE::encrypt_sk_tmp_bytes(self, &res),
+        "scratch.available(): {} < GLWECiphertext::encrypt_sk_tmp_bytes: {}",
         scratch.available(),
-        GLWE::encrypt_sk_scratch_space(self, &res)
+        GLWE::encrypt_sk_tmp_bytes(self, &res)
     )
 }

@@ -209,10 +209,10 @@ where
     assert_eq!(res.n(), self.n() as u32);
     assert_eq!(sk.n(), self.n() as u32);
     assert!(
-        scratch.available() >= GLWE::encrypt_sk_scratch_space(self, &res),
-        "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
+        scratch.available() >= GLWE::encrypt_sk_tmp_bytes(self, &res),
+        "scratch.available(): {} < GLWECiphertext::encrypt_sk_tmp_bytes: {}",
         scratch.available(),
-        GLWE::encrypt_sk_scratch_space(self, &res)
+        GLWE::encrypt_sk_tmp_bytes(self, &res)
     )
 }

@@ -45,7 +45,7 @@ where
     }

     // It's ok to allocate scratch space here since pk is usually generated only once.
-    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::encrypt_sk_scratch_space(self, res));
+    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::encrypt_sk_tmp_bytes(self, res));

     let mut tmp: GLWE<Vec<u8>> = GLWE::alloc_from_infos(res);

@@ -14,13 +14,13 @@ use crate::layouts::{
 };

 impl GLWEToLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GLWESecretPrepared::bytes_of(module, infos.rank_in())
-            + (GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
+            + (GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) | GLWESecret::bytes_of(infos.n(), infos.rank_in()))
     }
 }

@@ -14,7 +14,7 @@ use crate::layouts::{
 };

 impl LWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
@@ -36,7 +36,7 @@ impl LWESwitchingKey<Vec<u8>> {
         );
         GLWESecret::bytes_of(Degree(module.n() as u32), Rank(1))
             + GLWESecretPrepared::bytes_of(module, Rank(1))
-            + GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
+            + GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos)
     }
 }

@@ -12,7 +12,7 @@ use poulpy_hal::{
 use crate::layouts::{Degree, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank};

 impl LWEToGLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
+    pub fn encrypt_sk_tmp_bytes<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
         A: GGLWEInfos,
         Module<B>: SvpPPolBytesOf + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
@@ -22,8 +22,7 @@ impl LWEToGLWESwitchingKey<Vec<u8>> {
             Rank(1),
             "rank_in != 1 is not supported for LWEToGLWESwitchingKey"
         );
-        GLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
-            + GLWESecret::bytes_of(Degree(module.n() as u32), infos.rank_in())
+        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, infos) + GLWESecret::bytes_of(Degree(module.n() as u32), infos.rank_in())
     }
 }

@@ -18,7 +18,7 @@ where
     A: GGLWEInfos,
     B: GGSWInfos,
 {
-    self.glwe_external_product_scratch_space(res_infos, a_infos, b_infos)
+    self.glwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
 }

 fn gglwe_external_product<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)

@@ -21,7 +21,7 @@ where
     A: GGSWInfos,
     B: GGSWInfos,
 {
-    self.glwe_external_product_scratch_space(res_infos, a_infos, b_infos)
+    self.glwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
 }

 fn ggsw_external_product<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)

@@ -15,14 +15,14 @@ use crate::{
 };

 impl GLWE<Vec<u8>> {
-    pub fn external_product_scratch_space<R, A, B, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
+    pub fn external_product_tmp_bytes<R, A, B, M, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
     where
         R: GLWEInfos,
         A: GLWEInfos,
         B: GGSWInfos,
         M: GLWEExternalProduct<BE>,
     {
-        module.glwe_external_product_scratch_space(res_infos, a_infos, b_infos)
+        module.glwe_external_product_tmp_bytes(res_infos, a_infos, b_infos)
     }
 }

@@ -61,7 +61,7 @@ where
         + VecZnxBigNormalize<BE>
         + VecZnxNormalize<BE>,
 {
-    fn glwe_external_product_scratch_space<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
+    fn glwe_external_product_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
     where
         R: GLWEInfos,
         A: GLWEInfos,
@@ -111,7 +111,7 @@ where

     assert_eq!(rhs.rank(), res.rank());
     assert_eq!(rhs.n(), res.n());
-    assert!(scratch.available() >= self.glwe_external_product_scratch_space(res, res, rhs));
+    assert!(scratch.available() >= self.glwe_external_product_tmp_bytes(res, res, rhs));
 }

 let cols: usize = (rhs.rank() + 1).into();
@@ -225,7 +225,7 @@ where
     assert_eq!(rhs.rank(), res.rank());
     assert_eq!(rhs.n(), res.n());
     assert_eq!(lhs.n(), res.n());
-    assert!(scratch.available() >= self.glwe_external_product_scratch_space(res, lhs, rhs));
+    assert!(scratch.available() >= self.glwe_external_product_tmp_bytes(res, lhs, rhs));
 }

 let cols: usize = (rhs.rank() + 1).into();

@@ -90,13 +90,13 @@ impl GLWEPacker {
     }

     /// Number of scratch space bytes required to call [Self::add].
-    pub fn scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+    pub fn tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        pack_core_scratch_space(module, out_infos, key_infos)
+        pack_core_tmp_bytes(module, out_infos, key_infos)
     }

     pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
@@ -111,7 +111,7 @@ impl GLWEPacker {
     /// of packed ciphertexts reaches N/2^log_batch is a result written.
     /// * `a`: ciphertext to pack. Can optionally give None to pack a 0 ciphertext.
     /// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s.
-    /// * `scratch`: scratch space of size at least [Self::scratch_space].
+    /// * `scratch`: scratch space of size at least [Self::tmp_bytes].
     pub fn add<DataA: DataRef, DataAK: DataRef, B: Backend>(
         &mut self,
         module: &Module<B>,
@@ -177,13 +177,13 @@ impl GLWEPacker {
     }
 }

-fn pack_core_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+fn pack_core_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
 where
     OUT: GLWEInfos,
     KEY: GGLWEInfos,
     Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
 {
-    combine_scratch_space(module, out_infos, key_infos)
+    combine_tmp_bytes(module, out_infos, key_infos)
 }

 fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
@@ -268,14 +268,14 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
     }
 }

-fn combine_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+fn combine_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
 where
     OUT: GLWEInfos,
     KEY: GGLWEInfos,
     Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
 {
     GLWE::bytes_of_from_infos(module, out_infos)
-        + (GLWE::rsh_scratch_space(module.n()) | GLWE::automorphism_inplace_scratch_space(module, out_infos, key_infos))
+        + (GLWE::rsh_tmp_bytes(module.n()) | GLWE::automorphism_inplace_tmp_bytes(module, out_infos, key_infos))
 }

 /// [combine] merges two ciphertexts together.

@@ -27,19 +27,14 @@ impl GLWE<Vec<u8>> {
         gal_els
     }

-    pub fn trace_scratch_space<B: Backend, OUT, IN, KEY>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        in_infos: &IN,
-        key_infos: &KEY,
-    ) -> usize
+    pub fn trace_tmp_bytes<B: Backend, OUT, IN, KEY>(module: &Module<B>, out_infos: &OUT, in_infos: &IN, key_infos: &KEY) -> usize
     where
         OUT: GLWEInfos,
         IN: GLWEInfos,
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        let trace: usize = Self::automorphism_inplace_scratch_space(module, out_infos, key_infos);
+        let trace: usize = Self::automorphism_inplace_tmp_bytes(module, out_infos, key_infos);
         if in_infos.base2k() != key_infos.base2k() {
             let glwe_conv: usize = VecZnx::bytes_of(
                 module.n(),
@@ -52,13 +47,13 @@ impl GLWE<Vec<u8>> {
         trace
     }

-    pub fn trace_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+    pub fn trace_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GLWEInfos,
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        Self::trace_scratch_space(module, out_infos, out_infos, key_infos)
+        Self::trace_tmp_bytes(module, out_infos, out_infos, key_infos)
     }
 }
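
The sizing above reflects how the trace is evaluated: the automorphisms are applied one after another, in place, so the peak scratch demand is a single automorphism_inplace_tmp_bytes region, plus one conversion buffer when the input and the key use different base2k. A sketch of the corresponding size check (ciphertext and key setup assumed):

    let need = GLWE::trace_inplace_tmp_bytes(&module, &ct, &key_infos);
    assert!(scratch.available() >= need);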

@@ -13,7 +13,7 @@ use crate::layouts::{
 };

 impl AutomorphismKey<Vec<u8>> {
-    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
+    pub fn keyswitch_tmp_bytes<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
         out_infos: &OUT,
         in_infos: &IN,
@@ -25,16 +25,16 @@ impl AutomorphismKey<Vec<u8>> {
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
+        GLWESwitchingKey::keyswitch_tmp_bytes(module, out_infos, in_infos, key_infos)
     }

-    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
+    pub fn keyswitch_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GGLWEInfos,
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
+        GLWESwitchingKey::keyswitch_inplace_tmp_bytes(module, out_infos, key_infos)
     }
 }

@@ -86,7 +86,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
 }

 impl GLWESwitchingKey<Vec<u8>> {
-    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
+    pub fn keyswitch_tmp_bytes<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
         out_infos: &OUT,
         in_infos: &IN,
@@ -98,16 +98,16 @@ impl GLWESwitchingKey<Vec<u8>> {
         KEY: GGLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWE::keyswitch_scratch_space(module, out_infos, in_infos, key_apply)
+        GLWE::keyswitch_tmp_bytes(module, out_infos, in_infos, key_apply)
     }

-    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
+    pub fn keyswitch_inplace_tmp_bytes<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
     where
         OUT: GGLWEInfos + GLWEInfos,
         KEY: GGLWEInfos + GLWEInfos,
         Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWE::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
+        GLWE::keyswitch_inplace_tmp_bytes(module, out_infos, key_apply)
     }
 }

@@ -1,359 +1,131 @@
-use poulpy_hal::{
-    api::{
-        ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy,
-        VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
-        VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
-    },
-    layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
-};
+use poulpy_hal::layouts::{Backend, DataMut, Scratch, VecZnx};

 use crate::{
+    GGSWExpandRows, ScratchTakeCore,
+    keyswitching::glwe_ct::GLWEKeySwitching,
     layouts::{
-        GGLWE, GGLWEInfos, GGSW, GGSWInfos, GLWE, GLWEInfos, LWEInfos,
-        prepared::{GLWESwitchingKeyPrepared, TensorKeyPrepared},
+        GGLWEInfos, GGSW, GGSWInfos, GGSWToMut, GGSWToRef,
+        prepared::{GLWESwitchingKeyPreparedToRef, TensorKeyPreparedToRef},
     },
     operations::GLWEOperations,
 };

 impl GGSW<Vec<u8>> {
-    pub(crate) fn expand_row_scratch_space<B: Backend, OUT, TSK>(module: &Module<B>, out_infos: &OUT, tsk_infos: &TSK) -> usize
-    where
-        OUT: GGSWInfos,
-        TSK: GGLWEInfos,
-        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes,
-    {
-        let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
-        let size_in: usize = out_infos
-            .k()
-            .div_ceil(tsk_infos.base2k())
-            .div_ceil(tsk_infos.dsize().into()) as usize;
-
-        let tmp_dft_i: usize = module.bytes_of_vec_znx_dft((tsk_infos.rank_out() + 1).into(), tsk_size);
-        let tmp_a: usize = module.bytes_of_vec_znx_dft(1, size_in);
-        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
-            tsk_size,
-            size_in,
-            size_in,
-            (tsk_infos.rank_in()).into(),  // Verify if rank+1
-            (tsk_infos.rank_out()).into(), // Verify if rank+1
-            tsk_size,
-        );
-        let tmp_idft: usize = module.bytes_of_vec_znx_big(1, tsk_size);
-        let norm: usize = module.vec_znx_normalize_tmp_bytes();
-
-        tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
-    }
-
     #[allow(clippy::too_many_arguments)]
-    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        in_infos: &IN,
-        apply_infos: &KEY,
-        tsk_infos: &TSK,
+    pub fn keyswitch_tmp_bytes<R, A, K, T, M, BE: Backend>(
+        module: &M,
+        res_infos: &R,
+        a_infos: &A,
+        key_infos: &K,
+        tsk_infos: &T,
     ) -> usize
     where
-        OUT: GGSWInfos,
-        IN: GGSWInfos,
-        KEY: GGLWEInfos,
-        TSK: GGLWEInfos,
-        Module<B>:
-            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
+        R: GGSWInfos,
+        A: GGSWInfos,
+        K: GGLWEInfos,
+        T: GGLWEInfos,
+        M: GGSWKeySwitch<BE>,
     {
-        #[cfg(debug_assertions)]
-        {
-            assert_eq!(apply_infos.rank_in(), apply_infos.rank_out());
-            assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
-            assert_eq!(apply_infos.rank_in(), tsk_infos.rank_in());
-        }
-
-        let rank: usize = apply_infos.rank_out().into();
-
-        let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize;
-        let res_znx: usize = VecZnx::bytes_of(module.n(), rank + 1, size_out);
-        let ci_dft: usize = module.bytes_of_vec_znx_dft(rank + 1, size_out);
-        let ks: usize = GLWE::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos);
-        let expand_rows: usize = GGSW::expand_row_scratch_space(module, out_infos, tsk_infos);
-        let res_dft: usize = module.bytes_of_vec_znx_dft(rank + 1, size_out);
-
-        if in_infos.base2k() == tsk_infos.base2k() {
-            res_znx + ci_dft + (ks | expand_rows | res_dft)
-        } else {
-            let a_conv: usize = VecZnx::bytes_of(
-                module.n(),
-                1,
-                out_infos.k().div_ceil(tsk_infos.base2k()) as usize,
-            ) + module.vec_znx_normalize_tmp_bytes();
-            res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
-        }
-    }
-
-    #[allow(clippy::too_many_arguments)]
-    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        apply_infos: &KEY,
-        tsk_infos: &TSK,
-    ) -> usize
-    where
-        OUT: GGSWInfos,
-        KEY: GGLWEInfos,
-        TSK: GGLWEInfos,
-        Module<B>:
-            VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigBytesOf + VecZnxNormalizeTmpBytes + VecZnxBigNormalizeTmpBytes,
-    {
-        GGSW::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos)
+        module.ggsw_keyswitch_tmp_bytes(res_infos, a_infos, key_infos, tsk_infos)
     }
 }

+impl<D: DataMut> GGSW<D> {
+    pub fn keyswitch<M, A, K, T, BE: Backend>(&mut self, module: &M, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        A: GGSWToRef,
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGSWKeySwitch<BE>,
+    {
+        module.ggsw_keyswitch(self, a, key, tsk, scratch);
+    }
+
+    pub fn keyswitch_inplace<M, K, T, BE: Backend>(&mut self, module: &M, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+        M: GGSWKeySwitch<BE>,
+    {
+        module.ggsw_keyswitch_inplace(self, key, tsk, scratch);
+    }
+}
+
+pub trait GGSWKeySwitch<BE: Backend>
+where
+    Self: GLWEKeySwitching<BE> + GGSWExpandRows<BE>,
+{
+    fn ggsw_keyswitch_tmp_bytes<R, A, K, T>(&self, res_infos: &R, a_infos: &A, key_infos: &K, tsk_infos: &T) -> usize
+    where
+        R: GGSWInfos,
+        A: GGSWInfos,
+        K: GGLWEInfos,
+        T: GGLWEInfos,
+    {
+        assert_eq!(key_infos.rank_in(), key_infos.rank_out());
+        assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
+        assert_eq!(key_infos.rank_in(), tsk_infos.rank_in());
+
+        let rank: usize = key_infos.rank_out().into();
+
+        let size_out: usize = res_infos.k().div_ceil(res_infos.base2k()) as usize;
+        let res_znx: usize = VecZnx::bytes_of(self.n(), rank + 1, size_out);
+        let ci_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
+        let ks: usize = self.glwe_keyswitch_tmp_bytes(res_infos, a_infos, key_infos);
+        let expand_rows: usize = self.ggsw_expand_rows_tmp_bytes(res_infos, tsk_infos);
+        let res_dft: usize = self.bytes_of_vec_znx_dft(rank + 1, size_out);
+
+        if a_infos.base2k() == tsk_infos.base2k() {
+            res_znx + ci_dft + (ks | expand_rows | res_dft)
+        } else {
+            let a_conv: usize = VecZnx::bytes_of(
+                self.n(),
+                1,
+                res_infos.k().div_ceil(tsk_infos.base2k()) as usize,
+            ) + self.vec_znx_normalize_tmp_bytes();
+            res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
+        }
+    }
+
+    fn ggsw_keyswitch<R, A, K, T>(&self, res: &mut R, a: &A, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        A: GGSWToRef,
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let a: &GGSW<&[u8]> = &a.to_ref();
+
+        assert_eq!(res.ggsw_layout(), a.ggsw_layout());
+
+        for row in 0..a.dnum().into() {
+            // Key-switch column 0, i.e.
+            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
+            self.glwe_keyswitch(&mut res.at_mut(row, 0), &a.at(row, 0), key, scratch);
+        }
+
+        self.ggsw_expand_row(res, tsk, scratch);
+    }
+
+    fn ggsw_keyswitch_inplace<R, K, T>(&self, res: &mut R, key: &K, tsk: &T, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        K: GLWESwitchingKeyPreparedToRef<BE>,
+        T: TensorKeyPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+
+        for row in 0..res.dnum().into() {
+            // Key-switch column 0, i.e.
+            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
+            self.glwe_keyswitch_inplace(&mut res.at_mut(row, 0), key, scratch);
+        }
+
+        self.ggsw_expand_row(res, tsk, scratch);
+    }
+}
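
The trait above reduces GGSW key-switching to per-row GLWE key-switches on column 0 followed by one row expansion with the tensor key. Callers only need the two public entry points; a minimal sketch (the setup of `module`, `key`, `tsk` and `scratch` is assumed, not shown in this commit):

    // Sketch: switch a GGSW ciphertext to a new key, in place.
    let need = module.ggsw_keyswitch_tmp_bytes(&ggsw, &ggsw, &key, &tsk);
    assert!(scratch.available() >= need);
    ggsw.keyswitch_inplace(&module, &key, &tsk, scratch);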
|
||||
|
||||
impl<DataSelf: DataMut> GGSW<DataSelf> {
|
||||
pub fn from_gglwe<DataA, DataTsk, B: Backend>(
|
||||
&mut self,
|
||||
module: &Module<B>,
|
||||
a: &GGLWE<DataA>,
|
||||
tsk: &TensorKeyPrepared<DataTsk, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) where
|
||||
DataA: DataRef,
|
||||
DataTsk: DataRef,
|
||||
Module<B>: VecZnxCopy
|
||||
+ VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxDftCopy<B>
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable,
|
||||
{
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
use crate::layouts::{GLWEInfos, LWEInfos};
|
||||
|
||||
assert_eq!(self.rank(), a.rank_out());
|
||||
assert_eq!(self.dnum(), a.dnum());
|
||||
assert_eq!(self.n(), module.n() as u32);
|
||||
assert_eq!(a.n(), module.n() as u32);
|
||||
assert_eq!(tsk.n(), module.n() as u32);
|
||||
}
|
||||
(0..self.dnum().into()).for_each(|row_i| {
|
||||
self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
|
||||
});
|
||||
self.expand_row(module, tsk, scratch);
|
||||
}
|
||||
|
||||
pub fn keyswitch<DataLhs: DataRef, DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
|
||||
&mut self,
|
||||
module: &Module<B>,
|
||||
lhs: &GGSW<DataLhs>,
|
||||
ksk: &GLWESwitchingKeyPrepared<DataKsk, B>,
|
||||
tsk: &TensorKeyPrepared<DataTsk, B>,
|
||||
scratch: &mut Scratch<B>,
|
||||
) where
|
||||
Module<B>: VecZnxDftBytesOf
|
||||
+ VmpApplyDftToDftTmpBytes
|
||||
+ VecZnxBigNormalizeTmpBytes
|
||||
+ VmpApplyDftToDft<B>
|
||||
+ VmpApplyDftToDftAdd<B>
|
||||
+ VecZnxDftApply<B>
|
||||
+ VecZnxIdftApplyConsume<B>
|
||||
+ VecZnxBigAddSmallInplace<B>
|
||||
+ VecZnxBigNormalize<B>
|
||||
+ VecZnxDftBytesOf
|
||||
+ VecZnxBigBytesOf
|
||||
+ VecZnxNormalizeTmpBytes
|
||||
+ VecZnxDftCopy<B>
|
||||
+ VecZnxDftAddInplace<B>
|
||||
+ VecZnxIdftApplyTmpA<B>
|
||||
+ VecZnxNormalize<B>,
|
||||
Scratch<B>: ScratchAvailable,
|
||||
{
|
||||
(0..lhs.dnum().into()).for_each(|row_i| {
|
||||
// Key-switch column 0, i.e.
|
||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||
self.at_mut(row_i, 0)
|
||||
.keyswitch(module, &lhs.at(row_i, 0), ksk, scratch);
|
||||
});
|
||||
self.expand_row(module, tsk, scratch);
|
||||
}

    pub fn keyswitch_inplace<DataKsk: DataRef, DataTsk: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        ksk: &GLWESwitchingKeyPrepared<DataKsk, B>,
        tsk: &TensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftApply<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftCopy<B>
            + VecZnxDftAddInplace<B>
            + VecZnxIdftApplyTmpA<B>
            + VecZnxNormalize<B>,
        Scratch<B>: ScratchAvailable,
    {
        (0..self.dnum().into()).for_each(|row_i| {
            // Key-switch column 0, i.e.
            // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
            self.at_mut(row_i, 0)
                .keyswitch_inplace(module, ksk, scratch);
        });
        self.expand_row(module, tsk, scratch);
    }

    pub fn expand_row<DataTsk: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        tsk: &TensorKeyPrepared<DataTsk, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigBytesOf
            + VecZnxNormalizeTmpBytes
            + VecZnxDftApply<B>
            + VecZnxDftCopy<B>
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftAddInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxIdftApplyTmpA<B>
            + VecZnxNormalize<B>,
        Scratch<B>: ScratchAvailable,
    {
        let basek_in: usize = self.base2k().into();
        let basek_tsk: usize = tsk.base2k().into();

        assert!(scratch.available() >= GGSW::expand_row_tmp_bytes(module, self, tsk));

        let n: usize = self.n().into();
        let rank: usize = self.rank().into();
        let cols: usize = rank + 1;

        let a_size: usize = (self.size() * basek_in).div_ceil(basek_tsk);

        // Key-switch each row of column 0
        for row_i in 0..self.dnum().into() {
            let a = &self.at(row_i, 0).data;

            // Pre-compute the DFT of (a0, a1, a2)
            let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, a_size);

            if basek_in == basek_tsk {
                for i in 0..cols {
                    module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
                }
            } else {
                let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(n, 1, a_size);
                for i in 0..cols {
                    module.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
                    module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
                }
            }
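
            // ci_dft is computed once per row and reused for every output
            // column col_j in the loop below.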
            for col_j in 1..cols {
                // Example for rank 3:
                //
                // Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
                // actually composed of that many dnum rows; we focus on a specific row here,
                // implicitly given by ci_dft.
                //
                // # Input
                //
                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2)
                // col 1: (0, 0, 0, 0)
                // col 2: (0, 0, 0, 0)
                // col 3: (0, 0, 0, 0)
                //
                // # Output
                //
                // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0       , a1       , a2       )
                // col 1: (-(b0s0 + b1s1 + b2s2)       , b0 + M[i], b1       , b2       )
                // col 2: (-(c0s0 + c1s1 + c2s2)       , c0       , c1 + M[i], c2       )
                // col 3: (-(d0s0 + d1s1 + d2s2)       , d0       , d1       , d2 + M[i])

                let dsize: usize = tsk.dsize().into();

                let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(n, cols, tsk.size());
                let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(dsize));

                {
                    // Performs a key-switch for each combination of s[i]*s[j], i.e. for a0, a1, a2
                    //
                    // # Example for col=1
                    //
                    // a0 * (-(f0s0 + f1s1 + f2s2) + s0^2, f0, f1, f2) = (-(a0f0s0 + a0f1s1 + a0f2s2) + a0s0^2, a0f0, a0f1, a0f2)
                    // +
                    // a1 * (-(g0s0 + g1s1 + g2s2) + s0s1, g0, g1, g2) = (-(a1g0s0 + a1g1s1 + a1g2s2) + a1s0s1, a1g0, a1g1, a1g2)
                    // +
                    // a2 * (-(h0s0 + h1s1 + h2s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h2s2) + a2s0s2, a2h0, a2h1, a2h2)
                    // =
                    // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
                    for col_i in 1..cols {
                        let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])

                        // Extracts a[i] and multiplies it with Enc(s[i]s[j])
                        for di in 0..dsize {
                            tmp_a.set_size((ci_dft.size() + di) / dsize);

                            // Small optimization for dsize > 2:
                            // the VMP produces some error e, and since we aggregate vmp * 2^{di * B},
                            // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
                            // As such, we can safely ignore the last dsize-2 limbs of the sum of VMP products.
                            // It is possible to further ignore the last dsize-1 limbs, but this introduces
                            // ~0.5 to 1 bit of additional noise, and is thus not done here, so that the same
                            // noise is kept with respect to the ideal functionality.
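                            // Worked example: with dsize = 3, the truncation term
                            // ((dsize - di) - 2).max(0) evaluates to 1, 0, 0 for
                            // di = 0, 1, 2, so only the first VMP product drops a limb.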
                            tmp_dft_i.set_size(tsk.size() - ((dsize - di) as isize - 2).max(0) as usize);

                            module.vec_znx_dft_copy(dsize, dsize - 1 - di, &mut tmp_a, 0, &ci_dft, col_i);
                            if di == 0 && col_i == 1 {
                                module.vmp_apply_dft_to_dft(&mut tmp_dft_i, &tmp_a, pmat, scratch_3);
                            } else {
                                module.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
                            }
                        }
                    }
                }

                // Adds (-(sum a[i] * s[i]) + M[i]) on the col_j-th column of tmp_dft_i
                //
                // (-(x0s0 + x1s1 + x2s2) + a0s0s0 + a1s0s1 + a2s0s2, x0, x1, x2)
                // +
                // (0, -(a0s0 + a1s1 + a2s2) + M[i], 0, 0)
                // =
                // (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0 - (a0s0 + a1s1 + a2s2) + M[i], x1, x2)
                // =
                // (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
                module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
                let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(n, 1, tsk.size());
                for i in 0..cols {
                    module.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
                    module.vec_znx_big_normalize(
                        basek_in,
                        &mut self.at_mut(row_i, col_j).data,
                        i,
                        basek_tsk,
                        &tmp_idft,
                        0,
                        scratch_3,
                    );
                }
            }
        }
    }
}
impl<DataSelf: DataMut> GGSW<DataSelf> {}

@@ -1,186 +1,179 @@
use poulpy_hal::{
    api::{
        ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
        VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        ModuleN, ScratchAvailable, ScratchTakeBasic, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
    },
    layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
};

use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::GLWESwitchingKeyPrepared};
use crate::{
    ScratchTakeCore,
    layouts::{
        GGLWEInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
        prepared::{GLWESwitchingKeyPrepared, GLWESwitchingKeyPreparedToRef},
    },
};

impl GLWE<Vec<u8>> {
    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
        module: &Module<B>,
        out_infos: &OUT,
        in_infos: &IN,
        key_apply: &KEY,
    ) -> usize
    pub fn keyswitch_tmp_bytes<M, R, A, B, BE: Backend>(module: &M, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
    where
        OUT: GLWEInfos,
        IN: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        R: GLWEInfos,
        A: GLWEInfos,
        B: GGLWEInfos,
        M: GLWEKeySwitching<BE>,
    {
        let in_size: usize = in_infos
        module.glwe_keyswitch_tmp_bytes(res_infos, a_infos, b_infos)
    }
}

impl<D: DataMut> GLWE<D> {
    pub fn keyswitch<A, B, M, BE: Backend>(&mut self, module: &M, a: &A, b: &B, scratch: &mut Scratch<BE>)
    where
        A: GLWEToRef,
        B: GLWESwitchingKeyPreparedToRef<BE>,
        M: GLWEKeySwitching<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        module.glwe_keyswitch(self, a, b, scratch);
    }
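
    // Example usage (sketch): `module`, `res`, `glwe_in`, `ksk` and a
    // sufficiently sized `scratch` are assumed to be initialized elsewhere.
    //
    //     res.keyswitch(&module, &glwe_in, &ksk, scratch);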

    pub fn keyswitch_inplace<A, M, BE: Backend>(&mut self, module: &M, a: &A, scratch: &mut Scratch<BE>)
    where
        A: GLWESwitchingKeyPreparedToRef<BE>,
        M: GLWEKeySwitching<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        module.glwe_keyswitch_inplace(self, a, scratch);
    }
}

impl<BE: Backend> GLWEKeySwitching<BE> for Module<BE> where
    Self: Sized
        + ModuleN
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxNormalizeTmpBytes
        + VmpApplyDftToDft<BE>
        + VmpApplyDftToDftAdd<BE>
        + VecZnxDftApply<BE>
        + VecZnxIdftApplyConsume<BE>
        + VecZnxBigAddSmallInplace<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxNormalize<BE>
{
}
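
// Design note: this blanket impl means any Module<BE> that exposes the listed
// DFT/VMP/normalization ops automatically implements GLWEKeySwitching<BE>;
// callers only need the single trait bound.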

pub trait GLWEKeySwitching<BE: Backend>
where
    Self: Sized
        + ModuleN
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VecZnxNormalizeTmpBytes
        + VmpApplyDftToDft<BE>
        + VmpApplyDftToDftAdd<BE>
        + VecZnxDftApply<BE>
        + VecZnxIdftApplyConsume<BE>
        + VecZnxBigAddSmallInplace<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxNormalize<BE>,
{
    fn glwe_keyswitch_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
    where
        R: GLWEInfos,
        A: GLWEInfos,
        B: GGLWEInfos,
    {
        let in_size: usize = a_infos
            .k()
            .div_ceil(key_apply.base2k())
            .div_ceil(key_apply.dsize().into()) as usize;
        let out_size: usize = out_infos.size();
        let ksk_size: usize = key_apply.size();
        let res_dft: usize = module.bytes_of_vec_znx_dft((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
        let ai_dft: usize = module.bytes_of_vec_znx_dft((key_apply.rank_in()).into(), in_size);
        let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
            .div_ceil(b_infos.base2k())
            .div_ceil(b_infos.dsize().into()) as usize;
        let out_size: usize = res_infos.size();
        let ksk_size: usize = b_infos.size();
        let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
        let ai_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
        let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
            out_size,
            in_size,
            in_size,
            (key_apply.rank_in()).into(),
            (key_apply.rank_out() + 1).into(),
            (b_infos.rank_in()).into(),
            (b_infos.rank_out() + 1).into(),
            ksk_size,
        ) + module.bytes_of_vec_znx_dft((key_apply.rank_in()).into(), in_size);
        let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes();
        if in_infos.base2k() == key_apply.base2k() {
        ) + self.bytes_of_vec_znx_dft((b_infos.rank_in()).into(), in_size);
        let normalize_big: usize = self.vec_znx_big_normalize_tmp_bytes();
        if a_infos.base2k() == b_infos.base2k() {
            res_dft + ((ai_dft + vmp) | normalize_big)
        } else if key_apply.dsize() == 1 {
        } else if b_infos.dsize() == 1 {
            // In this case, we only need one temporary column, which we can drop once a_dft is computed.
            let normalize_conv: usize = VecZnx::bytes_of(module.n(), 1, in_size) + module.vec_znx_normalize_tmp_bytes();
            let normalize_conv: usize = VecZnx::bytes_of(self.n(), 1, in_size) + self.vec_znx_normalize_tmp_bytes();
            res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
        } else {
            // Since we stride over a to get a_dft when dsize > 1, we need to store all columns of a within the base conversion.
            let normalize_conv: usize = VecZnx::bytes_of(module.n(), (key_apply.rank_in()).into(), in_size);
            res_dft + ((ai_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
            let normalize_conv: usize = VecZnx::bytes_of(self.n(), (b_infos.rank_in()).into(), in_size);
            res_dft + ((ai_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
        }
    }
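
    // Example usage (sketch): sizing a scratch buffer before key-switching,
    // mirroring the test setup later in this diff; the infos values are
    // assumed to be built elsewhere.
    //
    //     let bytes = module.glwe_keyswitch_tmp_bytes(&res_infos, &a_infos, &ksk_infos);
    //     let mut scratch: ScratchOwned<BE> = ScratchOwned::alloc(bytes);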

    pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
    fn glwe_keyswitch<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
    where
        OUT: GLWEInfos,
        KEY: GGLWEInfos,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        R: GLWEToMut,
        A: GLWEToRef,
        B: GLWESwitchingKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply)
    }
}
        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
        let a: &GLWE<&[u8]> = &a.to_ref();
        let b: &GLWESwitchingKeyPrepared<&[u8], BE> = &b.to_ref();

impl<DataSelf: DataRef> GLWE<DataSelf> {
    #[allow(dead_code)]
    pub(crate) fn assert_keyswitch<B: Backend, DataLhs, DataRhs>(
        &self,
        module: &Module<B>,
        lhs: &GLWE<DataLhs>,
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &Scratch<B>,
    ) where
        DataLhs: DataRef,
        DataRhs: DataRef,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        assert_eq!(
            lhs.rank(),
            rhs.rank_in(),
            "lhs.rank(): {} != rhs.rank_in(): {}",
            lhs.rank(),
            rhs.rank_in()
            a.rank(),
            b.rank_in(),
            "a.rank(): {} != b.rank_in(): {}",
            a.rank(),
            b.rank_in()
        );
        assert_eq!(
            self.rank(),
            rhs.rank_out(),
            "self.rank(): {} != rhs.rank_out(): {}",
            self.rank(),
            rhs.rank_out()
            res.rank(),
            b.rank_out(),
            "res.rank(): {} != b.rank_out(): {}",
            res.rank(),
            b.rank_out()
        );
        assert_eq!(rhs.n(), self.n());
        assert_eq!(lhs.n(), self.n());

        let scratch_needed: usize = GLWE::keyswitch_scratch_space(module, self, lhs, rhs);
        assert_eq!(res.n(), self.n() as u32);
        assert_eq!(a.n(), self.n() as u32);
        assert_eq!(b.n(), self.n() as u32);

        let scratch_needed: usize = self.glwe_keyswitch_tmp_bytes(res, a, b);

        assert!(
            scratch.available() >= scratch_needed,
            "scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
            module,
            self.base2k(),
            self.k(),
            lhs.base2k(),
            lhs.k(),
            rhs.base2k(),
            rhs.k(),
            rhs.dsize(),
            rhs.rank_in(),
            rhs.rank_out(),
            )={scratch_needed}",
            "scratch.available()={} < glwe_keyswitch_tmp_bytes={scratch_needed}",
            scratch.available(),
        );
    }

    #[allow(dead_code)]
    pub(crate) fn assert_keyswitch_inplace<B: Backend, DataRhs>(
        &self,
        module: &Module<B>,
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &Scratch<B>,
    ) where
        DataRhs: DataRef,
        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        assert_eq!(
            self.rank(),
            rhs.rank_out(),
            "self.rank(): {} != rhs.rank_out(): {}",
            self.rank(),
            rhs.rank_out()
        );
        let basek_out: usize = res.base2k().into();
        let base2k_out: usize = b.base2k().into();

        assert_eq!(rhs.n(), self.n());

        let scratch_needed: usize = GLWE::keyswitch_inplace_scratch_space(module, self, rhs);

        assert!(
            scratch.available() >= scratch_needed,
            "scratch.available()={} < GLWECiphertext::keyswitch_scratch_space()={scratch_needed}",
            scratch.available(),
        );
    }
}

impl<DataSelf: DataMut> GLWE<DataSelf> {
    pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        glwe_in: &GLWE<DataLhs>,
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftApply<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxNormalize<B>
            + VecZnxNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        #[cfg(debug_assertions)]
        {
            self.assert_keyswitch(module, glwe_in, rhs, scratch);
        }

        let basek_out: usize = self.base2k().into();
        let basek_ksk: usize = rhs.base2k().into();

        let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
        let res_big: VecZnxBig<_, B> = glwe_in.keyswitch_internal(module, res_dft, rhs, scratch_1);
        (0..(self.rank() + 1).into()).for_each(|i| {
            module.vec_znx_big_normalize(
        let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), b.size()); // Todo optimise
        let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, a, b, scratch_1);
        (0..(res.rank() + 1).into()).for_each(|i| {
            self.vec_znx_big_normalize(
                basek_out,
                &mut self.data,
                &mut res.data,
                i,
                basek_ksk,
                base2k_out,
                &res_big,
                i,
                scratch_1,
@@ -188,227 +181,190 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
        })
    }

    pub fn keyswitch_inplace<DataRhs: DataRef, B: Backend>(
        &mut self,
        module: &Module<B>,
        rhs: &GLWESwitchingKeyPrepared<DataRhs, B>,
        scratch: &mut Scratch<B>,
    ) where
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftApply<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxNormalize<B>
            + VecZnxNormalizeTmpBytes,
        Scratch<B>: ScratchAvailable,
    {
        #[cfg(debug_assertions)]
        {
            self.assert_keyswitch_inplace(module, rhs, scratch);
        }

        let basek_in: usize = self.base2k().into();
        let basek_ksk: usize = rhs.base2k().into();

        let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // Todo optimise
        let res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, rhs, scratch_1);
        (0..(self.rank() + 1).into()).for_each(|i| {
            module.vec_znx_big_normalize(
                basek_in,
                &mut self.data,
                i,
                basek_ksk,
                &res_big,
                i,
                scratch_1,
            );
        })
    }
}

impl<D: DataRef> GLWE<D> {
    pub(crate) fn keyswitch_internal<B: Backend, DataRes, DataKey>(
        &self,
        module: &Module<B>,
        res_dft: VecZnxDft<DataRes, B>,
        rhs: &GLWESwitchingKeyPrepared<DataKey, B>,
        scratch: &mut Scratch<B>,
    ) -> VecZnxBig<DataRes, B>
    fn glwe_keyswitch_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
    where
        DataRes: DataMut,
        DataKey: DataRef,
        Module<B>: VecZnxDftBytesOf
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigNormalizeTmpBytes
            + VmpApplyDftToDft<B>
            + VmpApplyDftToDftAdd<B>
            + VecZnxDftApply<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxBigAddSmallInplace<B>
            + VecZnxBigNormalize<B>
            + VecZnxNormalize<B>,
        Scratch<B>:,
        R: GLWEToMut,
        A: GLWESwitchingKeyPreparedToRef<BE>,
        Scratch<BE>: ScratchTakeCore<BE>,
    {
        if rhs.dsize() == 1 {
            return keyswitch_vmp_one_digit(
                module,
                self.base2k().into(),
                rhs.base2k().into(),
                res_dft,
                &self.data,
                &rhs.key.data,
                scratch,
        let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
        let a: &GLWESwitchingKeyPrepared<&[u8], BE> = &a.to_ref();

        assert_eq!(
            res.rank(),
            a.rank_in(),
            "res.rank(): {} != a.rank_in(): {}",
            res.rank(),
            a.rank_in()
        );
        assert_eq!(
            res.rank(),
            a.rank_out(),
            "res.rank(): {} != a.rank_out(): {}",
            res.rank(),
            a.rank_out()
        );

        assert_eq!(res.n(), self.n() as u32);
        assert_eq!(a.n(), self.n() as u32);

        let scratch_needed: usize = self.glwe_keyswitch_tmp_bytes(res, res, a);

        assert!(
            scratch.available() >= scratch_needed,
            "scratch.available()={} < glwe_keyswitch_tmp_bytes={scratch_needed}",
            scratch.available(),
        );

        let base2k_in: usize = res.base2k().into();
        let base2k_out: usize = a.base2k().into();

        let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self, (res.rank() + 1).into(), a.size()); // Todo optimise
        let res_big: VecZnxBig<&mut [u8], BE> = keyswitch_internal(self, res_dft, res, a, scratch_1);
        (0..(res.rank() + 1).into()).for_each(|i| {
            self.vec_znx_big_normalize(
                base2k_in,
                &mut res.data,
                i,
                base2k_out,
                &res_big,
                i,
                scratch_1,
            );
        }

        keyswitch_vmp_multiple_digits(
            module,
            self.base2k().into(),
            rhs.base2k().into(),
            res_dft,
            &self.data,
            &rhs.key.data,
            rhs.dsize().into(),
            scratch,
        )
        })
    }
}

fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>(
    module: &Module<B>,
    basek_in: usize,
    basek_ksk: usize,
    mut res_dft: VecZnxDft<DataRes, B>,
    a: &VecZnx<DataIn>,
    mat: &VmpPMat<DataVmp, B>,
    scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
impl GLWE<Vec<u8>> {}

impl<DataSelf: DataMut> GLWE<DataSelf> {}

fn keyswitch_internal<BE: Backend, M, DR, DA, DB>(
    module: &M,
    mut res: VecZnxDft<DR, BE>,
    a: &GLWE<DA>,
    b: &GLWESwitchingKeyPrepared<DB, BE>,
    scratch: &mut Scratch<BE>,
) -> VecZnxBig<DR, BE>
where
    DataRes: DataMut,
    DataIn: DataRef,
    DataVmp: DataRef,
    Module<B>: VecZnxDftBytesOf
        + VecZnxDftApply<B>
        + VmpApplyDftToDft<B>
        + VecZnxIdftApplyConsume<B>
        + VecZnxBigAddSmallInplace<B>
        + VecZnxNormalize<B>,
    Scratch<B>:,
    DR: DataMut,
    DA: DataRef,
    DB: DataRef,
    M: ModuleN
        + VecZnxDftBytesOf
        + VmpApplyDftToDftTmpBytes
        + VecZnxBigNormalizeTmpBytes
        + VmpApplyDftToDft<BE>
        + VmpApplyDftToDftAdd<BE>
        + VecZnxDftApply<BE>
        + VecZnxIdftApplyConsume<BE>
        + VecZnxBigAddSmallInplace<BE>
        + VecZnxBigNormalize<BE>
        + VecZnxNormalize<BE>,
    Scratch<BE>: ScratchTakeCore<BE>,
{
    let cols: usize = a.cols();
    let base2k_in: usize = a.base2k().into();
    let base2k_out: usize = b.base2k().into();
    let cols: usize = (a.rank() + 1).into();
    let a_size: usize = (a.size() * base2k_in).div_ceil(base2k_out);
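    // Illustrative numbers: 4 limbs at base2k_in = 17 carry 4 * 17 = 68 bits,
    // which require (4 * 17).div_ceil(22) = 4 limbs at base2k_out = 22.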
    let pmat: &VmpPMat<DB, BE> = &b.key.data;

    let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
    let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a.size());
    if b.dsize() == 1 {
        let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a.size());

        if basek_in == basek_ksk {
            (0..cols - 1).for_each(|col_i| {
                module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1);
            });
        if base2k_in == base2k_out {
            (0..cols - 1).for_each(|col_i| {
                module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a.data(), col_i + 1);
            });
        } else {
            let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module, 1, a_size);
            (0..cols - 1).for_each(|col_i| {
                module.vec_znx_normalize(
                    base2k_out,
                    &mut a_conv,
                    0,
                    base2k_in,
                    a.data(),
                    col_i + 1,
                    scratch_2,
                );
                module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
            });
        }

        module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
    } else {
        let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), 1, a_size);
        (0..cols - 1).for_each(|col_i| {
            module.vec_znx_normalize(basek_ksk, &mut a_conv, 0, basek_in, a, col_i + 1, scratch_2);
            module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
        });
        let dsize: usize = b.dsize().into();

        let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(module, cols - 1, a_size.div_ceil(dsize));
        ai_dft.data_mut().fill(0);

        if base2k_in == base2k_out {
            for di in 0..dsize {
                ai_dft.set_size((a_size + di) / dsize);

                // Small optimization for dsize > 2:
                // the VMP produces some error e, and since we aggregate vmp * 2^{di * B},
                // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
                // As such, we can safely ignore the last dsize-2 limbs of the sum of VMP products.
                // It is possible to further ignore the last dsize-1 limbs, but this introduces
                // ~0.5 to 1 bit of additional noise, and is thus not done here, so that the same
                // noise is kept with respect to the ideal functionality.
                res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);

                for j in 0..cols - 1 {
                    module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a.data(), j + 1);
                }

                if di == 0 {
                    module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_1);
                } else {
                    module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_1);
                }
            }
        } else {
            let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module, cols - 1, a_size);
            for j in 0..cols - 1 {
                module.vec_znx_normalize(
                    base2k_out,
                    &mut a_conv,
                    j,
                    base2k_in,
                    a.data(),
                    j + 1,
                    scratch_2,
                );
            }

            for di in 0..dsize {
                ai_dft.set_size((a_size + di) / dsize);

                // Small optimization for dsize > 2:
                // the VMP produces some error e, and since we aggregate vmp * 2^{di * B},
                // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
                // As such, we can safely ignore the last dsize-2 limbs of the sum of VMP products.
                // It is possible to further ignore the last dsize-1 limbs, but this introduces
                // ~0.5 to 1 bit of additional noise, and is thus not done here, so that the same
                // noise is kept with respect to the ideal functionality.
                res.set_size(pmat.size() - ((dsize - di) as isize - 2).max(0) as usize);

                for j in 0..cols - 1 {
                    module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
                }

                if di == 0 {
                    module.vmp_apply_dft_to_dft(&mut res, &ai_dft, pmat, scratch_2);
                } else {
                    module.vmp_apply_dft_to_dft_add(&mut res, &ai_dft, pmat, di, scratch_2);
                }
            }
        }

        res.set_size(res.max_size());
    }

    module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
    let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
    module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
    res_big
}

#[allow(clippy::too_many_arguments)]
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
    module: &Module<B>,
    basek_in: usize,
    basek_ksk: usize,
    mut res_dft: VecZnxDft<DataRes, B>,
    a: &VecZnx<DataIn>,
    mat: &VmpPMat<DataVmp, B>,
    dsize: usize,
    scratch: &mut Scratch<B>,
) -> VecZnxBig<DataRes, B>
where
    DataRes: DataMut,
    DataIn: DataRef,
    DataVmp: DataRef,
    Module<B>: VecZnxDftBytesOf
        + VecZnxDftApply<B>
        + VmpApplyDftToDft<B>
        + VmpApplyDftToDftAdd<B>
        + VecZnxIdftApplyConsume<B>
        + VecZnxBigAddSmallInplace<B>
        + VecZnxNormalize<B>,
    Scratch<B>:,
{
    let cols: usize = a.cols();
    let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
    let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a_size.div_ceil(dsize));
    ai_dft.data_mut().fill(0);

    if basek_in == basek_ksk {
        for di in 0..dsize {
            ai_dft.set_size((a_size + di) / dsize);

            // Small optimization for dsize > 2:
            // the VMP produces some error e, and since we aggregate vmp * 2^{di * B},
            // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
            // As such, we can safely ignore the last dsize-2 limbs of the sum of VMP products.
            // It is possible to further ignore the last dsize-1 limbs, but this introduces
            // ~0.5 to 1 bit of additional noise, and is thus not done here, so that the same
            // noise is kept with respect to the ideal functionality.
            res_dft.set_size(mat.size() - ((dsize - di) as isize - 2).max(0) as usize);

            for j in 0..cols - 1 {
                module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, a, j + 1);
            }

            if di == 0 {
                module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
            } else {
                module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
            }
        }
    } else {
        let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), cols - 1, a_size);
        for j in 0..cols - 1 {
            module.vec_znx_normalize(basek_ksk, &mut a_conv, j, basek_in, a, j + 1, scratch_2);
        }

        for di in 0..dsize {
            ai_dft.set_size((a_size + di) / dsize);

            // Small optimization for dsize > 2:
            // the VMP produces some error e, and since we aggregate vmp * 2^{di * B},
            // we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(dsize-1) * B}.
            // As such, we can safely ignore the last dsize-2 limbs of the sum of VMP products.
            // It is possible to further ignore the last dsize-1 limbs, but this introduces
            // ~0.5 to 1 bit of additional noise, and is thus not done here, so that the same
            // noise is kept with respect to the ideal functionality.
            res_dft.set_size(mat.size() - ((dsize - di) as isize - 2).max(0) as usize);

            for j in 0..cols - 1 {
                module.vec_znx_dft_apply(dsize, dsize - di - 1, &mut ai_dft, j, &a_conv, j);
            }

            if di == 0 {
                module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_2);
            } else {
                module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_2);
            }
        }
    }

    res_dft.set_size(res_dft.max_size());
    let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
    module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
    let mut res_big: VecZnxBig<DR, BE> = module.vec_znx_idft_apply_consume(res);
    module.vec_znx_big_add_small_inplace(&mut res_big, 0, a.data(), 0);
    res_big
}

@@ -10,7 +10,7 @@ use poulpy_hal::{
use crate::layouts::{GGLWEInfos, GLWE, GLWELayout, LWE, LWEInfos, Rank, TorusPrecision, prepared::LWESwitchingKeyPrepared};

impl LWE<Vec<u8>> {
    pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
    pub fn keyswitch_tmp_bytes<B: Backend, OUT, IN, KEY>(
        module: &Module<B>,
        out_infos: &OUT,
        in_infos: &IN,
@@ -50,7 +50,7 @@ impl LWE<Vec<u8>> {

        let glwe_in: usize = GLWE::bytes_of_from_infos(module, &glwe_in_infos);
        let glwe_out: usize = GLWE::bytes_of_from_infos(module, &glwe_out_infos);
        let ks: usize = GLWE::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, key_infos);
        let ks: usize = GLWE::keyswitch_tmp_bytes(module, &glwe_out_infos, &glwe_in_infos, key_infos);

        glwe_in + glwe_out + ks
    }
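
    // Note (inferred from the allocations above, not stated in the source):
    // the LWE key-switch presumably lifts the input LWE into a temporary GLWE,
    // key-switches it there, and extracts the result, hence scratch space for
    // two temporary GLWEs plus the GLWE key-switch itself.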
@@ -84,7 +84,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
    {
        assert!(self.n() <= module.n() as u32);
        assert!(a.n() <= module.n() as u32);
        assert!(scratch.available() >= LWE::keyswitch_scratch_space(module, self, a, ksk));
        assert!(scratch.available() >= LWE::keyswitch_tmp_bytes(module, self, a, ksk));
    }

    let max_k: TorusPrecision = self.k().max(a.k());

@@ -14,6 +14,7 @@ mod utils;

pub use operations::*;
pub mod layouts;
pub use conversion::*;
pub use dist::*;
pub use external_product::*;
pub use glwe_packing::*;

@@ -35,7 +35,7 @@ impl<D: DataRef> GGLWE<D> {
    let dsize: usize = self.dsize().into();
    let base2k: usize = self.base2k().into();

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_scratch_space(module, self));
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_tmp_bytes(module, self));
    let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);

    (0..self.rank_in().into()).for_each(|col_i| {

@@ -48,7 +48,7 @@ impl<D: DataRef> GGSW<D> {
    let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());

    let mut scratch: ScratchOwned<B> =
        ScratchOwned::alloc(GLWE::decrypt_scratch_space(module, self) | module.vec_znx_normalize_tmp_bytes());
        ScratchOwned::alloc(GLWE::decrypt_tmp_bytes(module, self) | module.vec_znx_normalize_tmp_bytes());

    (0..(self.rank() + 1).into()).for_each(|col_j| {
        (0..self.dnum().into()).for_each(|row_i| {
@@ -120,7 +120,7 @@ impl<D: DataRef> GGSW<D> {
    let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());

    let mut scratch: ScratchOwned<B> =
        ScratchOwned::alloc(GLWE::decrypt_scratch_space(module, self) | module.vec_znx_normalize_tmp_bytes());
        ScratchOwned::alloc(GLWE::decrypt_tmp_bytes(module, self) | module.vec_znx_normalize_tmp_bytes());

    (0..(self.rank() + 1).into()).for_each(|col_j| {
        (0..self.dnum().into()).for_each(|row_i| {

@@ -61,7 +61,7 @@ impl<D: DataRef> GLWE<D> {
        + VecZnxNormalizeInplace<B>,
    B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_scratch_space(module, self));
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_tmp_bytes(module, self));
    let noise_have: f64 = self.noise(module, sk_prepared, pt_want, scratch.borrow());
    assert!(noise_have <= max_noise, "{noise_have} {max_noise}");
}

@@ -246,10 +246,10 @@ pub trait GLWEOperations: GLWEToMut + GLWEInfos + SetGLWEInfos + Sized {
        });
    }

    fn copy<A, B: Backend>(&mut self, module: &Module<B>, a: &A)
    fn copy<A, M>(&mut self, module: &M, a: &A)
    where
        A: GLWEToRef + GLWEInfos,
        Module<B>: VecZnxCopy,
        M: VecZnxCopy,
    {
        #[cfg(debug_assertions)]
        {
@@ -319,8 +319,8 @@ pub trait GLWEOperations: GLWEToMut + GLWEInfos + SetGLWEInfos + Sized {
}

impl GLWE<Vec<u8>> {
    pub fn rsh_scratch_space(n: usize) -> usize {
        VecZnx::rsh_scratch_space(n)
    pub fn rsh_tmp_bytes(n: usize) -> usize {
        VecZnx::rsh_tmp_bytes(n)
    }
}

@@ -120,9 +120,9 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        AutomorphismKey::encrypt_sk_scratch_space(module, &auto_key_in_infos)
            | AutomorphismKey::encrypt_sk_scratch_space(module, &auto_key_apply_infos)
            | AutomorphismKey::automorphism_scratch_space(
        AutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key_in_infos)
            | AutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key_apply_infos)
            | AutomorphismKey::automorphism_tmp_bytes(
                module,
                &auto_key_out_infos,
                &auto_key_in_infos,
@@ -319,9 +319,9 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        AutomorphismKey::encrypt_sk_scratch_space(module, &auto_key)
            | AutomorphismKey::encrypt_sk_scratch_space(module, &auto_key_apply)
            | AutomorphismKey::automorphism_inplace_scratch_space(module, &auto_key, &auto_key_apply),
        AutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key)
            | AutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key_apply)
            | AutomorphismKey::automorphism_inplace_tmp_bytes(module, &auto_key, &auto_key_apply),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&auto_key);

@@ -139,10 +139,10 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ct_in)
            | AutomorphismKey::encrypt_sk_scratch_space(module, &auto_key)
            | TensorKey::encrypt_sk_scratch_space(module, &tensor_key)
            | GGSW::automorphism_scratch_space(module, &ct_out, &ct_in, &auto_key, &tensor_key),
        GGSW::encrypt_sk_tmp_bytes(module, &ct_in)
            | AutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key)
            | TensorKey::encrypt_sk_tmp_bytes(module, &tensor_key)
            | GGSW::automorphism_tmp_bytes(module, &ct_out, &ct_in, &auto_key, &tensor_key),
    );

    let var_xs: f64 = 0.5;
@@ -319,10 +319,10 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ct)
            | AutomorphismKey::encrypt_sk_scratch_space(module, &auto_key)
            | TensorKey::encrypt_sk_scratch_space(module, &tensor_key)
            | GGSW::automorphism_inplace_scratch_space(module, &ct, &auto_key, &tensor_key),
        GGSW::encrypt_sk_tmp_bytes(module, &ct)
            | AutomorphismKey::encrypt_sk_tmp_bytes(module, &auto_key)
            | TensorKey::encrypt_sk_tmp_bytes(module, &tensor_key)
            | GGSW::automorphism_inplace_tmp_bytes(module, &ct, &auto_key, &tensor_key),
    );

    let var_xs: f64 = 0.5;

@@ -112,10 +112,10 @@ where
    module.vec_znx_fill_uniform(base2k, &mut pt_want.data, 0, &mut source_xa);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        AutomorphismKey::encrypt_sk_scratch_space(module, &autokey)
            | GLWE::decrypt_scratch_space(module, &ct_out)
            | GLWE::encrypt_sk_scratch_space(module, &ct_in)
            | GLWE::automorphism_scratch_space(module, &ct_out, &ct_in, &autokey),
        AutomorphismKey::encrypt_sk_tmp_bytes(module, &autokey)
            | GLWE::decrypt_tmp_bytes(module, &ct_out)
            | GLWE::encrypt_sk_tmp_bytes(module, &ct_in)
            | GLWE::automorphism_tmp_bytes(module, &ct_out, &ct_in, &autokey),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&ct_out);
@@ -246,10 +246,10 @@ where
    module.vec_znx_fill_uniform(base2k, &mut pt_want.data, 0, &mut source_xa);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        AutomorphismKey::encrypt_sk_scratch_space(module, &autokey)
            | GLWE::decrypt_scratch_space(module, &ct)
            | GLWE::encrypt_sk_scratch_space(module, &ct)
            | GLWE::automorphism_inplace_scratch_space(module, &ct, &autokey),
        AutomorphismKey::encrypt_sk_tmp_bytes(module, &autokey)
            | GLWE::decrypt_tmp_bytes(module, &ct)
            | GLWE::encrypt_sk_tmp_bytes(module, &ct)
            | GLWE::automorphism_inplace_tmp_bytes(module, &ct, &autokey),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&ct);

@@ -96,9 +96,9 @@ where
    };

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, &lwe_to_glwe_infos)
            | GLWE::from_lwe_scratch_space(module, &glwe_infos, &lwe_infos, &lwe_to_glwe_infos)
            | GLWE::decrypt_scratch_space(module, &glwe_infos),
        LWEToGLWESwitchingKey::encrypt_sk_tmp_bytes(module, &lwe_to_glwe_infos)
            | GLWE::from_lwe_tmp_bytes(module, &glwe_infos, &lwe_infos, &lwe_to_glwe_infos)
            | GLWE::decrypt_tmp_bytes(module, &glwe_infos),
    );

    let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);
@@ -213,9 +213,9 @@ where
    let mut source_xe: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWEToLWESwitchingKey::encrypt_sk_scratch_space(module, &glwe_to_lwe_infos)
            | LWE::from_glwe_scratch_space(module, &lwe_infos, &glwe_infos, &glwe_to_lwe_infos)
            | GLWE::decrypt_scratch_space(module, &glwe_infos),
        GLWEToLWESwitchingKey::encrypt_sk_tmp_bytes(module, &glwe_to_lwe_infos)
            | LWE::from_glwe_tmp_bytes(module, &lwe_infos, &glwe_infos, &glwe_to_lwe_infos)
            | GLWE::decrypt_tmp_bytes(module, &glwe_infos),
    );

    let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);

@@ -90,7 +90,7 @@ where
    let mut source_xe: Source = Source::new([0u8; 32]);
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_tmp_bytes(
        module, &atk_infos,
    ));

@@ -192,7 +192,7 @@ where
    let mut source_xs: Source = Source::new([0u8; 32]);
    let mut source_xe: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(AutomorphismKey::encrypt_sk_tmp_bytes(
        module, &atk_infos,
    ));

@@ -87,7 +87,7 @@ where
    let mut source_xe: Source = Source::new([0u8; 32]);
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::encrypt_sk_tmp_bytes(
        module,
        &gglwe_infos,
    ));
@@ -179,7 +179,7 @@ where
    let mut source_xs: Source = Source::new([0u8; 32]);
    let mut source_xe: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKeyCompressed::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKeyCompressed::encrypt_sk_tmp_bytes(
        module,
        &gglwe_infos,
    ));

@@ -55,7 +55,7 @@ where

    pt_scalar.fill_ternary_hw(0, n, &mut source_xs);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSW::encrypt_sk_scratch_space(module, &ggsw_infos));
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSW::encrypt_sk_tmp_bytes(module, &ggsw_infos));

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&ggsw_infos);
    sk.fill_ternary_prob(0.5, &mut source_xs);
@@ -144,7 +144,7 @@ where

    pt_scalar.fill_ternary_hw(0, n, &mut source_xs);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSWCompressed::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GGSWCompressed::encrypt_sk_tmp_bytes(
        module,
        &ggsw_infos,
    ));

@@ -85,7 +85,7 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWE::encrypt_sk_scratch_space(module, &glwe_infos) | GLWE::decrypt_scratch_space(module, &glwe_infos),
        GLWE::encrypt_sk_tmp_bytes(module, &glwe_infos) | GLWE::decrypt_tmp_bytes(module, &glwe_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);
@@ -178,7 +178,7 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWECompressed::encrypt_sk_scratch_space(module, &glwe_infos) | GLWE::decrypt_scratch_space(module, &glwe_infos),
        GLWECompressed::encrypt_sk_tmp_bytes(module, &glwe_infos) | GLWE::decrypt_tmp_bytes(module, &glwe_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);
@@ -269,7 +269,7 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWE::decrypt_scratch_space(module, &glwe_infos) | GLWE::encrypt_sk_scratch_space(module, &glwe_infos),
        GLWE::decrypt_tmp_bytes(module, &glwe_infos) | GLWE::encrypt_sk_tmp_bytes(module, &glwe_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);
@@ -349,9 +349,9 @@ where
    let mut source_xu: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWE::encrypt_sk_scratch_space(module, &glwe_infos)
            | GLWE::decrypt_scratch_space(module, &glwe_infos)
            | GLWE::encrypt_pk_scratch_space(module, &glwe_infos),
        GLWE::encrypt_sk_tmp_bytes(module, &glwe_infos)
            | GLWE::decrypt_tmp_bytes(module, &glwe_infos)
            | GLWE::encrypt_pk_tmp_bytes(module, &glwe_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_infos);

@@ -86,7 +86,7 @@ where
    let mut source_xe: Source = Source::new([0u8; 32]);
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(TensorKey::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(TensorKey::encrypt_sk_tmp_bytes(
        module,
        &tensor_key_infos,
    ));
@@ -204,7 +204,7 @@ where
    let mut source_xs: Source = Source::new([0u8; 32]);
    let mut source_xe: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(TensorKeyCompressed::encrypt_sk_scratch_space(
    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(TensorKeyCompressed::encrypt_sk_tmp_bytes(
        module,
        &tensor_key_infos,
    ));

@@ -121,14 +121,14 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_in_infos)
            | GLWESwitchingKey::external_product_scratch_space(
        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_in_infos)
            | GLWESwitchingKey::external_product_tmp_bytes(
                module,
                &gglwe_out_infos,
                &gglwe_in_infos,
                &ggsw_infos,
            )
            | GGSW::encrypt_sk_scratch_space(module, &ggsw_infos),
            | GGSW::encrypt_sk_tmp_bytes(module, &ggsw_infos),
    );

    let r: usize = 1;
@@ -292,9 +292,9 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_out_infos)
            | GLWESwitchingKey::external_product_inplace_scratch_space(module, &gglwe_out_infos, &ggsw_infos)
            | GGSW::encrypt_sk_scratch_space(module, &ggsw_infos),
        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_out_infos)
            | GLWESwitchingKey::external_product_inplace_tmp_bytes(module, &gglwe_out_infos, &ggsw_infos)
            | GGSW::encrypt_sk_tmp_bytes(module, &ggsw_infos),
    );

    let r: usize = 1;

@@ -128,9 +128,9 @@ where
    pt_apply.to_mut().raw_mut()[k] = 1; //X^{k}

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ggsw_apply_infos)
            | GGSW::encrypt_sk_scratch_space(module, &ggsw_in_infos)
            | GGSW::external_product_scratch_space(module, &ggsw_out_infos, &ggsw_in_infos, &ggsw_apply_infos),
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_apply_infos)
            | GGSW::encrypt_sk_tmp_bytes(module, &ggsw_in_infos)
            | GGSW::external_product_tmp_bytes(module, &ggsw_out_infos, &ggsw_in_infos, &ggsw_apply_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), rank.into());
@@ -282,9 +282,9 @@ where
    pt_apply.to_mut().raw_mut()[k] = 1; //X^{k}

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ggsw_apply_infos)
            | GGSW::encrypt_sk_scratch_space(module, &ggsw_out_infos)
            | GGSW::external_product_inplace_scratch_space(module, &ggsw_out_infos, &ggsw_apply_infos),
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_apply_infos)
            | GGSW::encrypt_sk_tmp_bytes(module, &ggsw_out_infos)
            | GGSW::external_product_inplace_tmp_bytes(module, &ggsw_out_infos, &ggsw_apply_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), rank.into());

@@ -116,9 +116,9 @@ where
    pt_ggsw.raw_mut()[k] = 1; // X^{k}

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ggsw_apply_infos)
            | GLWE::encrypt_sk_scratch_space(module, &glwe_in_infos)
            | GLWE::external_product_scratch_space(module, &glwe_out_infos, &glwe_in_infos, &ggsw_apply_infos),
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_apply_infos)
            | GLWE::encrypt_sk_tmp_bytes(module, &glwe_in_infos)
            | GLWE::external_product_tmp_bytes(module, &glwe_out_infos, &glwe_in_infos, &ggsw_apply_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), rank.into());
@@ -259,9 +259,9 @@ where
    pt_ggsw.raw_mut()[k] = 1; // X^{k}

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ggsw_apply_infos)
            | GLWE::encrypt_sk_scratch_space(module, &glwe_out_infos)
            | GLWE::external_product_inplace_scratch_space(module, &glwe_out_infos, &ggsw_apply_infos),
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_apply_infos)
            | GLWE::encrypt_sk_tmp_bytes(module, &glwe_out_infos)
            | GLWE::external_product_inplace_tmp_bytes(module, &glwe_out_infos, &ggsw_apply_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), rank.into());

@@ -119,11 +119,11 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch_enc: ScratchOwned<B> = ScratchOwned::alloc(
        GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_s0s1_infos)
            | GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_s1s2_infos)
            | GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_s0s2_infos),
        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_s0s1_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_s1s2_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_s0s2_infos),
    );
    let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::keyswitch_scratch_space(
    let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::keyswitch_tmp_bytes(
        module,
        &gglwe_s0s1_infos,
        &gglwe_s0s2_infos,
@@ -274,10 +274,10 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch_enc: ScratchOwned<B> = ScratchOwned::alloc(
        GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_s0s1_infos)
            | GLWESwitchingKey::encrypt_sk_scratch_space(module, &gglwe_s1s2_infos),
        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_s0s1_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &gglwe_s1s2_infos),
    );
    let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::keyswitch_inplace_scratch_space(
    let mut scratch_apply: ScratchOwned<B> = ScratchOwned::alloc(GLWESwitchingKey::keyswitch_inplace_tmp_bytes(
        module,
        &gglwe_s0s1_infos,
        &gglwe_s1s2_infos,

@@ -132,10 +132,10 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ggsw_in_infos)
            | GLWESwitchingKey::encrypt_sk_scratch_space(module, &ksk_apply_infos)
            | TensorKey::encrypt_sk_scratch_space(module, &tsk_infos)
            | GGSW::keyswitch_scratch_space(
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_in_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &ksk_apply_infos)
            | TensorKey::encrypt_sk_tmp_bytes(module, &tsk_infos)
            | GGSW::keyswitch_tmp_bytes(
                module,
                &ggsw_out_infos,
                &ggsw_in_infos,
@@ -310,10 +310,10 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GGSW::encrypt_sk_scratch_space(module, &ggsw_out_infos)
            | GLWESwitchingKey::encrypt_sk_scratch_space(module, &ksk_apply_infos)
            | TensorKey::encrypt_sk_scratch_space(module, &tsk_infos)
            | GGSW::keyswitch_inplace_scratch_space(module, &ggsw_out_infos, &ksk_apply_infos, &tsk_infos),
        GGSW::encrypt_sk_tmp_bytes(module, &ggsw_out_infos)
            | GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &ksk_apply_infos)
            | TensorKey::encrypt_sk_tmp_bytes(module, &tsk_infos)
            | GGSW::keyswitch_inplace_tmp_bytes(module, &ggsw_out_infos, &ksk_apply_infos, &tsk_infos),
    );

    let var_xs: f64 = 0.5;

@@ -112,9 +112,9 @@ where
    module.vec_znx_fill_uniform(base2k, &mut pt_want.data, 0, &mut source_xa);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWESwitchingKey::encrypt_sk_scratch_space(module, &key_apply)
            | GLWE::encrypt_sk_scratch_space(module, &glwe_in_infos)
            | GLWE::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, &key_apply),
        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &key_apply)
            | GLWE::encrypt_sk_tmp_bytes(module, &glwe_in_infos)
            | GLWE::keyswitch_tmp_bytes(module, &glwe_out_infos, &glwe_in_infos, &key_apply),
    );

    let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), rank_in.into());
@@ -244,9 +244,9 @@ where
    module.vec_znx_fill_uniform(base2k, &mut pt_want.data, 0, &mut source_xa);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWESwitchingKey::encrypt_sk_scratch_space(module, &key_apply_infos)
            | GLWE::encrypt_sk_scratch_space(module, &glwe_out_infos)
            | GLWE::keyswitch_inplace_scratch_space(module, &glwe_out_infos, &key_apply_infos),
        GLWESwitchingKey::encrypt_sk_tmp_bytes(module, &key_apply_infos)
            | GLWE::encrypt_sk_tmp_bytes(module, &glwe_out_infos)
            | GLWE::keyswitch_inplace_tmp_bytes(module, &glwe_out_infos, &key_apply_infos),
    );

    let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n.into(), rank.into());

@@ -99,8 +99,8 @@ where
    };

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        LWESwitchingKey::encrypt_sk_scratch_space(module, &key_apply_infos)
            | LWE::keyswitch_scratch_space(module, &lwe_out_infos, &lwe_in_infos, &key_apply_infos),
        LWESwitchingKey::encrypt_sk_tmp_bytes(module, &key_apply_infos)
            | LWE::keyswitch_tmp_bytes(module, &lwe_out_infos, &lwe_in_infos, &key_apply_infos),
    );

    let mut sk_lwe_in: LWESecret<Vec<u8>> = LWESecret::alloc(n_lwe_in.into());

@@ -105,9 +105,9 @@ where
    };

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWE::encrypt_sk_scratch_space(module, &glwe_out_infos)
            | AutomorphismKey::encrypt_sk_scratch_space(module, &key_infos)
            | GLWEPacker::scratch_space(module, &glwe_out_infos, &key_infos),
        GLWE::encrypt_sk_tmp_bytes(module, &glwe_out_infos)
            | AutomorphismKey::encrypt_sk_tmp_bytes(module, &key_infos)
            | GLWEPacker::tmp_bytes(module, &glwe_out_infos, &key_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_out_infos);

@@ -107,10 +107,10 @@ where
    let mut source_xa: Source = Source::new([0u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(
        GLWE::encrypt_sk_scratch_space(module, &glwe_out_infos)
            | GLWE::decrypt_scratch_space(module, &glwe_out_infos)
            | AutomorphismKey::encrypt_sk_scratch_space(module, &key_infos)
            | GLWE::trace_inplace_scratch_space(module, &glwe_out_infos, &key_infos),
        GLWE::encrypt_sk_tmp_bytes(module, &glwe_out_infos)
            | GLWE::decrypt_tmp_bytes(module, &glwe_out_infos)
            | AutomorphismKey::encrypt_sk_tmp_bytes(module, &key_infos)
            | GLWE::trace_inplace_tmp_bytes(module, &glwe_out_infos, &key_infos),
    );

    let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc_from_infos(&glwe_out_infos);