Merge branch 'dev_trait' into dev_trait_practice

Rasoul Akhavan Mahdavi
2025-10-16 23:53:45 -04:00
6 changed files with 321 additions and 261 deletions

View File

@@ -1,100 +1,125 @@
use poulpy_hal::{
api::{
VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
api::ModuleN,
layouts::{Backend, DataMut, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
};
use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared};
use crate::{
GLWEKeyswitch, ScratchTakeCore,
layouts::{
GGLWEInfos, GLWE, GLWEAlloc, GLWEInfos, GLWELayout, GLWEToRef, LWE, LWEInfos, LWEToMut, Rank,
prepared::{LWEToGLWESwitchingKeyPrepared, LWEToGLWESwitchingKeyPreparedToRef},
},
};
impl LWE<Vec<u8>> {
pub fn from_glwe_tmp_bytes<B: Backend, OUT, IN, KEY>(
module: &Module<B>,
lwe_infos: &OUT,
glwe_infos: &IN,
key_infos: &KEY,
) -> usize
pub trait LWESampleExtract
where
OUT: LWEInfos,
IN: GLWEInfos,
KEY: GGLWEInfos,
Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
Self: ModuleN,
{
let glwe_layout: GLWELayout = GLWELayout {
n: module.n().into(),
fn lwe_sample_extract<R, A>(&self, res: &mut R, a: &A)
where
R: LWEToMut,
A: GLWEToRef,
{
let res: &mut LWE<&mut [u8]> = &mut res.to_mut();
let a: &GLWE<&[u8]> = &a.to_ref();
assert!(res.n() <= a.n());
assert_eq!(a.n(), self.n() as u32);
assert!(res.base2k() == a.base2k());
let min_size: usize = res.size().min(a.size());
let n: usize = res.n().into();
res.data.zero();
(0..min_size).for_each(|i| {
let data_lwe: &mut [i64] = res.data.at_mut(0, i);
data_lwe[0] = a.data.at(0, i)[0];
data_lwe[1..].copy_from_slice(&a.data.at(1, i)[..n]);
});
}
}
impl<BE: Backend> LWESampleExtract for Module<BE> where Self: ModuleN {}
impl<BE: Backend> LWEFromGLWE<BE> for Module<BE> where Self: GLWEKeyswitch<BE> + GLWEAlloc + LWESampleExtract {}
pub trait LWEFromGLWE<BE: Backend>
where
Self: GLWEKeyswitch<BE> + GLWEAlloc + LWESampleExtract,
{
fn lwe_from_glwe_tmp_bytes<R, A, K>(&self, lwe_infos: &R, glwe_infos: &A, key_infos: &K) -> usize
where
R: LWEInfos,
A: GLWEInfos,
K: GGLWEInfos,
{
let res_infos: GLWELayout = GLWELayout {
n: self.n().into(),
base2k: lwe_infos.base2k(),
k: lwe_infos.k(),
rank: Rank(1),
};
GLWE::bytes_of(
module.n().into(),
lwe_infos.base2k(),
lwe_infos.k(),
1u32.into(),
) + GLWE::keyswitch_tmp_bytes(module, &glwe_layout, glwe_infos, key_infos)
}
self.bytes_of_glwe(lwe_infos.base2k(), lwe_infos.k(), 1u32.into())
+ self.glwe_keyswitch_tmp_bytes(&res_infos, glwe_infos, key_infos)
}
impl<DLwe: DataMut> LWE<DLwe> {
pub fn sample_extract<DGlwe: DataRef>(&mut self, a: &GLWE<DGlwe>) {
#[cfg(debug_assertions)]
fn lwe_from_glwe<R, A, K>(&self, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: LWEToMut,
A: GLWEToRef,
K: LWEToGLWESwitchingKeyPreparedToRef<BE> + GGLWEInfos,
Scratch<BE>: ScratchTakeCore<BE>,
{
assert!(self.n() <= a.n());
assert!(self.base2k() == a.base2k());
}
let res: &mut LWE<&mut [u8]> = &mut res.to_mut();
let a: &GLWE<&[u8]> = &a.to_ref();
let key: &LWEToGLWESwitchingKeyPrepared<&[u8], BE> = &key.to_ref();
let min_size: usize = self.size().min(a.size());
let n: usize = self.n().into();
self.data.zero();
(0..min_size).for_each(|i| {
let data_lwe: &mut [i64] = self.data.at_mut(0, i);
data_lwe[0] = a.data.at(0, i)[0];
data_lwe[1..].copy_from_slice(&a.data.at(1, i)[..n]);
});
}
pub fn from_glwe<DGlwe, DKs, B: Backend>(
&mut self,
module: &Module<B>,
a: &GLWE<DGlwe>,
ks: &GLWEToLWESwitchingKeyPrepared<DKs, B>,
scratch: &mut Scratch<B>,
) where
DGlwe: DataRef,
DKs: DataRef,
Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>:,
{
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), module.n() as u32);
assert_eq!(ks.n(), module.n() as u32);
assert!(self.n() <= module.n() as u32);
}
assert_eq!(a.n(), self.n() as u32);
assert_eq!(key.n(), self.n() as u32);
assert!(res.n() <= self.n() as u32);
let glwe_layout: GLWELayout = GLWELayout {
n: module.n().into(),
base2k: self.base2k(),
k: self.k(),
n: self.n().into(),
base2k: res.base2k(),
k: res.k(),
rank: Rank(1),
};
let (mut tmp_glwe, scratch_1) = scratch.take_glwe_ct(&glwe_layout);
tmp_glwe.keyswitch(module, a, &ks.0, scratch_1);
self.sample_extract(&tmp_glwe);
let (mut tmp_glwe, scratch_1) = scratch.take_glwe_ct(self, &glwe_layout);
self.glwe_keyswitch(&mut tmp_glwe, a, &key.0, scratch_1);
self.lwe_sample_extract(res, &tmp_glwe);
}
}
impl LWE<Vec<u8>> {
pub fn from_glwe_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, lwe_infos: &R, glwe_infos: &A, key_infos: &K) -> usize
where
R: LWEInfos,
A: GLWEInfos,
K: GGLWEInfos,
M: LWEFromGLWE<BE>,
{
module.lwe_from_glwe_tmp_bytes(lwe_infos, glwe_infos, key_infos)
}
}
impl<D: DataMut> LWE<D> {
pub fn sample_extract<A, M>(&mut self, module: &M, a: &A)
where
A: GLWEToRef,
M: LWESampleExtract,
{
module.lwe_sample_extract(self, a);
}
pub fn from_glwe<R, A, K, M, BE: Backend>(&self, module: &M, res: &mut R, a: &A, key: &K, scratch: &mut Scratch<BE>)
where
R: LWEToMut,
A: GLWEToRef,
K: LWEToGLWESwitchingKeyPreparedToRef<BE> + GGLWEInfos,
M: LWEFromGLWE<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.lwe_from_glwe(res, a, key, scratch);
}
}
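The `lwe_sample_extract` loop above copies, limb by limb, the constant coefficient of the GLWE body and the first `n` mask coefficients into the LWE ciphertext. A minimal standalone sketch of that per-limb copy, on plain slices with illustrative names (not the crate's GLWE/LWE layouts), assuming the diff's convention that row 0 holds the body and row 1 holds the mask:

// Sketch (not the crate's API): the per-limb copy performed by lwe_sample_extract,
// assuming row 0 of the GLWE holds the body and row 1 holds the mask.
fn sample_extract_limb(lwe_limb: &mut [i64], glwe_body: &[i64], glwe_mask: &[i64]) {
    let n = lwe_limb.len() - 1; // LWE dimension being extracted
    lwe_limb[0] = glwe_body[0]; // constant term of the body
    lwe_limb[1..].copy_from_slice(&glwe_mask[..n]); // first n mask coefficients
}

fn main() {
    let body = [7i64, 1, 2, 3];
    let mask = [10i64, 11, 12, 13];
    let mut lwe = [0i64; 4];
    sample_extract_limb(&mut lwe, &body, &mask);
    assert_eq!(lwe, [7, 10, 11, 12]);
}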

View File

@@ -1,79 +1,67 @@
use poulpy_hal::{
api::{
ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
api::ScratchTakeBasic,
layouts::{Backend, DataMut, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
};
use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared};
use crate::{
GLWEKeyswitch, ScratchTakeCore,
layouts::{
GGLWEInfos, GLWE, GLWEAlloc, GLWEInfos, GLWELayout, GLWEToMut, LWE, LWEInfos, LWEToRef,
prepared::{LWEToGLWESwitchingKeyPrepared, LWEToGLWESwitchingKeyPreparedToRef},
},
};
impl GLWE<Vec<u8>> {
pub fn from_lwe_tmp_bytes<B: Backend, OUT, IN, KEY>(
module: &Module<B>,
glwe_infos: &OUT,
lwe_infos: &IN,
key_infos: &KEY,
) -> usize
impl<BE: Backend> GLWEFromLWE<BE> for Module<BE> where Self: GLWEKeyswitch<BE> + GLWEAlloc {}
pub trait GLWEFromLWE<BE: Backend>
where
OUT: GLWEInfos,
IN: LWEInfos,
KEY: GGLWEInfos,
Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
Self: GLWEKeyswitch<BE> + GLWEAlloc,
{
let ct: usize = GLWE::bytes_of(
module.n().into(),
fn glwe_from_lwe_tmp_bytes<R, A, K>(&self, glwe_infos: &R, lwe_infos: &A, key_infos: &K) -> usize
where
R: GLWEInfos,
A: LWEInfos,
K: GGLWEInfos,
{
let ct: usize = self.bytes_of_glwe(
key_infos.base2k(),
lwe_infos.k().max(glwe_infos.k()),
1u32.into(),
);
let ks: usize = GLWE::keyswitch_inplace_tmp_bytes(module, glwe_infos, key_infos);
let ks: usize = self.glwe_keyswitch_tmp_bytes(glwe_infos, glwe_infos, key_infos);
if lwe_infos.base2k() == key_infos.base2k() {
ct + ks
} else {
let a_conv = VecZnx::bytes_of(module.n(), 1, lwe_infos.size()) + module.vec_znx_normalize_tmp_bytes();
let a_conv = VecZnx::bytes_of(self.n(), 1, lwe_infos.size()) + self.vec_znx_normalize_tmp_bytes();
ct + a_conv + ks
}
}
}
impl<D: DataMut> GLWE<D> {
pub fn from_lwe<DLwe, DKsk, B: Backend>(
&mut self,
module: &Module<B>,
lwe: &LWE<DLwe>,
ksk: &LWEToGLWESwitchingKeyPrepared<DKsk, B>,
scratch: &mut Scratch<B>,
) where
DLwe: DataRef,
DKsk: DataRef,
Module<B>: VecZnxDftBytesOf
+ VmpApplyDftToDftTmpBytes
+ VecZnxBigNormalizeTmpBytes
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable,
fn glwe_from_lwe<R, A, K>(&self, res: &mut R, lwe: &A, ksk: &K, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
A: LWEToRef,
K: LWEToGLWESwitchingKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n() as u32);
assert_eq!(ksk.n(), module.n() as u32);
assert!(lwe.n() <= module.n() as u32);
}
let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
let lwe: &LWE<&[u8]> = &lwe.to_ref();
let ksk: &LWEToGLWESwitchingKeyPrepared<&[u8], BE> = &ksk.to_ref();
let (mut glwe, scratch_1) = scratch.take_glwe_ct(&GLWELayout {
assert_eq!(res.n(), self.n() as u32);
assert_eq!(ksk.n(), self.n() as u32);
assert!(lwe.n() <= self.n() as u32);
let (mut glwe, scratch_1) = scratch.take_glwe_ct(
self,
&GLWELayout {
n: ksk.n(),
base2k: ksk.base2k(),
k: lwe.k(),
rank: 1u32.into(),
});
},
);
glwe.data.zero();
let n_lwe: usize = lwe.n().into();
@@ -85,14 +73,14 @@ impl<D: DataMut> GLWE<D> {
glwe.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
}
} else {
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module.n(), 1, lwe.size());
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(self, 1, lwe.size());
a_conv.zero();
for j in 0..lwe.size() {
let data_lwe: &[i64] = lwe.data.at(0, j);
a_conv.at_mut(0, j)[0] = data_lwe[0]
}
module.vec_znx_normalize(
self.vec_znx_normalize(
ksk.base2k().into(),
&mut glwe.data,
0,
@@ -108,7 +96,7 @@ impl<D: DataMut> GLWE<D> {
a_conv.at_mut(0, j)[..n_lwe].copy_from_slice(&data_lwe[1..]);
}
module.vec_znx_normalize(
self.vec_znx_normalize(
ksk.base2k().into(),
&mut glwe.data,
1,
@@ -119,6 +107,30 @@ impl<D: DataMut> GLWE<D> {
);
}
self.keyswitch(module, &glwe, &ksk.0, scratch_1);
self.glwe_keyswitch(res, &glwe, &ksk.0, scratch_1);
}
}
impl GLWE<Vec<u8>> {
pub fn from_lwe_tmp_bytes<R, A, K, M, BE: Backend>(module: &M, glwe_infos: &R, lwe_infos: &A, key_infos: &K) -> usize
where
R: GLWEInfos,
A: LWEInfos,
K: GGLWEInfos,
M: GLWEFromLWE<BE>,
{
module.glwe_from_lwe_tmp_bytes(glwe_infos, lwe_infos, key_infos)
}
}
impl<D: DataMut> GLWE<D> {
pub fn from_lwe<A, K, M, BE: Backend>(&mut self, module: &M, lwe: &A, ksk: &K, scratch: &mut Scratch<BE>)
where
M: GLWEFromLWE<BE>,
A: LWEToRef,
K: LWEToGLWESwitchingKeyPreparedToRef<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
module.glwe_from_lwe(self, lwe, ksk, scratch);
}
}
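The blanket impls introduced here (e.g. `impl<BE: Backend> GLWEFromLWE<BE> for Module<BE> where Self: GLWEKeyswitch<BE> + GLWEAlloc {}`) are the core of the dev_trait refactor: the conversion logic lives as default method bodies on a trait, and any module satisfying the super-trait bounds gets those methods for free. A minimal self-contained sketch of that pattern, with illustrative trait names rather than the crate's:

// Sketch of the blanket-impl pattern used throughout this refactor
// (trait and type names here are illustrative, not the crate's).
trait KeySwitch {
    fn keyswitch_tmp_bytes(&self) -> usize;
}
trait Alloc {
    fn ct_bytes(&self) -> usize;
}

// Default method bodies live on the trait and rely only on the super-traits.
trait FromLwe: KeySwitch + Alloc {
    fn from_lwe_tmp_bytes(&self) -> usize {
        self.ct_bytes() + self.keyswitch_tmp_bytes()
    }
}

// Blanket impl: every type satisfying the bounds implements FromLwe automatically.
impl<T: KeySwitch + Alloc> FromLwe for T {}

struct Module;
impl KeySwitch for Module {
    fn keyswitch_tmp_bytes(&self) -> usize { 2048 }
}
impl Alloc for Module {
    fn ct_bytes(&self) -> usize { 4096 }
}

fn main() {
    // Module never implements FromLwe explicitly, yet the method is available.
    assert_eq!(Module.from_lwe_tmp_bytes(), 6144);
}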

View File

@@ -3,3 +3,5 @@ mod glwe_to_lwe;
mod lwe_to_glwe;
pub use gglwe_to_ggsw::*;
pub use glwe_to_lwe::*;
pub use lwe_to_glwe::*;

View File

@@ -1,12 +1,13 @@
use std::collections::HashMap;
use poulpy_hal::{
api::{ModuleLogN, VecZnxCopy, VecZnxRotateInplace},
layouts::{Backend, DataMut, DataRef, GaloisElement, Module, Scratch},
api::ModuleLogN,
layouts::{Backend, GaloisElement, Module, Scratch},
};
use crate::{
GLWEAdd, GLWEAutomorphism, GLWENormalize, GLWERotate, GLWEShift, GLWESub, ScratchTakeCore,
GLWEAdd, GLWEAutomorphism, GLWECopy, GLWENormalize, GLWERotate, GLWEShift, GLWESub, ScratchTakeCore,
glwe_trace::GLWETrace,
layouts::{
GGLWEInfos, GLWE, GLWEAlloc, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
prepared::{AutomorphismKeyPreparedToRef, GetAutomorphismGaloisElement},
@@ -40,10 +41,10 @@ impl Accumulator {
/// * `base2k`: base 2 logarithm of the in-memory digit representation of the GLWE ciphertext.
/// * `k`: base 2 precision of the GLWE ciphertext over the Torus.
/// * `rank`: rank of the GLWE ciphertext.
pub fn alloc<A, M>(module: &M, infos: &A) -> Self
pub fn alloc<A, M, BE: Backend>(module: &M, infos: &A) -> Self
where
A: GLWEInfos,
M: GLWEAlloc,
M: GLWEPacking<BE>,
{
Self {
data: GLWE::alloc_from_infos(module, infos),
@@ -64,15 +65,15 @@ impl GLWEPacker {
/// and N GLWE ciphertexts can be packed. With `log_batch=2`, all coefficients
/// that are multiples of X^{N/4} are packed, meaning that N/4 ciphertexts
/// can be packed.
pub fn new<A, M>(module: &M, infos: &A, log_batch: usize) -> Self
pub fn alloc<A, M, BE: Backend>(module: &M, infos: &A, log_batch: usize) -> Self
where
A: GLWEInfos,
M: GLWEAlloc,
M: GLWEPacking<BE>,
{
let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
let log_n: usize = infos.n().log2();
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(module, infos)));
Self {
GLWEPacker {
accumulators,
log_batch,
counter: 0,
@@ -93,13 +94,19 @@ impl GLWEPacker {
where
R: GLWEInfos,
K: GGLWEInfos,
M: GLWEAlloc + GLWEAutomorphism<BE>,
M: GLWEPacking<BE>,
{
pack_core_tmp_bytes(module, res_infos, key_infos)
module.bytes_of_glwe_from_infos(res_infos)
+ module
.glwe_rsh_tmp_byte()
.max(module.glwe_automorphism_tmp_bytes(res_infos, res_infos, key_infos))
}
pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
GLWE::trace_galois_elements(module)
pub fn galois_elements<M, BE: Backend>(module: &M) -> Vec<i64>
where
M: GLWETrace<BE>,
{
module.glwe_trace_galois_elements()
}
/// Adds a GLWE ciphertext to the [GLWEPacker].
@@ -111,11 +118,11 @@ impl GLWEPacker {
/// * `a`: ciphertext to pack. Optionally pass `None` to pack a zero ciphertext.
/// * `auto_keys`: a [HashMap] containing the [AutomorphismKeyExec]s.
/// * `scratch`: scratch space of size at least [Self::tmp_bytes].
pub fn add<A, K, M, BE: Backend>(&mut self, module: &M, a: Option<&A>, auto_keys: &HashMap<i64, K>, scratch: &mut Scratch<B>)
pub fn add<A, K, M, BE: Backend>(&mut self, module: &M, a: Option<&A>, auto_keys: &HashMap<i64, K>, scratch: &mut Scratch<BE>)
where
A: GLWEToRef,
K: AutomorphismKeyPreparedToRef<BE>,
M: GLWEAutomorphism<BE>,
A: GLWEToRef + GLWEInfos,
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
M: GLWEPacking<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
assert!(
@@ -136,14 +143,15 @@ impl GLWEPacker {
}
/// Flush result to `res`.
pub fn flush<Data: DataMut, B: Backend>(&mut self, module: &Module<B>, res: &mut GLWE<Data>)
pub fn flush<R, M, BE: Backend>(&mut self, module: &M, res: &mut R)
where
Module<B>: VecZnxCopy,
R: GLWEToMut,
M: GLWEPacking<BE>,
{
assert!(self.counter as u32 == self.accumulators[0].data.n());
// Copy result GLWE into res GLWE
res.copy(
module,
module.glwe_copy(
res,
&self.accumulators[module.log_n() - self.log_batch - 1].data,
);
@@ -151,177 +159,18 @@ impl GLWEPacker {
}
}
fn pack_core_tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
where
R: GLWEInfos,
K: GGLWEInfos,
M: GLWEAlloc + GLWEAutomorphism<BE>,
impl<BE: Backend> GLWEPacking<BE> for Module<BE> where
Self: GLWEAutomorphism<BE>
+ GaloisElement
+ ModuleLogN
+ GLWERotate<BE>
+ GLWESub
+ GLWEShift<BE>
+ GLWEAdd
+ GLWENormalize<BE>
+ GLWECopy
+ GLWEAlloc
{
combine_tmp_bytes(module, res_infos, key_infos)
}
fn pack_core<A, K, M, BE: Backend>(
module: &M,
a: Option<&A>,
accumulators: &mut [Accumulator],
i: usize,
auto_keys: &HashMap<i64, K>,
scratch: &mut Scratch<BE>,
) where
A: GLWEToRef + GLWEInfos,
K: AutomorphismKeyPreparedToRef<BE>,
M: GLWEAutomorphism<BE> + ModuleLogN + VecZnxCopy,
Scratch<BE>: ScratchTakeCore<BE>,
{
let log_n: usize = module.log_n();
if i == log_n {
return;
}
// Isolate the first accumulator
let (acc_prev, acc_next) = accumulators.split_at_mut(1);
// Control = true: accumulator is free to override
if !acc_prev[0].control {
let acc_mut_ref: &mut Accumulator = &mut acc_prev[0]; // from split_at_mut
// No previous value -> copies and sets flags accordingly
if let Some(a_ref) = a {
acc_mut_ref.data.copy(module, a_ref);
acc_mut_ref.value = true
} else {
acc_mut_ref.value = false
}
acc_mut_ref.control = true; // Able to be combined on next call
} else {
// Compresses acc_prev <- combine(acc_prev, a).
combine(module, &mut acc_prev[0], a, i, auto_keys, scratch);
acc_prev[0].control = false;
// Propagates to next accumulator
if acc_prev[0].value {
pack_core(
module,
Some(&acc_prev[0].data),
acc_next,
i + 1,
auto_keys,
scratch,
);
} else {
pack_core(
module,
None::<&GLWE<Vec<u8>>>,
acc_next,
i + 1,
auto_keys,
scratch,
);
}
}
}
fn combine_tmp_bytes<R, K, M, BE: Backend>(module: &M, res_infos: &R, key_infos: &K) -> usize
where
R: GLWEInfos,
K: GGLWEInfos,
M: GLWEAlloc + GLWEAutomorphism<BE>,
{
GLWE::bytes_of_from_infos(module, res_infos)
+ (GLWE::rsh_tmp_bytes(module.n()) | module.glwe_automorphism_tmp_bytes(res_infos, res_infos, key_infos))
}
/// [combine] merges two ciphertexts together.
fn combine<B, M, K, BE: Backend>(
module: &M,
acc: &mut Accumulator,
b: Option<&B>,
i: usize,
auto_keys: &HashMap<i64, K>,
scratch: &mut Scratch<BE>,
) where
B: GLWEToRef + GLWEInfos,
K: AutomorphismKeyPreparedToRef<BE>,
M: GLWEAutomorphism<BE> + GaloisElement + VecZnxRotateInplace<BE>,
Scratch<BE>: ScratchTakeCore<BE>,
{
let log_n: usize = acc.data.n().log2();
let a: &mut GLWE<Vec<u8>> = &mut acc.data;
let gal_el: i64 = if i == 0 {
-1
} else {
module.galois_element(1 << (i - 1))
};
let t: i64 = 1 << (log_n - i - 1);
// Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t)
// We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
// where t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}
// Different cases depending on whether a and/or b are zero.
//
// Implicit RSH without modulus switch introduces an extra I(X) * Q/2 on decryption.
// It is necessary so that the scaling of the plaintext remains constant.
// It is however fine to do so here because each coefficient eventually ends up
// either as garbage or as twice its value, which cancels I(X)
// since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
if acc.value {
if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, a);
// a = a * X^-t
a.rotate_inplace(module, -t, scratch_1);
// tmp_b = a * X^-t - b
tmp_b.sub(module, a, b);
tmp_b.rsh(module, 1, scratch_1);
// a = a * X^-t + b
a.add_inplace(module, b);
a.rsh(module, 1, scratch_1);
tmp_b.normalize_inplace(module, scratch_1);
// tmp_b = phi(a * X^-t - b)
if let Some(key) = auto_keys.get(&gal_el) {
tmp_b.automorphism_inplace(module, key, scratch_1);
} else {
panic!("auto_key[{gal_el}] not found");
}
// a = a * X^-t + b - phi(a * X^-t - b)
a.sub_inplace_ab(module, &tmp_b);
a.normalize_inplace(module, scratch_1);
// a = a + b * X^t - phi(a * X^-t - b) * X^t
// = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
// = a + b * X^t + phi(a - b * X^t)
a.rotate_inplace(module, t, scratch_1);
} else {
a.rsh(module, 1, scratch);
// a = a + phi(a)
if let Some(key) = auto_keys.get(&gal_el) {
a.automorphism_add_inplace(module, key, scratch);
} else {
panic!("auto_key[{gal_el}] not found");
}
}
} else if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(a);
tmp_b.rotate(module, 1 << (log_n - i - 1), b);
tmp_b.rsh(module, 1, scratch_1);
// a = (b * X^t - phi(b * X^t))
if let Some(key) = auto_keys.get(&gal_el) {
a.automorphism_sub_negate(module, &tmp_b, key, scratch_1);
} else {
panic!("auto_key[{gal_el}] not found");
}
acc.value = true;
}
}
pub trait GLWEPacking<BE: Backend>
@@ -333,7 +182,9 @@ where
+ GLWESub
+ GLWEShift<BE>
+ GLWEAdd
+ GLWENormalize<BE>,
+ GLWENormalize<BE>
+ GLWECopy
+ GLWEAlloc,
{
/// Packs [x_0: GLWE(m_0), x_1: GLWE(m_1), ..., x_i: GLWE(m_i)]
/// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)]
@@ -375,8 +226,171 @@ where
} else if let Some(b) = b {
cts.insert(j, b);
}
}
}
}
}
fn pack_core<A, K, M, BE: Backend>(
module: &M,
a: Option<&A>,
accumulators: &mut [Accumulator],
i: usize,
auto_keys: &HashMap<i64, K>,
scratch: &mut Scratch<BE>,
) where
A: GLWEToRef + GLWEInfos,
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
M: ModuleLogN
+ GLWEAutomorphism<BE>
+ GaloisElement
+ GLWERotate<BE>
+ GLWESub
+ GLWEShift<BE>
+ GLWEAdd
+ GLWENormalize<BE>
+ GLWECopy,
Scratch<BE>: ScratchTakeCore<BE>,
{
let log_n: usize = module.log_n();
if i == log_n {
return;
}
// Isolate the first accumulator
let (acc_prev, acc_next) = accumulators.split_at_mut(1);
// Control = true: accumulator is free to override
if !acc_prev[0].control {
let acc_mut_ref: &mut Accumulator = &mut acc_prev[0]; // from split_at_mut
// No previous value -> copies and sets flags accordingly
if let Some(a_ref) = a {
module.glwe_copy(&mut acc_mut_ref.data, a_ref);
acc_mut_ref.value = true
} else {
acc_mut_ref.value = false
}
acc_mut_ref.control = true; // Able to be combined on next call
} else {
// Compresses acc_prev <- combine(acc_prev, a).
combine(module, &mut acc_prev[0], a, i, auto_keys, scratch);
acc_prev[0].control = false;
// Propagates to next accumulator
if acc_prev[0].value {
pack_core(
module,
Some(&acc_prev[0].data),
acc_next,
i + 1,
auto_keys,
scratch,
);
} else {
pack_core(
module,
None::<&GLWE<Vec<u8>>>,
acc_next,
i + 1,
auto_keys,
scratch,
);
}
}
}
/// [combine] merges two ciphertexts together.
fn combine<B, M, K, BE: Backend>(
module: &M,
acc: &mut Accumulator,
b: Option<&B>,
i: usize,
auto_keys: &HashMap<i64, K>,
scratch: &mut Scratch<BE>,
) where
B: GLWEToRef + GLWEInfos,
M: GLWEAutomorphism<BE> + GaloisElement + GLWERotate<BE> + GLWESub + GLWEShift<BE> + GLWEAdd + GLWENormalize<BE>,
K: AutomorphismKeyPreparedToRef<BE> + GetAutomorphismGaloisElement,
Scratch<BE>: ScratchTakeCore<BE>,
{
let log_n: usize = acc.data.n().log2();
let a: &mut GLWE<Vec<u8>> = &mut acc.data;
let gal_el: i64 = if i == 0 {
-1
} else {
module.galois_element(1 << (i - 1))
};
let t: i64 = 1 << (log_n - i - 1);
// Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t)
// We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
// where t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}
// Different cases depending on whether a and/or b are zero.
//
// Implicit RSH without modulus switch introduces an extra I(X) * Q/2 on decryption.
// It is necessary so that the scaling of the plaintext remains constant.
// It is however fine to do so here because each coefficient eventually ends up
// either as garbage or as twice its value, which cancels I(X)
// since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
if acc.value {
if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, a);
// a = a * X^-t
module.glwe_rotate_inplace(-t, a, scratch_1);
// tmp_b = a * X^-t - b
module.glwe_sub(&mut tmp_b, a, b);
module.glwe_rsh(1, &mut tmp_b, scratch_1);
// a = a * X^-t + b
module.glwe_add_inplace(a, b);
module.glwe_rsh(1, a, scratch_1);
module.glwe_normalize_inplace(&mut tmp_b, scratch_1);
// tmp_b = phi(a * X^-t - b)
if let Some(auto_key) = auto_keys.get(&gal_el) {
module.glwe_automorphism_inplace(&mut tmp_b, auto_key, scratch_1);
} else {
panic!("auto_key[{gal_el}] not found");
}
// a = a * X^-t + b - phi(a * X^-t - b)
module.glwe_sub_inplace(a, &tmp_b);
module.glwe_normalize_inplace(a, scratch_1);
// a = a + b * X^t - phi(a * X^-t - b) * X^t
// = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
// = a + b * X^t + phi(a - b * X^t)
module.glwe_rotate_inplace(t, a, scratch_1);
} else {
module.glwe_rsh(1, a, scratch);
// a = a + phi(a)
if let Some(auto_key) = auto_keys.get(&gal_el) {
module.glwe_automorphism_add_inplace(a, auto_key, scratch);
} else {
panic!("auto_key[{gal_el}] not found");
}
}
} else if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(module, a);
module.glwe_rotate(t, &mut tmp_b, b);
module.glwe_rsh(1, &mut tmp_b, scratch_1);
// a = (b * X^t - phi(b * X^t))
if let Some(auto_key) = auto_keys.get(&gal_el) {
module.glwe_automorphism_sub_negate(a, &tmp_b, auto_key, scratch_1);
} else {
panic!("auto_key[{gal_el}] not found");
}
acc.value = true;
}
}
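The comment in `combine` uses the identity AUTO(a * X^t, g) = -X^t * AUTO(a, g) with t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}. A short sketch of why it holds over Z[X]/(X^N + 1), writing phi_g for the automorphism X -> X^g (the i = 0 case with g = -1 follows the same way):

\varphi_g\bigl(a(X)\,X^t\bigr) = \varphi_g(a)\,X^{gt} = \varphi_g(a)\,X^{t}\,X^{t(g-1)}.
% For g = 5^{2^{i-1}} (i >= 1) one has g - 1 = 2^{i+1} u with u odd, and t = N / 2^{i+1}, so
t(g-1) = \frac{N}{2^{i+1}} \cdot 2^{i+1} u = N u,
\qquad
X^{t(g-1)} = (X^N)^u = (-1)^u = -1,
% and therefore
\varphi_g\bigl(a\,X^t\bigr) = -X^t\,\varphi_g(a).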

View File

@@ -4,7 +4,7 @@ use poulpy_hal::{
};
use crate::{
ScratchTakeCore,
LWESampleExtract, ScratchTakeCore,
keyswitching::glwe_ct::GLWEKeyswitch,
layouts::{
GGLWEInfos, GLWE, GLWEAlloc, GLWELayout, LWE, LWEInfos, LWEToMut, LWEToRef, Rank, TorusPrecision,
@@ -40,7 +40,7 @@ impl<BE: Backend> LWEKeySwitch<BE> for Module<BE> where Self: LWEKeySwitch<BE> {
pub trait LWEKeySwitch<BE: Backend>
where
Self: GLWEKeyswitch<BE> + GLWEAlloc,
Self: GLWEKeyswitch<BE> + GLWEAlloc + LWESampleExtract,
{
fn lwe_keyswitch_tmp_bytes<R, A, K>(&self, res_infos: &R, a_infos: &A, key_infos: &K) -> usize
where
@@ -121,6 +121,6 @@ where
}
self.glwe_keyswitch(&mut glwe_out, &glwe_in, &ksk.0, scratch_1);
res.sample_extract(&glwe_out);
self.lwe_sample_extract(res, &glwe_out);
}
}
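The `*_tmp_bytes` helpers in this diff follow one sizing convention: buffers that are alive at the same time are summed, while scratch for mutually exclusive sub-steps is combined with `max` (e.g. `bytes_of_glwe_from_infos(res_infos) + glwe_rsh_tmp_byte().max(glwe_automorphism_tmp_bytes(...))` in the packer). A toy illustration of that rule, with made-up sizes:

// Toy illustration of the scratch-sizing convention used by the *_tmp_bytes
// helpers: concurrent buffers are summed, alternative sub-steps are max'ed.
fn tmp_bytes(tmp_ct_bytes: usize, rsh_tmp: usize, automorphism_tmp: usize) -> usize {
    tmp_ct_bytes + rsh_tmp.max(automorphism_tmp)
}

fn main() {
    // A 4 KiB temporary ciphertext plus the larger of the two sub-operations.
    assert_eq!(tmp_bytes(4096, 512, 2048), 4096 + 2048);
}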

View File

@@ -277,6 +277,10 @@ pub trait GLWEShift<BE: Backend>
where
Self: ModuleN + VecZnxRshInplace<BE>,
{
fn glwe_rsh_tmp_byte(&self) -> usize {
VecZnx::rsh_tmp_bytes(self.n())
}
fn glwe_rsh<R>(&self, k: usize, res: &mut R, scratch: &mut Scratch<BE>)
where
R: GLWEToMut,
@@ -291,8 +295,11 @@ where
}
impl GLWE<Vec<u8>> {
pub fn rsh_tmp_bytes(n: usize) -> usize {
VecZnx::rsh_tmp_bytes(n)
pub fn rsh_tmp_bytes<M, BE: Backend>(module: &M) -> usize
where
M: GLWEShift<BE>,
{
module.glwe_rsh_tmp_byte()
}
}