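//! CGGI blind-rotation keys for TFHE: allocation, secret-key encryption, and
//! the compressed and prepared variants. A blind-rotation key holds one GGSW
//! ciphertext per coefficient of the LWE secret.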
use backend::hal::{
    api::{
        ScratchAvailable, SvpApplyInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
        VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigConsume, VecZnxFillUniform,
        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VmpPMatAlloc,
        VmpPrepare, ZnxView, ZnxViewMut,
    },
    layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch},
};
use sampling::source::Source;

use std::marker::PhantomData;

use core::{
    Distribution,
    layouts::{
        GGSWCiphertext, LWESecret,
        compressed::GGSWCiphertextCompressed,
        prepared::{GGSWCiphertextPrepared, GLWESecretPrepared},
    },
};

use crate::tfhe::blind_rotation::{
    BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyCompressed, BlindRotationKeyEncryptSk, BlindRotationKeyPrepared,
    BlindRotationKeyPreparedAlloc, CGGI,
};

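/// Allocation of an uncompressed CGGI blind-rotation key: one GGSW ciphertext
/// per coefficient of the LWE secret.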
impl BlindRotationKeyAlloc for BlindRotationKey<Vec<u8>, CGGI> {
    fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
        let mut data: Vec<GGSWCiphertext<Vec<u8>>> = Vec::with_capacity(n_lwe);
        (0..n_lwe).for_each(|_| data.push(GGSWCiphertext::alloc(n_gglwe, basek, k, rows, 1, rank)));
        Self {
            keys: data,
            dist: Distribution::NONE,
            _phantom: PhantomData,
        }
    }
}

impl BlindRotationKey<Vec<u8>, CGGI> {
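    /// Returns the number of scratch bytes required to encrypt this key from a
    /// secret key; delegates to [`GGSWCiphertext::encrypt_sk_scratch_space`].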
    pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
    where
        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
    {
        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k, rank)
    }
}

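/// Secret-key encryption of a CGGI blind-rotation key: the i-th GGSW
/// ciphertext encrypts the i-th coefficient of the LWE secret under the
/// prepared GLWE secret.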
impl<D: DataMut, B: Backend> BlindRotationKeyEncryptSk<B> for BlindRotationKey<D, CGGI>
where
    Module<B>: VecZnxAddScalarInplace
        + VecZnxDftAllocBytes
        + VecZnxBigNormalize<B>
        + VecZnxDftFromVecZnx<B>
        + SvpApplyInplace<B>
        + VecZnxDftToVecZnxBigConsume<B>
        + VecZnxNormalizeTmpBytes
        + VecZnxFillUniform
        + VecZnxSubABInplace
        + VecZnxAddInplace
        + VecZnxNormalizeInplace<B>
        + VecZnxAddNormal
        + VecZnxNormalize<B>
        + VecZnxSub,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
    fn encrypt_sk<DataSkGLWE, DataSkLWE>(
        &mut self,
        module: &Module<B>,
        sk_glwe: &GLWESecretPrepared<DataSkGLWE, B>,
        sk_lwe: &LWESecret<DataSkLWE>,
        source_xa: &mut Source,
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) where
        DataSkGLWE: DataRef,
        DataSkLWE: DataRef,
    {
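        // Debug-only sanity checks: the key must hold one GGSW per LWE secret
        // coefficient, the GLWE ring degree must fit the module, ranks must
        // agree, and the LWE secret must follow a supported distribution.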
        #[cfg(debug_assertions)]
        {
            assert_eq!(self.keys.len(), sk_lwe.n());
            assert!(sk_glwe.n() <= module.n());
            assert_eq!(sk_glwe.rank(), self.keys[0].rank());
            match sk_lwe.dist() {
                Distribution::BinaryBlock(_)
                | Distribution::BinaryFixed(_)
                | Distribution::BinaryProb(_)
                | Distribution::ZERO => {}
                _ => panic!(
                    "invalid LWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb (or ZERO for debugging)"
                ),
            }
        }

        self.dist = sk_lwe.dist();

        let mut pt: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(sk_glwe.n(), 1);
        let sk_ref: ScalarZnx<&[u8]> = sk_lwe.data().to_ref();

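        // Each GGSW ciphertext encrypts a single coefficient of the LWE secret.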
        self.keys.iter_mut().enumerate().for_each(|(i, ggsw)| {
            pt.at_mut(0, 0)[0] = sk_ref.at(0, 0)[i];
            ggsw.encrypt_sk(module, &pt, sk_glwe, source_xa, source_xe, sigma, scratch);
        });
    }
}

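/// Allocation of a prepared CGGI blind-rotation key, i.e. the backend-ready
/// (VMP-prepared) form consumed during blind rotation.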
impl<B: Backend> BlindRotationKeyPreparedAlloc<B> for BlindRotationKeyPrepared<Vec<u8>, CGGI, B>
where
    Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
{
    fn alloc(module: &Module<B>, n_glwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
        let mut data: Vec<GGSWCiphertextPrepared<Vec<u8>, B>> = Vec::with_capacity(n_lwe);
        (0..n_lwe).for_each(|_| {
            data.push(GGSWCiphertextPrepared::alloc(
                module, n_glwe, basek, k, rows, 1, rank,
            ))
        });
        Self {
            data,
            dist: Distribution::NONE,
            x_pow_a: None,
            _phantom: PhantomData,
        }
    }
}

impl BlindRotationKeyCompressed<Vec<u8>, CGGI> {
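    /// Allocates a compressed CGGI blind-rotation key: one compressed GGSW
    /// ciphertext per coefficient of the LWE secret.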
    pub fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
        let mut data: Vec<GGSWCiphertextCompressed<Vec<u8>>> = Vec::with_capacity(n_lwe);
        (0..n_lwe).for_each(|_| {
            data.push(GGSWCiphertextCompressed::alloc(
                n_gglwe, basek, k, rows, 1, rank,
            ))
        });
        Self {
            keys: data,
            dist: Distribution::NONE,
            _phantom: PhantomData,
        }
    }

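    /// Returns the number of scratch bytes required to encrypt the compressed
    /// key; delegates to [`GGSWCiphertextCompressed::encrypt_sk_scratch_space`].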
    pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
    where
        Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
    {
        GGSWCiphertextCompressed::encrypt_sk_scratch_space(module, n, basek, k, rank)
    }
}

impl<D: DataMut> BlindRotationKeyCompressed<D, CGGI> {
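    /// Encrypts a compressed CGGI blind-rotation key from the secret keys. The
    /// public randomness is derived deterministically from `seed_xa`, so the
    /// compressed key only needs to carry seeds instead of full `a` polynomials.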
    pub fn encrypt_sk<DataSkGLWE, DataSkLWE, B: Backend>(
        &mut self,
        module: &Module<B>,
        sk_glwe: &GLWESecretPrepared<DataSkGLWE, B>,
        sk_lwe: &LWESecret<DataSkLWE>,
        seed_xa: [u8; 32],
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) where
        DataSkGLWE: DataRef,
        DataSkLWE: DataRef,
        Module<B>: VecZnxAddScalarInplace
            + VecZnxDftAllocBytes
            + VecZnxBigNormalize<B>
            + VecZnxDftFromVecZnx<B>
            + SvpApplyInplace<B>
            + VecZnxDftToVecZnxBigConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
            + VecZnxSubABInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
            + VecZnxNormalize<B>
            + VecZnxSub,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(self.keys.len(), sk_lwe.n());
            assert!(sk_glwe.n() <= module.n());
            assert_eq!(sk_glwe.rank(), self.keys[0].rank());
            match sk_lwe.dist() {
                Distribution::BinaryBlock(_)
                | Distribution::BinaryFixed(_)
                | Distribution::BinaryProb(_)
                | Distribution::ZERO => {}
                _ => panic!(
                    "invalid LWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb (or ZERO for debugging)"
                ),
            }
        }

        self.dist = sk_lwe.dist();

        let mut pt: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(sk_glwe.n(), 1);
        let sk_ref: ScalarZnx<&[u8]> = sk_lwe.data().to_ref();

        let mut source_xa: Source = Source::new(seed_xa);

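        // Each GGSW ciphertext encrypts one coefficient of the LWE secret and
        // is seeded with a fresh seed drawn from `source_xa`.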
        self.keys.iter_mut().enumerate().for_each(|(i, ggsw)| {
            pt.at_mut(0, 0)[0] = sk_ref.at(0, 0)[i];
            ggsw.encrypt_sk(
                module,
                &pt,
                sk_glwe,
                source_xa.new_seed(),
                source_xe,
                sigma,
                scratch,
            );
        });
    }
}