Add schemes (#71)

* Move br + cbt to schemes/tfhe

* refactor blind rotation

* refactor circuit bootstrapping

* renamed exec -> prepared
This commit is contained in:
Jean-Philippe Bossuat
2025-08-15 15:06:26 +02:00
committed by GitHub
parent 8d9897b88b
commit c7219c35e9
130 changed files with 2631 additions and 3270 deletions

11
schemes/Cargo.toml Normal file
View File

@@ -0,0 +1,11 @@
# Manifest for the `schemes` crate: higher-level FHE schemes (currently TFHE)
# built on top of the workspace's backend / core / sampling crates.
[package]
name = "schemes"
version = "0.1.0"
edition = "2024"
[dependencies]
# Workspace-local crates (path dependencies).
backend = {path="../backend"}
core = {path="../core"}
sampling = {path="../sampling"}
# External crates.
itertools = "0.14.0"
byteorder = "1.5.0"

2
schemes/src/lib.rs Normal file
View File

@@ -0,0 +1,2 @@
//! Crate root of `schemes`: exposes TFHE-style schemes built on the HAL backend.
// Nightly-only feature: used to declare backend capability bundles (e.g.
// `CCGIBlindRotationFamily`) as trait aliases.
#![feature(trait_alias)]
pub mod tfhe;

View File

@@ -0,0 +1,482 @@
use backend::hal::{
api::{
ScratchAvailable, SvpApply, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalizeTmpBytes, VecZnxCopy,
VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftFromVecZnx, VecZnxDftSubABInplace, VecZnxDftToVecZnxBig,
VecZnxDftToVecZnxBigTmpBytes, VecZnxDftZero, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
VecZnxRotate, VecZnxSubABInplace, VmpApplyTmpBytes, ZnxView, ZnxZero,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, SvpPPol, VecZnx},
};
use itertools::izip;
use core::{
Distribution, GLWEOperations, TakeGLWECt,
layouts::{GLWECiphertext, GLWECiphertextToMut, Infos, LWECiphertext, LWECiphertextToRef},
trait_families::GLWEExternalProductFamily,
};
use crate::tfhe::blind_rotation::{
BlincRotationExecute, BlindRotationKeyPrepared, CGGI, LookUpTable, LookUpTableRotationDirection,
};
/// Trait alias bundling every backend capability required by the CGGI
/// blind-rotation routines of this module: scratch-size queries, DFT and
/// big-coefficient conversions/arithmetic, scalar-vector and vector-matrix
/// products, rotations, normalization and GLWE external products.
///
/// NOTE(review): the name reads "CCGI" while the algorithm marker used
/// everywhere else is `CGGI` — likely a transposition typo. Renaming is a
/// public-API change, so it is only flagged here.
pub trait CCGIBlindRotationFamily<B: Backend> = VecZnxBigAllocBytes
    + VecZnxDftAllocBytes
    + SvpPPolAllocBytes
    + VmpApplyTmpBytes
    + VecZnxBigNormalizeTmpBytes
    + VecZnxDftToVecZnxBigTmpBytes
    + VecZnxDftToVecZnxBig<B>
    + VecZnxDftAdd<B>
    + VecZnxDftAddInplace<B>
    + VecZnxDftFromVecZnx<B>
    + VecZnxDftZero<B>
    + SvpApply<B>
    + VecZnxDftSubABInplace<B>
    + VecZnxBigAddSmallInplace<B>
    + GLWEExternalProductFamily<B>
    + VecZnxRotate
    + VecZnxAddInplace
    + VecZnxSubABInplace
    + VecZnxNormalize<B>
    + VecZnxNormalizeInplace<B>
    + VecZnxCopy
    + VecZnxMulXpMinusOneInplace;
/// Returns the scratch-space size (in bytes) required to run a CGGI blind
/// rotation with the given parameters.
///
/// * `n`: GLWE ring degree.
/// * `block_size`: block size of the LWE secret (1 selects the standard path).
/// * `extension_factor`: number of polynomials the LUT is split over.
/// * `basek`: decomposition base (bits).
/// * `k_res` / `k_brk`: precision of the result / blind-rotation key.
/// * `rows`: decomposition rows of the key.
/// * `rank`: GLWE rank.
pub fn cggi_blind_rotate_scratch_space<B: Backend>(
    module: &Module<B>,
    n: usize,
    block_size: usize,
    extension_factor: usize,
    basek: usize,
    k_res: usize,
    k_brk: usize,
    rows: usize,
    rank: usize,
) -> usize
where
    Module<B>: CCGIBlindRotationFamily<B>,
{
    let brk_size: usize = k_brk.div_ceil(basek);
    if block_size > 1 {
        // Block-binary path: mirrors the take_* calls performed by
        // execute_block_binary / execute_block_binary_extended.
        let cols: usize = rank + 1;
        let acc_dft: usize = module.vec_znx_dft_alloc_bytes(n, cols, rows) * extension_factor;
        let acc_big: usize = module.vec_znx_big_alloc_bytes(n, 1, brk_size);
        let vmp_res: usize = module.vec_znx_dft_alloc_bytes(n, cols, brk_size) * extension_factor;
        let vmp_xai: usize = module.vec_znx_dft_alloc_bytes(n, 1, brk_size);
        let acc_dft_add: usize = vmp_res;
        let vmp: usize = module.vmp_apply_tmp_bytes(n, brk_size, rows, rows, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
        // The extended path additionally materializes the split accumulators.
        let acc: usize;
        if extension_factor > 1 {
            acc = VecZnx::alloc_bytes(n, cols, k_res.div_ceil(basek)) * extension_factor;
        } else {
            acc = 0;
        }
        // NOTE(review): `|` below is a bitwise OR of byte counts, apparently a
        // cheap upper bound on the max of two regions that are not live at the
        // same time (a | b >= max(a, b)) — confirm it is intentional and not a
        // typo for `.max()`.
        return acc
            + acc_dft
            + acc_dft_add
            + vmp_res
            + vmp_xai
            + (vmp | (acc_big + (module.vec_znx_big_normalize_tmp_bytes(n) | module.vec_znx_dft_to_vec_znx_big_tmp_bytes(n))));
    } else {
        // Standard path: one temporary GLWE ciphertext + external-product scratch.
        GLWECiphertext::bytes_of(n, basek, k_res, rank)
            + GLWECiphertext::external_product_scratch_space(module, n, basek, k_res, k_res, k_brk, 1, rank)
    }
}
impl<D: DataRef, B: Backend> BlincRotationExecute<B> for BlindRotationKeyPrepared<D, CGGI, B>
where
    Module<B>: CCGIBlindRotationFamily<B>,
    Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + TakeVecZnx + ScratchAvailable,
{
    /// Dispatches the blind rotation to the routine matching the key
    /// distribution, the LUT extension factor and the key block size.
    fn execute<DR: DataMut, DI: DataRef>(
        &self,
        module: &Module<B>,
        res: &mut GLWECiphertext<DR>,
        lwe: &LWECiphertext<DI>,
        lut: &LookUpTable,
        scratch: &mut Scratch<B>,
    ) {
        // Only (block-)binary secret distributions (plus ZERO, for debugging)
        // are valid for CGGI.
        let supported = matches!(
            self.dist,
            Distribution::BinaryBlock(_) | Distribution::BinaryFixed(_) | Distribution::BinaryProb(_) | Distribution::ZERO
        );
        if !supported {
            panic!("invalid CGGI distribution");
        }
        if lut.extension_factor() > 1 {
            // LUT spans several polynomials: ring-splitting variant.
            execute_block_binary_extended(module, res, lwe, lut, self, scratch);
        } else if self.block_size() > 1 {
            execute_block_binary(module, res, lwe, lut, self, scratch);
        } else {
            execute_standard(module, res, lwe, lut, self, scratch);
        }
    }
}
/// Blind rotation (CGGI, block-binary key) with ring extension
/// (`extension_factor > 1`): the LUT is split over `extension_factor`
/// polynomials of degree `n_glwe`, emulating a larger cyclic ring through the
/// homomorphism R^{N} -> prod R^{N/extension_factor}.
///
/// Writes the GLWE encryption of the first polynomial of the rotated LUT into
/// `res`.
///
/// # Panics
/// Panics if the key was prepared without its `x_pow_a` monomial table
/// (populated by `prepare` only for `BinaryBlock` keys).
fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
    module: &Module<B>,
    res: &mut GLWECiphertext<DataRes>,
    lwe: &LWECiphertext<DataIn>,
    lut: &LookUpTable,
    brk: &BlindRotationKeyPrepared<DataBrk, CGGI, B>,
    scratch: &mut Scratch<B>,
) where
    DataRes: DataMut,
    DataIn: DataRef,
    DataBrk: DataRef,
    Module<B>: CCGIBlindRotationFamily<B>,
    Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + ScratchAvailable + TakeVecZnx,
{
    let n_glwe: usize = brk.n();
    let extension_factor: usize = lut.extension_factor();
    let basek: usize = res.basek();
    let rows: usize = brk.rows();
    let cols: usize = res.rank() + 1; // rank + 1 polynomials per GLWE ciphertext
    // One accumulator (and DFT-domain mirrors) per LUT polynomial.
    let (mut acc, scratch1) = scratch.take_vec_znx_slice(extension_factor, n_glwe, cols, res.size());
    let (mut acc_dft, scratch2) = scratch1.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, rows);
    let (mut vmp_res, scratch3) = scratch2.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
    let (mut acc_add_dft, scratch4) = scratch3.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
    let (mut vmp_xai, scratch5) = scratch4.take_vec_znx_dft(n_glwe, 1, brk.size());
    (0..extension_factor).for_each(|i| {
        acc[i].zero();
    });
    // Precomputed monomials X^{i} in SVP form; only present on keys prepared
    // from a BinaryBlock distribution.
    let x_pow_a: &Vec<SvpPPol<Vec<u8>, B>>;
    if let Some(b) = &brk.x_pow_a {
        x_pow_a = b
    } else {
        panic!("invalid key: x_pow_a has not been initialized")
    }
    let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
    let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();
    let two_n: usize = 2 * n_glwe;
    let two_n_ext: usize = 2 * lut.domain_size();
    // Rescale the LWE ciphertext modulus down to 2 * (extended ring degree).
    mod_switch_2n(two_n_ext, &mut lwe_2n, &lwe_ref, lut.rotation_direction());
    let a: &[i64] = &lwe_2n[1..];
    // Map b into [0, 2n_ext) and split it into a coarse rotation (b_hi: within
    // degree-n_glwe polynomials) and a fine one (b_lo: across the split LUT).
    let b_pos: usize = ((lwe_2n[0] + two_n_ext as i64) & (two_n_ext - 1) as i64) as usize;
    let b_hi: usize = b_pos / extension_factor;
    let b_lo: usize = b_pos & (extension_factor - 1);
    // Initialize acc = X^{b} * LUT: slots that wrap around pick up one extra
    // coarse rotation (b_hi + 1).
    for (i, j) in (0..b_lo).zip(extension_factor - b_lo..extension_factor) {
        module.vec_znx_rotate(b_hi as i64 + 1, &mut acc[i], 0, &lut.data[j], 0);
    }
    for (i, j) in (b_lo..extension_factor).zip(0..extension_factor - b_lo) {
        module.vec_znx_rotate(b_hi as i64, &mut acc[i], 0, &lut.data[j], 0);
    }
    let block_size: usize = brk.block_size();
    // Per block of key ciphertexts:
    //   acc += sum_i (X^{-a_i} - 1) * (DFT(acc) x BRK_i), evaluated in DFT,
    // normalizing once per block.
    izip!(
        a.chunks_exact(block_size),
        brk.data.chunks_exact(block_size)
    )
    .for_each(|(ai, ski)| {
        (0..extension_factor).for_each(|i| {
            (0..cols).for_each(|j| {
                module.vec_znx_dft_from_vec_znx(1, 0, &mut acc_dft[i], j, &acc[i], j);
            });
            module.vec_znx_dft_zero(&mut acc_add_dft[i])
        });
        // TODO: first & last iterations can be optimized
        izip!(ai.iter(), ski.iter()).for_each(|(aii, skii)| {
            // Decompose a_i like b above: coarse (ai_hi) + fine (ai_lo) rotation.
            let ai_pos: usize = ((aii + two_n_ext as i64) & (two_n_ext - 1) as i64) as usize;
            let ai_hi: usize = ai_pos / extension_factor;
            let ai_lo: usize = ai_pos & (extension_factor - 1);
            // vmp_res = DFT(acc) * BRK[i]
            (0..extension_factor).for_each(|i| {
                module.vmp_apply(&mut vmp_res[i], &acc_dft[i], skii.data(), scratch5);
            });
            // Trivial case: no rotation between polynomials, we can directly multiply with (X^{-ai} - 1)
            if ai_lo == 0 {
                // Sets acc_add_dft[i] = (acc[i] * sk) * X^{-ai} - (acc[i] * sk)
                if ai_hi != 0 {
                    // DFT X^{-ai}
                    (0..extension_factor).for_each(|j| {
                        (0..cols).for_each(|i| {
                            module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_hi], 0, &vmp_res[j], i);
                            module.vec_znx_dft_add_inplace(&mut acc_add_dft[j], i, &vmp_xai, 0);
                            module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[j], i, &vmp_res[j], i);
                        });
                    });
                }
            // Non trivial case: rotation between polynomials
            // In this case we can't directly multiply with (X^{-ai} - 1) because of the
            // ring homomorphism R^{N} -> prod R^{N/extension_factor}, so we split the
            // computation in two steps: acc_add_dft = (acc * sk) * (-1) + (acc * sk) * X^{-ai}
            } else {
                // Sets acc_add_dft[0..ai_lo] += (acc[extension_factor - ai_lo..extension_factor] * sk) * X^{-ai+1}
                if (ai_hi + 1) & (two_n - 1) != 0 {
                    for (i, j) in (0..ai_lo).zip(extension_factor - ai_lo..extension_factor) {
                        (0..cols).for_each(|k| {
                            module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_hi + 1], 0, &vmp_res[j], k);
                            module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_xai, 0);
                            module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
                        });
                    }
                }
                // Sets acc_add_dft[ai_lo..extension_factor] += (acc[0..extension_factor - ai_lo] * sk) * X^{-ai}
                if ai_hi != 0 {
                    for (i, j) in (ai_lo..extension_factor).zip(0..extension_factor - ai_lo) {
                        (0..cols).for_each(|k| {
                            module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_hi], 0, &vmp_res[j], k);
                            module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_xai, 0);
                            module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
                        });
                    }
                }
            }
        });
        // Leave the DFT domain, add the unmodified accumulator back and
        // renormalize: acc = normalize(acc + IDFT(acc_add_dft)).
        {
            let (mut acc_add_big, scratch7) = scratch5.take_vec_znx_big(n_glwe, 1, brk.size());
            (0..extension_factor).for_each(|j| {
                (0..cols).for_each(|i| {
                    module.vec_znx_dft_to_vec_znx_big(&mut acc_add_big, 0, &acc_add_dft[j], i, scratch7);
                    module.vec_znx_big_add_small_inplace(&mut acc_add_big, 0, &acc[j], i);
                    module.vec_znx_big_normalize(basek, &mut acc[j], i, &acc_add_big, 0, scratch7);
                });
            });
        }
    });
    // Only the first of the extension_factor accumulators is the result.
    (0..cols).for_each(|i| {
        module.vec_znx_copy(&mut res.data, i, &acc[0], i);
    });
}
/// Blind rotation (CGGI) for a block-binary key without ring extension
/// (`extension_factor == 1`, `block_size > 1`).
///
/// The accumulator is initialized to X^{b} * LUT(X); for each block of key
/// ciphertexts it accumulates sum_i (X^{ai} - 1) * (DFT(ACC) x BRK_i) in the
/// DFT domain and renormalizes once per block (rather than once per key bit).
///
/// # Panics
/// Panics if the key was prepared without its `x_pow_a` monomial table.
fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
    module: &Module<B>,
    res: &mut GLWECiphertext<DataRes>,
    lwe: &LWECiphertext<DataIn>,
    lut: &LookUpTable,
    brk: &BlindRotationKeyPrepared<DataBrk, CGGI, B>,
    scratch: &mut Scratch<B>,
) where
    DataRes: DataMut,
    DataIn: DataRef,
    DataBrk: DataRef,
    Module<B>: CCGIBlindRotationFamily<B>,
    Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + ScratchAvailable + TakeVecZnx,
{
    let n_glwe: usize = brk.n();
    let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
    let mut out_mut: GLWECiphertext<&mut [u8]> = res.to_mut();
    let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();
    let two_n: usize = n_glwe << 1;
    let basek: usize = brk.basek();
    let rows: usize = brk.rows();
    let cols: usize = out_mut.rank() + 1; // rank + 1 polynomials per GLWE ciphertext
    // Rescale the LWE ciphertext modulus down to 2N.
    mod_switch_2n(
        2 * lut.domain_size(),
        &mut lwe_2n,
        &lwe_ref,
        lut.rotation_direction(),
    );
    let a: &[i64] = &lwe_2n[1..];
    let b: i64 = lwe_2n[0];
    out_mut.data.zero();
    // Initialize out to X^{b} * LUT(X)
    module.vec_znx_rotate(b, &mut out_mut.data, 0, &lut.data[0], 0);
    let block_size: usize = brk.block_size();
    // ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
    let (mut acc_dft, scratch1) = scratch.take_vec_znx_dft(n_glwe, cols, rows);
    let (mut vmp_res, scratch2) = scratch1.take_vec_znx_dft(n_glwe, cols, brk.size());
    let (mut acc_add_dft, scratch3) = scratch2.take_vec_znx_dft(n_glwe, cols, brk.size());
    let (mut vmp_xai, scratch4) = scratch3.take_vec_znx_dft(n_glwe, 1, brk.size());
    // Precomputed monomials X^{i} in SVP form; only present on keys prepared
    // from a BinaryBlock distribution.
    let x_pow_a: &Vec<SvpPPol<Vec<u8>, B>>;
    if let Some(b) = &brk.x_pow_a {
        x_pow_a = b
    } else {
        panic!("invalid key: x_pow_a has not been initialized")
    }
    izip!(
        a.chunks_exact(block_size),
        brk.data.chunks_exact(block_size)
    )
    .for_each(|(ai, ski)| {
        // acc_dft = DFT(current accumulator); acc_add_dft gathers this block's
        // contribution.
        (0..cols).for_each(|j| {
            module.vec_znx_dft_from_vec_znx(1, 0, &mut acc_dft, j, &out_mut.data, j);
        });
        module.vec_znx_dft_zero(&mut acc_add_dft);
        izip!(ai.iter(), ski.iter()).for_each(|(aii, skii)| {
            // Map a_i into [0, 2n).
            let ai_pos: usize = ((aii + two_n as i64) & (two_n - 1) as i64) as usize;
            // vmp_res = DFT(acc) * BRK[i]
            module.vmp_apply(&mut vmp_res, &acc_dft, skii.data(), scratch4);
            // DFT(X^ai -1) * (DFT(acc) * BRK[i])
            (0..cols).for_each(|i| {
                module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_pos], 0, &vmp_res, i);
                module.vec_znx_dft_add_inplace(&mut acc_add_dft, i, &vmp_xai, 0);
                module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft, i, &vmp_res, i);
            });
        });
        // Leave the DFT domain, add the accumulator back and renormalize.
        {
            let (mut acc_add_big, scratch5) = scratch4.take_vec_znx_big(n_glwe, 1, brk.size());
            (0..cols).for_each(|i| {
                module.vec_znx_dft_to_vec_znx_big(&mut acc_add_big, 0, &acc_add_dft, i, scratch5);
                module.vec_znx_big_add_small_inplace(&mut acc_add_big, 0, &out_mut.data, i);
                module.vec_znx_big_normalize(basek, &mut out_mut.data, i, &acc_add_big, 0, scratch5);
            });
        }
    });
}
/// Standard CGGI blind rotation (`block_size == 1`, `extension_factor == 1`):
/// for each LWE coefficient, acc += (acc x BRK_i) * (X^{ai} - 1) via one GGSW
/// external product per key coefficient, with a single final normalization.
fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
    module: &Module<B>,
    res: &mut GLWECiphertext<DataRes>,
    lwe: &LWECiphertext<DataIn>,
    lut: &LookUpTable,
    brk: &BlindRotationKeyPrepared<DataBrk, CGGI, B>,
    scratch: &mut Scratch<B>,
) where
    DataRes: DataMut,
    DataIn: DataRef,
    DataBrk: DataRef,
    Module<B>: CCGIBlindRotationFamily<B>,
    Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + ScratchAvailable + TakeVecZnx,
{
    // Parameter-consistency checks (debug builds only).
    #[cfg(debug_assertions)]
    {
        assert_eq!(
            res.n(),
            brk.n(),
            "res.n(): {} != brk.n(): {}",
            res.n(),
            brk.n()
        );
        assert_eq!(
            lut.domain_size(),
            brk.n(),
            "lut.n(): {} != brk.n(): {}",
            lut.domain_size(),
            brk.n()
        );
        assert_eq!(
            res.rank(),
            brk.rank(),
            "res.rank(): {} != brk.rank(): {}",
            res.rank(),
            brk.rank()
        );
        assert_eq!(
            lwe.n(),
            brk.data.len(),
            "lwe.n(): {} != brk.data.len(): {}",
            lwe.n(),
            brk.data.len()
        );
    }
    let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
    let mut out_mut: GLWECiphertext<&mut [u8]> = res.to_mut();
    let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();
    let basek: usize = brk.basek();
    // Rescale the LWE ciphertext modulus down to 2N.
    mod_switch_2n(
        2 * lut.domain_size(),
        &mut lwe_2n,
        &lwe_ref,
        lut.rotation_direction(),
    );
    let a: &[i64] = &lwe_2n[1..];
    let b: i64 = lwe_2n[0];
    out_mut.data.zero();
    // Initialize out to X^{b} * LUT(X)
    module.vec_znx_rotate(b, &mut out_mut.data, 0, &lut.data[0], 0);
    // ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
    let (mut acc_tmp, scratch1) = scratch.take_glwe_ct(out_mut.n(), basek, out_mut.k(), out_mut.rank());
    // TODO: see if faster by skipping normalization in external product and keeping acc in big coeffs
    // TODO: first iteration can be optimized to be a gglwe product
    izip!(a.iter(), brk.data.iter()).for_each(|(ai, ski)| {
        // acc_tmp = sk[i] * acc
        acc_tmp.external_product(module, &out_mut, ski, scratch1);
        // acc_tmp = (sk[i] * acc) * (X^{ai} - 1)
        acc_tmp.mul_xp_minus_one_inplace(module, *ai);
        // acc = acc + (sk[i] * acc) * (X^{ai} - 1)
        out_mut.add_inplace(module, &acc_tmp);
    });
    // We can normalize only at the end because we add normalized values in [-2^{basek-1}, 2^{basek-1}]
    // on top of each others, thus ~ 2^{63-basek} additions are supported before overflow.
    out_mut.normalize_inplace(module, scratch1);
}
/// Switches the modulus of `lwe` from `2^{basek * size}` down to `n` (a power
/// of two), writing the recentered/rounded coefficients `[b, a_0, ..]` into
/// `res`.
///
/// With `Left` rotation the coefficients are negated first, so the blind
/// rotation evaluates X^{-dec(lwe)} instead of X^{dec(lwe)}.
pub(crate) fn mod_switch_2n(n: usize, res: &mut [i64], lwe: &LWECiphertext<&[u8]>, rot_dir: LookUpTableRotationDirection) {
    let basek: usize = lwe.basek();
    // For power-of-two n this evaluates to log2(n) + 1.
    // NOTE(review): presumably the extra bit is intentional headroom — confirm
    // against the callers, which all pass n = 2 * lut.domain_size().
    let log2n: usize = usize::BITS as usize - (n - 1).leading_zeros() as usize + 1;
    // Start from the most-significant limb of every coefficient.
    res.copy_from_slice(&lwe.data().at(0, 0));
    match rot_dir {
        LookUpTableRotationDirection::Left => {
            res.iter_mut().for_each(|x| *x = -*x);
        }
        LookUpTableRotationDirection::Right => {}
    }
    if basek > log2n {
        // One limb already carries more precision than needed: round it down.
        let diff: usize = basek - log2n;
        res.iter_mut().for_each(|x| {
            *x = div_round_by_pow2(x, diff);
        })
    } else {
        // Several limbs are needed: fold limbs 1..size in base 2^basek,
        // truncating the last limb so the total precision is exactly log2n bits.
        let rem: usize = basek - (log2n % basek);
        let size: usize = log2n.div_ceil(basek);
        (1..size).for_each(|i| {
            if i == size - 1 && rem != basek {
                let k_rem: usize = basek - rem;
                izip!(lwe.data().at(0, i).iter(), res.iter_mut()).for_each(|(x, y)| {
                    *y = (*y << k_rem) + (x >> rem);
                });
            } else {
                izip!(lwe.data().at(0, i).iter(), res.iter_mut()).for_each(|(x, y)| {
                    *y = (*y << basek) + x;
                });
            }
        })
    }
}
/// Divides `x` by `2^k`, rounding to the nearest integer (ties rounded towards
/// +infinity), using an add-then-arithmetic-shift.
///
/// Handles `k == 0` as the identity: the original expression `1 << (k - 1)`
/// would underflow the shift amount (and panic / wrap) in that case.
#[inline(always)]
fn div_round_by_pow2(x: &i64, k: usize) -> i64 {
    if k == 0 {
        return *x;
    }
    // Adding half the divisor before the floor-division (>>) implements
    // round-to-nearest.
    (x + (1 << (k - 1))) >> k
}

View File

@@ -0,0 +1,188 @@
use backend::hal::{
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VmpPMatAlloc, VmpPMatPrepare, ZnxView, ZnxViewMut,
},
layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch},
};
use sampling::source::Source;
use std::marker::PhantomData;
use core::{
Distribution,
layouts::{
GGSWCiphertext, LWESecret,
compressed::GGSWCiphertextCompressed,
prepared::{GGSWCiphertextPrepared, GLWESecretPrepared},
},
trait_families::GGSWEncryptSkFamily,
};
use crate::tfhe::blind_rotation::{
BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyCompressed, BlindRotationKeyEncryptSk, BlindRotationKeyPrepared,
BlindRotationKeyPreparedAlloc, CGGI,
};
impl BlindRotationKeyAlloc for BlindRotationKey<Vec<u8>, CGGI> {
    /// Allocates a CGGI blind-rotation key holding one GGSW ciphertext per LWE
    /// coefficient, with the distribution left unset (`NONE`).
    fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
        let keys: Vec<GGSWCiphertext<Vec<u8>>> = (0..n_lwe)
            .map(|_| GGSWCiphertext::alloc(n_gglwe, basek, k, rows, 1, rank))
            .collect();
        Self {
            keys,
            dist: Distribution::NONE,
            _phantom: PhantomData,
        }
    }
}
impl BlindRotationKey<Vec<u8>, CGGI> {
    /// Scratch bytes required by `encrypt_sk` for a key with the given
    /// parameters; delegates to the GGSW encryption scratch-space estimate.
    pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
    where
        Module<B>: GGSWEncryptSkFamily<B>,
    {
        GGSWCiphertext::encrypt_sk_scratch_space(module, n, basek, k, rank)
    }
}
impl<D: DataMut, B: Backend> BlindRotationKeyEncryptSk<B> for BlindRotationKey<D, CGGI>
where
    Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
    /// Encrypts each LWE secret coefficient as a GGSW ciphertext under the GLWE
    /// secret, and records the LWE secret distribution on the key.
    fn encrypt_sk<DataSkGLWE, DataSkLWE>(
        &mut self,
        module: &Module<B>,
        sk_glwe: &GLWESecretPrepared<DataSkGLWE, B>,
        sk_lwe: &LWESecret<DataSkLWE>,
        source_xa: &mut Source,
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) where
        DataSkGLWE: DataRef,
        DataSkLWE: DataRef,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(self.keys.len(), sk_lwe.n());
            assert!(sk_glwe.n() <= module.n());
            assert_eq!(sk_glwe.rank(), self.keys[0].rank());
            match sk_lwe.dist() {
                Distribution::BinaryBlock(_)
                | Distribution::BinaryFixed(_)
                | Distribution::BinaryProb(_)
                | Distribution::ZERO => {}
                _ => panic!(
                    "invalid GLWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb (or ZERO for debugging)"
                ),
            }
        }
        self.dist = sk_lwe.dist();
        // One-coefficient plaintext buffer, reused for every GGSW encryption.
        let mut pt_buf: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(sk_glwe.n(), 1);
        let sk_coeffs: ScalarZnx<&[u8]> = sk_lwe.data().to_ref();
        for (i, ggsw) in self.keys.iter_mut().enumerate() {
            // Plaintext = i-th LWE secret coefficient (constant term only).
            pt_buf.at_mut(0, 0)[0] = sk_coeffs.at(0, 0)[i];
            ggsw.encrypt_sk(module, &pt_buf, sk_glwe, source_xa, source_xe, sigma, scratch);
        }
    }
}
impl<B: Backend> BlindRotationKeyPreparedAlloc<B> for BlindRotationKeyPrepared<Vec<u8>, CGGI, B>
where
    Module<B>: VmpPMatAlloc<B> + VmpPMatPrepare<B>,
{
    /// Allocates a prepared CGGI blind-rotation key: one prepared GGSW
    /// ciphertext per LWE coefficient, with no distribution and no monomial
    /// table yet.
    fn alloc(module: &Module<B>, n_glwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
        let ggsws: Vec<GGSWCiphertextPrepared<Vec<u8>, B>> = (0..n_lwe)
            .map(|_| GGSWCiphertextPrepared::alloc(module, n_glwe, basek, k, rows, 1, rank))
            .collect();
        Self {
            data: ggsws,
            dist: Distribution::NONE,
            x_pow_a: None,
            _phantom: PhantomData,
        }
    }
}
impl BlindRotationKeyCompressed<Vec<u8>, CGGI> {
    /// Allocates a compressed (seeded) CGGI blind-rotation key with one
    /// compressed GGSW ciphertext per LWE coefficient.
    pub fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
        let ggsws: Vec<GGSWCiphertextCompressed<Vec<u8>>> = (0..n_lwe)
            .map(|_| GGSWCiphertextCompressed::alloc(n_gglwe, basek, k, rows, 1, rank))
            .collect();
        Self {
            keys: ggsws,
            dist: Distribution::NONE,
            _phantom: PhantomData,
        }
    }
    /// Scratch bytes required by [`Self::encrypt_sk`]; delegates to the
    /// compressed-GGSW encryption scratch-space estimate.
    pub fn generate_from_sk_scratch_space<B: Backend>(module: &Module<B>, n: usize, basek: usize, k: usize, rank: usize) -> usize
    where
        Module<B>: GGSWEncryptSkFamily<B>,
    {
        GGSWCiphertextCompressed::encrypt_sk_scratch_space(module, n, basek, k, rank)
    }
}
impl<D: DataMut> BlindRotationKeyCompressed<D, CGGI> {
    /// Encrypts each LWE secret coefficient as a seeded (compressed) GGSW
    /// ciphertext under the GLWE secret; public randomness is derived from
    /// `seed_xa`, with a fresh sub-seed per ciphertext.
    pub fn encrypt_sk<DataSkGLWE, DataSkLWE, B: Backend>(
        &mut self,
        module: &Module<B>,
        sk_glwe: &GLWESecretPrepared<DataSkGLWE, B>,
        sk_lwe: &LWESecret<DataSkLWE>,
        seed_xa: [u8; 32],
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) where
        DataSkGLWE: DataRef,
        DataSkLWE: DataRef,
        Module<B>: GGSWEncryptSkFamily<B> + VecZnxAddScalarInplace,
        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
    {
        #[cfg(debug_assertions)]
        {
            assert_eq!(self.keys.len(), sk_lwe.n());
            assert!(sk_glwe.n() <= module.n());
            assert_eq!(sk_glwe.rank(), self.keys[0].rank());
            match sk_lwe.dist() {
                Distribution::BinaryBlock(_)
                | Distribution::BinaryFixed(_)
                | Distribution::BinaryProb(_)
                | Distribution::ZERO => {}
                _ => panic!(
                    "invalid GLWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb (or ZERO for debugging)"
                ),
            }
        }
        self.dist = sk_lwe.dist();
        // One-coefficient plaintext buffer, reused across ciphertexts.
        let mut pt_buf: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(sk_glwe.n(), 1);
        let sk_coeffs: ScalarZnx<&[u8]> = sk_lwe.data().to_ref();
        let mut source_xa: Source = Source::new(seed_xa);
        for (i, ggsw) in self.keys.iter_mut().enumerate() {
            // Plaintext = i-th LWE secret coefficient (constant term only).
            pt_buf.at_mut(0, 0)[0] = sk_coeffs.at(0, 0)[i];
            ggsw.encrypt_sk(
                module,
                &pt_buf,
                sk_glwe,
                source_xa.new_seed(),
                source_xe,
                sigma,
                scratch,
            );
        }
    }
}

View File

@@ -0,0 +1,162 @@
use backend::hal::{
api::{FillUniform, Reset},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, Scratch, WriterTo},
};
use sampling::source::Source;
use std::{fmt, marker::PhantomData};
use core::{
Distribution,
layouts::{GGSWCiphertext, Infos, LWESecret, prepared::GLWESecretPrepared},
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crate::tfhe::blind_rotation::BlindRotationAlgo;
/// Allocation of an (unprepared) blind-rotation key.
pub trait BlindRotationKeyAlloc {
    /// Allocates a key of `n_lwe` GGSW ciphertexts over a ring of degree
    /// `n_gglwe`, with decomposition base `basek` (bits), precision `k`,
    /// `rows` decomposition rows and GLWE rank `rank`.
    fn alloc(n_gglwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self;
}
/// Secret-key generation (encryption) of a blind-rotation key.
pub trait BlindRotationKeyEncryptSk<B: Backend> {
    /// Encrypts the LWE secret `sk_lwe` under the prepared GLWE secret
    /// `sk_glwe`, drawing public randomness from `source_xa` and noise from
    /// `source_xe` (`sigma` is presumably the noise standard deviation —
    /// confirm against the GGSW encryption).
    fn encrypt_sk<DataSkGLWE, DataSkLWE>(
        &mut self,
        module: &Module<B>,
        sk_glwe: &GLWESecretPrepared<DataSkGLWE, B>,
        sk_lwe: &LWESecret<DataSkLWE>,
        source_xa: &mut Source,
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) where
        DataSkGLWE: DataRef,
        DataSkLWE: DataRef;
}
/// Blind-rotation key: one GGSW encryption per LWE secret coefficient, tagged
/// with the distribution the LWE secret was sampled from.
#[derive(Clone)]
pub struct BlindRotationKey<D: Data, BRT: BlindRotationAlgo> {
    // GGSW(s_i) for each coefficient s_i of the LWE secret.
    pub(crate) keys: Vec<GGSWCiphertext<D>>,
    // Distribution of the LWE secret; NONE until `encrypt_sk` sets it.
    pub(crate) dist: Distribution,
    // Marker tying the key to a blind-rotation algorithm (e.g. CGGI).
    pub(crate) _phantom: PhantomData<BRT>,
}
impl<D: DataRef, BRT: BlindRotationAlgo> fmt::Debug for BlindRotationKey<D, BRT> {
    /// Debug output delegates to the `Display` implementation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
impl<D: Data, BRT: BlindRotationAlgo> PartialEq for BlindRotationKey<D, BRT> {
    /// Keys are equal when their distributions match and all GGSW ciphertexts
    /// are pairwise equal.
    fn eq(&self, other: &Self) -> bool {
        // `Vec::eq` already compares lengths first and then elements pairwise,
        // so the original manual length check + loop was redundant; PhantomData
        // is zero-sized and always equal, so comparing it carried no information.
        self.dist == other.dist && self.keys == other.keys
    }
}
impl<D: Data, BRT: BlindRotationAlgo> Eq for BlindRotationKey<D, BRT> {}
impl<D: DataRef, BRT: BlindRotationAlgo> fmt::Display for BlindRotationKey<D, BRT> {
    /// Prints every GGSW ciphertext followed by the key distribution.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.keys
            .iter()
            .enumerate()
            .try_for_each(|(i, key)| write!(f, "key[{}]: {}", i, key))?;
        writeln!(f, "{:?}", self.dist)
    }
}
impl<D: DataMut, BRT: BlindRotationAlgo> Reset for BlindRotationKey<D, BRT> {
    /// Clears every GGSW ciphertext and resets the distribution to NONE.
    fn reset(&mut self) {
        for key in self.keys.iter_mut() {
            key.reset();
        }
        self.dist = Distribution::NONE;
    }
}
impl<D: DataMut, BRT: BlindRotationAlgo> FillUniform for BlindRotationKey<D, BRT> {
    /// Fills every GGSW ciphertext with uniform randomness from `source`.
    fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
        for key in self.keys.iter_mut() {
            key.fill_uniform(source);
        }
    }
}
impl<D: DataMut, BRT: BlindRotationAlgo> ReaderFrom for BlindRotationKey<D, BRT> {
    /// Deserializes the key in place: distribution, key count, then each GGSW
    /// ciphertext. The receiver must already be allocated with the same number
    /// of keys as the serialized stream, otherwise `InvalidData` is returned.
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        // `?` replaces the original match that only re-returned the error.
        self.dist = Distribution::read_from(reader)?;
        let len: usize = reader.read_u64::<LittleEndian>()? as usize;
        if self.keys.len() != len {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("self.keys.len()={} != read len={}", self.keys.len(), len),
            ));
        }
        for key in &mut self.keys {
            key.read_from(reader)?;
        }
        Ok(())
    }
}
impl<D: DataRef, BRT: BlindRotationAlgo> WriterTo for BlindRotationKey<D, BRT> {
    /// Serializes the key: distribution, key count (u64 LE), then each GGSW
    /// ciphertext.
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        // `?` replaces the original match that only re-returned the error.
        self.dist.write_to(writer)?;
        writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
        for key in &self.keys {
            key.write_to(writer)?;
        }
        Ok(())
    }
}
impl<D: DataRef, BRT: BlindRotationAlgo> BlindRotationKey<D, BRT> {
    // NOTE: every accessor below reads self.keys[0] and therefore panics if
    // the key holds no ciphertexts.
    /// Ring degree of the underlying GGSW ciphertexts.
    #[allow(dead_code)]
    pub(crate) fn n(&self) -> usize {
        self.keys[0].n()
    }
    /// Number of decomposition rows.
    #[allow(dead_code)]
    pub(crate) fn rows(&self) -> usize {
        self.keys[0].rows()
    }
    /// Precision parameter `k` (stored over `size() = k/basek` limbs).
    #[allow(dead_code)]
    pub(crate) fn k(&self) -> usize {
        self.keys[0].k()
    }
    /// Number of limbs per polynomial.
    #[allow(dead_code)]
    pub(crate) fn size(&self) -> usize {
        self.keys[0].size()
    }
    /// GLWE rank of the GGSW ciphertexts.
    #[allow(dead_code)]
    pub(crate) fn rank(&self) -> usize {
        self.keys[0].rank()
    }
    /// Decomposition base, in bits.
    pub(crate) fn basek(&self) -> usize {
        self.keys[0].basek()
    }
    /// Block size of the LWE secret: the `BinaryBlock` parameter, or 1 for any
    /// other distribution.
    #[allow(dead_code)]
    pub(crate) fn block_size(&self) -> usize {
        match self.dist {
            Distribution::BinaryBlock(value) => value,
            _ => 1,
        }
    }
}

View File

@@ -0,0 +1,141 @@
use backend::hal::{
api::{FillUniform, Reset},
layouts::{Data, DataMut, DataRef, ReaderFrom, WriterTo},
};
use std::{fmt, marker::PhantomData};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use core::{
Distribution,
layouts::{Infos, compressed::GGSWCiphertextCompressed},
};
use crate::tfhe::blind_rotation::BlindRotationAlgo;
/// Compressed (seed-based) blind-rotation key: one compressed GGSW ciphertext
/// per LWE secret coefficient, tagged with the LWE secret distribution.
#[derive(Clone)]
pub struct BlindRotationKeyCompressed<D: Data, BRT: BlindRotationAlgo> {
    // Compressed GGSW(s_i) for each coefficient s_i of the LWE secret.
    pub(crate) keys: Vec<GGSWCiphertextCompressed<D>>,
    // Distribution of the LWE secret; NONE until `encrypt_sk` sets it.
    pub(crate) dist: Distribution,
    // Marker tying the key to a blind-rotation algorithm (e.g. CGGI).
    pub(crate) _phantom: PhantomData<BRT>,
}
impl<D: DataRef, BRT: BlindRotationAlgo> fmt::Debug for BlindRotationKeyCompressed<D, BRT> {
    /// Debug output delegates to the `Display` implementation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
impl<D: Data, BRT: BlindRotationAlgo> PartialEq for BlindRotationKeyCompressed<D, BRT> {
    /// Keys are equal when their distributions match and all compressed GGSW
    /// ciphertexts are pairwise equal.
    fn eq(&self, other: &Self) -> bool {
        // `Vec::eq` compares lengths first, then elements, making the original
        // manual loop redundant; PhantomData is zero-sized and always equal.
        self.dist == other.dist && self.keys == other.keys
    }
}
impl<D: Data, BRT: BlindRotationAlgo> Eq for BlindRotationKeyCompressed<D, BRT> {}
impl<D: DataRef, BRT: BlindRotationAlgo> fmt::Display for BlindRotationKeyCompressed<D, BRT> {
    /// Prints every compressed GGSW ciphertext followed by the distribution.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.keys
            .iter()
            .enumerate()
            .try_for_each(|(i, key)| write!(f, "key[{}]: {}", i, key))?;
        writeln!(f, "{:?}", self.dist)
    }
}
impl<D: DataMut, BRT: BlindRotationAlgo> Reset for BlindRotationKeyCompressed<D, BRT> {
    /// Clears every compressed ciphertext and resets the distribution to NONE.
    fn reset(&mut self) {
        for key in self.keys.iter_mut() {
            key.reset();
        }
        self.dist = Distribution::NONE;
    }
}
impl<D: DataMut, BRT: BlindRotationAlgo> FillUniform for BlindRotationKeyCompressed<D, BRT> {
    /// Fills every compressed ciphertext with uniform randomness from `source`.
    fn fill_uniform(&mut self, source: &mut sampling::source::Source) {
        for key in self.keys.iter_mut() {
            key.fill_uniform(source);
        }
    }
}
impl<D: DataMut, BRT: BlindRotationAlgo> ReaderFrom for BlindRotationKeyCompressed<D, BRT> {
    /// Deserializes the compressed key in place: distribution, key count, then
    /// each ciphertext. The receiver must already be allocated with the same
    /// number of keys as the serialized stream, else `InvalidData` is returned.
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        // `?` replaces the original match that only re-returned the error.
        self.dist = Distribution::read_from(reader)?;
        let len: usize = reader.read_u64::<LittleEndian>()? as usize;
        if self.keys.len() != len {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("self.keys.len()={} != read len={}", self.keys.len(), len),
            ));
        }
        for key in &mut self.keys {
            key.read_from(reader)?;
        }
        Ok(())
    }
}
impl<D: DataRef, BRT: BlindRotationAlgo> WriterTo for BlindRotationKeyCompressed<D, BRT> {
    /// Serializes the compressed key: distribution, key count (u64 LE), then
    /// each ciphertext.
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        // `?` replaces the original match that only re-returned the error.
        self.dist.write_to(writer)?;
        writer.write_u64::<LittleEndian>(self.keys.len() as u64)?;
        for key in &self.keys {
            key.write_to(writer)?;
        }
        Ok(())
    }
}
impl<D: DataRef, BRA: BlindRotationAlgo> BlindRotationKeyCompressed<D, BRA> {
    // NOTE: every accessor below reads self.keys[0] and therefore panics if
    // the key holds no ciphertexts.
    /// Ring degree of the underlying compressed GGSW ciphertexts.
    #[allow(dead_code)]
    pub(crate) fn n(&self) -> usize {
        self.keys[0].n()
    }
    /// Number of decomposition rows.
    #[allow(dead_code)]
    pub(crate) fn rows(&self) -> usize {
        self.keys[0].rows()
    }
    /// Precision parameter `k` (stored over `size() = k/basek` limbs).
    #[allow(dead_code)]
    pub(crate) fn k(&self) -> usize {
        self.keys[0].k()
    }
    /// Number of limbs per polynomial.
    #[allow(dead_code)]
    pub(crate) fn size(&self) -> usize {
        self.keys[0].size()
    }
    /// GLWE rank of the ciphertexts.
    #[allow(dead_code)]
    pub(crate) fn rank(&self) -> usize {
        self.keys[0].rank()
    }
    /// Decomposition base, in bits.
    #[allow(dead_code)]
    pub(crate) fn basek(&self) -> usize {
        self.keys[0].basek()
    }
    /// Block size of the LWE secret: the `BinaryBlock` parameter, or 1 for any
    /// other distribution.
    #[allow(dead_code)]
    pub(crate) fn block_size(&self) -> usize {
        match self.dist {
            Distribution::BinaryBlock(value) => value,
            _ => 1,
        }
    }
}

View File

@@ -0,0 +1,125 @@
use backend::hal::{
api::{SvpPPolAlloc, SvpPrepare, VmpPMatAlloc, VmpPMatPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, ScalarZnx, Scratch, SvpPPol},
};
use std::marker::PhantomData;
use core::{
Distribution,
layouts::{
Infos,
prepared::{GGSWCiphertextPrepared, Prepare, PrepareAlloc},
},
};
use crate::tfhe::blind_rotation::{BlindRotationAlgo, BlindRotationKey, utils::set_xai_plus_y};
/// Allocation of a prepared (backend-ready) blind-rotation key.
pub trait BlindRotationKeyPreparedAlloc<B: Backend> {
    /// Allocates a prepared key of `n_lwe` prepared GGSW ciphertexts over a
    /// ring of degree `n_glwe`, tied to the given backend `module`.
    fn alloc(module: &Module<B>, n_glwe: usize, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self;
}
/// Backend-prepared blind-rotation key: GGSW ciphertexts converted to the
/// backend's evaluation-ready layout, plus (for block-binary keys) a table of
/// precomputed monomials used during the blind rotation.
#[derive(PartialEq, Eq)]
pub struct BlindRotationKeyPrepared<D: Data, BRT: BlindRotationAlgo, B: Backend> {
    // Prepared GGSW(s_i) for each coefficient s_i of the LWE secret.
    pub(crate) data: Vec<GGSWCiphertextPrepared<D, B>>,
    // Distribution of the LWE secret; NONE until `prepare` copies it over.
    pub(crate) dist: Distribution,
    // SVP-prepared monomials X^{i}, i in 0..2n; populated by `prepare` only
    // for BinaryBlock distributions.
    pub(crate) x_pow_a: Option<Vec<SvpPPol<Vec<u8>, B>>>,
    // Marker tying the key to a blind-rotation algorithm (e.g. CGGI).
    pub(crate) _phantom: PhantomData<BRT>,
}
impl<D: Data, BRT: BlindRotationAlgo, B: Backend> BlindRotationKeyPrepared<D, BRT, B> {
    // NOTE: every accessor below reads self.data[0] and therefore panics if
    // the key holds no ciphertexts.
    /// Ring degree of the underlying prepared GGSW ciphertexts.
    #[allow(dead_code)]
    pub(crate) fn n(&self) -> usize {
        self.data[0].n()
    }
    /// Number of decomposition rows.
    #[allow(dead_code)]
    pub(crate) fn rows(&self) -> usize {
        self.data[0].rows()
    }
    /// Precision parameter `k` (stored over `size() = k/basek` limbs).
    #[allow(dead_code)]
    pub(crate) fn k(&self) -> usize {
        self.data[0].k()
    }
    /// Number of limbs per polynomial.
    #[allow(dead_code)]
    pub(crate) fn size(&self) -> usize {
        self.data[0].size()
    }
    /// GLWE rank of the ciphertexts.
    #[allow(dead_code)]
    pub(crate) fn rank(&self) -> usize {
        self.data[0].rank()
    }
    /// Decomposition base, in bits.
    pub(crate) fn basek(&self) -> usize {
        self.data[0].basek()
    }
    /// Block size of the LWE secret: the `BinaryBlock` parameter, or 1 for any
    /// other distribution.
    pub(crate) fn block_size(&self) -> usize {
        match self.dist {
            Distribution::BinaryBlock(value) => value,
            _ => 1,
        }
    }
}
impl<D: DataRef, BRA: BlindRotationAlgo, B: Backend> PrepareAlloc<B, BlindRotationKeyPrepared<Vec<u8>, BRA, B>>
    for BlindRotationKey<D, BRA>
where
    BlindRotationKeyPrepared<Vec<u8>, BRA, B>: BlindRotationKeyPreparedAlloc<B>,
    BlindRotationKeyPrepared<Vec<u8>, BRA, B>: Prepare<B, BlindRotationKey<D, BRA>>,
{
    /// Allocates a prepared key with the same parameters as `self` and fills
    /// it by running the preparation pass.
    fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> BlindRotationKeyPrepared<Vec<u8>, BRA, B> {
        let (ring_degree, n_lwe) = (self.n(), self.keys.len());
        let mut prepared: BlindRotationKeyPrepared<Vec<u8>, BRA, B> = BlindRotationKeyPrepared::alloc(
            module,
            ring_degree,
            n_lwe,
            self.basek(),
            self.k(),
            self.rows(),
            self.rank(),
        );
        prepared.prepare(module, self, scratch);
        prepared
    }
}
impl<DM: DataMut, DR: DataRef, BRA: BlindRotationAlgo, B: Backend> Prepare<B, BlindRotationKey<DR, BRA>>
    for BlindRotationKeyPrepared<DM, BRA, B>
where
    Module<B>: VmpPMatAlloc<B> + VmpPMatPrepare<B> + SvpPPolAlloc<B> + SvpPrepare<B>,
{
    /// Prepares every GGSW ciphertext of `other` into `self`, copies the
    /// distribution over and, for block-binary keys, precomputes the table of
    /// monomials X^{i} (i in 0..2n) needed by the blind rotation.
    fn prepare(&mut self, module: &Module<B>, other: &BlindRotationKey<DR, BRA>, scratch: &mut Scratch<B>) {
        #[cfg(debug_assertions)]
        {
            assert_eq!(self.data.len(), other.keys.len());
        }
        let n: usize = other.n();
        self.data
            .iter_mut()
            .zip(other.keys.iter())
            // Renamed from `other` to avoid shadowing the outer parameter.
            .for_each(|(ggsw_prepared, ggsw)| {
                ggsw_prepared.prepare(module, ggsw, scratch);
            });
        self.dist = other.dist;
        // `if let` replaces the original single-arm match with an empty
        // catch-all (clippy::single_match).
        if let Distribution::BinaryBlock(_) = other.dist {
            let mut buf: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
            let x_pow_a: Vec<SvpPPol<Vec<u8>, B>> = (0..n << 1)
                .map(|i| {
                    let mut res: SvpPPol<Vec<u8>, B> = module.svp_ppol_alloc(n, 1);
                    set_xai_plus_y(module, i, 0, &mut res, &mut buf);
                    res
                })
                .collect();
            self.x_pow_a = Some(x_pow_a);
        }
    }
}

View File

@@ -0,0 +1,197 @@
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace,
VecZnxSwithcDegree, ZnxInfos, ZnxViewMut,
},
layouts::{Backend, Module, ScratchOwned, VecZnx},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
};
/// Direction of the rotation applied to the LUT during the blind rotation:
/// `Left` (the default set by `LookUpTable::alloc`) makes `mod_switch_2n`
/// negate the decomposed LWE coefficients, so X^{-dec(lwe)} is evaluated;
/// `Right` evaluates X^{dec(lwe)}.
#[derive(Debug, Clone, Copy)]
pub enum LookUpTableRotationDirection {
    Left,
    Right,
}
/// Look-up table for the blind rotation, possibly split over several
/// polynomials (the "extension factor") to emulate a ring larger than the
/// backend's degree.
pub struct LookUpTable {
    // LUT coefficients: one polynomial per extension slot.
    pub(crate) data: Vec<VecZnx<Vec<u8>>>,
    // Rotation direction used by the blind rotation (default: Left).
    pub(crate) rot_dir: LookUpTableRotationDirection,
    // Decomposition base (bits) of the LUT values.
    pub(crate) basek: usize,
    // Precision of the LUT values (stored over k/basek limbs).
    pub(crate) k: usize,
    // NOTE(review): presumably a rotation offset used when packing the LUT;
    // only observed initialized to 0 in this chunk — confirm its semantics.
    pub(crate) drift: usize,
}
impl LookUpTable {
    /// Allocates a LUT over `extension_factor` degree-`n` polynomials
    /// (total domain size `n * extension_factor`) with `k` bits of precision
    /// decomposed in base `2^basek`.
    pub fn alloc(n: usize, basek: usize, k: usize, extension_factor: usize) -> Self {
        #[cfg(debug_assertions)]
        {
            assert!(
                extension_factor & (extension_factor - 1) == 0,
                "extension_factor must be a power of two but is: {}",
                extension_factor
            );
        }
        // Number of base-2^basek limbs needed to hold k bits.
        let size: usize = k.div_ceil(basek);
        let mut data: Vec<VecZnx<Vec<u8>>> = Vec::with_capacity(extension_factor);
        (0..extension_factor).for_each(|_| {
            data.push(VecZnx::alloc(n, 1, size));
        });
        Self {
            data,
            basek,
            k,
            drift: 0,
            rot_dir: LookUpTableRotationDirection::Left,
        }
    }

    /// ceil(log2(extension_factor)).
    pub fn log_extension_factor(&self) -> usize {
        (usize::BITS - (self.extension_factor() - 1).leading_zeros()) as _
    }

    /// Number of polynomials the LUT is split over.
    pub fn extension_factor(&self) -> usize {
        self.data.len()
    }

    /// Total number of LUT coefficients: extension_factor * n.
    pub fn domain_size(&self) -> usize {
        self.data.len() * self.data[0].n()
    }

    /// Current rotation sign convention (see [`set_rotation_direction`]).
    pub fn rotation_direction(&self) -> LookUpTableRotationDirection {
        self.rot_dir
    }

    // By default X^{-dec(lwe)} is computed during the blind rotation.
    // Setting [reverse_rotation] to true will reverse the sign of
    // rotation of the LUT by instead evaluating X^{dec(lwe)} during
    // the blind rotation.
    pub fn set_rotation_direction(&mut self, rot_dir: LookUpTableRotationDirection) {
        self.rot_dir = rot_dir
    }

    /// Fills the LUT with the function table `f`, scaled to `k` bits of
    /// precision: each f[i] is replicated over an interval of
    /// `domain_size / f.len()` consecutive coefficients, then the table is
    /// rotated half an interval to the left to center intervals around their
    /// starting index.
    pub fn set<B: Backend>(&mut self, module: &Module<B>, f: &Vec<i64>, k: usize)
    where
        Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
        B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
    {
        assert!(f.len() <= module.n());

        let basek: usize = self.basek;

        // Get the number minimum limb to store the message modulus
        let limbs: usize = k.div_ceil(basek);

        #[cfg(debug_assertions)]
        {
            assert!(f.len() <= module.n());
            assert!(
                (max_bit_size(f) + (k % basek) as u32) < i64::BITS,
                "overflow: max(|f|) << (k%basek) > i64::BITS"
            );
            assert!(limbs <= self.data[0].size());
        }

        // Scaling factor aligning the k-bit values on a limb boundary.
        let mut scale = 1;
        if k % basek != 0 {
            scale <<= basek - (k % basek);
        }

        // #elements in lookup table
        let f_len: usize = f.len();

        // If LUT size > TakeScalarZnx
        let domain_size: usize = self.domain_size();
        let size: usize = self.k.div_ceil(self.basek);

        // Equivalent to AUTO([f(0), -f(n-1), -f(n-2), ..., -f(1)], -1)
        let mut lut_full: VecZnx<Vec<u8>> = VecZnx::alloc(domain_size, 1, size);
        let lut_at: &mut [i64] = lut_full.at_mut(0, limbs - 1);
        // Replicate each scaled f[i] over its interval of `step` coefficients.
        let step: usize = domain_size.div_round(f_len);
        f.iter().enumerate().for_each(|(i, fi)| {
            let start: usize = i * step;
            let end: usize = start + step;
            lut_at[start..end].fill(fi * scale);
        });

        let drift: usize = step >> 1;

        // Rotates half the step to the left
        module.vec_znx_rotate_inplace(-(drift as i64), &mut lut_full, 0);

        let n_large: usize = lut_full.n();

        module.vec_znx_normalize_inplace(
            self.basek,
            &mut lut_full,
            0,
            ScratchOwned::alloc(module.vec_znx_normalize_tmp_bytes(n_large)).borrow(),
        );

        // Split the large LUT across the extension slots: slot i receives the
        // coefficients congruent to i modulo the extension factor.
        if self.extension_factor() > 1 {
            (0..self.extension_factor()).for_each(|i| {
                module.vec_znx_switch_degree(&mut self.data[i], 0, &lut_full, 0);
                // NOTE(review): `i < self.extension_factor()` is always true here;
                // likely intended `i < self.extension_factor() - 1` to skip the
                // final (harmless but redundant) rotation of the temporary.
                if i < self.extension_factor() {
                    module.vec_znx_rotate_inplace(-1, &mut lut_full, 0);
                }
            });
        } else {
            module.vec_znx_copy(&mut self.data[0], 0, &lut_full, 0);
        }

        self.drift = drift
    }

    /// Rotates the LUT by `k` positions over its (possibly extended) domain,
    /// splitting the rotation into a per-polynomial part and a permutation of
    /// the extension slots.
    #[allow(dead_code)]
    pub(crate) fn rotate<B: Backend>(&mut self, module: &Module<B>, k: i64)
    where
        Module<B>: VecZnxRotateInplace,
    {
        let extension_factor: usize = self.extension_factor();
        let two_n: usize = 2 * self.data[0].n();
        let two_n_ext: usize = two_n * extension_factor;

        // Reduce k to [0, 2n * extension_factor).
        // NOTE(review): assumes k >= -2n*extension_factor; a more negative k
        // would stay negative after the single `+ two_n_ext` and wrap
        // incorrectly through the cast — confirm callers respect this.
        let k_pos: usize = ((k + two_n_ext as i64) % two_n_ext as i64) as usize;

        let k_hi: usize = k_pos / extension_factor;
        let k_lo: usize = k_pos % extension_factor;

        // Slots that wrap around the slot permutation pick up one extra unit
        // of per-polynomial rotation.
        (0..extension_factor - k_lo).for_each(|i| {
            module.vec_znx_rotate_inplace(k_hi as i64, &mut self.data[i], 0);
        });
        (extension_factor - k_lo..extension_factor).for_each(|i| {
            module.vec_znx_rotate_inplace(k_hi as i64 + 1, &mut self.data[i], 0);
        });
        self.data.rotate_right(k_lo as usize);
    }
}
/// Integer division rounding to the nearest value (ties round up).
pub(crate) trait DivRound {
    /// Returns `self / rhs` rounded to nearest.
    fn div_round(self, rhs: Self) -> Self;
}

impl DivRound for usize {
    #[inline]
    fn div_round(self, rhs: Self) -> Self {
        // Adding half of the divisor before truncating division rounds to nearest.
        let half: usize = rhs >> 1;
        (self + half) / rhs
    }
}
/// Number of bits needed to represent the largest absolute value in `vec`
/// (0 for an empty or all-zero slice).
fn max_bit_size(vec: &[i64]) -> u32 {
    // 64 - leading_zeros(|v|) equals ilog2(|v|) + 1 for v != 0, and 0 for v == 0.
    vec.iter()
        .map(|&v| u64::BITS - v.unsigned_abs().leading_zeros())
        .max()
        .unwrap_or(0)
}

View File

@@ -0,0 +1,35 @@
mod cggi_algo;
mod cggi_key;
mod key;
mod key_compressed;
mod key_prepared;
mod lut;
mod utils;
pub use cggi_algo::*;
pub use key::*;
pub use key_compressed::*;
pub use key_prepared::*;
pub use lut::*;
pub mod tests;
use backend::hal::layouts::{Backend, DataMut, DataRef, Module, Scratch};
use core::layouts::{GLWECiphertext, LWECiphertext};
/// Marker trait identifying a blind-rotation algorithm (e.g. [`CGGI`]).
pub trait BlindRotationAlgo {}
/// Marker type for the CGGI (TFHE-style) blind-rotation algorithm.
#[derive(Clone)]
pub struct CGGI {}
impl BlindRotationAlgo for CGGI {}
/// Execution of a blind rotation with a prepared key.
// NOTE(review): trait name looks like a typo for `BlindRotationExecute`.
pub trait BlincRotationExecute<B: Backend> {
    /// Rotates `lut` by the (mod-switched) value encrypted in `lwe`,
    /// writing the result into the GLWE ciphertext `res`.
    fn execute<DR: DataMut, DI: DataRef>(
        &self,
        module: &Module<B>,
        res: &mut GLWECiphertext<DR>,
        lwe: &LWECiphertext<DI>,
        lut: &LookUpTable,
        scratch: &mut Scratch<B>,
    );
}

View File

@@ -0,0 +1,160 @@
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxFillUniform, VecZnxRotateInplace,
VecZnxSub, VecZnxSwithcDegree, VmpPMatAlloc, VmpPMatPrepare, ZnxView,
},
layouts::{Backend, Module, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl,
TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl, VecZnxBigAllocBytesImpl, VecZnxDftAllocBytesImpl,
},
};
use sampling::source::Source;
use crate::tfhe::blind_rotation::{
BlincRotationExecute, BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyEncryptSk, BlindRotationKeyPrepared,
CCGIBlindRotationFamily, CGGI, LookUpTable, cggi_blind_rotate_scratch_space, mod_switch_2n,
};
use core::{
layouts::{
GLWECiphertext, GLWEPlaintext, GLWESecret, Infos, LWECiphertext, LWECiphertextToRef, LWEPlaintext, LWESecret,
prepared::{GLWESecretPrepared, PrepareAlloc},
},
trait_families::{GLWEDecryptFamily, GLWESecretPreparedModuleFamily},
};
/// End-to-end blind-rotation test: encrypts an LWE message, blind-rotates a
/// LUT with it, then checks the first limb of the decrypted result against a
/// plaintext rotation of the same LUT.
pub fn test_blind_rotation<B: Backend>(module: &Module<B>, n_lwe: usize, block_size: usize, extension_factor: usize)
where
    Module<B>: CCGIBlindRotationFamily<B>
        + GLWESecretPreparedModuleFamily<B>
        + GLWEDecryptFamily<B>
        + VecZnxFillUniform
        + VecZnxAddNormal
        + VecZnxAddScalarInplace
        + VecZnxRotateInplace
        + VecZnxSwithcDegree
        + VecZnxSub
        + VmpPMatAlloc<B>
        + VmpPMatPrepare<B>,
    B: VecZnxDftAllocBytesImpl<B>
        + VecZnxBigAllocBytesImpl<B>
        + ScratchOwnedAllocImpl<B>
        + ScratchOwnedBorrowImpl<B>
        + TakeVecZnxDftImpl<B>
        + TakeVecZnxBigImpl<B>
        + TakeVecZnxDftSliceImpl<B>
        + ScratchAvailableImpl<B>
        + TakeVecZnxImpl<B>
        + TakeVecZnxSliceImpl<B>,
{
    // Test parameters.
    let n: usize = module.n();
    let basek: usize = 19;
    let k_lwe: usize = 24;
    let k_brk: usize = 3 * basek;
    let rows_brk: usize = 2; // Ensures first limb is noise-free.
    let k_lut: usize = 1 * basek;
    let k_res: usize = 2 * basek;
    let rank: usize = 1;
    let message_modulus: usize = 1 << 4;

    // Deterministic seeds so the test is reproducible.
    let mut source_xs: Source = Source::new([2u8; 32]);
    let mut source_xe: Source = Source::new([2u8; 32]);
    let mut source_xa: Source = Source::new([1u8; 32]);

    let mut scratch: ScratchOwned<B> = ScratchOwned::<B>::alloc(BlindRotationKey::generate_from_sk_scratch_space(
        module, n, basek, k_brk, rank,
    ));

    // GLWE secret (output side) and LWE secret (input side).
    let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
    sk_glwe.fill_ternary_prob(0.5, &mut source_xs);
    let sk_glwe_dft: GLWESecretPrepared<Vec<u8>, B> = sk_glwe.prepare_alloc(module, scratch.borrow());
    let mut sk_lwe: LWESecret<Vec<u8>> = LWESecret::alloc(n_lwe);
    sk_lwe.fill_binary_block(block_size, &mut source_xs);

    let mut scratch_br: ScratchOwned<B> = ScratchOwned::<B>::alloc(cggi_blind_rotate_scratch_space(
        module,
        n,
        block_size,
        extension_factor,
        basek,
        k_res,
        k_brk,
        rows_brk,
        rank,
    ));

    // Generate and encrypt the blind-rotation key under the GLWE secret.
    let mut brk: BlindRotationKey<Vec<u8>, CGGI> =
        BlindRotationKey::<Vec<u8>, CGGI>::alloc(n, n_lwe, basek, k_brk, rows_brk, rank);
    brk.encrypt_sk(
        module,
        &sk_glwe_dft,
        &sk_lwe,
        &mut source_xa,
        &mut source_xe,
        3.2,
        scratch.borrow(),
    );

    // Encrypt the LWE input message x.
    let mut lwe: LWECiphertext<Vec<u8>> = LWECiphertext::alloc(n_lwe, basek, k_lwe);
    let mut pt_lwe: LWEPlaintext<Vec<u8>> = LWEPlaintext::alloc(basek, k_lwe);
    let x: i64 = 2;
    let bits: usize = 8;
    pt_lwe.encode_i64(x, bits);
    lwe.encrypt_sk(
        module,
        &pt_lwe,
        &sk_lwe,
        &mut source_xa,
        &mut source_xe,
        3.2,
    );

    // LUT encoding f(i) = 2i + 1 over the message domain.
    let mut f: Vec<i64> = vec![0i64; message_modulus];
    f.iter_mut()
        .enumerate()
        .for_each(|(i, x)| *x = 2 * (i as i64) + 1);
    let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
    lut.set(module, &f, message_modulus);

    // Blind-rotate with the prepared key and decrypt the result.
    let mut res: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_res, rank);
    let brk_prepared: BlindRotationKeyPrepared<Vec<u8>, CGGI, B> = brk.prepare_alloc(module, scratch.borrow());
    brk_prepared.execute(module, &mut res, &lwe, &lut, scratch_br.borrow());
    let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_res);
    res.decrypt(module, &mut pt_have, &sk_glwe_dft, scratch.borrow());

    // Recompute the expected rotation in the clear: mod-switch the LWE to
    // Z_{2N} and decrypt it with the LWE secret.
    let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
    mod_switch_2n(
        2 * lut.domain_size(),
        &mut lwe_2n,
        &lwe.to_ref(),
        lut.rotation_direction(),
    );
    let pt_want: i64 = (lwe_2n[0]
        + lwe_2n[1..]
            .iter()
            .zip(sk_lwe.raw())
            .map(|(x, y)| x * y)
            .sum::<i64>())
        & (2 * lut.domain_size() - 1) as i64;
    lut.rotate(module, pt_want);

    // First limb should be exactly equal (test are parameterized such that the noise does not reach
    // the first limb)
    assert_eq!(pt_have.data.at(0, 0), lut.data[0].at(0, 0));
}

View File

@@ -0,0 +1,83 @@
use std::vec;
use backend::hal::{
api::{VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSwithcDegree},
layouts::{Backend, Module},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
};
use crate::tfhe::blind_rotation::{DivRound, LookUpTable};
/// Checks LUT construction without extension (one polynomial): after undoing
/// the half-step centering, every coefficient of interval i must decode to f(i).
pub fn test_lut_standard<B: Backend>(module: &Module<B>)
where
    Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
    B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
    let n: usize = module.n();
    let basek: usize = 20;
    let k_lut: usize = 40;
    let message_modulus: usize = 16;
    let extension_factor: usize = 1;
    let log_scale: usize = basek + 1;

    // Test function: f(i) = i - 8 over the message domain.
    let mut f: Vec<i64> = vec![0i64; message_modulus];
    f.iter_mut()
        .enumerate()
        .for_each(|(i, x)| *x = (i as i64) - 8);

    let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
    lut.set(module, &f, log_scale);

    // Undo the half-step centering applied by `set` so that interval i starts
    // exactly at coefficient i * step.
    let half_step: i64 = lut.domain_size().div_round(message_modulus << 1) as i64;
    lut.rotate(module, half_step);

    let step: usize = lut.domain_size().div_round(message_modulus);
    let mut lut_dec: Vec<i64> = vec![0i64; module.n()];
    lut.data[0].decode_vec_i64(basek, 0, log_scale, &mut lut_dec);

    // Every coefficient of interval i must decode to f(i).
    // (Fixed: the inner loop previously discarded its index and re-checked
    // coefficient i `step` times instead of checking i..i+step.)
    (0..lut.domain_size()).step_by(step).for_each(|i| {
        (0..step).for_each(|j| {
            assert_eq!(f[i / step] % message_modulus as i64, lut_dec[i + j]);
        });
    });
}
/// Same check as [`test_lut_standard`] but with the LUT split across an
/// extension factor of 4: each slot polynomial is decoded and every
/// coefficient of each interval must decode to the corresponding f value.
pub fn test_lut_extended<B: Backend>(module: &Module<B>)
where
    Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
    B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
{
    let n: usize = module.n();
    let basek: usize = 20;
    let k_lut: usize = 40;
    let message_modulus: usize = 16;
    let extension_factor: usize = 4;
    let log_scale: usize = basek + 1;

    // Test function: f(i) = i - 8 over the message domain.
    let mut f: Vec<i64> = vec![0i64; message_modulus];
    f.iter_mut()
        .enumerate()
        .for_each(|(i, x)| *x = (i as i64) - 8);

    let mut lut: LookUpTable = LookUpTable::alloc(n, basek, k_lut, extension_factor);
    lut.set(module, &f, log_scale);

    // Undo the half-step centering applied by `set`.
    let half_step: i64 = lut.domain_size().div_round(message_modulus << 1) as i64;
    lut.rotate(module, half_step);

    // Interval width within a single slot polynomial.
    let step: usize = module.n().div_round(message_modulus);
    let mut lut_dec: Vec<i64> = vec![0i64; module.n()];
    (0..extension_factor).for_each(|ext| {
        lut.data[ext].decode_vec_i64(basek, 0, log_scale, &mut lut_dec);
        // (Fixed: the inner loop previously discarded its index and re-checked
        // coefficient i `step` times instead of checking i..i+step.)
        (0..module.n()).step_by(step).for_each(|i| {
            (0..step).for_each(|j| {
                assert_eq!(f[i / step] % message_modulus as i64, lut_dec[i + j]);
            });
        });
    });
}

View File

@@ -0,0 +1,15 @@
use backend::hal::tests::serialization::test_reader_writer_interface;
use crate::tfhe::blind_rotation::{BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyCompressed, CGGI};
#[test]
fn test_cggi_blind_rotation_key_serialization() {
    // Round-trips a CGGI blind-rotation key through the reader/writer
    // interface. Arguments are (n, n_lwe, basek, k, rows, rank).
    let original: BlindRotationKey<Vec<u8>, CGGI> = BlindRotationKey::alloc(256, 64, 12, 54, 2, 2);
    test_reader_writer_interface(original);
}
#[test]
fn test_cggi_blind_rotation_key_compressed_serialization() {
    // Same round-trip for the compressed variant of the key.
    let original: BlindRotationKeyCompressed<Vec<u8>, CGGI> = BlindRotationKeyCompressed::alloc(256, 64, 12, 54, 2, 2);
    test_reader_writer_interface(original);
}

View File

@@ -0,0 +1,39 @@
use backend::{
hal::{api::ModuleNew, layouts::Module},
implementation::cpu_spqlios::FFT64,
};
use crate::tfhe::blind_rotation::tests::{
generic_blind_rotation::test_blind_rotation,
generic_lut::{test_lut_extended, test_lut_standard},
};
/// LUT construction sanity check on a small ring (n = 32).
#[test]
fn lut_standard() {
    let module: Module<FFT64> = Module::<FFT64>::new(32);
    test_lut_standard(&module);
}
/// Same check with an extension factor > 1.
#[test]
fn lut_extended() {
    let module: Module<FFT64> = Module::<FFT64>::new(32);
    test_lut_extended(&module);
}
/// Blind rotation with block size 1 (plain binary secret); args are
/// (module, n_lwe, block_size, extension_factor).
#[test]
fn standard() {
    let module: Module<FFT64> = Module::<FFT64>::new(512);
    test_blind_rotation(&module, 224, 1, 1);
}
/// Blind rotation with a block-binary secret (block size 7).
#[test]
fn block_binary() {
    let module: Module<FFT64> = Module::<FFT64>::new(512);
    test_blind_rotation(&module, 224, 7, 1);
}
/// Block-binary secret combined with LUT extension factor 2.
#[test]
fn block_binary_extended() {
    let module: Module<FFT64> = Module::<FFT64>::new(512);
    test_blind_rotation(&module, 224, 7, 2);
}

View File

@@ -0,0 +1 @@
mod fft64;

View File

@@ -0,0 +1 @@
mod cpu_spqlios;

View File

@@ -0,0 +1,7 @@
#[cfg(test)]
mod generic_serialization;
#[cfg(test)]
mod implementation;
pub mod generic_blind_rotation;
pub mod generic_lut;

View File

@@ -0,0 +1,41 @@
use backend::hal::{
api::{SvpPrepare, ZnxInfos, ZnxViewMut},
layouts::{Backend, DataMut, Module, ScalarZnx, SvpPPol},
};
/// Writes into `res` the prepared (SVP) form of the monomial `X^ai + y`,
/// where `ai` is interpreted modulo 2n with the negacyclic identity
/// X^{n+j} = -X^j. `buf` is a scratch coefficient buffer that is restored to
/// all-zero before returning, so it can be reused across calls.
pub(crate) fn set_xai_plus_y<A, C, B: Backend>(
    module: &Module<B>,
    ai: usize,
    y: i64,
    res: &mut SvpPPol<A, B>,
    buf: &mut ScalarZnx<C>,
) where
    A: DataMut,
    C: DataMut,
    Module<B>: SvpPrepare<B>,
{
    let n: usize = res.n();
    {
        // Encode +/- X^ai (negacyclic wrap for ai >= n) plus the constant y.
        let raw: &mut [i64] = buf.at_mut(0, 0);
        if ai < n {
            raw[ai] = 1;
        } else {
            raw[(ai - n) & (n - 1)] = -1;
        }
        raw[0] += y;
    }
    module.svp_prepare(res, 0, buf, 0);
    {
        // Zero the coefficients that were touched so `buf` is clean for the
        // next call (cheaper than clearing the whole buffer).
        let raw: &mut [i64] = buf.at_mut(0, 0);
        if ai < n {
            raw[ai] = 0;
        } else {
            raw[(ai - n) & (n - 1)] = 0;
        }
        raw[0] = 0;
    }
}

View File

@@ -0,0 +1,397 @@
use std::{collections::HashMap, time::Instant, usize};
use backend::hal::{
api::{
ScratchAvailable, TakeMatZnx, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice,
VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAutomorphismInplace, VecZnxBigSubSmallBInplace, VecZnxCopy,
VecZnxDftCopy, VecZnxDftToVecZnxBigTmpA, VecZnxNegateInplace, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes,
VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace, VecZnxSwithcDegree,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
};
use core::{GLWEOperations, TakeGGLWE, TakeGLWECt, layouts::Infos, trait_families::GLWETraceModuleFamily};
use core::layouts::{GGSWCiphertext, GLWECiphertext, LWECiphertext, prepared::GGLWEAutomorphismKeyPrepared};
use crate::tfhe::{
blind_rotation::{
BlincRotationExecute, BlindRotationAlgo, BlindRotationKeyPrepared, CCGIBlindRotationFamily, LookUpTable,
LookUpTableRotationDirection,
},
circuit_bootstrapping::{CircuitBootstrappingKeyPrepared, CirtuitBootstrappingExecute},
};
/// Trait alias bundling every module capability required by circuit
/// bootstrapping (blind rotation + trace/automorphism post-processing).
pub trait CircuitBootstrapFamily<B: Backend> = VecZnxRotateInplace
    + VecZnxNormalizeInplace<B>
    + VecZnxNormalizeTmpBytes
    + CCGIBlindRotationFamily<B>
    + VecZnxSwithcDegree
    + VecZnxBigAutomorphismInplace<B>
    + VecZnxRshInplace
    + VecZnxDftCopy<B>
    + VecZnxDftToVecZnxBigTmpA<B>
    + VecZnxSub
    + VecZnxAddInplace
    + VecZnxNegateInplace
    + VecZnxCopy
    + VecZnxSubABInplace
    + GLWETraceModuleFamily<B>
    + VecZnxRotateInplace
    + VecZnxAutomorphismInplace
    + VecZnxBigSubSmallBInplace<B>;
impl<D: DataRef, BRA: BlindRotationAlgo, B: Backend> CirtuitBootstrappingExecute<B> for CircuitBootstrappingKeyPrepared<D, BRA, B>
where
    Module<B>: CircuitBootstrapFamily<B>,
    B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
    Scratch<B>: TakeVecZnx
        + TakeVecZnxDftSlice<B>
        + TakeVecZnxBig<B>
        + TakeVecZnxDft<B>
        + TakeMatZnx
        + ScratchAvailable
        + TakeVecZnxSlice,
    BlindRotationKeyPrepared<D, BRA, B>: BlincRotationExecute<B>,
{
    /// Circuit-bootstraps `lwe` into a GGSW with the message in the constant
    /// term; delegates to [`circuit_bootstrap_core`] with `to_exponent = false`.
    fn execute_to_constant<DM: DataMut, DR: DataRef>(
        &self,
        module: &Module<B>,
        res: &mut GGSWCiphertext<DM>,
        lwe: &LWECiphertext<DR>,
        log_domain: usize,
        extension_factor: usize,
        scratch: &mut Scratch<B>,
    ) {
        circuit_bootstrap_core(
            false,
            module,
            0,
            res,
            lwe,
            log_domain,
            extension_factor,
            self,
            scratch,
        );
    }
    /// Circuit-bootstraps `lwe` into a GGSW with the message in the exponent,
    /// output coefficients spaced by 2^`log_gap_out`; delegates to
    /// [`circuit_bootstrap_core`] with `to_exponent = true`.
    fn execute_to_exponent<DM: DataMut, DR: DataRef>(
        &self,
        module: &Module<B>,
        log_gap_out: usize,
        res: &mut GGSWCiphertext<DM>,
        lwe: &LWECiphertext<DR>,
        log_domain: usize,
        extension_factor: usize,
        scratch: &mut Scratch<B>,
    ) {
        circuit_bootstrap_core(
            true,
            module,
            log_gap_out,
            res,
            lwe,
            log_domain,
            extension_factor,
            self,
            scratch,
        );
    }
}
/// Core circuit-bootstrapping routine: blind-rotates `lwe` against a LUT
/// encoding the GGSW decomposition power basis, extracts one GGLWE row per
/// GGSW row (via trace or exponent post-processing), then expands the GGLWE
/// into the output GGSW using the tensor key.
///
/// `to_exponent` selects the encoding of the output: message in the exponent
/// (spaced by 2^`log_gap_out`) when true, message as a constant otherwise.
pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B: Backend>(
    to_exponent: bool,
    module: &Module<B>,
    log_gap_out: usize,
    res: &mut GGSWCiphertext<DRes>,
    lwe: &LWECiphertext<DLwe>,
    log_domain: usize,
    extension_factor: usize,
    key: &CircuitBootstrappingKeyPrepared<DBrk, BRA, B>,
    scratch: &mut Scratch<B>,
) where
    DRes: DataMut,
    DLwe: DataRef,
    DBrk: DataRef,
    Module<B>: CircuitBootstrapFamily<B>,
    B: ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
    Scratch<B>: TakeVecZnxDftSlice<B>
        + TakeVecZnxBig<B>
        + TakeVecZnxDft<B>
        + TakeVecZnx
        + ScratchAvailable
        + TakeVecZnxSlice
        + TakeMatZnx,
    BlindRotationKeyPrepared<DBrk, BRA, B>: BlincRotationExecute<B>,
{
    #[cfg(debug_assertions)]
    {
        assert_eq!(res.n(), key.brk.n());
        assert_eq!(lwe.basek(), key.brk.basek());
        assert_eq!(res.basek(), key.brk.basek());
    }
    let n: usize = res.n();
    let basek: usize = res.basek();
    let rows: usize = res.rows();
    let rank: usize = res.rank();
    let k: usize = res.k();

    // Interval width (in LUT slots) reserved per domain element.
    let alpha: usize = rows.next_power_of_two();

    // LUT values: one scaled power of the decomposition basis per GGSW row
    // (multiplied by the domain value j in the constant-output case).
    let mut f: Vec<i64> = vec![0i64; (1 << log_domain) * alpha];
    if to_exponent {
        (0..rows).for_each(|i| {
            f[i] = 1 << (basek * (rows - 1 - i));
        });
    } else {
        (0..1 << log_domain).for_each(|j| {
            (0..rows).for_each(|i| {
                f[j * alpha + i] = j as i64 * (1 << (basek * (rows - 1 - i)));
            });
        });
    }
    // Lut precision, basically must be able to hold the decomposition power basis of the GGSW
    let mut lut: LookUpTable = LookUpTable::alloc(n, basek, basek * rows, extension_factor);
    lut.set(module, &f, basek * rows);
    if to_exponent {
        // Message-in-exponent requires the opposite rotation sign.
        lut.set_rotation_direction(LookUpTableRotationDirection::Right);
    }
    // TODO: separate GGSW k from output of blind rotation k
    let (mut res_glwe, scratch1) = scratch.take_glwe_ct(n, basek, k, rank);
    let (mut tmp_gglwe, scratch2) = scratch1.take_gglwe(n, basek, k, rows, 1, rank, rank);
    // NOTE(review): debug timing prints left in library code — consider removing.
    let now: Instant = Instant::now();
    key.brk.execute(module, &mut res_glwe, &lwe, &lut, scratch2);
    println!("blind_rotate: {} ms", now.elapsed().as_millis());
    // Coefficient spacing of consecutive LUT values after blind rotation.
    let gap: usize = 2 * lut.drift / lut.extension_factor();
    let log_gap_in: usize = (usize::BITS - (gap * alpha - 1).leading_zeros()) as _;
    (0..rows).for_each(|i| {
        let mut tmp_glwe: GLWECiphertext<&mut [u8]> = tmp_gglwe.at_mut(i, 0);
        if to_exponent {
            let now: Instant = Instant::now();
            // Isolates i-th LUT and moves coefficients according to requested gap.
            post_process(
                module,
                &mut tmp_glwe,
                &res_glwe,
                log_gap_in,
                log_gap_out,
                log_domain,
                &key.atk,
                scratch2,
            );
            println!("post_process: {} ms", now.elapsed().as_millis());
        } else {
            // Constant output: a full trace isolates the constant coefficient.
            tmp_glwe.trace(module, 0, module.log_n(), &res_glwe, &key.atk, scratch2);
        }
        // NOTE(review): `i < rows` is always true inside this loop; likely
        // intended `i < rows - 1` to skip the final (unused) rotation.
        if i < rows {
            res_glwe.rotate_inplace(module, -(gap as i64));
        }
    });
    // Expands GGLWE to GGSW using GGLWE(s^2)
    res.from_gglwe(module, &tmp_gglwe, &key.tsk, scratch2);
}
/// Post-processing for message-in-exponent output: keeps only coefficients of
/// `a` at multiples of 2^`log_gap_in` (partial trace) and, when the output
/// spacing differs, repacks them at spacing 2^`log_gap_out`.
fn post_process<DataRes, DataA, B: Backend>(
    module: &Module<B>,
    res: &mut GLWECiphertext<DataRes>,
    a: &GLWECiphertext<DataA>,
    log_gap_in: usize,
    log_gap_out: usize,
    log_domain: usize,
    auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<Vec<u8>, B>>,
    scratch: &mut Scratch<B>,
) where
    DataRes: DataMut,
    DataA: DataRef,
    Module<B>: CircuitBootstrapFamily<B>,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
    let log_n: usize = module.log_n();
    let mut cts: HashMap<usize, GLWECiphertext<Vec<u8>>> = HashMap::new();
    // First partial trace, vanishes all coefficients which are not multiples of gap_in
    // [1, 1, 1, 1, 0, 0, 0, ..., 0, 0, -1, -1, -1, -1] -> [1, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0]
    res.trace(
        module,
        module.log_n() - log_gap_in as usize + 1,
        log_n,
        &a,
        auto_keys,
        scratch,
    );
    // TODO: optimize with packing and final partial trace
    // If gap_out < gap_in, then we need to repack, i.e. reduce the gap between coefficients.
    if log_gap_in != log_gap_out {
        let steps: i32 = 1 << log_domain;
        // Collect successive rotations of `res` keyed by the coefficient index
        // each value should occupy after packing (i * 2^log_gap_out).
        (0..steps).for_each(|i| {
            if i != 0 {
                res.rotate_inplace(module, -(1 << log_gap_in));
            }
            cts.insert(i as usize * (1 << log_gap_out), res.clone());
        });
        // NOTE(review): debug timing print left in library code.
        let now: Instant = Instant::now();
        pack(module, &mut cts, log_gap_out, auto_keys, scratch);
        println!("pack: {} ms", now.elapsed().as_millis());
        let packed: GLWECiphertext<Vec<u8>> = cts.remove(&0).unwrap();
        // Final partial trace clears residual coefficients between packed slots.
        res.trace(
            module,
            log_n - log_gap_out,
            log_n,
            &packed,
            auto_keys,
            scratch,
        );
    }
}
/// Repacks the ciphertexts of `cts` (keyed by their target coefficient index)
/// into a single ciphertext stored at key 0, combining pairs level by level
/// with automorphisms (log-depth GLWE packing).
pub fn pack<D: DataMut, B: Backend>(
    module: &Module<B>,
    cts: &mut HashMap<usize, GLWECiphertext<D>>,
    log_gap_out: usize,
    auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<Vec<u8>, B>>,
    scratch: &mut Scratch<B>,
) where
    Module<B>: CircuitBootstrapFamily<B>,
    Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
{
    let log_n: usize = module.log_n();
    let basek: usize = cts.get(&0).unwrap().basek();
    let k: usize = cts.get(&0).unwrap().k();
    let rank: usize = cts.get(&0).unwrap().rank();
    (0..log_n - log_gap_out).for_each(|i| {
        // NOTE(review): debug timing print left in library code.
        let now: Instant = Instant::now();
        // Pairing width at this level, capped at 16 — presumably the maximum
        // number of occupied slots from the caller; confirm against post_process.
        let t = 16.min(1 << (log_n - 1 - i));
        // Level 0 uses the g = -1 automorphism key; deeper levels use 5^(2^(i-1)).
        let auto_key: &GGLWEAutomorphismKeyPrepared<Vec<u8>, B>;
        if i == 0 {
            auto_key = auto_keys.get(&-1).unwrap()
        } else {
            auto_key = auto_keys.get(&module.galois_element(1 << (i - 1))).unwrap();
        }
        (0..t).for_each(|j| {
            // Combine slots j and j + t; whichever side survives is stored back at j.
            let mut a: Option<GLWECiphertext<D>> = cts.remove(&j);
            let mut b: Option<GLWECiphertext<D>> = cts.remove(&(j + t));
            combine(
                module,
                basek,
                k,
                rank,
                a.as_mut(),
                b.as_mut(),
                i,
                auto_key,
                scratch,
            );
            if let Some(a) = a {
                cts.insert(j, a);
            } else if let Some(b) = b {
                cts.insert(j, b);
            }
        });
        println!("combine: {} us", now.elapsed().as_micros());
    });
}
/// One packing level of [`pack`]: computes a = a + b*X^t + phi(a - b*X^t)
/// in place (with t = 2^(log_n - i - 1)), handling the cases where either
/// operand is absent. When only `b` is present, the result is written into `b`.
fn combine<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
    module: &Module<B>,
    basek: usize,
    k: usize,
    rank: usize,
    a: Option<&mut GLWECiphertext<A>>,
    b: Option<&mut GLWECiphertext<D>>,
    i: usize,
    auto_key: &GGLWEAutomorphismKeyPrepared<DataAK, B>,
    scratch: &mut Scratch<B>,
) where
    Module<B>: CircuitBootstrapFamily<B>,
    Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
{
    // Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t))
    // We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
    // where t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}
    // Different cases for whether a and/or b are zero.
    //
    // Implicit RSH without modulus switch, introduces extra I(X) * Q/2 on decryption.
    // Necessary so that the scaling of the plaintext remains constant.
    // It however is ok to do so here because coefficients are eventually
    // either mapped to garbage or twice their value which vanishes I(X)
    // since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
    if let Some(a) = a {
        let n: usize = a.n();
        let log_n: usize = (u64::BITS - (n - 1).leading_zeros()) as _;
        let t: i64 = 1 << (log_n - i - 1);
        if let Some(b) = b {
            let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
            // a = a * X^-t
            a.rotate_inplace(module, -t);
            // tmp_b = a * X^-t - b
            tmp_b.sub(module, a, b);
            tmp_b.rsh(module, 1);
            // a = a * X^-t + b
            a.add_inplace(module, b);
            a.rsh(module, 1);
            tmp_b.normalize_inplace(module, scratch_1);
            // tmp_b = phi(a * X^-t - b)
            tmp_b.automorphism_inplace(module, auto_key, scratch_1);
            // a = a * X^-t + b - phi(a * X^-t - b)
            a.sub_inplace_ab(module, &tmp_b);
            a.normalize_inplace(module, scratch_1);
            // a = a + b * X^t - phi(a * X^-t - b) * X^t
            //   = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
            //   = a + b * X^t + phi(a - b * X^t)
            a.rotate_inplace(module, t);
        } else {
            // b absent: a = a + phi(a).
            a.rsh(module, 1);
            // a = a + phi(a)
            a.automorphism_add_inplace(module, auto_key, scratch);
        }
    } else {
        if let Some(b) = b {
            // a absent: result (b*X^t - phi(b*X^t)) is accumulated into b.
            let n: usize = b.n();
            let log_n: usize = (u64::BITS - (n - 1).leading_zeros()) as _;
            let t: i64 = 1 << (log_n - i - 1);
            let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
            tmp_b.rotate(module, t, b);
            tmp_b.rsh(module, 1);
            // a = (b* X^t - phi(b* X^t))
            b.automorphism_sub_ba(module, &tmp_b, auto_key, scratch_1);
        }
    }
}

View File

@@ -0,0 +1,150 @@
use core::layouts::{
GGLWEAutomorphismKey, GGLWETensorKey, GLWECiphertext, GLWESecret, LWESecret,
prepared::{GGLWEAutomorphismKeyPrepared, GGLWETensorKeyPrepared, GLWESecretPrepared, PrepareAlloc},
};
use std::{collections::HashMap, usize};
use backend::hal::{
api::{
ScratchAvailable, TakeScalarZnx, TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddScalarInplace,
VecZnxAutomorphism, VecZnxSwithcDegree, VmpPMatAlloc, VmpPMatPrepare,
},
layouts::{Backend, Data, DataRef, Module, Scratch},
};
use sampling::source::Source;
use core::trait_families::{
GGLWEAutomorphismKeyEncryptSkFamily, GGLWETensorKeyEncryptSkFamily, GGSWEncryptSkFamily, GLWESecretPreparedModuleFamily,
};
use crate::tfhe::blind_rotation::{
BlindRotationAlgo, BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyEncryptSk, BlindRotationKeyPrepared,
};
/// Secret-key generation of a full circuit-bootstrapping key
/// (blind-rotation key + trace automorphism keys + tensor key).
pub trait CircuitBootstrappingKeyEncryptSk<B: Backend> {
    /// Generates the key material from the LWE input secret and the GLWE
    /// output secret.
    ///
    /// * `k_brk` / `rows_brk` — precision and rows of the blind-rotation key.
    /// * `k_trace` / `rows_trace` — precision and rows of the automorphism keys.
    /// * `k_tsk` / `rows_tsk` — precision and rows of the tensor key.
    /// * `sigma` — Gaussian noise standard deviation for all encryptions.
    fn encrypt_sk<DLwe, DGlwe>(
        module: &Module<B>,
        basek: usize,
        sk_lwe: &LWESecret<DLwe>,
        sk_glwe: &GLWESecret<DGlwe>,
        k_brk: usize,
        rows_brk: usize,
        k_trace: usize,
        rows_trace: usize,
        k_tsk: usize,
        rows_tsk: usize,
        source_xa: &mut Source,
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) -> Self
    where
        DLwe: DataRef,
        DGlwe: DataRef;
}
/// Circuit-bootstrapping key material (standard, non-prepared form).
pub struct CircuitBootstrappingKey<D: Data, BRA: BlindRotationAlgo> {
    // Blind-rotation key for the chosen algorithm `BRA`.
    pub(crate) brk: BlindRotationKey<D, BRA>,
    // Tensor key GGLWE(s^2), used to expand GGLWE rows into a GGSW.
    pub(crate) tsk: GGLWETensorKey<Vec<u8>>,
    // Automorphism keys indexed by Galois element, used by the trace.
    pub(crate) atk: HashMap<i64, GGLWEAutomorphismKey<Vec<u8>>>,
}
impl<BRA: BlindRotationAlgo, B: Backend> CircuitBootstrappingKeyEncryptSk<B> for CircuitBootstrappingKey<Vec<u8>, BRA>
where
    BlindRotationKey<Vec<u8>, BRA>: BlindRotationKeyAlloc + BlindRotationKeyEncryptSk<B>,
    Module<B>: GGSWEncryptSkFamily<B>
        + GLWESecretPreparedModuleFamily<B>
        + VecZnxAddScalarInplace
        + GGLWEAutomorphismKeyEncryptSkFamily<B>
        + VecZnxAutomorphism
        + VecZnxSwithcDegree
        + GGLWETensorKeyEncryptSkFamily<B>,
    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeSvpPPol<B> + TakeVecZnxBig<B>,
{
    /// Generates the three component keys in sequence: one automorphism key
    /// per Galois element required by the trace, the blind-rotation key, and
    /// the tensor key.
    fn encrypt_sk<DLwe, DGlwe>(
        module: &Module<B>,
        basek: usize,
        sk_lwe: &LWESecret<DLwe>,
        sk_glwe: &GLWESecret<DGlwe>,
        k_brk: usize,
        rows_brk: usize,
        k_trace: usize,
        rows_trace: usize,
        k_tsk: usize,
        rows_tsk: usize,
        source_xa: &mut Source,
        source_xe: &mut Source,
        sigma: f64,
        scratch: &mut Scratch<B>,
    ) -> Self
    where
        DLwe: DataRef,
        DGlwe: DataRef,
    {
        // Automorphism keys for every Galois element used by the trace.
        let mut auto_keys: HashMap<i64, GGLWEAutomorphismKey<Vec<u8>>> = HashMap::new();
        let gal_els: Vec<i64> = GLWECiphertext::trace_galois_elements(&module);
        gal_els.iter().for_each(|gal_el| {
            let mut key: GGLWEAutomorphismKey<Vec<u8>> =
                GGLWEAutomorphismKey::alloc(sk_glwe.n(), basek, k_trace, rows_trace, 1, sk_glwe.rank());
            key.encrypt_sk(
                &module, *gal_el, &sk_glwe, source_xa, source_xe, sigma, scratch,
            );
            auto_keys.insert(*gal_el, key);
        });
        // Blind-rotation key, encrypted under the prepared GLWE secret.
        let sk_glwe_prepared: GLWESecretPrepared<Vec<u8>, B> = sk_glwe.prepare_alloc(module, scratch);
        let mut brk: BlindRotationKey<Vec<u8>, BRA> = BlindRotationKey::<Vec<u8>, BRA>::alloc(
            sk_glwe.n(),
            sk_lwe.n(),
            basek,
            k_brk,
            rows_brk,
            sk_glwe.rank(),
        );
        brk.encrypt_sk(
            module,
            &sk_glwe_prepared,
            sk_lwe,
            source_xa,
            source_xe,
            sigma,
            scratch,
        );
        // Tensor key used to expand GGLWE rows into a GGSW.
        let mut tsk: GGLWETensorKey<Vec<u8>> = GGLWETensorKey::alloc(sk_glwe.n(), basek, k_tsk, rows_tsk, 1, sk_glwe.rank());
        tsk.encrypt_sk(module, &sk_glwe, source_xa, source_xe, sigma, scratch);
        Self {
            brk,
            atk: auto_keys,
            tsk,
        }
    }
}
/// Circuit-bootstrapping key material in backend-prepared form, ready for
/// execution.
pub struct CircuitBootstrappingKeyPrepared<D: Data, BRA: BlindRotationAlgo, B: Backend> {
    // Prepared blind-rotation key.
    pub(crate) brk: BlindRotationKeyPrepared<D, BRA, B>,
    // Prepared tensor key GGLWE(s^2).
    pub(crate) tsk: GGLWETensorKeyPrepared<Vec<u8>, B>,
    // Prepared automorphism keys indexed by Galois element.
    pub(crate) atk: HashMap<i64, GGLWEAutomorphismKeyPrepared<Vec<u8>, B>>,
}
impl<D: DataRef, BRA: BlindRotationAlgo, B: Backend> PrepareAlloc<B, CircuitBootstrappingKeyPrepared<Vec<u8>, BRA, B>>
    for CircuitBootstrappingKey<D, BRA>
where
    Module<B>: VmpPMatAlloc<B> + VmpPMatPrepare<B>,
    BlindRotationKey<D, BRA>: PrepareAlloc<B, BlindRotationKeyPrepared<Vec<u8>, BRA, B>>,
    GGLWETensorKey<D>: PrepareAlloc<B, GGLWETensorKeyPrepared<Vec<u8>, B>>,
    GGLWEAutomorphismKey<D>: PrepareAlloc<B, GGLWEAutomorphismKeyPrepared<Vec<u8>, B>>,
{
    /// Converts every component key (blind-rotation, tensor and automorphism
    /// keys) into its backend-prepared form.
    fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> CircuitBootstrappingKeyPrepared<Vec<u8>, BRA, B> {
        CircuitBootstrappingKeyPrepared {
            brk: self.brk.prepare_alloc(module, scratch),
            tsk: self.tsk.prepare_alloc(module, scratch),
            atk: self
                .atk
                .iter()
                .map(|(gal_el, key)| (*gal_el, key.prepare_alloc(module, scratch)))
                .collect(),
        }
    }
}

View File

@@ -0,0 +1,33 @@
mod circuit_bootstrapping;
mod key;
pub mod tests;
pub use circuit_bootstrapping::*;
pub use key::*;
use core::layouts::{GGSWCiphertext, LWECiphertext};
use backend::hal::layouts::{Backend, DataMut, DataRef, Module, Scratch};
/// Execution of a circuit bootstrapping: converts an LWE ciphertext into a
/// GGSW ciphertext.
// NOTE(review): trait name looks like a typo for `CircuitBootstrappingExecute`.
pub trait CirtuitBootstrappingExecute<B: Backend> {
    /// Produces a GGSW with the (log_domain-bit) message encoded as a constant.
    fn execute_to_constant<DM: DataMut, DR: DataRef>(
        &self,
        module: &Module<B>,
        res: &mut GGSWCiphertext<DM>,
        lwe: &LWECiphertext<DR>,
        log_domain: usize,
        extension_factor: usize,
        scratch: &mut Scratch<B>,
    );
    /// Produces a GGSW with the message encoded in the exponent, output
    /// coefficients spaced by 2^`log_gap_out`.
    fn execute_to_exponent<DM: DataMut, DR: DataRef>(
        &self,
        module: &Module<B>,
        log_gap_out: usize,
        res: &mut GGSWCiphertext<DM>,
        lwe: &LWECiphertext<DR>,
        log_domain: usize,
        extension_factor: usize,
        scratch: &mut Scratch<B>,
    );
}
}

View File

@@ -0,0 +1,355 @@
use std::time::Instant;
use backend::hal::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, SvpPPolAlloc, SvpPrepare, VecZnxAddNormal, VecZnxAddScalarInplace,
VecZnxAutomorphism, VecZnxFillUniform, VecZnxNormalizeInplace, VecZnxRotateInplace, VecZnxSwithcDegree, VmpPMatAlloc,
VmpPMatPrepare, ZnxView, ZnxViewMut,
},
layouts::{Backend, Module, ScalarZnx, ScratchOwned},
oep::{
ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeMatZnxImpl, TakeScalarZnxImpl, TakeSvpPPolImpl,
TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl, TakeVecZnxImpl, TakeVecZnxSliceImpl,
},
};
use sampling::source::Source;
use crate::tfhe::{
blind_rotation::{
BlincRotationExecute, BlindRotationAlgo, BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyEncryptSk,
BlindRotationKeyPrepared,
},
circuit_bootstrapping::{
CircuitBootstrapFamily, CircuitBootstrappingKey, CircuitBootstrappingKeyEncryptSk, CircuitBootstrappingKeyPrepared,
CirtuitBootstrappingExecute,
},
};
use core::{
layouts::prepared::PrepareAlloc,
trait_families::{
GGLWEAutomorphismKeyEncryptSkFamily, GGLWETensorKeyEncryptSkFamily, GGSWAssertNoiseFamily, GGSWEncryptSkFamily,
GLWEDecryptFamily,
},
};
use core::layouts::{
GGSWCiphertext, GLWECiphertext, GLWEPlaintext, GLWESecret, LWECiphertext, LWEPlaintext, LWESecret,
prepared::{GGSWCiphertextPrepared, GLWESecretPrepared},
};
/// End-to-end test of LWE -> GGSW circuit bootstrapping in "exponent" mode:
/// the LWE plaintext `data` ends up encoded as the monomial
/// X^{data * 2^log_gap_out} inside the output GGSW ciphertext.
///
/// The result is validated functionally: the bootstrapped GGSW is used in an
/// external product against a fresh GLWE ciphertext, and decryption must yield
/// the plaintext rotated by `data * 2^log_gap_out` (parameters are chosen so
/// the first limb is noiseless, allowing an exact `assert_eq!`).
pub fn test_circuit_bootstrapping_to_exponent<B: Backend, BRA: BlindRotationAlgo>(module: &Module<B>)
where
    // Ring/backend operations needed for key generation, encryption,
    // the circuit bootstrapping itself, and the noise diagnostics below.
    Module<B>: VecZnxFillUniform
        + VecZnxAddNormal
        + VecZnxNormalizeInplace<B>
        + GGSWEncryptSkFamily<B>
        + VecZnxAddScalarInplace
        + GGLWEAutomorphismKeyEncryptSkFamily<B>
        + VecZnxAutomorphism
        + VecZnxSwithcDegree
        + GGLWETensorKeyEncryptSkFamily<B>
        + CircuitBootstrapFamily<B>
        + GLWEDecryptFamily<B>
        + GGSWAssertNoiseFamily<B>
        + VmpPMatAlloc<B>
        + VmpPMatPrepare<B>
        + SvpPrepare<B>
        + SvpPPolAlloc<B>,
    // Scratch-space allocation/borrowing capabilities required of the backend.
    B: ScratchOwnedAllocImpl<B>
        + ScratchOwnedBorrowImpl<B>
        + TakeVecZnxDftImpl<B>
        + ScratchAvailableImpl<B>
        + TakeVecZnxImpl<B>
        + TakeScalarZnxImpl<B>
        + TakeSvpPPolImpl<B>
        + TakeVecZnxBigImpl<B>
        + TakeVecZnxDftSliceImpl<B>
        + TakeMatZnxImpl<B>
        + TakeVecZnxSliceImpl<B>,
    // The chosen blind-rotation algorithm `BRA` must support key allocation,
    // secret-key encryption, preparation, and execution.
    BlindRotationKey<Vec<u8>, BRA>: PrepareAlloc<B, BlindRotationKeyPrepared<Vec<u8>, BRA, B>>,
    BlindRotationKeyPrepared<Vec<u8>, BRA, B>: BlincRotationExecute<B>,
    BlindRotationKey<Vec<u8>, BRA>: BlindRotationKeyAlloc + BlindRotationKeyEncryptSk<B>,
{
    // ---- Test parameters ----
    let n: usize = module.n(); // GLWE ring degree
    let basek: usize = 17; // bits per decomposition limb
    let extension_factor: usize = 1;
    let rank: usize = 1; // GLWE rank
    let sigma: f64 = 3.2; // Gaussian noise standard deviation
    let n_lwe: usize = 77; // LWE dimension
    let k_lwe_pt: usize = 4; // LWE plaintext precision (bits)
    let k_lwe_ct: usize = 22; // LWE ciphertext precision (bits)
    let block_size: usize = 7; // block-binary secret parameter
    let k_brk: usize = 5 * basek; // blind-rotation key precision
    let rows_brk: usize = 4;
    let k_trace: usize = 5 * basek; // trace (automorphism) key precision
    let rows_trace: usize = 4;
    let k_tsk: usize = 5 * basek; // tensor key precision
    let rows_tsk: usize = 4;

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(1 << 23);

    // NOTE(review): all three RNG sources share the seed [1u8; 32] — presumably
    // fine for a deterministic test, but distinct seeds would be safer; confirm.
    let mut source_xs: Source = Source::new([1u8; 32]);
    let mut source_xa: Source = Source::new([1u8; 32]);
    let mut source_xe: Source = Source::new([1u8; 32]);

    // Sample the LWE (block-binary) and GLWE (ternary) secrets; prepare the
    // GLWE secret for backend use.
    let mut sk_lwe: LWESecret<Vec<u8>> = LWESecret::alloc(n_lwe);
    sk_lwe.fill_binary_block(block_size, &mut source_xs);
    let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
    sk_glwe.fill_ternary_prob(0.5, &mut source_xs);
    let sk_glwe_prepared: GLWESecretPrepared<Vec<u8>, B> = sk_glwe.prepare_alloc(module, scratch.borrow());

    // Encode and encrypt the test message under the LWE secret.
    let data: i64 = 1;
    let mut pt_lwe: LWEPlaintext<Vec<u8>> = LWEPlaintext::alloc(basek, k_lwe_pt);
    pt_lwe.encode_i64(data, k_lwe_pt + 2);
    println!("pt_lwe: {}", pt_lwe);
    let mut ct_lwe: LWECiphertext<Vec<u8>> = LWECiphertext::alloc(n_lwe, basek, k_lwe_ct);
    ct_lwe.encrypt_sk(
        module,
        &pt_lwe,
        &sk_lwe,
        &mut source_xa,
        &mut source_xe,
        sigma,
    );

    // Generate the full circuit-bootstrapping key material (blind-rotation,
    // trace, and tensor keys) and report the wall-clock time.
    let now: Instant = Instant::now();
    let cbt_key: CircuitBootstrappingKey<Vec<u8>, BRA> = CircuitBootstrappingKey::encrypt_sk(
        module,
        basek,
        &sk_lwe,
        &sk_glwe,
        k_brk,
        rows_brk,
        k_trace,
        rows_trace,
        k_tsk,
        rows_tsk,
        &mut source_xa,
        &mut source_xe,
        sigma,
        scratch.borrow(),
    );
    println!("CBT-KGEN: {} ms", now.elapsed().as_millis());

    // Circuit bootstrap: LWE(data) -> GGSW(X^{data * 2^log_gap_out}).
    let k_ggsw_res: usize = 4 * basek;
    let rows_ggsw_res: usize = 2;
    let mut res: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw_res, rows_ggsw_res, 1, rank);
    let log_gap_out = 1;
    let cbt_prepared: CircuitBootstrappingKeyPrepared<Vec<u8>, BRA, B> = cbt_key.prepare_alloc(module, scratch.borrow());
    let now: Instant = Instant::now();
    cbt_prepared.execute_to_exponent(
        module,
        log_gap_out,
        &mut res,
        &ct_lwe,
        k_lwe_pt,
        extension_factor,
        scratch.borrow(),
    );
    println!("CBT: {} ms", now.elapsed().as_millis());

    // Build the expected GGSW plaintext, the monomial X^{data * 2^log_gap_out},
    // and print the noise of `res` against it.
    let mut pt_ggsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
    pt_ggsw.at_mut(0, 0)[0] = 1;
    module.vec_znx_rotate_inplace(data * (1 << log_gap_out), &mut pt_ggsw.as_vec_znx_mut(), 0);
    res.print_noise(module, &sk_glwe_prepared, &pt_ggsw);

    // Functional check: external product of a fresh GLWE ciphertext with the
    // bootstrapped GGSW must rotate the plaintext by data * 2^log_gap_out.
    let k_glwe: usize = k_ggsw_res;
    let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_glwe, rank);
    let mut pt_glwe: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, basek);
    pt_glwe.data.at_mut(0, 0)[0] = 1 << (basek - 2);
    ct_glwe.encrypt_sk(
        module,
        &pt_glwe,
        &sk_glwe_prepared,
        &mut source_xa,
        &mut source_xe,
        sigma,
        scratch.borrow(),
    );
    let res_prepared: GGSWCiphertextPrepared<Vec<u8>, B> = res.prepare_alloc(module, scratch.borrow());
    ct_glwe.external_product_inplace(module, &res_prepared, scratch.borrow());
    let mut pt_res: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_glwe);
    ct_glwe.decrypt(module, &mut pt_res, &sk_glwe_prepared, scratch.borrow());

    // Parameters are set such that the first limb should be noiseless.
    let mut pt_want: Vec<i64> = vec![0i64; module.n()];
    pt_want[data as usize * (1 << log_gap_out)] = pt_glwe.data.at(0, 0)[0];
    assert_eq!(pt_res.data.at(0, 0), pt_want);
}
/// End-to-end test of LWE -> GGSW circuit bootstrapping in "constant" mode:
/// the LWE plaintext `data` ends up as the constant coefficient of the output
/// GGSW ciphertext (no monomial/exponent encoding).
///
/// The result is validated functionally: the bootstrapped GGSW is used in an
/// external product against a fresh GLWE ciphertext, and decryption must yield
/// the plaintext scaled by `data` in coefficient 0 (parameters are chosen so
/// the first limb is noiseless, allowing an exact `assert_eq!`).
pub fn test_circuit_bootstrapping_to_constant<B: Backend, BRA: BlindRotationAlgo>(module: &Module<B>)
where
    // Ring/backend operations needed for key generation, encryption,
    // the circuit bootstrapping itself, and the noise diagnostics below.
    Module<B>: VecZnxFillUniform
        + VecZnxAddNormal
        + VecZnxNormalizeInplace<B>
        + GGSWEncryptSkFamily<B>
        + VecZnxAddScalarInplace
        + GGLWEAutomorphismKeyEncryptSkFamily<B>
        + VecZnxAutomorphism
        + VecZnxSwithcDegree
        + GGLWETensorKeyEncryptSkFamily<B>
        + CircuitBootstrapFamily<B>
        + GLWEDecryptFamily<B>
        + GGSWAssertNoiseFamily<B>
        + VmpPMatAlloc<B>
        + VmpPMatPrepare<B>
        + SvpPrepare<B>
        + SvpPPolAlloc<B>,
    // Scratch-space allocation/borrowing capabilities required of the backend.
    B: ScratchOwnedAllocImpl<B>
        + ScratchOwnedBorrowImpl<B>
        + TakeVecZnxDftImpl<B>
        + ScratchAvailableImpl<B>
        + TakeVecZnxImpl<B>
        + TakeScalarZnxImpl<B>
        + TakeSvpPPolImpl<B>
        + TakeVecZnxBigImpl<B>
        + TakeVecZnxDftSliceImpl<B>
        + TakeMatZnxImpl<B>
        + TakeVecZnxSliceImpl<B>,
    // The chosen blind-rotation algorithm `BRA` must support key allocation,
    // secret-key encryption, preparation, and execution.
    BlindRotationKey<Vec<u8>, BRA>: PrepareAlloc<B, BlindRotationKeyPrepared<Vec<u8>, BRA, B>>,
    BlindRotationKeyPrepared<Vec<u8>, BRA, B>: BlincRotationExecute<B>,
    BlindRotationKey<Vec<u8>, BRA>: BlindRotationKeyAlloc + BlindRotationKeyEncryptSk<B>,
{
    // ---- Test parameters (note: smaller basek / k_lwe_pt than the
    // to_exponent test, and rank 2) ----
    let n = module.n(); // GLWE ring degree
    let basek: usize = 14; // bits per decomposition limb
    let extension_factor: usize = 1;
    let rank: usize = 2; // GLWE rank
    let sigma: f64 = 3.2; // Gaussian noise standard deviation
    let n_lwe: usize = 77; // LWE dimension
    let k_lwe_pt: usize = 1; // LWE plaintext precision (bits)
    let k_lwe_ct: usize = 13; // LWE ciphertext precision (bits)
    let block_size: usize = 7; // block-binary secret parameter
    let k_brk: usize = 5 * basek; // blind-rotation key precision
    let rows_brk: usize = 3;
    let k_trace: usize = 5 * basek; // trace (automorphism) key precision
    let rows_trace: usize = 4;
    let k_tsk: usize = 5 * basek; // tensor key precision
    let rows_tsk: usize = 4;

    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(1 << 23);

    // NOTE(review): all three RNG sources share the seed [1u8; 32] — presumably
    // fine for a deterministic test, but distinct seeds would be safer; confirm.
    let mut source_xs: Source = Source::new([1u8; 32]);
    let mut source_xa: Source = Source::new([1u8; 32]);
    let mut source_xe: Source = Source::new([1u8; 32]);

    // Sample the LWE (block-binary) and GLWE (ternary) secrets; prepare the
    // GLWE secret for backend use.
    let mut sk_lwe: LWESecret<Vec<u8>> = LWESecret::alloc(n_lwe);
    sk_lwe.fill_binary_block(block_size, &mut source_xs);
    let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
    sk_glwe.fill_ternary_prob(0.5, &mut source_xs);
    let sk_glwe_prepared: GLWESecretPrepared<Vec<u8>, B> = sk_glwe.prepare_alloc(module, scratch.borrow());

    // Encode and encrypt the test message under the LWE secret.
    let data: i64 = 1;
    let mut pt_lwe: LWEPlaintext<Vec<u8>> = LWEPlaintext::alloc(basek, k_lwe_pt);
    pt_lwe.encode_i64(data, k_lwe_pt + 2);
    println!("pt_lwe: {}", pt_lwe);
    let mut ct_lwe: LWECiphertext<Vec<u8>> = LWECiphertext::alloc(n_lwe, basek, k_lwe_ct);
    ct_lwe.encrypt_sk(
        module,
        &pt_lwe,
        &sk_lwe,
        &mut source_xa,
        &mut source_xe,
        sigma,
    );

    // Generate the full circuit-bootstrapping key material (blind-rotation,
    // trace, and tensor keys) and report the wall-clock time.
    let now: Instant = Instant::now();
    let cbt_key: CircuitBootstrappingKey<Vec<u8>, BRA> = CircuitBootstrappingKey::encrypt_sk(
        module,
        basek,
        &sk_lwe,
        &sk_glwe,
        k_brk,
        rows_brk,
        k_trace,
        rows_trace,
        k_tsk,
        rows_tsk,
        &mut source_xa,
        &mut source_xe,
        sigma,
        scratch.borrow(),
    );
    println!("CBT-KGEN: {} ms", now.elapsed().as_millis());

    // Circuit bootstrap: LWE(data) -> GGSW(data) (message in coefficient 0).
    let k_ggsw_res: usize = 4 * basek;
    let rows_ggsw_res: usize = 3;
    let mut res: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw_res, rows_ggsw_res, 1, rank);
    let cbt_prepared: CircuitBootstrappingKeyPrepared<Vec<u8>, BRA, B> = cbt_key.prepare_alloc(module, scratch.borrow());
    let now: Instant = Instant::now();
    cbt_prepared.execute_to_constant(
        module,
        &mut res,
        &ct_lwe,
        k_lwe_pt,
        extension_factor,
        scratch.borrow(),
    );
    println!("CBT: {} ms", now.elapsed().as_millis());

    // Build the expected GGSW plaintext: the constant polynomial `data`
    // (coefficient 0 only), and print the noise of `res` against it.
    let mut pt_ggsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
    pt_ggsw.at_mut(0, 0)[0] = data;
    res.print_noise(module, &sk_glwe_prepared, &pt_ggsw);

    // Functional check: external product of a fresh GLWE ciphertext with the
    // bootstrapped GGSW must scale the plaintext by `data`.
    let k_glwe: usize = k_ggsw_res;
    let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_glwe, rank);
    let mut pt_glwe: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, basek);
    pt_glwe.data.at_mut(0, 0)[0] = 1 << (basek - k_lwe_pt - 1);
    ct_glwe.encrypt_sk(
        module,
        &pt_glwe,
        &sk_glwe_prepared,
        &mut source_xa,
        &mut source_xe,
        sigma,
        scratch.borrow(),
    );
    let res_prepared: GGSWCiphertextPrepared<Vec<u8>, B> = res.prepare_alloc(module, scratch.borrow());
    ct_glwe.external_product_inplace(module, &res_prepared, scratch.borrow());
    let mut pt_res: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_glwe);
    ct_glwe.decrypt(module, &mut pt_res, &sk_glwe_prepared, scratch.borrow());

    // Parameters are set such that the first limb should be noiseless.
    let mut pt_want: Vec<i64> = vec![0i64; module.n()];
    pt_want[0] = pt_glwe.data.at(0, 0)[0] * data;
    assert_eq!(pt_res.data.at(0, 0), pt_want);
}

View File

@@ -0,0 +1,23 @@
use backend::{
hal::{api::ModuleNew, layouts::Module},
implementation::cpu_spqlios::FFT64,
};
use crate::tfhe::{
blind_rotation::CGGI,
circuit_bootstrapping::tests::circuit_bootstrapping::{
test_circuit_bootstrapping_to_constant, test_circuit_bootstrapping_to_exponent,
},
};
/// Runs the "to constant" circuit-bootstrapping test on the FFT64 CPU
/// backend (ring degree 256) with the CGGI blind-rotation algorithm.
#[test]
fn test_to_constant() {
    test_circuit_bootstrapping_to_constant::<FFT64, CGGI>(&Module::<FFT64>::new(256));
}
/// Runs the "to exponent" circuit-bootstrapping test on the FFT64 CPU
/// backend (ring degree 256) with the CGGI blind-rotation algorithm.
#[test]
fn test_to_exponent() {
    test_circuit_bootstrapping_to_exponent::<FFT64, CGGI>(&Module::<FFT64>::new(256));
}

View File

@@ -0,0 +1 @@
mod fft64;

View File

@@ -0,0 +1 @@
mod cpu_spqlios;

View File

@@ -0,0 +1,4 @@
pub mod circuit_bootstrapping;
#[cfg(test)]
mod implementation;

2
schemes/src/tfhe/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod blind_rotation;
pub mod circuit_bootstrapping;