Mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 05:06:44 +01:00)
Ref. + AVX code & generic tests + benches (#85)
Committed by GitHub · parent 99b9e3e10e · commit 56dbd29c59
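
This commit is a rename/refactor pass over the TFHE blind-rotation and circuit-bootstrapping code: the spqlios CPU backend marker FFT64 becomes FFT64Spqlios; the DFT/IDFT/IDFTConsume/IDFTTmpA trait family becomes VecZnxDftApply/VecZnxIdftApply/VecZnxIdftApplyConsume/VecZnxIdftApplyTmpA; SvpApply* becomes SvpApplyDftToDft*; VecZnxSwithcDegree becomes VecZnxSwitchRing; several in-place rotate/shift helpers gain an explicit scratch argument; and scratch bindings are renamed scratch1 -> scratch_1, etc. A minimal sketch of the renamed entry points, distilled from the example hunks below (the type and trait paths are as shown in the diff; the surrounding program is an assumption, not repository code):

    use poulpy_backend::cpu_spqlios::FFT64Spqlios;
    use poulpy_hal::{
        api::{ModuleNew, ScratchOwnedAlloc},
        layouts::{Module, ScratchOwned},
    };

    fn main() {
        // Backend marker renamed: FFT64 -> FFT64Spqlios.
        let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1024);
        // 4MB of scratch space, as in the circuit-bootstrapping example below.
        let _scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(1 << 22);
        let _ = module;
    }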
@@ -13,7 +13,7 @@ use poulpy_hal::{
     source::Source,
 };
 
-use poulpy_backend::cpu_spqlios::FFT64;
+use poulpy_backend::cpu_spqlios::FFT64Spqlios;
 
 use poulpy_schemes::tfhe::{
     blind_rotation::CGGI,
@@ -27,7 +27,7 @@ fn main() {
     let n_glwe: usize = 1024;
 
     // Module provides access to the backend arithmetic
-    let module: Module<FFT64> = Module::<FFT64>::new(n_glwe as u64);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(n_glwe as u64);
 
     // Base 2 logarithm
     let basek: usize = 13;
@@ -75,7 +75,7 @@ fn main() {
     let k_tsk: usize = (rows_tsk + 1) * basek;
 
     // Scratch space (4MB)
-    let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(1 << 22);
+    let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(1 << 22);
 
     // Secret key sampling source
     let mut source_xs: Source = Source::new([1u8; 32]);
@@ -97,7 +97,7 @@ fn main() {
     // sk_glwe.fill_zero();
 
     // GLWE secret prepared (opaque backend-dependent write-only struct)
-    let sk_glwe_prepared: GLWESecretPrepared<Vec<u8>, FFT64> = sk_glwe.prepare_alloc(&module, scratch.borrow());
+    let sk_glwe_prepared: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_glwe.prepare_alloc(&module, scratch.borrow());
 
     // Plaintext value to circuit bootstrap
     let data: i64 = 1 % (1 << k_lwe_pt);
@@ -142,7 +142,8 @@ fn main() {
     let mut res: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n_glwe, basek, k_ggsw_res, rows_ggsw_res, 1, rank);
 
     // Circuit bootstrapping key prepared (opaque backend-dependent write-only struct)
-    let cbt_prepared: CircuitBootstrappingKeyPrepared<Vec<u8>, CGGI, FFT64> = cbt_key.prepare_alloc(&module, scratch.borrow());
+    let cbt_prepared: CircuitBootstrappingKeyPrepared<Vec<u8>, CGGI, FFT64Spqlios> =
+        cbt_key.prepare_alloc(&module, scratch.borrow());
 
     // Apply circuit bootstrapping: LWE(data * 2^{-(k_lwe_pt + 2)}) -> GGSW(data)
     let now: Instant = Instant::now();
@@ -193,7 +194,7 @@ fn main() {
     );
 
     // Prepare GGSW output of circuit bootstrapping (opaque backend-dependent write-only struct)
-    let res_prepared: GGSWCiphertextPrepared<Vec<u8>, FFT64> = res.prepare_alloc(&module, scratch.borrow());
+    let res_prepared: GGSWCiphertextPrepared<Vec<u8>, FFT64Spqlios> = res.prepare_alloc(&module, scratch.borrow());
 
     // Apply GLWE x GGSW
     ct_glwe.external_product_inplace(&module, &res_prepared, scratch.borrow());
@@ -1,12 +1,12 @@
 use itertools::izip;
 use poulpy_hal::{
     api::{
-        DFT, IDFT, IDFTConsume, ScratchAvailable, SvpApply, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
-        TakeVecZnxDftSlice, TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftSubABInplace,
-        VecZnxDftZero, VecZnxIDFTTmpBytes, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxSubABInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, SvpApplyDftToDft, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
+        TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
+        VecZnxDftSubABInplace, VecZnxDftZero, VecZnxIdftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes,
+        VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
+        VecZnxSubABInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, SvpPPol, VecZnx, ZnxView, ZnxZero},
 };
@@ -36,7 +36,7 @@ where
         + VmpApplyDftToDftTmpBytes
         + VecZnxNormalizeTmpBytes
         + VecZnxBigAllocBytes
-        + VecZnxIDFTTmpBytes
+        + VecZnxIdftApplyTmpBytes
         + VecZnxBigNormalizeTmpBytes,
 {
     let brk_size: usize = k_brk.div_ceil(basek);
@@ -59,7 +59,7 @@ where
             + acc_dft_add
             + vmp_res
             + vmp_xai
-            + (vmp | (acc_big + (module.vec_znx_big_normalize_tmp_bytes() | module.vec_znx_idft_tmp_bytes())))
+            + (vmp | (acc_big + (module.vec_znx_big_normalize_tmp_bytes() | module.vec_znx_idft_apply_tmp_bytes())))
     } else {
         GLWECiphertext::bytes_of(module.n(), basek, k_res, rank)
             + GLWECiphertext::external_product_scratch_space(module, basek, k_res, k_res, k_brk, 1, rank)
@@ -73,13 +73,13 @@ where
         + SvpPPolAllocBytes
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
-        + VecZnxIDFTTmpBytes
-        + IDFT<B>
+        + VecZnxIdftApplyTmpBytes
+        + VecZnxIdftApply<B>
         + VecZnxDftAdd<B>
         + VecZnxDftAddInplace<B>
-        + DFT<B>
+        + VecZnxDftApply<B>
         + VecZnxDftZero<B>
-        + SvpApply<B>
+        + SvpApplyDftToDft<B>
         + VecZnxDftSubABInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxRotate
@@ -88,10 +88,10 @@ where
         + VecZnxNormalize<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxCopy
-        + VecZnxMulXpMinusOneInplace
+        + VecZnxMulXpMinusOneInplace<B>
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + IDFTConsume<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes,
     Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + TakeVecZnx + ScratchAvailable,
@@ -135,13 +135,13 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
         + SvpPPolAllocBytes
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
-        + VecZnxIDFTTmpBytes
-        + IDFT<B>
+        + VecZnxIdftApplyTmpBytes
+        + VecZnxIdftApply<B>
         + VecZnxDftAdd<B>
         + VecZnxDftAddInplace<B>
-        + DFT<B>
+        + VecZnxDftApply<B>
         + VecZnxDftZero<B>
-        + SvpApply<B>
+        + SvpApplyDftToDft<B>
         + VecZnxDftSubABInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxRotate
@@ -150,7 +150,7 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
         + VecZnxNormalize<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxCopy
-        + VecZnxMulXpMinusOneInplace
+        + VecZnxMulXpMinusOneInplace<B>
         + VecZnxBigNormalize<B>
         + VmpApplyDftToDft<B>,
     Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + ScratchAvailable + TakeVecZnx,
@@ -161,11 +161,11 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
     let rows: usize = brk.rows();
     let cols: usize = res.rank() + 1;
 
-    let (mut acc, scratch1) = scratch.take_vec_znx_slice(extension_factor, n_glwe, cols, res.size());
-    let (mut acc_dft, scratch2) = scratch1.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, rows);
-    let (mut vmp_res, scratch3) = scratch2.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
-    let (mut acc_add_dft, scratch4) = scratch3.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
-    let (mut vmp_xai, scratch5) = scratch4.take_vec_znx_dft(n_glwe, 1, brk.size());
+    let (mut acc, scratch_1) = scratch.take_vec_znx_slice(extension_factor, n_glwe, cols, res.size());
+    let (mut acc_dft, scratch_2) = scratch_1.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, rows);
+    let (mut vmp_res, scratch_3) = scratch_2.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
+    let (mut acc_add_dft, scratch_4) = scratch_3.take_vec_znx_dft_slice(extension_factor, n_glwe, cols, brk.size());
+    let (mut vmp_xai, scratch_5) = scratch_4.take_vec_znx_dft(n_glwe, 1, brk.size());
 
     (0..extension_factor).for_each(|i| {
         acc[i].zero();
@@ -208,7 +208,7 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
         .for_each(|(ai, ski)| {
             (0..extension_factor).for_each(|i| {
                 (0..cols).for_each(|j| {
-                    module.dft(1, 0, &mut acc_dft[i], j, &acc[i], j);
+                    module.vec_znx_dft_apply(1, 0, &mut acc_dft[i], j, &acc[i], j);
                 });
                 module.vec_znx_dft_zero(&mut acc_add_dft[i])
             });
@@ -221,7 +221,7 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
 
             // vmp_res = DFT(acc) * BRK[i]
             (0..extension_factor).for_each(|i| {
-                module.vmp_apply_dft_to_dft(&mut vmp_res[i], &acc_dft[i], skii.data(), scratch5);
+                module.vmp_apply_dft_to_dft(&mut vmp_res[i], &acc_dft[i], skii.data(), scratch_5);
             });
 
             // Trivial case: no rotation between polynomials, we can directly multiply with (X^{-ai} - 1)
@@ -231,7 +231,7 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
                 // DFT X^{-ai}
                 (0..extension_factor).for_each(|j| {
                     (0..cols).for_each(|i| {
-                        module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_hi], 0, &vmp_res[j], i);
+                        module.svp_apply_dft_to_dft(&mut vmp_xai, 0, &x_pow_a[ai_hi], 0, &vmp_res[j], i);
                         module.vec_znx_dft_add_inplace(&mut acc_add_dft[j], i, &vmp_xai, 0);
                         module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[j], i, &vmp_res[j], i);
                     });
@@ -247,7 +247,7 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
                 if (ai_hi + 1) & (two_n - 1) != 0 {
                     for (i, j) in (0..ai_lo).zip(extension_factor - ai_lo..extension_factor) {
                         (0..cols).for_each(|k| {
-                            module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_hi + 1], 0, &vmp_res[j], k);
+                            module.svp_apply_dft_to_dft(&mut vmp_xai, 0, &x_pow_a[ai_hi + 1], 0, &vmp_res[j], k);
                             module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_xai, 0);
                             module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
                         });
@@ -259,7 +259,7 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
                 // Sets acc_add_dft[ai_lo..extension_factor] += (acc[0..extension_factor - ai_lo] * sk) * X^{-ai}
                 for (i, j) in (ai_lo..extension_factor).zip(0..extension_factor - ai_lo) {
                     (0..cols).for_each(|k| {
-                        module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_hi], 0, &vmp_res[j], k);
+                        module.svp_apply_dft_to_dft(&mut vmp_xai, 0, &x_pow_a[ai_hi], 0, &vmp_res[j], k);
                         module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_xai, 0);
                         module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
                     });
@@ -269,11 +269,11 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
             });
 
             {
-                let (mut acc_add_big, scratch7) = scratch5.take_vec_znx_big(n_glwe, 1, brk.size());
+                let (mut acc_add_big, scratch7) = scratch_5.take_vec_znx_big(n_glwe, 1, brk.size());
 
                 (0..extension_factor).for_each(|j| {
                     (0..cols).for_each(|i| {
-                        module.idft(&mut acc_add_big, 0, &acc_add_dft[j], i, scratch7);
+                        module.vec_znx_idft_apply(&mut acc_add_big, 0, &acc_add_dft[j], i, scratch7);
                         module.vec_znx_big_add_small_inplace(&mut acc_add_big, 0, &acc[j], i);
                         module.vec_znx_big_normalize(basek, &mut acc[j], i, &acc_add_big, 0, scratch7);
                     });
@@ -302,13 +302,13 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
         + SvpPPolAllocBytes
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
-        + VecZnxIDFTTmpBytes
-        + IDFT<B>
+        + VecZnxIdftApplyTmpBytes
+        + VecZnxIdftApply<B>
         + VecZnxDftAdd<B>
         + VecZnxDftAddInplace<B>
-        + DFT<B>
+        + VecZnxDftApply<B>
         + VecZnxDftZero<B>
-        + SvpApply<B>
+        + SvpApplyDftToDft<B>
         + VecZnxDftSubABInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxRotate
@@ -317,7 +317,7 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
         + VecZnxNormalize<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxCopy
-        + VecZnxMulXpMinusOneInplace
+        + VecZnxMulXpMinusOneInplace<B>
         + VmpApplyDftToDft<B>
         + VecZnxBigNormalize<B>,
     Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + ScratchAvailable + TakeVecZnx,
@@ -351,10 +351,10 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
 
     // ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
 
-    let (mut acc_dft, scratch1) = scratch.take_vec_znx_dft(n_glwe, cols, rows);
-    let (mut vmp_res, scratch2) = scratch1.take_vec_znx_dft(n_glwe, cols, brk.size());
-    let (mut acc_add_dft, scratch3) = scratch2.take_vec_znx_dft(n_glwe, cols, brk.size());
-    let (mut vmp_xai, scratch4) = scratch3.take_vec_znx_dft(n_glwe, 1, brk.size());
+    let (mut acc_dft, scratch_1) = scratch.take_vec_znx_dft(n_glwe, cols, rows);
+    let (mut vmp_res, scratch_2) = scratch_1.take_vec_znx_dft(n_glwe, cols, brk.size());
+    let (mut acc_add_dft, scratch_3) = scratch_2.take_vec_znx_dft(n_glwe, cols, brk.size());
+    let (mut vmp_xai, scratch_4) = scratch_3.take_vec_znx_dft(n_glwe, 1, brk.size());
 
     let x_pow_a: &Vec<SvpPPol<Vec<u8>, B>>;
     if let Some(b) = &brk.x_pow_a {
@@ -369,7 +369,7 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
     )
     .for_each(|(ai, ski)| {
         (0..cols).for_each(|j| {
-            module.dft(1, 0, &mut acc_dft, j, &out_mut.data, j);
+            module.vec_znx_dft_apply(1, 0, &mut acc_dft, j, &out_mut.data, j);
         });
 
         module.vec_znx_dft_zero(&mut acc_add_dft);
@@ -378,23 +378,23 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
             let ai_pos: usize = ((aii + two_n as i64) & (two_n - 1) as i64) as usize;
 
             // vmp_res = DFT(acc) * BRK[i]
-            module.vmp_apply_dft_to_dft(&mut vmp_res, &acc_dft, skii.data(), scratch4);
+            module.vmp_apply_dft_to_dft(&mut vmp_res, &acc_dft, skii.data(), scratch_4);
 
             // DFT(X^ai -1) * (DFT(acc) * BRK[i])
             (0..cols).for_each(|i| {
-                module.svp_apply(&mut vmp_xai, 0, &x_pow_a[ai_pos], 0, &vmp_res, i);
+                module.svp_apply_dft_to_dft(&mut vmp_xai, 0, &x_pow_a[ai_pos], 0, &vmp_res, i);
                 module.vec_znx_dft_add_inplace(&mut acc_add_dft, i, &vmp_xai, 0);
                 module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft, i, &vmp_res, i);
             });
         });
 
         {
-            let (mut acc_add_big, scratch5) = scratch4.take_vec_znx_big(n_glwe, 1, brk.size());
+            let (mut acc_add_big, scratch_5) = scratch_4.take_vec_znx_big(n_glwe, 1, brk.size());
 
             (0..cols).for_each(|i| {
-                module.idft(&mut acc_add_big, 0, &acc_add_dft, i, scratch5);
+                module.vec_znx_idft_apply(&mut acc_add_big, 0, &acc_add_dft, i, scratch_5);
                 module.vec_znx_big_add_small_inplace(&mut acc_add_big, 0, &out_mut.data, i);
-                module.vec_znx_big_normalize(basek, &mut out_mut.data, i, &acc_add_big, 0, scratch5);
+                module.vec_znx_big_normalize(basek, &mut out_mut.data, i, &acc_add_big, 0, scratch_5);
             });
         }
     });
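
For orientation, the loop bodies above implement the standard CGGI CMux accumulation. With BRK_i a GGSW encryption of the secret bit s_i and ⊡ the external product, each iteration computes (a hedged restatement of the inline comments, up to the rotation direction; not text from the diff):

    ACC' = ACC + (X^{a_i} - 1) · (ACC ⊡ BRK_i)
         = ACC              if s_i = 0,
         = X^{a_i} · ACC    if s_i = 1,

so after all iterations the accumulator holds the look-up table rotated by the phase of the mod-switched LWE ciphertext; the extended variant applies the same identity with the rotation split across the extension slots.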
@@ -416,13 +416,13 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
         + SvpPPolAllocBytes
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
-        + VecZnxIDFTTmpBytes
-        + IDFT<B>
+        + VecZnxIdftApplyTmpBytes
+        + VecZnxIdftApply<B>
         + VecZnxDftAdd<B>
         + VecZnxDftAddInplace<B>
-        + DFT<B>
+        + VecZnxDftApply<B>
         + VecZnxDftZero<B>
-        + SvpApply<B>
+        + SvpApplyDftToDft<B>
         + VecZnxDftSubABInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxRotate
@@ -431,10 +431,10 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
         + VecZnxNormalize<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxCopy
-        + VecZnxMulXpMinusOneInplace
+        + VecZnxMulXpMinusOneInplace<B>
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + IDFTConsume<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes,
     Scratch<B>: TakeVecZnxDftSlice<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnxSlice + ScratchAvailable + TakeVecZnx,
@@ -492,16 +492,16 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
     module.vec_znx_rotate(b, &mut out_mut.data, 0, &lut.data[0], 0);
 
     // ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
-    let (mut acc_tmp, scratch1) = scratch.take_glwe_ct(out_mut.n(), basek, out_mut.k(), out_mut.rank());
+    let (mut acc_tmp, scratch_1) = scratch.take_glwe_ct(out_mut.n(), basek, out_mut.k(), out_mut.rank());
 
     // TODO: see if faster by skipping normalization in external product and keeping acc in big coeffs
     // TODO: first iteration can be optimized to be a gglwe product
     izip!(a.iter(), brk.data.iter()).for_each(|(ai, ski)| {
         // acc_tmp = sk[i] * acc
-        acc_tmp.external_product(module, &out_mut, ski, scratch1);
+        acc_tmp.external_product(module, &out_mut, ski, scratch_1);
 
         // acc_tmp = (sk[i] * acc) * (X^{ai} - 1)
-        acc_tmp.mul_xp_minus_one_inplace(module, *ai);
+        acc_tmp.mul_xp_minus_one_inplace(module, *ai, scratch_1);
 
         // acc = acc + (sk[i] * acc) * (X^{ai} - 1)
         out_mut.add_inplace(module, &acc_tmp);
@@ -509,7 +509,7 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
 
     // We can normalize only at the end because we add normalized values in [-2^{basek-1}, 2^{basek-1}]
     // on top of each other, thus ~ 2^{63-basek} additions are supported before overflow.
-    out_mut.normalize_inplace(module, scratch1);
+    out_mut.normalize_inplace(module, scratch_1);
 }
 
 pub fn mod_switch_2n(n: usize, res: &mut [i64], lwe: &LWECiphertext<&[u8]>, rot_dir: LookUpTableRotationDirection) {
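
The body of mod_switch_2n is not shown in this diff; what such a function computes in TFHE is the rescaling of each LWE coefficient from the ciphertext modulus (2^64 for an i64/u64 torus) down to Z_{2n}, so that coefficients can act as rotation exponents in the blind rotation above. A standalone sketch of that rounding step (the helper name and the q = 2^64 assumption are mine, not the repository's):

    /// round(a * two_n / 2^64) mod two_n, computed in u128 to avoid overflow.
    fn mod_switch_coeff(a: u64, two_n: u64) -> u64 {
        (((a as u128 * two_n as u128 + (1u128 << 63)) >> 64) as u64) % two_n
    }

    fn main() {
        // Half the torus (2^63) maps to n, i.e. multiplication by X^n = -1.
        assert_eq!(mod_switch_coeff(1u64 << 63, 2048), 1024);
    }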
@@ -1,8 +1,9 @@
 use poulpy_hal::{
     api::{
-        DFT, IDFTConsume, ScratchAvailable, SvpApplyInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
-        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxFillUniform, VecZnxNormalize,
-        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VmpPMatAlloc, VmpPrepare,
+        ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
+        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VmpPMatAlloc, VmpPrepare,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxView, ZnxViewMut},
     source::Source,
@@ -50,9 +51,9 @@ where
     Module<B>: VecZnxAddScalarInplace
         + VecZnxDftAllocBytes
         + VecZnxBigNormalize<B>
-        + DFT<B>
-        + SvpApplyInplace<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + SvpApplyDftToDftInplace<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
         + VecZnxSubABInplace
@@ -162,9 +163,9 @@ impl<D: DataMut> BlindRotationKeyCompressed<D, CGGI> {
     Module<B>: VecZnxAddScalarInplace
         + VecZnxDftAllocBytes
         + VecZnxBigNormalize<B>
-        + DFT<B>
-        + SvpApplyInplace<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + SvpApplyDftToDftInplace<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
         + VecZnxSubABInplace
@@ -80,10 +80,10 @@ impl<D: DataMut, BRT: BlindRotationAlgo> Reset for BlindRotationKey<D, BRT> {
 }
 
 impl<D: DataMut, BRT: BlindRotationAlgo> FillUniform for BlindRotationKey<D, BRT> {
-    fn fill_uniform(&mut self, source: &mut Source) {
+    fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
         self.keys
             .iter_mut()
-            .for_each(|key| key.fill_uniform(source));
+            .for_each(|key| key.fill_uniform(log_bound, source));
     }
 }
@@ -59,10 +59,10 @@ impl<D: DataMut, BRT: BlindRotationAlgo> Reset for BlindRotationKeyCompressed<D,
 }
 
 impl<D: DataMut, BRT: BlindRotationAlgo> FillUniform for BlindRotationKeyCompressed<D, BRT> {
-    fn fill_uniform(&mut self, source: &mut Source) {
+    fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
         self.keys
             .iter_mut()
-            .for_each(|key| key.fill_uniform(source));
+            .for_each(|key| key.fill_uniform(log_bound, source));
     }
 }
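
Both FillUniform impls above now thread a log_bound argument down to each per-bit key. A hypothetical call site under the new signature (the trait's import path is not shown in this diff, and reading log_bound as a base-2 magnitude bound on the sampled coefficients is an inference, not documented here):

    use poulpy_hal::source::Source;

    // K stands for any implementor of the updated trait, e.g.
    // BlindRotationKey<Vec<u8>, CGGI>; the FillUniform path is assumed in scope.
    fn randomize<K: FillUniform>(key: &mut K, log_bound: usize) {
        let mut source = Source::new([0u8; 32]); // fixed seed, as in the example binary
        key.fill_uniform(log_bound, &mut source);
    }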
@@ -1,10 +1,9 @@
 use poulpy_hal::{
     api::{
         ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace,
-        VecZnxSwithcDegree,
+        VecZnxRotateInplaceTmpBytes, VecZnxSwitchRing,
     },
     layouts::{Backend, Module, ScratchOwned, VecZnx, ZnxInfos, ZnxViewMut},
-    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };
 
 #[derive(Debug, Clone, Copy)]
@@ -69,15 +68,22 @@ impl LookUpTable {
         self.rot_dir = rot_dir
     }
 
-    pub fn set<B>(&mut self, module: &Module<B>, f: &[i64], k: usize)
+    pub fn set<B: Backend>(&mut self, module: &Module<B>, f: &[i64], k: usize)
     where
-        Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
-        B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
+        Module<B>: VecZnxRotateInplace<B>
+            + VecZnxNormalizeInplace<B>
+            + VecZnxNormalizeTmpBytes
+            + VecZnxSwitchRing
+            + VecZnxCopy
+            + VecZnxRotateInplaceTmpBytes,
+        ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
     {
         assert!(f.len() <= module.n());
 
         let basek: usize = self.basek;
 
+        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(module.vec_znx_normalize_tmp_bytes());
+
         // Get the minimum number of limbs needed to store the message modulus
         let limbs: usize = k.div_ceil(basek);
 
@@ -124,17 +130,15 @@ impl LookUpTable {
 
         if self.extension_factor() > 1 {
             (0..self.extension_factor()).for_each(|i| {
-                module.vec_znx_switch_degree(&mut self.data[i], 0, &lut_full, 0);
+                module.vec_znx_switch_ring(&mut self.data[i], 0, &lut_full, 0);
                 if i < self.extension_factor() {
-                    module.vec_znx_rotate_inplace(-1, &mut lut_full, 0);
+                    module.vec_znx_rotate_inplace(-1, &mut lut_full, 0, scratch.borrow());
                 }
             });
         } else {
             module.vec_znx_copy(&mut self.data[0], 0, &lut_full, 0);
         }
 
-        let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(module.vec_znx_normalize_tmp_bytes());
-
         self.data.iter_mut().for_each(|a| {
             module.vec_znx_normalize_inplace(self.basek, a, 0, scratch.borrow());
         });
@@ -147,23 +151,26 @@ impl LookUpTable {
     #[allow(dead_code)]
     pub(crate) fn rotate<B: Backend>(&mut self, module: &Module<B>, k: i64)
     where
-        Module<B>: VecZnxRotateInplace,
+        Module<B>: VecZnxRotateInplace<B> + VecZnxRotateInplaceTmpBytes,
+        ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
     {
         let extension_factor: usize = self.extension_factor();
         let two_n: usize = 2 * self.data[0].n();
         let two_n_ext: usize = two_n * extension_factor;
 
+        let mut scratch: ScratchOwned<_> = ScratchOwned::alloc(module.vec_znx_rotate_inplace_tmp_bytes());
+
         let k_pos: usize = ((k + two_n_ext as i64) % two_n_ext as i64) as usize;
 
         let k_hi: usize = k_pos / extension_factor;
         let k_lo: usize = k_pos % extension_factor;
 
         (0..extension_factor - k_lo).for_each(|i| {
-            module.vec_znx_rotate_inplace(k_hi as i64, &mut self.data[i], 0);
+            module.vec_znx_rotate_inplace(k_hi as i64, &mut self.data[i], 0, scratch.borrow());
         });
 
         (extension_factor - k_lo..extension_factor).for_each(|i| {
-            module.vec_znx_rotate_inplace(k_hi as i64 + 1, &mut self.data[i], 0);
+            module.vec_znx_rotate_inplace(k_hi as i64 + 1, &mut self.data[i], 0, scratch.borrow());
         });
 
         self.data.rotate_right(k_lo);
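
LookUpTable::rotate above splits one rotation over the extended ring into a per-polynomial rotation k_hi (k_hi + 1 for the slots that wrap around) and a cyclic shift k_lo of the polynomial vector itself. A standalone sketch of just that index arithmetic (the function name is mine; rem_euclid is equivalent to the (k + 2n·ext) % 2n·ext reduction in the body for the ranges used there):

    /// Decompose a signed rotation k over Z_{2n·ext} into (k_hi, k_lo).
    fn split_rotation(k: i64, two_n: usize, extension_factor: usize) -> (usize, usize) {
        let two_n_ext = (two_n * extension_factor) as i64;
        let k_pos = k.rem_euclid(two_n_ext) as usize;
        (k_pos / extension_factor, k_pos % extension_factor)
    }

    fn main() {
        // two_n = 8, extension_factor = 2: a rotation by 5 is two full steps
        // on every polynomial plus one shift of the slot vector.
        assert_eq!(split_rotation(5, 8, 2), (2, 1));
    }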
@@ -1,12 +1,13 @@
 use poulpy_hal::{
     api::{
-        DFT, IDFT, IDFTConsume, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApply, SvpApplyInplace, SvpPPolAlloc,
-        SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd,
-        VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftSubABInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIDFTTmpBytes,
-        VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
-        VecZnxRotateInplace, VecZnxSub, VecZnxSubABInplace, VecZnxSwithcDegree, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
+        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
+        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftSubABInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
+        VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal,
+        ZnFillUniform, ZnNormalizeInplace,
     },
     layouts::{Backend, Module, ScratchOwned, ZnxView},
     oep::{
@@ -33,13 +34,13 @@ where
         + SvpPPolAllocBytes
         + VmpApplyDftToDftTmpBytes
         + VecZnxBigNormalizeTmpBytes
-        + VecZnxIDFTTmpBytes
-        + IDFT<B>
+        + VecZnxIdftApplyTmpBytes
+        + VecZnxIdftApply<B>
         + VecZnxDftAdd<B>
         + VecZnxDftAddInplace<B>
-        + DFT<B>
+        + VecZnxDftApply<B>
         + VecZnxDftZero<B>
-        + SvpApply<B>
+        + SvpApplyDftToDft<B>
         + VecZnxDftSubABInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxRotate
@@ -48,19 +49,19 @@ where
         + VecZnxNormalize<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxCopy
-        + VecZnxMulXpMinusOneInplace
+        + VecZnxMulXpMinusOneInplace<B>
         + SvpPrepare<B>
         + SvpPPolAlloc<B>
-        + SvpApplyInplace<B>
-        + IDFTConsume<B>
+        + SvpApplyDftToDftInplace<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
         + VecZnxAddNormal
         + VecZnxAddScalarInplace
-        + VecZnxRotateInplace
-        + VecZnxSwithcDegree
+        + VecZnxRotateInplace<B>
+        + VecZnxSwitchRing
         + VecZnxSub
         + VmpPMatAlloc<B>
         + VmpPrepare<B>
@@ -68,6 +69,7 @@ where
         + VmpApplyDftToDftAdd<B>
         + ZnFillUniform
         + ZnAddNormal
+        + VecZnxRotateInplaceTmpBytes
         + ZnNormalizeInplace<B>,
     B: Backend
         + VecZnxDftAllocBytesImpl<B>
@@ -1,7 +1,10 @@
 use std::vec;
 
 use poulpy_hal::{
-    api::{VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxSwithcDegree},
+    api::{
+        VecZnxCopy, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes,
+        VecZnxSwitchRing,
+    },
     layouts::{Backend, Module},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };
@@ -10,7 +13,12 @@ use crate::tfhe::blind_rotation::{DivRound, LookUpTable};
 
 pub fn test_lut_standard<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
+    Module<B>: VecZnxRotateInplace<B>
+        + VecZnxNormalizeInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxSwitchRing
+        + VecZnxCopy
+        + VecZnxRotateInplaceTmpBytes,
     B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
 {
     let basek: usize = 20;
@@ -45,7 +53,12 @@ where
 
 pub fn test_lut_extended<B>(module: &Module<B>)
 where
-    Module<B>: VecZnxRotateInplace + VecZnxNormalizeInplace<B> + VecZnxNormalizeTmpBytes + VecZnxSwithcDegree + VecZnxCopy,
+    Module<B>: VecZnxRotateInplace<B>
+        + VecZnxNormalizeInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxSwitchRing
+        + VecZnxCopy
+        + VecZnxRotateInplaceTmpBytes,
     B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
 {
     let basek: usize = 20;
@@ -1,4 +1,4 @@
-use poulpy_hal::tests::serialization::test_reader_writer_interface;
+use poulpy_hal::test_suite::serialization::test_reader_writer_interface;
 
 use crate::tfhe::blind_rotation::{BlindRotationKey, BlindRotationKeyAlloc, BlindRotationKeyCompressed, CGGI};
@@ -1,4 +1,4 @@
-use poulpy_backend::cpu_spqlios::FFT64;
+use poulpy_backend::cpu_spqlios::FFT64Spqlios;
 use poulpy_hal::{api::ModuleNew, layouts::Module};
 
 use crate::tfhe::blind_rotation::tests::{
@@ -8,30 +8,30 @@ use crate::tfhe::blind_rotation::tests::{
 
 #[test]
 fn lut_standard() {
-    let module: Module<FFT64> = Module::<FFT64>::new(32);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(32);
     test_lut_standard(&module);
 }
 
 #[test]
 fn lut_extended() {
-    let module: Module<FFT64> = Module::<FFT64>::new(32);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(32);
     test_lut_extended(&module);
 }
 
 #[test]
 fn standard() {
-    let module: Module<FFT64> = Module::<FFT64>::new(512);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(512);
     test_blind_rotation(&module, 224, 1, 1);
 }
 
 #[test]
 fn block_binary() {
-    let module: Module<FFT64> = Module::<FFT64>::new(512);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(512);
    test_blind_rotation(&module, 224, 7, 1);
 }
 
 #[test]
 fn block_binary_extended() {
-    let module: Module<FFT64> = Module::<FFT64>::new(512);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(512);
     test_blind_rotation(&module, 224, 7, 2);
 }
@@ -2,12 +2,13 @@ use std::collections::HashMap;
 
 use poulpy_hal::{
     api::{
-        DFT, IDFTConsume, IDFTTmpA, ScratchAvailable, TakeMatZnx, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
-        TakeVecZnxSlice, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
-        VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace, VecZnxCopy,
-        VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftCopy, VecZnxNegateInplace, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace,
-        VecZnxSwithcDegree, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, TakeMatZnx, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice,
+        VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace, VecZnxCopy, VecZnxDftAddInplace,
+        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes,
+        VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+        VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ToOwnedDeep},
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
@@ -26,14 +27,14 @@ use crate::tfhe::{
 
 impl<D: DataRef, BRA: BlindRotationAlgo, B> CirtuitBootstrappingExecute<B> for CircuitBootstrappingKeyPrepared<D, BRA, B>
 where
-    Module<B>: VecZnxRotateInplace
+    Module<B>: VecZnxRotateInplace<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxNormalizeTmpBytes
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
-        + IDFTTmpA<B>
+        + VecZnxIdftApplyTmpA<B>
         + VecZnxSub
         + VecZnxAddInplace
         + VecZnxNegateInplace
@@ -44,12 +45,13 @@ where
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + DFT<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
+        + VecZnxRotateInplaceTmpBytes
         + VecZnxBigAllocBytes
         + VecZnxDftAddInplace<B>
         + VecZnxRotate,
@@ -124,14 +126,14 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
     DRes: DataMut,
     DLwe: DataRef,
     DBrk: DataRef,
-    Module<B>: VecZnxRotateInplace
+    Module<B>: VecZnxRotateInplace<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxNormalizeTmpBytes
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
-        + IDFTTmpA<B>
+        + VecZnxIdftApplyTmpA<B>
         + VecZnxSub
         + VecZnxAddInplace
         + VecZnxNegateInplace
@@ -142,14 +144,15 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + DFT<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
         + VecZnxBigAllocBytes
         + VecZnxDftAddInplace<B>
+        + VecZnxRotateInplaceTmpBytes
         + VecZnxRotate,
     B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
     Scratch<B>: TakeVecZnxDftSlice<B>
@@ -199,10 +202,10 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
     }
 
     // TODO: separate GGSW k from output of blind rotation k
-    let (mut res_glwe, scratch1) = scratch.take_glwe_ct(n, basek, k, rank);
-    let (mut tmp_gglwe, scratch2) = scratch1.take_gglwe(n, basek, k, rows, 1, rank.max(1), rank);
+    let (mut res_glwe, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
+    let (mut tmp_gglwe, scratch_2) = scratch_1.take_gglwe(n, basek, k, rows, 1, rank.max(1), rank);
 
-    key.brk.execute(module, &mut res_glwe, lwe, &lut, scratch2);
+    key.brk.execute(module, &mut res_glwe, lwe, &lut, scratch_2);
 
     let gap: usize = 2 * lut.drift / lut.extension_factor();
 
@@ -221,19 +224,19 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
                 log_gap_out,
                 log_domain,
                 &key.atk,
-                scratch2,
+                scratch_2,
             );
         } else {
-            tmp_glwe.trace(module, 0, module.log_n(), &res_glwe, &key.atk, scratch2);
+            tmp_glwe.trace(module, 0, module.log_n(), &res_glwe, &key.atk, scratch_2);
         }
 
         if i < rows {
-            res_glwe.rotate_inplace(module, -(gap as i64));
+            res_glwe.rotate_inplace(module, -(gap as i64), scratch_2);
        }
     });
 
     // Expands GGLWE to GGSW using GGLWE(s^2)
-    res.from_gglwe(module, &tmp_gglwe, &key.tsk, scratch2);
+    res.from_gglwe(module, &tmp_gglwe, &key.tsk, scratch_2);
 }
 
 #[allow(clippy::too_many_arguments)]
@@ -249,14 +252,14 @@ fn post_process<DataRes, DataA, B: Backend>(
 ) where
     DataRes: DataMut,
     DataA: DataRef,
-    Module<B>: VecZnxRotateInplace
+    Module<B>: VecZnxRotateInplace<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxNormalizeTmpBytes
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
-        + IDFTTmpA<B>
+        + VecZnxIdftApplyTmpA<B>
         + VecZnxSub
         + VecZnxAddInplace
         + VecZnxNegateInplace
@@ -267,11 +270,11 @@ fn post_process<DataRes, DataA, B: Backend>(
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + DFT<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
         + VecZnxRotate,
     Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
@@ -297,7 +300,7 @@ fn post_process<DataRes, DataA, B: Backend>(
     let steps: i32 = 1 << log_domain;
     (0..steps).for_each(|i| {
         if i != 0 {
-            res.rotate_inplace(module, -(1 << log_gap_in));
+            res.rotate_inplace(module, -(1 << log_gap_in), scratch);
         }
         cts.insert(i as usize * (1 << log_gap_out), res.to_owned_deep());
     });
@@ -321,14 +324,14 @@ pub fn pack<D: DataMut, B: Backend>(
     auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<Vec<u8>, B>>,
     scratch: &mut Scratch<B>,
 ) where
-    Module<B>: VecZnxRotateInplace
+    Module<B>: VecZnxRotateInplace<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxNormalizeTmpBytes
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
-        + IDFTTmpA<B>
+        + VecZnxIdftApplyTmpA<B>
         + VecZnxSub
         + VecZnxAddInplace
         + VecZnxNegateInplace
@@ -339,11 +342,11 @@ pub fn pack<D: DataMut, B: Backend>(
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + DFT<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
         + VecZnxRotate,
     Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
@@ -400,14 +403,14 @@ fn combine<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
     auto_key: &GGLWEAutomorphismKeyPrepared<DataAK, B>,
     scratch: &mut Scratch<B>,
 ) where
-    Module<B>: VecZnxRotateInplace
+    Module<B>: VecZnxRotateInplace<B>
         + VecZnxNormalizeInplace<B>
         + VecZnxNormalizeTmpBytes
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
-        + IDFTTmpA<B>
+        + VecZnxIdftApplyTmpA<B>
         + VecZnxSub
         + VecZnxAddInplace
         + VecZnxNegateInplace
@@ -418,11 +421,11 @@ fn combine<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
         + VecZnxBigNormalizeTmpBytes
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
-        + DFT<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
        + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
         + VecZnxRotate,
     Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
@@ -446,15 +449,15 @@ fn combine<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
     let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
 
     // a = a * X^-t
-    a.rotate_inplace(module, -t);
+    a.rotate_inplace(module, -t, scratch_1);
 
     // tmp_b = a * X^-t - b
     tmp_b.sub(module, a, b);
-    tmp_b.rsh(module, 1);
+    tmp_b.rsh(module, 1, scratch_1);
 
     // a = a * X^-t + b
     a.add_inplace(module, b);
-    a.rsh(module, 1);
+    a.rsh(module, 1, scratch_1);
 
     tmp_b.normalize_inplace(module, scratch_1);
 
@@ -468,9 +471,9 @@ fn combine<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
         // a = a + b * X^t - phi(a * X^-t - b) * X^t
         //   = a + b * X^t - phi(a * X^-t - b) * - phi(X^t)
         //   = a + b * X^t + phi(a - b * X^t)
-        a.rotate_inplace(module, t);
+        a.rotate_inplace(module, t, scratch_1);
     } else {
-        a.rsh(module, 1);
+        a.rsh(module, 1, scratch);
         // a = a + phi(a)
         a.automorphism_add_inplace(module, auto_key, scratch);
     }
@@ -481,7 +484,7 @@ fn combine<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
 
     let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
     tmp_b.rotate(module, t, b);
-    tmp_b.rsh(module, 1);
+    tmp_b.rsh(module, 1, scratch_1);
 
     // a = (b* X^t - phi(b* X^t))
     b.automorphism_sub_ba(module, &tmp_b, auto_key, scratch_1);
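
The combine function above is the classic automorphism-based packing step. In the branch without a rotation, rsh(·, 1) followed by automorphism_add_inplace computes, for the automorphism φ selected by auto_key,

    a ← (a + φ(a)) / 2

which keeps the coefficients fixed by φ and cancels the rest, while the rotated branch folds b in first: a ← (a + b·X^t + φ(a − b·X^t)) / 2, exactly as the inline comments derive. Iterated over log n levels by pack, this interleaves all inputs into a single GLWE ciphertext. (This is a hedged restatement of the comments in the hunks, not new text from the repository.)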
@@ -6,11 +6,11 @@ use std::collections::HashMap;
 
 use poulpy_hal::{
     api::{
-        DFT, IDFTConsume, IDFTTmpA, ScratchAvailable, SvpApply, SvpApplyInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare,
-        TakeScalarZnx, TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
-        VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxFillUniform, VecZnxNormalize,
-        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwithcDegree, VmpPMatAlloc,
-        VmpPrepare,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
+        TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
+        VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxSwitchRing, VmpPMatAlloc, VmpPrepare,
     },
     layouts::{Backend, Data, DataRef, Module, Scratch},
     source::Source,
@@ -51,14 +51,14 @@ pub struct CircuitBootstrappingKey<D: Data, BRA: BlindRotationAlgo> {
 impl<BRA: BlindRotationAlgo, B: Backend> CircuitBootstrappingKeyEncryptSk<B> for CircuitBootstrappingKey<Vec<u8>, BRA>
 where
     BlindRotationKey<Vec<u8>, BRA>: BlindRotationKeyAlloc + BlindRotationKeyEncryptSk<B>,
-    Module<B>: SvpApply<B>
-        + IDFTTmpA<B>
+    Module<B>: SvpApplyDftToDft<B>
+        + VecZnxIdftApplyTmpA<B>
         + VecZnxAddScalarInplace
         + VecZnxDftAllocBytes
         + VecZnxBigNormalize<B>
-        + DFT<B>
-        + SvpApplyInplace<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + SvpApplyDftToDftInplace<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
         + VecZnxSubABInplace
@@ -68,7 +68,7 @@ where
         + VecZnxNormalize<B>
         + VecZnxSub
         + SvpPrepare<B>
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + SvpPPolAllocBytes
         + SvpPPolAlloc<B>
         + VecZnxAutomorphism,
@@ -2,14 +2,15 @@ use std::time::Instant;
 
 use poulpy_hal::{
     api::{
-        DFT, IDFTConsume, IDFTTmpA, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApply, SvpApplyInplace, SvpPPolAlloc,
-        SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
-        VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes,
-        VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace, VecZnxCopy,
-        VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftCopy, VecZnxFillUniform, VecZnxNegateInplace,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace,
-        VecZnxSub, VecZnxSubABInplace, VecZnxSwithcDegree, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
-        VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform, ZnNormalizeInplace,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+        SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
+        VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace, VecZnxCopy, VecZnxDftAddInplace,
+        VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
+        VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
+        ZnNormalizeInplace,
     },
     layouts::{Backend, Module, ScalarZnx, ScratchOwned, ZnxView, ZnxViewMut},
     oep::{
@@ -43,9 +44,9 @@ where
         + VecZnxNormalizeInplace<B>
         + VecZnxDftAllocBytes
         + VecZnxBigNormalize<B>
-        + DFT<B>
-        + SvpApplyInplace<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + SvpApplyDftToDftInplace<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxSubABInplace
         + VecZnxAddInplace
@@ -53,10 +54,10 @@ where
         + VecZnxSub
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAllocBytes
-        + IDFTTmpA<B>
-        + SvpApply<B>
+        + VecZnxIdftApplyTmpA<B>
+        + SvpApplyDftToDft<B>
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigAlloc<B>
@@ -70,14 +71,15 @@ where
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
         + SvpPPolAllocBytes
-        + VecZnxRotateInplace
+        + VecZnxRotateInplace<B>
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
         + VecZnxNegateInplace
         + VecZnxCopy
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
+        + VecZnxRotateInplaceTmpBytes
         + VecZnxBigAllocBytes
         + VecZnxDftAddInplace<B>
         + VecZnxRotate
@@ -185,7 +187,12 @@ where
     // X^{data * 2^log_gap_out}
     let mut pt_ggsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
     pt_ggsw.at_mut(0, 0)[0] = 1;
-    module.vec_znx_rotate_inplace(data * (1 << log_gap_out), &mut pt_ggsw.as_vec_znx_mut(), 0);
+    module.vec_znx_rotate_inplace(
+        data * (1 << log_gap_out),
+        &mut pt_ggsw.as_vec_znx_mut(),
+        0,
+        scratch.borrow(),
+    );
 
     res.print_noise(module, &sk_glwe_prepared, &pt_ggsw);
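
The test builds its reference plaintext by setting coefficient 0 to 1 and rotating by data·2^{log_gap_out}, i.e. the monomial X^{data·2^{log_gap_out}}. A plain-vector sketch of that negacyclic rotation (the helper is mine, for illustration; poulpy's vec_znx_rotate_inplace performs the same shift over its own layouts):

    /// Monomial X^k in Z[X]/(X^n + 1): the exponent wraps negacyclically,
    /// flipping sign on each wrap around n.
    fn monomial(n: usize, k: usize) -> Vec<i64> {
        let mut p = vec![0i64; n];
        p[k % n] = if (k / n) % 2 == 0 { 1 } else { -1 };
        p
    }

    fn main() {
        assert_eq!(monomial(4, 1), vec![0, 1, 0, 0]);  // X
        assert_eq!(monomial(4, 5), vec![0, -1, 0, 0]); // X^5 = -X mod X^4 + 1
    }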
@@ -224,9 +231,9 @@ where
         + VecZnxNormalizeInplace<B>
         + VecZnxDftAllocBytes
         + VecZnxBigNormalize<B>
-        + DFT<B>
-        + SvpApplyInplace<B>
-        + IDFTConsume<B>
+        + VecZnxDftApply<B>
+        + SvpApplyDftToDftInplace<B>
+        + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxSubABInplace
         + VecZnxAddInplace
@@ -234,10 +241,10 @@ where
         + VecZnxSub
         + VecZnxAddScalarInplace
         + VecZnxAutomorphism
-        + VecZnxSwithcDegree
+        + VecZnxSwitchRing
         + VecZnxBigAllocBytes
-        + IDFTTmpA<B>
-        + SvpApply<B>
+        + VecZnxIdftApplyTmpA<B>
+        + SvpApplyDftToDft<B>
         + VecZnxBigAddInplace<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigAlloc<B>
@@ -251,13 +258,14 @@ where
         + VmpApplyDftToDft<B>
         + VmpApplyDftToDftAdd<B>
         + SvpPPolAllocBytes
-        + VecZnxRotateInplace
+        + VecZnxRotateInplace<B>
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxRshInplace
+        + VecZnxRotateInplaceTmpBytes
+        + VecZnxRshInplace<B>
         + VecZnxDftCopy<B>
         + VecZnxNegateInplace
         + VecZnxCopy
-        + VecZnxAutomorphismInplace
+        + VecZnxAutomorphismInplace<B>
         + VecZnxBigSubSmallBInplace<B>
         + VecZnxBigAllocBytes
         + VecZnxDftAddInplace<B>
@@ -1,4 +1,4 @@
-use poulpy_backend::cpu_spqlios::FFT64;
+use poulpy_backend::cpu_spqlios::FFT64Spqlios;
 use poulpy_hal::{api::ModuleNew, layouts::Module};
 
 use crate::tfhe::{
@@ -10,12 +10,12 @@ use crate::tfhe::{
 
 #[test]
 fn test_to_constant() {
-    let module: Module<FFT64> = Module::<FFT64>::new(256);
-    test_circuit_bootstrapping_to_constant::<FFT64, CGGI>(&module);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(256);
+    test_circuit_bootstrapping_to_constant::<FFT64Spqlios, CGGI>(&module);
 }
 
 #[test]
 fn test_to_exponent() {
-    let module: Module<FFT64> = Module::<FFT64>::new(256);
-    test_circuit_bootstrapping_to_exponent::<FFT64, CGGI>(&module);
+    let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(256);
+    test_circuit_bootstrapping_to_exponent::<FFT64Spqlios, CGGI>(&module);
 }