WIP on extended blind rotation + bug fixing

Jean-Philippe Bossuat
2025-07-03 11:38:25 +02:00
parent c98bf75b61
commit 81fb710165
12 changed files with 303 additions and 216 deletions

View File

@@ -1,29 +1,46 @@
use std::time::Instant;
use backend::{
FFT64, MatZnxDftOps, MatZnxDftScratch, Module, ScalarZnxAlloc, ScalarZnxDftAlloc, ScalarZnxDftOps, Scratch, VecZnxDftOps,
VecZnxOps, ZnxView, ZnxViewMut, ZnxZero,
FFT64, MatZnxDftOps, MatZnxDftScratch, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps,
Scratch, VecZnxBigAlloc, VecZnxBigOps, VecZnxDftOps, VecZnxOps, ZnxInfos, ZnxView, ZnxViewMut, ZnxZero,
};
use itertools::izip;
use crate::{
FourierGLWECiphertext, GGSWCiphertext, GLWECiphertext, GLWECiphertextToMut, Infos, LWECiphertext, ScratchCore,
FourierGLWECiphertext, FourierGLWESecret, GLWECiphertext, GLWECiphertextToMut, GLWEPlaintext, Infos, LWECiphertext,
ScratchCore,
blind_rotation::{key::BlindRotationKeyCGGI, lut::LookUpTable},
lwe::ciphertext::LWECiphertextToRef,
};
pub fn cggi_blind_rotate_scratch_space(
module: &Module<FFT64>,
extension_factor: usize,
basek: usize,
k_lut: usize,
k_brk: usize,
rows: usize,
rank: usize,
) -> usize {
let size = k_brk.div_ceil(basek);
GGSWCiphertext::<Vec<u8>, FFT64>::bytes_of(module, basek, k_brk, rows, 1, rank)
+ (module.mat_znx_dft_mul_x_pow_minus_one_scratch_space(size, rank + 1)
| GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_lut, k_brk, 1, rank))
let lut_size: usize = k_lut.div_ceil(basek);
let brk_size: usize = k_brk.div_ceil(basek);
let acc_dft: usize = FourierGLWECiphertext::bytes_of(module, basek, k_brk, rank) * extension_factor;
let acc_big: usize = module.bytes_of_vec_znx_big(rank + 1, brk_size);
let acc_dft_add: usize = FourierGLWECiphertext::bytes_of(module, basek, k_brk, rank) * extension_factor;
let vmp_res: usize = FourierGLWECiphertext::bytes_of(module, basek, k_brk, rank) * extension_factor;
let xai_plus_y: usize = module.bytes_of_scalar_znx(1);
let xai_plus_y_dft: usize = module.bytes_of_scalar_znx_dft(1);
let vmp: usize = module.vmp_apply_tmp_bytes(lut_size, lut_size, rows, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
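// `a | b` (bitwise OR) upper-bounds max(a, b); it is used here, as in the other
// *_scratch_space helpers, as a cheap bound for scratch regions that are reused.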
let acc: usize = if extension_factor > 1 {
GLWECiphertext::bytes_of(module, basek, k_lut, rank) * extension_factor
} else {
0
};
acc + acc_dft + acc_dft_add + vmp_res + xai_plus_y + xai_plus_y_dft + (vmp | acc_big)
}
pub fn cggi_blind_rotate<DataRes, DataIn>(
@@ -37,8 +54,8 @@ pub fn cggi_blind_rotate<DataRes, DataIn>(
DataRes: AsRef<[u8]> + AsMut<[u8]>,
DataIn: AsRef<[u8]>,
{
if lut.data.len() > 1 {
cggi_blind_rotate_block_binary_exnteded(module, res, lwe, lut, brk, scratch);
if lut.extension_factor() > 1 {
cggi_blind_rotate_block_binary_extended(module, res, lwe, lut, brk, scratch);
} else if brk.block_size() > 1 {
cggi_blind_rotate_block_binary(module, res, lwe, lut, brk, scratch);
} else {
@@ -46,7 +63,7 @@ pub fn cggi_blind_rotate<DataRes, DataIn>(
}
}
pub(crate) fn cggi_blind_rotate_block_binary_exnteded<DataRes, DataIn>(
pub(crate) fn cggi_blind_rotate_block_binary_extended<DataRes, DataIn>(
module: &Module<FFT64>,
res: &mut GLWECiphertext<DataRes>,
lwe: &LWECiphertext<DataIn>,
@@ -57,198 +74,164 @@ pub(crate) fn cggi_blind_rotate_block_binary_exnteded<DataRes, DataIn>(
DataRes: AsRef<[u8]> + AsMut<[u8]>,
DataIn: AsRef<[u8]>,
{
let extension_factor: usize = lut.extension_factor();
let basek: usize = res.basek();
let (mut acc, scratch1) = scratch.tmp_vec_glwe_ct(extension_factor, module, basek, res.k(), res.rank());
let (mut acc_dft, scratch2) = scratch1.tmp_vec_fourier_glwe_ct(extension_factor, module, basek, brk.k(), res.rank());
let (mut vmp_res, scratch3) = scratch2.tmp_vec_fourier_glwe_ct(extension_factor, module, basek, brk.k(), res.rank());
let (mut acc_add_dft, scratch4) = scratch3.tmp_vec_fourier_glwe_ct(extension_factor, module, basek, brk.k(), res.rank());
(0..extension_factor).for_each(|i| {
acc[i].data.zero();
});
let (mut xai_plus_y, scratch5) = scratch4.tmp_scalar_znx(module, 1);
let (mut xai_plus_y_dft, scratch6) = scratch5.tmp_scalar_znx_dft(module, 1);
let (mut acc_add_big, scratch7) = scratch6.tmp_vec_znx_big(module, 1, brk.size());
let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
let mut out_mut: GLWECiphertext<&mut [u8]> = res.to_mut();
let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();
let basek: usize = out_mut.basek();
let cols: usize = out_mut.rank() + 1;
let two_n_ext: usize = 2 * lut.domain_size();
mod_switch_2n(
2 * module.n() * lut.extension_factor(),
&mut lwe_2n,
&lwe_ref,
);
let cols: usize = res.rank() + 1;
let extension_factor: i64 = lut.extension_factor() as i64;
let mut acc: Vec<GLWECiphertext<Vec<u8>>> = Vec::with_capacity(lut.extension_factor());
for _ in 0..extension_factor {
acc.push(GLWECiphertext::alloc(
module,
basek,
out_mut.k(),
out_mut.rank(),
));
}
negate_and_mod_switch_2n(two_n_ext, &mut lwe_2n, &lwe_ref);
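// Illustrative numbers (the test uses these): module.n() = 2048 with
// extension_factor = 2 gives lut.domain_size() = 4096 and two_n_ext = 8192;
// every LWE coefficient is mod-switched to Z_{8192} and negated, so the
// rotations below apply X^{-a_i}.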
let a: &[i64] = &lwe_2n[1..];
let b: i64 = lwe_2n[0];
let b_pos: usize = ((lwe_2n[0] + two_n_ext as i64) % two_n_ext as i64) as usize;
let b_inner: i64 = b / extension_factor;
let b_outer: i64 = b % extension_factor;
let b_hi: usize = b_pos / extension_factor;
let b_lo: usize = b_pos % extension_factor;
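// Coefficient p of the degree-(n * extension_factor) LUT lives at coefficient
// p / extension_factor of sub-polynomial p % extension_factor. Rotating by
// b_pos therefore rotates every sub-polynomial by b_hi and cyclically shifts
// the sub-polynomials by b_lo; the b_lo slots that wrap around pick up one
// extra rotation (b_hi + 1).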
for (i, j) in (0..b_outer).zip(extension_factor - b_outer..extension_factor) {
module.vec_znx_rotate(
b_inner + 1,
&mut acc[j as usize].data,
0,
&lut.data[i as usize],
0,
);
for (i, j) in (0..b_lo).zip(extension_factor - b_lo..extension_factor) {
module.vec_znx_rotate(b_hi as i64 + 1, &mut acc[i].data, 0, &lut.data[j], 0);
}
for (i, j) in (b_outer..extension_factor).zip(0..extension_factor - b_outer) {
module.vec_znx_rotate(
b_inner,
&mut acc[j as usize].data,
0,
&lut.data[i as usize],
0,
);
for (i, j) in (b_lo..extension_factor).zip(0..extension_factor - b_lo) {
module.vec_znx_rotate(b_hi as i64, &mut acc[i].data, 0, &lut.data[j], 0);
}
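// Illustrative example: extension_factor = 2 and b_pos = 5 give b_hi = 2 and
// b_lo = 1, so acc[0] = X^3 * lut.data[1] (the wrapped slot) and
// acc[1] = X^2 * lut.data[0].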
let block_size: usize = brk.block_size();
let mut acc_dft: Vec<FourierGLWECiphertext<Vec<u8>, FFT64>> = Vec::with_capacity(lut.extension_factor());
for _ in 0..extension_factor {
acc_dft.push(FourierGLWECiphertext::alloc(
module,
basek,
out_mut.k(),
out_mut.rank(),
));
}
let mut vmp_res: Vec<FourierGLWECiphertext<Vec<u8>, FFT64>> = Vec::with_capacity(lut.extension_factor());
for _ in 0..extension_factor {
vmp_res.push(FourierGLWECiphertext::alloc(
module,
basek,
out_mut.k(),
out_mut.rank(),
));
}
let mut acc_add_dft: Vec<FourierGLWECiphertext<Vec<u8>, FFT64>> = Vec::with_capacity(lut.extension_factor());
for _ in 0..extension_factor {
acc_add_dft.push(FourierGLWECiphertext::alloc(
module,
basek,
out_mut.k(),
out_mut.rank(),
));
}
let mut xai_minus_one: backend::ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
let mut xai_minus_one_dft: backend::ScalarZnxDft<Vec<u8>, FFT64> = module.new_scalar_znx_dft(1);
izip!(
a.chunks_exact(block_size),
brk.data.chunks_exact(block_size)
)
.enumerate()
.for_each(|(i, (ai, ski))| {
(0..lut.extension_factor()).for_each(|i| {
acc[i].dft(module, &mut acc_dft[i]);
(0..extension_factor).for_each(|i| {
(0..cols).for_each(|j| {
module.vec_znx_dft(1, 0, &mut acc_dft[i].data, j, &acc[i].data, j);
});
acc_add_dft[i].data.zero();
});
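// acc_dft now holds DFT(acc) for every extension slot; acc_add_dft accumulates,
// in the Fourier domain, the updates contributed by all key bits of the current
// block before being folded back into acc.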
// TODO: first & last iterations can be optimized
izip!(ai.iter(), ski.iter()).for_each(|(aii, skii)| {
let aii_inner: i64 = aii / extension_factor;
let aii_outer: i64 = aii % extension_factor;
let ai_pos: usize = ((aii + two_n_ext as i64) % two_n_ext as i64) as usize;
let ai_hi: usize = ai_pos / extension_factor;
let ai_lo: usize = ai_pos % extension_factor;
// vmp_res = DFT(acc) * BRK[i]
(0..lut.extension_factor()).for_each(|i| {
module.vmp_apply(&mut vmp_res[i].data, &acc_dft[i].data, &skii.data, scratch);
(0..extension_factor).for_each(|i| {
module.vmp_apply(&mut vmp_res[i].data, &acc_dft[i].data, &skii.data, scratch7);
});
if aii_outer == 0 {
xai_minus_one.zero();
xai_minus_one.at_mut(0, 0)[0] = 1;
module.vec_znx_rotate_inplace(aii_inner, &mut xai_minus_one, 0);
xai_minus_one.at_mut(0, 0)[0] -= 1;
module.svp_prepare(&mut xai_minus_one_dft, 0, &xai_minus_one, 0);
// Trivial case: no rotation between polynomials, we can directly multiply with (X^{-ai} - 1)
if ai_lo == 0 {
// DFT X^{-ai}
set_xai_plus_y(
module,
ai_hi as i64,
-1,
&mut xai_plus_y_dft,
&mut xai_plus_y,
);
(0..lut.extension_factor()).for_each(|j| {
// Sets acc_add_dft[i] = (acc[i] * sk) * (X^{-ai} - 1)
(0..extension_factor).for_each(|j| {
(0..cols).for_each(|i| {
module.svp_apply_inplace(&mut vmp_res[j].data, i, &xai_minus_one_dft, 0);
module.svp_apply_inplace(&mut vmp_res[j].data, i, &xai_plus_y_dft, 0);
module.vec_znx_dft_add_inplace(&mut acc_add_dft[j].data, i, &vmp_res[j].data, i);
});
})
});
// Non-trivial case: rotation across polynomials.
// We can't directly multiply by (X^{-ai} - 1) here: under the ring
// homomorphism R^{N} -> prod R^{N/extension_factor} the monomial X^{-ai}
// permutes the sub-polynomials, so we split the computation in two steps:
// acc_add_dft = (acc * sk) * (-1) + (acc * sk) * X^{-ai}
} else {
xai_minus_one.zero();
xai_minus_one.at_mut(0, 0)[0] = 1;
module.vec_znx_rotate_inplace(aii_inner + 1, &mut xai_minus_one, 0);
xai_minus_one.at_mut(0, 0)[0] -= 1;
module.svp_prepare(&mut xai_minus_one_dft, 0, &xai_minus_one, 0);
for (i, j) in (0..aii_outer).zip(extension_factor - aii_outer..extension_factor) {
module.vec_znx_rotate(
b_inner + 1,
&mut acc[j as usize].data,
0,
&lut.data[i as usize],
0,
);
// Accumulates the (-1) term of the split into acc_add_dft[i]
(0..extension_factor).for_each(|i| {
(0..cols).for_each(|k| {
module.svp_apply_inplace(&mut vmp_res[i as usize].data, k, &xai_minus_one_dft, 0);
module.vec_znx_dft_add_inplace(
&mut acc_add_dft[j as usize].data,
k,
&vmp_res[i as usize].data,
k,
);
module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i].data, k, &vmp_res[i].data, k);
})
});
// DFT X^{-ai+1}
set_xai_plus_y(
module,
ai_hi as i64 + 1,
0,
&mut xai_plus_y_dft,
&mut xai_plus_y,
);
// Sets acc_add_dft[0..ai_lo] += (acc[extension_factor - ai_lo..extension_factor] * sk) * X^{-ai+1}
for (i, j) in (0..ai_lo).zip(extension_factor - ai_lo..extension_factor) {
module.vec_znx_rotate(b_hi as i64 + 1, &mut acc[i].data, 0, &lut.data[j], 0);
(0..cols).for_each(|k| {
module.svp_apply_inplace(&mut vmp_res[j].data, k, &xai_plus_y_dft, 0);
module.vec_znx_dft_add_inplace(&mut acc_add_dft[i].data, k, &vmp_res[j].data, k);
});
}
xai_minus_one.zero();
xai_minus_one.at_mut(0, 0)[0] = 1;
module.vec_znx_rotate_inplace(aii_inner, &mut xai_minus_one, 0);
xai_minus_one.at_mut(0, 0)[0] -= 1;
module.svp_prepare(&mut xai_minus_one_dft, 0, &xai_minus_one, 0);
for (i, j) in (aii_outer..extension_factor).zip(0..extension_factor - aii_outer) {
module.vec_znx_rotate(
b_inner,
&mut acc[j as usize].data,
0,
&lut.data[i as usize],
0,
);
// DFT X^{-ai}
set_xai_plus_y(
module,
ai_hi as i64,
0,
&mut xai_plus_y_dft,
&mut xai_plus_y,
);
// Sets acc_add_dft[ai_lo..extension_factor] += (acc[0..extension_factor - ai_lo] * sk) * X^{-ai}
for (i, j) in (ai_lo..extension_factor).zip(0..extension_factor - ai_lo) {
module.vec_znx_rotate(b_hi as i64, &mut acc[i].data, 0, &lut.data[j], 0);
(0..cols).for_each(|k| {
module.svp_apply_inplace(&mut vmp_res[i as usize].data, k, &xai_minus_one_dft, 0);
module.vec_znx_dft_add_inplace(
&mut acc_add_dft[j as usize].data,
k,
&vmp_res[i as usize].data,
k,
);
module.svp_apply_inplace(&mut vmp_res[j].data, k, &xai_plus_y_dft, 0);
module.vec_znx_dft_add_inplace(&mut acc_add_dft[i].data, k, &vmp_res[j].data, k);
});
}
}
});
if i == lwe.n() - block_size {
(0..extension_factor).for_each(|j| {
(0..cols).for_each(|i| {
module.vec_znx_dft_add_inplace(&mut acc_dft[0].data, i, &acc_add_dft[0].data, i);
module.vec_znx_dft_add_inplace(&mut acc_dft[j].data, i, &acc_add_dft[j].data, i);
module.vec_znx_idft(&mut acc_add_big, 0, &acc_dft[j].data, i, scratch7);
module.vec_znx_big_normalize(basek, &mut acc[j].data, i, &acc_add_big, 0, scratch7);
});
acc_dft[0].idft(module, &mut out_mut, scratch);
} else {
(0..lut.extension_factor()).for_each(|j| {
(0..cols).for_each(|i| {
module.vec_znx_dft_add_inplace(&mut acc_dft[j].data, i, &acc_add_dft[j].data, i);
});
acc_dft[j].idft(module, &mut acc[j], scratch);
})
}
});
});
(0..cols).for_each(|i| {
module.vec_znx_copy(&mut res.data, i, &acc[0].data, i);
});
}
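// Prepares DFT(X^k + y): writes the monomial X^k into `buf`, adds y to its
// constant coefficient, then converts the result to the SVP domain in `res`.
// Called with y = -1 to build X^{-ai} - 1 and with y = 0 for a plain monomial.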
fn set_xai_plus_y(
module: &Module<FFT64>,
k: i64,
y: i64,
res: &mut ScalarZnxDft<&mut [u8], FFT64>,
buf: &mut ScalarZnx<&mut [u8]>,
) {
buf.zero();
buf.at_mut(0, 0)[0] = 1;
module.vec_znx_rotate_inplace(k, buf, 0);
buf.at_mut(0, 0)[0] += y;
module.svp_prepare(res, 0, buf, 0);
}
pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn>(
@@ -270,7 +253,7 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn>(
let cols: usize = out_mut.rank() + 1;
mod_switch_2n(2 * module.n(), &mut lwe_2n, &lwe_ref);
negate_and_mod_switch_2n(2 * lut.domain_size(), &mut lwe_2n, &lwe_ref);
let a: &[i64] = &lwe_2n[1..];
let b: i64 = lwe_2n[0];
@@ -278,17 +261,17 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn>(
out_mut.data.zero();
// Initialize out to X^{b} * LUT(X)
module.vec_znx_rotate(b, &mut out_mut.data, 0, &lut.data[0], 0);
module.vec_znx_rotate(-b, &mut out_mut.data, 0, &lut.data[0], 0);
let block_size: usize = brk.block_size();
// ACC + [sum DFT(X^ai -1) * (DFT(ACC) x BRKi)]
let (mut acc_dft, scratch1) = scratch.tmp_glwe_fourier(module, brk.basek(), out_mut.k(), out_mut.rank());
let (mut acc_add_dft, scratch2) = scratch1.tmp_glwe_fourier(module, brk.basek(), out_mut.k(), out_mut.rank());
let (mut vmp_res, scratch3) = scratch2.tmp_glwe_fourier(module, basek, out_mut.k(), out_mut.rank());
let (mut xai_minus_one, scratch4) = scratch3.tmp_scalar_znx(module, 1);
let (mut xai_minus_one_dft, scratch5) = scratch4.tmp_scalar_znx_dft(module, 1);
let (mut acc_dft, scratch1) = scratch.tmp_fourier_glwe_ct(module, brk.basek(), out_mut.k(), out_mut.rank());
let (mut acc_add_dft, scratch2) = scratch1.tmp_fourier_glwe_ct(module, brk.basek(), out_mut.k(), out_mut.rank());
let (mut vmp_res, scratch3) = scratch2.tmp_fourier_glwe_ct(module, basek, out_mut.k(), out_mut.rank());
let (mut xai_plus_y, scratch4) = scratch3.tmp_scalar_znx(module, 1);
let (mut xai_plus_y_dft, scratch5) = scratch4.tmp_scalar_znx_dft(module, 1);
let start: Instant = Instant::now();
izip!(
@@ -304,15 +287,11 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn>(
module.vmp_apply(&mut vmp_res.data, &acc_dft.data, &skii.data, scratch5);
// DFT(X^ai -1)
xai_minus_one.zero();
xai_minus_one.at_mut(0, 0)[0] = 1;
module.vec_znx_rotate_inplace(*aii, &mut xai_minus_one, 0);
xai_minus_one.at_mut(0, 0)[0] -= 1;
module.svp_prepare(&mut xai_minus_one_dft, 0, &xai_minus_one, 0);
set_xai_plus_y(module, *aii, -1, &mut xai_plus_y_dft, &mut xai_plus_y);
// DFT(X^ai -1) * (DFT(acc) * BRK[i])
(0..cols).for_each(|i| {
module.svp_apply_inplace(&mut vmp_res.data, i, &xai_minus_one_dft, 0);
module.svp_apply_inplace(&mut vmp_res.data, i, &xai_plus_y_dft, 0);
module.vec_znx_dft_add_inplace(&mut acc_add_dft.data, i, &vmp_res.data, i);
});
});
@@ -324,15 +303,15 @@ pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn>(
acc_dft.idft(module, &mut out_mut, scratch5);
});
let duration: std::time::Duration = start.elapsed();
println!("external products: {} us", duration.as_micros());
}
pub(crate) fn mod_switch_2n(n: usize, res: &mut [i64], lwe: &LWECiphertext<&[u8]>) {
pub(crate) fn negate_and_mod_switch_2n(n: usize, res: &mut [i64], lwe: &LWECiphertext<&[u8]>) {
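// Mod-switches every coefficient of the LWE ciphertext to Z_n (n is expected
// to be twice the LUT domain size) and negates it, so callers can use the
// results directly as exponents of X^{-a_i}.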
let basek: usize = lwe.basek();
let log2n: usize = usize::BITS as usize - (n - 1).leading_zeros() as usize + 1;
res.copy_from_slice(&lwe.data.at(0, 0));
res.iter_mut().for_each(|x| *x = -*x);
if basek > log2n {
let diff: usize = basek - log2n;

View File

@@ -46,8 +46,13 @@ impl BlindRotationKeyCGGI<FFT64> {
assert_eq!(sk_glwe.n(), module.n());
assert_eq!(sk_glwe.rank(), self.data[0].rank());
match sk_lwe.dist {
Distribution::BinaryBlock(_) | Distribution::BinaryFixed(_) | Distribution::BinaryProb(_) => {}
_ => panic!("invalid GLWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb"),
Distribution::BinaryBlock(_)
| Distribution::BinaryFixed(_)
| Distribution::BinaryProb(_)
| Distribution::ZERO => {}
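// ZERO is tolerated so key generation can be exercised with an all-zero LWE
// secret while debugging (see the panic message below).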
_ => panic!(
"invalid GLWESecret distribution: must be BinaryBlock, BinaryFixed or BinaryProb (or ZERO for debugging)"
),
}
}
@@ -79,6 +84,11 @@ impl BlindRotationKeyCGGI<FFT64> {
self.data[0].k()
}
#[allow(dead_code)]
pub(crate) fn size(&self) -> usize {
self.data[0].size()
}
#[allow(dead_code)]
pub(crate) fn rank(&self) -> usize {
self.data[0].rank()

View File

@@ -1,4 +1,4 @@
use backend::{FFT64, Module, VecZnx, VecZnxAlloc, VecZnxOps, ZnxInfos, ZnxViewMut, alloc_aligned};
use backend::{FFT64, Module, ScalarZnx, VecZnx, VecZnxAlloc, VecZnxOps, ZnxInfos, ZnxView, ZnxViewMut, alloc_aligned};
pub struct LookUpTable {
pub(crate) data: Vec<VecZnx<Vec<u8>>>,
@@ -84,6 +84,31 @@ impl LookUpTable {
}
}
pub fn set_raw<D>(&mut self, module: &Module<FFT64>, lut: &ScalarZnx<D>)
where
D: AsRef<[u8]>,
{
let domain_size: usize = self.domain_size();
let size: usize = self.k.div_ceil(self.basek);
let mut lut_full: VecZnx<Vec<u8>> = VecZnx::new::<i64>(domain_size, 1, size);
lut_full.at_mut(0, 0).copy_from_slice(lut.raw());
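// Assumed semantics of switch_degree: subsample every extension_factor-th
// coefficient, so slot i receives the coefficients of lut_full congruent to i
// modulo extension_factor; the rotate(-1) in between moves the next residue
// class into place.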
if self.extension_factor() > 1 {
(0..self.extension_factor()).for_each(|i| {
module.switch_degree(&mut self.data[i], 0, &lut_full, 0);
// skip the final rotation: every residue class has been extracted
if i + 1 < self.extension_factor() {
lut_full.rotate(-1);
}
});
} else {
module.vec_znx_copy(&mut self.data[0], 0, &lut_full, 0);
}
}
#[allow(dead_code)]
pub(crate) fn rotate(&mut self, k: i64) {
let extension_factor: usize = self.extension_factor();
let two_n: usize = 2 * self.data[0].n();

View File

@@ -1,12 +1,12 @@
use std::time::Instant;
use backend::{Encoding, FFT64, Module, ScratchOwned, Stats, VecZnxOps, ZnxView};
use backend::{Encoding, FFT64, Module, ScalarZnx, ScratchOwned, Stats, VecZnxOps, ZnxView, ZnxViewMut};
use sampling::source::Source;
use crate::{
FourierGLWESecret, GLWECiphertext, GLWEPlaintext, GLWESecret, Infos, LWECiphertext, LWESecret,
blind_rotation::{
ccgi::{cggi_blind_rotate, cggi_blind_rotate_scratch_space, mod_switch_2n},
ccgi::{cggi_blind_rotate, cggi_blind_rotate_scratch_space, negate_and_mod_switch_2n},
key::BlindRotationKeyCGGI,
lut::LookUpTable,
},
@@ -16,22 +16,24 @@ use crate::{
#[test]
fn blind_rotation() {
let module: Module<FFT64> = Module::<FFT64>::new(2048);
let basek: usize = 20;
let basek: usize = 18;
let n_lwe: usize = 1071;
let k_lwe: usize = 22;
let k_brk: usize = 60;
let k_lwe: usize = 24;
let k_brk: usize = 3 * basek;
let rows_brk: usize = 2;
let k_lut: usize = 60;
let k_lut: usize = 2 * basek;
let rank: usize = 1;
let block_size: usize = 7;
let message_modulus: usize = 64;
let extension_factor: usize = 2;
let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
let mut source_xa: Source = Source::new([0u8; 32]);
let message_modulus: usize = 1 << 6;
let mut source_xs: Source = Source::new([1u8; 32]);
let mut source_xe: Source = Source::new([1u8; 32]);
let mut source_xa: Source = Source::new([1u8; 32]);
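// Derived sizes: k_brk = 54 and k_lut = 36 (three and two base-2^18 limbs);
// extension_factor = 2 doubles the LUT domain to 4096 while the ring degree
// stays at 2048.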
let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
sk_glwe.fill_ternary_prob(0.5, &mut source_xs);
@@ -40,9 +42,21 @@ fn blind_rotation() {
let mut sk_lwe: LWESecret<Vec<u8>> = LWESecret::alloc(n_lwe);
sk_lwe.fill_binary_block(block_size, &mut source_xs);
sk_lwe.data.raw_mut()[0] = 0;
println!("sk_lwe: {:?}", sk_lwe.data.raw());
let mut scratch: ScratchOwned = ScratchOwned::new(
BlindRotationKeyCGGI::generate_from_sk_scratch_space(&module, basek, k_brk, rank)
| cggi_blind_rotate_scratch_space(&module, basek, k_lut, k_brk, rows_brk, rank),
| cggi_blind_rotate_scratch_space(
&module,
extension_factor,
basek,
k_lut,
k_brk,
rows_brk,
rank,
),
);
let start: Instant = Instant::now();
@@ -65,8 +79,8 @@ fn blind_rotation() {
let mut pt_lwe: LWEPlaintext<Vec<u8>> = LWEPlaintext::alloc(basek, k_lwe);
let x: i64 = 0;
let bits: usize = 6;
let x: i64 = 1;
let bits: usize = 8;
pt_lwe.data.encode_coeff_i64(0, basek, bits, 0, x, bits);
@@ -82,7 +96,7 @@ fn blind_rotation() {
2 * x + 1
}
let mut lut: LookUpTable = LookUpTable::alloc(&module, basek, k_lut, 1);
let mut lut: LookUpTable = LookUpTable::alloc(&module, basek, k_lut, extension_factor);
lut.set(&module, lut_fn, message_modulus);
let mut res: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_lut, rank);
@@ -103,7 +117,7 @@ fn blind_rotation() {
let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
mod_switch_2n(module.n() * 2, &mut lwe_2n, &lwe.to_ref());
negate_and_mod_switch_2n(2 * lut.domain_size(), &mut lwe_2n, &lwe.to_ref());
let pt_want: i64 = (lwe_2n[0]
+ lwe_2n[1..]
@@ -111,15 +125,22 @@ fn blind_rotation() {
.zip(sk_lwe.data.at(0, 0))
.map(|(x, y)| x * y)
.sum::<i64>())
% (module.n() as i64 * 2);
% (2 * lut.domain_size()) as i64;
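// pt_want is the phase -(b + <a, s>) mod 2 * lut.domain_size() (the
// coefficients in lwe_2n are already negated), i.e. the exponent by which the
// blind rotation should have rotated the LUT.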
module.vec_znx_rotate_inplace(pt_want, &mut lut.data[0], 0);
println!("pt_want: {}", pt_want);
println!("pt_want: {}", lut.data[0]);
lut.rotate(pt_want);
lut.data.iter().for_each(|d| {
println!("{}", d);
});
// First limb should be exactly equal (tests are parameterized such that the noise
// does not reach the first limb)
// assert_eq!(pt_have.data.at_mut(0, 0), lut.data[0].at_mut(0, 0));
// Then checks the noise
module.vec_znx_sub_ab_inplace(&mut lut.data[0], 0, &pt_have.data, 0);
let noise: f64 = lut.data[0].std(0, basek);
println!("noise: {}", noise);
}

View File

@@ -25,7 +25,7 @@ fn standard() {
let step: usize = lut.domain_size().div_round(message_modulus);
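// Each of the message_modulus values occupies `step` consecutive coefficients
// of the LUT; the nested loops below walk all of them.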
(0..lut.domain_size()).step_by(step).for_each(|i| {
(0..step).for_each(|j| {
(0..step).for_each(|_| {
assert_eq!(
lut_fn((i / step) as i64) % message_modulus as i64,
lut.data[0].raw()[0] / scale as i64
@@ -58,7 +58,7 @@ fn extended() {
let step: usize = lut.domain_size().div_round(message_modulus);
(0..lut.domain_size()).step_by(step).for_each(|i| {
(0..step).for_each(|j| {
(0..step).for_each(|_| {
assert_eq!(
lut_fn((i / step) as i64) % message_modulus as i64,
lut.data[0].raw()[0] / scale as i64