From 52a6a130a595cd47023784d071e32d2e0105caa0 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Bossuat
Date: Fri, 11 Jul 2025 12:29:49 +0200
Subject: [PATCH] Fixes after meeting

---
 backend/src/lib.rs                          |   8 +-
 backend/src/vec_znx.rs                      |   8 +-
 core/src/blind_rotation/cggi.rs             | 168 ++++++++++-----------
 core/src/blind_rotation/key.rs              | 120 +++++++++++----
 core/src/blind_rotation/lut.rs              |   4 +
 core/src/blind_rotation/test_fft64/cggi.rs  |  31 +---
 6 files changed, 188 insertions(+), 151 deletions(-)

diff --git a/backend/src/lib.rs b/backend/src/lib.rs
index e7c8e5e..8ac50ce 100644
--- a/backend/src/lib.rs
+++ b/backend/src/lib.rs
@@ -240,11 +240,11 @@ impl Scratch {
     ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self) {
         let mut scratch: &mut Scratch = self;
         let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(slice_size);
-        for _ in 0..slice_size{
+        for _ in 0..slice_size {
             let (znx, new_scratch) = scratch.tmp_vec_znx_dft(module, cols, size);
             scratch = new_scratch;
             slice.push(znx);
-        };
+        }
         (slice, scratch)
     }

@@ -279,11 +279,11 @@ impl Scratch {
     ) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
         let mut scratch: &mut Scratch = self;
         let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(slice_size);
-        for _ in 0..slice_size{
+        for _ in 0..slice_size {
             let (znx, new_scratch) = scratch.tmp_vec_znx(module, cols, size);
             scratch = new_scratch;
             slice.push(znx);
-        };
+        }
         (slice, scratch)
     }

diff --git a/backend/src/vec_znx.rs b/backend/src/vec_znx.rs
index 00568dd..74f9f86 100644
--- a/backend/src/vec_znx.rs
+++ b/backend/src/vec_znx.rs
@@ -111,10 +111,10 @@ impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnx<D> {
         }
     }

-    pub fn rotate(&mut self, k: i64){
-        unsafe{
-            (0..self.cols()).for_each(|i|{
-                (0..self.size()).for_each(|j|{
+    pub fn rotate(&mut self, k: i64) {
+        unsafe {
+            (0..self.cols()).for_each(|i| {
+                (0..self.size()).for_each(|j| {
                     znx::znx_rotate_inplace_i64(self.n() as u64, k, self.at_mut_ptr(i, j));
                 });
             })

diff --git a/core/src/blind_rotation/cggi.rs b/core/src/blind_rotation/cggi.rs
index 3e0dc33..2cf1d88 100644
--- a/core/src/blind_rotation/cggi.rs
+++ b/core/src/blind_rotation/cggi.rs
@@ -1,5 +1,5 @@
 use backend::{
-    FFT64, MatZnxDftOps, MatZnxDftScratch, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps,
+    FFT64, MatZnxDftOps, MatZnxDftScratch, Module, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps,
     Scratch, VecZnxAlloc, VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDftAlloc, VecZnxDftOps, VecZnxOps, ZnxView,
     ZnxViewMut, ZnxZero,
 };
@@ -30,7 +30,7 @@ pub fn cggi_blind_rotate_scratch_space(
     let acc_big: usize = module.bytes_of_vec_znx_big(1, brk_size);
     let vmp_res: usize = module.bytes_of_vec_znx_dft(cols, brk_size) * extension_factor;
     let acc_dft_add: usize = vmp_res;
-    let xai_plus_y: usize = module.bytes_of_scalar_znx(1);
+    let xai_plus_y: usize = module.bytes_of_scalar_znx_dft(1);
     let xai_plus_y_dft: usize = module.bytes_of_scalar_znx_dft(1);
     let vmp: usize = module.vmp_apply_tmp_bytes(brk_size, rows, rows, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
@@ -54,16 +54,17 @@
     }
 }

-pub fn cggi_blind_rotate<DataRes, DataIn>(
+pub fn cggi_blind_rotate<DataRes, DataIn, DataBrk>(
     module: &Module<FFT64>,
     res: &mut GLWECiphertext<DataRes>,
     lwe: &LWECiphertext<DataIn>,
     lut: &LookUpTable,
-    brk: &BlindRotationKeyCGGI<FFT64>,
+    brk: &BlindRotationKeyCGGI<DataBrk, FFT64>,
     scratch: &mut Scratch,
 ) where
     DataRes: AsRef<[u8]> + AsMut<[u8]>,
     DataIn: AsRef<[u8]>,
+    DataBrk: AsRef<[u8]>,
 {
     match brk.dist {
         Distribution::BinaryBlock(_) | Distribution::BinaryFixed(_) | Distribution::BinaryProb(_) | Distribution::ZERO => {
@@ -82,16 +83,17 @@
     }
 }

-pub(crate) fn cggi_blind_rotate_block_binary_extended<DataRes, DataIn>(
+pub(crate) fn cggi_blind_rotate_block_binary_extended<DataRes, DataIn, DataBrk>(
     module: &Module<FFT64>,
     res: &mut GLWECiphertext<DataRes>,
     lwe: &LWECiphertext<DataIn>,
     lut: &LookUpTable,
-    brk: &BlindRotationKeyCGGI<FFT64>,
+    brk: &BlindRotationKeyCGGI<DataBrk, FFT64>,
     scratch: &mut Scratch,
 ) where
     DataRes: AsRef<[u8]> + AsMut<[u8]>,
     DataIn: AsRef<[u8]>,
+    DataBrk: AsRef<[u8]>,
 {
     let extension_factor: usize = lut.extension_factor();
     let basek: usize = res.basek();
@@ -102,25 +104,35 @@
     let (mut acc_dft, scratch2) = scratch1.tmp_slice_vec_znx_dft(extension_factor, module, cols, rows);
     let (mut vmp_res, scratch3) = scratch2.tmp_slice_vec_znx_dft(extension_factor, module, cols, brk.size());
     let (mut acc_add_dft, scratch4) = scratch3.tmp_slice_vec_znx_dft(extension_factor, module, cols, brk.size());
-    let (mut xai_plus_y, scratch5) = scratch4.tmp_scalar_znx(module, 1);
+    let (mut minus_one, scratch5) = scratch4.tmp_scalar_znx_dft(module, 1);
     let (mut xai_plus_y_dft, scratch6) = scratch5.tmp_scalar_znx_dft(module, 1);

+    minus_one.raw_mut()[..module.n() >> 1].fill(-1.0);
+
     (0..extension_factor).for_each(|i| {
         acc[i].zero();
     });

+    let x_pow_a: &Vec<ScalarZnxDft<Vec<u8>, FFT64>>;
+    if let Some(b) = &brk.x_pow_a {
+        x_pow_a = b
+    } else {
+        panic!("invalid key: x_pow_a has not been initialized")
+    }
+
     let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
     let lwe_ref: LWECiphertext<&[u8]> = lwe.to_ref();

+    let two_n: usize = 2 * module.n();
     let two_n_ext: usize = 2 * lut.domain_size();

     negate_and_mod_switch_2n(two_n_ext, &mut lwe_2n, &lwe_ref);

     let a: &[i64] = &lwe_2n[1..];
-    let b_pos: usize = ((lwe_2n[0] + two_n_ext as i64) % two_n_ext as i64) as usize;
+    let b_pos: usize = ((lwe_2n[0] + two_n_ext as i64) & (two_n_ext - 1) as i64) as usize;
     let b_hi: usize = b_pos / extension_factor;
-    let b_lo: usize = b_pos % extension_factor;
+    let b_lo: usize = b_pos & (extension_factor - 1);

     for (i, j) in (0..b_lo).zip(extension_factor - b_lo..extension_factor) {
         module.vec_znx_rotate(b_hi as i64 + 1, &mut acc[i], 0, &lut.data[j], 0);
@@ -145,9 +157,9 @@
         // TODO: first & last iterations can be optimized
         izip!(ai.iter(), ski.iter()).for_each(|(aii, skii)| {
-            let ai_pos: usize = ((aii + two_n_ext as i64) % two_n_ext as i64) as usize;
+            let ai_pos: usize = ((aii + two_n_ext as i64) & (two_n_ext - 1) as i64) as usize;
             let ai_hi: usize = ai_pos / extension_factor;
-            let ai_lo: usize = ai_pos % extension_factor;
+            let ai_lo: usize = ai_pos & (extension_factor - 1);

             // vmp_res = DFT(acc) * BRK[i]
             (0..extension_factor).for_each(|i| {
@@ -156,48 +168,62 @@
             // Trivial case: no rotation between polynomials, we can directly multiply with (X^{-ai} - 1)
             if ai_lo == 0 {
-                // DFT X^{-ai}
-                set_xai_plus_y(module, ai_hi, -1, &mut xai_plus_y_dft, &mut xai_plus_y);
-                // Sets acc_add_dft[i] = (acc[i] * sk) * (X^{-ai} - 1)
-                (0..extension_factor).for_each(|j| {
-                    (0..cols).for_each(|i| {
-                        module.svp_apply_inplace(&mut vmp_res[j], i, &xai_plus_y_dft, 0);
-                        module.vec_znx_dft_add_inplace(&mut acc_add_dft[j], i, &vmp_res[j], i);
+                if ai_hi != 0 {
+                    // DFT X^{-ai}
+                    module.vec_znx_dft_add(&mut xai_plus_y_dft, 0, &x_pow_a[ai_hi], 0, &minus_one, 0);
+                    (0..extension_factor).for_each(|j| {
+                        (0..cols).for_each(|i| {
+                            module.svp_apply_inplace(&mut vmp_res[j], i, &xai_plus_y_dft, 0);
+                            module.vec_znx_dft_add_inplace(&mut acc_add_dft[j], i, &vmp_res[j], i);
+                        });
                     });
-                });
+                }
+            // Non-trivial case: rotation between polynomials.
+            // In this case we can't directly multiply with (X^{-ai} - 1) because of the
+            // ring homomorphism R^{N} -> prod R^{N/extension_factor}, so we split the
+            // computation in two steps: acc_add_dft = (acc * sk) * (-1) + (acc * sk) * X^{-ai}
             } else {
                 // Sets acc_add_dft[i] = acc[i] * sk
-                (0..extension_factor).for_each(|i| {
-                    (0..cols).for_each(|k| {
-                        module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
-                    })
-                });
-
-                // DFT X^{-ai}
-                set_xai_plus_y(module, ai_hi + 1, 0, &mut xai_plus_y_dft, &mut xai_plus_y);
-
-                // Sets acc_add_dft[0..ai_lo] += (acc[extension_factor - ai_lo..extension_factor] * sk) * X^{-ai+1}
-                for (i, j) in (0..ai_lo).zip(extension_factor - ai_lo..extension_factor) {
-                    (0..cols).for_each(|k| {
-                        module.svp_apply_inplace(&mut vmp_res[j], k, &xai_plus_y_dft, 0);
-                        module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_res[j], k);
-                    });
+                // Sets acc_add_dft[0..ai_lo] -= acc[..ai_lo] * sk
+                if (ai_hi + 1) & (two_n - 1) != 0 {
+                    for i in 0..ai_lo {
+                        (0..cols).for_each(|k| {
+                            module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
+                        });
+                    }
                 }

-                // DFT X^{-ai}
-                set_xai_plus_y(module, ai_hi, 0, &mut xai_plus_y_dft, &mut xai_plus_y);
+                // Sets acc_add_dft[ai_lo..extension_factor] -= acc[ai_lo..extension_factor] * sk
+                if ai_hi != 0 {
+                    for i in ai_lo..extension_factor {
+                        (0..cols).for_each(|k: usize| {
+                            module.vec_znx_dft_sub_ab_inplace(&mut acc_add_dft[i], k, &vmp_res[i], k);
+                        });
+                    }
+                }
+
+                // Sets acc_add_dft[0..ai_lo] += (acc[extension_factor - ai_lo..extension_factor] * sk) * X^{-ai+1}
+                if (ai_hi + 1) & (two_n - 1) != 0 {
+                    for (i, j) in (0..ai_lo).zip(extension_factor - ai_lo..extension_factor) {
+                        (0..cols).for_each(|k| {
+                            module.svp_apply_inplace(&mut vmp_res[j], k, &x_pow_a[ai_hi + 1], 0);
+                            module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_res[j], k);
+                        });
+                    }
+                }

                 // Sets acc_add_dft[ai_lo..extension_factor] += (acc[0..extension_factor - ai_lo] * sk) * X^{-ai}
-                for (i, j) in (ai_lo..extension_factor).zip(0..extension_factor - ai_lo) {
-                    (0..cols).for_each(|k| {
-                        module.svp_apply_inplace(&mut vmp_res[j], k, &xai_plus_y_dft, 0);
-                        module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_res[j], k);
-                    });
+                if ai_hi != 0 {
+                    // Sets acc_add_dft[ai_lo..extension_factor] += (acc[0..extension_factor - ai_lo] * sk) * X^{-ai}
+                    for (i, j) in (ai_lo..extension_factor).zip(0..extension_factor - ai_lo) {
+                        (0..cols).for_each(|k| {
+                            module.svp_apply_inplace(&mut vmp_res[j], k, &x_pow_a[ai_hi], 0);
+                            module.vec_znx_dft_add_inplace(&mut acc_add_dft[i], k, &vmp_res[j], k);
+                        });
+                    }
                 }
             }
         });
@@ -220,49 +246,17 @@
     });
 }

-fn set_xai_plus_y(
-    module: &Module<FFT64>,
-    ai: usize,
-    y: i64,
-    res: &mut ScalarZnxDft<&mut [u8], FFT64>,
-    buf: &mut ScalarZnx<&mut [u8]>,
-) {
-    let n: usize = module.n();
-
-    {
-        let raw: &mut [i64] = buf.at_mut(0, 0);
-        if ai < n {
-            raw[ai] = 1;
-        } else {
-            raw[(ai - n) & (n - 1)] = -1;
-        }
-        raw[0] += y;
-    }
-
-    module.svp_prepare(res, 0, buf, 0);
-
-    {
-        let raw: &mut [i64] = buf.at_mut(0, 0);
-
-        if ai < n {
-            raw[ai] = 0;
-        } else {
-            raw[(ai - n) & (n - 1)] = 0;
-        }
-        raw[0] = 0;
-    }
-}
-
-pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn>(
+pub(crate) fn cggi_blind_rotate_block_binary<DataRes, DataIn, DataBrk>(
     module: &Module<FFT64>,
     res: &mut GLWECiphertext<DataRes>,
     lwe: &LWECiphertext<DataIn>,
     lut: &LookUpTable,
-    brk: &BlindRotationKeyCGGI<FFT64>,
+    brk: &BlindRotationKeyCGGI<DataBrk, FFT64>,
     scratch: &mut Scratch,
 ) where
     DataRes: AsRef<[u8]> + AsMut<[u8]>,
     DataIn: AsRef<[u8]>,
+    DataBrk: AsRef<[u8]>,
 {
     let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
     let mut out_mut: GLWECiphertext<&mut [u8]> = res.to_mut();
@@ -290,9 +284,18 @@
     let (mut acc_dft, scratch1) = scratch.tmp_vec_znx_dft(module, cols, rows);
     let (mut vmp_res, scratch2) = scratch1.tmp_vec_znx_dft(module, cols, brk.size());
     let (mut acc_add_dft, scratch3) = scratch2.tmp_vec_znx_dft(module, cols, brk.size());
-    let (mut xai_plus_y, scratch4) = scratch3.tmp_scalar_znx(module, 1);
+    let (mut minus_one, scratch4) = scratch3.tmp_scalar_znx_dft(module, 1);
     let (mut xai_plus_y_dft, scratch5) = scratch4.tmp_scalar_znx_dft(module, 1);

+    minus_one.raw_mut()[..module.n() >> 1].fill(-1.0);
+
+    let x_pow_a: &Vec<ScalarZnxDft<Vec<u8>, FFT64>>;
+    if let Some(b) = &brk.x_pow_a {
+        x_pow_a = b
+    } else {
+        panic!("invalid key: x_pow_a has not been initialized")
+    }
+
     izip!(
         a.chunks_exact(block_size),
         brk.data.chunks_exact(block_size)
@@ -305,13 +308,13 @@
         acc_add_dft.zero();

         izip!(ai.iter(), ski.iter()).for_each(|(aii, skii)| {
-            let ai_pos: usize = ((aii + two_n as i64) % two_n as i64) as usize;
+            let ai_pos: usize = ((aii + two_n as i64) & (two_n - 1) as i64) as usize;

             // vmp_res = DFT(acc) * BRK[i]
             module.vmp_apply(&mut vmp_res, &acc_dft, &skii.data, scratch5);

             // DFT(X^ai -1)
-            set_xai_plus_y(module, ai_pos, -1, &mut xai_plus_y_dft, &mut xai_plus_y);
+            module.vec_znx_dft_add(&mut xai_plus_y_dft, 0, &x_pow_a[ai_pos], 0, &minus_one, 0);

             // DFT(X^ai -1) * (DFT(acc) * BRK[i])
             (0..cols).for_each(|i| {
@@ -320,10 +323,6 @@
             });
         });

-        (0..cols).for_each(|i| {
-            module.vec_znx_dft_add_inplace(&mut acc_dft, i, &acc_add_dft, i);
-        });
-
         {
             let (mut acc_add_big, scratch6) = scratch5.tmp_vec_znx_big(module, 1, brk.size());
@@ -336,16 +335,17 @@
     });
 }

-pub(crate) fn cggi_blind_rotate_binary_standard<DataRes, DataIn>(
+pub(crate) fn cggi_blind_rotate_binary_standard<DataRes, DataIn, DataBrk>(
     module: &Module<FFT64>,
     res: &mut GLWECiphertext<DataRes>,
     lwe: &LWECiphertext<DataIn>,
     lut: &LookUpTable,
-    brk: &BlindRotationKeyCGGI<FFT64>,
+    brk: &BlindRotationKeyCGGI<DataBrk, FFT64>,
     scratch: &mut Scratch,
 ) where
     DataRes: AsRef<[u8]> + AsMut<[u8]>,
     DataIn: AsRef<[u8]>,
+    DataBrk: AsRef<[u8]>,
 {
     #[cfg(debug_assertions)]
     {

diff --git a/core/src/blind_rotation/key.rs b/core/src/blind_rotation/key.rs
index b83d60c..01511c3 100644
--- a/core/src/blind_rotation/key.rs
+++ b/core/src/blind_rotation/key.rs
@@ -1,11 +1,15 @@
-use backend::{Backend, FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxToRef, Scratch, ZnxView, ZnxViewMut};
+use backend::{
+    Backend, FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScalarZnxToRef, Scratch,
+    ZnxView, ZnxViewMut,
+};
 use sampling::source::Source;

 use crate::{Distribution, FourierGLWESecret, GGSWCiphertext, Infos, LWESecret};

-pub struct BlindRotationKeyCGGI<B: Backend> {
-    pub(crate) data: Vec<GGSWCiphertext<Vec<u8>, B>>,
+pub struct BlindRotationKeyCGGI<D, B: Backend> {
+    pub(crate) data: Vec<GGSWCiphertext<D, B>>,
     pub(crate) dist: Distribution,
+    pub(crate) x_pow_a: Option<Vec<ScalarZnxDft<Vec<u8>, B>>>,
 }

@@ -13,20 +17,61 @@
 // pub struct BlindRotationKeyFHEW<B: Backend> {
 //     pub(crate) data: Vec<GGSWCiphertext<Vec<u8>, B>>,
 //     pub(crate) auto: Vec<GLWEAutomorphismKey<Vec<u8>, B>>,
 //}

-impl BlindRotationKeyCGGI<FFT64> {
+impl BlindRotationKeyCGGI<Vec<u8>, FFT64> {
     pub fn allocate(module: &Module<FFT64>, n_lwe: usize, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
         let mut data: Vec<GGSWCiphertext<Vec<u8>, FFT64>> = Vec::with_capacity(n_lwe);
         (0..n_lwe).for_each(|_| data.push(GGSWCiphertext::alloc(module, basek, k, rows, 1, rank)));
         Self {
             data,
             dist: Distribution::NONE,
+            x_pow_a: None::<Vec<ScalarZnxDft<Vec<u8>, FFT64>>>,
         }
     }

     pub fn generate_from_sk_scratch_space(module: &Module<FFT64>, basek: usize, k: usize, rank: usize) -> usize {
         GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k, rank)
     }
+}

+impl<D: AsRef<[u8]>> BlindRotationKeyCGGI<D, FFT64> {
+    #[allow(dead_code)]
+    pub(crate) fn n(&self) -> usize {
+        self.data[0].n()
+    }
+
+    #[allow(dead_code)]
+    pub(crate) fn rows(&self) -> usize {
+        self.data[0].rows()
+    }
+
+    #[allow(dead_code)]
+    pub(crate) fn k(&self) -> usize {
+        self.data[0].k()
+    }
+
+    #[allow(dead_code)]
+    pub(crate) fn size(&self) -> usize {
+        self.data[0].size()
+    }
+
+    #[allow(dead_code)]
+    pub(crate) fn rank(&self) -> usize {
+        self.data[0].rank()
+    }
+
+    pub(crate) fn basek(&self) -> usize {
+        self.data[0].basek()
+    }
+
+    pub(crate) fn block_size(&self) -> usize {
+        match self.dist {
+            Distribution::BinaryBlock(value) => value,
+            _ => 1,
+        }
+    }
+}
+
+impl<D: AsRef<[u8]> + AsMut<[u8]>> BlindRotationKeyCGGI<D, FFT64> {
     pub fn generate_from_sk(
         &mut self,
         module: &Module<FFT64>,
@@ -64,42 +109,51 @@
         self.data.iter_mut().enumerate().for_each(|(i, ggsw)| {
             pt.at_mut(0, 0)[0] = sk_ref.at(0, 0)[i];
             ggsw.encrypt_sk(module, &pt, sk_glwe, source_xa, source_xe, sigma, scratch);
-        })
-    }
+        });

-    pub(crate) fn block_size(&self) -> usize {
-        match self.dist {
-            Distribution::BinaryBlock(value) => value,
-            _ => 1,
+        match sk_lwe.dist {
+            Distribution::BinaryBlock(_) => {
+                let mut x_pow_a: Vec<ScalarZnxDft<Vec<u8>, FFT64>> = Vec::with_capacity(module.n() << 1);
+                let mut buf: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
+                (0..module.n() << 1).for_each(|i| {
+                    let mut res: ScalarZnxDft<Vec<u8>, FFT64> = module.new_scalar_znx_dft(1);
+                    set_xai_plus_y(module, i, 0, &mut res, &mut buf);
+                    x_pow_a.push(res);
+                });
+                self.x_pow_a = Some(x_pow_a);
+            }
+            _ => {}
         }
     }
+}

-    #[allow(dead_code)]
-    pub(crate) fn n(&self) -> usize {
-        self.data[0].n()
+pub fn set_xai_plus_y<A, B>(module: &Module<FFT64>, ai: usize, y: i64, res: &mut ScalarZnxDft<A, FFT64>, buf: &mut ScalarZnx<B>)
+where
+    A: AsRef<[u8]> + AsMut<[u8]>,
+    B: AsRef<[u8]> + AsMut<[u8]>,
+{
+    let n: usize = module.n();
+
+    {
+        let raw: &mut [i64] = buf.at_mut(0, 0);
+        if ai < n {
+            raw[ai] = 1;
+        } else {
+            raw[(ai - n) & (n - 1)] = -1;
+        }
+        raw[0] += y;
     }

-    #[allow(dead_code)]
-    pub(crate) fn rows(&self) -> usize {
-        self.data[0].rows()
-    }
+    module.svp_prepare(res, 0, buf, 0);

-    #[allow(dead_code)]
-    pub(crate) fn k(&self) -> usize {
-        self.data[0].k()
-    }
+    {
+        let raw: &mut [i64] = buf.at_mut(0, 0);

-    #[allow(dead_code)]
-    pub(crate) fn size(&self) -> usize {
-        self.data[0].size()
-    }
-
-    #[allow(dead_code)]
-    pub(crate) fn rank(&self) -> usize {
-        self.data[0].rank()
-    }
-
-    pub(crate) fn basek(&self) -> usize {
-        self.data[0].basek()
+        if ai < n {
+            raw[ai] = 0;
+        } else {
+            raw[(ai - n) & (n - 1)] = 0;
+        }
+        raw[0] = 0;
     }
 }

diff --git a/core/src/blind_rotation/lut.rs b/core/src/blind_rotation/lut.rs
index 7446e9a..300aa6e 100644
--- a/core/src/blind_rotation/lut.rs
+++ b/core/src/blind_rotation/lut.rs
@@ -24,6 +24,10 @@ impl LookUpTable {
         Self { data, basek, k }
     }

+    pub fn log_extension_factor(&self) -> usize {
+        (usize::BITS - (self.extension_factor() - 1).leading_zeros()) as _
+    }
+
     pub fn extension_factor(&self) -> usize {
         self.data.len()
     }

diff --git a/core/src/blind_rotation/test_fft64/cggi.rs b/core/src/blind_rotation/test_fft64/cggi.rs
index e544494..2fbad48 100644
--- a/core/src/blind_rotation/test_fft64/cggi.rs
+++ b/core/src/blind_rotation/test_fft64/cggi.rs
@@ -39,8 +39,8 @@ fn blind_rotatio_test(n_lwe: usize, block_size: usize, extension_factor: usize)
     let message_modulus: usize = 1 << 4;

-    let mut source_xs: Source = Source::new([1u8; 32]);
-    let mut source_xe: Source = Source::new([1u8; 32]);
+    let mut source_xs: Source = Source::new([2u8; 32]);
+    let mut source_xe: Source = Source::new([2u8; 32]);
     let mut source_xa: Source = Source::new([1u8; 32]);

     let mut sk_glwe: GLWESecret<Vec<u8>> = GLWESecret::alloc(&module, rank);
@@ -65,7 +65,8 @@
         rank,
     ));

-    let mut brk: BlindRotationKeyCGGI<FFT64> = BlindRotationKeyCGGI::allocate(&module, n_lwe, basek, k_brk, rows_brk, rank);
+    let mut brk: BlindRotationKeyCGGI<Vec<u8>, FFT64> =
+        BlindRotationKeyCGGI::allocate(&module, n_lwe, basek, k_brk, rows_brk, rank);

     brk.generate_from_sk(
         &module,
@@ -86,14 +87,8 @@

     pt_lwe.data.encode_coeff_i64(0, basek, bits, 0, x, bits);

-    // println!("{}", pt_lwe.data);
-
     lwe.encrypt_sk(&pt_lwe, &sk_lwe, &mut source_xa, &mut source_xe, 3.2);
-
     lwe.decrypt(&mut pt_lwe, &sk_lwe);

-    // println!("{}", pt_lwe.data);
-
     let mut f: Vec<i64> = vec![0i64; message_modulus];
     f.iter_mut()
         .enumerate()
@@ -106,14 +101,10 @@

     cggi_blind_rotate(&module, &mut res, &lwe, &lut, &brk, scratch_br.borrow());

-    println!("out_mut.data: {}", res.data);
-
     let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_res);

     res.decrypt(&module, &mut pt_have, &sk_glwe_dft, scratch.borrow());

-    println!("pt_have: {}", pt_have.data);
-
     let mut lwe_2n: Vec<i64> = vec![0i64; lwe.n() + 1]; // TODO: from scratch space
     negate_and_mod_switch_2n(2 * lut.domain_size(), &mut lwe_2n, &lwe.to_ref());
@@ -124,23 +115,11 @@
         .zip(sk_lwe.data.at(0, 0))
         .map(|(x, y)| x * y)
         .sum::<i64>())
-        % (2 * lut.domain_size()) as i64;
-
-    // println!("pt_want: {}", pt_want);
+        & (2 * lut.domain_size() - 1) as i64;

     lut.rotate(pt_want);

-    // lut.data.iter().for_each(|d| {
-    //     println!("{}", d);
-    // });
-
     // First limb should be exactly equal (tests are parameterized such that the noise does not reach
     // the first limb)
     assert_eq!(pt_have.data.at(0, 0), lut.data[0].at(0, 0));
-
-    // Then checks the noise
-    // module.vec_znx_sub_ab_inplace(&mut lut.data[0], 0, &pt_have.data, 0);
-    // let noise: f64 = lut.data[0].std(0, basek);
-    // println!("noise: {}", noise);
-    // assert!(noise < 1e-3);
 }
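
Editorial note, not part of the patch: the hunks above consistently replace reductions of the form x % m with x & (m - 1), where m is two_n, two_n_ext, or extension_factor. The two operations agree only because these moduli are powers of two and the operand is first shifted into the non-negative range by adding m. A minimal standalone Rust check of that equivalence, with hypothetical helper names:

    fn reduce_mod_pow2(x: i64, m: i64) -> i64 {
        // Only valid when m is a power of two and x is non-negative;
        // for negative x, the bit mask and % disagree (% keeps the sign).
        debug_assert!(m > 0 && (m & (m - 1)) == 0);
        debug_assert!(x >= 0);
        x & (m - 1)
    }

    fn main() {
        let two_n_ext: i64 = 2 * 1024; // e.g. 2N with N = 1024
        for a in [0i64, 5, two_n_ext - 1, two_n_ext, 3 * two_n_ext + 7] {
            // The "+ two_n_ext" shift mirrors (lwe_2n[0] + two_n_ext as i64) above.
            assert_eq!(reduce_mod_pow2(a, two_n_ext), a % two_n_ext);
        }
        println!("mask reduction matches % on non-negative inputs");
    }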
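
Editorial note, not part of the patch: the x_pow_a table added to BlindRotationKeyCGGI caches the prepared DFT of X^ai for every ai in [0, 2N), so the hot loop replaces the per-iteration svp_prepare inside the removed set_xai_plus_y call with a table lookup plus one vec_znx_dft_add against the precomputed minus_one constant. The wrap-around rule the table encodes is that of the negacyclic ring Z[X]/(X^N + 1), where X^ai = -X^(ai-N) for ai >= N. A self-contained sketch of just that coefficient placement, mirroring the body of set_xai_plus_y (the function name monomial_coeffs is hypothetical):

    fn monomial_coeffs(n: usize, ai: usize) -> Vec<i64> {
        // n must be a power of two, ai in [0, 2n).
        let mut raw = vec![0i64; n];
        if ai < n {
            raw[ai] = 1; // X^ai
        } else {
            raw[(ai - n) & (n - 1)] = -1; // X^ai = -X^(ai-n) mod X^n + 1
        }
        raw
    }

    fn main() {
        let n = 8;
        assert_eq!(monomial_coeffs(n, 3)[3], 1); // X^3
        assert_eq!(monomial_coeffs(n, 11)[3], -1); // X^11 = -X^3 mod X^8 + 1
        println!("negacyclic wrap-around as encoded by set_xai_plus_y");
    }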
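
Editorial note, not part of the patch: the new LookUpTable::log_extension_factor computes ceil(log2(extension_factor)) via a leading_zeros trick, which is exact when the extension factor is a power of two, as the & (extension_factor - 1) reductions elsewhere in the patch already assume. A standalone check of the same expression (log2_ceil is a hypothetical stand-in name):

    fn log2_ceil(x: usize) -> usize {
        // Same expression as log_extension_factor; requires x >= 1.
        (usize::BITS - (x - 1).leading_zeros()) as usize
    }

    fn main() {
        assert_eq!(log2_ceil(1), 0);
        assert_eq!(log2_ceil(2), 1);
        assert_eq!(log2_ceil(4), 2);
        assert_eq!(log2_ceil(8), 3);
        println!("log_extension_factor agrees with ceil(log2(x))");
    }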