diff --git a/base2k/examples/rlwe_encrypt.rs b/base2k/examples/rlwe_encrypt.rs index 925c5c4..0d44271 100644 --- a/base2k/examples/rlwe_encrypt.rs +++ b/base2k/examples/rlwe_encrypt.rs @@ -38,13 +38,13 @@ fn main() { let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.cols()); // Applies buf_dft <- s * a - module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.cols()); + module.svp_apply_dft(&mut buf_dft, &s_ppol, &a); // Alias scratch space let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big(); // buf_big <- IDFT(buf_dft) (not normalized) - module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.cols()); + module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft); let mut m: VecZnx = module.new_vec_znx(msg_cols); @@ -74,8 +74,8 @@ fn main() { //Decrypt // buf_big <- a * s - module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.cols()); - module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, b.cols()); + module.svp_apply_dft(&mut buf_dft, &s_ppol, &a); + module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft); // buf_big <- a * s + b module.vec_znx_big_add_small_inplace(&mut buf_big, &b); diff --git a/base2k/examples/vector_matrix_product.rs b/base2k/examples/vector_matrix_product.rs index 1280cc6..22d0303 100644 --- a/base2k/examples/vector_matrix_product.rs +++ b/base2k/examples/vector_matrix_product.rs @@ -49,7 +49,7 @@ fn main() { module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf); let mut c_big: VecZnxBig = c_dft.as_vec_znx_big(); - module.vec_znx_idft_tmp_a(&mut c_big, &mut c_dft, cols); + module.vec_znx_idft_tmp_a(&mut c_big, &mut c_dft); let mut res: VecZnx = module.new_vec_znx(cols); module.vec_znx_big_normalize(log_base2k, &mut res, &c_big, &mut buf); diff --git a/base2k/src/ffi/vmp.rs b/base2k/src/ffi/vmp.rs index 9035667..9c1a218 100644 --- a/base2k/src/ffi/vmp.rs +++ b/base2k/src/ffi/vmp.rs @@ -36,6 +36,21 @@ unsafe extern "C" { ); } +unsafe extern "C" { + pub unsafe fn vmp_apply_dft_add( + module: *const MODULE, + res: *mut VEC_ZNX_DFT, + res_size: u64, + a: *const i64, + a_size: u64, + a_sl: u64, + pmat: *const VMP_PMAT, + nrows: u64, + ncols: u64, + tmp_space: *mut u8, + ); +} + unsafe extern "C" { pub unsafe fn vmp_apply_dft_tmp_bytes( module: *const MODULE, @@ -60,6 +75,20 @@ unsafe extern "C" { ); } +unsafe extern "C" { + pub unsafe fn vmp_apply_dft_to_dft_add( + module: *const MODULE, + res: *mut VEC_ZNX_DFT, + res_size: u64, + a_dft: *const VEC_ZNX_DFT, + a_size: u64, + pmat: *const VMP_PMAT, + nrows: u64, + ncols: u64, + tmp_space: *mut u8, + ); +} + unsafe extern "C" { pub unsafe fn vmp_apply_dft_to_dft_tmp_bytes( module: *const MODULE, diff --git a/base2k/src/module.rs b/base2k/src/module.rs index 23abe60..6eb0267 100644 --- a/base2k/src/module.rs +++ b/base2k/src/module.rs @@ -51,27 +51,23 @@ impl Module { (self.n() << 1) as _ } - // GALOISGENERATOR^|gen| * sign(gen) + // Returns GALOISGENERATOR^|gen| * sign(gen) pub fn galois_element(&self, gen: i64) -> i64 { if gen == 0 { return 1; } + ((mod_exp_u64(GALOISGENERATOR, gen.abs() as usize) & (self.cyclotomic_order() - 1)) as i64) + * gen.signum() + } - let mut gal_el: u64 = 1; - let mut gen_1_pow: u64 = GALOISGENERATOR; - let mut e: usize = gen.abs() as usize; - while e > 0 { - if e & 1 == 1 { - gal_el = gal_el.wrapping_mul(gen_1_pow); - } - - gen_1_pow = gen_1_pow.wrapping_mul(gen_1_pow); - e >>= 1; + // Returns gen^-1 + pub fn galois_element_inv(&self, gen: i64) -> i64 { + if gen == 0 { + panic!("cannot invert 0") } - - gal_el &= self.cyclotomic_order() - 1; - - (gal_el as i64) * gen.signum() + ((mod_exp_u64(gen.abs() as 
u64, (self.cyclotomic_order() - 1) as usize)
+            & (self.cyclotomic_order() - 1)) as i64)
+            * gen.signum()
     }
 
     pub fn free(self) {
@@ -79,3 +75,17 @@ impl Module {
         drop(self);
     }
 }
+
+fn mod_exp_u64(x: u64, e: usize) -> u64 {
+    let mut y: u64 = 1;
+    let mut x_pow: u64 = x;
+    let mut exp = e;
+    while exp > 0 {
+        if exp & 1 == 1 {
+            y = y.wrapping_mul(x_pow);
+        }
+        x_pow = x_pow.wrapping_mul(x_pow);
+        exp >>= 1;
+    }
+    y
+}
diff --git a/base2k/src/svp.rs b/base2k/src/svp.rs
index 8f62195..c8b6139 100644
--- a/base2k/src/svp.rs
+++ b/base2k/src/svp.rs
@@ -1,6 +1,6 @@
-use crate::ffi::svp;
+use crate::ffi::svp::{self, svp_ppol_t};
 use crate::ffi::vec_znx_dft::vec_znx_dft_t;
-use crate::{assert_alignement, Module, VecZnx, VecZnxDft};
+use crate::{assert_alignement, Module, VecZnx, VecZnxDft, BACKEND};
 use crate::{alloc_aligned, cast_mut, Infos};
 use rand::seq::SliceRandom;
 
@@ -35,15 +35,15 @@ impl Scalar {
         self.n
     }
 
-    pub fn buffer_size(n: usize) -> usize {
-        n
+    pub fn bytes_of(n: usize) -> usize {
+        n * std::mem::size_of::<i64>()
     }
 
-    pub fn from_buffer(&mut self, n: usize, bytes: &mut [u8]) -> Self {
-        let size: usize = Self::buffer_size(n);
+    pub fn from_bytes(n: usize, bytes: &mut [u8]) -> Self {
+        let size: usize = Self::bytes_of(n);
         debug_assert!(
             bytes.len() == size,
-            "invalid buffer: bytes.len()={} < self.buffer_size(n={})={}",
+            "invalid buffer: bytes.len()={} < self.bytes_of(n={})={}",
             bytes.len(),
             n,
             size
@@ -63,6 +63,28 @@ impl Scalar {
         }
     }
 
+    pub fn from_bytes_borrow(n: usize, bytes: &mut [u8]) -> Self {
+        let size: usize = Self::bytes_of(n);
+        debug_assert!(
+            bytes.len() == size,
+            "invalid buffer: bytes.len()={} < self.bytes_of(n={})={}",
+            bytes.len(),
+            n,
+            size
+        );
+        #[cfg(debug_assertions)]
+        {
+            assert_alignement(bytes.as_ptr())
+        }
+        let bytes_i64: &mut [i64] = cast_mut::<i64>(bytes);
+        let ptr: *mut i64 = bytes_i64.as_mut_ptr();
+        Self {
+            n: n,
+            data: Vec::new(),
+            ptr: ptr,
+        }
+    }
+
     pub fn as_ptr(&self) -> *const i64 {
         self.ptr
     }
@@ -87,26 +109,89 @@ impl Scalar {
             .for_each(|x: &mut i64| *x = (((source.next_u32() & 1) as i64) << 1) - 1);
         self.data.shuffle(source);
     }
+
+    pub fn as_vec_znx(&self) -> VecZnx {
+        VecZnx {
+            n: self.n,
+            cols: 1,
+            data: Vec::new(),
+            ptr: self.ptr,
+        }
+    }
 }
 
-pub struct SvpPPol(pub *mut svp::svp_ppol_t, pub usize);
+pub trait ScalarOps {
+    fn bytes_of_scalar(&self) -> usize;
+    fn new_scalar(&self) -> Scalar;
+    fn new_scalar_from_bytes(&self, bytes: &mut [u8]) -> Scalar;
+    fn new_scalar_from_bytes_borrow(&self, tmp_bytes: &mut [u8]) -> Scalar;
+}
+impl ScalarOps for Module {
+    fn bytes_of_scalar(&self) -> usize {
+        Scalar::bytes_of(self.n())
+    }
+    fn new_scalar(&self) -> Scalar {
+        Scalar::new(self.n())
+    }
+    fn new_scalar_from_bytes(&self, bytes: &mut [u8]) -> Scalar {
+        Scalar::from_bytes(self.n(), bytes)
+    }
+    fn new_scalar_from_bytes_borrow(&self, tmp_bytes: &mut [u8]) -> Scalar {
+        Scalar::from_bytes_borrow(self.n(), tmp_bytes)
+    }
+}
+
+pub struct SvpPPol {
+    pub n: usize,
+    pub data: Vec<u8>,
+    pub ptr: *mut u8,
+    pub backend: BACKEND,
+}
 
 /// A prepared [crate::Scalar] for [SvpPPolOps::svp_apply_dft].
 /// An [SvpPPol] can be seen as a [VecZnxDft] of one limb.
-/// The backend array of an [SvpPPol] is allocated in C and must be freed manually.
 impl SvpPPol {
-    /// Returns the ring degree of the [SvpPPol].
-    pub fn n(&self) -> usize {
-        self.1
+    pub fn new(module: &Module) -> Self {
+        module.new_svp_ppol()
     }
 
-    pub fn from_bytes(size: usize, bytes: &mut [u8]) -> SvpPPol {
+    /// Returns the ring degree of the [SvpPPol].
+    pub fn n(&self) -> usize {
+        self.n
+    }
+
+    pub fn bytes_of(module: &Module) -> usize {
+        module.bytes_of_svp_ppol()
+    }
+
+    pub fn from_bytes(module: &Module, bytes: &mut [u8]) -> SvpPPol {
         #[cfg(debug_assertions)]
         {
-            assert_alignement(bytes.as_ptr())
+            assert_alignement(bytes.as_ptr());
+            assert_eq!(bytes.len(), module.bytes_of_svp_ppol());
+        }
+        unsafe {
+            Self {
+                n: module.n(),
+                data: Vec::from_raw_parts(bytes.as_mut_ptr(), bytes.len(), bytes.len()),
+                ptr: bytes.as_mut_ptr(),
+                backend: module.backend(),
+            }
+        }
+    }
+
+    pub fn from_bytes_borrow(module: &Module, tmp_bytes: &mut [u8]) -> SvpPPol {
+        #[cfg(debug_assertions)]
+        {
+            assert_alignement(tmp_bytes.as_ptr());
+            assert_eq!(tmp_bytes.len(), module.bytes_of_svp_ppol());
+        }
+        Self {
+            n: module.n(),
+            data: Vec::new(),
+            ptr: tmp_bytes.as_mut_ptr(),
+            backend: module.backend(),
+        }
-        debug_assert!(bytes.len() << 3 >= size);
-        SvpPPol(bytes.as_mut_ptr() as *mut svp::svp_ppol_t, size)
     }
 
     /// Returns the number of cols of the [SvpPPol], which is always 1.
@@ -120,45 +205,64 @@ pub trait SvpPPolOps {
     fn new_svp_ppol(&self) -> SvpPPol;
 
     /// Returns the minimum number of bytes necessary to allocate
-    /// a new [SvpPPol] through [SvpPPol::from_bytes].
+    /// a new [SvpPPol] through [SvpPPol::from_bytes] or [SvpPPol::from_bytes_borrow].
     fn bytes_of_svp_ppol(&self) -> usize;
 
+    /// Allocates a new [SvpPPol] from an array of bytes.
+    /// The array of bytes is owned by the [SvpPPol].
+    /// The method will panic if bytes.len() < [SvpPPolOps::bytes_of_svp_ppol]
+    fn new_svp_ppol_from_bytes(&self, bytes: &mut [u8]) -> SvpPPol;
+
+    /// Allocates a new [SvpPPol] from an array of bytes.
+    /// The array of bytes is borrowed by the [SvpPPol].
+    /// The method will panic if bytes.len() < [SvpPPolOps::bytes_of_svp_ppol]
+    fn new_svp_ppol_from_bytes_borrow(&self, tmp_bytes: &mut [u8]) -> SvpPPol;
+
     /// Prepares a [crate::Scalar] for a [SvpPPolOps::svp_apply_dft].
     fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar);
 
     /// Applies the [SvpPPol] x [VecZnxDft] product, where each limb of
     /// the [VecZnxDft] is multiplied with [SvpPPol].
-    fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx, b_cols: usize);
+    fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx);
 }
 
 impl SvpPPolOps for Module {
     fn new_svp_ppol(&self) -> SvpPPol {
-        unsafe { SvpPPol(svp::new_svp_ppol(self.ptr), self.n()) }
+        let mut data: Vec<u8> = alloc_aligned::<u8>(self.bytes_of_svp_ppol());
+        let ptr: *mut u8 = data.as_mut_ptr();
+        SvpPPol {
+            data: data,
+            ptr: ptr,
+            n: self.n(),
+            backend: self.backend(),
+        }
     }
 
     fn bytes_of_svp_ppol(&self) -> usize {
         unsafe { svp::bytes_of_svp_ppol(self.ptr) as usize }
     }
 
-    fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar) {
-        unsafe { svp::svp_prepare(self.ptr, svp_ppol.0, a.as_ptr()) }
+    fn new_svp_ppol_from_bytes(&self, bytes: &mut [u8]) -> SvpPPol {
+        SvpPPol::from_bytes(self, bytes)
     }
 
-    fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx, b_cols: usize) {
-        debug_assert!(
-            c.cols() >= b_cols,
-            "invalid c_vector: c_vector.cols()={} < b.cols()={}",
-            c.cols(),
-            b_cols
-        );
+    fn new_svp_ppol_from_bytes_borrow(&self, tmp_bytes: &mut [u8]) -> SvpPPol {
+        SvpPPol::from_bytes_borrow(self, tmp_bytes)
+    }
+
+    fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar) {
+        unsafe { svp::svp_prepare(self.ptr, svp_ppol.ptr as *mut svp_ppol_t, a.as_ptr()) }
+    }
+
+    fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx) {
         unsafe {
             svp::svp_apply_dft(
                 self.ptr,
                 c.ptr as *mut vec_znx_dft_t,
-                b_cols as u64,
-                a.0,
+                c.cols() as u64,
+                a.ptr as *const svp_ppol_t,
                 b.as_ptr(),
-                b_cols as u64,
+                b.cols() as u64,
                 b.n() as u64,
             )
         }
     }
diff --git a/base2k/src/vec_znx.rs b/base2k/src/vec_znx.rs
index 6f45bac..d5235b5 100644
--- a/base2k/src/vec_znx.rs
+++ b/base2k/src/vec_znx.rs
@@ -12,16 +12,16 @@ use std::cmp::min;
 #[derive(Clone)]
 pub struct VecZnx {
     /// Polynomial degree.
-    n: usize,
+    pub n: usize,
 
     /// Number of columns.
-    cols: usize,
+    pub cols: usize,
 
     /// Polynomial coefficients, as a contiguous array. Each col is equally spaced by n.
-    data: Vec<i64>,
+    pub data: Vec<i64>,
 
     /// Pointer to data (data can be empty if [VecZnx] borrows space instead of owning it).
-    ptr: *mut i64,
+    pub ptr: *mut i64,
 }
 
 pub trait VecZnxVec {
@@ -363,10 +363,10 @@ pub trait VecZnxOps {
     fn vec_znx_rotate_inplace(&self, k: i64, a: &mut VecZnx);
 
     /// b <- phi_k(a) where phi_k: X^i -> X^{i*k} (mod (X^{n} + 1))
-    fn vec_znx_automorphism(&self, k: i64, b: &mut VecZnx, a: &VecZnx, a_cols: usize);
+    fn vec_znx_automorphism(&self, k: i64, b: &mut VecZnx, a: &VecZnx);
 
     /// a <- phi_k(a) where phi_k: X^i -> X^{i*k} (mod (X^{n} + 1))
-    fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx, a_cols: usize);
+    fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx);
 
     /// Splits b into subrings and copies them into a.
     ///
@@ -540,10 +540,9 @@ impl VecZnxOps for Module {
     /// # Panics
     ///
     /// The method will panic if the argument `a` is greater than `a.cols()`.
-    fn vec_znx_automorphism(&self, k: i64, b: &mut VecZnx, a: &VecZnx, a_cols: usize) {
+    fn vec_znx_automorphism(&self, k: i64, b: &mut VecZnx, a: &VecZnx) {
         debug_assert_eq!(a.n(), self.n());
         debug_assert_eq!(b.n(), self.n());
-        debug_assert!(a.cols() >= a_cols);
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.ptr,
@@ -552,7 +551,7 @@
                 b.cols() as u64,
                 b.n() as u64,
                 a.as_ptr(),
-                a_cols as u64,
+                a.cols() as u64,
                 a.n() as u64,
             );
         }
@@ -569,9 +568,8 @@
     /// # Panics
     ///
     /// The method will panic if the argument `cols` is greater than `self.cols()`.
- fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx, a_cols: usize) { + fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx) { debug_assert_eq!(a.n(), self.n()); - debug_assert!(a.cols() >= a_cols); unsafe { vec_znx::vec_znx_automorphism( self.ptr, @@ -580,7 +578,7 @@ impl VecZnxOps for Module { a.cols() as u64, a.n() as u64, a.as_ptr(), - a_cols as u64, + a.cols() as u64, a.n() as u64, ); } diff --git a/base2k/src/vec_znx_big.rs b/base2k/src/vec_znx_big.rs index 8b63992..77122d1 100644 --- a/base2k/src/vec_znx_big.rs +++ b/base2k/src/vec_znx_big.rs @@ -16,6 +16,7 @@ impl VecZnxBig { pub fn from_bytes(module: &Module, cols: usize, bytes: &mut [u8]) -> Self { #[cfg(debug_assertions)] { + assert_eq!(bytes.len(), module.bytes_of_vec_znx_big(cols)); assert_alignement(bytes.as_ptr()) }; unsafe { @@ -54,14 +55,6 @@ impl VecZnxBig { } } - pub fn n(&self) -> usize { - self.n - } - - pub fn cols(&self) -> usize { - self.cols - } - pub fn backend(&self) -> BACKEND { self.backend } @@ -77,12 +70,36 @@ impl VecZnxBig { } } +impl Infos for VecZnxBig { + /// Returns the base 2 logarithm of the [VecZnx] degree. + fn log_n(&self) -> usize { + (usize::BITS - (self.n - 1).leading_zeros()) as _ + } + + /// Returns the [VecZnx] degree. + fn n(&self) -> usize { + self.n + } + + /// Returns the number of cols of the [VecZnx]. + fn cols(&self) -> usize { + self.cols + } + + /// Returns the number of rows of the [VecZnx]. + fn rows(&self) -> usize { + 1 + } +} + pub trait VecZnxBigOps { /// Allocates a vector Z[X]/(X^N+1) that stores not normalized values. fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig; /// Returns a new [VecZnxBig] with the provided bytes array as backing array. /// + /// Behavior: takes ownership of the backing array. + /// /// # Arguments /// /// * `cols`: the number of cols of the [VecZnxBig]. @@ -92,6 +109,19 @@ pub trait VecZnxBigOps { /// If `bytes.len()` < [Module::bytes_of_vec_znx_big]. fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig; + /// Returns a new [VecZnxBig] with the provided bytes array as backing array. + /// + /// Behavior: the backing array is only borrowed. + /// + /// # Arguments + /// + /// * `cols`: the number of cols of the [VecZnxBig]. + /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_big]. + /// + /// # Panics + /// If `bytes.len()` < [Module::bytes_of_vec_znx_big]. + fn new_vec_znx_big_from_bytes_borrow(&self, cols: usize, tmp_bytes: &mut [u8]) -> VecZnxBig; + /// Returns the minimum number of bytes necessary to allocate /// a new [VecZnxBig] through [VecZnxBig::from_bytes]. 
fn bytes_of_vec_znx_big(&self, cols: usize) -> usize; @@ -151,19 +181,13 @@ impl VecZnxBigOps for Module { } fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig { - debug_assert!( - bytes.len() >= ::bytes_of_vec_znx_big(self, cols), - "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}", - bytes.len(), - ::bytes_of_vec_znx_big(self, cols) - ); - #[cfg(debug_assertions)] - { - assert_alignement(bytes.as_ptr()) - } VecZnxBig::from_bytes(self, cols, bytes) } + fn new_vec_znx_big_from_bytes_borrow(&self, cols: usize, tmp_bytes: &mut [u8]) -> VecZnxBig { + VecZnxBig::from_bytes_borrow(self, cols, tmp_bytes) + } + fn bytes_of_vec_znx_big(&self, cols: usize) -> usize { unsafe { vec_znx_big::bytes_of_vec_znx_big(self.ptr, cols as u64) as usize } } diff --git a/base2k/src/vec_znx_dft.rs b/base2k/src/vec_znx_dft.rs index 367270d..877adc8 100644 --- a/base2k/src/vec_znx_dft.rs +++ b/base2k/src/vec_znx_dft.rs @@ -61,14 +61,6 @@ impl VecZnxDft { } } - pub fn n(&self) -> usize { - self.n - } - - pub fn cols(&self) -> usize { - self.cols - } - pub fn backend(&self) -> BACKEND { self.backend } @@ -102,12 +94,36 @@ impl VecZnxDft { } } +impl Infos for VecZnxDft { + /// Returns the base 2 logarithm of the [VecZnx] degree. + fn log_n(&self) -> usize { + (usize::BITS - (self.n - 1).leading_zeros()) as _ + } + + /// Returns the [VecZnx] degree. + fn n(&self) -> usize { + self.n + } + + /// Returns the number of cols of the [VecZnx]. + fn cols(&self) -> usize { + self.cols + } + + /// Returns the number of rows of the [VecZnx]. + fn rows(&self) -> usize { + 1 + } +} + pub trait VecZnxDftOps { /// Allocates a vector Z[X]/(X^N+1) that stores normalized in the DFT space. fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft; /// Returns a new [VecZnxDft] with the provided bytes array as backing array. /// + /// Behavior: takes ownership of the backing array. + /// /// # Arguments /// /// * `cols`: the number of cols of the [VecZnxDft]. @@ -117,6 +133,19 @@ pub trait VecZnxDftOps { /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft]. fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft; + /// Returns a new [VecZnxDft] with the provided bytes array as backing array. + /// + /// Behavior: the backing array is only borrowed. + /// + /// # Arguments + /// + /// * `cols`: the number of cols of the [VecZnxDft]. + /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_dft]. + /// + /// # Panics + /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft]. + fn new_vec_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft; + /// Returns a new [VecZnxDft] with the provided bytes array as backing array. /// /// # Arguments @@ -133,28 +162,15 @@ pub trait VecZnxDftOps { fn vec_znx_idft_tmp_bytes(&self) -> usize; /// b <- IDFT(a), uses a as scratch space. 
- fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft, a_cols: usize); + fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft); - fn vec_znx_idft(&self, b: &mut VecZnxBig, a: &VecZnxDft, a_cols: usize, tmp_bytes: &mut [u8]); + fn vec_znx_idft(&self, b: &mut VecZnxBig, a: &VecZnxDft, tmp_bytes: &mut [u8]); - fn vec_znx_dft(&self, b: &mut VecZnxDft, a: &VecZnx, a_cols: usize); + fn vec_znx_dft(&self, b: &mut VecZnxDft, a: &VecZnx); - fn vec_znx_dft_automorphism( - &self, - k: i64, - b: &mut VecZnxDft, - b_cols: usize, - a: &VecZnxDft, - a_cols: usize, - ); + fn vec_znx_dft_automorphism(&self, k: i64, b: &mut VecZnxDft, a: &VecZnxDft); - fn vec_znx_dft_automorphism_inplace( - &self, - k: i64, - a: &mut VecZnxDft, - a_cols: usize, - tmp_bytes: &mut [u8], - ); + fn vec_znx_dft_automorphism_inplace(&self, k: i64, a: &mut VecZnxDft, tmp_bytes: &mut [u8]); fn vec_znx_dft_automorphism_tmp_bytes(&self) -> usize; } @@ -173,37 +189,25 @@ impl VecZnxDftOps for Module { } fn new_vec_znx_dft_from_bytes(&self, cols: usize, tmp_bytes: &mut [u8]) -> VecZnxDft { - debug_assert!( - tmp_bytes.len() >= Self::bytes_of_vec_znx_dft(self, cols), - "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}", - tmp_bytes.len(), - Self::bytes_of_vec_znx_dft(self, cols) - ); - #[cfg(debug_assertions)] - { - assert_alignement(tmp_bytes.as_ptr()) - } VecZnxDft::from_bytes(self, cols, tmp_bytes) } + fn new_vec_znx_dft_from_bytes_borrow(&self, cols: usize, tmp_bytes: &mut [u8]) -> VecZnxDft { + VecZnxDft::from_bytes_borrow(self, cols, tmp_bytes) + } + fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize { unsafe { bytes_of_vec_znx_dft(self.ptr, cols as u64) as usize } } - fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft, a_cols: usize) { - debug_assert!( - b.cols() >= a_cols, - "invalid c_vector: b_vector.cols()={} < a_cols={}", - b.cols(), - a_cols - ); + fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft) { unsafe { vec_znx_dft::vec_znx_idft_tmp_a( self.ptr, b.ptr as *mut vec_znx_big_t, b.cols() as u64, a.ptr as *mut vec_znx_dft_t, - a_cols as u64, + a.cols() as u64, ) } } @@ -216,41 +220,23 @@ impl VecZnxDftOps for Module { /// /// # Panics /// If b.cols < a_cols - fn vec_znx_dft(&self, b: &mut VecZnxDft, a: &VecZnx, a_cols: usize) { - debug_assert!( - b.cols() >= a_cols, - "invalid a_cols: b.cols()={} < a_cols={}", - b.cols(), - a_cols - ); + fn vec_znx_dft(&self, b: &mut VecZnxDft, a: &VecZnx) { unsafe { vec_znx_dft::vec_znx_dft( self.ptr, b.ptr as *mut vec_znx_dft_t, b.cols() as u64, a.as_ptr(), - a_cols as u64, + a.cols() as u64, a.n() as u64, ) } } // b <- IDFT(a), scratch space size obtained with [vec_znx_idft_tmp_bytes]. 
- fn vec_znx_idft(&self, b: &mut VecZnxBig, a: &VecZnxDft, a_cols: usize, tmp_bytes: &mut [u8]) { + fn vec_znx_idft(&self, b: &mut VecZnxBig, a: &VecZnxDft, tmp_bytes: &mut [u8]) { #[cfg(debug_assertions)] { - assert!( - b.cols() >= a_cols, - "invalid c_vector: b.cols()={} < a_cols={}", - b.cols(), - a_cols - ); - assert!( - a.cols() >= a_cols, - "invalid c_vector: a.cols()={} < a_cols={}", - a.cols(), - a_cols - ); assert!( tmp_bytes.len() >= Self::vec_znx_idft_tmp_bytes(self), "invalid tmp_bytes: tmp_bytes.len()={} < self.vec_znx_idft_tmp_bytes()={}", @@ -263,65 +249,31 @@ impl VecZnxDftOps for Module { vec_znx_dft::vec_znx_idft( self.ptr, b.ptr as *mut vec_znx_big_t, - a.cols() as u64, + b.cols() as u64, a.ptr as *const vec_znx_dft_t, - a_cols as u64, + a.cols() as u64, tmp_bytes.as_mut_ptr(), ) } } - fn vec_znx_dft_automorphism( - &self, - k: i64, - b: &mut VecZnxDft, - b_cols: usize, - a: &VecZnxDft, - a_cols: usize, - ) { - #[cfg(debug_assertions)] - { - assert!( - b.cols() >= a_cols, - "invalid c_vector: b.cols()={} < a_cols={}", - b.cols(), - a_cols - ); - assert!( - a.cols() >= a_cols, - "invalid c_vector: a.cols()={} < a_cols={}", - a.cols(), - a_cols - ); - } + fn vec_znx_dft_automorphism(&self, k: i64, b: &mut VecZnxDft, a: &VecZnxDft) { unsafe { vec_znx_dft::vec_znx_dft_automorphism( self.ptr, k, b.ptr as *mut vec_znx_dft_t, - b_cols as u64, + b.cols() as u64, a.ptr as *const vec_znx_dft_t, - a_cols as u64, + a.cols() as u64, [0u8; 0].as_mut_ptr(), ); } } - fn vec_znx_dft_automorphism_inplace( - &self, - k: i64, - a: &mut VecZnxDft, - a_cols: usize, - tmp_bytes: &mut [u8], - ) { + fn vec_znx_dft_automorphism_inplace(&self, k: i64, a: &mut VecZnxDft, tmp_bytes: &mut [u8]) { #[cfg(debug_assertions)] { - assert!( - a.cols() >= a_cols, - "invalid c_vector: a.cols()={} < a_cols={}", - a.cols(), - a_cols - ); assert!( tmp_bytes.len() >= Self::vec_znx_dft_automorphism_tmp_bytes(self), "invalid tmp_bytes: tmp_bytes.len()={} < self.vec_znx_dft_automorphism_tmp_bytes()={}", @@ -335,9 +287,9 @@ impl VecZnxDftOps for Module { self.ptr, k, a.ptr as *mut vec_znx_dft_t, - a_cols as u64, + a.cols() as u64, a.ptr as *const vec_znx_dft_t, - a_cols as u64, + a.cols() as u64, tmp_bytes.as_mut_ptr(), ); } @@ -379,16 +331,16 @@ mod tests { let p: i64 = -5; // a_dft <- DFT(a) - module.vec_znx_dft(&mut a_dft, &a, cols); + module.vec_znx_dft(&mut a_dft, &a); // a_dft <- AUTO(a_dft) - module.vec_znx_dft_automorphism_inplace(p, &mut a_dft, cols, &mut tmp_bytes); + module.vec_znx_dft_automorphism_inplace(p, &mut a_dft, &mut tmp_bytes); // a <- AUTO(a) - module.vec_znx_automorphism_inplace(p, &mut a, cols); + module.vec_znx_automorphism_inplace(p, &mut a); // b_dft <- DFT(AUTO(a)) - module.vec_znx_dft(&mut b_dft, &a, cols); + module.vec_znx_dft(&mut b_dft, &a); let a_f64: &[f64] = a_dft.raw(&module); let b_f64: &[f64] = b_dft.raw(&module); diff --git a/base2k/src/vmp.rs b/base2k/src/vmp.rs index e1c84d3..9484db8 100644 --- a/base2k/src/vmp.rs +++ b/base2k/src/vmp.rs @@ -253,6 +253,32 @@ pub trait VmpPMatOps { /// * `buf`: scratch space, the size can be obtained with [VmpPMatOps::vmp_apply_dft_tmp_bytes]. fn vmp_apply_dft(&self, c: &mut VecZnxDft, a: &VecZnx, b: &VmpPMat, buf: &mut [u8]); + /// Applies the vector matrix product [VecZnxDft] x [VmpPMat] and adds on the receiver. 
+ /// + /// A vector matrix product is equivalent to a sum of [crate::SvpPPolOps::svp_apply_dft] + /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) + /// and each vector a [VecZnxDft] (row) of the [VmpPMat]. + /// + /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and + /// `j` cols, the output is a [VecZnx] of `j` cols. + /// + /// If there is a mismatch between the dimensions the largest valid ones are used. + /// + /// ```text + /// |a b c d| x |e f g| = (a * |e f g| + b * |h i j| + c * |k l m|) = |n o p| + /// |h i j| + /// |k l m| + /// ``` + /// where each element is a [VecZnxDft]. + /// + /// # Arguments + /// + /// * `c`: the operand on which the output of the vector matrix product is added, as a [VecZnxDft]. + /// * `a`: the left operand [VecZnx] of the vector matrix product. + /// * `b`: the right operand [VmpPMat] of the vector matrix product. + /// * `buf`: scratch space, the size can be obtained with [VmpPMatOps::vmp_apply_dft_tmp_bytes]. + fn vmp_apply_dft_add(&self, c: &mut VecZnxDft, a: &VecZnx, b: &VmpPMat, buf: &mut [u8]); + /// Returns the size of the stratch space necessary for [VmpPMatOps::vmp_apply_dft_to_dft]. /// /// # Arguments @@ -296,6 +322,39 @@ pub trait VmpPMatOps { /// * `buf`: scratch space, the size can be obtained with [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes]. fn vmp_apply_dft_to_dft(&self, c: &mut VecZnxDft, a: &VecZnxDft, b: &VmpPMat, buf: &mut [u8]); + /// Applies the vector matrix product [VecZnxDft] x [VmpPMat] and adds on top of the receiver instead of overwritting it. + /// The size of `buf` is given by [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes]. + /// + /// A vector matrix product is equivalent to a sum of [crate::SvpPPolOps::svp_apply_dft] + /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) + /// and each vector a [VecZnxDft] (row) of the [VmpPMat]. + /// + /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and + /// `j` cols, the output is a [VecZnx] of `j` cols. + /// + /// If there is a mismatch between the dimensions the largest valid ones are used. + /// + /// ```text + /// |a b c d| x |e f g| = (a * |e f g| + b * |h i j| + c * |k l m|) = |n o p| + /// |h i j| + /// |k l m| + /// ``` + /// where each element is a [VecZnxDft]. + /// + /// # Arguments + /// + /// * `c`: the operand on which the output of the vector matrix product is added, as a [VecZnxDft]. + /// * `a`: the left operand [VecZnxDft] of the vector matrix product. + /// * `b`: the right operand [VmpPMat] of the vector matrix product. + /// * `buf`: scratch space, the size can be obtained with [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes]. + fn vmp_apply_dft_to_dft_add( + &self, + c: &mut VecZnxDft, + a: &VecZnxDft, + b: &VmpPMat, + buf: &mut [u8], + ); + /// Applies the vector matrix product [VecZnxDft] x [VmpPMat] in place. /// The size of `buf` is given by [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes]. 
/// @@ -503,6 +562,30 @@ impl VmpPMatOps for Module { } } + fn vmp_apply_dft_add(&self, c: &mut VecZnxDft, a: &VecZnx, b: &VmpPMat, tmp_bytes: &mut [u8]) { + debug_assert!( + tmp_bytes.len() >= self.vmp_apply_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols()) + ); + #[cfg(debug_assertions)] + { + assert_alignement(tmp_bytes.as_ptr()); + } + unsafe { + vmp::vmp_apply_dft_add( + self.ptr, + c.ptr as *mut vec_znx_dft_t, + c.cols() as u64, + a.as_ptr(), + a.cols() as u64, + a.n() as u64, + b.as_ptr() as *const vmp_pmat_t, + b.rows() as u64, + b.cols() as u64, + tmp_bytes.as_mut_ptr(), + ) + } + } + fn vmp_apply_dft_to_dft_tmp_bytes( &self, res_cols: usize, @@ -551,6 +634,36 @@ impl VmpPMatOps for Module { } } + fn vmp_apply_dft_to_dft_add( + &self, + c: &mut VecZnxDft, + a: &VecZnxDft, + b: &VmpPMat, + tmp_bytes: &mut [u8], + ) { + debug_assert!( + tmp_bytes.len() + >= self.vmp_apply_dft_to_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols()) + ); + #[cfg(debug_assertions)] + { + assert_alignement(tmp_bytes.as_ptr()); + } + unsafe { + vmp::vmp_apply_dft_to_dft_add( + self.ptr, + c.ptr as *mut vec_znx_dft_t, + c.cols() as u64, + a.ptr as *const vec_znx_dft_t, + a.cols() as u64, + b.as_ptr() as *const vmp_pmat_t, + b.rows() as u64, + b.cols() as u64, + tmp_bytes.as_mut_ptr(), + ) + } + } + fn vmp_apply_dft_to_dft_inplace(&self, b: &mut VecZnxDft, a: &VmpPMat, tmp_bytes: &mut [u8]) { debug_assert!( tmp_bytes.len() @@ -604,7 +717,7 @@ mod tests { for row_i in 0..vpmat_rows { let mut source: Source = Source::new([0u8; 32]); module.fill_uniform(log_base2k, &mut a, vpmat_cols, &mut source); - module.vec_znx_dft(&mut a_dft, &a, vpmat_cols); + module.vec_znx_dft(&mut a_dft, &a); module.vmp_prepare_row(&mut vmpmat_0, &a.raw(), row_i, &mut tmp_bytes); // Checks that prepare(vmp_pmat, a) = prepare_dft(vmp_pmat, a_dft) @@ -617,7 +730,7 @@ mod tests { // Checks that a_big = extract(prepare_dft(vmp_pmat, a_dft), b_big) module.vmp_extract_row(&mut b_big, &vmpmat_0, row_i); - module.vec_znx_idft(&mut a_big, &a_dft, vpmat_cols, &mut tmp_bytes); + module.vec_znx_idft(&mut a_big, &a_dft, &mut tmp_bytes); assert_eq!(a_big.raw::(&module), b_big.raw::(&module)); } diff --git a/rlwe/benches/gadget_product.rs b/rlwe/benches/gadget_product.rs index b2aeaa0..74a7ad4 100644 --- a/rlwe/benches/gadget_product.rs +++ b/rlwe/benches/gadget_product.rs @@ -1,5 +1,5 @@ use base2k::{ - Infos, BACKEND, Module, Sampling, SvpPPolOps, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps, + BACKEND, Infos, Module, Sampling, SvpPPolOps, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, alloc_aligned_u8, }; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; @@ -7,7 +7,7 @@ use rlwe::{ ciphertext::{Ciphertext, new_gadget_ciphertext}, elem::ElemCommon, encryptor::{encrypt_grlwe_sk, encrypt_grlwe_sk_tmp_bytes}, - gadget_product::{gadget_product_core, gadget_product_tmp_bytes}, + gadget_product::{gadget_product_core, gadget_product_core_tmp_bytes}, keys::SecretKey, parameters::{Parameters, ParametersLiteral}, }; @@ -50,7 +50,7 @@ fn bench_gadget_product_inplace(c: &mut Criterion) { let mut tmp_bytes: Vec = alloc_aligned_u8( params.encrypt_rlwe_sk_tmp_bytes(params.log_q()) - | gadget_product_tmp_bytes( + | gadget_product_core_tmp_bytes( params.module(), params.log_base2k(), params.log_q(), diff --git a/rlwe/examples/rlk_experiments.rs b/rlwe/examples/rlk_experiments.rs deleted file mode 100644 index cd2331b..0000000 --- a/rlwe/examples/rlk_experiments.rs +++ /dev/null @@ -1,151 +0,0 @@ -use base2k::{ - Encoding, Infos, 
Module, Sampling, SvpPPol, SvpPPolOps, VecZnx, VecZnxDftOps, VecZnxOps, - VmpPMat, VmpPMatOps, is_aligned, -}; -use itertools::izip; -use rlwe::ciphertext::{Ciphertext, new_gadget_ciphertext}; -use rlwe::elem::ElemCommon; -use rlwe::encryptor::encrypt_rlwe_sk; -use rlwe::keys::SecretKey; -use rlwe::plaintext::Plaintext; -use sampling::source::{Source, new_seed}; - -fn main() { - let n: usize = 32; - let module: Module = Module::new(n, base2k::BACKEND::FFT64); - let log_base2k: usize = 16; - let log_k: usize = 32; - let cols: usize = 4; - - let mut a: VecZnx = module.new_vec_znx(cols); - let mut data: Vec = vec![0i64; n]; - data[0] = 0; - data[1] = 0; - a.encode_vec_i64(log_base2k, log_k, &data, 16); - - let mut a_dft: base2k::VecZnxDft = module.new_vec_znx_dft(cols); - - module.vec_znx_dft(&mut a_dft, &a, cols); - - (0..cols).for_each(|i| { - println!("{:?}", a_dft.at::(&module, i)); - }) -} - -pub struct GadgetCiphertextProtocol {} - -impl GadgetCiphertextProtocol { - pub fn new() -> GadgetCiphertextProtocol { - Self {} - } - - pub fn allocate( - module: &Module, - log_base2k: usize, - rows: usize, - log_q: usize, - ) -> GadgetCiphertextShare { - GadgetCiphertextShare::new(module, log_base2k, rows, log_q) - } - - pub fn gen_share( - module: &Module, - sk: &SecretKey, - pt: &Plaintext, - seed: &[u8; 32], - share: &mut GadgetCiphertextShare, - tmp_bytes: &mut [u8], - ) { - share.seed.copy_from_slice(seed); - let mut source_xe: Source = Source::new(new_seed()); - let mut source_xa: Source = Source::new(*seed); - let mut sk_ppol: SvpPPol = module.new_svp_ppol(); - sk.prepare(module, &mut sk_ppol); - share.value.iter_mut().for_each(|ai| { - //let elem = Elem{}; - //encrypt_rlwe_sk_thread_safe(module, ai, Some(pt.elem()), &sk_ppol, &mut source_xa, &mut source_xe, 3.2, tmp_bytes); - }) - } -} - -pub struct GadgetCiphertextShare { - pub seed: [u8; 32], - pub log_q: usize, - pub log_base2k: usize, - pub value: Vec, -} - -impl GadgetCiphertextShare { - pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Self { - let value: Vec = Vec::new(); - let cols: usize = (log_q + log_base2k - 1) / log_base2k; - (0..rows).for_each(|_| { - let vec_znx: VecZnx = module.new_vec_znx(cols); - }); - Self { - seed: [u8::default(); 32], - log_q: log_q, - log_base2k: log_base2k, - value: value, - } - } - - pub fn rows(&self) -> usize { - self.value.len() - } - - pub fn cols(&self) -> usize { - self.value[0].cols() - } - - pub fn aggregate_inplace(&mut self, module: &Module, a: &GadgetCiphertextShare) { - izip!(self.value.iter_mut(), a.value.iter()).for_each(|(bi, ai)| { - module.vec_znx_add_inplace(bi, ai); - }) - } - - pub fn get(&self, module: &Module, b: &mut Ciphertext, tmp_bytes: &mut [u8]) { - assert!(is_aligned(tmp_bytes.as_ptr())); - - let rows: usize = b.rows(); - let cols: usize = b.cols(); - - assert!(tmp_bytes.len() >= gadget_ciphertext_share_get_tmp_bytes(module, rows, cols)); - - assert_eq!(self.value.len(), rows); - assert_eq!(self.value[0].cols(), cols); - - let (tmp_bytes_vmp_prepare_row, tmp_bytes_vec_znx) = - tmp_bytes.split_at_mut(module.vmp_prepare_tmp_bytes(rows, cols)); - - let mut c: VecZnx = VecZnx::from_bytes_borrow(module.n(), cols, tmp_bytes_vec_znx); - - let mut source: Source = Source::new(self.seed); - - (0..self.value.len()).for_each(|row_i| { - module.vmp_prepare_row( - b.at_mut(0), - self.value[row_i].raw(), - row_i, - tmp_bytes_vmp_prepare_row, - ); - module.fill_uniform(self.log_base2k, &mut c, cols, &mut source); - module.vmp_prepare_row(b.at_mut(1), 
c.raw(), row_i, tmp_bytes_vmp_prepare_row) - }) - } - - pub fn get_new(&self, module: &Module, tmp_bytes: &mut [u8]) -> Ciphertext { - let mut b: Ciphertext = - new_gadget_ciphertext(module, self.log_base2k, self.rows(), self.log_q); - self.get(module, &mut b, tmp_bytes); - b - } -} - -pub fn gadget_ciphertext_share_get_tmp_bytes(module: &Module, rows: usize, cols: usize) -> usize { - module.vmp_prepare_tmp_bytes(rows, cols) + module.bytes_of_vec_znx(cols) -} - -pub struct CircularCiphertextProtocol {} - -pub struct CircularGadgetCiphertextProtocol {} diff --git a/rlwe/src/automorphism.rs b/rlwe/src/automorphism.rs new file mode 100644 index 0000000..e57cc3a --- /dev/null +++ b/rlwe/src/automorphism.rs @@ -0,0 +1,272 @@ +use crate::{ + ciphertext::{Ciphertext, new_gadget_ciphertext}, + elem::ElemCommon, + encryptor::{encrypt_grlwe_sk, encrypt_grlwe_sk_tmp_bytes}, + keys::SecretKey, + parameters::Parameters, +}; +use base2k::{ + Module, Scalar, ScalarOps, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, + VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps, assert_alignement, +}; +use sampling::source::Source; + +/// Stores DFT([-A*AUTO(s, -p) + 2^{-K*i}*s + E, A]) where AUTO(X, p): X^{i} -> X^{i*p} +pub struct AutomorphismKey { + pub value: Ciphertext, + pub p: i64, +} + +pub fn automorphis_key_new_tmp_bytes( + module: &Module, + log_base2k: usize, + rows: usize, + log_q: usize, +) -> usize { + module.bytes_of_scalar() + + module.bytes_of_svp_ppol() + + encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q) +} + +impl Parameters { + pub fn automorphism_key_new_tmp_bytes(&self, rows: usize, log_q: usize) -> usize { + automorphis_key_new_tmp_bytes(self.module(), self.log_base2k(), rows, log_q) + } +} + +impl AutomorphismKey { + pub fn new( + module: &Module, + p: i64, + sk: &SecretKey, + log_base2k: usize, + rows: usize, + log_q: usize, + source_xa: &mut Source, + source_xe: &mut Source, + sigma: f64, + tmp_bytes: &mut [u8], + ) -> Self { + let (sk_auto_bytes, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_scalar()); + let (sk_out_bytes, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_svp_ppol()); + + let sk_auto: Scalar = module.new_scalar_from_bytes_borrow(sk_auto_bytes); + let mut sk_out: SvpPPol = module.new_svp_ppol_from_bytes_borrow(sk_out_bytes); + let mut value: Ciphertext = new_gadget_ciphertext(module, log_base2k, rows, log_q); + + let p_inv: i64 = module.galois_element_inv(p); + module.vec_znx_automorphism(p_inv, &mut sk_auto.as_vec_znx(), &sk.0.as_vec_znx()); + module.svp_prepare(&mut sk_out, &sk_auto); + encrypt_grlwe_sk( + module, &mut value, &sk.0, &sk_out, source_xa, source_xe, sigma, tmp_bytes, + ); + + Self { value: value, p: p } + } +} + +pub fn automorphism_tmp_bytes( + module: &Module, + c_cols: usize, + a_cols: usize, + b_rows: usize, + b_cols: usize, +) -> usize { + return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols) + + 2 * module.bytes_of_vec_znx_dft(std::cmp::min(c_cols, a_cols)); +} + +pub fn automorphism( + module: &Module, + c: &mut Ciphertext, + a: &Ciphertext, + b: &AutomorphismKey, + tmp_bytes: &mut [u8], +) { + let cols = std::cmp::min(c.cols(), a.cols()); + + #[cfg(debug_assertions)] + { + assert!( + tmp_bytes.len() + >= automorphism_tmp_bytes( + module, + c.cols(), + a.cols(), + b.value.rows(), + b.value.cols() + ) + ); + assert_alignement(tmp_bytes.as_ptr()); + } + + let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); + let (tmp_bytes_res_dft, tmp_bytes) = 
tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); + + let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft); + let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_res_dft); + let mut res_big: VecZnxBig = res_dft.as_vec_znx_big(); + + // a1_dft = DFT(a[1]) + module.vec_znx_dft(&mut a1_dft, a.at(1)); + + // res_dft = IDFT() = [-b*AUTO(s, -p) + a * s + e] + module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(0), tmp_bytes); + module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft); + + // res_dft = [-b*AUTO(s, -p) + a * s + e] + [-a * s + m + e] = [-b*AUTO(s, -p) + m + e] + module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0)); + + // c[0] = NORMALIZE([-b*AUTO(s, -p) + m + e]) + module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(0), &mut res_big, tmp_bytes); + + // c[0] = AUTO([-b*AUTO(s, -p) + m + e], p) = [-AUTO(b, p)*s + AUTO(m, p) + AUTO(b, e)] + module.vec_znx_automorphism_inplace(b.p, c.at_mut(0)); + + // res_dft = IDFT() = [b] + module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(1), tmp_bytes); + module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft); + + // c[1] = b + module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(1), &mut res_big, tmp_bytes); + + // c[1] = AUTO(b, p) + module.vec_znx_automorphism_inplace(b.p, c.at_mut(1)); +} + +pub fn automorphism_big( + module: &Module, + c: &mut Ciphertext, + a: &Ciphertext, + b: &AutomorphismKey, + tmp_bytes: &mut [u8], +) { + let cols = std::cmp::min(c.cols(), a.cols()); + + #[cfg(debug_assertions)] + { + assert!( + tmp_bytes.len() + >= automorphism_tmp_bytes( + module, + c.cols(), + a.cols(), + b.value.rows(), + b.value.cols() + ) + ); + assert_alignement(tmp_bytes.as_ptr()); + } + + let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); + let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); + + let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft); + let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_res_dft); + + // a1_dft = DFT(a[1]) + module.vec_znx_dft(&mut a1_dft, a.at(1)); + + // res_dft = IDFT() = [-b*AUTO(s, -p) + a * s + e] + module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(0), tmp_bytes); + module.vec_znx_idft_tmp_a(c.at_mut(0), &mut res_dft); + + // res_dft = [-b*AUTO(s, -p) + a * s + e] + [-a * s + m + e] = [-b*AUTO(s, -p) + m + e] + module.vec_znx_big_add_small_inplace(c.at_mut(0), a.at(0)); + + // c[0] = AUTO([-b*AUTO(s, -p) + m + e], p) = [-AUTO(b, p)*s + AUTO(m, p) + AUTO(b, e)] + module.vec_znx_big_automorphism_inplace(b.p, c.at_mut(0)); + + // res_dft = IDFT() = [b] + module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(1), tmp_bytes); + module.vec_znx_idft_tmp_a(c.at_mut(1), &mut res_dft); + + // c[1] = AUTO(b, p) + module.vec_znx_big_automorphism_inplace(b.p, c.at_mut(1)); +} + +#[cfg(test)] +mod test { + use crate::{ + ciphertext::{new_gadget_ciphertext, Ciphertext}, decryptor::decrypt_rlwe, elem::{Elem, ElemCommon, ElemVecZnx}, encryptor::encrypt_rlwe_sk, keys::SecretKey, parameters::{Parameters, ParametersLiteral}, plaintext::Plaintext + }; + use base2k::{ + alloc_aligned, Encoding, Infos, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, BACKEND + }; + use sampling::source::{Source, new_seed}; + + use super::{AutomorphismKey, automorphis_key_new_tmp_bytes}; + + #[test] + fn 
test_automorphism() { + let log_base2k: usize = 10; + let q_cols: usize = 4; + let p_cols: usize = 1; + + // Basic parameters with enough limbs to test edge cases + let params_lit: ParametersLiteral = ParametersLiteral { + backend: BACKEND::FFT64, + log_n: 12, + log_q: q_cols * log_base2k, + log_p: p_cols * log_base2k, + log_base2k: log_base2k, + log_scale: 20, + xe: 3.2, + xs: 1 << 11, + }; + + let params: Parameters = Parameters::new(¶ms_lit); + + let rows: usize = params.cols_q(); + + // scratch space + let mut tmp_bytes: Vec = alloc_aligned( + params.decrypt_rlwe_tmp_byte(params.log_q()) + | params.encrypt_rlwe_sk_tmp_bytes(params.log_q()) + | params.gadget_product_tmp_bytes( + params.log_qp(), + params.log_qp(), + params.cols_qp(), + params.log_qp(), + ) + | params.encrypt_grlwe_sk_tmp_bytes(rows, params.log_qp()) + | params.automorphism_key_new_tmp_bytes(rows, params.log_qp()), + ); + + // Samplers for public and private randomness + let mut source_xe: Source = Source::new(new_seed()); + let mut source_xa: Source = Source::new(new_seed()); + let mut source_xs: Source = Source::new(new_seed()); + + // Two secret keys + let mut sk: SecretKey = SecretKey::new(params.module()); + sk.fill_ternary_hw(params.xs(), &mut source_xs); + let mut sk_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol(); + params.module().svp_prepare(&mut sk_svp_ppol, &sk.0); + + let p: i64 = -5; + + let auto_key: AutomorphismKey = AutomorphismKey::new( + params.module(), + p, + &sk, + params.log_base2k(), + rows, + params.log_qp(), + &mut source_xa, + &mut source_xe, + params.xe(), + &mut tmp_bytes, + ); + + let data: Vec = vec![0i64; params.n()]; + + let mut ct: Ciphertext = Ciphertext::new(params.module(), params.log_base2k(), params.log_q(), 2); + let mut pt: Plaintext = Plaintext::new(params.module(), params.log_base2k(), params.log_q()); + + pt.at_mut(0).encode_vec_i64(params.log_base2k(), 2*params.log_base2k(), &data, 32); + + encrypt_rlwe_sk(params.module(), &mut ct.elem_mut(), Some(&pt.elem()), &sk_svp_ppol, &mut source_xa, &mut source_xe, params.xe(), &mut tmp_bytes); + + } +} diff --git a/rlwe/src/decryptor.rs b/rlwe/src/decryptor.rs index e4d9545..8a1e5d7 100644 --- a/rlwe/src/decryptor.rs +++ b/rlwe/src/decryptor.rs @@ -62,14 +62,13 @@ pub fn decrypt_rlwe( let (tmp_bytes_vec_znx_dft, tmp_bytes_normalize) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); - let mut res_dft: VecZnxDft = - VecZnxDft::from_bytes_borrow(module, a.cols(), tmp_bytes_vec_znx_dft); + let mut res_dft: VecZnxDft = VecZnxDft::from_bytes_borrow(module, cols, tmp_bytes_vec_znx_dft); let mut res_big: base2k::VecZnxBig = res_dft.as_vec_znx_big(); // res_dft <- DFT(ct[1]) * DFT(sk) - module.svp_apply_dft(&mut res_dft, sk, a.at(1), cols); + module.svp_apply_dft(&mut res_dft, sk, a.at(1)); // res_big <- ct[1] x sk - module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, cols); + module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft); // res_big <- ct[1] x sk + ct[0] module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0)); // res <- normalize(ct[1] x sk + ct[0]) diff --git a/rlwe/src/encryptor.rs b/rlwe/src/encryptor.rs index dd25280..8d8f3c4 100644 --- a/rlwe/src/encryptor.rs +++ b/rlwe/src/encryptor.rs @@ -153,13 +153,13 @@ pub fn encrypt_rlwe_sk( let mut buf_dft: VecZnxDft = VecZnxDft::from_bytes_borrow(module, cols, tmp_bytes_vec_znx_dft); // Applies buf_dft <- DFT(s) * DFT(c1) - module.svp_apply_dft(&mut buf_dft, sk, c1, cols); + module.svp_apply_dft(&mut buf_dft, sk, c1); // Alias scratch space let mut buf_big: 
VecZnxBig = buf_dft.as_vec_znx_big(); // buf_big = s x c1 - module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, cols); + module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft); // c0 <- -s x c1 + m let c0: &mut VecZnx = ct.at_mut(0); diff --git a/rlwe/src/gadget_product.rs b/rlwe/src/gadget_product.rs index 3011ac4..ad4bf69 100644 --- a/rlwe/src/gadget_product.rs +++ b/rlwe/src/gadget_product.rs @@ -1,8 +1,10 @@ use crate::{ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters}; -use base2k::{Module, VecZnx, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps}; +use base2k::{ + Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps, +}; use std::cmp::min; -pub fn gadget_product_tmp_bytes( +pub fn gadget_product_core_tmp_bytes( module: &Module, log_base2k: usize, res_log_q: usize, @@ -24,7 +26,7 @@ impl Parameters { gct_rows: usize, gct_log_q: usize, ) -> usize { - gadget_product_tmp_bytes( + gadget_product_core_tmp_bytes( self.module(), self.log_base2k(), res_log_q, @@ -35,54 +37,99 @@ impl Parameters { } } -/// Evaluates the gadget product res <- a x b. -/// -/// # Arguments -/// -/// * `module`: backend support for operations mod (X^N + 1). -/// * `res`: an [Elem] to store (-cs + m * a + e, c) with res_ncols cols. -/// * `a`: a [VecZnx] of a_ncols cols. -/// * `b`: a [Ciphertext] as a vector of (-Bs + m * 2^{-k} + E, B) -/// containing b_nrows [VecZnx], each of b_ncols cols. -/// -/// # Computation -/// -/// res = sum[min(a_ncols, b_nrows)] decomp(a, i) * (-B[i]s + m * 2^{-k*i} + E[i], B[i]) -/// = (cs + m * a + e, c) with min(res_cols, b_cols) cols. pub fn gadget_product_core( module: &Module, res_dft_0: &mut VecZnxDft, res_dft_1: &mut VecZnxDft, a: &VecZnx, - a_cols: usize, b: &Ciphertext, b_cols: usize, tmp_bytes: &mut [u8], ) { assert!(b_cols <= b.cols()); - module.vec_znx_dft(res_dft_1, a, min(a_cols, b_cols)); + module.vec_znx_dft(res_dft_1, a); module.vmp_apply_dft_to_dft(res_dft_0, res_dft_1, b.at(0), tmp_bytes); module.vmp_apply_dft_to_dft_inplace(res_dft_1, b.at(1), tmp_bytes); } -/* -// res_big[a * (G0|G1)] <- IDFT(res_dft[a * (G0|G1)]) -module.vec_znx_idft_tmp_a(&mut res_big_0, &mut res_dft_0, b_cols); -module.vec_znx_idft_tmp_a(&mut res_big_1, &mut res_dft_1, b_cols); - -// res_big <- res[0] + res_big[a*G0] -module.vec_znx_big_add_small_inplace(&mut res_big_0, res.at(0)); -module.vec_znx_big_normalize(log_base2k, res.at_mut(0), &res_big_0, tmp_bytes_carry); - -if OVERWRITE { - // res[1] = normalize(res_big[a*G1]) - module.vec_znx_big_normalize(log_base2k, res.at_mut(1), &res_big_1, tmp_bytes_carry); -} else { - // res[1] = normalize(res_big[a*G1] + res[1]) - module.vec_znx_big_add_small_inplace(&mut res_big_1, res.at(1)); - module.vec_znx_big_normalize(log_base2k, res.at_mut(1), &res_big_1, tmp_bytes_carry); +pub fn gadget_product_big_tmp_bytes( + module: &Module, + c_cols: usize, + a_cols: usize, + b_rows: usize, + b_cols: usize, +) -> usize { + return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols) + + 2 * module.bytes_of_vec_znx_dft(min(c_cols, a_cols)); +} + +/// Evaluates the gadget product: c.at(i) = IDFT() +/// +/// # Arguments +/// +/// * `module`: backend support for operations mod (X^N + 1). +/// * `c`: a [Ciphertext] with cols_c cols. +/// * `a`: a [Ciphertext] with cols_a cols. +/// * `b`: a [Ciphertext] with at least min(cols_c, cols_a) rows. 
+pub fn gadget_product_big(
+    module: &Module,
+    c: &mut Ciphertext,
+    a: &Ciphertext,
+    b: &Ciphertext,
+    tmp_bytes: &mut [u8],
+) {
+    let cols: usize = min(c.cols(), a.cols());
+
+    let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
+    let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
+
+    let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft);
+    let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_res_dft);
+
+    // a1_dft = DFT(a[1])
+    module.vec_znx_dft(&mut a1_dft, a.at(1));
+
+    // c[i] = IDFT(DFT(a[1]) * b[i])
+    (0..2).for_each(|i| {
+        module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.at(i), tmp_bytes);
+        module.vec_znx_idft_tmp_a(c.at_mut(i), &mut res_dft);
+    })
+}
+
+/// Evaluates the gadget product: c.at(i) = NORMALIZE(IDFT(DFT(a[1]) * b.at(i)))
+///
+/// # Arguments
+///
+/// * `module`: backend support for operations mod (X^N + 1).
+/// * `c`: a [Ciphertext] with cols_c cols.
+/// * `a`: a [Ciphertext] with cols_a cols.
+/// * `b`: a [Ciphertext] with at least min(cols_c, cols_a) rows.
+pub fn gadget_product(
+    module: &Module,
+    c: &mut Ciphertext,
+    a: &Ciphertext,
+    b: &Ciphertext,
+    tmp_bytes: &mut [u8],
+) {
+    let cols: usize = min(c.cols(), a.cols());
+
+    let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
+    let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
+
+    let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft);
+    let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_res_dft);
+    let mut res_big: VecZnxBig = res_dft.as_vec_znx_big();
+
+    // a1_dft = DFT(a[1])
+    module.vec_znx_dft(&mut a1_dft, a.at(1));
+
+    // c[i] = IDFT(DFT(a[1]) * b[i])
+    (0..2).for_each(|i| {
+        module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.at(i), tmp_bytes);
+        module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
+        module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(i), &mut res_big, tmp_bytes);
+    })
 }
-*/
 
 #[cfg(test)]
 mod test {
@@ -97,7 +144,7 @@
         plaintext::Plaintext,
     };
     use base2k::{
-        Infos, BACKEND, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
+        BACKEND, Infos, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
         VecZnxDftOps, VecZnxOps, VmpPMat, alloc_aligned_u8,
     };
     use sampling::source::{Source, new_seed};
@@ -125,7 +172,6 @@
         // scratch space
        let mut tmp_bytes: Vec<u8> = alloc_aligned_u8(
            params.decrypt_rlwe_tmp_byte(params.log_qp())
-                | params.encrypt_rlwe_sk_tmp_bytes(params.log_qp())
                | params.gadget_product_tmp_bytes(
                    params.log_qp(),
                    params.log_qp(),
@@ -193,12 +239,8 @@
        let mut a_times_s: VecZnx = params.module().new_vec_znx(a.cols());
 
         // a * sk0
-        params
-            .module()
-            .svp_apply_dft(&mut a_dft, &sk0_svp_ppol, &a, a.cols());
-        params
-            .module()
-            .vec_znx_idft_tmp_a(&mut a_big, &mut a_dft, a.cols());
+        params.module().svp_apply_dft(&mut a_dft, &sk0_svp_ppol, &a);
+        params.module().vec_znx_idft_tmp_a(&mut a_big, &mut a_dft);
         params.module().vec_znx_big_normalize(
             params.log_base2k(),
             &mut a_times_s,
@@ -228,7 +270,6 @@
             &mut res_dft_0,
             &mut res_dft_1,
             &a,
-            a_cols,
             &gadget_ct,
             b_cols,
             &mut tmp_bytes,
@@ -237,11 +278,11 @@
         // res_big_0 = IDFT(res_dft_0)
         params
             .module()
-            .vec_znx_idft_tmp_a(&mut res_big_0, &mut res_dft_0, b_cols);
+            .vec_znx_idft_tmp_a(&mut res_big_0, &mut res_dft_0);
 
         // res_big_1 = 
IDFT(res_dft_1); params .module() - .vec_znx_idft_tmp_a(&mut res_big_1, &mut res_dft_1, b_cols); + .vec_znx_idft_tmp_a(&mut res_big_1, &mut res_dft_1); // res_big_0 = normalize(res_big_0) params.module().vec_znx_big_normalize( diff --git a/rlwe/src/lib.rs b/rlwe/src/lib.rs index 1243297..7eeb13b 100644 --- a/rlwe/src/lib.rs +++ b/rlwe/src/lib.rs @@ -1,3 +1,4 @@ +pub mod automorphism; pub mod ciphertext; pub mod decryptor; pub mod elem; @@ -8,3 +9,4 @@ pub mod keys; pub mod parameters; pub mod plaintext; pub mod rgsw_product; +pub mod trace; diff --git a/rlwe/src/rgsw_product.rs b/rlwe/src/rgsw_product.rs index 51ee793..e5632cb 100644 --- a/rlwe/src/rgsw_product.rs +++ b/rlwe/src/rgsw_product.rs @@ -39,7 +39,7 @@ pub fn rgsw_product( let mut _r2_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes(cols, tmp_bytes_r2_dft); // c0_dft <- DFT(a[0]) - module.vec_znx_dft(&mut c0_dft, a.at(0), in_cols); + module.vec_znx_dft(&mut c0_dft, a.at(0)); // r_dft <- sum[rows] c0_dft[cols] x RGSW[0][cols] module.vmp_apply_dft_to_dft( @@ -50,5 +50,5 @@ pub fn rgsw_product( ); // c1_dft <- DFT(a[1]) - module.vec_znx_dft(&mut c1_dft, a.at(1), in_cols); + module.vec_znx_dft(&mut c1_dft, a.at(1)); } diff --git a/rlwe/src/trace.rs b/rlwe/src/trace.rs new file mode 100644 index 0000000..1882345 --- /dev/null +++ b/rlwe/src/trace.rs @@ -0,0 +1,112 @@ +use crate::{automorphism::AutomorphismKey, ciphertext::Ciphertext, elem::ElemCommon}; +use base2k::{ + Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMatOps, + assert_alignement, +}; +use std::collections::HashMap; + +pub fn trace_tmp_bytes( + module: &Module, + c_cols: usize, + a_cols: usize, + b_rows: usize, + b_cols: usize, +) -> usize { + return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols) + + 2 * module.bytes_of_vec_znx_dft(std::cmp::min(c_cols, a_cols)); +} + +pub fn trace_inplace( + module: &Module, + a: &mut Ciphertext, + start: usize, + end: usize, + b: HashMap, + tmp_bytes: &mut [u8], +) { + let cols: usize = a.cols(); + + let b_rows: usize; + let b_cols: usize; + + if let Some((_, key)) = b.iter().next() { + b_rows = key.value.rows(); + b_cols = key.value.cols(); + } else { + panic!("b: HashMap, is empty") + } + + #[cfg(debug_assertions)] + { + assert!(start <= end); + assert!(end <= module.n()); + assert!(tmp_bytes.len() >= trace_tmp_bytes(module, cols, cols, b_rows, b_cols)); + assert_alignement(tmp_bytes.as_ptr()); + } + + let cols: usize = std::cmp::min(b_cols, a.cols()); + + let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); + let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); + + let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft); + let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_res_dft); + let mut res_big: VecZnxBig = res_dft.as_vec_znx_big(); + + let log_base2k: usize = a.log_base2k(); + + (start..end).for_each(|i| { + a.at_mut(0).rsh(log_base2k, 1, tmp_bytes); + a.at_mut(1).rsh(log_base2k, 1, tmp_bytes); + + let p: i64; + if i == 0 { + p = -1; + } else { + p = module.galois_element(1 << (i - 1)); + } + + if let Some(key) = b.get(&p) { + module.vec_znx_dft(&mut a1_dft, a.at(1)); + + // a[0] = NORMALIZE(a[0] + AUTO(a[0] + IDFT())) + module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, key.value.at(0), tmp_bytes); + module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft); + module.vec_znx_big_add_small_inplace(&mut res_big, 
a.at(0)); + module.vec_znx_big_automorphism_inplace(p, &mut res_big); + module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0)); + module.vec_znx_big_normalize(a.log_base2k(), a.at_mut(0), &mut res_big, tmp_bytes); + + // a[1] = NORMALIZE(a[1] + AUTO(IDFT())) + module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, key.value.at(1), tmp_bytes); + module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft); + module.vec_znx_big_automorphism_inplace(p, &mut res_big); + module.vec_znx_big_add_small_inplace(&mut res_big, a.at(1)); + module.vec_znx_big_normalize(a.log_base2k(), a.at_mut(1), &mut res_big, tmp_bytes); + } else { + panic!("b[{}] is empty", p) + } + }) +} + +#[cfg(test)] +mod test { + use crate::{ + ciphertext::{Ciphertext, new_gadget_ciphertext}, + decryptor::decrypt_rlwe, + elem::{Elem, ElemCommon, ElemVecZnx}, + encryptor::encrypt_grlwe_sk, + gadget_product::gadget_product_core, + keys::SecretKey, + parameters::{Parameters, ParametersLiteral}, + plaintext::Plaintext, + }; + use base2k::{ + BACKEND, Infos, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, + VecZnxDftOps, VecZnxOps, VmpPMat, alloc_aligned_u8, + }; + use sampling::source::{Source, new_seed}; + + #[test] + fn test_trace_inplace() {} +}
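Call-site sketch for the signature changes above (an illustration assembled only from calls that appear in this diff; the degree, column count, and variable names are arbitrary, and the secret scalar is left at zero rather than sampled). The explicit `*_cols` arguments are gone: the wrappers now forward the column counts of the operands themselves, so outputs are sized once at allocation.

    use base2k::{
        BACKEND, Infos, Module, Scalar, ScalarOps, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig,
        VecZnxDft, VecZnxDftOps, VecZnxOps,
    };

    fn main() {
        let module: Module = Module::new(32, BACKEND::FFT64);
        let cols: usize = 4;

        // Prepare a (zero) scalar; sampling a real secret is out of scope for this sketch.
        let s: Scalar = module.new_scalar();
        let mut s_ppol: SvpPPol = module.new_svp_ppol();
        module.svp_prepare(&mut s_ppol, &s);

        let a: VecZnx = module.new_vec_znx(cols);

        // The output is sized at allocation time; the wrappers read cols() from the
        // operands instead of taking an explicit *_cols argument.
        let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.cols());
        module.svp_apply_dft(&mut buf_dft, &s_ppol, &a);       // was: svp_apply_dft(.., &a, a.cols())
        let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big();
        module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft); // was: vec_znx_idft_tmp_a(.., a.cols())
    }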