diff --git a/base2k/examples/rlwe_encrypt.rs b/base2k/examples/rlwe_encrypt.rs
index 0281ec3..9856362 100644
--- a/base2k/examples/rlwe_encrypt.rs
+++ b/base2k/examples/rlwe_encrypt.rs
@@ -8,9 +8,9 @@ use sampling::source::Source;
 fn main() {
     let n: usize = 16;
     let log_base2k: usize = 18;
-    let limbs: usize = 3;
-    let msg_limbs: usize = 2;
-    let log_scale: usize = msg_limbs * log_base2k - 5;
+    let cols: usize = 3;
+    let msg_cols: usize = 2;
+    let log_scale: usize = msg_cols * log_base2k - 5;
     let module: Module = Module::new::<FFT64>(n);
     let mut carry: Vec<u8> = vec![0; module.vec_znx_big_normalize_tmp_bytes()];
@@ -18,7 +18,7 @@ fn main() {
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);

-    let mut res: VecZnx = module.new_vec_znx(limbs);
+    let mut res: VecZnx = module.new_vec_znx(cols);

     // s <- Z_{-1, 0, 1}[X]/(X^{N}+1)
     let mut s: Scalar = Scalar::new(n);
@@ -31,22 +31,22 @@ fn main() {
     module.svp_prepare(&mut s_ppol, &s);

     // a <- Z_{2^prec}[X]/(X^{N}+1)
-    let mut a: VecZnx = module.new_vec_znx(limbs);
-    module.fill_uniform(log_base2k, &mut a, limbs, &mut source);
+    let mut a: VecZnx = module.new_vec_znx(cols);
+    module.fill_uniform(log_base2k, &mut a, cols, &mut source);

     // Scratch space for DFT values
-    let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.limbs());
+    let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.cols());

     // Applies buf_dft <- s * a
-    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.limbs());
+    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.cols());

     // Alias scratch space
     let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big();

     // buf_big <- IDFT(buf_dft) (not normalized)
-    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.limbs());
+    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.cols());

-    let mut m: VecZnx = module.new_vec_znx(msg_limbs);
+    let mut m: VecZnx = module.new_vec_znx(msg_cols);
     let mut want: Vec<i64> = vec![0; n];
     want.iter_mut()
@@ -60,12 +60,12 @@ fn main() {
     module.vec_znx_big_sub_small_a_inplace(&mut buf_big, &m);

     // b <- normalize(buf_big) + e
-    let mut b: VecZnx = module.new_vec_znx(limbs);
+    let mut b: VecZnx = module.new_vec_znx(cols);
     module.vec_znx_big_normalize(log_base2k, &mut b, &buf_big, &mut carry);
     module.add_normal(
         log_base2k,
         &mut b,
-        log_base2k * limbs,
+        log_base2k * cols,
         &mut source,
         3.2,
         19.0,
     );
@@ -74,8 +74,8 @@ fn main() {
     // Decrypt

     // buf_big <- a * s
-    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.limbs());
-    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, b.limbs());
+    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.cols());
+    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, b.cols());

     // buf_big <- a * s + b
     module.vec_znx_big_add_small_inplace(&mut buf_big, &b);
@@ -85,9 +85,9 @@ fn main() {
     // have = m * 2^{log_scale} + e
     let mut have: Vec<i64> = vec![i64::default(); n];
-    res.decode_vec_i64(log_base2k, res.limbs() * log_base2k, &mut have);
+    res.decode_vec_i64(log_base2k, res.cols() * log_base2k, &mut have);

-    let scale: f64 = (1 << (res.limbs() * log_base2k - log_scale)) as f64;
+    let scale: f64 = (1 << (res.cols() * log_base2k - log_scale)) as f64;
     izip!(want.iter(), have.iter())
         .enumerate()
         .for_each(|(i, (a, b))| {
diff --git a/base2k/examples/vector_matrix_product.rs b/base2k/examples/vector_matrix_product.rs
index 7dd9a59..11a5783 100644
--- a/base2k/examples/vector_matrix_product.rs
+++ b/base2k/examples/vector_matrix_product.rs
@@ -4,31 +4,31 @@ use base2k::{
 };

 fn main() {
-    let log_n = 5;
-    let n = 1 << log_n;
+    let log_n: i32 = 5;
+    let n: usize = 1 << log_n;
     let module: Module = Module::new::<FFT64>(n);
     let log_base2k: usize = 15;
-    let limbs: usize = 5;
-    let log_k: usize = log_base2k * limbs - 5;
+    let cols: usize = 5;
+    let log_k: usize = log_base2k * cols - 5;

-    let rows: usize = limbs;
-    let cols: usize = limbs + 1;
+    let rows: usize = cols;
+    let cols: usize = cols + 1;

     // Maximum size of the byte scratch needed
     let tmp_bytes: usize = module.vmp_prepare_tmp_bytes(rows, cols)
-        | module.vmp_apply_dft_tmp_bytes(limbs, limbs, rows, cols);
+        | module.vmp_apply_dft_tmp_bytes(cols, cols, rows, cols);
     let mut buf: Vec<u8> = vec![0; tmp_bytes];

     let mut a_values: Vec<i64> = vec![i64::default(); n];
     a_values[1] = (1 << log_base2k) + 1;

-    let mut a: VecZnx = module.new_vec_znx(limbs);
+    let mut a: VecZnx = module.new_vec_znx(cols);
     a.encode_vec_i64(log_base2k, log_k, &a_values, 32);
     a.normalize(log_base2k, &mut buf);

-    a.print_limbs(a.limbs(), n);
+    a.print(a.cols(), n);
     println!();

     let mut vecznx: Vec<VecZnx> = Vec::new();
@@ -40,8 +40,10 @@ fn main() {
         vecznx[i].data[i * n + 1] = 1 as i64;
     });

+    let slices: Vec<&[i64]> = vecznx.iter().map(|v| v.data.as_slice()).collect();
+
     let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
-    module.vmp_prepare_dblptr(&mut vmp_pmat, &vecznx, &mut buf);
+    module.vmp_prepare_dblptr(&mut vmp_pmat, &slices, &mut buf);

     let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf);
@@ -55,7 +57,7 @@ fn main() {
     let mut values_res: Vec<i64> = vec![i64::default(); n];
     res.decode_vec_i64(log_base2k, log_k, &mut values_res);

-    res.print_limbs(res.limbs(), n);
+    res.print(res.cols(), n);

     module.free();
     c_dft.free();
diff --git a/base2k/src/encoding.rs b/base2k/src/encoding.rs
index fcabc49..5a2ca2d 100644
--- a/base2k/src/encoding.rs
+++ b/base2k/src/encoding.rs
@@ -54,11 +54,10 @@ pub trait Encoding {
 impl Encoding for VecZnx {
     fn encode_vec_i64(&mut self, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
+
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;

-        println!("limbs: {}", limbs);
-
-        assert!(limbs <= self.limbs(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}", limbs, self.limbs());
+        assert!(cols <= self.cols(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.cols()={}", cols, self.cols());

         let size: usize = min(data.len(), self.n());
         let log_k_rem: usize = log_base2k - (log_k % log_base2k);
@@ -67,19 +66,19 @@ impl Encoding for VecZnx {
         // values on the last limb.
         // Else we decompose values base2k.
         if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-            (0..self.limbs()).for_each(|i| unsafe {
+            (0..self.cols()).for_each(|i| unsafe {
                 znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
             });
-            self.at_mut(limbs - 1)[..size].copy_from_slice(&data[..size]);
+            self.at_mut(cols - 1)[..size].copy_from_slice(&data[..size]);
         } else {
             let mask: i64 = (1 << log_base2k) - 1;
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
             (0..steps).for_each(|i| unsafe {
                 znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
             });
-            (limbs - steps..limbs)
+            (cols - steps..cols)
                 .rev()
                 .enumerate()
                 .for_each(|(i, i_rev)| {
@@ -91,9 +90,9 @@
         // Case where self.prec % self.k != 0.
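// (Illustrative sketch, not part of this file.) The remainder branch below is
// easier to follow on a free-standing model of the layout encode_vec_i64
// produces: a log_k-bit value is spread MSB-first over
// ceil(log_k / log_base2k) columns, and when log_k is not a multiple of
// log_base2k every written digit is shifted left by
// log_k_rem = log_base2k - (log_k % log_base2k); a later normalize() pass
// carries the oversized digits back into range. All names here are
// illustrative, and for clarity the value is assumed to satisfy
// 0 <= value < 2^log_k.
fn decompose_base2k(value: i64, log_base2k: usize, log_k: usize) -> Vec<i64> {
    let cols: usize = (log_k + log_base2k - 1) / log_base2k;
    let mask: i64 = (1 << log_base2k) - 1;
    let mut digits: Vec<i64> = vec![0; cols];
    let mut v: i64 = value;
    // The least-significant digit lands in the last column, as above.
    for d in digits.iter_mut().rev() {
        *d = v & mask;
        v >>= log_base2k;
    }
    let log_k_rem: usize = log_base2k - (log_k % log_base2k);
    if log_k_rem != log_base2k {
        // The remainder case handled by the branch below.
        digits.iter_mut().for_each(|d| *d <<= log_k_rem);
    }
    digits
}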
if log_k_rem != log_base2k { - let limbs = self.limbs(); - let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k); - (limbs - steps..limbs).rev().for_each(|i| { + let cols = self.cols(); + let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k); + (cols - steps..cols).rev().for_each(|i| { self.at_mut(i)[..size] .iter_mut() .for_each(|x| *x <<= log_k_rem); @@ -102,7 +101,7 @@ impl Encoding for VecZnx { } fn decode_vec_i64(&self, log_base2k: usize, log_k: usize, data: &mut [i64]) { - let limbs: usize = (log_k + log_base2k - 1) / log_base2k; + let cols: usize = (log_k + log_base2k - 1) / log_base2k; assert!( data.len() >= self.n, "invalid data: data.len()={} < self.n()={}", @@ -111,8 +110,8 @@ impl Encoding for VecZnx { ); data.copy_from_slice(self.at(0)); let rem: usize = log_base2k - (log_k % log_base2k); - (1..limbs).for_each(|i| { - if i == limbs - 1 && rem != log_base2k { + (1..cols).for_each(|i| { + if i == cols - 1 && rem != log_base2k { let k_rem: usize = log_base2k - rem; izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| { *y = (*y << k_rem) + (x >> rem); @@ -134,25 +133,25 @@ impl Encoding for VecZnx { log_max: usize, ) { assert!(i < self.n()); - let limbs: usize = (log_k + log_base2k - 1) / log_base2k; - assert!(limbs <= self.limbs(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}", limbs, self.limbs()); + let cols: usize = (log_k + log_base2k - 1) / log_base2k; + assert!(cols <= self.cols(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.cols()={}", cols, self.cols()); let log_k_rem: usize = log_base2k - (log_k % log_base2k); - let limbs = self.limbs(); + let cols = self.cols(); // If 2^{log_base2k} * 2^{log_k_rem} < 2^{63}-1, then we can simply copy // values on the last limb. // Else we decompose values base2k. if log_max + log_k_rem < 63 || log_k_rem == log_base2k { - (0..limbs - 1).for_each(|j| self.at_mut(j)[i] = 0); + (0..cols - 1).for_each(|j| self.at_mut(j)[i] = 0); - self.at_mut(self.limbs() - 1)[i] = value; + self.at_mut(self.cols() - 1)[i] = value; } else { let mask: i64 = (1 << log_base2k) - 1; - let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k); + let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k); - (0..limbs - steps).for_each(|j| self.at_mut(j)[i] = 0); + (0..cols - steps).for_each(|j| self.at_mut(j)[i] = 0); - (limbs - steps..limbs) + (cols - steps..cols) .rev() .enumerate() .for_each(|(j, j_rev)| { @@ -162,22 +161,22 @@ impl Encoding for VecZnx { // Case where self.prec % self.k != 0. 
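// (Illustrative sketch, continuing the one above.) Recomposition mirrors
// decode_vec_i64 earlier in this file: columns fold back MSB-first, and in
// the remainder case the last column only contributes its top
// log_k % log_base2k bits. This round-trips decompose_base2k exactly,
// modulo 2^log_k.
fn recompose_base2k(digits: &[i64], log_base2k: usize, log_k: usize) -> i64 {
    let rem: usize = log_base2k - (log_k % log_base2k);
    let mut acc: i64 = digits[0];
    for (i, &x) in digits.iter().enumerate().skip(1) {
        if i == digits.len() - 1 && rem != log_base2k {
            acc = (acc << (log_base2k - rem)) + (x >> rem); // top bits only
        } else {
            acc = (acc << log_base2k) + x;
        }
    }
    acc
}
// (The branch below applies the same remainder shift per coefficient.)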
if log_k_rem != log_base2k { - let limbs = self.limbs(); - let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k); - (limbs - steps..limbs).rev().for_each(|j| { + let cols = self.cols(); + let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k); + (cols - steps..cols).rev().for_each(|j| { self.at_mut(j)[i] <<= log_k_rem; }) } } fn decode_coeff_i64(&self, log_base2k: usize, log_k: usize, i: usize) -> i64 { - let limbs: usize = (log_k + log_base2k - 1) / log_base2k; + let cols: usize = (log_k + log_base2k - 1) / log_base2k; assert!(i < self.n()); let mut res: i64 = self.data[i]; let rem: usize = log_base2k - (log_k % log_base2k); - (1..limbs).for_each(|i| { + (1..cols).for_each(|i| { let x = self.data[i * self.n]; - if i == limbs - 1 && rem != log_base2k { + if i == cols - 1 && rem != log_base2k { let k_rem: usize = log_base2k - rem; res = (res << k_rem) + (x >> rem); } else { @@ -198,9 +197,9 @@ mod tests { fn test_set_get_i64_lo_norm() { let n: usize = 8; let log_base2k: usize = 17; - let limbs: usize = 5; - let log_k: usize = limbs * log_base2k - 5; - let mut a: VecZnx = VecZnx::new(n, limbs); + let cols: usize = 5; + let log_k: usize = cols * log_base2k - 5; + let mut a: VecZnx = VecZnx::new(n, cols); let mut have: Vec = vec![i64::default(); n]; have.iter_mut() .enumerate() @@ -215,9 +214,9 @@ mod tests { fn test_set_get_i64_hi_norm() { let n: usize = 8; let log_base2k: usize = 17; - let limbs: usize = 5; - let log_k: usize = limbs * log_base2k - 5; - let mut a: VecZnx = VecZnx::new(n, limbs); + let cols: usize = 5; + let log_k: usize = cols * log_base2k - 5; + let mut a: VecZnx = VecZnx::new(n, cols); let mut have: Vec = vec![i64::default(); n]; let mut source = Source::new([1; 32]); have.iter_mut().for_each(|x| { @@ -226,9 +225,9 @@ mod tests { .wrapping_sub(u64::MAX / 2 + 1) as i64; }); a.encode_vec_i64(log_base2k, log_k, &have, 63); - //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i))); + //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i))); let mut want = vec![i64::default(); n]; - //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i))); + //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i))); a.decode_vec_i64(log_base2k, log_k, &mut want); izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b)); } diff --git a/base2k/src/infos.rs b/base2k/src/infos.rs index 2d0441f..e853a03 100644 --- a/base2k/src/infos.rs +++ b/base2k/src/infos.rs @@ -7,10 +7,6 @@ pub trait Infos { /// Returns the base two logarithm of the ring dimension of the receiver. fn log_n(&self) -> usize; - /// Returns the number of limbs of the receiver. - /// This method is equivalent to [Infos::cols]. - fn limbs(&self) -> usize; - /// Returns the number of columns of the receiver. /// This method is equivalent to [Infos::limbs]. fn cols(&self) -> usize; @@ -30,11 +26,6 @@ impl Infos for VecZnx { self.n } - /// Returns the number of limbs of the [VecZnx]. - fn limbs(&self) -> usize { - self.data.len() / self.n - } - /// Returns the number of limbs of the [VecZnx]. fn cols(&self) -> usize { self.data.len() / self.n @@ -57,11 +48,6 @@ impl Infos for VecZnxBorrow { self.n } - /// Returns the number of limbs of the [VecZnx]. - fn limbs(&self) -> usize { - self.limbs - } - /// Returns the number of limbs of the [VecZnx]. fn cols(&self) -> usize { self.limbs @@ -83,12 +69,6 @@ impl Infos for VmpPMat { (usize::BITS - (self.n() - 1).leading_zeros()) as _ } - /// Returns the number of limbs of each [VecZnxDft]. 
- /// This method is equivalent to [Self::cols]. - fn limbs(&self) -> usize { - self.cols - } - /// Returns the number of rows (i.e. of [VecZnxDft]) of the [VmpPMat] fn rows(&self) -> usize { self.rows diff --git a/base2k/src/lib.rs b/base2k/src/lib.rs index 7db95ce..ee25f6f 100644 --- a/base2k/src/lib.rs +++ b/base2k/src/lib.rs @@ -31,6 +31,7 @@ pub use vmp::*; pub const GALOISGENERATOR: u64 = 5; +#[allow(dead_code)] fn is_aligned(ptr: *const T, align: usize) -> bool { (ptr as usize) % align == 0 } diff --git a/base2k/src/sampling.rs b/base2k/src/sampling.rs index 1698d0f..fb94930 100644 --- a/base2k/src/sampling.rs +++ b/base2k/src/sampling.rs @@ -3,8 +3,8 @@ use rand_distr::{Distribution, Normal}; use sampling::source::Source; pub trait Sampling { - /// Fills the first `limbs` limbs with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\] - fn fill_uniform(&self, log_base2k: usize, a: &mut T, limbs: usize, source: &mut Source); + /// Fills the first `cols` cols with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\] + fn fill_uniform(&self, log_base2k: usize, a: &mut T, cols: usize, source: &mut Source); /// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\]. fn add_dist_f64>( @@ -30,11 +30,11 @@ pub trait Sampling { } impl Sampling for Module { - fn fill_uniform(&self, log_base2k: usize, a: &mut T, limbs: usize, source: &mut Source) { + fn fill_uniform(&self, log_base2k: usize, a: &mut T, cols: usize, source: &mut Source) { let base2k: u64 = 1 << log_base2k; let mask: u64 = base2k - 1; let base2k_half: i64 = (base2k >> 1) as i64; - let size: usize = a.n() * limbs; + let size: usize = a.n() * cols; a.raw_mut()[..size] .iter_mut() .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half); @@ -58,7 +58,7 @@ impl Sampling for Module { let log_base2k_rem: usize = log_k % log_base2k; if log_base2k_rem != 0 { - a.at_mut(a.limbs() - 1).iter_mut().for_each(|a| { + a.at_mut(a.cols() - 1).iter_mut().for_each(|a| { let mut dist_f64: f64 = dist.sample(source); while dist_f64.abs() > bound { dist_f64 = dist.sample(source) @@ -66,7 +66,7 @@ impl Sampling for Module { *a += (dist_f64.round() as i64) << log_base2k_rem }); } else { - a.at_mut(a.limbs() - 1).iter_mut().for_each(|a| { + a.at_mut(a.cols() - 1).iter_mut().for_each(|a| { let mut dist_f64: f64 = dist.sample(source); while dist_f64.abs() > bound { dist_f64 = dist.sample(source) diff --git a/base2k/src/svp.rs b/base2k/src/svp.rs index eab573d..9bfe2ef 100644 --- a/base2k/src/svp.rs +++ b/base2k/src/svp.rs @@ -77,8 +77,8 @@ impl SvpPPol { SvpPPol(bytes.as_mut_ptr() as *mut svp::svp_ppol_t, size) } - /// Returns the number of limbs of the [SvpPPol], which is always 1. - pub fn limbs(&self) -> usize { + /// Returns the number of cols of the [SvpPPol], which is always 1. 
+ pub fn cols(&self) -> usize { 1 } } @@ -101,7 +101,7 @@ pub trait SvpPPolOps { c: &mut VecZnxDft, a: &SvpPPol, b: &T, - b_limbs: usize, + b_cols: usize, ); } @@ -123,22 +123,22 @@ impl SvpPPolOps for Module { c: &mut VecZnxDft, a: &SvpPPol, b: &T, - b_limbs: usize, + b_cols: usize, ) { assert!( - c.limbs() >= b_limbs, - "invalid c_vector: c_vector.limbs()={} < b.limbs()={}", - c.limbs(), - b_limbs + c.cols() >= b_cols, + "invalid c_vector: c_vector.cols()={} < b.cols()={}", + c.cols(), + b_cols ); unsafe { svp::svp_apply_dft( self.0, c.0, - b_limbs as u64, + b_cols as u64, a.0, b.as_ptr(), - b_limbs as u64, + b_cols as u64, b.n() as u64, ) } diff --git a/base2k/src/vec_znx.rs b/base2k/src/vec_znx.rs index b5680e4..382344f 100644 --- a/base2k/src/vec_znx.rs +++ b/base2k/src/vec_znx.rs @@ -8,19 +8,66 @@ use itertools::izip; use std::cmp::min; pub trait VecZnxApi { + type Owned: VecZnxApi + Infos; + + fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> Self::Owned; + /// Returns the minimum size of the [u8] array required to assign a - /// new backend array to a [VecZnx] through [VecZnx::from_bytes]. + /// new backend array. fn bytes_of(n: usize, limbs: usize) -> usize; + + /// Returns the backing array. fn raw(&self) -> &[i64]; + + /// Returns the mutable backing array. fn raw_mut(&mut self) -> &mut [i64]; + + /// Returns a non-mutable pointer to the backing array. fn as_ptr(&self) -> *const i64; + + /// Returns a mutable pointer to the backing array. fn as_mut_ptr(&mut self) -> *mut i64; + + /// Returns a non-mutable reference to the i-th limb. fn at(&self, i: usize) -> &[i64]; + + /// Returns a mutable reference to the i-th limb . fn at_mut(&mut self, i: usize) -> &mut [i64]; + + /// Returns a non-mutable pointer to the i-th limb. fn at_ptr(&self, i: usize) -> *const i64; + + /// Returns a mutable pointer to the i-th limb. fn at_mut_ptr(&mut self, i: usize) -> *mut i64; + + /// Zeroes the backing array. fn zero(&mut self); fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]); + + /// Right shifts the coefficients by k bits. + /// + /// # Arguments + /// + /// * `log_base2k`: the base two logarithm of the coefficients decomposition. + /// * `k`: the shift amount. + /// * `carry`: scratch space of size at least equal to self.n() * self.limbs() << 3. + /// + /// # Panics + /// + /// The method will panic if carry.len() < self.n() * self.limbs() << 3. + fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]); + + /// If self.n() > a.n(): Extracts X^{i*self.n()/a.n()} -> X^{i}. + /// If self.n() < a.n(): Extracts X^{i} -> X^{i*a.n()/self.n()}. + /// + /// # Arguments + /// + /// * `a`: the receiver polynomial in which the extracted coefficients are stored. + fn switch_degree(&self, a: &mut T) + where + Self: AsRef; + + fn print(&self, limbs: usize, n: usize); } pub fn bytes_of_vec_znx(n: usize, limbs: usize) -> usize { @@ -33,14 +80,16 @@ pub struct VecZnxBorrow { pub data: *mut i64, } -impl VecZnxBorrow { +impl VecZnxApi for VecZnxBorrow { + type Owned = VecZnxBorrow; + /// Returns a new struct implementing [VecZnxBorrow] with the provided data as backing array. /// /// The struct will *NOT* take ownership of buf[..[VecZnx::bytes_of]] /// /// User must ensure that data is properly alligned and that /// the size of data is at least equal to [VecZnx::bytes_of]. 
- pub fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> VecZnxBorrow { + fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> Self::Owned { let size = Self::bytes_of(n, limbs); assert!( bytes.len() >= size, @@ -56,9 +105,7 @@ impl VecZnxBorrow { data: cast_mut(&mut bytes[..size]).as_mut_ptr(), } } -} -impl VecZnxApi for VecZnxBorrow { fn bytes_of(n: usize, limbs: usize) -> usize { bytes_of_vec_znx(n, limbs) } @@ -104,39 +151,35 @@ impl VecZnxApi for VecZnxBorrow { } fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) { - assert!( - carry.len() >= self.n() * 8, - "invalid carry: carry.len()={} < self.n()={}", - carry.len(), - self.n() - ); + normalize(log_base2k, self, carry) + } - let carry_i64: &mut [i64] = cast_mut(carry); + fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) { + rsh(log_base2k, self, k, carry) + } - unsafe { - znx::znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr()); - (0..self.limbs()).rev().for_each(|i| { - znx::znx_normalize( - self.n as u64, - log_base2k as u64, - self.at_mut_ptr(i), - carry_i64.as_mut_ptr(), - self.at_mut_ptr(i), - carry_i64.as_mut_ptr(), - ) - }); - } + fn switch_degree(&self, a: &mut T) + where + Self: AsRef, + { + switch_degree(a, self.as_ref()); + } + + fn print(&self, limbs: usize, n: usize) { + (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n])) } } -impl VecZnx { +impl VecZnxApi for VecZnx { + type Owned = VecZnx; + /// Returns a new struct implementing [VecZnx] with the provided data as backing array. /// /// The struct will take ownership of buf[..[VecZnx::bytes_of]] /// /// User must ensure that data is properly alligned and that /// the size of data is at least equal to [VecZnx::bytes_of]. - pub fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> VecZnx { + fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> Self::Owned { let size = Self::bytes_of(n, limbs); assert!( buf.len() >= size, @@ -152,9 +195,7 @@ impl VecZnx { data: alias_mut_slice_to_vec(cast_mut(&mut buf[..size])), } } -} -impl VecZnxApi for VecZnx { fn bytes_of(n: usize, limbs: usize) -> usize { bytes_of_vec_znx(n, limbs) } @@ -167,66 +208,53 @@ impl VecZnxApi for VecZnx { &mut self.data } - /// Returns a non-mutable pointer to the backing array of the [VecZnx]. fn as_ptr(&self) -> *const i64 { self.data.as_ptr() } - /// Returns a mutable pointer to the backing array of the [VecZnx]. fn as_mut_ptr(&mut self) -> *mut i64 { self.data.as_mut_ptr() } - /// Returns a non-mutable reference to the i-th limb of the [VecZnx]. fn at(&self, i: usize) -> &[i64] { let n: usize = self.n(); &self.raw()[n * i..n * (i + 1)] } - /// Returns a mutable reference to the i-th limb of the [VecZnx]. fn at_mut(&mut self, i: usize) -> &mut [i64] { let n: usize = self.n(); &mut self.raw_mut()[n * i..n * (i + 1)] } - /// Returns a non-mutable pointer to the i-th limb of the [VecZnx]. fn at_ptr(&self, i: usize) -> *const i64 { &self.data[i * self.n] as *const i64 } - /// Returns a mutable pointer to the i-th limb of the [VecZnx]. fn at_mut_ptr(&mut self, i: usize) -> *mut i64 { &mut self.data[i * self.n] as *mut i64 } - /// Zeroes the backing array of the [VecZnx]. 
fn zero(&mut self) { unsafe { znx::znx_zero_i64_ref(self.data.len() as u64, self.data.as_mut_ptr()) } } fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) { - assert!( - carry.len() >= self.n() * 8, - "invalid carry: carry.len()={} < self.n()={}", - carry.len(), - self.n() - ); + normalize(log_base2k, self, carry) + } - let carry_i64: &mut [i64] = cast_mut(carry); + fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) { + rsh(log_base2k, self, k, carry) + } - unsafe { - znx::znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr()); - (0..self.limbs()).rev().for_each(|i| { - znx::znx_normalize( - self.n as u64, - log_base2k as u64, - self.at_mut_ptr(i), - carry_i64.as_mut_ptr(), - self.at_mut_ptr(i), - carry_i64.as_mut_ptr(), - ) - }); - } + fn switch_degree(&self, a: &mut T) + where + Self: AsRef, + { + switch_degree(a, self.as_ref()) + } + + fn print(&self, limbs: usize, n: usize) { + (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n])) } } @@ -269,99 +297,105 @@ impl VecZnx { } self.data - .truncate((self.limbs() - k / log_base2k) * self.n()); + .truncate((self.cols() - k / log_base2k) * self.n()); let k_rem: usize = k % log_base2k; if k_rem != 0 { let mask: i64 = ((1 << (log_base2k - k_rem - 1)) - 1) << k_rem; - self.at_mut(self.limbs() - 1) + self.at_mut(self.cols() - 1) .iter_mut() .for_each(|x: &mut i64| *x &= mask) } } +} - /// Right shifts the coefficients by k bits. - /// - /// # Arguments - /// - /// * `log_base2k`: the base two logarithm of the coefficients decomposition. - /// * `k`: the shift amount. - /// * `carry`: scratch space of size at least equal to self.n() * self.limbs() << 3. - /// - /// # Panics - /// - /// The method will panic if carry.len() < self.n() * self.limbs() << 3. - pub fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) { - let n: usize = self.n(); +pub fn switch_degree(b: &mut T, a: &T) { + let (n_in, n_out) = (a.n(), b.n()); + let (gap_in, gap_out): (usize, usize); - assert!( - carry.len() >> 3 >= n, - "invalid carry: carry.len()/8={} < self.n()={}", - carry.len() >> 3, - n - ); - - let limbs: usize = self.limbs(); - let limbs_steps: usize = k / log_base2k; - - self.data.rotate_right(self.n * limbs_steps); - unsafe { - znx::znx_zero_i64_ref((self.n * limbs_steps) as u64, self.data.as_mut_ptr()); - } - - let k_rem = k % log_base2k; - - if k_rem != 0 { - let carry_i64: &mut [i64] = cast_mut(carry); - - unsafe { - znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr()); - } - - let mask: i64 = (1 << k_rem) - 1; - let log_base2k: usize = log_base2k; - - (limbs_steps..limbs).for_each(|i| { - izip!(carry_i64.iter_mut(), self.at_mut(i).iter_mut()).for_each(|(ci, xi)| { - *xi += *ci << log_base2k; - *ci = *xi & mask; - *xi /= 1 << k_rem; - }); - }) - } + if n_in > n_out { + (gap_in, gap_out) = (n_in / n_out, 1) + } else { + (gap_in, gap_out) = (1, n_out / n_in); + b.zero(); } - /// If self.n() > a.n(): Extracts X^{i*self.n()/a.n()} -> X^{i}. - /// If self.n() < a.n(): Extracts X^{i} -> X^{i*a.n()/self.n()}. - /// - /// # Arguments - /// - /// * `a`: the receiver polynomial in which the extracted coefficients are stored. 
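// (Illustrative sketch, not part of this file.) A concrete instance of the
// mapping documented above, over plain slices and a single limb. For
// src.len() = 8 and dst.len() = 4, gap_in = 2 and the coefficients of
// X^0, X^2, X^4, X^6 become X^0..X^3; in the other direction the receiver is
// zeroed first and X^i is spread to X^{i*gap_out}. Lengths are assumed to be
// powers of two, as ring degrees are.
fn switch_degree_demo(dst: &mut [i64], src: &[i64]) {
    let (gap_in, gap_out) = if src.len() > dst.len() {
        (src.len() / dst.len(), 1)
    } else {
        dst.iter_mut().for_each(|x| *x = 0);
        (1, dst.len() / src.len())
    };
    src.iter()
        .step_by(gap_in)
        .zip(dst.iter_mut().step_by(gap_out))
        .for_each(|(x_in, x_out)| *x_out = *x_in);
}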
- pub fn switch_degree(&self, a: &mut VecZnx) { - let (n_in, n_out) = (self.n(), a.n()); - let (gap_in, gap_out): (usize, usize); + let limbs = min(a.cols(), b.cols()); - if n_in > n_out { - (gap_in, gap_out) = (n_in / n_out, 1) - } else { - (gap_in, gap_out) = (1, n_out / n_in); - a.zero(); - } + (0..limbs).for_each(|i| { + izip!( + a.at(i).iter().step_by(gap_in), + b.at_mut(i).iter_mut().step_by(gap_out) + ) + .for_each(|(x_in, x_out)| *x_out = *x_in); + }); +} - let limbs = min(self.limbs(), a.limbs()); +fn normalize(log_base2k: usize, a: &mut T, carry: &mut [u8]) { + let n: usize = a.n(); - (0..limbs).for_each(|i| { - izip!( - self.at(i).iter().step_by(gap_in), - a.at_mut(i).iter_mut().step_by(gap_out) + assert!( + carry.len() >= n * 8, + "invalid carry: carry.len()={} < self.n()={}", + carry.len(), + n + ); + + let carry_i64: &mut [i64] = cast_mut(carry); + + unsafe { + znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr()); + (0..a.cols()).rev().for_each(|i| { + znx::znx_normalize( + n as u64, + log_base2k as u64, + a.at_mut_ptr(i), + carry_i64.as_mut_ptr(), + a.at_mut_ptr(i), + carry_i64.as_mut_ptr(), ) - .for_each(|(x_in, x_out)| *x_out = *x_in); }); } +} - pub fn print_limbs(&self, limbs: usize, n: usize) { - (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n])) +pub fn rsh(log_base2k: usize, a: &mut T, k: usize, carry: &mut [u8]) { + let n: usize = a.n(); + + assert!( + carry.len() >> 3 >= n, + "invalid carry: carry.len()/8={} < self.n()={}", + carry.len() >> 3, + n + ); + + let limbs: usize = a.cols(); + let limbs_steps: usize = k / log_base2k; + + a.raw_mut().rotate_right(n * limbs_steps); + unsafe { + znx::znx_zero_i64_ref((n * limbs_steps) as u64, a.as_mut_ptr()); + } + + let k_rem = k % log_base2k; + + if k_rem != 0 { + let carry_i64: &mut [i64] = cast_mut(carry); + + unsafe { + znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr()); + } + + let mask: i64 = (1 << k_rem) - 1; + let log_base2k: usize = log_base2k; + + (limbs_steps..limbs).for_each(|i| { + izip!(carry_i64.iter_mut(), a.at_mut(i).iter_mut()).for_each(|(ci, xi)| { + *xi += *ci << log_base2k; + *ci = *xi & mask; + *xi /= 1 << k_rem; + }); + }) } } @@ -413,7 +447,7 @@ pub trait VecZnxOps { /// /// This method requires that all [VecZnx] of b have the same ring degree /// and that b.n() * b.len() <= a.n() - fn vec_znx_split(&self, b: &mut Vec, a: &VecZnx, buf: &mut VecZnx); + fn vec_znx_split(&self, b: &mut Vec, a: &T, buf: &mut T); /// Merges the subrings a into b. 
/// @@ -421,7 +455,7 @@ pub trait VecZnxOps { /// /// This method requires that all [VecZnx] of a have the same ring degree /// and that a.n() * a.len() <= b.n() - fn vec_znx_merge(&self, b: &mut VecZnx, a: &Vec); + fn vec_znx_merge(&self, b: &mut T, a: &Vec); } impl VecZnxOps for Module { @@ -439,13 +473,13 @@ impl VecZnxOps for Module { vec_znx::vec_znx_add( self.0, c.as_mut_ptr(), - c.limbs() as u64, + c.cols() as u64, c.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.as_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, ) } @@ -457,13 +491,13 @@ impl VecZnxOps for Module { vec_znx::vec_znx_add( self.0, b.as_mut_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.as_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, ) } @@ -475,13 +509,13 @@ impl VecZnxOps for Module { vec_znx::vec_znx_sub( self.0, c.as_mut_ptr(), - c.limbs() as u64, + c.cols() as u64, c.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.as_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, ) } @@ -493,13 +527,13 @@ impl VecZnxOps for Module { vec_znx::vec_znx_sub( self.0, b.as_mut_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.as_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, ) } @@ -510,10 +544,10 @@ impl VecZnxOps for Module { vec_znx::vec_znx_negate( self.0, b.as_mut_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, ) } @@ -524,10 +558,10 @@ impl VecZnxOps for Module { vec_znx::vec_znx_negate( self.0, a.as_mut_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, ) } @@ -539,10 +573,10 @@ impl VecZnxOps for Module { self.0, k, a.as_mut_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.as_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, ) } @@ -554,10 +588,10 @@ impl VecZnxOps for Module { self.0, k, a.as_mut_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, ) } @@ -603,13 +637,13 @@ impl VecZnxOps for Module { fn vec_znx_automorphism(&self, k: i64, b: &mut T, a: &T, limbs_a: usize) { assert_eq!(a.n(), self.n()); assert_eq!(b.n(), self.n()); - assert!(a.limbs() >= limbs_a); + assert!(a.cols() >= limbs_a); unsafe { vec_znx::vec_znx_automorphism( self.0, k, b.as_mut_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, a.as_ptr(), limbs_a as u64, @@ -660,13 +694,13 @@ impl VecZnxOps for Module { limbs_a: usize, ) { assert_eq!(a.n(), self.n()); - assert!(a.limbs() >= limbs_a); + assert!(a.cols() >= limbs_a); unsafe { vec_znx::vec_znx_automorphism( self.0, k, a.as_mut_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, a.as_ptr(), limbs_a as u64, @@ -675,7 +709,7 @@ impl VecZnxOps for Module { } } - fn vec_znx_split(&self, b: &mut Vec, a: &VecZnx, buf: &mut VecZnx) { + fn vec_znx_split(&self, b: &mut Vec, a: &T, buf: &mut T) { let (n_in, n_out) = (a.n(), b[0].n()); assert!( @@ -692,16 +726,16 @@ impl VecZnxOps for Module { b.iter_mut().enumerate().for_each(|(i, bi)| { if i == 0 { - a.switch_degree(bi); + switch_degree(bi, a); self.vec_znx_rotate(-1, buf, a); } else { - buf.switch_degree(bi); + switch_degree(bi, buf); self.vec_znx_rotate_inplace(-1, buf); } }) } - fn vec_znx_merge(&self, b: &mut VecZnx, a: &Vec) { + fn 
vec_znx_merge(&self, b: &mut T, a: &Vec) { let (n_in, n_out) = (b.n(), a[0].n()); assert!( @@ -717,7 +751,7 @@ impl VecZnxOps for Module { }); a.iter().enumerate().for_each(|(_, ai)| { - ai.switch_degree(b); + switch_degree(b, ai); self.vec_znx_rotate_inplace(-1, b); }); diff --git a/base2k/src/vec_znx_big.rs b/base2k/src/vec_znx_big.rs index fa36e6f..c837240 100644 --- a/base2k/src/vec_znx_big.rs +++ b/base2k/src/vec_znx_big.rs @@ -8,39 +8,39 @@ impl VecZnxBig { /// Returns a new [VecZnxBig] with the provided data as backing array. /// User must ensure that data is properly alligned and that /// the size of data is at least equal to [Module::bytes_of_vec_znx_big]. - pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxBig { + pub fn from_bytes(cols: usize, data: &mut [u8]) -> VecZnxBig { VecZnxBig( data.as_mut_ptr() as *mut vec_znx_big::vec_znx_bigcoeff_t, - limbs, + cols, ) } pub fn as_vec_znx_dft(&mut self) -> VecZnxDft { VecZnxDft(self.0 as *mut vec_znx_dft::vec_znx_dft_t, self.1) } - pub fn limbs(&self) -> usize { + pub fn cols(&self) -> usize { self.1 } } pub trait VecZnxBigOps { /// Allocates a vector Z[X]/(X^N+1) that stores not normalized values. - fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig; + fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig; /// Returns a new [VecZnxBig] with the provided bytes array as backing array. /// /// # Arguments /// - /// * `limbs`: the number of limbs of the [VecZnxBig]. + /// * `cols`: the number of cols of the [VecZnxBig]. /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_big]. /// /// # Panics /// If `bytes.len()` < [Module::bytes_of_vec_znx_big]. - fn new_vec_znx_big_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxBig; + fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig; /// Returns the minimum number of bytes necessary to allocate /// a new [VecZnxBig] through [VecZnxBig::from_bytes]. 
- fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize; + fn bytes_of_vec_znx_big(&self, cols: usize) -> usize; /// b <- b - a fn vec_znx_big_sub_small_a_inplace(&self, b: &mut VecZnxBig, a: &T); @@ -89,22 +89,22 @@ pub trait VecZnxBigOps { } impl VecZnxBigOps for Module { - fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig { - unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, limbs as u64), limbs) } + fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig { + unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, cols as u64), cols) } } - fn new_vec_znx_big_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxBig { + fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig { assert!( - bytes.len() >= ::bytes_of_vec_znx_big(self, limbs), + bytes.len() >= ::bytes_of_vec_znx_big(self, cols), "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}", bytes.len(), - ::bytes_of_vec_znx_big(self, limbs) + ::bytes_of_vec_znx_big(self, cols) ); - VecZnxBig::from_bytes(limbs, bytes) + VecZnxBig::from_bytes(cols, bytes) } - fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize { - unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, limbs as u64) as usize } + fn bytes_of_vec_znx_big(&self, cols: usize) -> usize { + unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, cols as u64) as usize } } fn vec_znx_big_sub_small_a_inplace(&self, b: &mut VecZnxBig, a: &T) { @@ -112,12 +112,12 @@ impl VecZnxBigOps for Module { vec_znx_big::vec_znx_big_sub_small_a( self.0, b.0, - b.limbs() as u64, + b.cols() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.0, - b.limbs() as u64, + b.cols() as u64, ) } } @@ -132,12 +132,12 @@ impl VecZnxBigOps for Module { vec_znx_big::vec_znx_big_sub_small_a( self.0, c.0, - c.limbs() as u64, + c.cols() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.0, - b.limbs() as u64, + b.cols() as u64, ) } } @@ -147,11 +147,11 @@ impl VecZnxBigOps for Module { vec_znx_big::vec_znx_big_add_small( self.0, c.0, - c.limbs() as u64, + c.cols() as u64, b.0, - b.limbs() as u64, + b.cols() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, ) } @@ -162,11 +162,11 @@ impl VecZnxBigOps for Module { vec_znx_big::vec_znx_big_add_small( self.0, b.0, - b.limbs() as u64, + b.cols() as u64, b.0, - b.limbs() as u64, + b.cols() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, ) } @@ -194,10 +194,10 @@ impl VecZnxBigOps for Module { self.0, log_base2k as u64, b.as_mut_ptr(), - b.limbs() as u64, + b.cols() as u64, b.n() as u64, a.0, - a.limbs() as u64, + a.cols() as u64, tmp_bytes.as_mut_ptr(), ) } @@ -228,7 +228,7 @@ impl VecZnxBigOps for Module { self.0, log_base2k as u64, res.as_mut_ptr(), - res.limbs() as u64, + res.cols() as u64, res.n() as u64, a.0, a_range_begin as u64, @@ -245,9 +245,9 @@ impl VecZnxBigOps for Module { self.0, gal_el, b.0, - b.limbs() as u64, + b.cols() as u64, a.0, - a.limbs() as u64, + a.cols() as u64, ); } } @@ -258,9 +258,9 @@ impl VecZnxBigOps for Module { self.0, gal_el, a.0, - a.limbs() as u64, + a.cols() as u64, a.0, - a.limbs() as u64, + a.cols() as u64, ); } } diff --git a/base2k/src/vec_znx_dft.rs b/base2k/src/vec_znx_dft.rs index 6ae2dca..9d7ad0e 100644 --- a/base2k/src/vec_znx_dft.rs +++ b/base2k/src/vec_znx_dft.rs @@ -1,7 +1,7 @@ use crate::ffi::vec_znx_big; use crate::ffi::vec_znx_dft; use crate::ffi::vec_znx_dft::bytes_of_vec_znx_dft; -use crate::{Infos, Module, VecZnx, VecZnxApi, VecZnxBig}; +use crate::{Infos, Module, VecZnxApi, VecZnxBig}; 
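// (Illustrative sketch, not part of this file.) The typical life cycle of a
// VecZnxDft, following examples/rlwe_encrypt.rs: forward-transform a VecZnx,
// alias the DFT buffer as a VecZnxBig, inverse-transform in place, then
// normalize back into a plain VecZnx. Only calls that appear in this diff are
// used; the free-standing helper, its signature, and having the needed traits
// (Infos, VecZnxDftOps, VecZnxBigOps) in scope are assumptions. `carry` must
// hold at least module.vec_znx_big_normalize_tmp_bytes() bytes.
fn dft_round_trip(
    module: &Module,
    log_base2k: usize,
    a: &VecZnx,
    b: &mut VecZnx,
    carry: &mut [u8],
) {
    let mut a_dft: VecZnxDft = module.new_vec_znx_dft(a.cols());
    module.vec_znx_dft(&mut a_dft, a, a.cols()); // a_dft <- DFT(a)
    let mut big: VecZnxBig = a_dft.as_vec_znx_big(); // aliases a_dft's buffer
    module.vec_znx_idft_tmp_a(&mut big, &mut a_dft, a.cols()); // big <- IDFT(a_dft)
    module.vec_znx_big_normalize(log_base2k, b, &big, carry); // b <- normalize(big)
}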
pub struct VecZnxDft(pub *mut vec_znx_dft::vec_znx_dft_t, pub usize);

@@ -9,8 +9,8 @@ impl VecZnxDft {
     /// Returns a new [VecZnxDft] with the provided data as backing array.
     /// User must ensure that data is properly aligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_dft].
-    pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxDft {
-        VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, limbs)
+    pub fn from_bytes(cols: usize, data: &mut [u8]) -> VecZnxDft {
+        VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, cols)
     }

     /// Cast a [VecZnxDft] into a [VecZnxBig].
@@ -19,36 +19,36 @@
     pub fn as_vec_znx_big(&mut self) -> VecZnxBig {
         VecZnxBig(self.0 as *mut vec_znx_big::vec_znx_bigcoeff_t, self.1)
     }

-    pub fn limbs(&self) -> usize {
+    pub fn cols(&self) -> usize {
         self.1
     }
 }

 pub trait VecZnxDftOps {
     /// Allocates a vector Z[X]/(X^N+1) that stores normalized values in the DFT space.
-    fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft;
+    fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft;

     /// Returns a new [VecZnxDft] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxDft].
+    /// * `cols`: the number of cols of the [VecZnxDft].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_dft].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft].
-    fn new_vec_znx_dft_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxDft;
+    fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft;

     /// Returns the minimum number of bytes required to back a [VecZnxDft]
     /// through [VecZnxDftOps::new_vec_znx_dft_from_bytes].
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxDft].
+    /// * `cols`: the number of cols of the [VecZnxDft].
-    fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize;
+    fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize;

     /// Returns the minimum number of bytes necessary to allocate
     /// a new [VecZnxDft] through [VecZnxDft::from_bytes].
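// (Illustrative sketch, not part of this file.) The from_bytes-style
// constructors let several temporaries share one scratch allocation instead
// of heap-allocating each VecZnxDft. A sketch of the pattern, sized with
// bytes_of_vec_znx_dft as documented above; alloc_aligned_u8 (imported by the
// rlwe benches) is assumed to take a byte count and return a suitably aligned
// Vec<u8>, and the helper's name and signature are assumptions.
fn scratch_backed_dft(module: &Module, cols: usize) {
    let need: usize = module.bytes_of_vec_znx_dft(cols);
    let mut scratch: Vec<u8> = alloc_aligned_u8(need);
    // Panics if scratch.len() < bytes_of_vec_znx_dft(cols), per the contract.
    let a_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes(cols, &mut scratch);
    // a_dft aliases `scratch`: the buffer must outlive every use of a_dft.
    let _ = a_dft;
}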
@@ -69,33 +69,33 @@ pub trait VecZnxDftOps { } impl VecZnxDftOps for Module { - fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft { - unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, limbs as u64), limbs) } + fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft { + unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, cols as u64), cols) } } - fn new_vec_znx_dft_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxDft { + fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft { assert!( - bytes.len() >= ::bytes_of_vec_znx_dft(self, limbs), + bytes.len() >= ::bytes_of_vec_znx_dft(self, cols), "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}", bytes.len(), - ::bytes_of_vec_znx_dft(self, limbs) + ::bytes_of_vec_znx_dft(self, cols) ); - VecZnxDft::from_bytes(limbs, bytes) + VecZnxDft::from_bytes(cols, bytes) } - fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize { - unsafe { bytes_of_vec_znx_dft(self.0, limbs as u64) as usize } + fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize { + unsafe { bytes_of_vec_znx_dft(self.0, cols as u64) as usize } } fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft, a_limbs: usize) { assert!( - b.limbs() >= a_limbs, - "invalid c_vector: b_vector.limbs()={} < a_limbs={}", - b.limbs(), + b.cols() >= a_limbs, + "invalid c_vector: b_vector.cols()={} < a_limbs={}", + b.cols(), a_limbs ); unsafe { - vec_znx_dft::vec_znx_idft_tmp_a(self.0, b.0, b.limbs() as u64, a.0, a_limbs as u64) + vec_znx_dft::vec_znx_idft_tmp_a(self.0, b.0, b.cols() as u64, a.0, a_limbs as u64) } } @@ -106,21 +106,21 @@ impl VecZnxDftOps for Module { /// b <- DFT(a) /// /// # Panics - /// If b.limbs < a_limbs - fn vec_znx_dft(&self, b: &mut VecZnxDft, a: &T, a_limbs: usize) { + /// If b.cols < a_cols + fn vec_znx_dft(&self, b: &mut VecZnxDft, a: &T, a_cols: usize) { assert!( - b.limbs() >= a_limbs, - "invalid a_limbs: b.limbs()={} < a_limbs={}", - b.limbs(), - a_limbs + b.cols() >= a_cols, + "invalid a_cols: b.cols()={} < a_cols={}", + b.cols(), + a_cols ); unsafe { vec_znx_dft::vec_znx_dft( self.0, b.0, - b.limbs() as u64, + b.cols() as u64, a.as_ptr(), - a_limbs as u64, + a_cols as u64, a.n() as u64, ) } @@ -131,20 +131,20 @@ impl VecZnxDftOps for Module { &self, b: &mut VecZnxBig, a: &mut VecZnxDft, - a_limbs: usize, + a_cols: usize, tmp_bytes: &mut [u8], ) { assert!( - b.limbs() >= a_limbs, - "invalid c_vector: b.limbs()={} < a_limbs={}", - b.limbs(), - a_limbs + b.cols() >= a_cols, + "invalid c_vector: b.cols()={} < a_cols={}", + b.cols(), + a_cols ); assert!( - a.limbs() >= a_limbs, - "invalid c_vector: a.limbs()={} < a_limbs={}", - a.limbs(), - a_limbs + a.cols() >= a_cols, + "invalid c_vector: a.cols()={} < a_cols={}", + a.cols(), + a_cols ); assert!( tmp_bytes.len() <= ::vec_znx_idft_tmp_bytes(self), @@ -156,9 +156,9 @@ impl VecZnxDftOps for Module { vec_znx_dft::vec_znx_idft( self.0, b.0, - a.limbs() as u64, + a.cols() as u64, a.0, - a_limbs as u64, + a_cols as u64, tmp_bytes.as_mut_ptr(), ) } diff --git a/base2k/src/vmp.rs b/base2k/src/vmp.rs index b240817..46d2a09 100644 --- a/base2k/src/vmp.rs +++ b/base2k/src/vmp.rs @@ -1,5 +1,5 @@ use crate::ffi::vmp; -use crate::{Infos, Module, VecZnx, VecZnxApi, VecZnxDft}; +use crate::{Infos, Module, VecZnxApi, VecZnxDft}; /// Vector Matrix Product Prepared Matrix: a vector of [VecZnx], /// stored as a 3D matrix in the DFT domain in a single contiguous array. @@ -15,7 +15,7 @@ pub struct VmpPMat { pub data: *mut vmp::vmp_pmat_t, /// The number of [VecZnxDft]. 
pub rows: usize, - /// The number of limbs in each [VecZnxDft]. + /// The number of cols in each [VecZnxDft]. pub cols: usize, /// The ring degree of each [VecZnxDft]. pub n: usize, @@ -86,7 +86,7 @@ pub trait VmpPMatOps { /// # Arguments /// /// * `rows`: number of rows (number of [VecZnxDft]). - /// * `cols`: number of cols (number of limbs of each [VecZnxDft]). + /// * `cols`: number of cols (number of cols of each [VecZnxDft]). fn new_vmp_pmat(&self, rows: usize, cols: usize) -> VmpPMat; /// Returns the number of bytes needed as scratch space for [VmpPMatOps::vmp_prepare_contiguous]. @@ -153,15 +153,17 @@ pub trait VmpPMatOps { /// vecznx.push(module.new_vec_znx(cols)); /// }); /// + /// let slices: Vec<&[i64]> = vecznx.iter().map(|v| v.data.as_slice()).collect(); + /// /// let mut buf: Vec = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)]; /// /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); - /// module.vmp_prepare_dblptr(&mut vmp_pmat, &vecznx, &mut buf); + /// module.vmp_prepare_dblptr(&mut vmp_pmat, &slices, &mut buf); /// /// vmp_pmat.free(); /// module.free(); /// ``` - fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &Vec, buf: &mut [u8]); + fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &[&[i64]], buf: &mut [u8]); /// Prepares the ith-row of [VmpPMat] from a vector of [VecZnx]. /// @@ -175,7 +177,7 @@ pub trait VmpPMatOps { /// The size of buf can be obtained with [VmpPMatOps::vmp_prepare_tmp_bytes]. /// /// # Example /// ``` - /// use base2k::{Module, FFT64, VmpPMat, VmpPMatOps, VecZnx, VecZnxOps, Free}; + /// use base2k::{Module, FFT64, VmpPMat, VmpPMatOps, VecZnx, VecZnxApi, VecZnxOps, Free}; /// use std::cmp::min; /// /// let n: usize = 1024; @@ -188,31 +190,25 @@ pub trait VmpPMatOps { /// let mut buf: Vec = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)]; /// /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); - /// module.vmp_prepare_row(&mut vmp_pmat, &vecznx, 0, &mut buf); + /// module.vmp_prepare_row(&mut vmp_pmat, vecznx.raw(), 0, &mut buf); /// /// vmp_pmat.free(); /// module.free(); /// ``` - fn vmp_prepare_row( - &self, - b: &mut VmpPMat, - a: &T, - row_i: usize, - tmp_bytes: &mut [u8], - ); + fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, tmp_bytes: &mut [u8]); /// Returns the size of the stratch space necessary for [VmpPMatOps::vmp_apply_dft]. /// /// # Arguments /// - /// * `c_limbs`: number of limbs of the output [VecZnxDft]. - /// * `a_limbs`: number of limbs of the input [VecZnx]. + /// * `c_cols`: number of cols of the output [VecZnxDft]. + /// * `a_cols`: number of cols of the input [VecZnx]. /// * `rows`: number of rows of the input [VmpPMat]. /// * `cols`: number of cols of the input [VmpPMat]. fn vmp_apply_dft_tmp_bytes( &self, - c_limbs: usize, - a_limbs: usize, + c_cols: usize, + a_cols: usize, rows: usize, cols: usize, ) -> usize; @@ -223,8 +219,8 @@ pub trait VmpPMatOps { /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) /// and each vector a [VecZnxDft] (row) of the [VmpPMat]. /// - /// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and - /// `j` cols, the output is a [VecZnx] of `j` limbs. + /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and + /// `j` cols, the output is a [VecZnx] of `j` cols. /// /// If there is a mismatch between the dimensions the largest valid ones are used. 
/// @@ -249,18 +245,18 @@ pub trait VmpPMatOps { /// let n = 1024; /// /// let module: Module = Module::new::(n); - /// let limbs: usize = 5; + /// let cols: usize = 5; /// - /// let rows: usize = limbs; - /// let cols: usize = limbs + 1; - /// let c_limbs: usize = cols; - /// let a_limbs: usize = limbs; - /// let tmp_bytes: usize = module.vmp_apply_dft_tmp_bytes(c_limbs, a_limbs, rows, cols); + /// let rows: usize = cols; + /// let cols: usize = cols + 1; + /// let c_cols: usize = cols; + /// let a_cols: usize = cols; + /// let tmp_bytes: usize = module.vmp_apply_dft_tmp_bytes(c_cols, a_cols, rows, cols); /// /// let mut buf: Vec = vec![0; tmp_bytes]; /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// - /// let a: VecZnx = module.new_vec_znx(limbs); + /// let a: VecZnx = module.new_vec_znx(cols); /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols); /// module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf); /// @@ -280,14 +276,14 @@ pub trait VmpPMatOps { /// /// # Arguments /// - /// * `c_limbs`: number of limbs of the output [VecZnxDft]. - /// * `a_limbs`: number of limbs of the input [VecZnxDft]. + /// * `c_cols`: number of cols of the output [VecZnxDft]. + /// * `a_cols`: number of cols of the input [VecZnxDft]. /// * `rows`: number of rows of the input [VmpPMat]. /// * `cols`: number of cols of the input [VmpPMat]. fn vmp_apply_dft_to_dft_tmp_bytes( &self, - c_limbs: usize, - a_limbs: usize, + c_cols: usize, + a_cols: usize, rows: usize, cols: usize, ) -> usize; @@ -299,8 +295,8 @@ pub trait VmpPMatOps { /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) /// and each vector a [VecZnxDft] (row) of the [VmpPMat]. /// - /// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and - /// `j` cols, the output is a [VecZnx] of `j` limbs. + /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and + /// `j` cols, the output is a [VecZnx] of `j` cols. /// /// If there is a mismatch between the dimensions the largest valid ones are used. /// @@ -325,18 +321,18 @@ pub trait VmpPMatOps { /// let n = 1024; /// /// let module: Module = Module::new::(n); - /// let limbs: usize = 5; + /// let cols: usize = 5; /// - /// let rows: usize = limbs; - /// let cols: usize = limbs + 1; - /// let c_limbs: usize = cols; - /// let a_limbs: usize = limbs; - /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(c_limbs, a_limbs, rows, cols); + /// let rows: usize = cols; + /// let cols: usize = cols + 1; + /// let c_cols: usize = cols; + /// let a_cols: usize = cols; + /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, rows, cols); /// /// let mut buf: Vec = vec![0; tmp_bytes]; /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// - /// let a_dft: VecZnxDft = module.new_vec_znx_dft(limbs); + /// let a_dft: VecZnxDft = module.new_vec_znx_dft(cols); /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols); /// module.vmp_apply_dft_to_dft(&mut c_dft, &a_dft, &vmp_pmat, &mut buf); /// @@ -354,8 +350,8 @@ pub trait VmpPMatOps { /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) /// and each vector a [VecZnxDft] (row) of the [VmpPMat]. /// - /// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and - /// `j` cols, the output is a [VecZnx] of `j` limbs. 
+ /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and + /// `j` cols, the output is a [VecZnx] of `j` cols. /// /// If there is a mismatch between the dimensions the largest valid ones are used. /// @@ -379,17 +375,17 @@ pub trait VmpPMatOps { /// let n = 1024; /// /// let module: Module = Module::new::(n); - /// let limbs: usize = 5; + /// let cols: usize = 5; /// - /// let rows: usize = limbs; - /// let cols: usize = limbs + 1; - /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(limbs, limbs, rows, cols); + /// let rows: usize = cols; + /// let cols: usize = cols + 1; + /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(cols, cols, rows, cols); /// /// let mut buf: Vec = vec![0; tmp_bytes]; - /// let a: VecZnx = module.new_vec_znx(limbs); + /// let a: VecZnx = module.new_vec_znx(cols); /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// - /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(limbs); + /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols); /// module.vmp_apply_dft_to_dft_inplace(&mut c_dft, &vmp_pmat, &mut buf); /// /// c_dft.free(); @@ -428,12 +424,7 @@ impl VmpPMatOps for Module { } } - fn vmp_prepare_dblptr( - &self, - b: &mut VmpPMat, - a: &Vec, - buf: &mut [u8], - ) { + fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &[&[i64]], buf: &mut [u8]) { let ptrs: Vec<*const i64> = a.iter().map(|v| v.as_ptr()).collect(); unsafe { vmp::vmp_prepare_dblptr( @@ -447,13 +438,7 @@ impl VmpPMatOps for Module { } } - fn vmp_prepare_row( - &self, - b: &mut VmpPMat, - a: &T, - row_i: usize, - buf: &mut [u8], - ) { + fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, buf: &mut [u8]) { unsafe { vmp::vmp_prepare_row( self.0, @@ -469,16 +454,16 @@ impl VmpPMatOps for Module { fn vmp_apply_dft_tmp_bytes( &self, - c_limbs: usize, - a_limbs: usize, + c_cols: usize, + a_cols: usize, rows: usize, cols: usize, ) -> usize { unsafe { vmp::vmp_apply_dft_tmp_bytes( self.0, - c_limbs as u64, - a_limbs as u64, + c_cols as u64, + a_cols as u64, rows as u64, cols as u64, ) as usize @@ -496,9 +481,9 @@ impl VmpPMatOps for Module { vmp::vmp_apply_dft( self.0, c.0, - c.limbs() as u64, + c.cols() as u64, a.as_ptr(), - a.limbs() as u64, + a.cols() as u64, a.n() as u64, b.data(), b.rows() as u64, @@ -510,16 +495,16 @@ impl VmpPMatOps for Module { fn vmp_apply_dft_to_dft_tmp_bytes( &self, - c_limbs: usize, - a_limbs: usize, + c_cols: usize, + a_cols: usize, rows: usize, cols: usize, ) -> usize { unsafe { vmp::vmp_apply_dft_to_dft_tmp_bytes( self.0, - c_limbs as u64, - a_limbs as u64, + c_cols as u64, + a_cols as u64, rows as u64, cols as u64, ) as usize @@ -531,9 +516,9 @@ impl VmpPMatOps for Module { vmp::vmp_apply_dft_to_dft( self.0, c.0, - c.limbs() as u64, + c.cols() as u64, a.0, - a.limbs() as u64, + a.cols() as u64, b.data(), b.rows() as u64, b.cols() as u64, @@ -547,9 +532,9 @@ impl VmpPMatOps for Module { vmp::vmp_apply_dft_to_dft( self.0, b.0, - b.limbs() as u64, + b.cols() as u64, b.0, - b.limbs() as u64, + b.cols() as u64, a.data(), a.rows() as u64, a.cols() as u64, diff --git a/rlwe/benches/gadget_product.rs b/rlwe/benches/gadget_product.rs index 033e2a1..457dfda 100644 --- a/rlwe/benches/gadget_product.rs +++ b/rlwe/benches/gadget_product.rs @@ -1,10 +1,10 @@ use base2k::{ - FFT64, Module, Sampling, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxDft, VecZnxDftOps, - VmpPMat, VmpPMatOps, alloc_aligned_u8, + FFT64, Module, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxDft, VecZnxDftOps, VmpPMat, + 
VmpPMatOps, alloc_aligned_u8, }; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use rlwe::{ - ciphertext::{Ciphertext, GadgetCiphertext}, + ciphertext::{Ciphertext, new_gadget_ciphertext}, elem::Elem, encryptor::{encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes}, evaluator::gadget_product_tmp_bytes, @@ -18,7 +18,7 @@ fn gadget_product_inplace(c: &mut Criterion) { fn gadget_product<'a>( module: &'a Module, elem: &'a mut Elem, - gadget_ct: &'a GadgetCiphertext, + gadget_ct: &'a Ciphertext, tmp_bytes: &'a mut [u8], ) -> Box { let factor: usize = 2; @@ -105,7 +105,7 @@ fn gadget_product_inplace(c: &mut Criterion) { let mut sk1_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol(); params.module().svp_prepare(&mut sk1_svp_ppol, &sk1.0); - let mut gadget_ct: GadgetCiphertext = GadgetCiphertext::new( + let mut gadget_ct: Ciphertext = new_gadget_ciphertext( params.module(), params.log_base2k(), params.limbs_q(), @@ -123,7 +123,7 @@ fn gadget_product_inplace(c: &mut Criterion) { &mut tmp_bytes, ); - let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); + let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); params.encrypt_rlwe_sk_thread_safe( &mut ct, diff --git a/rlwe/examples/encryption.rs b/rlwe/examples/encryption.rs index 8b692ad..25689f6 100644 --- a/rlwe/examples/encryption.rs +++ b/rlwe/examples/encryption.rs @@ -1,13 +1,11 @@ -use base2k::{Encoding, FFT64, SvpPPolOps, VecZnxApi, VecZnx}; +use base2k::{Encoding, FFT64, SvpPPolOps, VecZnx, VecZnxApi}; use rlwe::{ ciphertext::Ciphertext, - decryptor::{Decryptor, decrypt_rlwe_thread_safe_tmp_byte}, - encryptor::{EncryptorSk, encrypt_rlwe_sk_tmp_bytes}, keys::SecretKey, parameters::{Parameters, ParametersLiteral}, plaintext::Plaintext, }; -use sampling::source::{Source, new_seed}; +use sampling::source::Source; fn main() { let params_lit: ParametersLiteral = ParametersLiteral { @@ -30,8 +28,7 @@ fn main() { let mut source: Source = Source::new([0; 32]); let mut sk: SecretKey = SecretKey::new(params.module()); - //sk.fill_ternary_hw(params.xs(), &mut source); - sk.0.0[0] = 1; + sk.fill_ternary_hw(params.xs(), &mut source); let mut want = vec![i64::default(); params.n()]; @@ -47,10 +44,10 @@ fn main() { pt.0.value[0].normalize(log_base2k, &mut tmp_bytes); println!("log_k: {}", log_k); - pt.0.value[0].print_limbs(pt.limbs(), 16); + pt.0.value[0].print(pt.cols(), 16); println!(); - let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); + let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); let mut source_xe: Source = Source::new([1; 32]); let mut source_xa: Source = Source::new([2; 32]); @@ -69,7 +66,7 @@ fn main() { params.decrypt_rlwe_thread_safe(&mut pt, &ct, &sk_svp_ppol, &mut tmp_bytes); - pt.0.value[0].print_limbs(pt.limbs(), 16); + pt.0.value[0].print(pt.cols(), 16); let mut have = vec![i64::default(); params.n()]; diff --git a/rlwe/examples/gadget_product.rs b/rlwe/examples/gadget_product.rs index f7e4da5..40d94a9 100644 --- a/rlwe/examples/gadget_product.rs +++ b/rlwe/examples/gadget_product.rs @@ -1,22 +1,15 @@ -use base2k::{ - Encoding, FFT64, Infos, Sampling, Scalar, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxDft, - VecZnxOps, -}; +use base2k::{Encoding, FFT64, SvpPPolOps, VecZnx, VecZnxApi, VmpPMat}; use rlwe::{ - ciphertext::{Ciphertext, GadgetCiphertext}, - decryptor::{Decryptor, decrypt_rlwe_thread_safe, decrypt_rlwe_thread_safe_tmp_byte}, - elem::Elem, - encryptor::{ - EncryptorSk, encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes, - 
diff --git a/rlwe/examples/gadget_product.rs b/rlwe/examples/gadget_product.rs
index f7e4da5..40d94a9 100644
--- a/rlwe/examples/gadget_product.rs
+++ b/rlwe/examples/gadget_product.rs
@@ -1,22 +1,15 @@
-use base2k::{
-    Encoding, FFT64, Infos, Sampling, Scalar, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxDft,
-    VecZnxOps,
-};
+use base2k::{Encoding, FFT64, SvpPPolOps, VecZnx, VecZnxApi, VmpPMat};
 use rlwe::{
-    ciphertext::{Ciphertext, GadgetCiphertext},
-    decryptor::{Decryptor, decrypt_rlwe_thread_safe, decrypt_rlwe_thread_safe_tmp_byte},
-    elem::Elem,
-    encryptor::{
-        EncryptorSk, encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes,
-        encrypt_rlwe_sk_tmp_bytes,
-    },
+    ciphertext::{Ciphertext, new_gadget_ciphertext},
+    decryptor::decrypt_rlwe_thread_safe,
+    encryptor::{encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes},
     evaluator::{gadget_product_inplace_thread_safe, gadget_product_tmp_bytes},
-    key_generator::{gen_switching_key_thread_safe, gen_switching_key_thread_safe_tmp_bytes},
-    keys::{SecretKey, SwitchingKey},
+    key_generator::gen_switching_key_thread_safe_tmp_bytes,
+    keys::SecretKey,
     parameters::{Parameters, ParametersLiteral},
     plaintext::Plaintext,
 };
-use sampling::source::{Source, new_seed};
+use sampling::source::Source;

 fn main() {
     let params_lit: ParametersLiteral = ParametersLiteral {
@@ -82,7 +75,7 @@ fn main() {
     let mut sk1_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol();
     params.module().svp_prepare(&mut sk1_svp_ppol, &sk1.0);

-    let mut gadget_ct: GadgetCiphertext = GadgetCiphertext::new(
+    let mut gadget_ct: Ciphertext<VmpPMat> = new_gadget_ciphertext(
         params.module(),
         log_base2k,
         params.limbs_q(),
@@ -100,21 +93,15 @@ fn main() {
         &mut tmp_bytes,
     );

-    println!("DONE?");
-
-    let mut pt: Plaintext<VecZnx> = Plaintext::<VecZnx>::new(
-        params.module(),
-        params.log_base2k(),
-        params.log_q(),
-        params.log_scale(),
-    );
+    let mut pt: Plaintext<VecZnx> =
+        Plaintext::<VecZnx>::new(params.module(), params.log_base2k(), params.log_q());

     let mut want = vec![i64::default(); params.n()];
     want.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);

     pt.0.value[0].encode_vec_i64(log_base2k, log_k, &want, 32);
     pt.0.value[0].normalize(log_base2k, &mut tmp_bytes);

-    let mut ct: Ciphertext = params.new_ciphertext(params.log_q());
+    let mut ct: Ciphertext<VecZnx> = params.new_ciphertext(params.log_q());

     params.encrypt_rlwe_sk_thread_safe(
         &mut ct,
@@ -132,10 +119,10 @@ fn main() {
         &mut tmp_bytes,
     );

-    println!("ct.limbs()={}", ct.limbs());
+    println!("ct.cols()={}", ct.cols());
     println!("gadget_ct.rows()={}", gadget_ct.rows());
     println!("gadget_ct.cols()={}", gadget_ct.cols());
-    println!("res.limbs()={}", ct.limbs());
+    println!("res.cols()={}", ct.cols());
     println!();

     decrypt_rlwe_thread_safe(
@@ -146,7 +133,7 @@ fn main() {
         &mut tmp_bytes,
     );

-    pt.0.value[0].print_limbs(pt.limbs(), 16);
+    pt.0.value[0].print(pt.cols(), 16);

     let mut have: Vec<i64> = vec![i64::default(); params.n()];
diff --git a/rlwe/src/ciphertext.rs b/rlwe/src/ciphertext.rs
index 9875744..0147fa9 100644
--- a/rlwe/src/ciphertext.rs
+++ b/rlwe/src/ciphertext.rs
@@ -1,47 +1,47 @@
-use crate::elem::{Elem, ElemBasics};
+use crate::elem::{Elem, ElemVecZnx, VecZnxCommon};
 use crate::parameters::Parameters;
 use crate::plaintext::Plaintext;
-use base2k::{Infos, Module, VecZnx, VecZnxApi, VmpPMat, VmpPMatOps};
+use base2k::{Infos, Module, VecZnx, VecZnxApi, VmpPMat};

-pub struct Ciphertext(pub Elem<VecZnx>);
+pub struct Ciphertext<T>(pub Elem<T>);

-impl Ciphertext {
-    pub fn new(
-        module: &Module,
-        log_base2k: usize,
-        log_q: usize,
-        degree: usize,
-        log_scale: usize,
-    ) -> Self {
-        Self(Elem::new(module, log_base2k, log_q, degree, log_scale))
+impl Ciphertext<VecZnx> {
+    pub fn new(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> Self {
+        Self(Elem::<VecZnx>::new(module, log_base2k, log_q, rows))
     }
+}

+impl<T> Ciphertext<T>
+where
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
+{
     pub fn n(&self) -> usize {
         self.0.n()
     }

-    pub fn degree(&self) -> usize {
-        self.0.degree()
-    }
-
     pub fn log_q(&self) -> usize {
-        self.0.log_q()
+        self.0.log_q
     }

-    pub fn limbs(&self) -> usize {
-        self.0.limbs()
+    pub fn rows(&self) -> usize {
+        self.0.rows()
     }

-    pub fn at(&self, i: usize) -> &(impl VecZnxApi + Infos) {
+    pub fn cols(&self) -> usize {
+        self.0.cols()
+    }
+
+    pub fn at(&self, i: usize) -> &T {
         self.0.at(i)
     }

-    pub fn at_mut(&mut self, i: usize) -> &mut (impl VecZnxApi + Infos) {
+    pub fn at_mut(&mut self, i: usize) -> &mut T {
         self.0.at_mut(i)
     }

     pub fn log_base2k(&self) -> usize {
-        self.0.log_base2k()
+        self.0.log_base2k
     }

     pub fn log_scale(&self) -> usize {
@@ -52,87 +52,59 @@ impl Ciphertext {
         self.0.zero()
     }

-    pub fn as_plaintext(&self) -> Plaintext {
-        unsafe { Plaintext(std::ptr::read(&self.0)) }
+    pub fn as_plaintext(&self) -> Plaintext<T> {
+        unsafe { Plaintext::<T>(std::ptr::read(&self.0)) }
     }
 }

 impl Parameters {
-    pub fn new_ciphertext(&self, log_q: usize) -> Ciphertext {
-        Ciphertext::new(self.module(), self.log_base2k(), log_q, self.log_scale(), 1)
+    pub fn new_ciphertext(&self, log_q: usize) -> Ciphertext<VecZnx> {
+        Ciphertext::new(self.module(), self.log_base2k(), log_q, 2)
     }
 }

-pub struct GadgetCiphertext {
-    pub value: VmpPMat,
-    pub log_base2k: usize,
-    pub log_q: usize,
+pub fn new_gadget_ciphertext(
+    module: &Module,
+    log_base2k: usize,
+    rows: usize,
+    log_q: usize,
+) -> Ciphertext<VmpPMat> {
+    let cols: usize = (log_q + log_base2k - 1) / log_base2k;
+    let mut elem: Elem<VmpPMat> = Elem::<VmpPMat>::new(module, log_base2k, rows, 2 * cols);
+    elem.log_q = log_q;
+    Ciphertext(elem)
 }

-impl GadgetCiphertext {
-    pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Self {
-        let cols: usize = (log_q + log_base2k - 1) / log_base2k;
-        Self {
-            value: module.new_vmp_pmat(rows, cols * 2),
-            log_base2k,
-            log_q,
-        }
-    }
+pub fn new_rgsw_ciphertext(
+    module: &Module,
+    log_base2k: usize,
+    rows: usize,
+    log_q: usize,
+) -> Ciphertext<VmpPMat> {
+    let cols: usize = (log_q + log_base2k - 1) / log_base2k;
+    let mut elem: Elem<VmpPMat> = Elem::<VmpPMat>::new(module, log_base2k, 2 * rows, 2 * cols);
+    elem.log_q = log_q;
+    Ciphertext(elem)
+}

+impl Ciphertext<VmpPMat> {
     pub fn n(&self) -> usize {
-        self.value.n
+        self.0.n()
     }

     pub fn rows(&self) -> usize {
-        self.value.rows
+        self.0.rows()
     }

     pub fn cols(&self) -> usize {
-        self.value.cols
-    }
-
-    pub fn log_q(&self) -> usize {
-        self.log_q
+        self.0.cols()
     }

     pub fn log_base2k(&self) -> usize {
-        self.log_base2k
-    }
-}
-
-pub struct RGSWCiphertext {
-    pub value: VmpPMat,
-    pub log_base2k: usize,
-    pub log_q: usize,
-}
-
-impl RGSWCiphertext {
-    pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Self {
-        let cols: usize = (log_q + log_base2k - 1) / log_base2k;
-        Self {
-            value: module.new_vmp_pmat(rows * 2, cols * 2),
-            log_base2k,
-            log_q,
-        }
-    }
-
-    pub fn n(&self) -> usize {
-        self.value.n
-    }
-
-    pub fn rows(&self) -> usize {
-        self.value.rows
-    }
-
-    pub fn cols(&self) -> usize {
-        self.value.cols
+        self.0.log_base2k
     }

     pub fn log_q(&self) -> usize {
-        self.log_q
-    }
-
-    pub fn log_base2k(&self) -> usize {
-        self.log_base2k
+        self.0.log_q
     }
 }
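`new_gadget_ciphertext` and `new_rgsw_ciphertext` derive their shape from the same arithmetic; spelled out as a standalone sketch of just the dimension computation:

```rust
// Dimension arithmetic used by `new_gadget_ciphertext`: one column per
// log_base2k-bit limb of log_q (rounded up), doubled because each row
// stores the two RLWE components side by side.
fn gadget_dims(log_base2k: usize, rows: usize, log_q: usize) -> (usize, usize) {
    let cols = (log_q + log_base2k - 1) / log_base2k; // ceil(log_q / log_base2k)
    (rows, 2 * cols)
}

fn main() {
    // e.g. log_q = 54 with 18-bit limbs -> 3 limbs -> 6 stored cols.
    assert_eq!(gadget_dims(18, 5, 54), (5, 6));
    // An RGSW ciphertext doubles the rows as well: (2 * rows, 2 * cols).
}
```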
diff --git a/rlwe/src/decryptor.rs b/rlwe/src/decryptor.rs
index bab6dc7..81725c6 100644
--- a/rlwe/src/decryptor.rs
+++ b/rlwe/src/decryptor.rs
@@ -1,12 +1,12 @@
 use crate::{
     ciphertext::Ciphertext,
-    elem::{Elem, ElemBasics},
+    elem::{Elem, ElemVecZnx, VecZnxCommon},
     keys::SecretKey,
     parameters::Parameters,
     plaintext::Plaintext,
 };
 use base2k::{
-    Infos, VecZnx, Module, SvpPPol, SvpPPolOps, VecZnxApi, VecZnxBigOps, VecZnxDft, VecZnxDftOps,
+    Infos, Module, SvpPPol, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBigOps, VecZnxDft, VecZnxDftOps,
 };
 use std::cmp::min;

@@ -34,13 +34,16 @@ impl Parameters {
         )
     }

-    pub fn decrypt_rlwe_thread_safe(
+    pub fn decrypt_rlwe_thread_safe<T>(
         &self,
-        res: &mut Plaintext,
-        ct: &Ciphertext,
+        res: &mut Plaintext<T>,
+        ct: &Ciphertext<T>,
         sk: &SvpPPol,
         tmp_bytes: &mut [u8],
-    ) {
+    ) where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
         decrypt_rlwe_thread_safe(self.module(), &mut res.0, &ct.0, sk, tmp_bytes)
     }
 }

@@ -52,26 +55,29 @@ pub fn decrypt_rlwe_thread_safe<T>(
     sk: &SvpPPol,
     tmp_bytes: &mut [u8],
 ) where
-    T: VecZnxApi + Infos,
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
 {
+    let cols: usize = a.cols();
+
     assert!(
-        tmp_bytes.len() >= decrypt_rlwe_thread_safe_tmp_byte(module, a.limbs()),
+        tmp_bytes.len() >= decrypt_rlwe_thread_safe_tmp_byte(module, cols),
         "invalid tmp_bytes: tmp_bytes.len()={} < decrypt_rlwe_thread_safe_tmp_byte={}",
         tmp_bytes.len(),
-        decrypt_rlwe_thread_safe_tmp_byte(module, a.limbs())
+        decrypt_rlwe_thread_safe_tmp_byte(module, cols)
     );

-    let res_dft_bytes: usize = module.bytes_of_vec_znx_dft(a.limbs());
+    let res_dft_bytes: usize = module.bytes_of_vec_znx_dft(cols);

-    let mut res_dft: VecZnxDft = VecZnxDft::from_bytes(a.limbs(), tmp_bytes);
+    let mut res_dft: VecZnxDft = VecZnxDft::from_bytes(cols, tmp_bytes);
     let mut res_big: base2k::VecZnxBig = res_dft.as_vec_znx_big();

     // res_dft <- DFT(ct[1]) * DFT(sk)
-    module.svp_apply_dft(&mut res_dft, sk, &a.value[1], a.limbs());
+    module.svp_apply_dft(&mut res_dft, sk, a.at(1), cols);
     // res_big <- ct[1] x sk
-    module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, a.limbs());
+    module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, cols);
     // res_big <- ct[1] x sk + ct[0]
-    module.vec_znx_big_add_small_inplace(&mut res_big, &a.value[0]);
+    module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0));
     // res <- normalize(ct[1] x sk + ct[0])
     module.vec_znx_big_normalize(
         a.log_base2k(),
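The decryption path computes `ct[0] + ct[1] x sk` and normalizes. The same identity in a scalar toy over the integers mod q (purely illustrative, no polynomials):

```rust
// Scalar toy of the RLWE identity used above: with b = -a*s + m + e,
// decryption computes b + a*s = m + e, and rounding strips the noise.
fn main() {
    let q: i64 = 1 << 20;
    let (s, a, m, e) = (3i64, 918_273, 4096, 5); // m already scaled, |e| small
    let b = (-a * s + m + e).rem_euclid(q);      // "ct = (b, a)"
    let dec = (b + a * s).rem_euclid(q);         // ct[0] + ct[1]*s
    assert_eq!(dec, m + e);
    // Rounding to the message scale (here 2^12) removes e:
    let scale: i64 = 1 << 12;
    assert_eq!((dec + scale / 2) / scale, m / scale);
}
```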
diff --git a/rlwe/src/elem.rs b/rlwe/src/elem.rs
index 7a3ed63..d66eb93 100644
--- a/rlwe/src/elem.rs
+++ b/rlwe/src/elem.rs
@@ -1,82 +1,68 @@
-use crate::parameters::Parameters;
-use base2k::{Infos, Module, VecZnx, VecZnxApi, VecZnxBorrow, VecZnxOps};
+use base2k::{Infos, Module, VecZnx, VecZnxApi, VecZnxBorrow, VecZnxOps, VmpPMat, VmpPMatOps};

+use crate::parameters::Parameters;

 impl Parameters {
-    pub fn bytes_of_elem(&self, log_q: usize, degree: usize) -> usize {
-        Elem::<VecZnx>::bytes_of(self.module(), self.log_base2k(), log_q, degree)
-    }
-
-    pub fn elem_from_bytes(&self, log_q: usize, degree: usize, bytes: &mut [u8]) -> Elem<VecZnx> {
-        Elem::<VecZnx>::from_bytes(self.module(), self.log_base2k(), log_q, degree, bytes)
-    }
-
-    pub fn elem_borrow_from_bytes(&self, log_q: usize, degree: usize, bytes: &mut [u8]) -> Elem<VecZnxBorrow> {
-        Elem::<VecZnxBorrow>::from_bytes(self.module(), self.log_base2k(), log_q, degree, bytes)
+    pub fn elem_from_bytes<T>(&self, log_q: usize, rows: usize, bytes: &mut [u8]) -> Elem<T>
+    where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
+        Elem::<T>::from_bytes(self.module(), self.log_base2k(), log_q, rows, bytes)
     }
 }

-pub struct Elem<T: VecZnxApi + Infos> {
+pub struct Elem<T> {
     pub value: Vec<T>,
     pub log_base2k: usize,
     pub log_q: usize,
     pub log_scale: usize,
 }

-pub trait ElemBasics<T>
-where
-    T: VecZnxApi + Infos,
-{
-    fn n(&self) -> usize;
-    fn degree(&self) -> usize;
-    fn limbs(&self) -> usize;
-    fn log_base2k(&self) -> usize;
-    fn log_scale(&self) -> usize;
-    fn log_q(&self) -> usize;
+pub trait VecZnxCommon: VecZnxApi + Infos {}
+impl VecZnxCommon for VecZnx {}
+impl VecZnxCommon for VecZnxBorrow {}
+
+pub trait ElemVecZnx<T> {
+    fn from_bytes(
+        module: &Module,
+        log_base2k: usize,
+        log_q: usize,
+        rows: usize,
+        bytes: &mut [u8],
+    ) -> Elem<T>;
+    fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> usize;
     fn at(&self, i: usize) -> &T;
     fn at_mut(&mut self, i: usize) -> &mut T;
     fn zero(&mut self);
 }

-impl Elem<VecZnx> {
-    pub fn new(
-        module: &Module,
-        log_base2k: usize,
-        log_q: usize,
-        degree: usize,
-        log_scale: usize,
-    ) -> Self {
-        let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
-        let mut value: Vec<VecZnx> = Vec::new();
-        (0..degree + 1).for_each(|_| value.push(module.new_vec_znx(limbs)));
-        Self {
-            value,
-            log_q,
-            log_base2k,
-            log_scale: log_scale,
-        }
-    }
-
-    pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, degree: usize) -> usize {
+impl<T> ElemVecZnx<T> for Elem<T>
+where
+    T: VecZnxCommon,
+    Elem<T>: Infos,
+{
+    fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> usize {
         let cols = (log_q + log_base2k - 1) / log_base2k;
-        module.n() * cols * (degree + 1) * 8
+        module.n() * cols * (rows + 1) * 8
     }

-    pub fn from_bytes(
+    fn from_bytes(
         module: &Module,
         log_base2k: usize,
         log_q: usize,
-        degree: usize,
+        rows: usize,
         bytes: &mut [u8],
-    ) -> Self {
+    ) -> Elem<T> {
+        assert!(rows > 0);
         let n: usize = module.n();
-        assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, degree));
-        let mut value: Vec<VecZnx> = Vec::new();
-        let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
-        let size = VecZnx::bytes_of(n, limbs);
+        assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, rows));
+        let mut value: Vec<T> = Vec::new();
+        let cols: usize = (log_q + log_base2k - 1) / log_base2k;
+        let size = T::bytes_of(n, cols);
         let mut ptr: usize = 0;
-        (0..degree + 1).for_each(|_| {
-            value.push(VecZnx::from_bytes(n, limbs, &mut bytes[ptr..]));
+        (0..rows).for_each(|_| {
+            value.push(T::from_bytes(n, cols, &mut bytes[ptr..]));
             ptr += size
         });
         Self {
@@ -86,74 +72,14 @@ impl Elem<VecZnx> {
             log_scale: 0,
         }
     }
-}
-
-impl Elem<VecZnxBorrow> {
-
-    pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, degree: usize) -> usize {
-        let cols = (log_q + log_base2k - 1) / log_base2k;
-        module.n() * cols * (degree + 1) * 8
-    }
-
-    pub fn from_bytes(
-        module: &Module,
-        log_base2k: usize,
-        log_q: usize,
-        degree: usize,
-        bytes: &mut [u8],
-    ) -> Self {
-        let n: usize = module.n();
-        assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, degree));
-        let mut value: Vec<VecZnxBorrow> = Vec::new();
-        let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
-        let size = VecZnxBorrow::bytes_of(n, limbs);
-        let mut ptr: usize = 0;
-        (0..degree + 1).for_each(|_| {
-            value.push(VecZnxBorrow::from_bytes(n, limbs, &mut bytes[ptr..]));
-            ptr += size
-        });
-        Self {
-            value,
-            log_q,
-            log_base2k,
-            log_scale: 0,
-        }
-    }
-}
-
-
-impl<T: VecZnxApi + Infos> ElemBasics<T> for Elem<T> {
-    fn n(&self) -> usize {
-        self.value[0].n()
-    }
-
-    fn degree(&self) -> usize {
-        self.value.len()
-    }
-
-    fn limbs(&self) -> usize {
-        self.value[0].limbs()
-    }
-
-    fn log_base2k(&self) -> usize {
-        self.log_base2k
-    }
-
-    fn log_scale(&self) -> usize {
-        self.log_scale
-    }
-
-    fn log_q(&self) -> usize {
-        self.log_q
-    }

     fn at(&self, i: usize) -> &T {
-        assert!(i <= self.degree());
+        assert!(i < self.rows());
         &self.value[i]
     }

     fn at_mut(&mut self, i: usize) -> &mut T {
-        assert!(i <= self.degree());
+        assert!(i < self.rows());
         &mut self.value[i]
     }

     fn zero(&mut self) {
         self.value.iter_mut().for_each(|i| i.zero());
     }
 }
@@ -161,3 +87,97 @@
+
+impl<T> Elem<T> {
+    pub fn log_base2k(&self) -> usize {
+        self.log_base2k
+    }
+
+    pub fn log_q(&self) -> usize {
+        self.log_q
+    }
+
+    pub fn log_scale(&self) -> usize {
+        self.log_scale
+    }
+}
+
+impl Infos for Elem<VecZnx> {
+    fn n(&self) -> usize {
+        self.value[0].n()
+    }
+
+    fn log_n(&self) -> usize {
+        self.value[0].log_n()
+    }
+
+    fn rows(&self) -> usize {
+        self.value.len()
+    }
+
+    fn cols(&self) -> usize {
+        self.value[0].cols()
+    }
+}
+
+impl Infos for Elem<VecZnxBorrow> {
+    fn n(&self) -> usize {
+        self.value[0].n()
+    }
+
+    fn log_n(&self) -> usize {
+        self.value[0].log_n()
+    }
+
+    fn rows(&self) -> usize {
+        self.value.len()
+    }
+
+    fn cols(&self) -> usize {
+        self.value[0].cols()
+    }
+}
+
+impl Elem<VecZnx> {
+    pub fn new(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> Self {
+        assert!(rows > 0);
+        let cols: usize = (log_q + log_base2k - 1) / log_base2k;
+        let mut value: Vec<VecZnx> = Vec::new();
+        (0..rows).for_each(|_| value.push(module.new_vec_znx(cols)));
+        Self {
+            value,
+            log_q,
+            log_base2k,
+            log_scale: 0,
+        }
+    }
+}
+
+impl Infos for Elem<VmpPMat> {
+    fn n(&self) -> usize {
+        self.value[0].n()
+    }
+
+    fn log_n(&self) -> usize {
+        self.value[0].log_n()
+    }
+
+    fn rows(&self) -> usize {
+        self.value[0].rows()
+    }
+
+    fn cols(&self) -> usize {
+        self.value[0].cols()
+    }
+}
+
+impl Elem<VmpPMat> {
+    pub fn new(module: &Module, log_base2k: usize, rows: usize, cols: usize) -> Self {
+        assert!(rows > 0);
+        assert!(cols > 0);
+        Self {
+            value: vec![module.new_vmp_pmat(rows, cols)],
+            log_q: 0,
+            log_base2k,
+            log_scale: 0,
+        }
+    }
+}
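The `VecZnxCommon` marker plus per-backing `Infos` impls is the pattern that lets `Elem<T>` close over exactly the supported backings. A self-contained sketch of the shape of that pattern, with dummy types mirroring the names above (not the actual base2k definitions):

```rust
// Standalone sketch: a marker trait closes `Elem<T>` over two concrete
// backings, and shared behaviour hangs off `Elem<T>: Infos` bounds
// instead of duplicated inherent impls.
trait Infos {
    fn cols(&self) -> usize;
}

struct VecZnx(Vec<i64>);
struct VecZnxBorrow<'a>(&'a [i64]);

trait VecZnxCommon {}
impl VecZnxCommon for VecZnx {}
impl<'a> VecZnxCommon for VecZnxBorrow<'a> {}

struct Elem<T> {
    value: Vec<T>,
}

impl Infos for Elem<VecZnx> {
    fn cols(&self) -> usize { self.value.len() }
}
impl<'a> Infos for Elem<VecZnxBorrow<'a>> {
    fn cols(&self) -> usize { self.value.len() }
}

// Generic code now needs only `T: VecZnxCommon` plus `Elem<T>: Infos`.
fn report<T: VecZnxCommon>(e: &Elem<T>) -> usize
where
    Elem<T>: Infos,
{
    e.cols()
}

fn main() {
    let owned = Elem { value: vec![VecZnx(vec![0; 4])] };
    assert_eq!(report(&owned), 1);
    let data = [1i64, 2, 3];
    let borrowed = Elem { value: vec![VecZnxBorrow(&data)] };
    assert_eq!(report(&borrowed), 1);
}
```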
diff --git a/rlwe/src/encryptor.rs b/rlwe/src/encryptor.rs
index 265959a..f57d9b7 100644
--- a/rlwe/src/encryptor.rs
+++ b/rlwe/src/encryptor.rs
@@ -1,14 +1,14 @@
-use crate::ciphertext::{Ciphertext, GadgetCiphertext};
-use crate::elem::{Elem, ElemBasics};
+use crate::ciphertext::Ciphertext;
+use crate::elem::{Elem, ElemVecZnx, VecZnxCommon};
 use crate::keys::SecretKey;
 use crate::parameters::Parameters;
 use crate::plaintext::Plaintext;
 use base2k::sampling::Sampling;
 use base2k::{
-    cast_mut, Infos, VecZnxBorrow, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxBigOps,
-    VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMatOps, alloc_aligned_u8, cast,
+    Infos, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxBigOps,
+    VecZnxBorrow, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps, cast_mut,
 };
-use rand_distr::num_traits::ops::bytes;
+
 use sampling::source::{Source, new_seed};

 pub struct EncryptorSk {
@@ -49,12 +49,15 @@ impl EncryptorSk {
         self.source_xe = Source::new(seed)
     }

-    pub fn encrypt_rlwe_sk(
+    pub fn encrypt_rlwe_sk<T>(
         &mut self,
         params: &Parameters,
-        ct: &mut Ciphertext,
-        pt: Option<&Plaintext>,
-    ) {
+        ct: &mut Ciphertext<T>,
+        pt: Option<&Plaintext<T>>,
+    ) where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
         assert!(
             self.initialized == true,
             "invalid call to [EncryptorSk.encrypt_rlwe_sk]: [EncryptorSk] has not been initialized with a [SecretKey]"
@@ -69,15 +72,18 @@ impl EncryptorSk {
         );
     }

-    pub fn encrypt_rlwe_sk_thread_safe(
+    pub fn encrypt_rlwe_sk_thread_safe<T>(
         &self,
         params: &Parameters,
-        ct: &mut Ciphertext,
-        pt: Option<&Plaintext>,
+        ct: &mut Ciphertext<T>,
+        pt: Option<&Plaintext<T>>,
         source_xa: &mut Source,
         source_xe: &mut Source,
         tmp_bytes: &mut [u8],
-    ) {
+    ) where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
         assert!(
             self.initialized == true,
             "invalid call to [EncryptorSk.encrypt_rlwe_sk_thread_safe]: [EncryptorSk] has not been initialized with a [SecretKey]"
@@ -91,16 +97,19 @@ impl Parameters {
         encrypt_rlwe_sk_tmp_bytes(self.module(), self.log_base2k(), log_q)
     }

-    pub fn encrypt_rlwe_sk_thread_safe(
+    pub fn encrypt_rlwe_sk_thread_safe<T>(
         &self,
-        ct: &mut Ciphertext,
-        pt: Option<&Plaintext>,
+        ct: &mut Ciphertext<T>,
+        pt: Option<&Plaintext<T>>,
         sk: &SvpPPol,
         source_xa: &mut Source,
         source_xe: &mut Source,
         tmp_bytes: &mut [u8],
-    ) {
-        encrypt_rlwe_sk_thread_safe::<VecZnx>(
+    ) where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
+        encrypt_rlwe_sk_thread_safe(
             self.module(),
             &mut ct.0,
             pt.map(|pt| &pt.0),
@@ -128,9 +137,10 @@ pub fn encrypt_rlwe_sk_thread_safe<T>(
     sigma: f64,
     tmp_bytes: &mut [u8],
 ) where
-    T: VecZnxApi + Infos,
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
 {
-    let limbs: usize = ct.limbs();
+    let cols: usize = ct.cols();
     let log_base2k: usize = ct.log_base2k();
     let log_q: usize = ct.log_q();

@@ -146,22 +156,22 @@ pub fn encrypt_rlwe_sk_thread_safe<T>(
     let c1: &mut T = ct.at_mut(1);

     // c1 <- Z_{2^prec}[X]/(X^{N}+1)
-    module.fill_uniform(log_base2k, c1, limbs, source_xa);
+    module.fill_uniform(log_base2k, c1, cols, source_xa);

-    let bytes_of_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(limbs);
+    let bytes_of_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(cols);

     // Scratch space for DFT values
     let mut buf_dft: VecZnxDft =
-        VecZnxDft::from_bytes(limbs, &mut tmp_bytes[..bytes_of_vec_znx_dft]);
+        VecZnxDft::from_bytes(cols, &mut tmp_bytes[..bytes_of_vec_znx_dft]);

     // Applies buf_dft <- DFT(s) * DFT(c1)
-    module.svp_apply_dft(&mut buf_dft, sk, c1, limbs);
+    module.svp_apply_dft(&mut buf_dft, sk, c1, cols);

     // Alias scratch space
     let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big();

     // buf_big = s x c1
-    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, limbs);
+    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, cols);

     let carry: &mut [u8] = &mut tmp_bytes[bytes_of_vec_znx_dft..];

@@ -194,7 +204,7 @@ pub fn encrypt_grlwe_sk_tmp_bytes(
     log_q: usize,
 ) -> usize {
     let cols = (log_q + log_base2k - 1) / log_base2k;
-    Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 1)
+    Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 2)
         + Plaintext::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q)
         + encrypt_rlwe_sk_tmp_bytes(module, log_base2k, log_q)
         + module.vmp_prepare_tmp_bytes(rows, 2 * cols)
@@ -202,7 +212,7 @@ pub fn encrypt_grlwe_sk_tmp_bytes(

 pub fn encrypt_grlwe_sk_thread_safe(
     module: &Module,
-    ct: &mut GadgetCiphertext,
+    ct: &mut Ciphertext<VmpPMat>,
     m: &Scalar,
     sk: &SvpPPol,
     source_xa: &mut Source,
 ) {
     let rows: usize = ct.rows();
     let log_q: usize = ct.log_q();
-    let cols: usize = (log_q + ct.log_base2k() - 1) / ct.log_base2k();
     let log_base2k: usize = ct.log_base2k();

     let min_tmp_bytes_len = encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q);
@@ -224,24 +234,24 @@ pub fn encrypt_grlwe_sk_thread_safe(
         min_tmp_bytes_len
     );

-    let bytes_of_elem: usize = Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 1);
+    let bytes_of_elem: usize = Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 2);
     let bytes_of_pt: usize = Plaintext::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q);
     let bytes_of_enc_sk: usize = encrypt_rlwe_sk_tmp_bytes(module, log_base2k, log_q);
-    let bytes_of_vmp_prepare_row: usize = module.vmp_prepare_tmp_bytes(rows, 2 * cols);

     let (tmp_bytes_pt, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_pt);
     let (tmp_bytes_enc_sk, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_enc_sk);
     let (tmp_bytes_elem, tmp_bytes_vmp_prepare_row) = tmp_bytes.split_at_mut(bytes_of_elem);

-    let mut tmp_elem: Elem<VecZnxBorrow> = Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, ct.log_q(), 1, tmp_bytes_elem);
-    let mut tmp_pt: Plaintext<VecZnxBorrow> = Plaintext::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, tmp_bytes_pt);
+    let mut tmp_elem: Elem<VecZnxBorrow> =
+        Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, ct.log_q(), 2, tmp_bytes_elem);
+    let mut tmp_pt: Plaintext<VecZnxBorrow> =
+        Plaintext::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, tmp_bytes_pt);

     (0..rows).for_each(|row_i| {
         // Sets the i-th row of the RLWE sample to m (i.e. m * 2^{-log_base2k*i})
         tmp_pt.0.value[0].at_mut(row_i).copy_from_slice(&m.0);

         // Encrypts RLWE(m * 2^{-log_base2k*i})
-
         encrypt_rlwe_sk_thread_safe(
             module,
             &mut tmp_elem,
@@ -252,24 +262,23 @@ pub fn encrypt_grlwe_sk_thread_safe(
             sigma,
             tmp_bytes_enc_sk,
         );
-
         // Zeroes the ith-row of tmp_pt
         tmp_pt.0.value[0].at_mut(row_i).fill(0);

-        println!("row:{}/{}", row_i, rows);
-        tmp_elem.at(0).print_limbs(tmp_elem.limbs(), tmp_elem.n());
-        tmp_elem.at(1).print_limbs(tmp_elem.limbs(), tmp_elem.n());
-        println!();
-        println!(">>>");
-
         // GRLWE[row_i][0||1] = [-as + m * 2^{-i*log_base2k} + e*2^{-log_q} || a]
         module.vmp_prepare_row(
-            &mut ct.value,
+            &mut ct.0.value[0],
             cast_mut::<i64>(tmp_bytes_elem),
             row_i,
             tmp_bytes_vmp_prepare_row,
         );
     });
-
-    println!("DONE");
 }
diff --git a/rlwe/src/evaluator.rs b/rlwe/src/evaluator.rs
index 0eee16b..cdd834f 100644
--- a/rlwe/src/evaluator.rs
+++ b/rlwe/src/evaluator.rs
@@ -1,9 +1,9 @@
 use crate::{
-    ciphertext::{Ciphertext, GadgetCiphertext, RGSWCiphertext},
-    elem::{Elem, ElemBasics},
+    ciphertext::Ciphertext,
+    elem::{Elem, ElemVecZnx, VecZnxCommon},
 };
 use base2k::{
-    Infos, Module, VecZnx, VecZnxApi, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMatOps,
+    Infos, Module, VecZnxApi, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps,
 };
 use std::cmp::min;

@@ -22,13 +22,14 @@ pub fn gadget_product_tmp_bytes(
         + 2 * module.bytes_of_vec_znx_dft(gct_cols)
 }

-pub fn gadget_product_inplace_thread_safe(
+pub fn gadget_product_inplace_thread_safe<T>(
     module: &Module,
     res: &mut Elem<T>,
-    b: &GadgetCiphertext,
+    b: &Ciphertext<VmpPMat>,
     tmp_bytes: &mut [u8],
 ) where
-    T: VecZnxApi + Infos,
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
 {
     unsafe {
         let a_ptr: *const T = res.at(1) as *const T;
@@ -50,21 +51,22 @@ pub fn gadget_product_inplace_thread_safe<T>(
 ///
 /// res = sum[min(a_ncols, b_nrows)] decomp(a, i) * (-B[i]s + m * 2^{-k*i} + E[i], B[i])
 ///     = (cs + m * a + e, c) with min(res_limbs, b_cols) limbs.
-pub fn gadget_product_thread_safe(
+pub fn gadget_product_thread_safe<T>(
     module: &Module,
     res: &mut Elem<T>,
     a: &T,
-    b: &GadgetCiphertext,
+    b: &Ciphertext<VmpPMat>,
     tmp_bytes: &mut [u8],
 ) where
-    T: VecZnxApi + Infos,
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
 {
     let log_base2k: usize = b.log_base2k();

-    let rows: usize = min(b.rows(), a.limbs());
+    let rows: usize = min(b.rows(), a.cols());
     let cols: usize = b.cols();

     let bytes_vmp_apply_dft: usize =
-        module.vmp_apply_dft_to_dft_tmp_bytes(cols, a.limbs(), rows, cols);
+        module.vmp_apply_dft_to_dft_tmp_bytes(cols, a.cols(), rows, cols);
     let bytes_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(cols);

     let (tmp_bytes_vmp_apply_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_vmp_apply_dft);
@@ -82,11 +84,16 @@ pub fn gadget_product_thread_safe<T>(
         module.new_vec_znx_big_from_bytes(cols >> 1, tmp_bytes_res_dft_c1);

     // a_dft <- DFT(a)
-    module.vec_znx_dft(&mut c1_dft, a, a.limbs());
+    module.vec_znx_dft(&mut c1_dft, a, a.cols());

     // (n x cols) <- (n x limbs=rows) x (rows x cols)
     // res_dft[a * (G0|G1)] <- sum[rows] DFT(a) x (DFT(G0)|DFT(G1))
-    module.vmp_apply_dft_to_dft(&mut res_dft, &c1_dft, &b.value, tmp_bytes_vmp_apply_dft);
+    module.vmp_apply_dft_to_dft(
+        &mut res_dft,
+        &c1_dft,
+        &b.0.value[0],
+        tmp_bytes_vmp_apply_dft,
+    );

     // res_big[a * (G0|G1)] <- IDFT(res_dft[a * (G0|G1)])
     module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, cols);
@@ -105,24 +112,25 @@ pub fn gadget_product_thread_safe<T>(
     }
 }

-pub fn rgsw_product_thread_safe(
+pub fn rgsw_product_thread_safe<T>(
     module: &Module,
     res: &mut Elem<T>,
-    a: &Ciphertext,
-    b: &RGSWCiphertext,
+    a: &Ciphertext<T>,
+    b: &Ciphertext<VmpPMat>,
     tmp_bytes: &mut [u8],
 ) where
-    T: VecZnxApi + Infos,
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
 {
     let log_base2k: usize = b.log_base2k();

-    let rows: usize = a.limbs();
+    let rows: usize = min(b.rows(), a.cols());
     let cols: usize = b.cols();

-    let in_limbs = a.limbs();
-    let out_limbs: usize = a.limbs();
+    let in_cols = a.cols();
+    let out_cols: usize = a.cols();

     let bytes_of_vec_znx_dft = module.bytes_of_vec_znx_dft(cols);
     let bytes_of_vmp_apply_dft_to_dft =
-        module.vmp_apply_dft_to_dft_tmp_bytes(out_limbs, in_limbs, rows, cols);
+        module.vmp_apply_dft_to_dft_tmp_bytes(out_cols, in_cols, rows, cols);

     let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_vec_znx_dft);
     let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_vec_znx_dft);
@@ -139,16 +147,16 @@ pub fn rgsw_product_thread_safe<T>(
     let mut r2_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes(cols, tmp_bytes_r2_dft);

     // c0_dft <- DFT(a[0])
-    module.vec_znx_dft(&mut c0_dft, a.at(0), a.limbs());
+    module.vec_znx_dft(&mut c0_dft, a.at(0), in_cols);

     // r_dft <- sum[rows] c0_dft[cols] x RGSW[0][cols]
     module.vmp_apply_dft_to_dft(
         &mut r1_dft,
-        &b.value,
+        &c0_dft,
+        &b.0.value[0],
         bytes_of_vmp_apply_dft_to_dft,
     );

     // c1_dft <- DFT(a[1])
-    module.vec_znx_dft(&mut c1_dft, a.at(1), a.limbs());
+    module.vec_znx_dft(&mut c1_dft, a.at(1), in_cols);
 }
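The `decomp(a, i)` sum in the doc comment above is ordinary base-2^k digit reconstruction. A scalar sketch of the identity (illustrative, unsigned digits only; the library's signed, normalized decomposition differs in detail):

```rust
// Scalar sketch of the identity behind the gadget product: split `a` into
// base-2^k digits, weight digit i by 2^(k*i), and the sum gives back `a`.
fn decomp_base2k(mut a: u64, k: usize, digits: usize) -> Vec<u64> {
    let mask = (1u64 << k) - 1;
    (0..digits)
        .map(|_| {
            let d = a & mask;
            a >>= k;
            d
        })
        .collect()
}

fn main() {
    let (a, k) = (0xDEAD_BEEFu64, 8);
    let digits = decomp_base2k(a, k, 8);
    let recomposed: u64 = digits
        .iter()
        .enumerate()
        .map(|(i, d)| *d << (k * i))
        .sum();
    assert_eq!(recomposed, a);
    // In the gadget product each digit multiplies one prepared row of the
    // VmpPMat, so the noise growth scales with the digit bound 2^k.
}
```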
diff --git a/rlwe/src/keys.rs b/rlwe/src/keys.rs
index 3561e9e..c1808dd 100644
--- a/rlwe/src/keys.rs
+++ b/rlwe/src/keys.rs
@@ -1,7 +1,7 @@
-use crate::ciphertext::GadgetCiphertext;
+use crate::ciphertext::{Ciphertext, new_gadget_ciphertext};
 use crate::elem::Elem;
 use crate::encryptor::{encrypt_rlwe_sk_thread_safe, encrypt_rlwe_sk_tmp_bytes};
-use base2k::{Module, Scalar, SvpPPol, SvpPPolOps, VecZnx};
+use base2k::{Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VmpPMat};
 use sampling::source::Source;

 pub struct SecretKey(pub Scalar);
@@ -28,7 +28,7 @@ pub struct PublicKey(pub Elem<VecZnx>);

 impl PublicKey {
     pub fn new(module: &Module, log_base2k: usize, log_q: usize) -> PublicKey {
-        PublicKey(Elem::new(module, log_base2k, log_q, 1, 0))
+        PublicKey(Elem::<VecZnx>::new(module, log_base2k, log_q, 2))
     }

     pub fn gen_thread_safe(
@@ -57,11 +57,11 @@ impl PublicKey {
     }
 }

-pub struct SwitchingKey(pub GadgetCiphertext);
+pub struct SwitchingKey(pub Ciphertext<VmpPMat>);

 impl SwitchingKey {
     pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> SwitchingKey {
-        SwitchingKey(GadgetCiphertext::new(module, log_base2k, rows, log_q))
+        SwitchingKey(new_gadget_ciphertext(module, log_base2k, rows, log_q))
     }

     pub fn n(&self) -> usize {
diff --git a/rlwe/src/plaintext.rs b/rlwe/src/plaintext.rs
index 651feab..38ae6da 100644
--- a/rlwe/src/plaintext.rs
+++ b/rlwe/src/plaintext.rs
@@ -1,62 +1,72 @@
 use crate::ciphertext::Ciphertext;
-use crate::elem::{Elem, ElemBasics};
+use crate::elem::{Elem, ElemVecZnx, VecZnxCommon};
 use crate::parameters::Parameters;
-use base2k::{Infos, Module, VecZnx, VecZnxApi, VecZnxBorrow};
+use base2k::{Infos, Module, VecZnx, VecZnxApi};

-pub struct Plaintext(pub Elem<VecZnx>);
+pub struct Plaintext<T>(pub Elem<T>);

 impl Parameters {
-    pub fn new_plaintext(&self, log_q: usize) -> Plaintext {
-        Plaintext::new(self.module(), self.log_base2k(), log_q, self.log_scale())
+    pub fn new_plaintext(&self, log_q: usize) -> Plaintext<VecZnx> {
+        Plaintext::new(self.module(), self.log_base2k(), log_q)
     }

-    pub fn bytes_of_plaintext(&self, log_q: usize) -> usize {
-        Elem::<VecZnx>::bytes_of(self.module(), self.log_base2k(), log_q, 0)
+    pub fn bytes_of_plaintext<T>(&self, log_q: usize) -> usize
+    where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
+        Elem::<T>::bytes_of(self.module(), self.log_base2k(), log_q, 1)
     }

-    pub fn plaintext_from_bytes(&self, log_q: usize, bytes: &mut [u8]) -> Plaintext {
-        Plaintext(self.elem_from_bytes(log_q, 0, bytes))
-    }
-
-    pub fn plaintext_borrow_from_bytes(&self, log_q: usize, bytes: &mut [u8]) -> Plaintext<VecZnxBorrow> {
-        Plaintext(self.elem_borrow_from_bytes(log_q, 0, bytes))
+    pub fn plaintext_from_bytes<T>(&self, log_q: usize, bytes: &mut [u8]) -> Plaintext<T>
+    where
+        T: VecZnxCommon,
+        Elem<T>: Infos + ElemVecZnx<T>,
+    {
+        Plaintext::<T>(self.elem_from_bytes::<T>(log_q, 1, bytes))
     }
 }

 impl Plaintext<VecZnx> {
-    pub fn new(module: &Module, log_base2k: usize, log_q: usize, log_scale: usize) -> Self {
-        Self(Elem::<VecZnx>::new(module, log_base2k, log_q, 0, log_scale))
+    pub fn new(module: &Module, log_base2k: usize, log_q: usize) -> Self {
+        Self(Elem::<VecZnx>::new(module, log_base2k, log_q, 1))
     }
+}

+impl<T> Plaintext<T>
+where
+    T: VecZnxCommon,
+    Elem<T>: Infos + ElemVecZnx<T>,
+{
     pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize) -> usize {
-        Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 0)
+        Elem::<T>::bytes_of(module, log_base2k, log_q, 1)
     }

     pub fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, bytes: &mut [u8]) -> Self {
-        Self(Elem::<VecZnx>::from_bytes(module, log_base2k, log_q, 0, bytes))
+        Self(Elem::<T>::from_bytes(module, log_base2k, log_q, 1, bytes))
     }

     pub fn n(&self) -> usize {
         self.0.n()
     }

-    pub fn degree(&self) -> usize {
-        self.0.degree()
-    }
-
     pub fn log_q(&self) -> usize {
-        self.0.log_q()
+        self.0.log_q
     }

-    pub fn limbs(&self) -> usize {
-        self.0.limbs()
+    pub fn rows(&self) -> usize {
+        self.0.rows()
     }

-    pub fn at(&self, i: usize) -> &VecZnx {
+    pub fn cols(&self) -> usize {
+        self.0.cols()
+    }
+
+    pub fn at(&self, i: usize) -> &T {
         self.0.at(i)
     }

-    pub fn at_mut(&mut self, i: usize) -> &mut VecZnx {
+    pub fn at_mut(&mut self, i: usize) -> &mut T {
         self.0.at_mut(i)
     }

@@ -72,61 +82,7 @@ impl Plaintext<VecZnx> {
         self.0.zero()
     }

-    pub fn as_ciphertext(&self) -> Ciphertext {
-        unsafe { Ciphertext(std::ptr::read(&self.0)) }
+    pub fn as_ciphertext(&self) -> Ciphertext<T> {
+        unsafe { Ciphertext::<T>(std::ptr::read(&self.0)) }
     }
-
-}
-
-impl Plaintext<VecZnxBorrow> {
-
-    pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize) -> usize {
-        Elem::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q, 0)
-    }
-
-    pub fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, bytes: &mut [u8]) -> Self {
-        Self(Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, 0, bytes))
-    }
-
-    pub fn n(&self) -> usize {
-        self.0.n()
-    }
-
-    pub fn degree(&self) -> usize {
-        self.0.degree()
-    }
-
-    pub fn log_q(&self) -> usize {
-        self.0.log_q()
-    }
-
-    pub fn limbs(&self) -> usize {
-        self.0.limbs()
-    }
-
-    pub fn at(&self, i: usize) -> &VecZnxBorrow {
-        self.0.at(i)
-    }
-
-    pub fn at_mut(&mut self, i: usize) -> &mut VecZnxBorrow {
-        self.0.at_mut(i)
-    }
-
-    pub fn log_base2k(&self) -> usize {
-        self.0.log_base2k()
-    }
-
-    pub fn log_scale(&self) -> usize {
-        self.0.log_scale()
-    }
-
-    pub fn zero(&mut self) {
-        self.0.zero()
-    }
-
-    /*
-    pub fn as_ciphertext(&self) -> Ciphertext {
-        unsafe { Ciphertext(std::ptr::read(&self.0)) }
-    }
-    */
 }
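`as_ciphertext` (and its `as_plaintext` counterpart) reinterprets the shared `Elem` via `std::ptr::read`, which bitwise-duplicates ownership of the underlying buffers. A toy illustration of the hazard and one way to make the aliasing explicit (sketch with dummy types, not a proposed API change):

```rust
use std::mem::ManuallyDrop;

// Toy version of the `as_ciphertext` pattern: `ptr::read` on a newtype
// produces a second owner of the same heap allocation, so one side must
// never be dropped. Wrapping the copy in ManuallyDrop makes that explicit.
struct Plaintext(Vec<i64>);
struct Ciphertext(Vec<i64>);

impl Plaintext {
    fn as_ciphertext(&self) -> ManuallyDrop<Ciphertext> {
        // Safety: the bitwise copy aliases self.0; ManuallyDrop prevents
        // a double free when the view goes out of scope.
        unsafe { ManuallyDrop::new(Ciphertext(std::ptr::read(&self.0))) }
    }
}

fn main() {
    let pt = Plaintext(vec![1, 2, 3]);
    let ct_view = pt.as_ciphertext();
    assert_eq!(ct_view.0.len(), pt.0.len());
} // only `pt` runs its destructor here
```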