From 83a7617f4b9a9114664782c237dcacf19e151974 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Bossuat
Date: Thu, 24 Apr 2025 19:05:26 +0200
Subject: [PATCH] Updated vec_znx to stacked memory layout

---
 base2k/examples/rlwe_encrypt.rs          |  12 +-
 base2k/examples/vector_matrix_product.rs |  34 +--
 base2k/src/encoding.rs                   | 222 ++++++++------
 base2k/src/stats.rs                      |   4 +-
 base2k/src/svp.rs                        |   1 +
 base2k/src/vec_znx.rs                    | 350 ++++++++++++++++-------
 base2k/src/vec_znx_dft.rs                |   2 +-
 base2k/src/vmp.rs                        |   6 +-
 8 files changed, 399 insertions(+), 232 deletions(-)

diff --git a/base2k/examples/rlwe_encrypt.rs b/base2k/examples/rlwe_encrypt.rs
index 1d977ff..1dea72d 100644
--- a/base2k/examples/rlwe_encrypt.rs
+++ b/base2k/examples/rlwe_encrypt.rs
@@ -18,7 +18,7 @@ fn main() {
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
 
-    let mut res: VecZnx = module.new_vec_znx(cols);
+    let mut res: VecZnx = module.new_vec_znx(1, cols);
 
     // s <- Z_{-1, 0, 1}[X]/(X^{N}+1)
     let mut s: Scalar = Scalar::new(n);
@@ -31,7 +31,7 @@ fn main() {
     module.svp_prepare(&mut s_ppol, &s);
 
     // a <- Z_{2^prec}[X]/(X^{N}+1)
-    let mut a: VecZnx = module.new_vec_znx(cols);
+    let mut a: VecZnx = module.new_vec_znx(1, cols);
     module.fill_uniform(log_base2k, &mut a, cols, &mut source);
 
     // Scratch space for DFT values
@@ -46,21 +46,21 @@ fn main() {
     // buf_big <- IDFT(buf_dft) (not normalized)
     module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft);
 
-    let mut m: VecZnx = module.new_vec_znx(msg_cols);
+    let mut m: VecZnx = module.new_vec_znx(1, msg_cols);
     let mut want: Vec<i64> = vec![0; n];
     want.iter_mut()
         .for_each(|x| *x = source.next_u64n(16, 15) as i64);
 
     // m
-    m.encode_vec_i64(log_base2k, log_scale, &want, 4);
+    m.encode_vec_i64(0, log_base2k, log_scale, &want, 4);
     m.normalize(log_base2k, &mut carry);
 
     // buf_big <- m - buf_big
     module.vec_znx_big_sub_small_a_inplace(&mut buf_big, &m);
 
     // b <- normalize(buf_big) + e
-    let mut b: VecZnx = module.new_vec_znx(cols);
+    let mut b: VecZnx = module.new_vec_znx(1, cols);
     module.vec_znx_big_normalize(log_base2k, &mut b, &buf_big, &mut carry);
     module.add_normal(
         log_base2k,
@@ -85,7 +85,7 @@ fn main() {
 
     // have = m * 2^{log_scale} + e
     let mut have: Vec<i64> = vec![i64::default(); n];
-    res.decode_vec_i64(log_base2k, res.cols() * log_base2k, &mut have);
+    res.decode_vec_i64(0, log_base2k, res.cols() * log_base2k, &mut have);
 
     let scale: f64 = (1 << (res.cols() * log_base2k - log_scale)) as f64;
     izip!(want.iter(), have.iter())

diff --git a/base2k/examples/vector_matrix_product.rs b/base2k/examples/vector_matrix_product.rs
index be40e25..be4f189 100644
--- a/base2k/examples/vector_matrix_product.rs
+++ b/base2k/examples/vector_matrix_product.rs
@@ -1,6 +1,6 @@
 use base2k::{
-    BACKEND, Encoding, Infos, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VecZnxVec, VmpPMat,
-    VmpPMatOps, alloc_aligned,
+    BACKEND, Encoding, Infos, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps,
+    alloc_aligned,
 };
 
 fn main() {
@@ -23,26 +23,20 @@ fn main() {
     let mut a_values: Vec<i64> = vec![i64::default(); n];
     a_values[1] = (1 << log_base2k) + 1;
 
-    let mut a: VecZnx = module.new_vec_znx(cols);
-    a.encode_vec_i64(log_base2k, log_k, &a_values, 32);
+    let mut a: VecZnx = module.new_vec_znx(1, rows);
+    a.encode_vec_i64(0, log_base2k, log_k, &a_values, 32);
     a.normalize(log_base2k, &mut buf);
 
-    a.print(a.cols(), n);
+    a.print(0, a.cols(), n);
     println!();
 
-    let mut vecznx: Vec<VecZnx> = Vec::new();
-    (0..rows).for_each(|_| {
-        vecznx.push(module.new_vec_znx(cols));
-    });
-
-    (0..rows).for_each(|i| {
-        vecznx[i].raw_mut()[i * n + 1] = 1 as i64;
-    });
-
-    let slices: Vec<&[i64]> = vecznx.dblptr();
     let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
-    module.vmp_prepare_dblptr(&mut vmp_pmat, &slices, &mut buf);
+
+    (0..a.cols()).for_each(|row_i| {
+        let mut tmp: VecZnx = module.new_vec_znx(1, cols);
+        tmp.at_mut(row_i)[1] = 1 as i64;
+        module.vmp_prepare_row(&mut vmp_pmat, tmp.raw(), row_i, &mut buf);
+    });
 
     let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf);
@@ -50,13 +44,13 @@ fn main() {
     let mut c_big: VecZnxBig = c_dft.as_vec_znx_big();
     module.vec_znx_idft_tmp_a(&mut c_big, &mut c_dft);
 
-    let mut res: VecZnx = module.new_vec_znx(cols);
+    let mut res: VecZnx = module.new_vec_znx(1, rows);
     module.vec_znx_big_normalize(log_base2k, &mut res, &c_big, &mut buf);
 
     let mut values_res: Vec<i64> = vec![i64::default(); n];
-    res.decode_vec_i64(log_base2k, log_k, &mut values_res);
+    res.decode_vec_i64(0, log_base2k, log_k, &mut values_res);
 
-    res.print(res.cols(), n);
+    res.print(0, res.cols(), n);
 
     module.free();
diff --git a/base2k/src/encoding.rs b/base2k/src/encoding.rs
index 4615838..f7773ee 100644
--- a/base2k/src/encoding.rs
+++ b/base2k/src/encoding.rs
@@ -9,94 +9,104 @@ pub trait Encoding {
     ///
     /// # Arguments
     ///
-    /// * `log_base2k`: base two logarithm decomposition of the receiver.
-    /// * `log_k`: base two logarithm of the scaling of the data.
+    /// * `poly_idx`: the index of the polynomial on which to encode the data.
+    /// * `log_base2k`: base two logarithm of the decomposition basis of the receiver.
+    /// * `log_k`: base two negative logarithm of the scaling of the data.
     /// * `data`: data to encode on the receiver.
     /// * `log_max`: base two logarithm of the infinity norm of the input data.
-    fn encode_vec_i64(&mut self, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize);
+    fn encode_vec_i64(&mut self, poly_idx: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize);
 
     /// decode a vector of i64 from the receiver.
     ///
     /// # Arguments
     ///
-    /// * `log_base2k`: base two logarithm decomposition of the receiver.
-    /// * `log_k`: base two logarithm of the scaling of the data.
+    /// * `poly_idx`: the index of the polynomial from which to decode the data.
+    /// * `log_base2k`: base two logarithm of the decomposition basis of the receiver.
+    /// * `log_k`: base two negative logarithm of the scaling of the data.
     /// * `data`: data to decode from the receiver.
-    fn decode_vec_i64(&self, log_base2k: usize, log_k: usize, data: &mut [i64]);
+    fn decode_vec_i64(&self, poly_idx: usize, log_base2k: usize, log_k: usize, data: &mut [i64]);
 
     /// decode a vector of Float from the receiver.
     ///
     /// # Arguments
-    /// * `log_base2k`: base two logarithm decomposition of the receiver.
+    /// * `poly_idx`: the index of the polynomial from which to decode the data.
+    /// * `log_base2k`: base two logarithm of the decomposition basis of the receiver.
     /// * `data`: data to decode from the receiver.
-    fn decode_vec_float(&self, log_base2k: usize, data: &mut [Float]);
+    fn decode_vec_float(&self, poly_idx: usize, log_base2k: usize, data: &mut [Float]);
 
     /// encodes a single i64 on the receiver at the given index.
     ///
     /// # Arguments
     ///
-    /// * `log_base2k`: base two logarithm decomposition of the receiver.
-    /// * `log_k`: base two logarithm of the scaling of the data.
+    /// * `poly_idx`: the index of the polynomial on which to encode the data.
+    /// * `log_base2k`: base two logarithm of the decomposition basis of the receiver.
+    /// * `log_k`: base two negative logarithm of the scaling of the data.
     /// * `i`: index of the coefficient on which to encode the data.
     /// * `data`: data to encode on the receiver.
     /// * `log_max`: base two logarithm of the infinity norm of the input data.
-    fn encode_coeff_i64(&mut self, log_base2k: usize, log_k: usize, i: usize, data: i64, log_max: usize);
+    fn encode_coeff_i64(&mut self, poly_idx: usize, log_base2k: usize, log_k: usize, i: usize, data: i64, log_max: usize);
 
     /// decode a single i64 from the receiver at the given index.
     ///
     /// # Arguments
     ///
-    /// * `log_base2k`: base two logarithm decomposition of the receiver.
-    /// * `log_k`: base two logarithm of the scaling of the data.
+    /// * `poly_idx`: the index of the polynomial from which to decode the data.
+    /// * `log_base2k`: base two logarithm of the decomposition basis of the receiver.
+    /// * `log_k`: base two negative logarithm of the scaling of the data.
     /// * `i`: index of the coefficient to decode.
-    /// * `data`: data to decode from the receiver.
-    fn decode_coeff_i64(&self, log_base2k: usize, log_k: usize, i: usize) -> i64;
+    fn decode_coeff_i64(&self, poly_idx: usize, log_base2k: usize, log_k: usize, i: usize) -> i64;
 }
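For reference while reviewing, here is a minimal round trip through the updated trait. This is a sketch against the signatures declared above; `VecZnx::new(n, size, cols)` is defined later in this patch, and the dimensions are illustrative only:

    use base2k::{Encoding, VecZnx};

    fn roundtrip_sketch() {
        let (n, size, cols) = (16, 2, 5);
        let log_base2k: usize = 17;
        let log_k: usize = cols * log_base2k - 5;

        // Two stacked polynomials; encode into the second one (poly_idx = 1).
        let mut a: VecZnx = VecZnx::new(n, size, cols);
        let data: Vec<i64> = (0..n as i64).collect();
        a.encode_vec_i64(1, log_base2k, log_k, &data, 10);

        // Decoding the same poly_idx recovers the input exactly.
        let mut back: Vec<i64> = vec![0; n];
        a.decode_vec_i64(1, log_base2k, log_k, &mut back);
        assert_eq!(data, back);
    }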
 
 impl Encoding for VecZnx {
-    fn encode_vec_i64(&mut self, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
-        encode_vec_i64(self, log_base2k, log_k, data, log_max)
+    fn encode_vec_i64(&mut self, poly_idx: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
+        encode_vec_i64(self, poly_idx, log_base2k, log_k, data, log_max)
     }
 
-    fn decode_vec_i64(&self, log_base2k: usize, log_k: usize, data: &mut [i64]) {
-        decode_vec_i64(self, log_base2k, log_k, data)
+    fn decode_vec_i64(&self, poly_idx: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
+        decode_vec_i64(self, poly_idx, log_base2k, log_k, data)
     }
 
-    fn decode_vec_float(&self, log_base2k: usize, data: &mut [Float]) {
-        decode_vec_float(self, log_base2k, data)
+    fn decode_vec_float(&self, poly_idx: usize, log_base2k: usize, data: &mut [Float]) {
+        decode_vec_float(self, poly_idx, log_base2k, data)
     }
 
-    fn encode_coeff_i64(&mut self, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
-        encode_coeff_i64(self, log_base2k, log_k, i, value, log_max)
+    fn encode_coeff_i64(&mut self, poly_idx: usize, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
+        encode_coeff_i64(self, poly_idx, log_base2k, log_k, i, value, log_max)
     }
 
-    fn decode_coeff_i64(&self, log_base2k: usize, log_k: usize, i: usize) -> i64 {
-        decode_coeff_i64(self, log_base2k, log_k, i)
+    fn decode_coeff_i64(&self, poly_idx: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
+        decode_coeff_i64(self, poly_idx, log_base2k, log_k, i)
     }
 }
 
-fn encode_vec_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
+fn encode_vec_i64(a: &mut VecZnx, poly_idx: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
     let cols: usize = (log_k + log_base2k - 1) / log_base2k;
-    debug_assert!(
-        cols <= a.cols(),
-        "invalid argument log_k: (log_k + a.log_base2k - 1)/a.log_base2k={} > a.cols()={}",
-        cols,
-        a.cols()
-    );
+    #[cfg(debug_assertions)]
+    {
+        assert!(
+            cols <= a.cols(),
+            "invalid argument log_k: (log_k + a.log_base2k - 1)/a.log_base2k={} > a.cols()={}",
+            cols,
+            a.cols()
+        );
+        assert!(poly_idx < a.size());
+        assert!(data.len() <= a.n());
+    }
 
-    let size: usize = min(data.len(), a.n());
+    let data_len: usize = data.len();
     let log_k_rem: usize = log_base2k - (log_k % log_base2k);
 
     (0..a.cols()).for_each(|i| unsafe {
-        znx_zero_i64_ref(size as u64, a.at_mut(i).as_mut_ptr());
+        znx_zero_i64_ref(a.n() as u64, a.at_poly_mut_ptr(poly_idx, i));
     });
 
     // If 2^{log_base2k} * 2^{k_rem} < 2^{63}-1, then we can simply copy
     // values on the last limb.
     // Else we decompose values base2k.
     if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-        a.at_mut(cols - 1)[..size].copy_from_slice(&data[..size]);
+        a.at_poly_mut(poly_idx, cols - 1)[..data_len].copy_from_slice(&data[..data_len]);
     } else {
         let mask: i64 = (1 << log_base2k) - 1;
         let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
@@ -105,7 +115,7 @@ fn encode_vec_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, data: &[i64],
             .enumerate()
             .for_each(|(i, i_rev)| {
                 let shift: usize = i * log_base2k;
-                izip!(a.at_mut(i_rev)[..size].iter_mut(), data[..size].iter()).for_each(|(y, x)| *y = (x >> shift) & mask);
+                izip!(a.at_poly_mut(poly_idx, i_rev).iter_mut(), data.iter()).for_each(|(y, x)| *y = (x >> shift) & mask);
             })
     }
 
@@ -113,45 +123,53 @@ fn encode_vec_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, data: &[i64],
     if log_k_rem != log_base2k {
         let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
         (cols - steps..cols).rev().for_each(|i| {
-            a.at_mut(i)[..size]
+            a.at_poly_mut(poly_idx, i)[..data_len]
                 .iter_mut()
                 .for_each(|x| *x <<= log_k_rem);
         })
     }
 }
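The branch structure above enforces one invariant: each coefficient is split into at most `cols` limbs of `log_base2k` bits, with the most significant limb in the lowest column. That shift-and-mask arithmetic can be checked in isolation. A self-contained sketch with illustrative values, ignoring the `log_k_rem` scaling and negative inputs (both handled by the library via normalization):

    fn decompose(x: i64, log_base2k: usize, cols: usize) -> Vec<i64> {
        let mask: i64 = (1 << log_base2k) - 1;
        // Column 0 receives the most significant limb, the last column the least.
        (0..cols)
            .map(|j| (x >> ((cols - 1 - j) * log_base2k)) & mask)
            .collect()
    }

    fn recompose(limbs: &[i64], log_base2k: usize) -> i64 {
        // Mirrors decode_vec_i64: shift the accumulator left, add the next limb.
        limbs.iter().fold(0, |acc, &l| (acc << log_base2k) + l)
    }

    fn main() {
        let (log_base2k, cols) = (17usize, 3usize);
        let x: i64 = 123_456_789; // fits in 3 * 17 = 51 bits
        assert_eq!(recompose(&decompose(x, log_base2k, cols), log_base2k), x);
    }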
 
-fn decode_vec_i64(a: &VecZnx, log_base2k: usize, log_k: usize, data: &mut [i64]) {
+fn decode_vec_i64(a: &VecZnx, poly_idx: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
     let cols: usize = (log_k + log_base2k - 1) / log_base2k;
-    debug_assert!(
-        data.len() >= a.n(),
-        "invalid data: data.len()={} < a.n()={}",
-        data.len(),
-        a.n()
-    );
-    data.copy_from_slice(a.at(0));
+    #[cfg(debug_assertions)]
+    {
+        assert!(
+            data.len() >= a.n(),
+            "invalid data: data.len()={} < a.n()={}",
+            data.len(),
+            a.n()
+        );
+        assert!(poly_idx < a.size());
+    }
+    data.copy_from_slice(a.at_poly(poly_idx, 0));
     let rem: usize = log_base2k - (log_k % log_base2k);
     (1..cols).for_each(|i| {
         if i == cols - 1 && rem != log_base2k {
             let k_rem: usize = log_base2k - rem;
-            izip!(a.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
+            izip!(a.at_poly(poly_idx, i).iter(), data.iter_mut()).for_each(|(x, y)| {
                 *y = (*y << k_rem) + (x >> rem);
             });
         } else {
-            izip!(a.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
+            izip!(a.at_poly(poly_idx, i).iter(), data.iter_mut()).for_each(|(x, y)| {
                 *y = (*y << log_base2k) + x;
             });
         }
     })
 }
 
-fn decode_vec_float(a: &VecZnx, log_base2k: usize, data: &mut [Float]) {
+fn decode_vec_float(a: &VecZnx, poly_idx: usize, log_base2k: usize, data: &mut [Float]) {
     let cols: usize = a.cols();
-    debug_assert!(
-        data.len() >= a.n(),
-        "invalid data: data.len()={} < a.n()={}",
-        data.len(),
-        a.n()
-    );
+    #[cfg(debug_assertions)]
+    {
+        assert!(
+            data.len() >= a.n(),
+            "invalid data: data.len()={} < a.n()={}",
+            data.len(),
+            a.n()
+        );
+        assert!(poly_idx < a.size());
+    }
 
     let prec: u32 = (log_base2k * cols) as u32;
 
     // y[i] = sum x[j][i] * 2^{-log_base2k*j}
     (0..cols).for_each(|i| {
         if i == 0 {
-            izip!(a.at(cols - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
+            izip!(a.at_poly(poly_idx, cols - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
                 y.assign(*x);
                 *y /= &base;
             });
         } else {
-            izip!(a.at(cols - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
+            izip!(a.at_poly(poly_idx, cols - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
                 *y += Float::with_val(prec, *x);
                 *y /= &base;
             });
         }
     });
 }
 
-fn encode_coeff_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
-    debug_assert!(i < a.n());
+fn encode_coeff_i64(a: &mut VecZnx, poly_idx: usize, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
     let cols: usize = (log_k + log_base2k - 1) / log_base2k;
-    debug_assert!(
-        cols <= a.cols(),
-        "invalid argument log_k: (log_k + a.log_base2k - 1)/a.log_base2k={} > a.cols()={}",
-        cols,
-        a.cols()
-    );
+
+    #[cfg(debug_assertions)]
+    {
+        assert!(i < a.n());
+        assert!(
+            cols <= a.cols(),
+            "invalid argument log_k: (log_k + a.log_base2k - 1)/a.log_base2k={} > a.cols()={}",
+            cols,
+            a.cols()
+        );
+        assert!(poly_idx < a.size());
+    }
+
     let log_k_rem: usize = log_base2k - (log_k % log_base2k);
-    (0..a.cols()).for_each(|j| a.at_mut(j)[i] = 0);
+    (0..a.cols()).for_each(|j| a.at_poly_mut(poly_idx, j)[i] = 0);
 
     // If 2^{log_base2k} * 2^{log_k_rem} < 2^{63}-1, then we can simply copy
     // values on the last limb.
     // Else we decompose values base2k.
     if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-        a.at_mut(cols - 1)[i] = value;
+        a.at_poly_mut(poly_idx, cols - 1)[i] = value;
     } else {
         let mask: i64 = (1 << log_base2k) - 1;
         let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
@@ -198,7 +222,7 @@ fn encode_coeff_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, i: usize, v
             .rev()
             .enumerate()
             .for_each(|(j, j_rev)| {
-                a.at_mut(j_rev)[i] = (value >> (j * log_base2k)) & mask;
+                a.at_poly_mut(poly_idx, j_rev)[i] = (value >> (j * log_base2k)) & mask;
             })
     }
 
@@ -206,19 +230,25 @@ fn encode_coeff_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, i: usize, v
     if log_k_rem != log_base2k {
         let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
         (cols - steps..cols).rev().for_each(|j| {
-            a.at_mut(j)[i] <<= log_k_rem;
+            a.at_poly_mut(poly_idx, j)[i] <<= log_k_rem;
         })
     }
 }
 
-fn decode_coeff_i64(a: &VecZnx, log_base2k: usize, log_k: usize, i: usize) -> i64 {
-    let cols: usize = (log_k + log_base2k - 1) / log_base2k;
-    debug_assert!(i < a.n());
-    let data: &[i64] = a.raw();
-    let mut res: i64 = data[i];
-    let rem: usize = log_base2k - (log_k % log_base2k);
-    (1..cols).for_each(|i| {
-        let x = data[i * a.n()];
-        if i == cols - 1 && rem != log_base2k {
+fn decode_coeff_i64(a: &VecZnx, poly_idx: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
+    #[cfg(debug_assertions)]
+    {
+        assert!(i < a.n());
+        assert!(poly_idx < a.size());
+    }
+
+    let cols: usize = (log_k + log_base2k - 1) / log_base2k;
+    let data: &[i64] = a.raw();
+    let mut res: i64 = data[poly_idx * a.n() + i];
+    let rem: usize = log_base2k - (log_k % log_base2k);
+    let slice_size: usize = a.n() * a.size();
+    (1..cols).for_each(|j| {
+        let x = data[j * slice_size + poly_idx * a.n() + i];
+        if j == cols - 1 && rem != log_base2k {
             let k_rem: usize = log_base2k - rem;
             res = (res << k_rem) + (x >> rem);
         } else {
@@ -241,15 +271,19 @@ mod tests {
         let log_base2k: usize = 17;
         let cols: usize = 5;
         let log_k: usize = cols * log_base2k - 5;
-        let mut a: VecZnx = VecZnx::new(n, cols);
-        let mut have: Vec<i64> = vec![i64::default(); n];
-        have.iter_mut()
-            .enumerate()
-            .for_each(|(i, x)| *x = (i as i64) - (n as i64) / 2);
-        a.encode_vec_i64(log_base2k, log_k, &have, 10);
-        let mut want = vec![i64::default(); n];
-        a.decode_vec_i64(log_base2k, log_k, &mut want);
-        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b));
+        let mut a: VecZnx = VecZnx::new(n, 2, cols);
+        let mut source: Source = Source::new([0u8; 32]);
+        let raw: &mut [i64] = a.raw_mut();
+        raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
+        (0..a.size()).for_each(|poly_idx| {
+            let mut have: Vec<i64> = vec![i64::default(); n];
+            have.iter_mut()
+                .for_each(|x| *x = (source.next_i64() << 56) >> 56);
+            a.encode_vec_i64(poly_idx, log_base2k, log_k, &have, 10);
+            let mut want: Vec<i64> = vec![i64::default(); n];
+            a.decode_vec_i64(poly_idx, log_base2k, log_k, &mut want);
+            izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
+        });
     }
 
     #[test]
         let n: usize = 8;
         let log_base2k: usize = 17;
         let cols: usize = 5;
         let log_k: usize = cols * log_base2k - 5;
-        let mut a: VecZnx = VecZnx::new(n, cols);
-        let mut have: Vec<i64> = vec![i64::default(); n];
-        let mut source = Source::new([1; 32]);
-        have.iter_mut().for_each(|x| {
-            *x = source
-                .next_u64n(u64::MAX, u64::MAX)
-                .wrapping_sub(u64::MAX / 2 + 1) as i64;
-        });
-        a.encode_vec_i64(log_base2k, log_k, &have, 63);
-        //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
-        let mut want = vec![i64::default(); n];
-        //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
-        a.decode_vec_i64(log_base2k, log_k, &mut want);
-        izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
+        let mut a: VecZnx = VecZnx::new(n, 2, cols);
+        let mut source = Source::new([0u8; 32]);
+        let raw: &mut [i64] = a.raw_mut();
+        raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
+        (0..a.size()).for_each(|poly_idx| {
+            let mut have: Vec<i64> = vec![i64::default(); n];
+            have.iter_mut().for_each(|x| *x = source.next_i64());
+            a.encode_vec_i64(poly_idx, log_base2k, log_k, &have, 64);
+            let mut want = vec![i64::default(); n];
+            a.decode_vec_i64(poly_idx, log_base2k, log_k, &mut want);
+            izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
+        })
     }
 }

diff --git a/base2k/src/stats.rs b/base2k/src/stats.rs
index 776ae75..f72ebaa 100644
--- a/base2k/src/stats.rs
+++ b/base2k/src/stats.rs
@@ -4,10 +4,10 @@ use rug::float::Round;
 use rug::ops::{AddAssignRound, DivAssignRound, SubAssignRound};
 
 impl VecZnx {
-    pub fn std(&self, log_base2k: usize) -> f64 {
+    pub fn std(&self, poly_idx: usize, log_base2k: usize) -> f64 {
         let prec: u32 = (self.cols() * log_base2k) as u32;
         let mut data: Vec<Float> = (0..self.n()).map(|_| Float::with_val(prec, 0)).collect();
-        self.decode_vec_float(log_base2k, &mut data);
+        self.decode_vec_float(poly_idx, log_base2k, &mut data);
         // std = sqrt(sum((xi - avg)^2) / n)
         let mut avg: Float = Float::with_val(prec, 0);
         data.iter().for_each(|x| {

diff --git a/base2k/src/svp.rs b/base2k/src/svp.rs
index 9b2c64b..a55f954 100644
--- a/base2k/src/svp.rs
+++ b/base2k/src/svp.rs
@@ -117,6 +117,7 @@ impl Scalar {
     pub fn as_vec_znx(&self) -> VecZnx {
         VecZnx {
             n: self.n,
+            size: 1, // TODO REVIEW IF NEED TO ADD size TO SCALAR
             cols: 1,
             data: Vec::new(),
             ptr: self.ptr,
diff --git a/base2k/src/vec_znx.rs b/base2k/src/vec_znx.rs
index 01659af..f68ed9c 100644
--- a/base2k/src/vec_znx.rs
+++ b/base2k/src/vec_znx.rs
@@ -6,14 +6,24 @@
 use crate::{alloc_aligned, assert_alignement};
 use itertools::izip;
 use std::cmp::min;
 
-/// [VecZnx] represents a vector of small norm polynomials of Zn\[X\] with [i64] coefficients.
+/// [VecZnx] represents a collection of contiguously stacked vectors of small norm polynomials of
+/// Zn\[X\] with [i64] coefficients.
 /// A [VecZnx] is composed of multiple Zn\[X\] polynomials stored in a single contiguous array
 /// in the memory.
+///
+/// # Example
+///
+/// Given 3 polynomials (a, b, c) of Zn\[X\], each with 4 columns, the memory
+/// layout is: `[a0, b0, c0, a1, b1, c1, a2, b2, c2, a3, b3, c3]`, where ai, bi, ci
+/// are small polynomials of Zn\[X\].
 #[derive(Clone)]
 pub struct VecZnx {
     /// Polynomial degree.
     pub n: usize,
 
+    /// Number of stacked polynomials.
+    pub size: usize,
+
     /// Number of columns.
     pub cols: usize,
@@ -24,23 +34,8 @@ pub struct VecZnx {
     pub ptr: *mut i64,
 }
 
-pub trait VecZnxVec {
-    fn dblptr(&self) -> Vec<&[i64]>;
-    fn dblptr_mut(&mut self) -> Vec<&mut [i64]>;
-}
-
-impl VecZnxVec for Vec<VecZnx> {
-    fn dblptr(&self) -> Vec<&[i64]> {
-        self.iter().map(|v| v.raw()).collect()
-    }
-
-    fn dblptr_mut(&mut self) -> Vec<&mut [i64]> {
-        self.iter_mut().map(|v| v.raw_mut()).collect()
-    }
-}
-
-pub fn bytes_of_vec_znx(n: usize, cols: usize) -> usize {
-    n * cols * 8
+pub fn bytes_of_vec_znx(n: usize, size: usize, cols: usize) -> usize {
+    n * size * cols * 8
 }
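The stacked layout fixes the flat index of every coefficient: column-major over columns, with the stack interleaved inside each column. A small standalone sketch of that index arithmetic (the same formula reappears in `at_poly_ptr` below); dimensions are illustrative:

    // Flat offset of coefficient `coeff` of polynomial `poly` in column `col`,
    // for a VecZnx with degree n and stack size `size`.
    fn offset(n: usize, size: usize, poly: usize, col: usize, coeff: usize) -> usize {
        n * (size * col + poly) + coeff
    }

    fn main() {
        let (n, size) = (4usize, 3usize); // 3 stacked polynomials (a, b, c), degree 4
        // Column 1 of polynomial b starts right after a1 in
        // [a0, b0, c0, a1, b1, c1, ...], i.e. at the fifth block of n coefficients.
        assert_eq!(offset(n, size, 1, 1, 0), n * 4);
    }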
 
 impl VecZnx {
     /// The struct will take ownership of buf[..[VecZnx::bytes_of]]
     ///
     /// User must ensure that data is properly aligned and that
-    /// the size of data is at least equal to [VecZnx::bytes_of].
-    pub fn from_bytes(n: usize, cols: usize, bytes: &mut [u8]) -> Self {
+    /// the size of data is equal to [VecZnx::bytes_of].
+    pub fn from_bytes(n: usize, size: usize, cols: usize, bytes: &mut [u8]) -> Self {
         #[cfg(debug_assertions)]
         {
-            assert_eq!(bytes.len(), Self::bytes_of(n, cols));
+            assert!(size > 0);
+            assert_eq!(bytes.len(), Self::bytes_of(n, size, cols));
             assert_alignement(bytes.as_ptr());
         }
         unsafe {
             let bytes_i64: &mut [i64] = cast_mut(bytes);
             let ptr: *mut i64 = bytes_i64.as_mut_ptr();
             VecZnx {
                 n: n,
+                size: size,
                 cols: cols,
-                data: Vec::from_raw_parts(bytes_i64.as_mut_ptr(), bytes.len(), bytes.len()),
+                data: Vec::from_raw_parts(ptr, bytes.len(), bytes.len()),
                 ptr: ptr,
             }
         }
     }
 
-    pub fn from_bytes_borrow(n: usize, cols: usize, bytes: &mut [u8]) -> Self {
+    pub fn from_bytes_borrow(n: usize, size: usize, cols: usize, bytes: &mut [u8]) -> Self {
         #[cfg(debug_assertions)]
         {
-            assert!(bytes.len() >= Self::bytes_of(n, cols));
+            assert!(size > 0);
+            assert!(bytes.len() >= Self::bytes_of(n, size, cols));
             assert_alignement(bytes.as_ptr());
         }
         VecZnx {
             n: n,
+            size: size,
             cols: cols,
             data: Vec::new(),
             ptr: bytes.as_mut_ptr() as *mut i64,
         }
     }
 
-    pub fn bytes_of(n: usize, cols: usize) -> usize {
-        bytes_of_vec_znx(n, cols)
+    pub fn bytes_of(n: usize, size: usize, cols: usize) -> usize {
+        bytes_of_vec_znx(n, size, cols)
     }
 
     pub fn copy_from(&mut self, a: &VecZnx) {
         copy_vec_znx_from(self, a);
     }
 
-    pub fn raw(&self) -> &[i64] {
-        unsafe { std::slice::from_raw_parts(self.ptr, self.n * self.cols) }
-    }
-
     pub fn borrowing(&self) -> bool {
         self.data.len() == 0
     }
 
-    pub fn raw_mut(&mut self) -> &mut [i64] {
-        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.n * self.cols) }
+    /// TODO: when SML refactoring is done, move this to the [Infos] trait.
+    pub fn size(&self) -> usize {
+        self.size
     }
 
+    /// Returns a reference to the backend slice of the receiver.
+    /// Total size is [VecZnx::n()] * [VecZnx::size()] * [VecZnx::cols()].
+    pub fn raw(&self) -> &[i64] {
+        unsafe { std::slice::from_raw_parts(self.ptr, self.n * self.size * self.cols) }
+    }
+
+    /// Returns a mutable reference to the backend slice of the receiver.
+    /// Total size is [VecZnx::n()] * [VecZnx::size()] * [VecZnx::cols()].
+    pub fn raw_mut(&mut self) -> &mut [i64] {
+        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.n * self.size * self.cols) }
+    }
+
+    /// Returns a non-mutable pointer to the backend slice of the receiver.
     pub fn as_ptr(&self) -> *const i64 {
         self.ptr
     }
 
+    /// Returns a mutable pointer to the backend slice of the receiver.
     pub fn as_mut_ptr(&mut self) -> *mut i64 {
         self.ptr
     }
 
-    pub fn at(&self, i: usize) -> &[i64] {
-        let n: usize = self.n();
-        &self.raw()[n * i..n * (i + 1)]
-    }
-
-    pub fn at_mut(&mut self, i: usize) -> &mut [i64] {
-        let n: usize = self.n();
-        &mut self.raw_mut()[n * i..n * (i + 1)]
-    }
-
+    /// Returns a non-mutable pointer starting at the i-th column.
     pub fn at_ptr(&self, i: usize) -> *const i64 {
-        self.ptr.wrapping_add(i * self.n)
+        #[cfg(debug_assertions)]
+        {
+            assert!(i < self.cols);
+        }
+        let offset: usize = self.n * self.size * i;
+        self.ptr.wrapping_add(offset)
     }
 
-    pub fn at_mut_ptr(&mut self, i: usize) -> *mut i64 {
-        self.ptr.wrapping_add(i * self.n)
+    /// Returns a non-mutable reference to the i-th column.
+    /// The slice contains [VecZnx::size()] small polynomials, each of [VecZnx::n()] coefficients.
+    pub fn at(&self, i: usize) -> &[i64] {
+        unsafe { std::slice::from_raw_parts(self.at_ptr(i), self.n * self.size) }
+    }
+
+    /// Returns a non-mutable pointer starting at the j-th column of the i-th polynomial.
+    pub fn at_poly_ptr(&self, i: usize, j: usize) -> *const i64 {
+        #[cfg(debug_assertions)]
+        {
+            assert!(i < self.size);
+            assert!(j < self.cols);
+        }
+        let offset: usize = self.n * (self.size * j + i);
+        self.ptr.wrapping_add(offset)
+    }
+
+    /// Returns a non-mutable reference to the j-th column of the i-th polynomial.
+    /// The slice contains one small polynomial of [VecZnx::n()] coefficients.
+    pub fn at_poly(&self, i: usize, j: usize) -> &[i64] {
+        unsafe { std::slice::from_raw_parts(self.at_poly_ptr(i, j), self.n) }
+    }
+
+    /// Returns a mutable pointer starting at the i-th column.
+    pub fn at_mut_ptr(&self, i: usize) -> *mut i64 {
+        #[cfg(debug_assertions)]
+        {
+            assert!(i < self.cols);
+        }
+        let offset: usize = self.n * self.size * i;
+        self.ptr.wrapping_add(offset)
+    }
+
+    /// Returns a mutable reference to the i-th column.
+    /// The slice contains [VecZnx::size()] small polynomials, each of [VecZnx::n()] coefficients.
+    pub fn at_mut(&mut self, i: usize) -> &mut [i64] {
+        unsafe { std::slice::from_raw_parts_mut(self.at_mut_ptr(i), self.n * self.size) }
+    }
+
+    /// Returns a mutable pointer starting at the j-th column of the i-th polynomial.
+    pub fn at_poly_mut_ptr(&mut self, i: usize, j: usize) -> *mut i64 {
+        #[cfg(debug_assertions)]
+        {
+            assert!(i < self.size);
+            assert!(j < self.cols);
+        }
+
+        let offset: usize = self.n * (self.size * j + i);
+        self.ptr.wrapping_add(offset)
+    }
+
+    /// Returns a mutable reference to the j-th column of the i-th polynomial.
+    /// The slice contains one small polynomial of [VecZnx::n()] coefficients.
+    pub fn at_poly_mut(&mut self, i: usize, j: usize) -> &mut [i64] {
+        let ptr: *mut i64 = self.at_poly_mut_ptr(i, j);
+        unsafe { std::slice::from_raw_parts_mut(ptr, self.n) }
     }
 
     pub fn zero(&mut self) {
-        unsafe { znx::znx_zero_i64_ref((self.n * self.cols) as u64, self.ptr) }
+        unsafe { znx::znx_zero_i64_ref((self.n * self.cols * self.size) as u64, self.ptr) }
     }
 
     pub fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) {
@@ -144,8 +203,8 @@ impl VecZnx {
         switch_degree(a, self)
     }
 
-    pub fn print(&self, cols: usize, n: usize) {
-        (0..cols).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
+    pub fn print(&self, poly: usize, cols: usize, n: usize) {
+        (0..cols).for_each(|i| println!("{}: {:?}", i, &self.at_poly(poly, i)[..n]))
     }
 }
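The accessor pairs differ only in granularity: `at(i)`/`at_mut(i)` span the whole stack at column `i`, while `at_poly(i, j)`/`at_poly_mut(i, j)` select a single polynomial inside it. A short sketch using the methods as defined above, with illustrative dimensions:

    use base2k::VecZnx;

    fn accessors_sketch() {
        let (n, size, cols) = (8usize, 3usize, 4usize);
        let a: VecZnx = VecZnx::new(n, size, cols);

        // Column 1 across the whole stack: size * n coefficients.
        assert_eq!(a.at(1).len(), size * n);
        // Column 1 of polynomial 2 only: n coefficients.
        assert_eq!(a.at_poly(2, 1).len(), n);
    }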
@@ -182,11 +241,19 @@ pub fn copy_vec_znx_from(b: &mut VecZnx, a: &VecZnx) {
 
 impl VecZnx {
     /// Allocates a new [VecZnx] composed of #cols polynomials of Z\[X\].
-    pub fn new(n: usize, cols: usize) -> Self {
-        let mut data: Vec<i64> = alloc_aligned::<i64>(n * cols);
+    pub fn new(n: usize, size: usize, cols: usize) -> Self {
+        #[cfg(debug_assertions)]
+        {
+            assert!(n > 0);
+            assert!(n & (n - 1) == 0);
+            assert!(size > 0);
+            assert!(cols > 0);
+        }
+        let mut data: Vec<i64> = alloc_aligned::<i64>(n * size * cols);
         let ptr: *mut i64 = data.as_mut_ptr();
         Self {
             n: n,
+            size: size,
             cols: cols,
             data: data,
             ptr: ptr,
@@ -206,7 +273,7 @@ impl VecZnx {
 
         if !self.borrowing() {
             self.data
-                .truncate((self.cols() - k / log_base2k) * self.n());
+                .truncate((self.cols() - k / log_base2k) * self.n() * self.size());
         }
 
         self.cols -= k / log_base2k;
@@ -244,14 +311,20 @@ pub fn switch_degree(b: &mut VecZnx, a: &VecZnx) {
     });
 }
 
+fn normalize_tmp_bytes(n: usize, size: usize) -> usize {
+    n * size * std::mem::size_of::<i64>()
+}
+
 fn normalize(log_base2k: usize, a: &mut VecZnx, tmp_bytes: &mut [u8]) {
     let n: usize = a.n();
+    let size: usize = a.size();
 
     debug_assert!(
-        tmp_bytes.len() >= n * 8,
-        "invalid tmp_bytes: tmp_bytes.len()={} < self.n()={}",
+        tmp_bytes.len() >= normalize_tmp_bytes(n, size),
+        "invalid tmp_bytes: tmp_bytes.len()={} < normalize_tmp_bytes({}, {})",
         tmp_bytes.len(),
-        n
+        n,
+        size,
     );
     #[cfg(debug_assertions)]
     {
         assert_alignement(tmp_bytes.as_ptr())
     }
 
     let carry_i64: &mut [i64] = cast_mut(tmp_bytes);
 
     unsafe {
-        znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr());
+        znx::znx_zero_i64_ref((n * size) as u64, carry_i64.as_mut_ptr());
         (0..a.cols()).rev().for_each(|i| {
             znx::znx_normalize(
-                n as u64,
+                (n * size) as u64,
                 log_base2k as u64,
                 a.at_mut_ptr(i),
                 carry_i64.as_mut_ptr(),
             )
         });
     }
 }
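`znx_normalize` is a C-backend primitive, so its exact semantics are not visible in this diff; the usual base-2^k normalization step, which the carry buffer sizing above must accommodate for all `n * size` coefficients, reduces each limb to a centered digit and propagates a carry into the next more significant column. A plain-Rust sketch of that per-coefficient step, under that assumption:

    // One assumed normalization step for a single coefficient: reduce the limb
    // to a centered digit in [-2^(k-1), 2^(k-1)) and emit the carry for the
    // next column. The authoritative version is znx_normalize in the backend.
    fn normalize_step(x: i64, log_base2k: usize) -> (i64, i64) {
        let basis: i64 = 1 << log_base2k;
        let half: i64 = basis >> 1;
        let digit: i64 = (x + half).rem_euclid(basis) - half;
        let carry: i64 = (x - digit) >> log_base2k;
        (digit, carry)
    }

    fn main() {
        let k: usize = 17;
        let (digit, carry) = normalize_step((1 << k) + 3, k);
        assert_eq!((digit, carry), (3, 1)); // x == digit + (carry << k)
    }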
 
+pub fn rsh_tmp_bytes(n: usize, size: usize) -> usize {
+    n * size * std::mem::size_of::<i64>()
+}
+
 pub fn rsh(log_base2k: usize, a: &mut VecZnx, k: usize, tmp_bytes: &mut [u8]) {
     let n: usize = a.n();
-
-    debug_assert!(
-        tmp_bytes.len() >> 3 >= n,
-        "invalid carry: carry.len()/8={} < self.n()={}",
-        tmp_bytes.len() >> 3,
-        n
-    );
+    let size: usize = a.size();
 
     #[cfg(debug_assertions)]
     {
-        assert_alignement(tmp_bytes.as_ptr())
+        assert!(
+            tmp_bytes.len() >= rsh_tmp_bytes(n, size),
+            "invalid tmp_bytes: tmp_bytes.len()={} < rsh_tmp_bytes({}, {})",
+            tmp_bytes.len(),
+            n,
+            size,
+        );
+        assert_alignement(tmp_bytes.as_ptr());
     }
 
     let cols: usize = a.cols();
     let cols_steps: usize = k / log_base2k;
-    a.raw_mut().rotate_right(n * cols_steps);
+    a.raw_mut().rotate_right(n * size * cols_steps);
 
     unsafe {
-        znx::znx_zero_i64_ref((n * cols_steps) as u64, a.as_mut_ptr());
+        znx::znx_zero_i64_ref((n * size * cols_steps) as u64, a.as_mut_ptr());
     }
 
     let k_rem = k % log_base2k;
@@ -304,7 +382,7 @@ pub fn rsh(log_base2k: usize, a: &mut VecZnx, k: usize, tmp_bytes: &mut [u8]) {
         let carry_i64: &mut [i64] = cast_mut(tmp_bytes);
 
         unsafe {
-            znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr());
+            znx::znx_zero_i64_ref((n * size) as u64, carry_i64.as_mut_ptr());
         }
 
         let log_base2k: usize = log_base2k;
@@ -330,13 +408,13 @@ pub trait VecZnxOps {
     /// # Arguments
     ///
+    /// * `size`: the number of stacked polynomials.
     /// * `cols`: the number of cols.
-    fn new_vec_znx(&self, cols: usize) -> VecZnx;
+    fn new_vec_znx(&self, size: usize, cols: usize) -> VecZnx;
 
     /// Returns the minimum number of bytes necessary to allocate
     /// a new [VecZnx] through [VecZnx::from_bytes].
-    fn bytes_of_vec_znx(&self, cols: usize) -> usize;
+    fn bytes_of_vec_znx(&self, size: usize, cols: usize) -> usize;
 
-    fn vec_znx_normalize_tmp_bytes(&self) -> usize;
+    fn vec_znx_normalize_tmp_bytes(&self, size: usize) -> usize;
 
     /// c <- a + b.
     fn vec_znx_add(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx);
@@ -389,162 +467,216 @@ pub trait VecZnxOps {
 }
 
 impl VecZnxOps for Module {
-    fn new_vec_znx(&self, cols: usize) -> VecZnx {
-        VecZnx::new(self.n(), cols)
+    fn new_vec_znx(&self, size: usize, cols: usize) -> VecZnx {
+        VecZnx::new(self.n(), size, cols)
     }
 
-    fn bytes_of_vec_znx(&self, cols: usize) -> usize {
-        self.n() * cols * 8
+    fn bytes_of_vec_znx(&self, size: usize, cols: usize) -> usize {
+        bytes_of_vec_znx(self.n(), size, cols)
     }
 
-    fn vec_znx_normalize_tmp_bytes(&self) -> usize {
-        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(self.ptr) as usize }
+    fn vec_znx_normalize_tmp_bytes(&self, size: usize) -> usize {
+        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(self.ptr) as usize * size }
     }
 
     // c <- a + b
     fn vec_znx_add(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(c.n(), n);
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_add(
                 self.ptr,
                 c.as_mut_ptr(),
                 c.cols() as u64,
-                c.n() as u64,
+                (n * c.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 b.as_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
             )
         }
     }
 
     // b <- a + b
     fn vec_znx_add_inplace(&self, b: &mut VecZnx, a: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_add(
                 self.ptr,
                 b.as_mut_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 b.as_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
             )
         }
     }
 
     // c <- a - b
     fn vec_znx_sub(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(c.n(), n);
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_sub(
                 self.ptr,
                 c.as_mut_ptr(),
                 c.cols() as u64,
-                c.n() as u64,
+                (n * c.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 b.as_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
             )
         }
     }
 
     // b <- a - b
     fn vec_znx_sub_ab_inplace(&self, b: &mut VecZnx, a: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_sub(
                 self.ptr,
                 b.as_mut_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 b.as_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
             )
         }
     }
 
     // b <- b - a
     fn vec_znx_sub_ba_inplace(&self, b: &mut VecZnx, a: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_sub(
                 self.ptr,
                 b.as_mut_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 b.as_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
             )
         }
     }
 
     fn vec_znx_negate(&self, b: &mut VecZnx, a: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_negate(
                 self.ptr,
                 b.as_mut_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
             )
         }
     }
 
     fn vec_znx_negate_inplace(&self, a: &mut VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_negate(
                 self.ptr,
                 a.as_mut_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
             )
         }
     }
 
     fn vec_znx_rotate(&self, k: i64, b: &mut VecZnx, a: &VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_rotate(
                 self.ptr,
                 k,
                 b.as_mut_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
            )
         }
     }
 
     fn vec_znx_rotate_inplace(&self, k: i64, a: &mut VecZnx) {
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_rotate(
                 self.ptr,
                 k,
                 a.as_mut_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
             )
         }
     }
@@ -562,18 +694,22 @@ impl VecZnxOps for Module {
     ///
     /// The method will panic if the argument `a` is greater than `a.cols()`.
     fn vec_znx_automorphism(&self, k: i64, b: &mut VecZnx, a: &VecZnx) {
-        debug_assert_eq!(a.n(), self.n());
-        debug_assert_eq!(b.n(), self.n());
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+            assert_eq!(b.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.ptr,
                 k,
                 b.as_mut_ptr(),
                 b.cols() as u64,
-                b.n() as u64,
+                (n * b.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
             );
         }
     }
@@ -590,17 +726,21 @@ impl VecZnxOps for Module {
     ///
     /// The method will panic if the argument `cols` is greater than `self.cols()`.
     fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx) {
-        debug_assert_eq!(a.n(), self.n());
+        let n: usize = self.n();
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), n);
+        }
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.ptr,
                 k,
                 a.as_mut_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
                 a.as_ptr(),
                 a.cols() as u64,
-                a.n() as u64,
+                (n * a.size()) as u64,
             );
         }
     }
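With these signature changes, every allocation site now states the stack size explicitly. A minimal sketch against the updated `VecZnxOps` methods, assuming an already-constructed `Module` (its constructor is outside this patch):

    use base2k::{Module, VecZnx, VecZnxOps};

    fn alloc_sketch(module: &Module) {
        // One stacked polynomial: the common case, as in the updated examples.
        let single: VecZnx = module.new_vec_znx(1, 5);
        // Three stacked polynomials sharing one contiguous allocation.
        let stacked: VecZnx = module.new_vec_znx(3, 5);
        // bytes_of_vec_znx is linear in the stack size: n * size * cols * 8.
        assert_eq!(
            module.bytes_of_vec_znx(3, 5),
            3 * module.bytes_of_vec_znx(1, 5)
        );
        let _ = (single, stacked);
    }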
diff --git a/base2k/src/vec_znx_dft.rs b/base2k/src/vec_znx_dft.rs
index 7798298..188d13b 100644
--- a/base2k/src/vec_znx_dft.rs
+++ b/base2k/src/vec_znx_dft.rs
@@ -317,7 +317,7 @@ mod tests {
         let cols: usize = 2;
         let log_base2k: usize = 17;
 
-        let mut a: VecZnx = module.new_vec_znx(cols);
+        let mut a: VecZnx = module.new_vec_znx(1, cols);
         let mut a_dft: VecZnxDft = module.new_vec_znx_dft(cols);
         let mut b_dft: VecZnxDft = module.new_vec_znx_dft(cols);

diff --git a/base2k/src/vmp.rs b/base2k/src/vmp.rs
index 1ffdfc0..90ca9c8 100644
--- a/base2k/src/vmp.rs
+++ b/base2k/src/vmp.rs
@@ -424,10 +424,10 @@ impl VmpPMatOps for Module {
     }
 
     fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, tmp_bytes: &mut [u8]) {
-        debug_assert_eq!(a.len(), b.cols() * self.n());
-        debug_assert!(tmp_bytes.len() >= self.vmp_prepare_tmp_bytes(b.rows(), b.cols()));
         #[cfg(debug_assertions)]
         {
+            assert_eq!(a.len(), b.cols() * self.n());
+            assert!(tmp_bytes.len() >= self.vmp_prepare_tmp_bytes(b.rows(), b.cols()));
             assert_alignement(tmp_bytes.as_ptr());
         }
         unsafe {
@@ -642,7 +642,7 @@ mod tests {
         let vpmat_rows: usize = 4;
         let vpmat_cols: usize = 5;
         let log_base2k: usize = 8;
-        let mut a: VecZnx = module.new_vec_znx(vpmat_cols);
+        let mut a: VecZnx = module.new_vec_znx(1, vpmat_cols);
         let mut a_dft: VecZnxDft = module.new_vec_znx_dft(vpmat_cols);
         let mut a_big: VecZnxBig = module.new_vec_znx_big(vpmat_cols);
         let mut b_big: VecZnxBig = module.new_vec_znx_big(vpmat_cols);