prototype trait for Elem<T> + new ciphertext for VmpPMat

Jean-Philippe Bossuat
2025-02-18 11:04:13 +01:00
parent fdc2f3ac42
commit d486e89761
21 changed files with 767 additions and 811 deletions

View File

@@ -8,9 +8,9 @@ use sampling::source::Source;
 fn main() {
     let n: usize = 16;
     let log_base2k: usize = 18;
-    let limbs: usize = 3;
-    let msg_limbs: usize = 2;
-    let log_scale: usize = msg_limbs * log_base2k - 5;
+    let cols: usize = 3;
+    let msg_cols: usize = 2;
+    let log_scale: usize = msg_cols * log_base2k - 5;
     let module: Module = Module::new::<FFT64>(n);
     let mut carry: Vec<u8> = vec![0; module.vec_znx_big_normalize_tmp_bytes()];
@@ -18,7 +18,7 @@ fn main() {
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
-    let mut res: VecZnx = module.new_vec_znx(limbs);
+    let mut res: VecZnx = module.new_vec_znx(cols);
     // s <- Z_{-1, 0, 1}[X]/(X^{N}+1)
     let mut s: Scalar = Scalar::new(n);
@@ -31,22 +31,22 @@ fn main() {
     module.svp_prepare(&mut s_ppol, &s);
     // a <- Z_{2^prec}[X]/(X^{N}+1)
-    let mut a: VecZnx = module.new_vec_znx(limbs);
-    module.fill_uniform(log_base2k, &mut a, limbs, &mut source);
+    let mut a: VecZnx = module.new_vec_znx(cols);
+    module.fill_uniform(log_base2k, &mut a, cols, &mut source);
     // Scratch space for DFT values
-    let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.limbs());
+    let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.cols());
     // Applies buf_dft <- s * a
-    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.limbs());
+    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.cols());
     // Alias scratch space
     let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big();
     // buf_big <- IDFT(buf_dft) (not normalized)
-    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.limbs());
+    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.cols());
-    let mut m: VecZnx = module.new_vec_znx(msg_limbs);
+    let mut m: VecZnx = module.new_vec_znx(msg_cols);
     let mut want: Vec<i64> = vec![0; n];
     want.iter_mut()
@@ -60,12 +60,12 @@ fn main() {
     module.vec_znx_big_sub_small_a_inplace(&mut buf_big, &m);
     // b <- normalize(buf_big) + e
-    let mut b: VecZnx = module.new_vec_znx(limbs);
+    let mut b: VecZnx = module.new_vec_znx(cols);
     module.vec_znx_big_normalize(log_base2k, &mut b, &buf_big, &mut carry);
     module.add_normal(
         log_base2k,
         &mut b,
-        log_base2k * limbs,
+        log_base2k * cols,
         &mut source,
         3.2,
         19.0,
@@ -74,8 +74,8 @@ fn main() {
     //Decrypt
     // buf_big <- a * s
-    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.limbs());
-    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, b.limbs());
+    module.svp_apply_dft(&mut buf_dft, &s_ppol, &a, a.cols());
+    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, b.cols());
     // buf_big <- a * s + b
     module.vec_znx_big_add_small_inplace(&mut buf_big, &b);
@@ -85,9 +85,9 @@ fn main() {
     // have = m * 2^{log_scale} + e
     let mut have: Vec<i64> = vec![i64::default(); n];
-    res.decode_vec_i64(log_base2k, res.limbs() * log_base2k, &mut have);
-    let scale: f64 = (1 << (res.limbs() * log_base2k - log_scale)) as f64;
+    res.decode_vec_i64(log_base2k, res.cols() * log_base2k, &mut have);
+    let scale: f64 = (1 << (res.cols() * log_base2k - log_scale)) as f64;
     izip!(want.iter(), have.iter())
         .enumerate()
         .for_each(|(i, (a, b))| {
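A standalone check of the scaling arithmetic this example relies on, with the same parameter values hard-coded (a sketch, not part of the commit):

fn main() {
    let log_base2k: usize = 18;
    let (cols, msg_cols): (usize, usize) = (3, 2);
    let log_scale: usize = msg_cols * log_base2k - 5; // 31
    // decode_vec_i64 is called with res.cols() * log_base2k = 54 bits of
    // precision, so the decoded message sits at 2^(54 - 31) = 2^23.
    let scale: f64 = (1u64 << (cols * log_base2k - log_scale)) as f64;
    assert_eq!(log_scale, 31);
    assert_eq!(scale, (1u64 << 23) as f64);
}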

View File

@@ -4,31 +4,31 @@ use base2k::{
 };
 fn main() {
-    let log_n = 5;
-    let n = 1 << log_n;
+    let log_n: i32 = 5;
+    let n: usize = 1 << log_n;
     let module: Module = Module::new::<FFT64>(n);
     let log_base2k: usize = 15;
-    let limbs: usize = 5;
-    let log_k: usize = log_base2k * limbs - 5;
-    let rows: usize = limbs;
-    let cols: usize = limbs + 1;
+    let cols: usize = 5;
+    let log_k: usize = log_base2k * cols - 5;
+    let rows: usize = cols;
+    let cols: usize = cols + 1;
     // Maximum size of the byte scratch needed
     let tmp_bytes: usize = module.vmp_prepare_tmp_bytes(rows, cols)
-        | module.vmp_apply_dft_tmp_bytes(limbs, limbs, rows, cols);
+        | module.vmp_apply_dft_tmp_bytes(cols, cols, rows, cols);
     let mut buf: Vec<u8> = vec![0; tmp_bytes];
     let mut a_values: Vec<i64> = vec![i64::default(); n];
     a_values[1] = (1 << log_base2k) + 1;
-    let mut a: VecZnx = module.new_vec_znx(limbs);
+    let mut a: VecZnx = module.new_vec_znx(cols);
     a.encode_vec_i64(log_base2k, log_k, &a_values, 32);
     a.normalize(log_base2k, &mut buf);
-    a.print_limbs(a.limbs(), n);
+    a.print(a.cols(), n);
     println!();
     let mut vecznx: Vec<VecZnx> = Vec::new();
@@ -40,8 +40,10 @@ fn main() {
         vecznx[i].data[i * n + 1] = 1 as i64;
     });
+    let slices: Vec<&[i64]> = vecznx.iter().map(|v| v.data.as_slice()).collect();
     let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
-    module.vmp_prepare_dblptr(&mut vmp_pmat, &vecznx, &mut buf);
+    module.vmp_prepare_dblptr(&mut vmp_pmat, &slices, &mut buf);
     let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf);
@@ -55,7 +57,7 @@ fn main() {
     let mut values_res: Vec<i64> = vec![i64::default(); n];
     res.decode_vec_i64(log_base2k, log_k, &mut values_res);
-    res.print_limbs(res.limbs(), n);
+    res.print(res.cols(), n);
     module.free();
     c_dft.free();
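The reworked vmp_prepare_dblptr call takes borrowed rows rather than owned VecZnx values. A pure-std sketch of the same collect-slices pattern (all names are stand-ins):

fn main() {
    // Stand-ins for the vecznx[i].data backing vectors above.
    let rows_data: Vec<Vec<i64>> = vec![vec![0i64; 32]; 5];
    // The double-pointer API wants &[&[i64]], so slice views are collected
    // first; this borrows rows_data and copies nothing.
    let slices: Vec<&[i64]> = rows_data.iter().map(|v| v.as_slice()).collect();
    assert_eq!(slices.len(), 5);
}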

View File

@@ -54,11 +54,10 @@ pub trait Encoding {
 impl Encoding for VecZnx {
     fn encode_vec_i64(&mut self, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
-        println!("limbs: {}", limbs);
-        assert!(limbs <= self.limbs(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}", limbs, self.limbs());
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
+        assert!(cols <= self.cols(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.cols()={}", cols, self.cols());
         let size: usize = min(data.len(), self.n());
         let log_k_rem: usize = log_base2k - (log_k % log_base2k);
@@ -67,19 +66,19 @@ impl Encoding for VecZnx {
         // values on the last limb.
         // Else we decompose values base2k.
         if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-            (0..self.limbs()).for_each(|i| unsafe {
+            (0..self.cols()).for_each(|i| unsafe {
                 znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
             });
-            self.at_mut(limbs - 1)[..size].copy_from_slice(&data[..size]);
+            self.at_mut(cols - 1)[..size].copy_from_slice(&data[..size]);
         } else {
             let mask: i64 = (1 << log_base2k) - 1;
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
             (0..steps).for_each(|i| unsafe {
                 znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
             });
-            (limbs - steps..limbs)
+            (cols - steps..cols)
                 .rev()
                 .enumerate()
                 .for_each(|(i, i_rev)| {
@@ -91,9 +90,9 @@ impl Encoding for VecZnx {
         // Case where self.prec % self.k != 0.
         if log_k_rem != log_base2k {
-            let limbs = self.limbs();
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
-            (limbs - steps..limbs).rev().for_each(|i| {
+            let cols = self.cols();
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
+            (cols - steps..cols).rev().for_each(|i| {
                 self.at_mut(i)[..size]
                     .iter_mut()
                     .for_each(|x| *x <<= log_k_rem);
@@ -102,7 +101,7 @@ impl Encoding for VecZnx {
     }
     fn decode_vec_i64(&self, log_base2k: usize, log_k: usize, data: &mut [i64]) {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
         assert!(
             data.len() >= self.n,
             "invalid data: data.len()={} < self.n()={}",
@@ -111,8 +110,8 @@ impl Encoding for VecZnx {
         );
         data.copy_from_slice(self.at(0));
         let rem: usize = log_base2k - (log_k % log_base2k);
-        (1..limbs).for_each(|i| {
-            if i == limbs - 1 && rem != log_base2k {
+        (1..cols).for_each(|i| {
+            if i == cols - 1 && rem != log_base2k {
                 let k_rem: usize = log_base2k - rem;
                 izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
                     *y = (*y << k_rem) + (x >> rem);
@@ -134,25 +133,25 @@ impl Encoding for VecZnx {
         log_max: usize,
     ) {
         assert!(i < self.n());
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
-        assert!(limbs <= self.limbs(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}", limbs, self.limbs());
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
+        assert!(cols <= self.cols(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.cols()={}", cols, self.cols());
         let log_k_rem: usize = log_base2k - (log_k % log_base2k);
-        let limbs = self.limbs();
+        let cols = self.cols();
         // If 2^{log_base2k} * 2^{log_k_rem} < 2^{63}-1, then we can simply copy
         // values on the last limb.
         // Else we decompose values base2k.
         if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-            (0..limbs - 1).for_each(|j| self.at_mut(j)[i] = 0);
-            self.at_mut(self.limbs() - 1)[i] = value;
+            (0..cols - 1).for_each(|j| self.at_mut(j)[i] = 0);
+            self.at_mut(self.cols() - 1)[i] = value;
         } else {
             let mask: i64 = (1 << log_base2k) - 1;
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
-            (0..limbs - steps).for_each(|j| self.at_mut(j)[i] = 0);
-            (limbs - steps..limbs)
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
+            (0..cols - steps).for_each(|j| self.at_mut(j)[i] = 0);
+            (cols - steps..cols)
                 .rev()
                 .enumerate()
                 .for_each(|(j, j_rev)| {
@@ -162,22 +161,22 @@ impl Encoding for VecZnx {
         // Case where self.prec % self.k != 0.
         if log_k_rem != log_base2k {
-            let limbs = self.limbs();
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
-            (limbs - steps..limbs).rev().for_each(|j| {
+            let cols = self.cols();
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
+            (cols - steps..cols).rev().for_each(|j| {
                 self.at_mut(j)[i] <<= log_k_rem;
             })
         }
     }
     fn decode_coeff_i64(&self, log_base2k: usize, log_k: usize, i: usize) -> i64 {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
         assert!(i < self.n());
         let mut res: i64 = self.data[i];
         let rem: usize = log_base2k - (log_k % log_base2k);
-        (1..limbs).for_each(|i| {
+        (1..cols).for_each(|i| {
             let x = self.data[i * self.n];
-            if i == limbs - 1 && rem != log_base2k {
+            if i == cols - 1 && rem != log_base2k {
                 let k_rem: usize = log_base2k - rem;
                 res = (res << k_rem) + (x >> rem);
             } else {
@@ -198,9 +197,9 @@ mod tests {
     fn test_set_get_i64_lo_norm() {
         let n: usize = 8;
         let log_base2k: usize = 17;
-        let limbs: usize = 5;
-        let log_k: usize = limbs * log_base2k - 5;
-        let mut a: VecZnx = VecZnx::new(n, limbs);
+        let cols: usize = 5;
+        let log_k: usize = cols * log_base2k - 5;
+        let mut a: VecZnx = VecZnx::new(n, cols);
         let mut have: Vec<i64> = vec![i64::default(); n];
         have.iter_mut()
             .enumerate()
@@ -215,9 +214,9 @@ mod tests {
     fn test_set_get_i64_hi_norm() {
         let n: usize = 8;
         let log_base2k: usize = 17;
-        let limbs: usize = 5;
-        let log_k: usize = limbs * log_base2k - 5;
-        let mut a: VecZnx = VecZnx::new(n, limbs);
+        let cols: usize = 5;
+        let log_k: usize = cols * log_base2k - 5;
+        let mut a: VecZnx = VecZnx::new(n, cols);
         let mut have: Vec<i64> = vec![i64::default(); n];
         let mut source = Source::new([1; 32]);
         have.iter_mut().for_each(|x| {
@@ -226,9 +225,9 @@ mod tests {
                 .wrapping_sub(u64::MAX / 2 + 1) as i64;
         });
         a.encode_vec_i64(log_base2k, log_k, &have, 63);
-        //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
+        //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
         let mut want = vec![i64::default(); n];
-        //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
+        //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
         a.decode_vec_i64(log_base2k, log_k, &mut want);
         izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
     }
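The column count used throughout this file is a ceiling division of the precision by the base, and log_k_rem is its complement. A worked check with assumed toy parameters (not part of the commit):

fn main() {
    let (log_base2k, log_k): (usize, usize) = (17, 80);
    let cols = (log_k + log_base2k - 1) / log_base2k;  // ceil(80/17) = 5
    let log_k_rem = log_base2k - (log_k % log_base2k); // 17 - 12 = 5
    // With log_max = 63: 63 + 5 >= 63, so encode_vec_i64 takes the
    // base-2^k decomposition branch rather than the plain copy branch.
    assert_eq!((cols, log_k_rem), (5, 5));
}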

View File

@@ -7,10 +7,6 @@ pub trait Infos {
     /// Returns the base two logarithm of the ring dimension of the receiver.
     fn log_n(&self) -> usize;
-    /// Returns the number of limbs of the receiver.
-    /// This method is equivalent to [Infos::cols].
-    fn limbs(&self) -> usize;
-
     /// Returns the number of columns of the receiver.
     /// This method is equivalent to [Infos::limbs].
     fn cols(&self) -> usize;
@@ -30,11 +26,6 @@ impl Infos for VecZnx {
         self.n
     }
-    /// Returns the number of limbs of the [VecZnx].
-    fn limbs(&self) -> usize {
-        self.data.len() / self.n
-    }
-
     /// Returns the number of limbs of the [VecZnx].
     fn cols(&self) -> usize {
         self.data.len() / self.n
@@ -57,11 +48,6 @@ impl Infos for VecZnxBorrow {
         self.n
     }
-    /// Returns the number of limbs of the [VecZnx].
-    fn limbs(&self) -> usize {
-        self.limbs
-    }
-
     /// Returns the number of limbs of the [VecZnx].
     fn cols(&self) -> usize {
         self.limbs
@@ -83,12 +69,6 @@ impl Infos for VmpPMat {
         (usize::BITS - (self.n() - 1).leading_zeros()) as _
     }
-    /// Returns the number of limbs of each [VecZnxDft].
-    /// This method is equivalent to [Self::cols].
-    fn limbs(&self) -> usize {
-        self.cols
-    }
-
     /// Returns the number of rows (i.e. of [VecZnxDft]) of the [VmpPMat]
     fn rows(&self) -> usize {
         self.rows
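Since limbs() and cols() returned the same value for every implementor, dropping limbs() is a mechanical rename at call sites, e.g. (hypothetical caller):

let size = a.n() * a.limbs(); // before
let size = a.n() * a.cols();  // after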

View File

@@ -31,6 +31,7 @@ pub use vmp::*;
 pub const GALOISGENERATOR: u64 = 5;
+#[allow(dead_code)]
 fn is_aligned<T>(ptr: *const T, align: usize) -> bool {
     (ptr as usize) % align == 0
 }

View File

@@ -3,8 +3,8 @@ use rand_distr::{Distribution, Normal};
 use sampling::source::Source;
 pub trait Sampling<T: VecZnxApi + Infos> {
-    /// Fills the first `limbs` limbs with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
-    fn fill_uniform(&self, log_base2k: usize, a: &mut T, limbs: usize, source: &mut Source);
+    /// Fills the first `cols` cols with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
+    fn fill_uniform(&self, log_base2k: usize, a: &mut T, cols: usize, source: &mut Source);
     /// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
     fn add_dist_f64<D: Distribution<f64>>(
@@ -30,11 +30,11 @@ pub trait Sampling<T: VecZnxApi + Infos> {
 }
 impl<T: VecZnxApi + Infos> Sampling<T> for Module {
-    fn fill_uniform(&self, log_base2k: usize, a: &mut T, limbs: usize, source: &mut Source) {
+    fn fill_uniform(&self, log_base2k: usize, a: &mut T, cols: usize, source: &mut Source) {
         let base2k: u64 = 1 << log_base2k;
         let mask: u64 = base2k - 1;
         let base2k_half: i64 = (base2k >> 1) as i64;
-        let size: usize = a.n() * limbs;
+        let size: usize = a.n() * cols;
         a.raw_mut()[..size]
             .iter_mut()
             .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
@@ -58,7 +58,7 @@ impl<T: VecZnxApi + Infos> Sampling<T> for Module {
         let log_base2k_rem: usize = log_k % log_base2k;
         if log_base2k_rem != 0 {
-            a.at_mut(a.limbs() - 1).iter_mut().for_each(|a| {
+            a.at_mut(a.cols() - 1).iter_mut().for_each(|a| {
                 let mut dist_f64: f64 = dist.sample(source);
                 while dist_f64.abs() > bound {
                     dist_f64 = dist.sample(source)
@@ -66,7 +66,7 @@ impl<T: VecZnxApi + Infos> Sampling<T> for Module {
                 *a += (dist_f64.round() as i64) << log_base2k_rem
             });
         } else {
-            a.at_mut(a.limbs() - 1).iter_mut().for_each(|a| {
+            a.at_mut(a.cols() - 1).iter_mut().for_each(|a| {
                 let mut dist_f64: f64 = dist.sample(source);
                 while dist_f64.abs() > bound {
                     dist_f64 = dist.sample(source)
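fill_uniform draws raw values in [0, 2^log_base2k) and recenters them around zero. A standalone sketch of that arithmetic with an assumed toy width (not part of the commit):

fn main() {
    let log_base2k: usize = 4;
    let base2k: u64 = 1 << log_base2k;           // 16
    let mask: u64 = base2k - 1;                  // 0b1111, as in the impl
    let base2k_half: i64 = (base2k >> 1) as i64; // 8
    let raw: u64 = 13 & mask;                    // stand-in for source.next_u64n(base2k, mask)
    let centered: i64 = raw as i64 - base2k_half; // 5, in [-8, 8)
    assert!((-base2k_half..base2k_half).contains(&centered));
}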

View File

@@ -77,8 +77,8 @@ impl SvpPPol {
         SvpPPol(bytes.as_mut_ptr() as *mut svp::svp_ppol_t, size)
     }
-    /// Returns the number of limbs of the [SvpPPol], which is always 1.
-    pub fn limbs(&self) -> usize {
+    /// Returns the number of cols of the [SvpPPol], which is always 1.
+    pub fn cols(&self) -> usize {
         1
     }
 }
@@ -101,7 +101,7 @@ pub trait SvpPPolOps {
         c: &mut VecZnxDft,
         a: &SvpPPol,
         b: &T,
-        b_limbs: usize,
+        b_cols: usize,
     );
 }
@@ -123,22 +123,22 @@ impl SvpPPolOps for Module {
         c: &mut VecZnxDft,
         a: &SvpPPol,
         b: &T,
-        b_limbs: usize,
+        b_cols: usize,
     ) {
         assert!(
-            c.limbs() >= b_limbs,
-            "invalid c_vector: c_vector.limbs()={} < b.limbs()={}",
-            c.limbs(),
-            b_limbs
+            c.cols() >= b_cols,
+            "invalid c_vector: c_vector.cols()={} < b.cols()={}",
+            c.cols(),
+            b_cols
         );
         unsafe {
            svp::svp_apply_dft(
                self.0,
                c.0,
-                b_limbs as u64,
+                b_cols as u64,
                a.0,
                b.as_ptr(),
-                b_limbs as u64,
+                b_cols as u64,
                b.n() as u64,
            )
        }
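svp_apply_dft multiplies each of the first b_cols limbs of b by the single prepared polynomial, so the output needs at least that many limbs; the assert above enforces exactly this. A trivial standalone restatement with assumed sizes:

fn main() {
    let (c_cols, b_cols): (usize, usize) = (4, 3);
    // one output limb per processed input limb
    assert!(c_cols >= b_cols, "c.cols()={} < b_cols={}", c_cols, b_cols);
}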

View File

@@ -8,19 +8,66 @@ use itertools::izip;
 use std::cmp::min;
 pub trait VecZnxApi {
+    type Owned: VecZnxApi + Infos;
+
+    fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> Self::Owned;
+
     /// Returns the minimum size of the [u8] array required to assign a
-    /// new backend array to a [VecZnx] through [VecZnx::from_bytes].
+    /// new backend array.
     fn bytes_of(n: usize, limbs: usize) -> usize;
+    /// Returns the backing array.
     fn raw(&self) -> &[i64];
+    /// Returns the mutable backing array.
     fn raw_mut(&mut self) -> &mut [i64];
+    /// Returns a non-mutable pointer to the backing array.
     fn as_ptr(&self) -> *const i64;
+    /// Returns a mutable pointer to the backing array.
     fn as_mut_ptr(&mut self) -> *mut i64;
+    /// Returns a non-mutable reference to the i-th limb.
     fn at(&self, i: usize) -> &[i64];
+    /// Returns a mutable reference to the i-th limb.
     fn at_mut(&mut self, i: usize) -> &mut [i64];
+    /// Returns a non-mutable pointer to the i-th limb.
     fn at_ptr(&self, i: usize) -> *const i64;
+    /// Returns a mutable pointer to the i-th limb.
     fn at_mut_ptr(&mut self, i: usize) -> *mut i64;
+    /// Zeroes the backing array.
     fn zero(&mut self);
     fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]);
+    /// Right shifts the coefficients by k bits.
+    ///
+    /// # Arguments
+    ///
+    /// * `log_base2k`: the base two logarithm of the coefficients decomposition.
+    /// * `k`: the shift amount.
+    /// * `carry`: scratch space of size at least equal to self.n() * self.limbs() << 3.
+    ///
+    /// # Panics
+    ///
+    /// The method will panic if carry.len() < self.n() * self.limbs() << 3.
+    fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]);
+
+    /// If self.n() > a.n(): Extracts X^{i*self.n()/a.n()} -> X^{i}.
+    /// If self.n() < a.n(): Extracts X^{i} -> X^{i*a.n()/self.n()}.
+    ///
+    /// # Arguments
+    ///
+    /// * `a`: the receiver polynomial in which the extracted coefficients are stored.
+    fn switch_degree<T: VecZnxApi + Infos>(&self, a: &mut T)
+    where
+        Self: AsRef<T>;
+
+    fn print(&self, limbs: usize, n: usize);
 }
 pub fn bytes_of_vec_znx(n: usize, limbs: usize) -> usize {
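The rsh contract documented above splits the shift into whole-limb and sub-limb parts. A standalone sketch of that bookkeeping with assumed values (not part of the commit):

fn main() {
    let (log_base2k, k): (usize, usize) = (5, 12);
    let limb_steps = k / log_base2k; // 2 whole limbs rotated away
    let k_rem = k % log_base2k;      // remaining 2-bit in-limb shift
    let (n, limbs): (usize, usize) = (16, 4);
    let min_carry_bytes = n * limbs << 3; // scratch bound from the doc comment
    assert_eq!((limb_steps, k_rem, min_carry_bytes), (2, 2, 512));
}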
@@ -33,14 +80,16 @@ pub struct VecZnxBorrow {
     pub data: *mut i64,
 }
-impl VecZnxBorrow {
+impl VecZnxApi for VecZnxBorrow {
+    type Owned = VecZnxBorrow;
+
     /// Returns a new struct implementing [VecZnxBorrow] with the provided data as backing array.
     ///
     /// The struct will *NOT* take ownership of buf[..[VecZnx::bytes_of]]
     ///
     /// User must ensure that data is properly alligned and that
     /// the size of data is at least equal to [VecZnx::bytes_of].
-    pub fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> VecZnxBorrow {
+    fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> Self::Owned {
         let size = Self::bytes_of(n, limbs);
         assert!(
             bytes.len() >= size,
@@ -56,9 +105,7 @@ impl VecZnxBorrow {
             data: cast_mut(&mut bytes[..size]).as_mut_ptr(),
         }
     }
-}
-
-impl VecZnxApi for VecZnxBorrow {
     fn bytes_of(n: usize, limbs: usize) -> usize {
         bytes_of_vec_znx(n, limbs)
     }
@@ -104,39 +151,35 @@ impl VecZnxApi for VecZnxBorrow {
     }
     fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) {
-        assert!(
-            carry.len() >= self.n() * 8,
-            "invalid carry: carry.len()={} < self.n()={}",
-            carry.len(),
-            self.n()
-        );
-        let carry_i64: &mut [i64] = cast_mut(carry);
-        unsafe {
-            znx::znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
-            (0..self.limbs()).rev().for_each(|i| {
-                znx::znx_normalize(
-                    self.n as u64,
-                    log_base2k as u64,
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                )
-            });
-        }
+        normalize(log_base2k, self, carry)
     }
+    fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) {
+        rsh(log_base2k, self, k, carry)
+    }
+    fn switch_degree<T: VecZnxApi + Infos>(&self, a: &mut T)
+    where
+        Self: AsRef<T>,
+    {
+        switch_degree(a, self.as_ref());
+    }
+    fn print(&self, limbs: usize, n: usize) {
+        (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
+    }
 }
-impl VecZnx {
+impl VecZnxApi for VecZnx {
+    type Owned = VecZnx;
+
     /// Returns a new struct implementing [VecZnx] with the provided data as backing array.
     ///
     /// The struct will take ownership of buf[..[VecZnx::bytes_of]]
     ///
     /// User must ensure that data is properly alligned and that
     /// the size of data is at least equal to [VecZnx::bytes_of].
-    pub fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> VecZnx {
+    fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> Self::Owned {
         let size = Self::bytes_of(n, limbs);
         assert!(
             buf.len() >= size,
@@ -152,9 +195,7 @@ impl VecZnx {
             data: alias_mut_slice_to_vec(cast_mut(&mut buf[..size])),
         }
     }
-}
-
-impl VecZnxApi for VecZnx {
     fn bytes_of(n: usize, limbs: usize) -> usize {
         bytes_of_vec_znx(n, limbs)
     }
@@ -167,66 +208,53 @@ impl VecZnxApi for VecZnx {
         &mut self.data
     }
-    /// Returns a non-mutable pointer to the backing array of the [VecZnx].
     fn as_ptr(&self) -> *const i64 {
         self.data.as_ptr()
     }
-    /// Returns a mutable pointer to the backing array of the [VecZnx].
     fn as_mut_ptr(&mut self) -> *mut i64 {
         self.data.as_mut_ptr()
     }
-    /// Returns a non-mutable reference to the i-th limb of the [VecZnx].
     fn at(&self, i: usize) -> &[i64] {
         let n: usize = self.n();
         &self.raw()[n * i..n * (i + 1)]
     }
-    /// Returns a mutable reference to the i-th limb of the [VecZnx].
     fn at_mut(&mut self, i: usize) -> &mut [i64] {
         let n: usize = self.n();
         &mut self.raw_mut()[n * i..n * (i + 1)]
     }
-    /// Returns a non-mutable pointer to the i-th limb of the [VecZnx].
     fn at_ptr(&self, i: usize) -> *const i64 {
         &self.data[i * self.n] as *const i64
     }
-    /// Returns a mutable pointer to the i-th limb of the [VecZnx].
     fn at_mut_ptr(&mut self, i: usize) -> *mut i64 {
         &mut self.data[i * self.n] as *mut i64
     }
-    /// Zeroes the backing array of the [VecZnx].
     fn zero(&mut self) {
         unsafe { znx::znx_zero_i64_ref(self.data.len() as u64, self.data.as_mut_ptr()) }
     }
     fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) {
-        assert!(
-            carry.len() >= self.n() * 8,
-            "invalid carry: carry.len()={} < self.n()={}",
-            carry.len(),
-            self.n()
-        );
-        let carry_i64: &mut [i64] = cast_mut(carry);
-        unsafe {
-            znx::znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
-            (0..self.limbs()).rev().for_each(|i| {
-                znx::znx_normalize(
-                    self.n as u64,
-                    log_base2k as u64,
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                )
-            });
-        }
+        normalize(log_base2k, self, carry)
     }
+    fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) {
+        rsh(log_base2k, self, k, carry)
+    }
+    fn switch_degree<T: VecZnxApi + Infos>(&self, a: &mut T)
+    where
+        Self: AsRef<T>,
+    {
+        switch_degree(a, self.as_ref())
+    }
+    fn print(&self, limbs: usize, n: usize) {
+        (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
+    }
 }
@@ -269,31 +297,70 @@ impl VecZnx {
         }
         self.data
-            .truncate((self.limbs() - k / log_base2k) * self.n());
+            .truncate((self.cols() - k / log_base2k) * self.n());
         let k_rem: usize = k % log_base2k;
         if k_rem != 0 {
             let mask: i64 = ((1 << (log_base2k - k_rem - 1)) - 1) << k_rem;
-            self.at_mut(self.limbs() - 1)
+            self.at_mut(self.cols() - 1)
                 .iter_mut()
                 .for_each(|x: &mut i64| *x &= mask)
         }
     }
+}
+
+pub fn switch_degree<T: VecZnxApi + Infos>(b: &mut T, a: &T) {
+    let (n_in, n_out) = (a.n(), b.n());
+    let (gap_in, gap_out): (usize, usize);
+    if n_in > n_out {
+        (gap_in, gap_out) = (n_in / n_out, 1)
+    } else {
+        (gap_in, gap_out) = (1, n_out / n_in);
+        b.zero();
+    }
+    let limbs = min(a.cols(), b.cols());
+    (0..limbs).for_each(|i| {
+        izip!(
+            a.at(i).iter().step_by(gap_in),
+            b.at_mut(i).iter_mut().step_by(gap_out)
+        )
+        .for_each(|(x_in, x_out)| *x_out = *x_in);
+    });
+}
+
+fn normalize<T: VecZnxApi + Infos>(log_base2k: usize, a: &mut T, carry: &mut [u8]) {
+    let n: usize = a.n();
+    assert!(
+        carry.len() >= n * 8,
+        "invalid carry: carry.len()={} < self.n()={}",
+        carry.len(),
+        n
+    );
+    let carry_i64: &mut [i64] = cast_mut(carry);
+    unsafe {
+        znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr());
+        (0..a.cols()).rev().for_each(|i| {
+            znx::znx_normalize(
+                n as u64,
+                log_base2k as u64,
+                a.at_mut_ptr(i),
+                carry_i64.as_mut_ptr(),
+                a.at_mut_ptr(i),
+                carry_i64.as_mut_ptr(),
+            )
+        });
+    }
+}
+
-    /// Right shifts the coefficients by k bits.
-    ///
-    /// # Arguments
-    ///
-    /// * `log_base2k`: the base two logarithm of the coefficients decomposition.
-    /// * `k`: the shift amount.
-    /// * `carry`: scratch space of size at least equal to self.n() * self.limbs() << 3.
-    ///
-    /// # Panics
-    ///
-    /// The method will panic if carry.len() < self.n() * self.limbs() << 3.
-    pub fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) {
-        let n: usize = self.n();
+pub fn rsh<T: VecZnxApi + Infos>(log_base2k: usize, a: &mut T, k: usize, carry: &mut [u8]) {
+    let n: usize = a.n();
     assert!(
         carry.len() >> 3 >= n,
@@ -302,12 +369,12 @@
         n
     );
-    let limbs: usize = self.limbs();
+    let limbs: usize = a.cols();
     let limbs_steps: usize = k / log_base2k;
-    self.data.rotate_right(self.n * limbs_steps);
+    a.raw_mut().rotate_right(n * limbs_steps);
     unsafe {
-        znx::znx_zero_i64_ref((self.n * limbs_steps) as u64, self.data.as_mut_ptr());
+        znx::znx_zero_i64_ref((n * limbs_steps) as u64, a.as_mut_ptr());
     }
     let k_rem = k % log_base2k;
@@ -323,7 +390,7 @@
     let log_base2k: usize = log_base2k;
     (limbs_steps..limbs).for_each(|i| {
-        izip!(carry_i64.iter_mut(), self.at_mut(i).iter_mut()).for_each(|(ci, xi)| {
+        izip!(carry_i64.iter_mut(), a.at_mut(i).iter_mut()).for_each(|(ci, xi)| {
             *xi += *ci << log_base2k;
             *ci = *xi & mask;
             *xi /= 1 << k_rem;
@@ -332,39 +399,6 @@
         });
     }
-    /// If self.n() > a.n(): Extracts X^{i*self.n()/a.n()} -> X^{i}.
-    /// If self.n() < a.n(): Extracts X^{i} -> X^{i*a.n()/self.n()}.
-    ///
-    /// # Arguments
-    ///
-    /// * `a`: the receiver polynomial in which the extracted coefficients are stored.
-    pub fn switch_degree(&self, a: &mut VecZnx) {
-        let (n_in, n_out) = (self.n(), a.n());
-        let (gap_in, gap_out): (usize, usize);
-        if n_in > n_out {
-            (gap_in, gap_out) = (n_in / n_out, 1)
-        } else {
-            (gap_in, gap_out) = (1, n_out / n_in);
-            a.zero();
-        }
-        let limbs = min(self.limbs(), a.limbs());
-        (0..limbs).for_each(|i| {
-            izip!(
-                self.at(i).iter().step_by(gap_in),
-                a.at_mut(i).iter_mut().step_by(gap_out)
-            )
-            .for_each(|(x_in, x_out)| *x_out = *x_in);
-        });
-    }
-    pub fn print_limbs(&self, limbs: usize, n: usize) {
-        (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
-    }
-}
 pub trait VecZnxOps {
     /// Allocates a new [VecZnx].
     ///
@@ -413,7 +447,7 @@ pub trait VecZnxOps {
     ///
     /// This method requires that all [VecZnx] of b have the same ring degree
     /// and that b.n() * b.len() <= a.n()
-    fn vec_znx_split(&self, b: &mut Vec<VecZnx>, a: &VecZnx, buf: &mut VecZnx);
+    fn vec_znx_split<T: VecZnxApi + Infos>(&self, b: &mut Vec<T>, a: &T, buf: &mut T);
     /// Merges the subrings a into b.
     ///
@@ -421,7 +455,7 @@ pub trait VecZnxOps {
     ///
     /// This method requires that all [VecZnx] of a have the same ring degree
     /// and that a.n() * a.len() <= b.n()
-    fn vec_znx_merge(&self, b: &mut VecZnx, a: &Vec<VecZnx>);
+    fn vec_znx_merge<T: VecZnxApi + Infos>(&self, b: &mut T, a: &Vec<T>);
 }
 impl VecZnxOps for Module {
@@ -439,13 +473,13 @@ impl VecZnxOps for Module {
             vec_znx::vec_znx_add(
                 self.0,
                 c.as_mut_ptr(),
-                c.limbs() as u64,
+                c.cols() as u64,
                 c.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 b.as_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
             )
         }
@@ -457,13 +491,13 @@ impl VecZnxOps for Module {
             vec_znx::vec_znx_add(
                 self.0,
                 b.as_mut_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 b.as_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
             )
         }
@@ -475,13 +509,13 @@ impl VecZnxOps for Module {
             vec_znx::vec_znx_sub(
                 self.0,
                 c.as_mut_ptr(),
-                c.limbs() as u64,
+                c.cols() as u64,
                 c.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 b.as_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
             )
         }
@@ -493,13 +527,13 @@ impl VecZnxOps for Module {
             vec_znx::vec_znx_sub(
                 self.0,
                 b.as_mut_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 b.as_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
             )
         }
@@ -510,10 +544,10 @@ impl VecZnxOps for Module {
             vec_znx::vec_znx_negate(
                 self.0,
                 b.as_mut_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
             )
         }
@@ -524,10 +558,10 @@ impl VecZnxOps for Module {
             vec_znx::vec_znx_negate(
                 self.0,
                 a.as_mut_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
             )
         }
@@ -539,10 +573,10 @@ impl VecZnxOps for Module {
                 self.0,
                 k,
                 a.as_mut_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 b.as_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
             )
         }
@@ -554,10 +588,10 @@ impl VecZnxOps for Module {
                 self.0,
                 k,
                 a.as_mut_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
             )
         }
@@ -603,13 +637,13 @@ impl VecZnxOps for Module {
     fn vec_znx_automorphism<T: VecZnxApi + Infos>(&self, k: i64, b: &mut T, a: &T, limbs_a: usize) {
         assert_eq!(a.n(), self.n());
         assert_eq!(b.n(), self.n());
-        assert!(a.limbs() >= limbs_a);
+        assert!(a.cols() >= limbs_a);
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.0,
                 k,
                 b.as_mut_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
                 a.as_ptr(),
                 limbs_a as u64,
@@ -660,13 +694,13 @@ impl VecZnxOps for Module {
         limbs_a: usize,
     ) {
         assert_eq!(a.n(), self.n());
-        assert!(a.limbs() >= limbs_a);
+        assert!(a.cols() >= limbs_a);
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.0,
                 k,
                 a.as_mut_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 a.as_ptr(),
                 limbs_a as u64,
@@ -675,7 +709,7 @@ impl VecZnxOps for Module {
         }
     }
-    fn vec_znx_split(&self, b: &mut Vec<VecZnx>, a: &VecZnx, buf: &mut VecZnx) {
+    fn vec_znx_split<T: VecZnxApi + Infos>(&self, b: &mut Vec<T>, a: &T, buf: &mut T) {
         let (n_in, n_out) = (a.n(), b[0].n());
         assert!(
@@ -692,16 +726,16 @@ impl VecZnxOps for Module {
         b.iter_mut().enumerate().for_each(|(i, bi)| {
             if i == 0 {
-                a.switch_degree(bi);
+                switch_degree(bi, a);
                 self.vec_znx_rotate(-1, buf, a);
             } else {
-                buf.switch_degree(bi);
+                switch_degree(bi, buf);
                 self.vec_znx_rotate_inplace(-1, buf);
             }
         })
     }
-    fn vec_znx_merge(&self, b: &mut VecZnx, a: &Vec<VecZnx>) {
+    fn vec_znx_merge<T: VecZnxApi + Infos>(&self, b: &mut T, a: &Vec<T>) {
         let (n_in, n_out) = (b.n(), a[0].n());
         assert!(
@@ -717,7 +751,7 @@ impl VecZnxOps for Module {
         });
         a.iter().enumerate().for_each(|(_, ai)| {
-            ai.switch_degree(b);
+            switch_degree(b, ai);
             self.vec_znx_rotate_inplace(-1, b);
         });
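switch_degree maps between ring degrees by striding over coefficients. A toy illustration of the n_in > n_out case (gap_in = 2) using plain slices, mirroring the izip! loop above (a sketch, not commit code):

fn main() {
    let a: [i64; 8] = [10, 11, 12, 13, 14, 15, 16, 17]; // one degree-8 limb
    let mut b = [0i64; 4];                               // one degree-4 limb
    // X^{2i} -> X^{i}: keep every gap_in-th coefficient.
    for (x_out, x_in) in b.iter_mut().zip(a.iter().step_by(2)) {
        *x_out = *x_in;
    }
    assert_eq!(b, [10, 12, 14, 16]);
}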

View File

@@ -8,39 +8,39 @@ impl VecZnxBig {
     /// Returns a new [VecZnxBig] with the provided data as backing array.
     /// User must ensure that data is properly alligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_big].
-    pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxBig {
+    pub fn from_bytes(cols: usize, data: &mut [u8]) -> VecZnxBig {
         VecZnxBig(
             data.as_mut_ptr() as *mut vec_znx_big::vec_znx_bigcoeff_t,
-            limbs,
+            cols,
         )
     }
     pub fn as_vec_znx_dft(&mut self) -> VecZnxDft {
         VecZnxDft(self.0 as *mut vec_znx_dft::vec_znx_dft_t, self.1)
     }
-    pub fn limbs(&self) -> usize {
+    pub fn cols(&self) -> usize {
         self.1
     }
 }
 pub trait VecZnxBigOps {
     /// Allocates a vector Z[X]/(X^N+1) that stores not normalized values.
-    fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig;
+    fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig;
     /// Returns a new [VecZnxBig] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxBig].
+    /// * `cols`: the number of cols of the [VecZnxBig].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_big].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_big].
-    fn new_vec_znx_big_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxBig;
+    fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig;
     /// Returns the minimum number of bytes necessary to allocate
     /// a new [VecZnxBig] through [VecZnxBig::from_bytes].
-    fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize;
+    fn bytes_of_vec_znx_big(&self, cols: usize) -> usize;
     /// b <- b - a
     fn vec_znx_big_sub_small_a_inplace<T: VecZnxApi + Infos>(&self, b: &mut VecZnxBig, a: &T);
@@ -89,22 +89,22 @@ pub trait VecZnxBigOps {
 }
 impl VecZnxBigOps for Module {
-    fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig {
-        unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, limbs as u64), limbs) }
+    fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig {
+        unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, cols as u64), cols) }
     }
-    fn new_vec_znx_big_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxBig {
+    fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig {
         assert!(
-            bytes.len() >= <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, limbs),
+            bytes.len() >= <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, cols),
             "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}",
             bytes.len(),
-            <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, limbs)
+            <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, cols)
         );
-        VecZnxBig::from_bytes(limbs, bytes)
+        VecZnxBig::from_bytes(cols, bytes)
     }
-    fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize {
-        unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, limbs as u64) as usize }
+    fn bytes_of_vec_znx_big(&self, cols: usize) -> usize {
+        unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, cols as u64) as usize }
     }
     fn vec_znx_big_sub_small_a_inplace<T: VecZnxApi + Infos>(&self, b: &mut VecZnxBig, a: &T) {
@@ -112,12 +112,12 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_sub_small_a(
             self.0,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
         )
     }
 }
@@ -132,12 +132,12 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_sub_small_a(
             self.0,
             c.0,
-            c.limbs() as u64,
+            c.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
         )
     }
 }
@@ -147,11 +147,11 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_add_small(
             self.0,
             c.0,
-            c.limbs() as u64,
+            c.cols() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -162,11 +162,11 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_add_small(
             self.0,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -194,10 +194,10 @@ impl VecZnxBigOps for Module {
             self.0,
             log_base2k as u64,
             b.as_mut_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
             tmp_bytes.as_mut_ptr(),
         )
     }
@@ -228,7 +228,7 @@ impl VecZnxBigOps for Module {
             self.0,
             log_base2k as u64,
             res.as_mut_ptr(),
-            res.limbs() as u64,
+            res.cols() as u64,
             res.n() as u64,
             a.0,
             a_range_begin as u64,
@@ -245,9 +245,9 @@ impl VecZnxBigOps for Module {
             self.0,
            gal_el,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
         );
     }
 }
@@ -258,9 +258,9 @@ impl VecZnxBigOps for Module {
             self.0,
             gal_el,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
         );
     }
 }

View File

@@ -1,7 +1,7 @@
 use crate::ffi::vec_znx_big;
 use crate::ffi::vec_znx_dft;
 use crate::ffi::vec_znx_dft::bytes_of_vec_znx_dft;
-use crate::{Infos, Module, VecZnx, VecZnxApi, VecZnxBig};
+use crate::{Infos, Module, VecZnxApi, VecZnxBig};
 pub struct VecZnxDft(pub *mut vec_znx_dft::vec_znx_dft_t, pub usize);
@@ -9,8 +9,8 @@ impl VecZnxDft {
     /// Returns a new [VecZnxDft] with the provided data as backing array.
     /// User must ensure that data is properly alligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_dft].
-    pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxDft {
-        VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, limbs)
+    pub fn from_bytes(cols: usize, data: &mut [u8]) -> VecZnxDft {
+        VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, cols)
     }
     /// Cast a [VecZnxDft] into a [VecZnxBig].
@@ -19,36 +19,36 @@ impl VecZnxDft {
     pub fn as_vec_znx_big(&mut self) -> VecZnxBig {
         VecZnxBig(self.0 as *mut vec_znx_big::vec_znx_bigcoeff_t, self.1)
     }
-    pub fn limbs(&self) -> usize {
+    pub fn cols(&self) -> usize {
         self.1
     }
 }
 pub trait VecZnxDftOps {
     /// Allocates a vector Z[X]/(X^N+1) that stores normalized in the DFT space.
-    fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft;
+    fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft;
     /// Returns a new [VecZnxDft] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxDft].
+    /// * `cols`: the number of cols of the [VecZnxDft].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_dft].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft].
-    fn new_vec_znx_dft_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxDft;
+    fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft;
     /// Returns a new [VecZnxDft] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxDft].
+    /// * `cols`: the number of cols of the [VecZnxDft].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_dft].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft].
-    fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize;
+    fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize;
     /// Returns the minimum number of bytes necessary to allocate
     /// a new [VecZnxDft] through [VecZnxDft::from_bytes].
@@ -69,33 +69,33 @@ pub trait VecZnxDftOps {
 }
 impl VecZnxDftOps for Module {
-    fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft {
-        unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, limbs as u64), limbs) }
+    fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft {
+        unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, cols as u64), cols) }
     }
-    fn new_vec_znx_dft_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxDft {
+    fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft {
         assert!(
-            bytes.len() >= <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, limbs),
+            bytes.len() >= <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, cols),
             "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}",
             bytes.len(),
-            <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, limbs)
+            <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, cols)
        );
-        VecZnxDft::from_bytes(limbs, bytes)
+        VecZnxDft::from_bytes(cols, bytes)
     }
-    fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize {
-        unsafe { bytes_of_vec_znx_dft(self.0, limbs as u64) as usize }
+    fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize {
+        unsafe { bytes_of_vec_znx_dft(self.0, cols as u64) as usize }
     }
     fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft, a_limbs: usize) {
         assert!(
-            b.limbs() >= a_limbs,
-            "invalid c_vector: b_vector.limbs()={} < a_limbs={}",
-            b.limbs(),
+            b.cols() >= a_limbs,
+            "invalid c_vector: b_vector.cols()={} < a_limbs={}",
+            b.cols(),
             a_limbs
         );
         unsafe {
-            vec_znx_dft::vec_znx_idft_tmp_a(self.0, b.0, b.limbs() as u64, a.0, a_limbs as u64)
+            vec_znx_dft::vec_znx_idft_tmp_a(self.0, b.0, b.cols() as u64, a.0, a_limbs as u64)
        }
    }
@@ -106,21 +106,21 @@ impl VecZnxDftOps for Module {
     /// b <- DFT(a)
     ///
     /// # Panics
-    /// If b.limbs < a_limbs
-    fn vec_znx_dft<T: VecZnxApi + Infos>(&self, b: &mut VecZnxDft, a: &T, a_limbs: usize) {
+    /// If b.cols < a_cols
+    fn vec_znx_dft<T: VecZnxApi + Infos>(&self, b: &mut VecZnxDft, a: &T, a_cols: usize) {
         assert!(
-            b.limbs() >= a_limbs,
-            "invalid a_limbs: b.limbs()={} < a_limbs={}",
-            b.limbs(),
-            a_limbs
+            b.cols() >= a_cols,
+            "invalid a_cols: b.cols()={} < a_cols={}",
+            b.cols(),
+            a_cols
        );
         unsafe {
             vec_znx_dft::vec_znx_dft(
                 self.0,
                 b.0,
-                b.limbs() as u64,
+                b.cols() as u64,
                 a.as_ptr(),
-                a_limbs as u64,
+                a_cols as u64,
                 a.n() as u64,
             )
         }
@@ -131,20 +131,20 @@ impl VecZnxDftOps for Module {
         &self,
         b: &mut VecZnxBig,
         a: &mut VecZnxDft,
-        a_limbs: usize,
+        a_cols: usize,
         tmp_bytes: &mut [u8],
     ) {
         assert!(
-            b.limbs() >= a_limbs,
-            "invalid c_vector: b.limbs()={} < a_limbs={}",
-            b.limbs(),
-            a_limbs
+            b.cols() >= a_cols,
+            "invalid c_vector: b.cols()={} < a_cols={}",
+            b.cols(),
+            a_cols
         );
         assert!(
-            a.limbs() >= a_limbs,
-            "invalid c_vector: a.limbs()={} < a_limbs={}",
-            a.limbs(),
-            a_limbs
+            a.cols() >= a_cols,
+            "invalid c_vector: a.cols()={} < a_cols={}",
+            a.cols(),
+            a_cols
        );
         assert!(
            tmp_bytes.len() <= <Module as VecZnxDftOps>::vec_znx_idft_tmp_bytes(self),
@@ -156,9 +156,9 @@ impl VecZnxDftOps for Module {
             vec_znx_dft::vec_znx_idft(
                 self.0,
                 b.0,
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.0,
-                a_limbs as u64,
+                a_cols as u64,
                 tmp_bytes.as_mut_ptr(),
             )
         }

View File

@@ -1,5 +1,5 @@
use crate::ffi::vmp; use crate::ffi::vmp;
use crate::{Infos, Module, VecZnx, VecZnxApi, VecZnxDft}; use crate::{Infos, Module, VecZnxApi, VecZnxDft};
/// Vector Matrix Product Prepared Matrix: a vector of [VecZnx], /// Vector Matrix Product Prepared Matrix: a vector of [VecZnx],
/// stored as a 3D matrix in the DFT domain in a single contiguous array. /// stored as a 3D matrix in the DFT domain in a single contiguous array.
@@ -15,7 +15,7 @@ pub struct VmpPMat {
pub data: *mut vmp::vmp_pmat_t, pub data: *mut vmp::vmp_pmat_t,
/// The number of [VecZnxDft]. /// The number of [VecZnxDft].
pub rows: usize, pub rows: usize,
/// The number of limbs in each [VecZnxDft]. /// The number of cols in each [VecZnxDft].
pub cols: usize, pub cols: usize,
/// The ring degree of each [VecZnxDft]. /// The ring degree of each [VecZnxDft].
pub n: usize, pub n: usize,
@@ -86,7 +86,7 @@ pub trait VmpPMatOps {
/// # Arguments /// # Arguments
/// ///
/// * `rows`: number of rows (number of [VecZnxDft]). /// * `rows`: number of rows (number of [VecZnxDft]).
/// * `cols`: number of cols (number of limbs of each [VecZnxDft]). /// * `cols`: number of cols (number of cols of each [VecZnxDft]).
fn new_vmp_pmat(&self, rows: usize, cols: usize) -> VmpPMat; fn new_vmp_pmat(&self, rows: usize, cols: usize) -> VmpPMat;
/// Returns the number of bytes needed as scratch space for [VmpPMatOps::vmp_prepare_contiguous]. /// Returns the number of bytes needed as scratch space for [VmpPMatOps::vmp_prepare_contiguous].
@@ -153,15 +153,17 @@ pub trait VmpPMatOps {
/// vecznx.push(module.new_vec_znx(cols)); /// vecznx.push(module.new_vec_znx(cols));
/// }); /// });
/// ///
/// let slices: Vec<&[i64]> = vecznx.iter().map(|v| v.data.as_slice()).collect();
///
/// let mut buf: Vec<u8> = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)]; /// let mut buf: Vec<u8> = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)];
/// ///
/// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
/// module.vmp_prepare_dblptr(&mut vmp_pmat, &vecznx, &mut buf); /// module.vmp_prepare_dblptr(&mut vmp_pmat, &slices, &mut buf);
/// ///
/// vmp_pmat.free(); /// vmp_pmat.free();
/// module.free(); /// module.free();
/// ``` /// ```
fn vmp_prepare_dblptr<T: VecZnxApi + Infos>(&self, b: &mut VmpPMat, a: &Vec<T>, buf: &mut [u8]); fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &[&[i64]], buf: &mut [u8]);
/// Prepares the ith-row of [VmpPMat] from a vector of [VecZnx]. /// Prepares the i-th row of [VmpPMat] from the raw data of a [VecZnx].
/// ///
@@ -175,7 +177,7 @@ pub trait VmpPMatOps {
/// The size of buf can be obtained with [VmpPMatOps::vmp_prepare_tmp_bytes]. /// The size of buf can be obtained with [VmpPMatOps::vmp_prepare_tmp_bytes].
/// ///
/// # Example /// # Example
/// ``` /// ```
/// use base2k::{Module, FFT64, VmpPMat, VmpPMatOps, VecZnx, VecZnxOps, Free}; /// use base2k::{Module, FFT64, VmpPMat, VmpPMatOps, VecZnx, VecZnxApi, VecZnxOps, Free};
/// use std::cmp::min; /// use std::cmp::min;
/// ///
/// let n: usize = 1024; /// let n: usize = 1024;
@@ -188,31 +190,25 @@ pub trait VmpPMatOps {
/// let mut buf: Vec<u8> = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)]; /// let mut buf: Vec<u8> = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)];
/// ///
/// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
/// module.vmp_prepare_row(&mut vmp_pmat, &vecznx, 0, &mut buf); /// module.vmp_prepare_row(&mut vmp_pmat, vecznx.raw(), 0, &mut buf);
/// ///
/// vmp_pmat.free(); /// vmp_pmat.free();
/// module.free(); /// module.free();
/// ``` /// ```
fn vmp_prepare_row<T: VecZnxApi + Infos>( fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, tmp_bytes: &mut [u8]);
&self,
b: &mut VmpPMat,
a: &T,
row_i: usize,
tmp_bytes: &mut [u8],
);
/// Returns the size of the scratch space necessary for [VmpPMatOps::vmp_apply_dft]. /// Returns the size of the scratch space necessary for [VmpPMatOps::vmp_apply_dft].
/// ///
/// # Arguments /// # Arguments
/// ///
/// * `c_limbs`: number of limbs of the output [VecZnxDft]. /// * `c_cols`: number of cols of the output [VecZnxDft].
/// * `a_limbs`: number of limbs of the input [VecZnx]. /// * `a_cols`: number of cols of the input [VecZnx].
/// * `rows`: number of rows of the input [VmpPMat]. /// * `rows`: number of rows of the input [VmpPMat].
/// * `cols`: number of cols of the input [VmpPMat]. /// * `cols`: number of cols of the input [VmpPMat].
fn vmp_apply_dft_tmp_bytes( fn vmp_apply_dft_tmp_bytes(
&self, &self,
c_limbs: usize, c_cols: usize,
a_limbs: usize, a_cols: usize,
rows: usize, rows: usize,
cols: usize, cols: usize,
) -> usize; ) -> usize;
@@ -223,8 +219,8 @@ pub trait VmpPMatOps {
/// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) /// where each [crate::Scalar] is a col of the input [VecZnxDft] (equivalent to an [crate::SvpPPol])
/// and each vector a [VecZnxDft] (row) of the [VmpPMat]. /// and each vector is a [VecZnxDft] (a row) of the [VmpPMat].
/// ///
/// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and
/// `j` cols, the output is a [VecZnx] of `j` limbs. /// `j` cols, the output is a [VecZnx] of `j` cols.
/// ///
/// If there is a mismatch between the dimensions, the largest valid ones are used. /// If there is a mismatch between the dimensions, the largest valid ones are used.
/// ///
@@ -249,18 +245,18 @@ pub trait VmpPMatOps {
/// let n = 1024; /// let n = 1024;
/// ///
/// let module: Module = Module::new::<FFT64>(n); /// let module: Module = Module::new::<FFT64>(n);
/// let limbs: usize = 5; /// let a_cols: usize = 5;
/// ///
/// let rows: usize = limbs; /// let rows: usize = a_cols;
/// let cols: usize = limbs + 1; /// let cols: usize = a_cols + 1;
/// let c_limbs: usize = cols; /// let c_cols: usize = cols;
/// let a_limbs: usize = limbs;
/// let tmp_bytes: usize = module.vmp_apply_dft_tmp_bytes(c_limbs, a_limbs, rows, cols); /// let tmp_bytes: usize = module.vmp_apply_dft_tmp_bytes(c_cols, a_cols, rows, cols);
/// ///
/// let mut buf: Vec<u8> = vec![0; tmp_bytes]; /// let mut buf: Vec<u8> = vec![0; tmp_bytes];
/// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
/// ///
/// let a: VecZnx = module.new_vec_znx(limbs); /// let a: VecZnx = module.new_vec_znx(a_cols);
/// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols); /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
/// module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf); /// module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf);
/// ///
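Editor's note: a sketch of the shape rule shared by the vmp_apply_* docs above, with the "largest valid dimensions" mismatch rule made explicit; for an output c of c_cols cols, an input a of a_cols cols, and a rows x cols [VmpPMat] P:

\[
c[u] \;=\; \sum_{v=0}^{\min(a_{\mathrm{cols}},\,\mathrm{rows})-1} \mathrm{DFT}(a[v]) \cdot P[v][u],
\qquad 0 \le u < \min(c_{\mathrm{cols}},\,\mathrm{cols}),
\]

so an input of `i` cols against an `i x j` matrix yields an output of `j` cols.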
@@ -280,14 +276,14 @@ pub trait VmpPMatOps {
/// ///
/// # Arguments /// # Arguments
/// ///
/// * `c_limbs`: number of limbs of the output [VecZnxDft]. /// * `c_cols`: number of cols of the output [VecZnxDft].
/// * `a_limbs`: number of limbs of the input [VecZnxDft]. /// * `a_cols`: number of cols of the input [VecZnxDft].
/// * `rows`: number of rows of the input [VmpPMat]. /// * `rows`: number of rows of the input [VmpPMat].
/// * `cols`: number of cols of the input [VmpPMat]. /// * `cols`: number of cols of the input [VmpPMat].
fn vmp_apply_dft_to_dft_tmp_bytes( fn vmp_apply_dft_to_dft_tmp_bytes(
&self, &self,
c_limbs: usize, c_cols: usize,
a_limbs: usize, a_cols: usize,
rows: usize, rows: usize,
cols: usize, cols: usize,
) -> usize; ) -> usize;
@@ -299,8 +295,8 @@ pub trait VmpPMatOps {
/// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) /// where each [crate::Scalar] is a col of the input [VecZnxDft] (equivalent to an [crate::SvpPPol])
/// and each vector a [VecZnxDft] (row) of the [VmpPMat]. /// and each vector is a [VecZnxDft] (a row) of the [VmpPMat].
/// ///
/// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and
/// `j` cols, the output is a [VecZnx] of `j` limbs. /// `j` cols, the output is a [VecZnx] of `j` cols.
/// ///
/// If there is a mismatch between the dimensions, the largest valid ones are used. /// If there is a mismatch between the dimensions, the largest valid ones are used.
/// ///
@@ -325,18 +321,18 @@ pub trait VmpPMatOps {
/// let n = 1024; /// let n = 1024;
/// ///
/// let module: Module = Module::new::<FFT64>(n); /// let module: Module = Module::new::<FFT64>(n);
/// let limbs: usize = 5; /// let a_cols: usize = 5;
/// ///
/// let rows: usize = limbs; /// let rows: usize = a_cols;
/// let cols: usize = limbs + 1; /// let cols: usize = a_cols + 1;
/// let c_limbs: usize = cols; /// let c_cols: usize = cols;
/// let a_limbs: usize = limbs;
/// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(c_limbs, a_limbs, rows, cols); /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, rows, cols);
/// ///
/// let mut buf: Vec<u8> = vec![0; tmp_bytes]; /// let mut buf: Vec<u8> = vec![0; tmp_bytes];
/// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
/// ///
/// let a_dft: VecZnxDft = module.new_vec_znx_dft(limbs); /// let a_dft: VecZnxDft = module.new_vec_znx_dft(a_cols);
/// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols); /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
/// module.vmp_apply_dft_to_dft(&mut c_dft, &a_dft, &vmp_pmat, &mut buf); /// module.vmp_apply_dft_to_dft(&mut c_dft, &a_dft, &vmp_pmat, &mut buf);
/// ///
@@ -354,8 +350,8 @@ pub trait VmpPMatOps {
/// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol]) /// where each [crate::Scalar] is a col of the input [VecZnxDft] (equivalent to an [crate::SvpPPol])
/// and each vector a [VecZnxDft] (row) of the [VmpPMat]. /// and each vector is a [VecZnxDft] (a row) of the [VmpPMat].
/// ///
/// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and
/// `j` cols, the output is a [VecZnx] of `j` limbs. /// `j` cols, the output is a [VecZnx] of `j` cols.
/// ///
/// If there is a mismatch between the dimensions, the largest valid ones are used. /// If there is a mismatch between the dimensions, the largest valid ones are used.
/// ///
@@ -379,17 +375,17 @@ pub trait VmpPMatOps {
/// let n = 1024; /// let n = 1024;
/// ///
/// let module: Module = Module::new::<FFT64>(n); /// let module: Module = Module::new::<FFT64>(n);
/// let limbs: usize = 5; /// let a_cols: usize = 5;
/// ///
/// let rows: usize = limbs; /// let rows: usize = a_cols;
/// let cols: usize = limbs + 1; /// let cols: usize = a_cols + 1;
/// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(limbs, limbs, rows, cols); /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(a_cols, a_cols, rows, cols);
/// ///
/// let mut buf: Vec<u8> = vec![0; tmp_bytes]; /// let mut buf: Vec<u8> = vec![0; tmp_bytes];
/// let a: VecZnx = module.new_vec_znx(limbs); /// let a: VecZnx = module.new_vec_znx(a_cols);
/// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols); /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
/// ///
/// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(limbs); /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(a_cols);
/// module.vmp_apply_dft_to_dft_inplace(&mut c_dft, &vmp_pmat, &mut buf); /// module.vmp_apply_dft_to_dft_inplace(&mut c_dft, &vmp_pmat, &mut buf);
/// ///
/// c_dft.free(); /// c_dft.free();
@@ -428,12 +424,7 @@ impl VmpPMatOps for Module {
} }
} }
fn vmp_prepare_dblptr<T: VecZnxApi + Infos>( fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &[&[i64]], buf: &mut [u8]) {
&self,
b: &mut VmpPMat,
a: &Vec<T>,
buf: &mut [u8],
) {
let ptrs: Vec<*const i64> = a.iter().map(|v| v.as_ptr()).collect(); let ptrs: Vec<*const i64> = a.iter().map(|v| v.as_ptr()).collect();
unsafe { unsafe {
vmp::vmp_prepare_dblptr( vmp::vmp_prepare_dblptr(
@@ -447,13 +438,7 @@ impl VmpPMatOps for Module {
} }
} }
fn vmp_prepare_row<T: VecZnxApi + Infos>( fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, buf: &mut [u8]) {
&self,
b: &mut VmpPMat,
a: &T,
row_i: usize,
buf: &mut [u8],
) {
unsafe { unsafe {
vmp::vmp_prepare_row( vmp::vmp_prepare_row(
self.0, self.0,
@@ -469,16 +454,16 @@ impl VmpPMatOps for Module {
fn vmp_apply_dft_tmp_bytes( fn vmp_apply_dft_tmp_bytes(
&self, &self,
c_limbs: usize, c_cols: usize,
a_limbs: usize, a_cols: usize,
rows: usize, rows: usize,
cols: usize, cols: usize,
) -> usize { ) -> usize {
unsafe { unsafe {
vmp::vmp_apply_dft_tmp_bytes( vmp::vmp_apply_dft_tmp_bytes(
self.0, self.0,
c_limbs as u64, c_cols as u64,
a_limbs as u64, a_cols as u64,
rows as u64, rows as u64,
cols as u64, cols as u64,
) as usize ) as usize
@@ -496,9 +481,9 @@ impl VmpPMatOps for Module {
vmp::vmp_apply_dft( vmp::vmp_apply_dft(
self.0, self.0,
c.0, c.0,
c.limbs() as u64, c.cols() as u64,
a.as_ptr(), a.as_ptr(),
a.limbs() as u64, a.cols() as u64,
a.n() as u64, a.n() as u64,
b.data(), b.data(),
b.rows() as u64, b.rows() as u64,
@@ -510,16 +495,16 @@ impl VmpPMatOps for Module {
fn vmp_apply_dft_to_dft_tmp_bytes( fn vmp_apply_dft_to_dft_tmp_bytes(
&self, &self,
c_limbs: usize, c_cols: usize,
a_limbs: usize, a_cols: usize,
rows: usize, rows: usize,
cols: usize, cols: usize,
) -> usize { ) -> usize {
unsafe { unsafe {
vmp::vmp_apply_dft_to_dft_tmp_bytes( vmp::vmp_apply_dft_to_dft_tmp_bytes(
self.0, self.0,
c_limbs as u64, c_cols as u64,
a_limbs as u64, a_cols as u64,
rows as u64, rows as u64,
cols as u64, cols as u64,
) as usize ) as usize
@@ -531,9 +516,9 @@ impl VmpPMatOps for Module {
vmp::vmp_apply_dft_to_dft( vmp::vmp_apply_dft_to_dft(
self.0, self.0,
c.0, c.0,
c.limbs() as u64, c.cols() as u64,
a.0, a.0,
a.limbs() as u64, a.cols() as u64,
b.data(), b.data(),
b.rows() as u64, b.rows() as u64,
b.cols() as u64, b.cols() as u64,
@@ -547,9 +532,9 @@ impl VmpPMatOps for Module {
vmp::vmp_apply_dft_to_dft( vmp::vmp_apply_dft_to_dft(
self.0, self.0,
b.0, b.0,
b.limbs() as u64, b.cols() as u64,
b.0, b.0,
b.limbs() as u64, b.cols() as u64,
a.data(), a.data(),
a.rows() as u64, a.rows() as u64,
a.cols() as u64, a.cols() as u64,
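// Editor's note: this in-place variant passes `b.0` as both the destination and
// the source of vmp_apply_dft_to_dft; it assumes the underlying C kernel accepts
// res == a aliasing, which is not checked here.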

View File

@@ -1,10 +1,10 @@
use base2k::{ use base2k::{
FFT64, Module, Sampling, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxDft, VecZnxDftOps, FFT64, Module, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxDft, VecZnxDftOps, VmpPMat,
VmpPMat, VmpPMatOps, alloc_aligned_u8, VmpPMatOps, alloc_aligned_u8,
}; };
use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main};
use rlwe::{ use rlwe::{
ciphertext::{Ciphertext, GadgetCiphertext}, ciphertext::{Ciphertext, new_gadget_ciphertext},
elem::Elem, elem::Elem,
encryptor::{encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes}, encryptor::{encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes},
evaluator::gadget_product_tmp_bytes, evaluator::gadget_product_tmp_bytes,
@@ -18,7 +18,7 @@ fn gadget_product_inplace(c: &mut Criterion) {
fn gadget_product<'a>( fn gadget_product<'a>(
module: &'a Module, module: &'a Module,
elem: &'a mut Elem<VecZnx>, elem: &'a mut Elem<VecZnx>,
gadget_ct: &'a GadgetCiphertext, gadget_ct: &'a Ciphertext<VmpPMat>,
tmp_bytes: &'a mut [u8], tmp_bytes: &'a mut [u8],
) -> Box<dyn FnMut() + 'a> { ) -> Box<dyn FnMut() + 'a> {
let factor: usize = 2; let factor: usize = 2;
@@ -105,7 +105,7 @@ fn gadget_product_inplace(c: &mut Criterion) {
let mut sk1_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol(); let mut sk1_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol();
params.module().svp_prepare(&mut sk1_svp_ppol, &sk1.0); params.module().svp_prepare(&mut sk1_svp_ppol, &sk1.0);
let mut gadget_ct: GadgetCiphertext = GadgetCiphertext::new( let mut gadget_ct: Ciphertext<VmpPMat> = new_gadget_ciphertext(
params.module(), params.module(),
params.log_base2k(), params.log_base2k(),
params.limbs_q(), params.limbs_q(),
@@ -123,7 +123,7 @@ fn gadget_product_inplace(c: &mut Criterion) {
&mut tmp_bytes, &mut tmp_bytes,
); );
let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); let mut ct: Ciphertext<VecZnx> = params.new_ciphertext(params.log_q());
params.encrypt_rlwe_sk_thread_safe( params.encrypt_rlwe_sk_thread_safe(
&mut ct, &mut ct,

View File

@@ -1,13 +1,11 @@
use base2k::{Encoding, FFT64, SvpPPolOps, VecZnxApi, VecZnx}; use base2k::{Encoding, FFT64, SvpPPolOps, VecZnx, VecZnxApi};
use rlwe::{ use rlwe::{
ciphertext::Ciphertext, ciphertext::Ciphertext,
decryptor::{Decryptor, decrypt_rlwe_thread_safe_tmp_byte},
encryptor::{EncryptorSk, encrypt_rlwe_sk_tmp_bytes},
keys::SecretKey, keys::SecretKey,
parameters::{Parameters, ParametersLiteral}, parameters::{Parameters, ParametersLiteral},
plaintext::Plaintext, plaintext::Plaintext,
}; };
use sampling::source::{Source, new_seed}; use sampling::source::Source;
fn main() { fn main() {
let params_lit: ParametersLiteral = ParametersLiteral { let params_lit: ParametersLiteral = ParametersLiteral {
@@ -30,8 +28,7 @@ fn main() {
let mut source: Source = Source::new([0; 32]); let mut source: Source = Source::new([0; 32]);
let mut sk: SecretKey = SecretKey::new(params.module()); let mut sk: SecretKey = SecretKey::new(params.module());
//sk.fill_ternary_hw(params.xs(), &mut source); sk.fill_ternary_hw(params.xs(), &mut source);
sk.0.0[0] = 1;
let mut want = vec![i64::default(); params.n()]; let mut want = vec![i64::default(); params.n()];
@@ -47,10 +44,10 @@ fn main() {
pt.0.value[0].normalize(log_base2k, &mut tmp_bytes); pt.0.value[0].normalize(log_base2k, &mut tmp_bytes);
println!("log_k: {}", log_k); println!("log_k: {}", log_k);
pt.0.value[0].print_limbs(pt.limbs(), 16); pt.0.value[0].print(pt.cols(), 16);
println!(); println!();
let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); let mut ct: Ciphertext<VecZnx> = params.new_ciphertext(params.log_q());
let mut source_xe: Source = Source::new([1; 32]); let mut source_xe: Source = Source::new([1; 32]);
let mut source_xa: Source = Source::new([2; 32]); let mut source_xa: Source = Source::new([2; 32]);
@@ -69,7 +66,7 @@ fn main() {
params.decrypt_rlwe_thread_safe(&mut pt, &ct, &sk_svp_ppol, &mut tmp_bytes); params.decrypt_rlwe_thread_safe(&mut pt, &ct, &sk_svp_ppol, &mut tmp_bytes);
pt.0.value[0].print_limbs(pt.limbs(), 16); pt.0.value[0].print(pt.cols(), 16);
let mut have = vec![i64::default(); params.n()]; let mut have = vec![i64::default(); params.n()];

View File

@@ -1,22 +1,15 @@
use base2k::{ use base2k::{Encoding, FFT64, SvpPPolOps, VecZnx, VecZnxApi, VmpPMat};
Encoding, FFT64, Infos, Sampling, Scalar, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxDft,
VecZnxOps,
};
use rlwe::{ use rlwe::{
ciphertext::{Ciphertext, GadgetCiphertext}, ciphertext::{Ciphertext, new_gadget_ciphertext},
decryptor::{Decryptor, decrypt_rlwe_thread_safe, decrypt_rlwe_thread_safe_tmp_byte}, decryptor::decrypt_rlwe_thread_safe,
elem::Elem, encryptor::{encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes},
encryptor::{
EncryptorSk, encrypt_grlwe_sk_thread_safe, encrypt_grlwe_sk_tmp_bytes,
encrypt_rlwe_sk_tmp_bytes,
},
evaluator::{gadget_product_inplace_thread_safe, gadget_product_tmp_bytes}, evaluator::{gadget_product_inplace_thread_safe, gadget_product_tmp_bytes},
key_generator::{gen_switching_key_thread_safe, gen_switching_key_thread_safe_tmp_bytes}, key_generator::gen_switching_key_thread_safe_tmp_bytes,
keys::{SecretKey, SwitchingKey}, keys::SecretKey,
parameters::{Parameters, ParametersLiteral}, parameters::{Parameters, ParametersLiteral},
plaintext::Plaintext, plaintext::Plaintext,
}; };
use sampling::source::{Source, new_seed}; use sampling::source::Source;
fn main() { fn main() {
let params_lit: ParametersLiteral = ParametersLiteral { let params_lit: ParametersLiteral = ParametersLiteral {
@@ -82,7 +75,7 @@ fn main() {
let mut sk1_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol(); let mut sk1_svp_ppol: base2k::SvpPPol = params.module().new_svp_ppol();
params.module().svp_prepare(&mut sk1_svp_ppol, &sk1.0); params.module().svp_prepare(&mut sk1_svp_ppol, &sk1.0);
let mut gadget_ct: GadgetCiphertext = GadgetCiphertext::new( let mut gadget_ct: Ciphertext<VmpPMat> = new_gadget_ciphertext(
params.module(), params.module(),
log_base2k, log_base2k,
params.limbs_q(), params.limbs_q(),
@@ -100,21 +93,15 @@ fn main() {
&mut tmp_bytes, &mut tmp_bytes,
); );
println!("DONE?"); let mut pt: Plaintext<VecZnx> =
Plaintext::<VecZnx>::new(params.module(), params.log_base2k(), params.log_q());
let mut pt: Plaintext<VecZnx> = Plaintext::<VecZnx>::new(
params.module(),
params.log_base2k(),
params.log_q(),
params.log_scale(),
);
let mut want = vec![i64::default(); params.n()]; let mut want = vec![i64::default(); params.n()];
want.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64); want.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
pt.0.value[0].encode_vec_i64(log_base2k, log_k, &want, 32); pt.0.value[0].encode_vec_i64(log_base2k, log_k, &want, 32);
pt.0.value[0].normalize(log_base2k, &mut tmp_bytes); pt.0.value[0].normalize(log_base2k, &mut tmp_bytes);
let mut ct: Ciphertext = params.new_ciphertext(params.log_q()); let mut ct: Ciphertext<VecZnx> = params.new_ciphertext(params.log_q());
params.encrypt_rlwe_sk_thread_safe( params.encrypt_rlwe_sk_thread_safe(
&mut ct, &mut ct,
@@ -132,10 +119,10 @@ fn main() {
&mut tmp_bytes, &mut tmp_bytes,
); );
println!("ct.limbs()={}", ct.limbs()); println!("ct.limbs()={}", ct.cols());
println!("gadget_ct.rows()={}", gadget_ct.rows()); println!("gadget_ct.rows()={}", gadget_ct.rows());
println!("gadget_ct.cols()={}", gadget_ct.cols()); println!("gadget_ct.cols()={}", gadget_ct.cols());
println!("res.limbs()={}", ct.limbs()); println!("res.limbs()={}", ct.cols());
println!(); println!();
decrypt_rlwe_thread_safe( decrypt_rlwe_thread_safe(
@@ -146,7 +133,7 @@ fn main() {
&mut tmp_bytes, &mut tmp_bytes,
); );
pt.0.value[0].print_limbs(pt.limbs(), 16); pt.0.value[0].print(pt.cols(), 16);
let mut have: Vec<i64> = vec![i64::default(); params.n()]; let mut have: Vec<i64> = vec![i64::default(); params.n()];

View File

@@ -1,47 +1,47 @@
use crate::elem::{Elem, ElemBasics}; use crate::elem::{Elem, ElemVecZnx, VecZnxCommon};
use crate::parameters::Parameters; use crate::parameters::Parameters;
use crate::plaintext::Plaintext; use crate::plaintext::Plaintext;
use base2k::{Infos, Module, VecZnx, VecZnxApi, VmpPMat, VmpPMatOps}; use base2k::{Infos, Module, VecZnx, VecZnxApi, VmpPMat};
pub struct Ciphertext(pub Elem<VecZnx>); pub struct Ciphertext<T>(pub Elem<T>);
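Editor's note: a hedged sketch of what the generic wrapper buys; both constructors appear below in this file, and the numeric parameters are illustrative only.

use base2k::{Module, VecZnx, VmpPMat};
use rlwe::ciphertext::{Ciphertext, new_gadget_ciphertext};

fn two_shapes(module: &Module) {
    // RLWE ciphertext: rows = 2 VecZnx (c0, c1).
    let ct: Ciphertext<VecZnx> = Ciphertext::new(module, 18, 54, 2);
    // Gadget ciphertext: one prepared VmpPMat with 3 rows and 2 * ceil(54/18) cols.
    let gct: Ciphertext<VmpPMat> = new_gadget_ciphertext(module, 18, 3, 54);
    let _ = (ct, gct);
}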
impl Ciphertext { impl Ciphertext<VecZnx> {
pub fn new( pub fn new(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> Self {
module: &Module, Self(Elem::<VecZnx>::new(module, log_base2k, log_q, rows))
log_base2k: usize, }
log_q: usize,
degree: usize,
log_scale: usize,
) -> Self {
Self(Elem::new(module, log_base2k, log_q, degree, log_scale))
} }
impl<T> Ciphertext<T>
where
T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
pub fn n(&self) -> usize { pub fn n(&self) -> usize {
self.0.n() self.0.n()
} }
pub fn degree(&self) -> usize {
self.0.degree()
}
pub fn log_q(&self) -> usize { pub fn log_q(&self) -> usize {
self.0.log_q() self.0.log_q
} }
pub fn limbs(&self) -> usize { pub fn rows(&self) -> usize {
self.0.limbs() self.0.rows()
} }
pub fn at(&self, i: usize) -> &(impl VecZnxApi + Infos) { pub fn cols(&self) -> usize {
self.0.cols()
}
pub fn at(&self, i: usize) -> &T {
self.0.at(i) self.0.at(i)
} }
pub fn at_mut(&mut self, i: usize) -> &mut (impl VecZnxApi + Infos) { pub fn at_mut(&mut self, i: usize) -> &mut T {
self.0.at_mut(i) self.0.at_mut(i)
} }
pub fn log_base2k(&self) -> usize { pub fn log_base2k(&self) -> usize {
self.0.log_base2k() self.0.log_base2k
} }
pub fn log_scale(&self) -> usize { pub fn log_scale(&self) -> usize {
@@ -52,87 +52,59 @@ impl Ciphertext {
self.0.zero() self.0.zero()
} }
pub fn as_plaintext(&self) -> Plaintext<VecZnx> { pub fn as_plaintext(&self) -> Plaintext<T> {
unsafe { Plaintext(std::ptr::read(&self.0)) } unsafe { Plaintext::<T>(std::ptr::read(&self.0)) }
} }
} }
impl Parameters { impl Parameters {
pub fn new_ciphertext(&self, log_q: usize) -> Ciphertext { pub fn new_ciphertext(&self, log_q: usize) -> Ciphertext<VecZnx> {
Ciphertext::new(self.module(), self.log_base2k(), log_q, self.log_scale(), 1) Ciphertext::new(self.module(), self.log_base2k(), log_q, 2)
} }
} }
pub struct GadgetCiphertext { pub fn new_gadget_ciphertext(
pub value: VmpPMat, module: &Module,
pub log_base2k: usize, log_base2k: usize,
pub log_q: usize, rows: usize,
} log_q: usize,
) -> Ciphertext<VmpPMat> {
impl GadgetCiphertext {
pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Self {
let cols: usize = (log_q + log_base2k - 1) / log_base2k; let cols: usize = (log_q + log_base2k - 1) / log_base2k;
Self { let mut elem: Elem<VmpPMat> = Elem::<VmpPMat>::new(module, log_base2k, rows, 2 * cols);
value: module.new_vmp_pmat(rows, cols * 2), elem.log_q = log_q;
log_base2k, Ciphertext(elem)
log_q,
}
} }
pub fn new_rgsw_ciphertext(
module: &Module,
log_base2k: usize,
rows: usize,
log_q: usize,
) -> Ciphertext<VmpPMat> {
let cols: usize = (log_q + log_base2k - 1) / log_base2k;
let mut elem: Elem<VmpPMat> = Elem::<VmpPMat>::new(module, log_base2k, 2 * rows, 2 * cols);
elem.log_q = log_q;
Ciphertext(elem)
}
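Editor's note: the ceil-division in both constructors is the base-2^k decomposition length. A small sketch with a hypothetical helper, `decomp_cols`, and illustrative values:

fn decomp_cols(log_q: usize, log_base2k: usize) -> usize {
    (log_q + log_base2k - 1) / log_base2k // ceil(log_q / log_base2k)
}
// e.g. decomp_cols(54, 18) == 3; the gadget then allocates 2 * 3 cols per row,
// and the RGSW variant additionally doubles the rows.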
impl Ciphertext<VmpPMat> {
pub fn n(&self) -> usize { pub fn n(&self) -> usize {
self.value.n self.0.n()
} }
pub fn rows(&self) -> usize { pub fn rows(&self) -> usize {
self.value.rows self.0.rows()
} }
pub fn cols(&self) -> usize { pub fn cols(&self) -> usize {
self.value.cols self.0.cols()
}
pub fn log_q(&self) -> usize {
self.log_q
} }
pub fn log_base2k(&self) -> usize { pub fn log_base2k(&self) -> usize {
self.log_base2k self.0.log_base2k
}
}
pub struct RGSWCiphertext {
pub value: VmpPMat,
pub log_base2k: usize,
pub log_q: usize,
}
impl RGSWCiphertext {
pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Self {
let cols: usize = (log_q + log_base2k - 1) / log_base2k;
Self {
value: module.new_vmp_pmat(rows * 2, cols * 2),
log_base2k,
log_q,
}
}
pub fn n(&self) -> usize {
self.value.n
}
pub fn rows(&self) -> usize {
self.value.rows
}
pub fn cols(&self) -> usize {
self.value.cols
} }
pub fn log_q(&self) -> usize { pub fn log_q(&self) -> usize {
self.log_q self.0.log_q
}
pub fn log_base2k(&self) -> usize {
self.log_base2k
} }
} }

View File

@@ -1,12 +1,12 @@
use crate::{ use crate::{
ciphertext::Ciphertext, ciphertext::Ciphertext,
elem::{Elem, ElemBasics}, elem::{Elem, ElemVecZnx, VecZnxCommon},
keys::SecretKey, keys::SecretKey,
parameters::Parameters, parameters::Parameters,
plaintext::Plaintext, plaintext::Plaintext,
}; };
use base2k::{ use base2k::{
Infos, VecZnx, Module, SvpPPol, SvpPPolOps, VecZnxApi, VecZnxBigOps, VecZnxDft, VecZnxDftOps, Infos, Module, SvpPPol, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBigOps, VecZnxDft, VecZnxDftOps,
}; };
use std::cmp::min; use std::cmp::min;
@@ -34,13 +34,16 @@ impl Parameters {
) )
} }
pub fn decrypt_rlwe_thread_safe( pub fn decrypt_rlwe_thread_safe<T>(
&self, &self,
res: &mut Plaintext<VecZnx>, res: &mut Plaintext<T>,
ct: &Ciphertext, ct: &Ciphertext<T>,
sk: &SvpPPol, sk: &SvpPPol,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) { ) where
T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
decrypt_rlwe_thread_safe(self.module(), &mut res.0, &ct.0, sk, tmp_bytes) decrypt_rlwe_thread_safe(self.module(), &mut res.0, &ct.0, sk, tmp_bytes)
} }
} }
@@ -52,26 +55,29 @@ pub fn decrypt_rlwe_thread_safe<T>(
sk: &SvpPPol, sk: &SvpPPol,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) where ) where
T: VecZnxApi + Infos, T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{ {
let cols: usize = a.cols();
assert!( assert!(
tmp_bytes.len() >= decrypt_rlwe_thread_safe_tmp_byte(module, a.limbs()), tmp_bytes.len() >= decrypt_rlwe_thread_safe_tmp_byte(module, cols),
"invalid tmp_bytes: tmp_bytes.len()={} < decrypt_rlwe_thread_safe_tmp_byte={}", "invalid tmp_bytes: tmp_bytes.len()={} < decrypt_rlwe_thread_safe_tmp_byte={}",
tmp_bytes.len(), tmp_bytes.len(),
decrypt_rlwe_thread_safe_tmp_byte(module, a.limbs()) decrypt_rlwe_thread_safe_tmp_byte(module, cols)
); );
let res_dft_bytes: usize = module.bytes_of_vec_znx_dft(a.limbs()); let res_dft_bytes: usize = module.bytes_of_vec_znx_dft(cols);
let mut res_dft: VecZnxDft = VecZnxDft::from_bytes(a.limbs(), tmp_bytes); let mut res_dft: VecZnxDft = VecZnxDft::from_bytes(a.cols(), tmp_bytes);
let mut res_big: base2k::VecZnxBig = res_dft.as_vec_znx_big(); let mut res_big: base2k::VecZnxBig = res_dft.as_vec_znx_big();
// res_dft <- DFT(ct[1]) * DFT(sk) // res_dft <- DFT(ct[1]) * DFT(sk)
module.svp_apply_dft(&mut res_dft, sk, &a.value[1], a.limbs()); module.svp_apply_dft(&mut res_dft, sk, a.at(1), cols);
// res_big <- ct[1] x sk // res_big <- ct[1] x sk
module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, a.limbs()); module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, cols);
// res_big <- ct[1] x sk + ct[0] // res_big <- ct[1] x sk + ct[0]
module.vec_znx_big_add_small_inplace(&mut res_big, &a.value[0]); module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0));
// res <- normalize(ct[1] x sk + ct[0]) // res <- normalize(ct[1] x sk + ct[0])
module.vec_znx_big_normalize( module.vec_znx_big_normalize(
a.log_base2k(), a.log_base2k(),
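Editor's note: with ciphertexts laid out as (c0, c1) = (-a*s + m + e, a), as the gadget row comment in encryptor.rs states, the steps above compute

\[ \mathrm{pt} \;=\; \mathrm{normalize}_{2^{k}}\big( c_1 \cdot s + c_0 \big) \;=\; m + e, \qquad k = \texttt{log\_base2k}. \]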

View File

@@ -1,82 +1,68 @@
use crate::parameters::Parameters; use base2k::{Infos, Module, VecZnx, VecZnxApi, VecZnxBorrow, VecZnxOps, VmpPMat, VmpPMatOps};
use base2k::{Infos, Module, VecZnx, VecZnxApi, VecZnxBorrow, VecZnxOps};
use crate::parameters::Parameters;
impl Parameters { impl Parameters {
pub fn bytes_of_elem(&self, log_q: usize, degree: usize) -> usize { pub fn elem_from_bytes<T>(&self, log_q: usize, rows: usize, bytes: &mut [u8]) -> Elem<T>
Elem::<VecZnx>::bytes_of(self.module(), self.log_base2k(), log_q, degree) where
} T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
pub fn elem_from_bytes(&self, log_q: usize, degree: usize, bytes: &mut [u8]) -> Elem<VecZnx> { {
Elem::<VecZnx>::from_bytes(self.module(), self.log_base2k(), log_q, degree, bytes) Elem::<T>::from_bytes(self.module(), self.log_base2k(), log_q, rows, bytes)
}
pub fn elem_borrow_from_bytes(&self, log_q: usize, degree: usize, bytes: &mut [u8]) -> Elem<VecZnxBorrow> {
Elem::<VecZnxBorrow>::from_bytes(self.module(), self.log_base2k(), log_q, degree, bytes)
} }
} }
pub struct Elem<T: VecZnxApi + Infos> { pub struct Elem<T> {
pub value: Vec<T>, pub value: Vec<T>,
pub log_base2k: usize, pub log_base2k: usize,
pub log_q: usize, pub log_q: usize,
pub log_scale: usize, pub log_scale: usize,
} }
pub trait ElemBasics<T> pub trait VecZnxCommon: VecZnxApi + Infos {}
where impl VecZnxCommon for VecZnx {}
T: VecZnxApi + Infos, impl VecZnxCommon for VecZnxBorrow {}
{
fn n(&self) -> usize; pub trait ElemVecZnx<T: VecZnxCommon> {
fn degree(&self) -> usize; fn from_bytes(
fn limbs(&self) -> usize; module: &Module,
fn log_base2k(&self) -> usize; log_base2k: usize,
fn log_scale(&self) -> usize; log_q: usize,
fn log_q(&self) -> usize; rows: usize,
bytes: &mut [u8],
) -> Elem<T>;
fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> usize;
fn at(&self, i: usize) -> &T; fn at(&self, i: usize) -> &T;
fn at_mut(&mut self, i: usize) -> &mut T; fn at_mut(&mut self, i: usize) -> &mut T;
fn zero(&mut self); fn zero(&mut self);
} }
impl Elem<VecZnx> { impl<T> ElemVecZnx<T> for Elem<T>
pub fn new( where
module: &Module, T: VecZnxCommon<Owned = T>,
log_base2k: usize, Elem<T>: Infos,
log_q: usize, {
degree: usize, fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> usize {
log_scale: usize,
) -> Self {
let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
let mut value: Vec<VecZnx> = Vec::new();
(0..degree + 1).for_each(|_| value.push(module.new_vec_znx(limbs)));
Self {
value,
log_q,
log_base2k,
log_scale: log_scale,
}
}
pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, degree: usize) -> usize {
let cols = (log_q + log_base2k - 1) / log_base2k; let cols = (log_q + log_base2k - 1) / log_base2k;
module.n() * cols * (degree + 1) * 8 module.n() * cols * rows * 8
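// Editor's note (worked example, illustrative values): log_q = 54 and
// log_base2k = 18 give cols = 3; with rows = 2 polynomials of n 8-byte i64
// coefficients the total is n * 3 * 2 * 8 bytes, exactly what from_bytes
// below consumes (rows polynomials of n * cols words each).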
} }
pub fn from_bytes( fn from_bytes(
module: &Module, module: &Module,
log_base2k: usize, log_base2k: usize,
log_q: usize, log_q: usize,
degree: usize, rows: usize,
bytes: &mut [u8], bytes: &mut [u8],
) -> Self { ) -> Elem<T> {
assert!(rows > 0);
let n: usize = module.n(); let n: usize = module.n();
assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, degree)); assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, rows));
let mut value: Vec<VecZnx> = Vec::new(); let mut value: Vec<T> = Vec::new();
let limbs: usize = (log_q + log_base2k - 1) / log_base2k; let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
let size = VecZnx::bytes_of(n, limbs); let size = T::bytes_of(n, limbs);
let mut ptr: usize = 0; let mut ptr: usize = 0;
(0..degree + 1).for_each(|_| { (0..rows).for_each(|_| {
value.push(VecZnx::from_bytes(n, limbs, &mut bytes[ptr..])); value.push(T::from_bytes(n, limbs, &mut bytes[ptr..]));
ptr += size ptr += size
}); });
Self { Self {
@@ -86,74 +72,14 @@ impl Elem<VecZnx> {
log_scale: 0, log_scale: 0,
} }
} }
}
impl Elem<VecZnxBorrow> {
pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, degree: usize) -> usize {
let cols = (log_q + log_base2k - 1) / log_base2k;
module.n() * cols * (degree + 1) * 8
}
pub fn from_bytes(
module: &Module,
log_base2k: usize,
log_q: usize,
degree: usize,
bytes: &mut [u8],
) -> Self {
let n: usize = module.n();
assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, degree));
let mut value: Vec<VecZnxBorrow> = Vec::new();
let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
let size = VecZnxBorrow::bytes_of(n, limbs);
let mut ptr: usize = 0;
(0..degree + 1).for_each(|_| {
value.push(VecZnxBorrow::from_bytes(n, limbs, &mut bytes[ptr..]));
ptr += size
});
Self {
value,
log_q,
log_base2k,
log_scale: 0,
}
}
}
impl<T: VecZnxApi + Infos> ElemBasics<T> for Elem<T> {
fn n(&self) -> usize {
self.value[0].n()
}
fn degree(&self) -> usize {
self.value.len()
}
fn limbs(&self) -> usize {
self.value[0].limbs()
}
fn log_base2k(&self) -> usize {
self.log_base2k
}
fn log_scale(&self) -> usize {
self.log_scale
}
fn log_q(&self) -> usize {
self.log_q
}
fn at(&self, i: usize) -> &T { fn at(&self, i: usize) -> &T {
assert!(i <= self.degree()); assert!(i < self.rows());
&self.value[i] &self.value[i]
} }
fn at_mut(&mut self, i: usize) -> &mut T { fn at_mut(&mut self, i: usize) -> &mut T {
assert!(i <= self.degree()); assert!(i < self.rows());
&mut self.value[i] &mut self.value[i]
} }
@@ -161,3 +87,97 @@ impl<T: VecZnxApi + Infos> ElemBasics<T> for Elem<T> {
self.value.iter_mut().for_each(|i| i.zero()); self.value.iter_mut().for_each(|i| i.zero());
} }
} }
impl<T> Elem<T> {
pub fn log_base2k(&self) -> usize {
self.log_base2k
}
pub fn log_q(&self) -> usize {
self.log_q
}
pub fn log_scale(&self) -> usize {
self.log_scale
}
}
impl Infos for Elem<VecZnx> {
fn n(&self) -> usize {
self.value[0].n()
}
fn log_n(&self) -> usize {
self.value[0].log_n()
}
fn rows(&self) -> usize {
self.value.len()
}
fn cols(&self) -> usize {
self.value[0].cols()
}
}
impl Infos for Elem<VecZnxBorrow> {
fn n(&self) -> usize {
self.value[0].n()
}
fn log_n(&self) -> usize {
self.value[0].log_n()
}
fn rows(&self) -> usize {
self.value.len()
}
fn cols(&self) -> usize {
self.value[0].cols()
}
}
impl Elem<VecZnx> {
pub fn new(module: &Module, log_base2k: usize, log_q: usize, rows: usize) -> Self {
assert!(rows > 0);
let limbs: usize = (log_q + log_base2k - 1) / log_base2k;
let mut value: Vec<VecZnx> = Vec::new();
(0..rows).for_each(|_| value.push(module.new_vec_znx(limbs)));
Self {
value,
log_q,
log_base2k,
log_scale: 0,
}
}
}
impl Infos for Elem<VmpPMat> {
fn n(&self) -> usize {
self.value[0].n()
}
fn log_n(&self) -> usize {
self.value[0].log_n()
}
fn rows(&self) -> usize {
self.value[0].rows()
}
fn cols(&self) -> usize {
self.value[0].cols()
}
}
impl Elem<VmpPMat> {
pub fn new(module: &Module, log_base2k: usize, rows: usize, cols: usize) -> Self {
assert!(rows > 0);
assert!(cols > 0);
Self {
value: vec![module.new_vmp_pmat(rows, cols)],
log_q: 0,
log_base2k,
log_scale: 0,
}
}
}
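Editor's note: a sketch, under the trait bounds declared above and ignoring the aligned-allocation detail (the benches use alloc_aligned_u8), of building the same Elem<T> over owned and over borrowed storage.

use base2k::{Module, VecZnx, VecZnxBorrow};
use rlwe::elem::{Elem, ElemVecZnx};

fn elem_two_ways(module: &Module) {
    let (log_base2k, log_q, rows) = (18, 54, 2);
    // Owned: rows fresh VecZnx of ceil(log_q / log_base2k) limbs each.
    let owned: Elem<VecZnx> = Elem::<VecZnx>::new(module, log_base2k, log_q, rows);
    // Borrowed: the same layout carved out of a caller-provided byte buffer.
    let mut bytes = vec![0u8; Elem::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q, rows)];
    let borrowed: Elem<VecZnxBorrow> =
        Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, rows, &mut bytes);
    let _ = (owned, borrowed);
}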

View File

@@ -1,14 +1,14 @@
use crate::ciphertext::{Ciphertext, GadgetCiphertext}; use crate::ciphertext::Ciphertext;
use crate::elem::{Elem, ElemBasics}; use crate::elem::{Elem, ElemVecZnx, VecZnxCommon};
use crate::keys::SecretKey; use crate::keys::SecretKey;
use crate::parameters::Parameters; use crate::parameters::Parameters;
use crate::plaintext::Plaintext; use crate::plaintext::Plaintext;
use base2k::sampling::Sampling; use base2k::sampling::Sampling;
use base2k::{ use base2k::{
cast_mut, Infos, VecZnxBorrow, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxBigOps, Infos, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxApi, VecZnxBig, VecZnxBigOps,
VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMatOps, alloc_aligned_u8, cast, VecZnxBorrow, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps, cast_mut,
}; };
use rand_distr::num_traits::ops::bytes;
use sampling::source::{Source, new_seed}; use sampling::source::{Source, new_seed};
pub struct EncryptorSk { pub struct EncryptorSk {
@@ -49,12 +49,15 @@ impl EncryptorSk {
self.source_xe = Source::new(seed) self.source_xe = Source::new(seed)
} }
pub fn encrypt_rlwe_sk( pub fn encrypt_rlwe_sk<T>(
&mut self, &mut self,
params: &Parameters, params: &Parameters,
ct: &mut Ciphertext, ct: &mut Ciphertext<T>,
pt: Option<&Plaintext<VecZnx>>, pt: Option<&Plaintext<T>>,
) { ) where
T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
assert!( assert!(
self.initialized, self.initialized,
"invalid call to [EncryptorSk.encrypt_rlwe_sk]: [EncryptorSk] has not been initialized with a [SecretKey]" "invalid call to [EncryptorSk.encrypt_rlwe_sk]: [EncryptorSk] has not been initialized with a [SecretKey]"
@@ -69,15 +72,18 @@ impl EncryptorSk {
); );
} }
pub fn encrypt_rlwe_sk_thread_safe( pub fn encrypt_rlwe_sk_thread_safe<T>(
&self, &self,
params: &Parameters, params: &Parameters,
ct: &mut Ciphertext, ct: &mut Ciphertext<T>,
pt: Option<&Plaintext<VecZnx>>, pt: Option<&Plaintext<T>>,
source_xa: &mut Source, source_xa: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) { ) where
T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
assert!( assert!(
self.initialized, self.initialized,
"invalid call to [EncryptorSk.encrypt_rlwe_sk_thread_safe]: [EncryptorSk] has not been initialized with a [SecretKey]" "invalid call to [EncryptorSk.encrypt_rlwe_sk_thread_safe]: [EncryptorSk] has not been initialized with a [SecretKey]"
@@ -91,16 +97,19 @@ impl Parameters {
encrypt_rlwe_sk_tmp_bytes(self.module(), self.log_base2k(), log_q) encrypt_rlwe_sk_tmp_bytes(self.module(), self.log_base2k(), log_q)
} }
pub fn encrypt_rlwe_sk_thread_safe( pub fn encrypt_rlwe_sk_thread_safe<T>(
&self, &self,
ct: &mut Ciphertext, ct: &mut Ciphertext<T>,
pt: Option<&Plaintext<VecZnx>>, pt: Option<&Plaintext<T>>,
sk: &SvpPPol, sk: &SvpPPol,
source_xa: &mut Source, source_xa: &mut Source,
source_xe: &mut Source, source_xe: &mut Source,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) { ) where
encrypt_rlwe_sk_thread_safe::<VecZnx>( T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
encrypt_rlwe_sk_thread_safe(
self.module(), self.module(),
&mut ct.0, &mut ct.0,
pt.map(|pt| &pt.0), pt.map(|pt| &pt.0),
@@ -128,9 +137,10 @@ pub fn encrypt_rlwe_sk_thread_safe<T>(
sigma: f64, sigma: f64,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) where ) where
T: VecZnxApi + Infos, T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{ {
let limbs: usize = ct.limbs(); let cols: usize = ct.cols();
let log_base2k: usize = ct.log_base2k(); let log_base2k: usize = ct.log_base2k();
let log_q: usize = ct.log_q(); let log_q: usize = ct.log_q();
@@ -146,22 +156,22 @@ pub fn encrypt_rlwe_sk_thread_safe<T>(
let c1: &mut T = ct.at_mut(1); let c1: &mut T = ct.at_mut(1);
// c1 <- Z_{2^prec}[X]/(X^{N}+1) // c1 <- Z_{2^prec}[X]/(X^{N}+1)
module.fill_uniform(log_base2k, c1, limbs, source_xa); module.fill_uniform(log_base2k, c1, cols, source_xa);
let bytes_of_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(limbs); let bytes_of_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(cols);
// Scratch space for DFT values // Scratch space for DFT values
let mut buf_dft: VecZnxDft = let mut buf_dft: VecZnxDft =
VecZnxDft::from_bytes(limbs, &mut tmp_bytes[..bytes_of_vec_znx_dft]); VecZnxDft::from_bytes(cols, &mut tmp_bytes[..bytes_of_vec_znx_dft]);
// Applies buf_dft <- DFT(s) * DFT(c1) // Applies buf_dft <- DFT(s) * DFT(c1)
module.svp_apply_dft(&mut buf_dft, sk, c1, limbs); module.svp_apply_dft(&mut buf_dft, sk, c1, cols);
// Alias scratch space // Alias scratch space
let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big(); let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big();
// buf_big = s x c1 // buf_big = s x c1
module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, limbs); module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, cols);
let carry: &mut [u8] = &mut tmp_bytes[bytes_of_vec_znx_dft..]; let carry: &mut [u8] = &mut tmp_bytes[bytes_of_vec_znx_dft..];
@@ -194,7 +204,7 @@ pub fn encrypt_grlwe_sk_tmp_bytes(
log_q: usize, log_q: usize,
) -> usize { ) -> usize {
let cols = (log_q + log_base2k - 1) / log_base2k; let cols = (log_q + log_base2k - 1) / log_base2k;
Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 1) Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 2)
+ Plaintext::<VecZnx>::bytes_of(module, log_base2k, log_q) + Plaintext::<VecZnx>::bytes_of(module, log_base2k, log_q)
+ encrypt_rlwe_sk_tmp_bytes(module, log_base2k, log_q) + encrypt_rlwe_sk_tmp_bytes(module, log_base2k, log_q)
+ module.vmp_prepare_tmp_bytes(rows, 2 * cols) + module.vmp_prepare_tmp_bytes(rows, 2 * cols)
@@ -202,7 +212,7 @@ pub fn encrypt_grlwe_sk_tmp_bytes(
pub fn encrypt_grlwe_sk_thread_safe( pub fn encrypt_grlwe_sk_thread_safe(
module: &Module, module: &Module,
ct: &mut GadgetCiphertext, ct: &mut Ciphertext<VmpPMat>,
m: &Scalar, m: &Scalar,
sk: &SvpPPol, sk: &SvpPPol,
source_xa: &mut Source, source_xa: &mut Source,
@@ -212,7 +222,7 @@ pub fn encrypt_grlwe_sk_thread_safe(
) { ) {
let rows: usize = ct.rows(); let rows: usize = ct.rows();
let log_q: usize = ct.log_q(); let log_q: usize = ct.log_q();
let cols: usize = (log_q + ct.log_base2k() - 1) / ct.log_base2k(); //let cols: usize = (log_q + ct.log_base2k() - 1) / ct.log_base2k();
let log_base2k: usize = ct.log_base2k(); let log_base2k: usize = ct.log_base2k();
let min_tmp_bytes_len = encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q); let min_tmp_bytes_len = encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q);
@@ -224,24 +234,24 @@ pub fn encrypt_grlwe_sk_thread_safe(
min_tmp_bytes_len min_tmp_bytes_len
); );
let bytes_of_elem: usize = Elem::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q, 1); let bytes_of_elem: usize = Elem::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q, 2);
let bytes_of_pt: usize = Plaintext::<VecZnx>::bytes_of(module, log_base2k, log_q); let bytes_of_pt: usize = Plaintext::<VecZnx>::bytes_of(module, log_base2k, log_q);
let bytes_of_enc_sk: usize = encrypt_rlwe_sk_tmp_bytes(module, log_base2k, log_q); let bytes_of_enc_sk: usize = encrypt_rlwe_sk_tmp_bytes(module, log_base2k, log_q);
let bytes_of_vmp_prepare_row: usize = module.vmp_prepare_tmp_bytes(rows, 2 * cols);
let (tmp_bytes_pt, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_pt); let (tmp_bytes_pt, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_pt);
let (tmp_bytes_enc_sk, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_enc_sk); let (tmp_bytes_enc_sk, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_enc_sk);
let (tmp_bytes_elem, tmp_bytes_vmp_prepare_row) = tmp_bytes.split_at_mut(bytes_of_elem); let (tmp_bytes_elem, tmp_bytes_vmp_prepare_row) = tmp_bytes.split_at_mut(bytes_of_elem);
let mut tmp_elem: Elem<VecZnxBorrow> = Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, ct.log_q(), 1, tmp_bytes_elem); let mut tmp_elem: Elem<VecZnxBorrow> =
let mut tmp_pt: Plaintext<VecZnxBorrow> = Plaintext::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, tmp_bytes_pt); Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, ct.log_q(), 2, tmp_bytes_elem);
let mut tmp_pt: Plaintext<VecZnxBorrow> =
Plaintext::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, tmp_bytes_pt);
(0..rows).for_each(|row_i| { (0..rows).for_each(|row_i| {
// Sets the i-th row of the RLWE sample to m (i.e. m * 2^{-log_base2k*i}) // Sets the i-th row of the RLWE sample to m (i.e. m * 2^{-log_base2k*i})
tmp_pt.0.value[0].at_mut(row_i).copy_from_slice(&m.0); tmp_pt.0.value[0].at_mut(row_i).copy_from_slice(&m.0);
// Encrypts RLWE(m * 2^{-log_base2k*i}) // Encrypts RLWE(m * 2^{-log_base2k*i})
encrypt_rlwe_sk_thread_safe( encrypt_rlwe_sk_thread_safe(
module, module,
&mut tmp_elem, &mut tmp_elem,
@@ -253,23 +263,22 @@ pub fn encrypt_grlwe_sk_thread_safe(
tmp_bytes_enc_sk, tmp_bytes_enc_sk,
); );
// Zeroes the ith-row of tmp_pt // Zeroes the ith-row of tmp_pt
tmp_pt.0.value[0].at_mut(row_i).fill(0); tmp_pt.0.value[0].at_mut(row_i).fill(0);
println!("row:{}/{}", row_i, rows); //println!("row:{}/{}", row_i, rows);
tmp_elem.at(0).print_limbs(tmp_elem.limbs(), tmp_elem.n()); //tmp_elem.at(0).print(tmp_elem.limbs(), tmp_elem.n());
tmp_elem.at(1).print_limbs(tmp_elem.limbs(), tmp_elem.n()); //tmp_elem.at(1).print(tmp_elem.limbs(), tmp_elem.n());
println!(); //println!();
println!(">>>"); //println!(">>>");
// GRLWE[row_i][0||1] = [-as + m * 2^{-i*log_base2k} + e*2^{-log_q} || a] // GRLWE[row_i][0||1] = [-as + m * 2^{-i*log_base2k} + e*2^{-log_q} || a]
module.vmp_prepare_row( module.vmp_prepare_row(
&mut ct.value, &mut ct.0.value[0],
cast_mut::<u8, i64>(tmp_bytes_elem), cast_mut::<u8, i64>(tmp_bytes_elem),
row_i, row_i,
tmp_bytes_vmp_prepare_row, tmp_bytes_vmp_prepare_row,
); );
}); });
println!("DONE"); //println!("DONE");
} }
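Editor's note: in math form, the loop above fills row i of the gadget with

\[ \mathrm{GRLWE}[i] \;=\; \big(-a_i s + m \cdot 2^{-ik} + e_i,\; a_i\big), \qquad 0 \le i < \mathrm{rows},\; k = \texttt{log\_base2k}, \]

matching the layout comment inside the loop.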

View File

@@ -1,9 +1,9 @@
use crate::{ use crate::{
ciphertext::{Ciphertext, GadgetCiphertext, RGSWCiphertext}, ciphertext::Ciphertext,
elem::{Elem, ElemBasics}, elem::{Elem, ElemVecZnx, VecZnxCommon},
}; };
use base2k::{ use base2k::{
Infos, Module, VecZnx, VecZnxApi, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMatOps, Infos, Module, VecZnxApi, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps,
}; };
use std::cmp::min; use std::cmp::min;
@@ -22,13 +22,14 @@ pub fn gadget_product_tmp_bytes(
+ 2 * module.bytes_of_vec_znx_dft(gct_cols) + 2 * module.bytes_of_vec_znx_dft(gct_cols)
} }
pub fn gadget_product_inplace_thread_safe<const OVERWRITE: bool, T>( pub fn gadget_product_inplace_thread_safe<const OVERWRITE: bool, T: VecZnxApi<Owned = T> + Infos>(
module: &Module, module: &Module,
res: &mut Elem<T>, res: &mut Elem<T>,
b: &GadgetCiphertext, b: &Ciphertext<VmpPMat>,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) where ) where
T: VecZnxApi + Infos, T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{ {
unsafe { unsafe {
let a_ptr: *const T = res.at(1) as *const T; let a_ptr: *const T = res.at(1) as *const T;
@@ -50,21 +51,22 @@ pub fn gadget_product_inplace_thread_safe<const OVERWRITE: bool, T>(
/// ///
/// res = sum[min(a_ncols, b_nrows)] decomp(a, i) * (-B[i]s + m * 2^{-k*i} + E[i], B[i]) /// res = sum[min(a_ncols, b_nrows)] decomp(a, i) * (-B[i]s + m * 2^{-k*i} + E[i], B[i])
/// = (cs + m * a + e, c) with min(res_limbs, b_cols) limbs. /// = (cs + m * a + e, c) with min(res_cols, b_cols) cols.
pub fn gadget_product_thread_safe<const OVERWRITE: bool, T>( pub fn gadget_product_thread_safe<const OVERWRITE: bool, T: VecZnxApi<Owned = T> + Infos>(
module: &Module, module: &Module,
res: &mut Elem<T>, res: &mut Elem<T>,
a: &T, a: &T,
b: &GadgetCiphertext, b: &Ciphertext<VmpPMat>,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) where ) where
T: VecZnxApi + Infos, T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{ {
let log_base2k: usize = b.log_base2k(); let log_base2k: usize = b.log_base2k();
let rows: usize = min(b.rows(), a.limbs()); let rows: usize = min(b.rows(), a.cols());
let cols: usize = b.cols(); let cols: usize = b.cols();
let bytes_vmp_apply_dft: usize = let bytes_vmp_apply_dft: usize =
module.vmp_apply_dft_to_dft_tmp_bytes(cols, a.limbs(), rows, cols); module.vmp_apply_dft_to_dft_tmp_bytes(cols, a.cols(), rows, cols);
let bytes_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(cols); let bytes_vec_znx_dft: usize = module.bytes_of_vec_znx_dft(cols);
let (tmp_bytes_vmp_apply_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_vmp_apply_dft); let (tmp_bytes_vmp_apply_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_vmp_apply_dft);
@@ -82,11 +84,16 @@ pub fn gadget_product_thread_safe<const OVERWRITE: bool, T>(
module.new_vec_znx_big_from_bytes(cols >> 1, tmp_bytes_res_dft_c1); module.new_vec_znx_big_from_bytes(cols >> 1, tmp_bytes_res_dft_c1);
// a_dft <- DFT(a) // a_dft <- DFT(a)
module.vec_znx_dft(&mut c1_dft, a, a.limbs()); module.vec_znx_dft(&mut c1_dft, a, a.cols());
// (n x cols) <- (n x limbs=rows) x (rows x cols) // (n x cols) <- (n x cols=rows) x (rows x cols)
// res_dft[a * (G0|G1)] <- sum[rows] DFT(a) x (DFT(G0)|DFT(G1)) // res_dft[a * (G0|G1)] <- sum[rows] DFT(a) x (DFT(G0)|DFT(G1))
module.vmp_apply_dft_to_dft(&mut res_dft, &c1_dft, &b.value, tmp_bytes_vmp_apply_dft); module.vmp_apply_dft_to_dft(
&mut res_dft,
&c1_dft,
&b.0.value[0],
tmp_bytes_vmp_apply_dft,
);
// res_big[a * (G0|G1)] <- IDFT(res_dft[a * (G0|G1)]) // res_big[a * (G0|G1)] <- IDFT(res_dft[a * (G0|G1)])
module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, cols); module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft, cols);
@@ -105,24 +112,25 @@ pub fn gadget_product_thread_safe<const OVERWRITE: bool, T>(
} }
} }
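Editor's note: the identity in gadget_product_thread_safe's doc, written out as math (B_i, E_i are the mask and noise of gadget row i):

\[
\mathrm{res} \;=\; \sum_{i=0}^{\min(a_{\mathrm{cols}},\, b_{\mathrm{rows}})-1}
\mathrm{decomp}_i(a) \cdot \big(-B_i s + m \cdot 2^{-ki} + E_i,\; B_i\big)
\;=\; \big(c s + m a + e,\; c\big),
\]

truncated to min(res_cols, b_cols) cols.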
pub fn rgsw_product_thread_safe<T>( pub fn rgsw_product_thread_safe<T: VecZnxApi<Owned = T> + Infos>(
module: &Module, module: &Module,
res: &mut Elem<T>, res: &mut Elem<T>,
a: &Ciphertext, a: &Ciphertext<T>,
b: &RGSWCiphertext, b: &Ciphertext<VmpPMat>,
tmp_bytes: &mut [u8], tmp_bytes: &mut [u8],
) where ) where
T: VecZnxApi + Infos, T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{ {
let log_base2k: usize = b.log_base2k(); let log_base2k: usize = b.log_base2k();
let rows: usize = a.limbs(); let rows: usize = min(b.rows(), a.cols());
let cols: usize = b.cols(); let cols: usize = b.cols();
let in_limbs = a.limbs(); let in_cols = a.cols();
let out_limbs: usize = a.limbs(); let out_cols: usize = a.cols();
let bytes_of_vec_znx_dft = module.bytes_of_vec_znx_dft(cols); let bytes_of_vec_znx_dft = module.bytes_of_vec_znx_dft(cols);
let bytes_of_vmp_apply_dft_to_dft = let bytes_of_vmp_apply_dft_to_dft =
module.vmp_apply_dft_to_dft_tmp_bytes(out_limbs, in_limbs, rows, cols); module.vmp_apply_dft_to_dft_tmp_bytes(out_cols, in_cols, rows, cols);
let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_vec_znx_dft); let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_vec_znx_dft);
let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_vec_znx_dft); let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_vec_znx_dft);
@@ -139,16 +147,16 @@ pub fn rgsw_product_thread_safe<T>(
let mut r2_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes(cols, tmp_bytes_r2_dft); let mut r2_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes(cols, tmp_bytes_r2_dft);
// c0_dft <- DFT(a[0]) // c0_dft <- DFT(a[0])
module.vec_znx_dft(&mut c0_dft, a.at(0), a.limbs()); module.vec_znx_dft(&mut c0_dft, a.at(0), in_cols);
// r_dft <- sum[rows] c0_dft[cols] x RGSW[0][cols] // r_dft <- sum[rows] c0_dft[cols] x RGSW[0][cols]
module.vmp_apply_dft_to_dft( module.vmp_apply_dft_to_dft(
&mut r1_dft, &mut r1_dft,
&c1_dft, &c1_dft,
&b.value, &b.0.value[0],
bytes_of_vmp_apply_dft_to_dft, bytes_of_vmp_apply_dft_to_dft,
); );
// c1_dft <- DFT(a[1]) // c1_dft <- DFT(a[1])
module.vec_znx_dft(&mut c1_dft, a.at(1), a.limbs()); module.vec_znx_dft(&mut c1_dft, a.at(1), in_cols);
} }

View File

@@ -1,7 +1,7 @@
use crate::ciphertext::GadgetCiphertext; use crate::ciphertext::{Ciphertext, new_gadget_ciphertext};
use crate::elem::Elem; use crate::elem::Elem;
use crate::encryptor::{encrypt_rlwe_sk_thread_safe, encrypt_rlwe_sk_tmp_bytes}; use crate::encryptor::{encrypt_rlwe_sk_thread_safe, encrypt_rlwe_sk_tmp_bytes};
use base2k::{Module, Scalar, SvpPPol, SvpPPolOps, VecZnx}; use base2k::{Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VmpPMat};
use sampling::source::Source; use sampling::source::Source;
pub struct SecretKey(pub Scalar); pub struct SecretKey(pub Scalar);
@@ -28,7 +28,7 @@ pub struct PublicKey(pub Elem<VecZnx>);
impl PublicKey { impl PublicKey {
pub fn new(module: &Module, log_base2k: usize, log_q: usize) -> PublicKey { pub fn new(module: &Module, log_base2k: usize, log_q: usize) -> PublicKey {
PublicKey(Elem::new(module, log_base2k, log_q, 1, 0)) PublicKey(Elem::<VecZnx>::new(module, log_base2k, log_q, 2))
} }
pub fn gen_thread_safe( pub fn gen_thread_safe(
@@ -57,11 +57,11 @@ impl PublicKey {
} }
} }
pub struct SwitchingKey(pub GadgetCiphertext); pub struct SwitchingKey(pub Ciphertext<VmpPMat>);
impl SwitchingKey { impl SwitchingKey {
pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> SwitchingKey { pub fn new(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> SwitchingKey {
SwitchingKey(GadgetCiphertext::new(module, log_base2k, rows, log_q)) SwitchingKey(new_gadget_ciphertext(module, log_base2k, rows, log_q))
} }
pub fn n(&self) -> usize { pub fn n(&self) -> usize {

View File

@@ -1,62 +1,72 @@
use crate::ciphertext::Ciphertext; use crate::ciphertext::Ciphertext;
use crate::elem::{Elem, ElemBasics}; use crate::elem::{Elem, ElemVecZnx, VecZnxCommon};
use crate::parameters::Parameters; use crate::parameters::Parameters;
use base2k::{Infos, Module, VecZnx, VecZnxApi, VecZnxBorrow}; use base2k::{Infos, Module, VecZnx, VecZnxApi};
pub struct Plaintext<T: VecZnxApi + Infos>(pub Elem<T>); pub struct Plaintext<T>(pub Elem<T>);
impl Parameters { impl Parameters {
pub fn new_plaintext(&self, log_q: usize) -> Plaintext<VecZnx> { pub fn new_plaintext(&self, log_q: usize) -> Plaintext<VecZnx> {
Plaintext::new(self.module(), self.log_base2k(), log_q, self.log_scale()) Plaintext::new(self.module(), self.log_base2k(), log_q)
} }
pub fn bytes_of_plaintext(&self, log_q: usize) -> usize { pub fn bytes_of_plaintext<T>(&self, log_q: usize) -> usize
Elem::<VecZnx>::bytes_of(self.module(), self.log_base2k(), log_q, 0) where
T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
Elem::<T>::bytes_of(self.module(), self.log_base2k(), log_q, 1)
} }
pub fn plaintext_from_bytes(&self, log_q: usize, bytes: &mut [u8]) -> Plaintext<VecZnx> { pub fn plaintext_from_bytes<T>(&self, log_q: usize, bytes: &mut [u8]) -> Plaintext<T>
Plaintext(self.elem_from_bytes(log_q, 0, bytes)) where
} T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
pub fn plaintext_borrow_from_bytes(&self, log_q: usize, bytes: &mut [u8]) -> Plaintext<VecZnxBorrow> { {
Plaintext(self.elem_borrow_from_bytes(log_q, 0, bytes)) Plaintext::<T>(self.elem_from_bytes::<T>(log_q, 1, bytes))
} }
} }
impl Plaintext<VecZnx> { impl Plaintext<VecZnx> {
pub fn new(module: &Module, log_base2k: usize, log_q: usize, log_scale: usize) -> Self { pub fn new(module: &Module, log_base2k: usize, log_q: usize) -> Self {
Self(Elem::<VecZnx>::new(module, log_base2k, log_q, 0, log_scale)) Self(Elem::<VecZnx>::new(module, log_base2k, log_q, 1))
}
} }
impl<T> Plaintext<T>
where
T: VecZnxCommon,
Elem<T>: Infos + ElemVecZnx<T>,
{
pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize) -> usize { pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize) -> usize {
Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 0) Elem::<T>::bytes_of(module, log_base2k, log_q, 1)
} }
pub fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, bytes: &mut [u8]) -> Self { pub fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, bytes: &mut [u8]) -> Self {
Self(Elem::<VecZnx>::from_bytes(module, log_base2k, log_q, 0, bytes)) Self(Elem::<T>::from_bytes(module, log_base2k, log_q, 1, bytes))
} }
pub fn n(&self) -> usize { pub fn n(&self) -> usize {
self.0.n() self.0.n()
} }
pub fn degree(&self) -> usize {
self.0.degree()
}
pub fn log_q(&self) -> usize { pub fn log_q(&self) -> usize {
self.0.log_q() self.0.log_q
} }
pub fn limbs(&self) -> usize { pub fn rows(&self) -> usize {
self.0.limbs() self.0.rows()
} }
pub fn at(&self, i: usize) -> &VecZnx { pub fn cols(&self) -> usize {
self.0.cols()
}
pub fn at(&self, i: usize) -> &T {
self.0.at(i) self.0.at(i)
} }
pub fn at_mut(&mut self, i: usize) -> &mut VecZnx { pub fn at_mut(&mut self, i: usize) -> &mut T {
self.0.at_mut(i) self.0.at_mut(i)
} }
@@ -72,61 +82,7 @@ impl Plaintext<VecZnx> {
self.0.zero() self.0.zero()
} }
pub fn as_ciphertext(&self) -> Ciphertext { pub fn as_ciphertext(&self) -> Ciphertext<T> {
unsafe { Ciphertext(std::ptr::read(&self.0)) } unsafe { Ciphertext::<T>(std::ptr::read(&self.0)) }
} }
}
impl Plaintext<VecZnxBorrow> {
pub fn bytes_of(module: &Module, log_base2k: usize, log_q: usize) -> usize {
Elem::<VecZnxBorrow>::bytes_of(module, log_base2k, log_q, 0)
}
pub fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, bytes: &mut [u8]) -> Self {
Self(Elem::<VecZnxBorrow>::from_bytes(module, log_base2k, log_q, 0, bytes))
}
pub fn n(&self) -> usize {
self.0.n()
}
pub fn degree(&self) -> usize {
self.0.degree()
}
pub fn log_q(&self) -> usize {
self.0.log_q()
}
pub fn limbs(&self) -> usize {
self.0.limbs()
}
pub fn at(&self, i: usize) -> &VecZnxBorrow {
self.0.at(i)
}
pub fn at_mut(&mut self, i: usize) -> &mut VecZnxBorrow {
self.0.at_mut(i)
}
pub fn log_base2k(&self) -> usize {
self.0.log_base2k()
}
pub fn log_scale(&self) -> usize {
self.0.log_scale()
}
pub fn zero(&mut self) {
self.0.zero()
}
/*
pub fn as_ciphertext(&self) -> Ciphertext {
unsafe { Ciphertext(std::ptr::read(&self.0)) }
}
*/
} }