Mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 13:16:44 +01:00)

Commit: prototype trait for Elem<T> + new ciphertext for VmpPMat
@@ -54,11 +54,10 @@ pub trait Encoding {
 impl Encoding for VecZnx {
     fn encode_vec_i64(&mut self, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;

-        println!("limbs: {}", limbs);
-
-        assert!(limbs <= self.limbs(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}", limbs, self.limbs());
+        assert!(cols <= self.cols(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.cols()={}", cols, self.cols());

         let size: usize = min(data.len(), self.n());
         let log_k_rem: usize = log_base2k - (log_k % log_base2k);
@@ -67,19 +66,19 @@ impl Encoding for VecZnx {
         // values on the last limb.
         // Else we decompose values base2k.
         if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-            (0..self.limbs()).for_each(|i| unsafe {
+            (0..self.cols()).for_each(|i| unsafe {
                 znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
             });
-            self.at_mut(limbs - 1)[..size].copy_from_slice(&data[..size]);
+            self.at_mut(cols - 1)[..size].copy_from_slice(&data[..size]);
         } else {
             let mask: i64 = (1 << log_base2k) - 1;
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);

             (0..steps).for_each(|i| unsafe {
                 znx_zero_i64_ref(size as u64, self.at_mut(i).as_mut_ptr());
             });

-            (limbs - steps..limbs)
+            (cols - steps..cols)
                 .rev()
                 .enumerate()
                 .for_each(|(i, i_rev)| {
@@ -91,9 +90,9 @@ impl Encoding for VecZnx {

         // Case where self.prec % self.k != 0.
         if log_k_rem != log_base2k {
-            let limbs = self.limbs();
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
-            (limbs - steps..limbs).rev().for_each(|i| {
+            let cols = self.cols();
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
+            (cols - steps..cols).rev().for_each(|i| {
                 self.at_mut(i)[..size]
                     .iter_mut()
                     .for_each(|x| *x <<= log_k_rem);
@@ -102,7 +101,7 @@ impl Encoding for VecZnx {
     }

     fn decode_vec_i64(&self, log_base2k: usize, log_k: usize, data: &mut [i64]) {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
         assert!(
             data.len() >= self.n,
             "invalid data: data.len()={} < self.n()={}",
@@ -111,8 +110,8 @@ impl Encoding for VecZnx {
         );
         data.copy_from_slice(self.at(0));
         let rem: usize = log_base2k - (log_k % log_base2k);
-        (1..limbs).for_each(|i| {
-            if i == limbs - 1 && rem != log_base2k {
+        (1..cols).for_each(|i| {
+            if i == cols - 1 && rem != log_base2k {
                 let k_rem: usize = log_base2k - rem;
                 izip!(self.at(i).iter(), data.iter_mut()).for_each(|(x, y)| {
                     *y = (*y << k_rem) + (x >> rem);
@@ -134,25 +133,25 @@ impl Encoding for VecZnx {
         log_max: usize,
     ) {
         assert!(i < self.n());
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
-        assert!(limbs <= self.limbs(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.limbs()={}", limbs, self.limbs());
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
+        assert!(cols <= self.cols(), "invalid argument log_k: (log_k + self.log_base2k - 1)/self.log_base2k={} > self.cols()={}", cols, self.cols());
         let log_k_rem: usize = log_base2k - (log_k % log_base2k);
-        let limbs = self.limbs();
+        let cols = self.cols();

         // If 2^{log_base2k} * 2^{log_k_rem} < 2^{63}-1, then we can simply copy
         // values on the last limb.
         // Else we decompose values base2k.
         if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
-            (0..limbs - 1).for_each(|j| self.at_mut(j)[i] = 0);
-            self.at_mut(self.limbs() - 1)[i] = value;
+            (0..cols - 1).for_each(|j| self.at_mut(j)[i] = 0);
+            self.at_mut(self.cols() - 1)[i] = value;
         } else {
             let mask: i64 = (1 << log_base2k) - 1;
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);

-            (0..limbs - steps).for_each(|j| self.at_mut(j)[i] = 0);
+            (0..cols - steps).for_each(|j| self.at_mut(j)[i] = 0);

-            (limbs - steps..limbs)
+            (cols - steps..cols)
                 .rev()
                 .enumerate()
                 .for_each(|(j, j_rev)| {
@@ -162,22 +161,22 @@ impl Encoding for VecZnx {

         // Case where self.prec % self.k != 0.
         if log_k_rem != log_base2k {
-            let limbs = self.limbs();
-            let steps: usize = min(limbs, (log_max + log_base2k - 1) / log_base2k);
-            (limbs - steps..limbs).rev().for_each(|j| {
+            let cols = self.cols();
+            let steps: usize = min(cols, (log_max + log_base2k - 1) / log_base2k);
+            (cols - steps..cols).rev().for_each(|j| {
                 self.at_mut(j)[i] <<= log_k_rem;
             })
         }
     }

     fn decode_coeff_i64(&self, log_base2k: usize, log_k: usize, i: usize) -> i64 {
-        let limbs: usize = (log_k + log_base2k - 1) / log_base2k;
+        let cols: usize = (log_k + log_base2k - 1) / log_base2k;
         assert!(i < self.n());
         let mut res: i64 = self.data[i];
         let rem: usize = log_base2k - (log_k % log_base2k);
-        (1..limbs).for_each(|i| {
+        (1..cols).for_each(|i| {
             let x = self.data[i * self.n];
-            if i == limbs - 1 && rem != log_base2k {
+            if i == cols - 1 && rem != log_base2k {
                 let k_rem: usize = log_base2k - rem;
                 res = (res << k_rem) + (x >> rem);
             } else {
@@ -198,9 +197,9 @@ mod tests {
     fn test_set_get_i64_lo_norm() {
         let n: usize = 8;
         let log_base2k: usize = 17;
-        let limbs: usize = 5;
-        let log_k: usize = limbs * log_base2k - 5;
-        let mut a: VecZnx = VecZnx::new(n, limbs);
+        let cols: usize = 5;
+        let log_k: usize = cols * log_base2k - 5;
+        let mut a: VecZnx = VecZnx::new(n, cols);
         let mut have: Vec<i64> = vec![i64::default(); n];
         have.iter_mut()
             .enumerate()
@@ -215,9 +214,9 @@ mod tests {
     fn test_set_get_i64_hi_norm() {
         let n: usize = 8;
         let log_base2k: usize = 17;
-        let limbs: usize = 5;
-        let log_k: usize = limbs * log_base2k - 5;
-        let mut a: VecZnx = VecZnx::new(n, limbs);
+        let cols: usize = 5;
+        let log_k: usize = cols * log_base2k - 5;
+        let mut a: VecZnx = VecZnx::new(n, cols);
         let mut have: Vec<i64> = vec![i64::default(); n];
         let mut source = Source::new([1; 32]);
         have.iter_mut().for_each(|x| {
@@ -226,9 +225,9 @@ mod tests {
                 .wrapping_sub(u64::MAX / 2 + 1) as i64;
         });
         a.encode_vec_i64(log_base2k, log_k, &have, 63);
-        //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
+        //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
         let mut want = vec![i64::default(); n];
-        //(0..a.limbs()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
+        //(0..a.cols()).for_each(|i| println!("i:{} -> {:?}", i, a.at(i)));
         a.decode_vec_i64(log_base2k, log_k, &mut want);
         izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
     }
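For orientation after this rename: here is a minimal encode/decode round trip in the spirit of the tests above. It is a sketch, assuming the crate root (`base2k`, as in the doc examples later in this diff) exports `VecZnx` and the `Encoding` trait:

```rust
use base2k::{Encoding, VecZnx};

fn main() {
    let n: usize = 8;           // ring degree
    let log_base2k: usize = 17; // bits per base-2^k column
    let cols: usize = 5;        // number of columns
    let log_k: usize = cols * log_base2k - 5; // total precision in bits

    // Encode n i64 coefficients into the base-2^k columns, then decode back.
    let mut a: VecZnx = VecZnx::new(n, cols);
    let have: Vec<i64> = (0..n as i64).collect();
    a.encode_vec_i64(log_base2k, log_k, &have, 63);

    let mut want = vec![0i64; n];
    a.decode_vec_i64(log_base2k, log_k, &mut want);
    assert_eq!(have, want);
}
```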
@@ -7,10 +7,6 @@ pub trait Infos {
     /// Returns the base two logarithm of the ring dimension of the receiver.
     fn log_n(&self) -> usize;

-    /// Returns the number of limbs of the receiver.
-    /// This method is equivalent to [Infos::cols].
-    fn limbs(&self) -> usize;
-
     /// Returns the number of columns of the receiver.
-    /// This method is equivalent to [Infos::limbs].
     fn cols(&self) -> usize;
@@ -30,11 +26,6 @@ impl Infos for VecZnx {
         self.n
     }

-    /// Returns the number of limbs of the [VecZnx].
-    fn limbs(&self) -> usize {
-        self.data.len() / self.n
-    }
-
     /// Returns the number of limbs of the [VecZnx].
     fn cols(&self) -> usize {
         self.data.len() / self.n
@@ -57,11 +48,6 @@ impl Infos for VecZnxBorrow {
         self.n
     }

-    /// Returns the number of limbs of the [VecZnx].
-    fn limbs(&self) -> usize {
-        self.limbs
-    }
-
     /// Returns the number of limbs of the [VecZnx].
     fn cols(&self) -> usize {
         self.limbs
@@ -83,12 +69,6 @@ impl Infos for VmpPMat {
         (usize::BITS - (self.n() - 1).leading_zeros()) as _
     }

-    /// Returns the number of limbs of each [VecZnxDft].
-    /// This method is equivalent to [Self::cols].
-    fn limbs(&self) -> usize {
-        self.cols
-    }
-
     /// Returns the number of rows (i.e. of [VecZnxDft]) of the [VmpPMat]
     fn rows(&self) -> usize {
         self.rows
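The invariant behind `cols()` for the owning type is that the flat backing array stores `cols` contiguous blocks of `n` coefficients, so the count is just `data.len() / n`. A standalone illustration of that arithmetic (not crate API):

```rust
// For VecZnx, the backing array holds `cols` blocks of `n` coefficients.
fn cols_of(data_len: usize, n: usize) -> usize {
    debug_assert_eq!(data_len % n, 0, "backing array must be a multiple of n");
    data_len / n
}

fn main() {
    assert_eq!(cols_of(8 * 5, 8), 5); // n = 8 with 5 columns
}
```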
@@ -31,6 +31,7 @@ pub use vmp::*;

 pub const GALOISGENERATOR: u64 = 5;

+#[allow(dead_code)]
 fn is_aligned<T>(ptr: *const T, align: usize) -> bool {
     (ptr as usize) % align == 0
 }
@@ -3,8 +3,8 @@ use rand_distr::{Distribution, Normal};
 use sampling::source::Source;

 pub trait Sampling<T: VecZnxApi + Infos> {
-    /// Fills the first `limbs` limbs with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
-    fn fill_uniform(&self, log_base2k: usize, a: &mut T, limbs: usize, source: &mut Source);
+    /// Fills the first `cols` cols with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
+    fn fill_uniform(&self, log_base2k: usize, a: &mut T, cols: usize, source: &mut Source);

     /// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
     fn add_dist_f64<D: Distribution<f64>>(
@@ -30,11 +30,11 @@ pub trait Sampling<T: VecZnxApi + Infos> {
 }

 impl<T: VecZnxApi + Infos> Sampling<T> for Module {
-    fn fill_uniform(&self, log_base2k: usize, a: &mut T, limbs: usize, source: &mut Source) {
+    fn fill_uniform(&self, log_base2k: usize, a: &mut T, cols: usize, source: &mut Source) {
         let base2k: u64 = 1 << log_base2k;
         let mask: u64 = base2k - 1;
         let base2k_half: i64 = (base2k >> 1) as i64;
-        let size: usize = a.n() * limbs;
+        let size: usize = a.n() * cols;
         a.raw_mut()[..size]
             .iter_mut()
             .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
@@ -58,7 +58,7 @@ impl<T: VecZnxApi + Infos> Sampling<T> for Module {
         let log_base2k_rem: usize = log_k % log_base2k;

         if log_base2k_rem != 0 {
-            a.at_mut(a.limbs() - 1).iter_mut().for_each(|a| {
+            a.at_mut(a.cols() - 1).iter_mut().for_each(|a| {
                 let mut dist_f64: f64 = dist.sample(source);
                 while dist_f64.abs() > bound {
                     dist_f64 = dist.sample(source)
@@ -66,7 +66,7 @@ impl<T: VecZnxApi + Infos> Sampling<T> for Module {
                 *a += (dist_f64.round() as i64) << log_base2k_rem
             });
         } else {
-            a.at_mut(a.limbs() - 1).iter_mut().for_each(|a| {
+            a.at_mut(a.cols() - 1).iter_mut().for_each(|a| {
                 let mut dist_f64: f64 = dist.sample(source);
                 while dist_f64.abs() > bound {
                     dist_f64 = dist.sample(source)
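The `while dist_f64.abs() > bound` loop above is plain rejection sampling: draw from the distribution and retry until the sample falls inside \[-bound, bound\]. A self-contained sketch of the same idea with `rand_distr` (independent of this crate's `Source` type):

```rust
use rand::rngs::StdRng;
use rand::SeedableRng;
use rand_distr::{Distribution, Normal};

/// Draws from `dist` until the sample lands in [-bound, bound],
/// then rounds it to the nearest integer.
fn sample_bounded(dist: &Normal<f64>, bound: f64, rng: &mut StdRng) -> i64 {
    let mut x: f64 = dist.sample(rng);
    while x.abs() > bound {
        x = dist.sample(rng);
    }
    x.round() as i64
}

fn main() {
    let mut rng = StdRng::seed_from_u64(1);
    let dist = Normal::new(0.0, 3.2).unwrap(); // sigma = 3.2, a common FHE noise width
    let e = sample_bounded(&dist, 19.0, &mut rng); // bound of roughly 6*sigma
    assert!(e.abs() <= 19);
}
```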
@@ -77,8 +77,8 @@ impl SvpPPol {
         SvpPPol(bytes.as_mut_ptr() as *mut svp::svp_ppol_t, size)
     }

-    /// Returns the number of limbs of the [SvpPPol], which is always 1.
-    pub fn limbs(&self) -> usize {
+    /// Returns the number of cols of the [SvpPPol], which is always 1.
+    pub fn cols(&self) -> usize {
         1
     }
 }
@@ -101,7 +101,7 @@ pub trait SvpPPolOps {
         c: &mut VecZnxDft,
         a: &SvpPPol,
         b: &T,
-        b_limbs: usize,
+        b_cols: usize,
     );
 }

@@ -123,22 +123,22 @@ impl SvpPPolOps for Module {
         c: &mut VecZnxDft,
         a: &SvpPPol,
         b: &T,
-        b_limbs: usize,
+        b_cols: usize,
     ) {
         assert!(
-            c.limbs() >= b_limbs,
-            "invalid c_vector: c_vector.limbs()={} < b.limbs()={}",
-            c.limbs(),
-            b_limbs
+            c.cols() >= b_cols,
+            "invalid c_vector: c_vector.cols()={} < b.cols()={}",
+            c.cols(),
+            b_cols
         );
         unsafe {
             svp::svp_apply_dft(
                 self.0,
                 c.0,
-                b_limbs as u64,
+                b_cols as u64,
                 a.0,
                 b.as_ptr(),
-                b_limbs as u64,
+                b_cols as u64,
                 b.n() as u64,
             )
         }
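Reading the assertion above: `svp_apply_dft` multiplies each of the first `b_cols` columns of `b` by the prepared scalar polynomial `a`, writing the result into the DFT-domain vector `c`, and it requires `c.cols() >= b_cols`. A call-site sketch; note that the `new_svp_ppol` allocator name is hypothetical, since this diff does not show how an [SvpPPol] is allocated:

```rust
use base2k::{Module, FFT64, SvpPPolOps, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps};

fn main() {
    let n: usize = 1024;
    let cols: usize = 5;
    let module: Module = Module::new::<FFT64>(n);

    let s = module.new_svp_ppol();                       // hypothetical allocator name
    let b: VecZnx = module.new_vec_znx(cols);
    let mut c: VecZnxDft = module.new_vec_znx_dft(cols); // c.cols() >= b_cols holds

    // c <- s * DFT(b), over the first `cols` columns of b.
    module.svp_apply_dft(&mut c, &s, &b, cols);
}
```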
@@ -8,19 +8,66 @@ use itertools::izip;
 use std::cmp::min;

 pub trait VecZnxApi {
+    type Owned: VecZnxApi + Infos;
+
+    fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> Self::Owned;
+
     /// Returns the minimum size of the [u8] array required to assign a
-    /// new backend array to a [VecZnx] through [VecZnx::from_bytes].
+    /// new backend array.
     fn bytes_of(n: usize, limbs: usize) -> usize;

     /// Returns the backing array.
     fn raw(&self) -> &[i64];

     /// Returns the mutable backing array.
     fn raw_mut(&mut self) -> &mut [i64];

     /// Returns a non-mutable pointer to the backing array.
     fn as_ptr(&self) -> *const i64;

     /// Returns a mutable pointer to the backing array.
     fn as_mut_ptr(&mut self) -> *mut i64;

     /// Returns a non-mutable reference to the i-th limb.
     fn at(&self, i: usize) -> &[i64];

     /// Returns a mutable reference to the i-th limb.
     fn at_mut(&mut self, i: usize) -> &mut [i64];

     /// Returns a non-mutable pointer to the i-th limb.
     fn at_ptr(&self, i: usize) -> *const i64;

     /// Returns a mutable pointer to the i-th limb.
     fn at_mut_ptr(&mut self, i: usize) -> *mut i64;

     /// Zeroes the backing array.
     fn zero(&mut self);
+    fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]);
+
+    /// Right shifts the coefficients by k bits.
+    ///
+    /// # Arguments
+    ///
+    /// * `log_base2k`: the base two logarithm of the coefficients decomposition.
+    /// * `k`: the shift amount.
+    /// * `carry`: scratch space of size at least equal to self.n() * self.limbs() << 3.
+    ///
+    /// # Panics
+    ///
+    /// The method will panic if carry.len() < self.n() * self.limbs() << 3.
+    fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]);
+
+    /// If self.n() > a.n(): Extracts X^{i*self.n()/a.n()} -> X^{i}.
+    /// If self.n() < a.n(): Extracts X^{i} -> X^{i*a.n()/self.n()}.
+    ///
+    /// # Arguments
+    ///
+    /// * `a`: the receiver polynomial in which the extracted coefficients are stored.
+    fn switch_degree<T: VecZnxApi + Infos>(&self, a: &mut T)
+    where
+        Self: AsRef<T>;
+
+    fn print(&self, limbs: usize, n: usize);
 }

 pub fn bytes_of_vec_znx(n: usize, limbs: usize) -> usize {
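The `rsh` contract added to the trait fixes the scratch-buffer size at `n * limbs * 8` bytes (the `<< 3`). A sketch of a call site that sizes the carry buffer accordingly (type and method names taken from this file; the exact import path is an assumption):

```rust
use base2k::{Infos, VecZnx, VecZnxApi};

fn main() {
    let (n, cols) = (8usize, 5usize);
    let log_base2k: usize = 17;

    let mut a: VecZnx = VecZnx::new(n, cols);

    // Scratch space: at least n * cols * 8 bytes, per the rsh contract above.
    let mut carry: Vec<u8> = vec![0u8; a.n() * a.cols() * 8];

    // Divide every encoded coefficient by 2^3 (right shift by 3 bits).
    a.rsh(log_base2k, 3, &mut carry);
}
```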
@@ -33,14 +80,16 @@ pub struct VecZnxBorrow {
     pub data: *mut i64,
 }

-impl VecZnxBorrow {
+impl VecZnxApi for VecZnxBorrow {
+    type Owned = VecZnxBorrow;
+
     /// Returns a new struct implementing [VecZnxBorrow] with the provided data as backing array.
     ///
     /// The struct will *NOT* take ownership of buf[..[VecZnx::bytes_of]]
     ///
     /// User must ensure that data is properly aligned and that
     /// the size of data is at least equal to [VecZnx::bytes_of].
-    pub fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> VecZnxBorrow {
+    fn from_bytes(n: usize, limbs: usize, bytes: &mut [u8]) -> Self::Owned {
         let size = Self::bytes_of(n, limbs);
         assert!(
             bytes.len() >= size,
@@ -56,9 +105,7 @@ impl VecZnxBorrow {
             data: cast_mut(&mut bytes[..size]).as_mut_ptr(),
         }
     }
-}

-impl VecZnxApi for VecZnxBorrow {
     fn bytes_of(n: usize, limbs: usize) -> usize {
         bytes_of_vec_znx(n, limbs)
     }
@@ -104,39 +151,35 @@ impl VecZnxApi for VecZnxBorrow {
     }

     fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) {
-        let carry_i64: &mut [i64] = cast_mut(carry);
-
-        unsafe {
-            znx::znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
-            (0..self.limbs()).rev().for_each(|i| {
-                znx::znx_normalize(
-                    self.n as u64,
-                    log_base2k as u64,
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                )
-            });
-        }
+        assert!(
+            carry.len() >= self.n() * 8,
+            "invalid carry: carry.len()={} < self.n()={}",
+            carry.len(),
+            self.n()
+        );
+        normalize(log_base2k, self, carry)
+    }
+
+    fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) {
+        rsh(log_base2k, self, k, carry)
+    }
+
+    fn switch_degree<T: VecZnxApi + Infos>(&self, a: &mut T)
+    where
+        Self: AsRef<T>,
+    {
+        switch_degree(a, self.as_ref());
     }

     fn print(&self, limbs: usize, n: usize) {
         (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
     }
 }

-impl VecZnx {
+impl VecZnxApi for VecZnx {
+    type Owned = VecZnx;
+
     /// Returns a new struct implementing [VecZnx] with the provided data as backing array.
     ///
     /// The struct will take ownership of buf[..[VecZnx::bytes_of]]
     ///
     /// User must ensure that data is properly aligned and that
     /// the size of data is at least equal to [VecZnx::bytes_of].
-    pub fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> VecZnx {
+    fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> Self::Owned {
         let size = Self::bytes_of(n, limbs);
         assert!(
             buf.len() >= size,
@@ -152,9 +195,7 @@ impl VecZnx {
             data: alias_mut_slice_to_vec(cast_mut(&mut buf[..size])),
         }
     }
-}

-impl VecZnxApi for VecZnx {
     fn bytes_of(n: usize, limbs: usize) -> usize {
         bytes_of_vec_znx(n, limbs)
     }
@@ -167,66 +208,53 @@ impl VecZnxApi for VecZnx {
         &mut self.data
     }

     /// Returns a non-mutable pointer to the backing array of the [VecZnx].
     fn as_ptr(&self) -> *const i64 {
         self.data.as_ptr()
     }

     /// Returns a mutable pointer to the backing array of the [VecZnx].
     fn as_mut_ptr(&mut self) -> *mut i64 {
         self.data.as_mut_ptr()
     }

     /// Returns a non-mutable reference to the i-th limb of the [VecZnx].
     fn at(&self, i: usize) -> &[i64] {
         let n: usize = self.n();
         &self.raw()[n * i..n * (i + 1)]
     }

     /// Returns a mutable reference to the i-th limb of the [VecZnx].
     fn at_mut(&mut self, i: usize) -> &mut [i64] {
         let n: usize = self.n();
         &mut self.raw_mut()[n * i..n * (i + 1)]
     }

     /// Returns a non-mutable pointer to the i-th limb of the [VecZnx].
     fn at_ptr(&self, i: usize) -> *const i64 {
         &self.data[i * self.n] as *const i64
     }

     /// Returns a mutable pointer to the i-th limb of the [VecZnx].
     fn at_mut_ptr(&mut self, i: usize) -> *mut i64 {
         &mut self.data[i * self.n] as *mut i64
     }

     /// Zeroes the backing array of the [VecZnx].
     fn zero(&mut self) {
         unsafe { znx::znx_zero_i64_ref(self.data.len() as u64, self.data.as_mut_ptr()) }
     }

     fn normalize(&mut self, log_base2k: usize, carry: &mut [u8]) {
-        let carry_i64: &mut [i64] = cast_mut(carry);
-
-        unsafe {
-            znx::znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
-            (0..self.limbs()).rev().for_each(|i| {
-                znx::znx_normalize(
-                    self.n as u64,
-                    log_base2k as u64,
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                    self.at_mut_ptr(i),
-                    carry_i64.as_mut_ptr(),
-                )
-            });
-        }
+        assert!(
+            carry.len() >= self.n() * 8,
+            "invalid carry: carry.len()={} < self.n()={}",
+            carry.len(),
+            self.n()
+        );
+        normalize(log_base2k, self, carry)
+    }
+
+    fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) {
+        rsh(log_base2k, self, k, carry)
+    }
+
+    fn switch_degree<T: VecZnxApi + Infos>(&self, a: &mut T)
+    where
+        Self: AsRef<T>,
+    {
+        switch_degree(a, self.as_ref())
     }

     fn print(&self, limbs: usize, n: usize) {
         (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
     }
 }
@@ -269,99 +297,105 @@ impl VecZnx {
     }

         self.data
-            .truncate((self.limbs() - k / log_base2k) * self.n());
+            .truncate((self.cols() - k / log_base2k) * self.n());

         let k_rem: usize = k % log_base2k;

         if k_rem != 0 {
             let mask: i64 = ((1 << (log_base2k - k_rem - 1)) - 1) << k_rem;
-            self.at_mut(self.limbs() - 1)
+            self.at_mut(self.cols() - 1)
                 .iter_mut()
                 .for_each(|x: &mut i64| *x &= mask)
         }
     }
+}

-    /// Right shifts the coefficients by k bits.
-    ///
-    /// # Arguments
-    ///
-    /// * `log_base2k`: the base two logarithm of the coefficients decomposition.
-    /// * `k`: the shift amount.
-    /// * `carry`: scratch space of size at least equal to self.n() * self.limbs() << 3.
-    ///
-    /// # Panics
-    ///
-    /// The method will panic if carry.len() < self.n() * self.limbs() << 3.
-    pub fn rsh(&mut self, log_base2k: usize, k: usize, carry: &mut [u8]) {
-        let n: usize = self.n();
-
-        assert!(
-            carry.len() >> 3 >= n,
-            "invalid carry: carry.len()/8={} < self.n()={}",
-            carry.len() >> 3,
-            n
-        );
-
-        let limbs: usize = self.limbs();
-        let limbs_steps: usize = k / log_base2k;
-
-        self.data.rotate_right(self.n * limbs_steps);
-        unsafe {
-            znx::znx_zero_i64_ref((self.n * limbs_steps) as u64, self.data.as_mut_ptr());
-        }
-
-        let k_rem = k % log_base2k;
-
-        if k_rem != 0 {
-            let carry_i64: &mut [i64] = cast_mut(carry);
-
-            unsafe {
-                znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr());
-            }
-
-            let mask: i64 = (1 << k_rem) - 1;
-            let log_base2k: usize = log_base2k;
-
-            (limbs_steps..limbs).for_each(|i| {
-                izip!(carry_i64.iter_mut(), self.at_mut(i).iter_mut()).for_each(|(ci, xi)| {
-                    *xi += *ci << log_base2k;
-                    *ci = *xi & mask;
-                    *xi /= 1 << k_rem;
-                });
-            })
-        }
-    }
-
-    /// If self.n() > a.n(): Extracts X^{i*self.n()/a.n()} -> X^{i}.
-    /// If self.n() < a.n(): Extracts X^{i} -> X^{i*a.n()/self.n()}.
-    ///
-    /// # Arguments
-    ///
-    /// * `a`: the receiver polynomial in which the extracted coefficients are stored.
-    pub fn switch_degree(&self, a: &mut VecZnx) {
-        let (n_in, n_out) = (self.n(), a.n());
-        let (gap_in, gap_out): (usize, usize);
-
-        if n_in > n_out {
-            (gap_in, gap_out) = (n_in / n_out, 1)
-        } else {
-            (gap_in, gap_out) = (1, n_out / n_in);
-            a.zero();
-        }
-
-        let limbs = min(self.limbs(), a.limbs());
-
-        (0..limbs).for_each(|i| {
-            izip!(
-                self.at(i).iter().step_by(gap_in),
-                a.at_mut(i).iter_mut().step_by(gap_out)
-            )
-            .for_each(|(x_in, x_out)| *x_out = *x_in);
-        });
-    }
-
-    pub fn print_limbs(&self, limbs: usize, n: usize) {
-        (0..limbs).for_each(|i| println!("{}: {:?}", i, &self.at(i)[..n]))
-    }
-}
+pub fn switch_degree<T: VecZnxApi + Infos>(b: &mut T, a: &T) {
+    let (n_in, n_out) = (a.n(), b.n());
+    let (gap_in, gap_out): (usize, usize);
+
+    if n_in > n_out {
+        (gap_in, gap_out) = (n_in / n_out, 1)
+    } else {
+        (gap_in, gap_out) = (1, n_out / n_in);
+        b.zero();
+    }
+
+    let limbs = min(a.cols(), b.cols());
+
+    (0..limbs).for_each(|i| {
+        izip!(
+            a.at(i).iter().step_by(gap_in),
+            b.at_mut(i).iter_mut().step_by(gap_out)
+        )
+        .for_each(|(x_in, x_out)| *x_out = *x_in);
+    });
+}
+
+fn normalize<T: VecZnxApi + Infos>(log_base2k: usize, a: &mut T, carry: &mut [u8]) {
+    let n: usize = a.n();
+
+    assert!(
+        carry.len() >= n * 8,
+        "invalid carry: carry.len()={} < self.n()={}",
+        carry.len(),
+        n
+    );
+
+    let carry_i64: &mut [i64] = cast_mut(carry);
+
+    unsafe {
+        znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr());
+        (0..a.cols()).rev().for_each(|i| {
+            znx::znx_normalize(
+                n as u64,
+                log_base2k as u64,
+                a.at_mut_ptr(i),
+                carry_i64.as_mut_ptr(),
+                a.at_mut_ptr(i),
+                carry_i64.as_mut_ptr(),
+            )
+        });
+    }
+}
+
+pub fn rsh<T: VecZnxApi + Infos>(log_base2k: usize, a: &mut T, k: usize, carry: &mut [u8]) {
+    let n: usize = a.n();
+
+    assert!(
+        carry.len() >> 3 >= n,
+        "invalid carry: carry.len()/8={} < self.n()={}",
+        carry.len() >> 3,
+        n
+    );
+
+    let limbs: usize = a.cols();
+    let limbs_steps: usize = k / log_base2k;
+
+    a.raw_mut().rotate_right(n * limbs_steps);
+    unsafe {
+        znx::znx_zero_i64_ref((n * limbs_steps) as u64, a.as_mut_ptr());
+    }
+
+    let k_rem = k % log_base2k;
+
+    if k_rem != 0 {
+        let carry_i64: &mut [i64] = cast_mut(carry);
+
+        unsafe {
+            znx::znx_zero_i64_ref(n as u64, carry_i64.as_mut_ptr());
+        }
+
+        let mask: i64 = (1 << k_rem) - 1;
+        let log_base2k: usize = log_base2k;
+
+        (limbs_steps..limbs).for_each(|i| {
+            izip!(carry_i64.iter_mut(), a.at_mut(i).iter_mut()).for_each(|(ci, xi)| {
+                *xi += *ci << log_base2k;
+                *ci = *xi & mask;
+                *xi /= 1 << k_rem;
+            });
+        })
+    }
+}
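The new free function `switch_degree` maps between ring degrees by strided copying: going down it keeps every `n_in/n_out`-th coefficient, going up it spreads the input out with gap `n_out/n_in` and zeroes the remaining slots. A tiny self-contained model of the index arithmetic (an illustration, not the crate function):

```rust
// Down-switch n_in = 8 -> n_out = 4: keep X^{2i} -> X^{i}.
// Up-switch   n_in = 4 -> n_out = 8: send X^{i} -> X^{2i}, rest zero.
fn switch_degree_model(a: &[i64], n_out: usize) -> Vec<i64> {
    let n_in = a.len();
    let mut b = vec![0i64; n_out];
    if n_in > n_out {
        let gap_in = n_in / n_out;
        (0..n_out).for_each(|i| b[i] = a[i * gap_in]);
    } else {
        let gap_out = n_out / n_in;
        (0..n_in).for_each(|i| b[i * gap_out] = a[i]);
    }
    b
}

fn main() {
    assert_eq!(switch_degree_model(&[1, 2, 3, 4, 5, 6, 7, 8], 4), vec![1, 3, 5, 7]);
    assert_eq!(switch_degree_model(&[1, 2, 3, 4], 8), vec![1, 0, 2, 0, 3, 0, 4, 0]);
}
```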
@@ -413,7 +447,7 @@ pub trait VecZnxOps {
     ///
     /// This method requires that all [VecZnx] of b have the same ring degree
     /// and that b.n() * b.len() <= a.n()
-    fn vec_znx_split(&self, b: &mut Vec<VecZnx>, a: &VecZnx, buf: &mut VecZnx);
+    fn vec_znx_split<T: VecZnxApi + Infos>(&self, b: &mut Vec<T>, a: &T, buf: &mut T);

     /// Merges the subrings a into b.
     ///
@@ -421,7 +455,7 @@ pub trait VecZnxOps {
     ///
     /// This method requires that all [VecZnx] of a have the same ring degree
     /// and that a.n() * a.len() <= b.n()
-    fn vec_znx_merge(&self, b: &mut VecZnx, a: &Vec<VecZnx>);
+    fn vec_znx_merge<T: VecZnxApi + Infos>(&self, b: &mut T, a: &Vec<T>);
 }

 impl VecZnxOps for Module {
@@ -439,13 +473,13 @@ impl VecZnxOps for Module {
         vec_znx::vec_znx_add(
             self.0,
             c.as_mut_ptr(),
-            c.limbs() as u64,
+            c.cols() as u64,
             c.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.as_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
         )
     }
@@ -457,13 +491,13 @@ impl VecZnxOps for Module {
         vec_znx::vec_znx_add(
             self.0,
             b.as_mut_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.as_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
         )
     }
@@ -475,13 +509,13 @@ impl VecZnxOps for Module {
         vec_znx::vec_znx_sub(
             self.0,
             c.as_mut_ptr(),
-            c.limbs() as u64,
+            c.cols() as u64,
             c.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.as_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
         )
     }
@@ -493,13 +527,13 @@ impl VecZnxOps for Module {
         vec_znx::vec_znx_sub(
             self.0,
             b.as_mut_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.as_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
         )
     }
@@ -510,10 +544,10 @@ impl VecZnxOps for Module {
         vec_znx::vec_znx_negate(
             self.0,
             b.as_mut_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -524,10 +558,10 @@ impl VecZnxOps for Module {
         vec_znx::vec_znx_negate(
             self.0,
             a.as_mut_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -539,10 +573,10 @@ impl VecZnxOps for Module {
             self.0,
             k,
             a.as_mut_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.as_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
         )
     }
@@ -554,10 +588,10 @@ impl VecZnxOps for Module {
             self.0,
             k,
             a.as_mut_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -603,13 +637,13 @@ impl VecZnxOps for Module {
     fn vec_znx_automorphism<T: VecZnxApi + Infos>(&self, k: i64, b: &mut T, a: &T, limbs_a: usize) {
         assert_eq!(a.n(), self.n());
         assert_eq!(b.n(), self.n());
-        assert!(a.limbs() >= limbs_a);
+        assert!(a.cols() >= limbs_a);
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.0,
                 k,
                 b.as_mut_ptr(),
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.n() as u64,
                 a.as_ptr(),
                 limbs_a as u64,
@@ -660,13 +694,13 @@ impl VecZnxOps for Module {
         limbs_a: usize,
     ) {
         assert_eq!(a.n(), self.n());
-        assert!(a.limbs() >= limbs_a);
+        assert!(a.cols() >= limbs_a);
         unsafe {
             vec_znx::vec_znx_automorphism(
                 self.0,
                 k,
                 a.as_mut_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 a.as_ptr(),
                 limbs_a as u64,
@@ -675,7 +709,7 @@ impl VecZnxOps for Module {
         }
     }

-    fn vec_znx_split(&self, b: &mut Vec<VecZnx>, a: &VecZnx, buf: &mut VecZnx) {
+    fn vec_znx_split<T: VecZnxApi + Infos>(&self, b: &mut Vec<T>, a: &T, buf: &mut T) {
         let (n_in, n_out) = (a.n(), b[0].n());

         assert!(
@@ -692,16 +726,16 @@ impl VecZnxOps for Module {

         b.iter_mut().enumerate().for_each(|(i, bi)| {
             if i == 0 {
-                a.switch_degree(bi);
+                switch_degree(bi, a);
                 self.vec_znx_rotate(-1, buf, a);
             } else {
-                buf.switch_degree(bi);
+                switch_degree(bi, buf);
                 self.vec_znx_rotate_inplace(-1, buf);
             }
         })
     }

-    fn vec_znx_merge(&self, b: &mut VecZnx, a: &Vec<VecZnx>) {
+    fn vec_znx_merge<T: VecZnxApi + Infos>(&self, b: &mut T, a: &Vec<T>) {
         let (n_in, n_out) = (b.n(), a[0].n());

         assert!(
@@ -717,7 +751,7 @@ impl VecZnxOps for Module {
         });

         a.iter().enumerate().for_each(|(_, ai)| {
-            ai.switch_degree(b);
+            switch_degree(b, ai);
             self.vec_znx_rotate_inplace(-1, b);
         });
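A call-site sketch for the element-wise operations above, reusing the allocation helpers shown in the doc examples later in this diff (`Module::new::<FFT64>` and `new_vec_znx`); the argument order for `vec_znx_add` is assumed from the FFI call order:

```rust
use base2k::{Free, Module, FFT64, VecZnx, VecZnxOps};

fn main() {
    let n: usize = 1024;
    let cols: usize = 5;
    let module: Module = Module::new::<FFT64>(n);

    let a: VecZnx = module.new_vec_znx(cols);
    let b: VecZnx = module.new_vec_znx(cols);
    let mut c: VecZnx = module.new_vec_znx(cols);

    // c <- a + b, column by column over the base-2^k decomposition.
    module.vec_znx_add(&mut c, &a, &b);

    module.free();
}
```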
@@ -8,39 +8,39 @@ impl VecZnxBig {
     /// Returns a new [VecZnxBig] with the provided data as backing array.
     /// User must ensure that data is properly aligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_big].
-    pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxBig {
+    pub fn from_bytes(cols: usize, data: &mut [u8]) -> VecZnxBig {
         VecZnxBig(
             data.as_mut_ptr() as *mut vec_znx_big::vec_znx_bigcoeff_t,
-            limbs,
+            cols,
         )
     }

     pub fn as_vec_znx_dft(&mut self) -> VecZnxDft {
         VecZnxDft(self.0 as *mut vec_znx_dft::vec_znx_dft_t, self.1)
     }
-    pub fn limbs(&self) -> usize {
+    pub fn cols(&self) -> usize {
         self.1
     }
 }

 pub trait VecZnxBigOps {
     /// Allocates a vector Z[X]/(X^N+1) that stores not normalized values.
-    fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig;
+    fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig;

     /// Returns a new [VecZnxBig] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxBig].
+    /// * `cols`: the number of cols of the [VecZnxBig].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_big].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_big].
-    fn new_vec_znx_big_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxBig;
+    fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig;

     /// Returns the minimum number of bytes necessary to allocate
     /// a new [VecZnxBig] through [VecZnxBig::from_bytes].
-    fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize;
+    fn bytes_of_vec_znx_big(&self, cols: usize) -> usize;

     /// b <- b - a
     fn vec_znx_big_sub_small_a_inplace<T: VecZnxApi + Infos>(&self, b: &mut VecZnxBig, a: &T);
@@ -89,22 +89,22 @@ pub trait VecZnxBigOps {
 }

 impl VecZnxBigOps for Module {
-    fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig {
-        unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, limbs as u64), limbs) }
+    fn new_vec_znx_big(&self, cols: usize) -> VecZnxBig {
+        unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, cols as u64), cols) }
     }

-    fn new_vec_znx_big_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxBig {
+    fn new_vec_znx_big_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxBig {
         assert!(
-            bytes.len() >= <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, limbs),
+            bytes.len() >= <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, cols),
             "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}",
             bytes.len(),
-            <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, limbs)
+            <Module as VecZnxBigOps>::bytes_of_vec_znx_big(self, cols)
         );
-        VecZnxBig::from_bytes(limbs, bytes)
+        VecZnxBig::from_bytes(cols, bytes)
     }

-    fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize {
-        unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, limbs as u64) as usize }
+    fn bytes_of_vec_znx_big(&self, cols: usize) -> usize {
+        unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, cols as u64) as usize }
     }

     fn vec_znx_big_sub_small_a_inplace<T: VecZnxApi + Infos>(&self, b: &mut VecZnxBig, a: &T) {
@@ -112,12 +112,12 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_sub_small_a(
             self.0,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
         )
     }
 }
@@ -132,12 +132,12 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_sub_small_a(
             self.0,
             c.0,
-            c.limbs() as u64,
+            c.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
         )
     }
 }
@@ -147,11 +147,11 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_add_small(
             self.0,
             c.0,
-            c.limbs() as u64,
+            c.cols() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -162,11 +162,11 @@ impl VecZnxBigOps for Module {
         vec_znx_big::vec_znx_big_add_small(
             self.0,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.as_ptr(),
-            a.limbs() as u64,
+            a.cols() as u64,
             a.n() as u64,
         )
     }
@@ -194,10 +194,10 @@ impl VecZnxBigOps for Module {
             self.0,
             log_base2k as u64,
             b.as_mut_ptr(),
-            b.limbs() as u64,
+            b.cols() as u64,
             b.n() as u64,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
             tmp_bytes.as_mut_ptr(),
         )
     }
@@ -228,7 +228,7 @@ impl VecZnxBigOps for Module {
             self.0,
             log_base2k as u64,
             res.as_mut_ptr(),
-            res.limbs() as u64,
+            res.cols() as u64,
             res.n() as u64,
             a.0,
             a_range_begin as u64,
@@ -245,9 +245,9 @@ impl VecZnxBigOps for Module {
             self.0,
             gal_el,
             b.0,
-            b.limbs() as u64,
+            b.cols() as u64,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
         );
     }
 }
@@ -258,9 +258,9 @@ impl VecZnxBigOps for Module {
             self.0,
             gal_el,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
             a.0,
-            a.limbs() as u64,
+            a.cols() as u64,
         );
     }
 }
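The `from_bytes` constructors let callers provide their own scratch memory instead of allocating through the FFI. A hedged sketch of the intended pattern, using only the signatures shown above (alignment responsibility stays with the caller, as the docs stress):

```rust
use base2k::{Module, FFT64, VecZnxBig, VecZnxBigOps};

fn main() {
    let n: usize = 1024;
    let cols: usize = 5;
    let module: Module = Module::new::<FFT64>(n);

    // Ask the module how many bytes a `cols`-column VecZnxBig needs,
    // then back it with a caller-owned buffer.
    let mut bytes: Vec<u8> = vec![0u8; module.bytes_of_vec_znx_big(cols)];
    let big: VecZnxBig = module.new_vec_znx_big_from_bytes(cols, &mut bytes);
    assert_eq!(big.cols(), cols);

    // `bytes` must outlive `big`: the VecZnxBig only borrows the storage.
}
```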
@@ -1,7 +1,7 @@
 use crate::ffi::vec_znx_big;
 use crate::ffi::vec_znx_dft;
 use crate::ffi::vec_znx_dft::bytes_of_vec_znx_dft;
-use crate::{Infos, Module, VecZnx, VecZnxApi, VecZnxBig};
+use crate::{Infos, Module, VecZnxApi, VecZnxBig};

 pub struct VecZnxDft(pub *mut vec_znx_dft::vec_znx_dft_t, pub usize);

@@ -9,8 +9,8 @@ impl VecZnxDft {
     /// Returns a new [VecZnxDft] with the provided data as backing array.
     /// User must ensure that data is properly aligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_dft].
-    pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxDft {
-        VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, limbs)
+    pub fn from_bytes(cols: usize, data: &mut [u8]) -> VecZnxDft {
+        VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, cols)
     }

     /// Cast a [VecZnxDft] into a [VecZnxBig].
@@ -19,36 +19,36 @@ impl VecZnxDft {
     pub fn as_vec_znx_big(&mut self) -> VecZnxBig {
         VecZnxBig(self.0 as *mut vec_znx_big::vec_znx_bigcoeff_t, self.1)
     }
-    pub fn limbs(&self) -> usize {
+    pub fn cols(&self) -> usize {
         self.1
     }
 }

 pub trait VecZnxDftOps {
     /// Allocates a vector Z[X]/(X^N+1) that stores normalized in the DFT space.
-    fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft;
+    fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft;

     /// Returns a new [VecZnxDft] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxDft].
+    /// * `cols`: the number of cols of the [VecZnxDft].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_dft].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft].
-    fn new_vec_znx_dft_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxDft;
+    fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft;

     /// Returns a new [VecZnxDft] with the provided bytes array as backing array.
     ///
     /// # Arguments
     ///
-    /// * `limbs`: the number of limbs of the [VecZnxDft].
+    /// * `cols`: the number of cols of the [VecZnxDft].
     /// * `bytes`: a byte array of size at least [Module::bytes_of_vec_znx_dft].
     ///
     /// # Panics
     /// If `bytes.len()` < [Module::bytes_of_vec_znx_dft].
-    fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize;
+    fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize;

     /// Returns the minimum number of bytes necessary to allocate
     /// a new [VecZnxDft] through [VecZnxDft::from_bytes].
@@ -69,33 +69,33 @@ pub trait VecZnxDftOps {
 }

 impl VecZnxDftOps for Module {
-    fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft {
-        unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, limbs as u64), limbs) }
+    fn new_vec_znx_dft(&self, cols: usize) -> VecZnxDft {
+        unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, cols as u64), cols) }
     }

-    fn new_vec_znx_dft_from_bytes(&self, limbs: usize, bytes: &mut [u8]) -> VecZnxDft {
+    fn new_vec_znx_dft_from_bytes(&self, cols: usize, bytes: &mut [u8]) -> VecZnxDft {
         assert!(
-            bytes.len() >= <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, limbs),
+            bytes.len() >= <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, cols),
             "invalid bytes: bytes.len()={} < bytes_of_vec_znx_dft={}",
             bytes.len(),
-            <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, limbs)
+            <Module as VecZnxDftOps>::bytes_of_vec_znx_dft(self, cols)
         );
-        VecZnxDft::from_bytes(limbs, bytes)
+        VecZnxDft::from_bytes(cols, bytes)
     }

-    fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize {
-        unsafe { bytes_of_vec_znx_dft(self.0, limbs as u64) as usize }
+    fn bytes_of_vec_znx_dft(&self, cols: usize) -> usize {
+        unsafe { bytes_of_vec_znx_dft(self.0, cols as u64) as usize }
     }

     fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft, a_limbs: usize) {
         assert!(
-            b.limbs() >= a_limbs,
-            "invalid c_vector: b_vector.limbs()={} < a_limbs={}",
-            b.limbs(),
+            b.cols() >= a_limbs,
+            "invalid c_vector: b_vector.cols()={} < a_limbs={}",
+            b.cols(),
             a_limbs
         );
         unsafe {
-            vec_znx_dft::vec_znx_idft_tmp_a(self.0, b.0, b.limbs() as u64, a.0, a_limbs as u64)
+            vec_znx_dft::vec_znx_idft_tmp_a(self.0, b.0, b.cols() as u64, a.0, a_limbs as u64)
         }
     }

@@ -106,21 +106,21 @@ impl VecZnxDftOps for Module {
     /// b <- DFT(a)
     ///
     /// # Panics
-    /// If b.limbs < a_limbs
-    fn vec_znx_dft<T: VecZnxApi + Infos>(&self, b: &mut VecZnxDft, a: &T, a_limbs: usize) {
+    /// If b.cols < a_cols
+    fn vec_znx_dft<T: VecZnxApi + Infos>(&self, b: &mut VecZnxDft, a: &T, a_cols: usize) {
         assert!(
-            b.limbs() >= a_limbs,
-            "invalid a_limbs: b.limbs()={} < a_limbs={}",
-            b.limbs(),
-            a_limbs
+            b.cols() >= a_cols,
+            "invalid a_cols: b.cols()={} < a_cols={}",
+            b.cols(),
+            a_cols
         );
         unsafe {
             vec_znx_dft::vec_znx_dft(
                 self.0,
                 b.0,
-                b.limbs() as u64,
+                b.cols() as u64,
                 a.as_ptr(),
-                a_limbs as u64,
+                a_cols as u64,
                 a.n() as u64,
             )
         }
@@ -131,20 +131,20 @@ impl VecZnxDftOps for Module {
         &self,
         b: &mut VecZnxBig,
         a: &mut VecZnxDft,
-        a_limbs: usize,
+        a_cols: usize,
         tmp_bytes: &mut [u8],
     ) {
         assert!(
-            b.limbs() >= a_limbs,
-            "invalid c_vector: b.limbs()={} < a_limbs={}",
-            b.limbs(),
-            a_limbs
+            b.cols() >= a_cols,
+            "invalid c_vector: b.cols()={} < a_cols={}",
+            b.cols(),
+            a_cols
         );
         assert!(
-            a.limbs() >= a_limbs,
-            "invalid c_vector: a.limbs()={} < a_limbs={}",
-            a.limbs(),
-            a_limbs
+            a.cols() >= a_cols,
+            "invalid c_vector: a.cols()={} < a_cols={}",
+            a.cols(),
+            a_cols
         );
         assert!(
             tmp_bytes.len() <= <Module as VecZnxDftOps>::vec_znx_idft_tmp_bytes(self),
@@ -156,9 +156,9 @@ impl VecZnxDftOps for Module {
             vec_znx_dft::vec_znx_idft(
                 self.0,
                 b.0,
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.0,
-                a_limbs as u64,
+                a_cols as u64,
                 tmp_bytes.as_mut_ptr(),
             )
         }
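A forward/backward transform sketch built from the signatures above. The safe-wrapper method names (`vec_znx_dft`, `vec_znx_idft`) are taken from the FFI calls they wrap, and `vec_znx_idft_tmp_bytes` from the scratch-size assertion; treat the exact call shapes as assumptions:

```rust
use base2k::{Free, Module, FFT64, VecZnx, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps};

fn main() {
    let n: usize = 1024;
    let cols: usize = 5;
    let module: Module = Module::new::<FFT64>(n);

    let a: VecZnx = module.new_vec_znx(cols);
    let mut a_dft: VecZnxDft = module.new_vec_znx_dft(cols);

    // Forward: a_dft <- DFT(a), over the first `cols` columns.
    module.vec_znx_dft(&mut a_dft, &a, cols);

    // Backward: non-normalized big-coefficient result, plus scratch space.
    let mut big = module.new_vec_znx_big(cols);
    let mut tmp: Vec<u8> = vec![0u8; module.vec_znx_idft_tmp_bytes()];
    module.vec_znx_idft(&mut big, &mut a_dft, cols, &mut tmp);

    module.free();
}
```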
@@ -1,5 +1,5 @@
 use crate::ffi::vmp;
-use crate::{Infos, Module, VecZnx, VecZnxApi, VecZnxDft};
+use crate::{Infos, Module, VecZnxApi, VecZnxDft};

 /// Vector Matrix Product Prepared Matrix: a vector of [VecZnx],
 /// stored as a 3D matrix in the DFT domain in a single contiguous array.
@@ -15,7 +15,7 @@ pub struct VmpPMat {
     pub data: *mut vmp::vmp_pmat_t,
     /// The number of [VecZnxDft].
     pub rows: usize,
-    /// The number of limbs in each [VecZnxDft].
+    /// The number of cols in each [VecZnxDft].
     pub cols: usize,
     /// The ring degree of each [VecZnxDft].
     pub n: usize,
@@ -86,7 +86,7 @@ pub trait VmpPMatOps {
     /// # Arguments
     ///
     /// * `rows`: number of rows (number of [VecZnxDft]).
-    /// * `cols`: number of cols (number of limbs of each [VecZnxDft]).
+    /// * `cols`: number of cols (number of cols of each [VecZnxDft]).
     fn new_vmp_pmat(&self, rows: usize, cols: usize) -> VmpPMat;

     /// Returns the number of bytes needed as scratch space for [VmpPMatOps::vmp_prepare_contiguous].
@@ -153,15 +153,17 @@ pub trait VmpPMatOps {
     /// vecznx.push(module.new_vec_znx(cols));
     /// });
     ///
+    /// let slices: Vec<&[i64]> = vecznx.iter().map(|v| v.data.as_slice()).collect();
+    ///
     /// let mut buf: Vec<u8> = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)];
     ///
     /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
-    /// module.vmp_prepare_dblptr(&mut vmp_pmat, &vecznx, &mut buf);
+    /// module.vmp_prepare_dblptr(&mut vmp_pmat, &slices, &mut buf);
     ///
     /// vmp_pmat.free();
     /// module.free();
     /// ```
-    fn vmp_prepare_dblptr<T: VecZnxApi + Infos>(&self, b: &mut VmpPMat, a: &Vec<T>, buf: &mut [u8]);
+    fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &[&[i64]], buf: &mut [u8]);

     /// Prepares the ith-row of [VmpPMat] from a vector of [VecZnx].
     ///
@@ -175,7 +177,7 @@ pub trait VmpPMatOps {
     /// The size of buf can be obtained with [VmpPMatOps::vmp_prepare_tmp_bytes].
     /// # Example
     /// ```
-    /// use base2k::{Module, FFT64, VmpPMat, VmpPMatOps, VecZnx, VecZnxOps, Free};
+    /// use base2k::{Module, FFT64, VmpPMat, VmpPMatOps, VecZnx, VecZnxApi, VecZnxOps, Free};
     /// use std::cmp::min;
     ///
     /// let n: usize = 1024;
@@ -188,31 +190,25 @@ pub trait VmpPMatOps {
     /// let mut buf: Vec<u8> = vec![u8::default(); module.vmp_prepare_tmp_bytes(rows, cols)];
     ///
     /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
-    /// module.vmp_prepare_row(&mut vmp_pmat, &vecznx, 0, &mut buf);
+    /// module.vmp_prepare_row(&mut vmp_pmat, vecznx.raw(), 0, &mut buf);
     ///
     /// vmp_pmat.free();
     /// module.free();
     /// ```
-    fn vmp_prepare_row<T: VecZnxApi + Infos>(
-        &self,
-        b: &mut VmpPMat,
-        a: &T,
-        row_i: usize,
-        tmp_bytes: &mut [u8],
-    );
+    fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, tmp_bytes: &mut [u8]);

     /// Returns the size of the scratch space necessary for [VmpPMatOps::vmp_apply_dft].
     ///
     /// # Arguments
     ///
-    /// * `c_limbs`: number of limbs of the output [VecZnxDft].
-    /// * `a_limbs`: number of limbs of the input [VecZnx].
+    /// * `c_cols`: number of cols of the output [VecZnxDft].
+    /// * `a_cols`: number of cols of the input [VecZnx].
     /// * `rows`: number of rows of the input [VmpPMat].
     /// * `cols`: number of cols of the input [VmpPMat].
     fn vmp_apply_dft_tmp_bytes(
         &self,
-        c_limbs: usize,
-        a_limbs: usize,
+        c_cols: usize,
+        a_cols: usize,
         rows: usize,
         cols: usize,
     ) -> usize;
@@ -223,8 +219,8 @@ pub trait VmpPMatOps {
     /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol])
     /// and each vector a [VecZnxDft] (row) of the [VmpPMat].
     ///
-    /// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and
-    /// `j` cols, the output is a [VecZnx] of `j` limbs.
+    /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and
+    /// `j` cols, the output is a [VecZnx] of `j` cols.
     ///
     /// If there is a mismatch between the dimensions the largest valid ones are used.
     ///
@@ -249,18 +245,18 @@ pub trait VmpPMatOps {
     /// let n = 1024;
     ///
     /// let module: Module = Module::new::<FFT64>(n);
-    /// let limbs: usize = 5;
+    /// let cols: usize = 5;
     ///
-    /// let rows: usize = limbs;
-    /// let cols: usize = limbs + 1;
-    /// let c_limbs: usize = cols;
-    /// let a_limbs: usize = limbs;
-    /// let tmp_bytes: usize = module.vmp_apply_dft_tmp_bytes(c_limbs, a_limbs, rows, cols);
+    /// let rows: usize = cols;
+    /// let cols: usize = cols + 1;
+    /// let c_cols: usize = cols;
+    /// let a_cols: usize = cols;
+    /// let tmp_bytes: usize = module.vmp_apply_dft_tmp_bytes(c_cols, a_cols, rows, cols);
     ///
     /// let mut buf: Vec<u8> = vec![0; tmp_bytes];
     /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
     ///
-    /// let a: VecZnx = module.new_vec_znx(limbs);
+    /// let a: VecZnx = module.new_vec_znx(cols);
     /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     /// module.vmp_apply_dft(&mut c_dft, &a, &vmp_pmat, &mut buf);
     ///
@@ -280,14 +276,14 @@ pub trait VmpPMatOps {
     ///
     /// # Arguments
     ///
-    /// * `c_limbs`: number of limbs of the output [VecZnxDft].
-    /// * `a_limbs`: number of limbs of the input [VecZnxDft].
+    /// * `c_cols`: number of cols of the output [VecZnxDft].
+    /// * `a_cols`: number of cols of the input [VecZnxDft].
     /// * `rows`: number of rows of the input [VmpPMat].
     /// * `cols`: number of cols of the input [VmpPMat].
     fn vmp_apply_dft_to_dft_tmp_bytes(
         &self,
-        c_limbs: usize,
-        a_limbs: usize,
+        c_cols: usize,
+        a_cols: usize,
         rows: usize,
         cols: usize,
     ) -> usize;
@@ -299,8 +295,8 @@ pub trait VmpPMatOps {
     /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol])
     /// and each vector a [VecZnxDft] (row) of the [VmpPMat].
     ///
-    /// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and
-    /// `j` cols, the output is a [VecZnx] of `j` limbs.
+    /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and
+    /// `j` cols, the output is a [VecZnx] of `j` cols.
     ///
     /// If there is a mismatch between the dimensions the largest valid ones are used.
     ///
@@ -325,18 +321,18 @@ pub trait VmpPMatOps {
     /// let n = 1024;
     ///
     /// let module: Module = Module::new::<FFT64>(n);
-    /// let limbs: usize = 5;
+    /// let cols: usize = 5;
     ///
-    /// let rows: usize = limbs;
-    /// let cols: usize = limbs + 1;
-    /// let c_limbs: usize = cols;
-    /// let a_limbs: usize = limbs;
-    /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(c_limbs, a_limbs, rows, cols);
+    /// let rows: usize = cols;
+    /// let cols: usize = cols + 1;
+    /// let c_cols: usize = cols;
+    /// let a_cols: usize = cols;
+    /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, rows, cols);
     ///
     /// let mut buf: Vec<u8> = vec![0; tmp_bytes];
     /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
     ///
-    /// let a_dft: VecZnxDft = module.new_vec_znx_dft(limbs);
+    /// let a_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     /// module.vmp_apply_dft_to_dft(&mut c_dft, &a_dft, &vmp_pmat, &mut buf);
     ///
@@ -354,8 +350,8 @@ pub trait VmpPMatOps {
     /// where each [crate::Scalar] is a limb of the input [VecZnxDft] (equivalent to an [crate::SvpPPol])
     /// and each vector a [VecZnxDft] (row) of the [VmpPMat].
     ///
-    /// As such, given an input [VecZnx] of `i` limbs and a [VmpPMat] of `i` rows and
-    /// `j` cols, the output is a [VecZnx] of `j` limbs.
+    /// As such, given an input [VecZnx] of `i` cols and a [VmpPMat] of `i` rows and
+    /// `j` cols, the output is a [VecZnx] of `j` cols.
    ///
     /// If there is a mismatch between the dimensions the largest valid ones are used.
     ///
@@ -379,17 +375,17 @@ pub trait VmpPMatOps {
     /// let n = 1024;
     ///
     /// let module: Module = Module::new::<FFT64>(n);
-    /// let limbs: usize = 5;
+    /// let cols: usize = 5;
     ///
-    /// let rows: usize = limbs;
-    /// let cols: usize = limbs + 1;
-    /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(limbs, limbs, rows, cols);
+    /// let rows: usize = cols;
+    /// let cols: usize = cols + 1;
+    /// let tmp_bytes: usize = module.vmp_apply_dft_to_dft_tmp_bytes(cols, cols, rows, cols);
     ///
     /// let mut buf: Vec<u8> = vec![0; tmp_bytes];
-    /// let a: VecZnx = module.new_vec_znx(limbs);
+    /// let a: VecZnx = module.new_vec_znx(cols);
     /// let mut vmp_pmat: VmpPMat = module.new_vmp_pmat(rows, cols);
     ///
-    /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(limbs);
+    /// let mut c_dft: VecZnxDft = module.new_vec_znx_dft(cols);
     /// module.vmp_apply_dft_to_dft_inplace(&mut c_dft, &vmp_pmat, &mut buf);
     ///
     /// c_dft.free();
@@ -428,12 +424,7 @@ impl VmpPMatOps for Module {
     }
 }

-    fn vmp_prepare_dblptr<T: VecZnxApi + Infos>(
-        &self,
-        b: &mut VmpPMat,
-        a: &Vec<T>,
-        buf: &mut [u8],
-    ) {
+    fn vmp_prepare_dblptr(&self, b: &mut VmpPMat, a: &[&[i64]], buf: &mut [u8]) {
         let ptrs: Vec<*const i64> = a.iter().map(|v| v.as_ptr()).collect();
         unsafe {
             vmp::vmp_prepare_dblptr(
@@ -447,13 +438,7 @@ impl VmpPMatOps for Module {
         }
     }

-    fn vmp_prepare_row<T: VecZnxApi + Infos>(
-        &self,
-        b: &mut VmpPMat,
-        a: &T,
-        row_i: usize,
-        buf: &mut [u8],
-    ) {
+    fn vmp_prepare_row(&self, b: &mut VmpPMat, a: &[i64], row_i: usize, buf: &mut [u8]) {
         unsafe {
             vmp::vmp_prepare_row(
                 self.0,
@@ -469,16 +454,16 @@ impl VmpPMatOps for Module {

     fn vmp_apply_dft_tmp_bytes(
         &self,
-        c_limbs: usize,
-        a_limbs: usize,
+        c_cols: usize,
+        a_cols: usize,
         rows: usize,
         cols: usize,
     ) -> usize {
         unsafe {
             vmp::vmp_apply_dft_tmp_bytes(
                 self.0,
-                c_limbs as u64,
-                a_limbs as u64,
+                c_cols as u64,
+                a_cols as u64,
                 rows as u64,
                 cols as u64,
             ) as usize
@@ -496,9 +481,9 @@ impl VmpPMatOps for Module {
             vmp::vmp_apply_dft(
                 self.0,
                 c.0,
-                c.limbs() as u64,
+                c.cols() as u64,
                 a.as_ptr(),
-                a.limbs() as u64,
+                a.cols() as u64,
                 a.n() as u64,
                 b.data(),
                 b.rows() as u64,
@@ -510,16 +495,16 @@ impl VmpPMatOps for Module {

     fn vmp_apply_dft_to_dft_tmp_bytes(
         &self,
-        c_limbs: usize,
-        a_limbs: usize,
+        c_cols: usize,
+        a_cols: usize,
         rows: usize,
         cols: usize,
     ) -> usize {
         unsafe {
             vmp::vmp_apply_dft_to_dft_tmp_bytes(
                 self.0,
-                c_limbs as u64,
-                a_limbs as u64,
+                c_cols as u64,
+                a_cols as u64,
                 rows as u64,
                 cols as u64,
             ) as usize
@@ -531,9 +516,9 @@ impl VmpPMatOps for Module {
             vmp::vmp_apply_dft_to_dft(
                 self.0,
                 c.0,
-                c.limbs() as u64,
+                c.cols() as u64,
                 a.0,
-                a.limbs() as u64,
+                a.cols() as u64,
                 b.data(),
                 b.rows() as u64,
                 b.cols() as u64,
@@ -547,9 +532,9 @@ impl VmpPMatOps for Module {
             vmp::vmp_apply_dft_to_dft(
                 self.0,
                 b.0,
-                b.limbs() as u64,
+                b.cols() as u64,
                 b.0,
-                b.limbs() as u64,
+                b.cols() as u64,
                 a.data(),
                 a.rows() as u64,
                 a.cols() as u64,
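The shape rule repeated in the docs above (an input of `i` cols against an `i`-row, `j`-col [VmpPMat] yields an output of `j` cols, truncating to the largest valid dimensions on mismatch) can be captured in a small helper. This is an illustration of the rule only, not crate API:

```rust
// Dimensions actually used in c = a x pmat, per the documented rule:
// matrix rows consume input columns, matrix cols produce output columns,
// and any mismatch truncates to the largest valid sizes.
fn vmp_used_dims(a_cols: usize, pmat_rows: usize, pmat_cols: usize, c_cols: usize) -> (usize, usize) {
    let used_rows = a_cols.min(pmat_rows); // input columns actually consumed
    let out_cols = c_cols.min(pmat_cols);  // output columns actually produced
    (used_rows, out_cols)
}

fn main() {
    // 5-column input against a 5x6 prepared matrix, 6-column output buffer.
    assert_eq!(vmp_used_dims(5, 5, 6, 6), (5, 6));
    // Oversized input: only the first 5 columns are consumed.
    assert_eq!(vmp_used_dims(7, 5, 6, 6), (5, 6));
}
```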