Everything compiles. Scratchpad not yet implemented.

Janmajaya Mall
2025-05-03 16:37:20 +05:30
parent 3ed6fa8ab5
commit ff8370e023
19 changed files with 919 additions and 504 deletions

View File

@@ -1,5 +1,5 @@
use base2k::{
Encoding, FFT64, Module, Sampling, Scalar, ScalarOps, ScalarZnxDft, ScalarZnxDftOps, VecZnx, VecZnxBig, VecZnxBigOps,
Encoding, FFT64, Module, Sampling, Scalar, ScalarAlloc, ScalarZnxDft, ScalarZnxDftOps, VecZnx, VecZnxBig, VecZnxBigOps,
VecZnxDft, VecZnxDftOps, VecZnxOps, ZnxInfos, alloc_aligned,
};
use itertools::izip;

View File

@@ -1,5 +1,5 @@
use crate::ffi::znx::znx_zero_i64_ref;
use crate::znx_base::ZnxLayout;
use crate::znx_base::{ZnxView, ZnxViewMut};
use crate::{VecZnx, znx_base::ZnxInfos};
use itertools::izip;
use rug::{Assign, Float};
@@ -59,7 +59,7 @@ pub trait Encoding {
fn decode_coeff_i64(&self, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64;
}
impl Encoding for VecZnx {
impl<D: AsMut<[u8]> + AsRef<[u8]>> Encoding for VecZnx<D> {
fn encode_vec_i64(&mut self, col_i: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
encode_vec_i64(self, col_i, log_base2k, log_k, data, log_max)
}
@@ -81,7 +81,14 @@ impl Encoding for VecZnx {
}
}
fn encode_vec_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
fn encode_vec_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
a: &mut VecZnx<D>,
col_i: usize,
log_base2k: usize,
log_k: usize,
data: &[i64],
log_max: usize,
) {
let size: usize = (log_k + log_base2k - 1) / log_base2k;
#[cfg(debug_assertions)]
@@ -132,7 +139,7 @@ fn encode_vec_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usize,
}
}
fn decode_vec_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
fn decode_vec_i64<D: AsMut<[u8]> + AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
let size: usize = (log_k + log_base2k - 1) / log_base2k;
#[cfg(debug_assertions)]
{
@@ -160,7 +167,7 @@ fn decode_vec_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, dat
})
}
fn decode_vec_float(a: &VecZnx, col_i: usize, log_base2k: usize, data: &mut [Float]) {
fn decode_vec_float<D: AsMut<[u8]> + AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, data: &mut [Float]) {
let size: usize = a.size();
#[cfg(debug_assertions)]
{
@@ -194,7 +201,15 @@ fn decode_vec_float(a: &VecZnx, col_i: usize, log_base2k: usize, data: &mut [Flo
});
}
fn encode_coeff_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
fn encode_coeff_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
a: &mut VecZnx<D>,
col_i: usize,
log_base2k: usize,
log_k: usize,
i: usize,
value: i64,
log_max: usize,
) {
let size: usize = (log_k + log_base2k - 1) / log_base2k;
#[cfg(debug_assertions)]
@@ -237,7 +252,7 @@ fn encode_coeff_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usiz
}
}
fn decode_coeff_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
fn decode_coeff_i64<D: AsMut<[u8]> + AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
#[cfg(debug_assertions)]
{
assert!(i < a.n());
@@ -263,10 +278,9 @@ fn decode_coeff_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, i
#[cfg(test)]
mod tests {
use crate::{
Encoding, FFT64, Module, VecZnx, VecZnxOps,
znx_base::{ZnxInfos, ZnxLayout},
};
use crate::vec_znx_ops::*;
use crate::znx_base::*;
use crate::{Encoding, FFT64, Module, VecZnx, znx_base::ZnxInfos};
use itertools::izip;
use sampling::source::Source;
@@ -277,7 +291,7 @@ mod tests {
let log_base2k: usize = 17;
let size: usize = 5;
let log_k: usize = size * log_base2k - 5;
let mut a: VecZnx = module.new_vec_znx(2, size);
let mut a: VecZnx<_> = module.new_vec_znx(2, size);
let mut source: Source = Source::new([0u8; 32]);
let raw: &mut [i64] = a.raw_mut();
raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
@@ -299,7 +313,7 @@ mod tests {
let log_base2k: usize = 17;
let size: usize = 5;
let log_k: usize = size * log_base2k - 5;
let mut a: VecZnx = module.new_vec_znx(2, size);
let mut a: VecZnx<_> = module.new_vec_znx(2, size);
let mut source = Source::new([0u8; 32]);
let raw: &mut [i64] = a.raw_mut();
raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
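// ---------------------------------------------------------------------------
// Editor's sketch (not part of the commit): a round trip through the now
// data-generic Encoding trait. Module::<FFT64>::new(n) is an assumed
// constructor, and decode_vec_i64 is assumed to be forwarded by the trait the
// same way encode_vec_i64 is above; everything else matches the signatures
// shown in this file.
fn encoding_round_trip_sketch() {
    let module: Module<FFT64> = Module::<FFT64>::new(16); // assumed constructor
    let log_base2k: usize = 17;
    let size: usize = 5;
    let log_k: usize = size * log_base2k - 5;
    // Owned vector: D = Vec<u8>, which satisfies AsMut<[u8]> + AsRef<[u8]>.
    let mut a: VecZnx<_> = module.new_vec_znx(1, size);
    let want: Vec<i64> = (0..module.n() as i64).collect();
    a.encode_vec_i64(0, log_base2k, log_k, &want, 4); // log_max: values fit in 4 bits
    let mut have: Vec<i64> = vec![0i64; module.n()];
    a.decode_vec_i64(0, log_base2k, log_k, &mut have);
    assert_eq!(want, have);
}
// ---------------------------------------------------------------------------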

View File

@@ -125,3 +125,29 @@ pub fn alloc_aligned<T>(size: usize) -> Vec<T> {
DEFAULTALIGN,
)
}
pub(crate) struct ScratchSpace {
// data: D,
}
impl ScratchSpace {
fn tmp_vec_znx_dft<D, B>(&mut self, n: usize, cols: usize, size: usize) -> VecZnxDft<D, B> {
todo!()
}
fn tmp_vec_znx_big<D, B>(&mut self, n: usize, cols: usize, size: usize) -> VecZnxBig<D, B> {
todo!()
}
fn vec_znx_big_normalize_tmp_bytes<B: Backend>(&mut self, module: &Module<B>) -> &mut [u8] {
todo!()
}
fn vmp_apply_dft_tmp_bytes<B: Backend>(&mut self, module: &Module<B>) -> &mut [u8] {
todo!()
}
fn vmp_apply_dft_to_dft_tmp_bytes<B: Backend>(&mut self, module: &Module<B>) -> &mut [u8] {
todo!()
}
}
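// ---------------------------------------------------------------------------
// Editor's sketch (not part of the commit): one way the still-unimplemented
// ScratchSpace could work — a bump allocator over a single aligned buffer.
// The todo!() methods above would carve typed views out of `take` (e.g. via
// VecZnxDft::from_mut_slice, added elsewhere in this commit). All names below
// are hypothetical.
struct BumpScratch {
    data: Vec<u8>, // one aligned backing buffer (see alloc_aligned above)
    offset: usize, // bump pointer, reset once a top-level call returns
}

impl BumpScratch {
    fn new(bytes: usize) -> Self {
        Self { data: alloc_aligned::<u8>(bytes), offset: 0 }
    }

    /// Carves `len` bytes off the front of the remaining buffer. A real
    /// implementation would also round `len` up so every returned slice
    /// keeps the buffer's alignment.
    fn take(&mut self, len: usize) -> &mut [u8] {
        let start = self.offset;
        self.offset += len;
        &mut self.data[start..start + len]
    }
}
// ---------------------------------------------------------------------------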

View File

@@ -1,5 +1,5 @@
use crate::znx_base::{GetZnxBase, ZnxBase, ZnxInfos, ZnxLayout, ZnxSliceSize};
use crate::{Backend, FFT64, Module, alloc_aligned};
use crate::znx_base::{GetZnxBase, ZnxBase, ZnxInfos};
use crate::{Backend, DataView, DataViewMut, FFT64, Module, ZnxView, alloc_aligned};
use std::marker::PhantomData;
/// Vector Matrix Product Prepared Matrix: a vector of [VecZnx],
@@ -8,68 +8,67 @@ use std::marker::PhantomData;
///
/// [MatZnxDft] is used to perform a vector matrix product between a [VecZnx]/[VecZnxDft] and a [MatZnxDft].
/// See the trait [MatZnxDftOps] for additional information.
pub struct MatZnxDft<B: Backend> {
pub inner: ZnxBase,
pub cols_in: usize,
pub cols_out: usize,
pub struct MatZnxDft<D, B> {
data: D,
n: usize,
size: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
_marker: PhantomData<B>,
}
impl<B: Backend> GetZnxBase for MatZnxDft<B> {
fn znx(&self) -> &ZnxBase {
&self.inner
impl<D, B> ZnxInfos for MatZnxDft<D, B> {
fn cols(&self) -> usize {
self.cols_in
}
fn znx_mut(&mut self) -> &mut ZnxBase {
&mut self.inner
fn rows(&self) -> usize {
self.rows
}
}
impl<B: Backend> ZnxInfos for MatZnxDft<B> {}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
self.size
}
impl ZnxSliceSize for MatZnxDft<FFT64> {
fn sl(&self) -> usize {
self.n()
}
}
impl ZnxLayout for MatZnxDft<FFT64> {
impl<D, B> DataView for MatZnxDft<D, B> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D, B> DataViewMut for MatZnxDft<D, B> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: AsRef<[u8]>> ZnxView for MatZnxDft<D, FFT64> {
type Scalar = f64;
}
impl<B: Backend> MatZnxDft<B> {
pub fn new(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
let bytes: Vec<u8> = alloc_aligned(Self::bytes_of(module, rows, cols_in, cols_out, size));
Self::from_bytes(module, rows, cols_in, cols_out, size, bytes)
impl<D, B> MatZnxDft<D, B> {
pub(crate) fn cols_in(&self) -> usize {
self.cols_in
}
pub fn from_bytes(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize, mut bytes: Vec<u8>) -> Self {
let mut mat: MatZnxDft<B> = Self::from_bytes_borrow(module, rows, cols_in, cols_out, size, &mut bytes);
mat.znx_mut().data = bytes;
mat
pub(crate) fn cols_out(&self) -> usize {
self.cols_out
}
}
pub fn from_bytes_borrow(
module: &Module<B>,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: &mut [u8],
) -> Self {
debug_assert_eq!(
bytes.len(),
Self::bytes_of(module, rows, cols_in, cols_out, size)
);
Self {
inner: ZnxBase::from_bytes_borrow(module.n(), rows, cols_out, size, bytes),
cols_in: cols_in,
cols_out: cols_out,
_marker: PhantomData,
}
}
pub fn bytes_of(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
impl<D: From<Vec<u8>>, B: Backend> MatZnxDft<D, B> {
pub(crate) fn bytes_of(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
unsafe {
crate::ffi::vmp::bytes_of_vmp_pmat(
module.ptr,
@@ -79,16 +78,62 @@ impl<B: Backend> MatZnxDft<B> {
}
}
pub fn cols_in(&self) -> usize {
self.cols_in
pub(crate) fn new(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned(Self::bytes_of(module, rows, cols_in, cols_out, size));
Self {
data: data.into(),
n: module.n(),
size,
rows,
cols_in,
cols_out,
_marker: PhantomData,
}
}
pub fn cols_out(&self) -> usize {
self.cols_out
pub(crate) fn new_from_bytes(
module: &Module<B>,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: impl Into<Vec<u8>>,
) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::bytes_of(module, rows, cols_in, cols_out, size));
Self {
data: data.into(),
n: module.n(),
size,
rows,
cols_in,
cols_out,
_marker: PhantomData,
}
}
// pub fn from_bytes_borrow(
// module: &Module<B>,
// rows: usize,
// cols_in: usize,
// cols_out: usize,
// size: usize,
// bytes: &mut [u8],
// ) -> Self {
// debug_assert_eq!(
// bytes.len(),
// Self::bytes_of(module, rows, cols_in, cols_out, size)
// );
// Self {
// inner: ZnxBase::from_bytes_borrow(module.n(), rows, cols_out, size, bytes),
// cols_in: cols_in,
// cols_out: cols_out,
// _marker: PhantomData,
// }
// }
}
impl MatZnxDft<FFT64> {
impl<D: AsRef<[u8]>> MatZnxDft<D, FFT64> {
/// Returns a copy of the backend array at index (i, j) of the [MatZnxDft].
///
/// # Arguments
@@ -123,3 +168,5 @@ impl MatZnxDft<FFT64> {
}
}
}
pub type MatZnxDftAllocOwned<B> = MatZnxDft<Vec<u8>, B>;
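// Editor's note: the owned alias pins the data generic to Vec<u8>, so callers
// of the allocating constructors keep a concrete type. Sketch, assuming a
// Module<FFT64> named `module`:
//
//     let mat: MatZnxDftAllocOwned<FFT64> =
//         module.new_mat_znx_dft(4, 2, 2, 5); // rows, cols_in, cols_out, size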

View File

@@ -1,20 +1,19 @@
use crate::ffi::vec_znx_dft::vec_znx_dft_t;
use crate::ffi::vmp;
use crate::znx_base::{ZnxInfos, ZnxLayout};
use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
use crate::{
Backend, FFT64, MatZnxDft, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, assert_alignement, is_aligned,
Backend, FFT64, MatZnxDft, MatZnxDftAllocOwned, Module, ScratchSpace, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
VecZnxDftAlloc, VecZnxDftOps, assert_alignement, is_aligned,
};
/// This trait implements methods for vector matrix product,
/// that is, multiplying a [VecZnx] with a [MatZnxDft].
pub trait MatZnxDftOps<B: Backend> {
pub trait MatZnxDftAlloc<B> {
/// Allocates a new [MatZnxDft] with the given number of rows and columns.
///
/// # Arguments
///
/// * `rows`: number of rows (number of [VecZnxDft]).
/// * `size`: size (number of limbs) of each [VecZnxDft].
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDft<B>;
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftAllocOwned<B>;
fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
@@ -25,17 +24,21 @@ pub trait MatZnxDftOps<B: Backend> {
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> MatZnxDft<FFT64>;
) -> MatZnxDftAllocOwned<B>;
fn new_mat_znx_dft_from_bytes_borrow(
&self,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: &mut [u8],
) -> MatZnxDft<FFT64>;
// fn new_mat_znx_dft_from_bytes_borrow(
// &self,
// rows: usize,
// cols_in: usize,
// cols_out: usize,
// size: usize,
// bytes: &mut [u8],
// ) -> MatZnxDft<FFT64>;
}
/// This trait implements methods for vector matrix product,
/// that is, multiplying a [VecZnx] with a [MatZnxDft].
pub trait MatZnxDftOps<DataMut, Data, B: Backend> {
/// Returns the number of bytes needed as scratch space for [MatZnxDftOps::vmp_prepare_row]
fn vmp_prepare_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize;
@@ -49,7 +52,14 @@ pub trait MatZnxDftOps<B: Backend> {
/// * `buf`: scratch space, the size of buf can be obtained with [MatZnxDftOps::vmp_prepare_tmp_bytes].
///
/// The size of buf can be obtained with [MatZnxDftOps::vmp_prepare_tmp_bytes].
fn vmp_prepare_row(&self, b: &mut MatZnxDft<B>, b_row: usize, b_col_in: usize, a: &VecZnx, tmp_bytes: &mut [u8]);
fn vmp_prepare_row(
&self,
b: &mut MatZnxDft<DataMut, B>,
b_row: usize,
b_col_in: usize,
a: &VecZnx<Data>,
scratch: &mut ScratchSpace,
);
/// Returns the number of bytes needed as scratch space for [MatZnxDftOps::vmp_extract_row]
fn vmp_extract_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize;
@@ -64,11 +74,11 @@ pub trait MatZnxDftOps<B: Backend> {
fn vmp_extract_row(
&self,
log_base2k: usize,
b: &mut VecZnx,
a: &MatZnxDft<B>,
b: &mut VecZnx<DataMut>,
a: &MatZnxDft<Data, B>,
b_row: usize,
b_col_in: usize,
tmp_bytes: &mut [u8],
scratch: &mut ScratchSpace,
);
/// Prepares the ith-row of [MatZnxDft] from a [VecZnxDft].
@@ -80,7 +90,7 @@ pub trait MatZnxDftOps<B: Backend> {
/// * `row_i`: the index of the row to prepare.
///
/// The size of buf can be obtained with [MatZnxDftOps::vmp_prepare_tmp_bytes].
fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<B>, b_row: usize, b_col_in: usize, a: &VecZnxDft<B>);
fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<DataMut, B>, b_row: usize, b_col_in: usize, a: &VecZnxDft<Data, B>);
/// Extracts the ith-row of [MatZnxDft] into a [VecZnxDft].
///
@@ -89,7 +99,7 @@ pub trait MatZnxDftOps<B: Backend> {
/// * `b`: the [VecZnxDft] into which to extract the row of the [MatZnxDft].
/// * `a`: [MatZnxDft] on which the values are encoded.
/// * `row_i`: the index of the row to extract.
fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<B>, a: &MatZnxDft<B>, a_row: usize, a_col_in: usize);
fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<DataMut, B>, a: &MatZnxDft<Data, B>, a_row: usize, a_col_in: usize);
/// Returns the size of the scratch space necessary for [MatZnxDftOps::vmp_apply_dft].
///
@@ -133,7 +143,7 @@ pub trait MatZnxDftOps<B: Backend> {
/// * `a`: the left operand [VecZnx] of the vector matrix product.
/// * `b`: the right operand [MatZnxDft] of the vector matrix product.
/// * `buf`: scratch space, the size can be obtained with [MatZnxDftOps::vmp_apply_dft_tmp_bytes].
fn vmp_apply_dft(&self, c: &mut VecZnxDft<B>, a: &VecZnx, b: &MatZnxDft<B>, buf: &mut [u8]);
fn vmp_apply_dft(&self, c: &mut VecZnxDft<DataMut, B>, a: &VecZnx<Data>, b: &MatZnxDft<Data, B>, scratch: &mut ScratchSpace);
/// Returns the size of the scratch space necessary for [MatZnxDftOps::vmp_apply_dft_to_dft].
///
@@ -180,16 +190,22 @@ pub trait MatZnxDftOps<B: Backend> {
/// * `a`: the left operand [VecZnxDft] of the vector matrix product.
/// * `b`: the right operand [MatZnxDft] of the vector matrix product.
/// * `buf`: scratch space, the size can be obtained with [MatZnxDftOps::vmp_apply_dft_to_dft_tmp_bytes].
fn vmp_apply_dft_to_dft(&self, c: &mut VecZnxDft<B>, a: &VecZnxDft<B>, b: &MatZnxDft<B>, buf: &mut [u8]);
fn vmp_apply_dft_to_dft(
&self,
c: &mut VecZnxDft<DataMut, B>,
a: &VecZnxDft<Data, B>,
b: &MatZnxDft<Data, B>,
scratch: &mut ScratchSpace,
);
}
impl MatZnxDftOps<FFT64> for Module<FFT64> {
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDft<FFT64> {
MatZnxDft::<FFT64>::new(self, rows, cols_in, cols_out, size)
impl<B: Backend> MatZnxDftAlloc<B> for Module<B> {
fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
MatZnxDftAllocOwned::bytes_of(self, rows, cols_in, cols_out, size)
}
fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
MatZnxDft::<FFT64>::bytes_of(self, rows, cols_in, cols_out, size)
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftAllocOwned<B> {
MatZnxDftAllocOwned::new(self, rows, cols_in, cols_out, size)
}
fn new_mat_znx_dft_from_bytes(
@@ -199,26 +215,28 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> MatZnxDft<FFT64> {
MatZnxDft::<FFT64>::from_bytes(self, rows, cols_in, cols_out, size, bytes)
}
fn new_mat_znx_dft_from_bytes_borrow(
&self,
rows: usize,
cols_in: usize,
cols_out: usize,
size: usize,
bytes: &mut [u8],
) -> MatZnxDft<FFT64> {
MatZnxDft::<FFT64>::from_bytes_borrow(self, rows, cols_in, cols_out, size, bytes)
) -> MatZnxDftAllocOwned<B> {
MatZnxDftAllocOwned::new_from_bytes(self, rows, cols_in, cols_out, size, bytes)
}
}
impl<DataMut, Data> MatZnxDftOps<DataMut, Data, FFT64> for Module<FFT64>
where
DataMut: AsMut<[u8]> + AsRef<[u8]>,
Data: AsRef<[u8]>,
{
fn vmp_prepare_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize {
self.bytes_of_vec_znx_dft(cols_out, size)
<Self as VecZnxDftAlloc<FFT64>>::bytes_of_vec_znx_dft(self, cols_out, size)
}
fn vmp_prepare_row(&self, b: &mut MatZnxDft<FFT64>, b_row: usize, b_col_in: usize, a: &VecZnx, tmp_bytes: &mut [u8]) {
fn vmp_prepare_row(
&self,
b: &mut MatZnxDft<DataMut, FFT64>,
b_row: usize,
b_col_in: usize,
a: &VecZnx<Data>,
scratch: &mut ScratchSpace,
) {
#[cfg(debug_assertions)]
{
assert_eq!(b.n(), self.n());
@@ -249,33 +267,36 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
b.size(),
a.size()
);
assert!(tmp_bytes.len() >= self.vmp_prepare_row_tmp_bytes(a.cols(), a.size()));
assert!(is_aligned(tmp_bytes.as_ptr()))
// assert!(
// tmp_bytes.len()
// >= <Self as MatZnxDftOps<DataMut, Data, FFT64>>::vmp_prepare_row_tmp_bytes(self, a.cols(), a.size())
// );
// assert!(is_aligned(tmp_bytes.as_ptr()))
}
let cols_out: usize = a.cols();
let a_size: usize = a.size();
let (tmp_bytes_a_dft, _) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, a_size));
let mut a_dft: VecZnxDft<FFT64> = self.new_vec_znx_dft_from_bytes_borrow(cols_out, a_size, tmp_bytes_a_dft);
// let (tmp_bytes_a_dft, _) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, a_size));
let mut a_dft = scratch.tmp_vec_znx_dft::<DataMut, _>(self.n(), cols_out, a_size);
(0..cols_out).for_each(|i| self.vec_znx_dft(&mut a_dft, i, &a, i));
Self::vmp_prepare_row_dft(&self, b, b_row, b_col_in, &a_dft);
}
fn vmp_extract_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize {
self.bytes_of_vec_znx_dft(cols_out, size) + self.vec_znx_big_normalize_tmp_bytes()
self.bytes_of_vec_znx_dft(cols_out, size)
+ <Self as VecZnxBigOps<DataMut, Data, FFT64>>::vec_znx_big_normalize_tmp_bytes(self)
}
fn vmp_extract_row(
&self,
log_base2k: usize,
b: &mut VecZnx,
a: &MatZnxDft<FFT64>,
b: &mut VecZnx<DataMut>,
a: &MatZnxDft<Data, FFT64>,
a_row: usize,
a_col_in: usize,
tmp_bytes: &mut [u8],
scratch: &mut ScratchSpace,
) {
#[cfg(debug_assertions)]
{
@@ -307,24 +328,24 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
b.size(),
a.size()
);
assert!(tmp_bytes.len() >= self.vmp_extract_row_tmp_bytes(a.cols(), a.size()));
assert!(is_aligned(tmp_bytes.as_ptr()))
// assert!(tmp_bytes.len() >= self.vmp_extract_row_tmp_bytes(a.cols(), a.size()));
// assert!(is_aligned(tmp_bytes.as_ptr()))
}
let cols_out: usize = b.cols();
let size: usize = b.size();
let (bytes_a_dft, tmp_bytes) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, size));
let mut b_dft: VecZnxDft<FFT64> = self.new_vec_znx_dft_from_bytes_borrow(cols_out, size, bytes_a_dft);
// let (bytes_a_dft, tmp_bytes) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, size));
let mut b_dft = scratch.tmp_vec_znx_dft::<DataMut, _>(self.n(), cols_out, size);
Self::vmp_extract_row_dft(&self, &mut b_dft, a, a_row, a_col_in);
let mut b_big: VecZnxBig<FFT64> = b_dft.alias_as_vec_znx_big();
let mut b_big = scratch.tmp_vec_znx_big(self.n(), cols_out, size);
(0..cols_out).for_each(|i| {
self.vec_znx_idft_tmp_a(&mut b_big, i, &mut b_dft, i);
self.vec_znx_big_normalize(log_base2k, b, i, &b_big, i, tmp_bytes);
<Self as VecZnxDftOps<DataMut, Data, FFT64>>::vec_znx_idft_tmp_a(self, &mut b_big, i, &mut b_dft, i);
self.vec_znx_big_normalize(log_base2k, b, i, &b_big, i, scratch);
});
}
fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<FFT64>, b_row: usize, b_col_in: usize, a: &VecZnxDft<FFT64>) {
fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<DataMut, FFT64>, b_row: usize, b_col_in: usize, a: &VecZnxDft<Data, FFT64>) {
#[cfg(debug_assertions)]
{
assert_eq!(b.n(), self.n());
@@ -369,7 +390,7 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
}
}
fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<FFT64>, a: &MatZnxDft<FFT64>, a_row: usize, a_col_in: usize) {
fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<DataMut, FFT64>, a: &MatZnxDft<Data, FFT64>, a_row: usize, a_col_in: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(b.n(), self.n());
@@ -433,18 +454,13 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
}
}
fn vmp_apply_dft(&self, c: &mut VecZnxDft<FFT64>, a: &VecZnx, b: &MatZnxDft<FFT64>, tmp_bytes: &mut [u8]) {
debug_assert!(
tmp_bytes.len()
>= self.vmp_apply_dft_tmp_bytes(
c.size(),
a.size(),
b.rows(),
b.cols_in(),
b.cols_out(),
b.size()
)
);
fn vmp_apply_dft(
&self,
c: &mut VecZnxDft<DataMut, FFT64>,
a: &VecZnx<Data>,
b: &MatZnxDft<Data, FFT64>,
scratch: &mut ScratchSpace,
) {
#[cfg(debug_assertions)]
{
assert_eq!(c.n(), self.n());
@@ -464,18 +480,18 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
a.cols(),
b.cols_in()
);
assert!(
tmp_bytes.len()
>= self.vmp_apply_dft_tmp_bytes(
c.size(),
a.size(),
b.rows(),
b.cols_in(),
b.cols_out(),
b.size()
)
);
assert_alignement(tmp_bytes.as_ptr());
// assert!(
// tmp_bytes.len()
// >= self.vmp_apply_dft_tmp_bytes(
// c.size(),
// a.size(),
// b.rows(),
// b.cols_in(),
// b.cols_out(),
// b.size()
// )
// );
// assert_alignement(tmp_bytes.as_ptr());
}
unsafe {
vmp::vmp_apply_dft(
@@ -488,7 +504,7 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
b.as_ptr() as *const vmp::vmp_pmat_t,
(b.rows() * b.cols_in()) as u64,
(b.size() * b.cols_out()) as u64,
tmp_bytes.as_mut_ptr(),
scratch.vmp_apply_dft_tmp_bytes(self).as_mut_ptr(),
)
}
}
@@ -515,7 +531,13 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
}
}
fn vmp_apply_dft_to_dft(&self, c: &mut VecZnxDft<FFT64>, a: &VecZnxDft<FFT64>, b: &MatZnxDft<FFT64>, tmp_bytes: &mut [u8]) {
fn vmp_apply_dft_to_dft(
&self,
c: &mut VecZnxDft<DataMut, FFT64>,
a: &VecZnxDft<Data, FFT64>,
b: &MatZnxDft<Data, FFT64>,
scratch: &mut ScratchSpace,
) {
#[cfg(debug_assertions)]
{
assert_eq!(c.n(), self.n());
@@ -535,20 +557,20 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
a.cols(),
b.cols_in()
);
assert!(
tmp_bytes.len()
>= self.vmp_apply_dft_to_dft_tmp_bytes(
c.cols(),
c.size(),
a.cols(),
a.size(),
b.rows(),
b.cols_in(),
b.cols_out(),
b.size()
)
);
assert_alignement(tmp_bytes.as_ptr());
// assert!(
// tmp_bytes.len()
// >= self.vmp_apply_dft_to_dft_tmp_bytes(
// c.cols(),
// c.size(),
// a.cols(),
// a.size(),
// b.rows(),
// b.cols_in(),
// b.cols_out(),
// b.size()
// )
// );
// assert_alignement(tmp_bytes.as_ptr());
}
unsafe {
vmp::vmp_apply_dft_to_dft(
@@ -560,7 +582,7 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
b.as_ptr() as *const vmp::vmp_pmat_t,
b.rows() as u64,
(b.size() * b.cols()) as u64,
tmp_bytes.as_mut_ptr(),
scratch.vmp_apply_dft_to_dft_tmp_bytes(self).as_mut_ptr(),
)
}
}
@@ -568,9 +590,12 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
#[cfg(test)]
mod tests {
use crate::mat_znx_dft_ops::*;
use crate::vec_znx_big_ops::*;
use crate::vec_znx_dft_ops::*;
use crate::vec_znx_ops::*;
use crate::{
FFT64, MatZnxDft, MatZnxDftOps, Module, Sampling, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps,
alloc_aligned, znx_base::ZnxLayout,
FFT64, MatZnxDft, MatZnxDftOps, Module, Sampling, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, alloc_aligned,
};
use sampling::source::Source;
@@ -582,16 +607,19 @@ mod tests {
let mat_cols_in: usize = 2;
let mat_cols_out: usize = 2;
let mat_size: usize = 5;
let mut a: VecZnx = module.new_vec_znx(mat_cols_out, mat_size);
let mut b: VecZnx = module.new_vec_znx(mat_cols_out, mat_size);
let mut a_dft: VecZnxDft<FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
let mut a_big: VecZnxBig<FFT64> = module.new_vec_znx_big(mat_cols_out, mat_size);
let mut b_dft: VecZnxDft<FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
let mut vmpmat_0: MatZnxDft<FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
let mut vmpmat_1: MatZnxDft<FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
let mut a: VecZnx<_> = module.new_vec_znx(mat_cols_out, mat_size);
let mut b: VecZnx<_> = module.new_vec_znx(mat_cols_out, mat_size);
let mut a_dft: VecZnxDft<_, FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
let mut a_big: VecZnxBig<_, FFT64> = module.new_vec_znx_big(mat_cols_out, mat_size);
let mut b_dft: VecZnxDft<_, FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
let mut vmpmat_0: MatZnxDft<_, FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
let mut vmpmat_1: MatZnxDft<_, FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
// let mut tmp_bytes: Vec<u8> =
// alloc_aligned(module.vmp_prepare_row_tmp_bytes(mat_cols_out, mat_size) | module.vec_znx_big_normalize_tmp_bytes());
let mut scratch = ScratchSpace {};
let mut tmp_bytes: Vec<u8> =
alloc_aligned(module.vmp_prepare_row_tmp_bytes(mat_cols_out, mat_size) | module.vec_znx_big_normalize_tmp_bytes());
alloc_aligned::<u8>(<Module<FFT64> as VecZnxDftOps<Vec<u8>, Vec<u8>, _>>::vec_znx_idft_tmp_bytes(&module));
for col_in in 0..mat_cols_in {
for row_i in 0..mat_rows {
@@ -602,7 +630,7 @@ mod tests {
module.vec_znx_dft(&mut a_dft, col_out, &a, col_out);
});
module.vmp_prepare_row(&mut vmpmat_0, row_i, col_in, &a, &mut tmp_bytes);
module.vmp_prepare_row(&mut vmpmat_0, row_i, col_in, &a, &mut scratch);
// Checks that prepare(mat_znx_dft, a) = prepare_dft(mat_znx_dft, a_dft)
module.vmp_prepare_row_dft(&mut vmpmat_1, row_i, col_in, &a_dft);
@@ -613,11 +641,11 @@ mod tests {
assert_eq!(a_dft.raw(), b_dft.raw());
// Checks that a_big = extract(prepare_dft(mat_znx_dft, a_dft), b_big)
module.vmp_extract_row(log_base2k, &mut b, &vmpmat_0, row_i, col_in, &mut tmp_bytes);
module.vmp_extract_row(log_base2k, &mut b, &vmpmat_0, row_i, col_in, &mut scratch);
(0..mat_cols_out).for_each(|col_out| {
module.vec_znx_idft(&mut a_big, col_out, &a_dft, col_out, &mut tmp_bytes);
module.vec_znx_big_normalize(log_base2k, &mut a, col_out, &a_big, col_out, &mut tmp_bytes);
module.vec_znx_big_normalize(log_base2k, &mut a, col_out, &a_big, col_out, &mut scratch);
});
assert_eq!(a.raw(), b.raw());
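// ---------------------------------------------------------------------------
// Editor's sketch (not part of the commit): the product itself, continuing
// the test's variables with the new scratch-based signature above:
//
//     let mut c_dft: VecZnxDft<_, FFT64> =
//         module.new_vec_znx_dft(mat_cols_out, mat_size);
//     module.vmp_apply_dft(&mut c_dft, &a, &vmpmat_0, &mut scratch);
// ---------------------------------------------------------------------------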

View File

@@ -33,7 +33,7 @@ impl Backend for NTT120 {
pub struct Module<B: Backend> {
pub ptr: *mut MODULE,
pub n: usize,
n: usize,
_marker: PhantomData<B>,
}

View File

@@ -1,16 +1,24 @@
use crate::{Backend, Module, VecZnx, znx_base::ZnxLayout};
use crate::znx_base::ZnxViewMut;
use crate::{Backend, Module, VecZnx};
use rand_distr::{Distribution, Normal};
use sampling::source::Source;
pub trait Sampling {
/// Fills the first `size` limbs with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
fn fill_uniform(&self, log_base2k: usize, a: &mut VecZnx, col_i: usize, size: usize, source: &mut Source);
/// Adds a vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
fn add_dist_f64<D: Distribution<f64>>(
fn fill_uniform<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
&self,
log_base2k: usize,
a: &mut VecZnx,
a: &mut VecZnx<DataMut>,
col_i: usize,
size: usize,
source: &mut Source,
);
/// Adds a vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
fn add_dist_f64<DataMut: AsMut<[u8]> + AsRef<[u8]>, D: Distribution<f64>>(
&self,
log_base2k: usize,
a: &mut VecZnx<DataMut>,
col_i: usize,
log_k: usize,
source: &mut Source,
@@ -19,10 +27,10 @@ pub trait Sampling {
);
/// Adds a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
fn add_normal(
fn add_normal<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
&self,
log_base2k: usize,
a: &mut VecZnx,
a: &mut VecZnx<DataMut>,
col_i: usize,
log_k: usize,
source: &mut Source,
@@ -32,22 +40,29 @@ pub trait Sampling {
}
impl<B: Backend> Sampling for Module<B> {
fn fill_uniform(&self, log_base2k: usize, a: &mut VecZnx, col_a: usize, size: usize, source: &mut Source) {
fn fill_uniform<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
&self,
log_base2k: usize,
a: &mut VecZnx<DataMut>,
col_i: usize,
size: usize,
source: &mut Source,
) {
let base2k: u64 = 1 << log_base2k;
let mask: u64 = base2k - 1;
let base2k_half: i64 = (base2k >> 1) as i64;
(0..size).for_each(|j| {
a.at_mut(col_a, j)
a.at_mut(col_i, j)
.iter_mut()
.for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
})
}
fn add_dist_f64<D: Distribution<f64>>(
fn add_dist_f64<DataMut: AsMut<[u8]> + AsRef<[u8]>, D: Distribution<f64>>(
&self,
log_base2k: usize,
a: &mut VecZnx,
col_a: usize,
a: &mut VecZnx<DataMut>,
col_i: usize,
log_k: usize,
source: &mut Source,
dist: D,
@@ -63,7 +78,7 @@ impl<B: Backend> Sampling for Module<B> {
let log_base2k_rem: usize = log_k % log_base2k;
if log_base2k_rem != 0 {
a.at_mut(col_a, limb).iter_mut().for_each(|a| {
a.at_mut(col_i, limb).iter_mut().for_each(|a| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source)
@@ -71,7 +86,7 @@ impl<B: Backend> Sampling for Module<B> {
*a += (dist_f64.round() as i64) << log_base2k_rem;
});
} else {
a.at_mut(col_a, limb).iter_mut().for_each(|a| {
a.at_mut(col_i, limb).iter_mut().for_each(|a| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source)
@@ -81,11 +96,11 @@ impl<B: Backend> Sampling for Module<B> {
}
}
fn add_normal(
fn add_normal<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
&self,
log_base2k: usize,
a: &mut VecZnx,
col_a: usize,
a: &mut VecZnx<DataMut>,
col_i: usize,
log_k: usize,
source: &mut Source,
sigma: f64,
@@ -94,7 +109,7 @@ impl<B: Backend> Sampling for Module<B> {
self.add_dist_f64(
log_base2k,
a,
col_a,
col_i,
log_k,
source,
Normal::new(0.0, sigma).unwrap(),
@@ -106,7 +121,9 @@ impl<B: Backend> Sampling for Module<B> {
#[cfg(test)]
mod tests {
use super::Sampling;
use crate::{FFT64, Module, Stats, VecZnx, VecZnxOps, znx_base::ZnxLayout};
use crate::vec_znx_ops::*;
use crate::znx_base::*;
use crate::{FFT64, Module, Stats, VecZnx};
use sampling::source::Source;
#[test]
@@ -120,7 +137,7 @@ mod tests {
let zero: Vec<i64> = vec![0; n];
let one_12_sqrt: f64 = 0.28867513459481287;
(0..cols).for_each(|col_i| {
let mut a: VecZnx = module.new_vec_znx(cols, size);
let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
module.fill_uniform(log_base2k, &mut a, col_i, size, &mut source);
(0..cols).for_each(|col_j| {
if col_j != col_i {
@@ -154,7 +171,7 @@ mod tests {
let zero: Vec<i64> = vec![0; n];
let k_f64: f64 = (1u64 << log_k as u64) as f64;
(0..cols).for_each(|col_i| {
let mut a: VecZnx = module.new_vec_znx(cols, size);
let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
module.add_normal(log_base2k, &mut a, col_i, log_k, &mut source, sigma, bound);
(0..cols).for_each(|col_j| {
if col_j != col_i {
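// ---------------------------------------------------------------------------
// Editor's note: the constant one_12_sqrt above is 1/sqrt(12), the standard
// deviation of a uniform variable on [-1/2, 1/2). fill_uniform draws each
// base-2^k digit uniformly over that (scaled) range, so the test can check
// the normalized column against it (tolerance is illustrative):
//
//     assert!((a.std(col_i, log_base2k) - one_12_sqrt).abs() < 0.01);
// ---------------------------------------------------------------------------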

View File

@@ -1,64 +1,59 @@
use crate::znx_base::{ZnxAlloc, ZnxBase, ZnxInfos, ZnxLayout, ZnxSliceSize};
use crate::{Backend, GetZnxBase, Module, VecZnx};
use crate::znx_base::ZnxInfos;
use crate::{Backend, DataView, DataViewMut, Module, ZnxView, ZnxViewMut, alloc_aligned};
use rand::seq::SliceRandom;
use rand_core::RngCore;
use rand_distr::{Distribution, weighted::WeightedIndex};
use sampling::source::Source;
pub const SCALAR_ZNX_ROWS: usize = 1;
pub const SCALAR_ZNX_SIZE: usize = 1;
// pub const SCALAR_ZNX_ROWS: usize = 1;
// pub const SCALAR_ZNX_SIZE: usize = 1;
pub struct Scalar {
pub inner: ZnxBase,
pub struct Scalar<D> {
data: D,
n: usize,
cols: usize,
}
impl GetZnxBase for Scalar {
fn znx(&self) -> &ZnxBase {
&self.inner
impl<D> ZnxInfos for Scalar<D> {
fn cols(&self) -> usize {
self.cols
}
fn znx_mut(&mut self) -> &mut ZnxBase {
&mut self.inner
}
}
impl ZnxInfos for Scalar {}
impl<B: Backend> ZnxAlloc<B> for Scalar {
type Scalar = i64;
fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
Self {
inner: ZnxBase::from_bytes_borrow(module.n(), SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes),
}
fn rows(&self) -> usize {
1
}
fn bytes_of(module: &Module<B>, _rows: usize, cols: usize, _size: usize) -> usize {
debug_assert_eq!(
_rows, SCALAR_ZNX_ROWS,
"rows != {} not supported for Scalar",
SCALAR_ZNX_ROWS
);
debug_assert_eq!(
_size, SCALAR_ZNX_SIZE,
"rows != {} not supported for Scalar",
SCALAR_ZNX_SIZE
);
module.n() * cols * std::mem::size_of::<self::Scalar>()
fn n(&self) -> usize {
self.n
}
}
impl ZnxLayout for Scalar {
type Scalar = i64;
}
fn size(&self) -> usize {
1
}
impl ZnxSliceSize for Scalar {
fn sl(&self) -> usize {
self.n()
}
}
impl Scalar {
impl<D> DataView for Scalar<D> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D> DataViewMut for Scalar<D> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: AsRef<[u8]>> ZnxView for Scalar<D> {
type Scalar = i64;
}
impl<D: AsMut<[u8]> + AsRef<[u8]>> Scalar<D> {
pub fn fill_ternary_prob(&mut self, col: usize, prob: f64, source: &mut Source) {
let choices: [i64; 3] = [-1, 0, 1];
let weights: [f64; 3] = [prob / 2.0, 1.0 - prob, prob / 2.0];
@@ -76,38 +71,89 @@ impl Scalar {
self.at_mut(col, 0).shuffle(source);
}
pub fn alias_as_vec_znx(&self) -> VecZnx {
VecZnx {
inner: ZnxBase {
n: self.n(),
rows: 1,
cols: 1,
size: 1,
data: Vec::new(),
ptr: self.ptr() as *mut u8,
},
// pub fn alias_as_vec_znx(&self) -> VecZnx {
// VecZnx {
// inner: ZnxBase {
// n: self.n(),
// rows: 1,
// cols: 1,
// size: 1,
// data: Vec::new(),
// ptr: self.ptr() as *mut u8,
// },
// }
// }
}
impl<D: From<Vec<u8>>> Scalar<D> {
pub(crate) fn bytes_of<S: Sized>(n: usize, cols: usize) -> usize {
n * cols * size_of::<S>()
}
pub(crate) fn new<S: Sized>(n: usize, cols: usize) -> Self {
let data = alloc_aligned::<u8>(Self::bytes_of::<S>(n, cols));
Self {
data: data.into(),
n,
cols,
}
}
pub(crate) fn new_from_bytes<S: Sized>(n: usize, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::bytes_of::<S>(n, cols));
Self {
data: data.into(),
n,
cols,
}
}
}
pub trait ScalarOps {
pub type ScalarOwned = Scalar<Vec<u8>>;
pub trait ScalarAlloc {
fn bytes_of_scalar(&self, cols: usize) -> usize;
fn new_scalar(&self, cols: usize) -> Scalar;
fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> Scalar;
fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar;
fn new_scalar(&self, cols: usize) -> ScalarOwned;
fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarOwned;
// fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar;
}
impl<B: Backend> ScalarOps for Module<B> {
impl<B: Backend> ScalarAlloc for Module<B> {
fn bytes_of_scalar(&self, cols: usize) -> usize {
Scalar::bytes_of(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE)
ScalarOwned::bytes_of::<i64>(self.n(), cols)
}
fn new_scalar(&self, cols: usize) -> Scalar {
Scalar::new(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE)
fn new_scalar(&self, cols: usize) -> ScalarOwned {
ScalarOwned::new::<i64>(self.n(), cols)
}
fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> Scalar {
Scalar::from_bytes(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes)
}
fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar {
Scalar::from_bytes_borrow(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes)
fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarOwned {
ScalarOwned::new_from_bytes::<i64>(self.n(), cols, bytes)
}
// fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar {
// Scalar::from_bytes_borrow(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes)
// }
}
// impl<B: Backend> ZnxAlloc<B> for Scalar {
// type Scalar = i64;
// fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
// Self {
// inner: ZnxBase::from_bytes_borrow(module.n(), SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes),
// }
// }
// fn bytes_of(module: &Module<B>, _rows: usize, cols: usize, _size: usize) -> usize {
// debug_assert_eq!(
// _rows, SCALAR_ZNX_ROWS,
// "rows != {} not supported for Scalar",
// SCALAR_ZNX_ROWS
// );
// debug_assert_eq!(
// _size, SCALAR_ZNX_SIZE,
// "rows != {} not supported for Scalar",
// SCALAR_ZNX_SIZE
// );
// module.n() * cols * std::mem::size_of::<self::Scalar>()
// }
// }
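// ---------------------------------------------------------------------------
// Editor's sketch (not part of the commit): allocating an owned Scalar and
// sampling a ternary secret, using only the APIs above and the Source seed
// style from this commit's tests. The Module<FFT64> parameter is illustrative
// (ScalarAlloc is implemented for any backend).
fn scalar_ternary_sketch(module: &Module<FFT64>) {
    let mut s: ScalarOwned = module.new_scalar(1); // Scalar<Vec<u8>>, one column
    let mut source: Source = Source::new([0u8; 32]);
    // prob is the probability of a nonzero coefficient, split evenly over ±1.
    s.fill_ternary_prob(0, 0.5, &mut source);
}
// ---------------------------------------------------------------------------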

View File

@@ -1,67 +1,97 @@
use std::marker::PhantomData;
use crate::ffi::svp;
use crate::znx_base::{ZnxAlloc, ZnxBase, ZnxInfos, ZnxLayout, ZnxSliceSize};
use crate::{Backend, FFT64, GetZnxBase, Module};
use crate::znx_base::ZnxInfos;
use crate::{Backend, DataView, DataViewMut, FFT64, Module, ZnxView, alloc_aligned};
pub const SCALAR_ZNX_DFT_ROWS: usize = 1;
pub const SCALAR_ZNX_DFT_SIZE: usize = 1;
pub struct ScalarZnxDft<B: Backend> {
pub inner: ZnxBase,
_marker: PhantomData<B>,
pub struct ScalarZnxDft<D, B> {
data: D,
n: usize,
cols: usize,
_phantom: PhantomData<B>,
}
impl<B: Backend> GetZnxBase for ScalarZnxDft<B> {
fn znx(&self) -> &ZnxBase {
&self.inner
impl<D, B> ZnxInfos for ScalarZnxDft<D, B> {
fn cols(&self) -> usize {
self.cols
}
fn znx_mut(&mut self) -> &mut ZnxBase {
&mut self.inner
fn rows(&self) -> usize {
1
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
1
}
fn sl(&self) -> usize {
self.n()
}
}
impl<B: Backend> ZnxInfos for ScalarZnxDft<B> {}
impl<B: Backend> ZnxAlloc<B> for ScalarZnxDft<B> {
type Scalar = u8;
fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
debug_assert_eq!(bytes.len(), Self::bytes_of(module, _rows, cols, _size));
Self {
inner: ZnxBase::from_bytes_borrow(
module.n(),
SCALAR_ZNX_DFT_ROWS,
cols,
SCALAR_ZNX_DFT_SIZE,
bytes,
),
_marker: PhantomData,
}
}
fn bytes_of(module: &Module<B>, _rows: usize, cols: usize, _size: usize) -> usize {
debug_assert_eq!(
_rows, SCALAR_ZNX_DFT_ROWS,
"rows != {} not supported for ScalarZnxDft",
SCALAR_ZNX_DFT_ROWS
);
debug_assert_eq!(
_size, SCALAR_ZNX_DFT_SIZE,
"rows != {} not supported for ScalarZnxDft",
SCALAR_ZNX_DFT_SIZE
);
unsafe { svp::bytes_of_svp_ppol(module.ptr) as usize * cols }
impl<D, B> DataView for ScalarZnxDft<D, B> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl ZnxLayout for ScalarZnxDft<FFT64> {
impl<D, B> DataViewMut for ScalarZnxDft<D, B> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: AsRef<[u8]>> ZnxView for ScalarZnxDft<D, FFT64> {
type Scalar = f64;
}
impl ZnxSliceSize for ScalarZnxDft<FFT64> {
fn sl(&self) -> usize {
self.n() * self.cols()
impl<D: From<Vec<u8>>, B: Backend> ScalarZnxDft<D, B> {
pub(crate) fn bytes_of(module: &Module<B>, cols: usize) -> usize {
unsafe { svp::bytes_of_svp_ppol(module.ptr) as usize * cols }
}
pub(crate) fn new(module: &Module<B>, cols: usize) -> Self {
let data = alloc_aligned::<u8>(Self::bytes_of(module, cols));
Self {
data: data.into(),
n: module.n(),
cols,
_phantom: PhantomData,
}
}
pub(crate) fn new_from_bytes(module: &Module<B>, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::bytes_of(module, cols));
Self {
data: data.into(),
n: module.n(),
cols,
_phantom: PhantomData,
}
}
// fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
// debug_assert_eq!(bytes.len(), Self::bytes_of(module, _rows, cols, _size));
// Self {
// inner: ZnxBase::from_bytes_borrow(
// module.n(),
// SCALAR_ZNX_DFT_ROWS,
// cols,
// SCALAR_ZNX_DFT_SIZE,
// bytes,
// ),
// _phantom: PhantomData,
// }
// }
}
pub type ScalarZnxDftOwned<B> = ScalarZnxDft<Vec<u8>, B>;

View File

@@ -1,35 +1,52 @@
use crate::ffi::svp::{self, svp_ppol_t};
use crate::ffi::vec_znx_dft::vec_znx_dft_t;
use crate::znx_base::{ZnxAlloc, ZnxInfos, ZnxLayout, ZnxSliceSize};
use crate::{Backend, FFT64, Module, SCALAR_ZNX_DFT_ROWS, SCALAR_ZNX_DFT_SIZE, Scalar, ScalarZnxDft, VecZnx, VecZnxDft};
use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
use crate::{Backend, FFT64, Module, Scalar, ScalarZnxDft, ScalarZnxDftOwned, VecZnx, VecZnxDft};
pub trait ScalarZnxDftOps<B: Backend> {
fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDft<B>;
pub trait ScalarZnxDftAlloc<B> {
fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDftOwned<B>;
fn bytes_of_scalar_znx_dft(&self, cols: usize) -> usize;
fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDft<B>;
fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<B>;
fn svp_prepare(&self, res: &mut ScalarZnxDft<B>, res_col: usize, a: &Scalar, a_col: usize);
fn svp_apply_dft(&self, res: &mut VecZnxDft<B>, res_col: usize, a: &ScalarZnxDft<B>, a_col: usize, b: &VecZnx, b_col: usize);
fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDftOwned<B>;
// fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<B>;
}
impl ScalarZnxDftOps<FFT64> for Module<FFT64> {
fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDft<FFT64> {
ScalarZnxDft::<FFT64>::new(&self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE)
pub trait ScalarZnxDftOps<DataMut, Data, B: Backend> {
fn svp_prepare(&self, res: &mut ScalarZnxDft<DataMut, B>, res_col: usize, a: &Scalar<Data>, a_col: usize);
fn svp_apply_dft(
&self,
res: &mut VecZnxDft<DataMut, B>,
res_col: usize,
a: &ScalarZnxDft<Data, B>,
a_col: usize,
b: &VecZnx<Data>,
b_col: usize,
);
}
impl<B: Backend> ScalarZnxDftAlloc<B> for Module<B> {
fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDftOwned<B> {
ScalarZnxDftOwned::new(self, cols)
}
fn bytes_of_scalar_znx_dft(&self, cols: usize) -> usize {
ScalarZnxDft::<FFT64>::bytes_of(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE)
ScalarZnxDftOwned::bytes_of(self, cols)
}
fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDft<FFT64> {
ScalarZnxDft::from_bytes(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE, bytes)
fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDftOwned<B> {
ScalarZnxDftOwned::new_from_bytes(self, cols, bytes)
}
fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<FFT64> {
ScalarZnxDft::from_bytes_borrow(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE, bytes)
}
// fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<FFT64> {
// ScalarZnxDft::from_bytes_borrow(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE, bytes)
// }
}
fn svp_prepare(&self, res: &mut ScalarZnxDft<FFT64>, res_col: usize, a: &Scalar, a_col: usize) {
impl<DataMut, Data> ScalarZnxDftOps<DataMut, Data, FFT64> for Module<FFT64>
where
DataMut: AsMut<[u8]> + AsRef<[u8]>,
Data: AsRef<[u8]>,
{
fn svp_prepare(&self, res: &mut ScalarZnxDft<DataMut, FFT64>, res_col: usize, a: &Scalar<Data>, a_col: usize) {
unsafe {
svp::svp_prepare(
self.ptr,
@@ -41,11 +58,11 @@ impl ScalarZnxDftOps<FFT64> for Module<FFT64> {
fn svp_apply_dft(
&self,
res: &mut VecZnxDft<FFT64>,
res: &mut VecZnxDft<DataMut, FFT64>,
res_col: usize,
a: &ScalarZnxDft<FFT64>,
a: &ScalarZnxDft<Data, FFT64>,
a_col: usize,
b: &VecZnx,
b: &VecZnx<Data>,
b_col: usize,
) {
unsafe {
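// ---------------------------------------------------------------------------
// Editor's sketch (not part of the commit): the scalar-vector product path
// with the split traits above. `s`, `b`, and `res_dft` are assumed to be an
// owned Scalar, VecZnx, and VecZnxDft of matching n and cols:
//
//     let mut s_dft: ScalarZnxDftOwned<FFT64> = module.new_scalar_znx_dft(1);
//     module.svp_prepare(&mut s_dft, 0, &s, 0);
//     module.svp_apply_dft(&mut res_dft, 0, &s_dft, 0, &b, 0);
// ---------------------------------------------------------------------------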

View File

@@ -9,7 +9,7 @@ pub trait Stats {
fn std(&self, col_i: usize, log_base2k: usize) -> f64;
}
impl Stats for VecZnx {
impl<D: AsMut<[u8]> + AsRef<[u8]>> Stats for VecZnx<D> {
fn std(&self, col_i: usize, log_base2k: usize) -> f64 {
let prec: u32 = (self.size() * log_base2k) as u32;
let mut data: Vec<Float> = (0..self.n()).map(|_| Float::with_val(prec, 0)).collect();

View File

@@ -1,13 +1,10 @@
use crate::Backend;
use crate::DataView;
use crate::DataViewMut;
use crate::Module;
use crate::ZnxView;
use crate::alloc_aligned;
use crate::assert_alignement;
use crate::cast_mut;
use crate::ffi::znx;
use crate::znx_base::{GetZnxBase, ZnxAlloc, ZnxBase, ZnxInfos, ZnxRsh, ZnxZero, switch_degree};
use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut, switch_degree};
use std::{cmp::min, fmt};
// pub const VEC_ZNX_ROWS: usize = 1;
@@ -59,7 +56,7 @@ impl<D> DataView for VecZnx<D> {
}
impl<D> DataViewMut for VecZnx<D> {
fn data_mut(&self) -> &mut Self::D {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
@@ -84,7 +81,7 @@ impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnx<D> {
return;
}
self.inner.size -= k / log_base2k;
self.size -= k / log_base2k;
let k_rem: usize = k % log_base2k;
@@ -97,7 +94,7 @@ impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnx<D> {
}
/// Switches the degree from `a.n()` to `self.n()`, writing the result into `self`
pub fn switch_degree<Data: AsRef<[u8]>>(&mut self, col: usize, a: &Data, col_a: usize) {
pub fn switch_degree<Data: AsRef<[u8]>>(&mut self, col: usize, a: &VecZnx<Data>, col_a: usize) {
switch_degree(self, col_a, a, col)
}
@@ -161,7 +158,7 @@ fn normalize_tmp_bytes(n: usize) -> usize {
n * std::mem::size_of::<i64>()
}
fn normalize<D: AsMut<[u8]>>(log_base2k: usize, a: &mut VecZnx<D>, a_col: usize, tmp_bytes: &mut [u8]) {
fn normalize<D: AsMut<[u8]> + AsRef<[u8]>>(log_base2k: usize, a: &mut VecZnx<D>, a_col: usize, tmp_bytes: &mut [u8]) {
let n: usize = a.n();
debug_assert!(
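// ---------------------------------------------------------------------------
// Editor's sketch: the data-view pattern this refactor standardizes on. The
// trait definitions themselves are not part of this diff; this reconstruction
// matches the impls above (the supertrait bound and the Sketch names are
// assumptions, not the crate's definitions):
pub trait DataViewSketch {
    type D;
    fn data(&self) -> &Self::D;
}
pub trait DataViewMutSketch: DataViewSketch {
    // &mut self receiver — the signature bug fixed twice in this commit
    fn data_mut(&mut self) -> &mut Self::D;
}
// ---------------------------------------------------------------------------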

View File

@@ -1,11 +1,11 @@
use crate::ffi::vec_znx_big;
use crate::znx_base::{GetZnxBase, ZnxAlloc, ZnxBase, ZnxInfos, ZnxView};
use crate::znx_base::{ZnxInfos, ZnxView};
use crate::{Backend, DataView, DataViewMut, FFT64, Module, alloc_aligned};
use std::marker::PhantomData;
const VEC_ZNX_BIG_ROWS: usize = 1;
/// VecZnxBig is Backend dependent, denoted with backend generic `B`
/// VecZnxBig is `Backend` dependent, denoted with backend generic `B`
pub struct VecZnxBig<D, B> {
data: D,
n: usize,
@@ -44,7 +44,7 @@ impl<D, B> DataView for VecZnxBig<D, B> {
}
impl<D, B> DataViewMut for VecZnxBig<D, B> {
fn data_mut(&self) -> &mut Self::D {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}

View File

@@ -1,6 +1,6 @@
use crate::ffi::vec_znx;
use crate::znx_base::{ZnxAlloc, ZnxInfos, ZnxView, ZnxViewMut};
use crate::{Backend, DataView, FFT64, Module, VecZnx, VecZnxBig, VecZnxBigOwned, VecZnxOps, assert_alignement};
use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
use crate::{Backend, DataView, FFT64, Module, ScratchSpace, VecZnx, VecZnxBig, VecZnxBigOwned, VecZnxOps, assert_alignement};
pub trait VecZnxBigAlloc<B> {
/// Allocates a vector Z[X]/(X^N+1) that stores non-normalized values.
@@ -79,13 +79,13 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
b_col: usize,
);
/// Subtracts `a` to `b` and stores the result on `b`.
/// Subtracts `a` from `b` and stores the result on `b`.
fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnxBig<Data, B>, a_col: usize);
/// Subtracts `b` to `a` and stores the result on `b`.
/// Subtracts `b` from `a` and stores the result on `b`.
fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnxBig<Data, B>, a_col: usize);
/// Subtracts `b` to `a` and stores the result on `c`.
/// Subtracts `b` from `a` and stores the result on `c`.
fn vec_znx_big_sub_small_a(
&self,
res: &mut VecZnxBig<DataMut, B>,
@@ -96,10 +96,10 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
b_col: usize,
);
/// Subtracts `a` to `b` and stores the result on `b`.
/// Subtracts `res` from `a` and stores the result on `res`.
fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
/// Subtracts `b` to `a` and stores the result on `c`.
/// Subtracts `b` from `a` and stores the result on `c`.
fn vec_znx_big_sub_small_b(
&self,
res: &mut VecZnxBig<DataMut, B>,
@@ -110,7 +110,7 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
b_col: usize,
);
/// Subtracts `b` to `a` and stores the result on `b`.
/// Subtracts `a` from `res` and stores the result on `res`.
fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
/// Returns the minimum number of bytes to apply [VecZnxBigOps::vec_znx_big_normalize].
@@ -129,7 +129,7 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
res_col: usize,
a: &VecZnxBig<Data, B>,
a_col: usize,
tmp_bytes: &mut [u8],
scratch: &mut ScratchSpace,
);
/// Applies the automorphism X^i -> X^ik on `a` and stores the result on `b`.
@@ -160,7 +160,7 @@ impl VecZnxBigAlloc<FFT64> for Module<FFT64> {
// }
fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize {
VecZnxBig::bytes_of(self, cols, size)
VecZnxBigOwned::bytes_of(self, cols, size)
}
}
@@ -208,8 +208,24 @@ where
a: &VecZnxBig<Data, FFT64>,
a_col: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
Self::vec_znx_big_add(self, res, res_col, a, a_col, res, res_col);
vec_znx::vec_znx_add(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
)
}
}
@@ -245,7 +261,6 @@ where
}
}
//(Jay)TODO: check whether definitions sub_ab, sub_ba make sense to you
fn vec_znx_big_sub_ab_inplace(
&self,
res: &mut VecZnxBig<DataMut, FFT64>,
@@ -253,8 +268,24 @@ where
a: &VecZnxBig<Data, FFT64>,
a_col: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
Self::vec_znx_big_sub(self, res, res_col, a, a_col, res, res_col);
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
@@ -265,8 +296,24 @@ where
a: &VecZnxBig<Data, FFT64>,
a_col: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
Self::vec_znx_big_sub(self, res, res_col, res, res_col, a, a_col);
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
)
}
}
@@ -309,8 +356,24 @@ where
a: &VecZnx<Data>,
a_col: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
Self::vec_znx_big_sub_small_b(self, res, res_col, res, res_col, a, a_col);
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
)
}
}
@@ -353,8 +416,24 @@ where
a: &VecZnx<Data>,
a_col: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
Self::vec_znx_big_sub_small_a(self, res, res_col, a, a_col, res, res_col);
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
@@ -391,11 +470,29 @@ where
}
fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<DataMut, FFT64>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
Self::vec_znx_big_add_small(self, res, res_col, res, res_col, a, a_col);
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
vec_znx::vec_znx_add(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
fn vec_znx_big_normalize_tmp_bytes(&self) -> usize {
Self::vec_znx_normalize_tmp_bytes(self)
<Self as VecZnxOps<DataMut, Data>>::vec_znx_normalize_tmp_bytes(self)
}
fn vec_znx_big_normalize(
@@ -405,14 +502,16 @@ where
res_col: usize,
a: &VecZnxBig<Data, FFT64>,
a_col: usize,
tmp_bytes: &mut [u8],
scratch: &mut ScratchSpace,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
assert!(tmp_bytes.len() >= Self::vec_znx_normalize_tmp_bytes(&self));
assert_alignement(tmp_bytes.as_ptr());
//(Jay)Note: This is calling VecZnxOps::vec_znx_normalize_tmp_bytes and not VecZnxBigOps::vec_znx_big_normalize_tmp_bytes.
// In the FFT backend the tmp sizes are the same, but they will differ in the NTT backend
// assert!(tmp_bytes.len() >= <Self as VecZnxOps<DataMut, Data>>::vec_znx_normalize_tmp_bytes(&self));
// assert_alignement(tmp_bytes.as_ptr());
}
unsafe {
vec_znx::vec_znx_normalize_base2k(
@@ -424,7 +523,7 @@ where
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
tmp_bytes.as_mut_ptr(),
scratch.vec_znx_big_normalize_tmp_bytes(self).as_mut_ptr(),
);
}
}
@@ -457,8 +556,21 @@ where
}
fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<DataMut, FFT64>, a_col: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
}
unsafe {
Self::vec_znx_big_automorphism(self, k, a, a_col, a, a_col);
vec_znx::vec_znx_automorphism(
self.ptr,
k,
a.at_mut_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
}
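// ---------------------------------------------------------------------------
// Editor's note: derived from the FFI bodies above, with
// vec_znx_sub(ptr, out.., lhs.., rhs..) computing out = lhs - rhs, the
// in-place subtraction variants now are:
//
//     vec_znx_big_sub_ab_inplace:      res = res - a
//     vec_znx_big_sub_ba_inplace:      res = a - res
//     vec_znx_big_sub_small_a_inplace: res = a - res   (a is a small VecZnx)
//     vec_znx_big_sub_small_b_inplace: res = res - a   (a is a small VecZnx)
// ---------------------------------------------------------------------------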

View File

@@ -1,11 +1,12 @@
use std::marker::PhantomData;
use crate::ffi::vec_znx_dft;
use crate::znx_base::{ZnxAlloc, ZnxInfos};
use crate::znx_base::ZnxInfos;
use crate::{Backend, DataView, DataViewMut, FFT64, Module, ZnxView, alloc_aligned};
const VEC_ZNX_DFT_ROWS: usize = 1;
// VecZnxDft is `Backend` dependent denoted with generic `B`
pub struct VecZnxDft<D, B> {
data: D,
n: usize,
@@ -44,7 +45,7 @@ impl<D, B> DataView for VecZnxDft<D, B> {
}
impl<D, B> DataViewMut for VecZnxDft<D, B> {
fn data_mut(&self) -> &mut Self::D {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
@@ -84,6 +85,18 @@ impl<D: From<Vec<u8>>, B: Backend> VecZnxDft<D, B> {
pub type VecZnxDftOwned<B> = VecZnxDft<Vec<u8>, B>;
impl<'a, D: ?Sized, B> VecZnxDft<&'a mut D, B> {
pub(crate) fn from_mut_slice(data: &'a mut D, n: usize, cols: usize, size: usize) -> Self {
Self {
data,
n,
cols,
size,
_phantom: PhantomData,
}
}
}
// impl<B: Backend> ZnxAlloc<B> for VecZnxDft<B> {
// type Scalar = u8;
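// Editor's note: from_mut_slice is what would let ScratchSpace hand out
// borrowed views instead of fresh allocations, e.g. (hypothetical wiring):
//
//     let view: VecZnxDft<&mut [u8], FFT64> =
//         VecZnxDft::from_mut_slice(&mut buf[..], n, cols, size);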

View File

@@ -1,7 +1,5 @@
use crate::VecZnxDftOwned;
use crate::ffi::vec_znx_big;
use crate::ffi::vec_znx_dft;
use crate::znx_base::ZnxAlloc;
use crate::ffi::{vec_znx_big, vec_znx_dft};
use crate::znx_base::ZnxInfos;
use crate::{FFT64, Module, VecZnx, VecZnxBig, VecZnxDft, ZnxView, ZnxViewMut, ZnxZero, assert_alignement};
use std::cmp::min;
@@ -82,7 +80,7 @@ impl VecZnxDftAlloc<FFT64> for Module<FFT64> {
// }
fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize {
VecZnxDft::bytes_of(&self, cols, size)
VecZnxDftOwned::bytes_of(&self, cols, size)
}
}
@@ -156,10 +154,10 @@ where
#[cfg(debug_assertions)]
{
assert!(
tmp_bytes.len() >= Self::vec_znx_idft_tmp_bytes(self),
tmp_bytes.len() >= <Self as VecZnxDftOps<DataMut, DataMut, FFT64>>::vec_znx_idft_tmp_bytes(self),
"invalid tmp_bytes: tmp_bytes.len()={} < self.vec_znx_idft_tmp_bytes()={}",
tmp_bytes.len(),
Self::vec_znx_idft_tmp_bytes(self)
<Self as VecZnxDftOps<DataMut, DataMut, FFT64>>::vec_znx_idft_tmp_bytes(self)
);
assert_alignement(tmp_bytes.as_ptr())
}

View File

@@ -86,10 +86,14 @@ pub trait VecZnxOps<DataMut, Data> {
);
/// Subtracts the selected column of `a` from the selected column of `res` inplace.
///
/// res[res_col] -= a[a_col]
fn vec_znx_sub_ab_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
// /// Subtracts the selected column of `a` from the selected column of `res` and negates the selected column of `res`.
// fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
/// Subtracts the selected column of `res` from the selected column of `a`, storing the result in `res`.
///
/// res[res_col] = a[a_col] - res[res_col]
fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
/// Negates the selected column of `a` and stores the result in `res_col` of `res`.
fn vec_znx_negate(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
@@ -136,15 +140,15 @@ pub trait VecZnxOps<DataMut, Data> {
impl<B: Backend> VecZnxAlloc for Module<B> {
//(Jay)TODO: One must define the Scalar generic param here.
fn new_vec_znx(&self, cols: usize, size: usize) -> VecZnxOwned {
VecZnxOwned::new(self.n(), cols, size)
VecZnxOwned::new::<i64>(self.n(), cols, size)
}
fn bytes_of_vec_znx(&self, cols: usize, size: usize) -> usize {
VecZnxOwned::bytes_of(self.n(), cols, size)
VecZnxOwned::bytes_of::<i64>(self.n(), cols, size)
}
fn new_vec_znx_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxOwned {
VecZnxOwned::new_from_bytes(self.n(), cols, size, bytes)
VecZnxOwned::new_from_bytes::<i64>(self.n(), cols, size, bytes)
}
}
@@ -170,7 +174,7 @@ where
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
assert!(tmp_bytes.len() >= Self::vec_znx_normalize_tmp_bytes(&self));
assert!(tmp_bytes.len() >= <Self as VecZnxOps<DataMut, Data>>::vec_znx_normalize_tmp_bytes(&self));
assert_alignement(tmp_bytes.as_ptr());
}
unsafe {
@@ -190,16 +194,8 @@ where
fn vec_znx_normalize_inplace(&self, log_base2k: usize, a: &mut VecZnx<DataMut>, a_col: usize, tmp_bytes: &mut [u8]) {
unsafe {
let a_ptr: *mut VecZnx = a as *mut VecZnx;
Self::vec_znx_normalize(
self,
log_base2k,
&mut *a_ptr,
a_col,
&*a_ptr,
a_col,
tmp_bytes,
);
let a_ptr: *const VecZnx<_> = a;
Self::vec_znx_normalize(self, log_base2k, a, a_col, &*a_ptr, a_col, tmp_bytes);
}
}
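// Note on the pattern above: the `*const` reborrow hands the same vector to
// vec_znx_normalize as both destination and source. The borrow checker cannot
// express this aliasing, so it is routed through a raw pointer; this presumes
// the backend routine tolerates res == a. The same trick reappears in
// vec_znx_negate_inplace below.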
@@ -236,8 +232,24 @@ where
}
fn vec_znx_add_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
Self::vec_znx_add(&self, res, res_col, a, a_col, res, res_col);
vec_znx::vec_znx_add(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
)
}
}
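// Note on the rewrite above (and the analogous *_inplace bodies below): with
// the Data/DataMut generics split, `res` can no longer be passed as both the
// mutable output and a shared input of Self::vec_znx_add, so the in-place
// variants now wire `res` into both slots of the backend FFI call directly.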
@@ -274,18 +286,48 @@ where
}
fn vec_znx_sub_ab_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
let res_ptr: *mut VecZnx = res as *mut VecZnx;
Self::vec_znx_sub(self, res, res_col, a, a_col, res, res_col);
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
// fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx, res_col: usize, a: &VecZnx, a_col: usize) {
// unsafe {
// let res_ptr: *mut VecZnx = res as *mut VecZnx;
// Self::vec_znx_sub(self, &mut *res_ptr, res_col, &*res_ptr, res_col, a, a_col);
// }
// }
fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
res.at_ptr(res_col, 0),
res.size() as u64,
res.sl() as u64,
)
}
}
fn vec_znx_negate(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
#[cfg(debug_assertions)]
@@ -308,7 +350,8 @@ where
fn vec_znx_negate_inplace(&self, a: &mut VecZnx<DataMut>, a_col: usize) {
unsafe {
Self::vec_znx_negate(self, a, a_col, a, a_col);
let a_ref: *const VecZnx<_> = a;
Self::vec_znx_negate(self, a, a_col, a_ref.as_ref().unwrap(), a_col);
}
}
@@ -333,8 +376,21 @@ where
}
fn vec_znx_rotate_inplace(&self, k: i64, a: &mut VecZnx<DataMut>, a_col: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
}
unsafe {
Self::vec_znx_rotate(self, k, a, a_col, a, a_col);
vec_znx::vec_znx_rotate(
self.ptr,
k,
a.at_mut_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
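// Reference sketch (not the crate's code path) of what the rotation computes
// on one column: multiplication by X^k in Z[X]/(X^n + 1), so coefficients
// shift by k positions and pick up a sign flip when they wrap past X^n.
fn negacyclic_rotate_sketch(a: &[i64], k: i64) -> Vec<i64> {
    let n = a.len() as i64;
    let mut res = vec![0i64; a.len()];
    for (i, &c) in a.iter().enumerate() {
        let j = (i as i64 + k).rem_euclid(2 * n); // exponent of X modulo 2n
        if j < n {
            res[j as usize] = c; // stayed below X^n: same sign
        } else {
            res[(j - n) as usize] = -c; // reduced by X^n = -1: negated
        }
    }
    res
}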
@@ -359,8 +415,21 @@ where
}
fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx<DataMut>, a_col: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
}
unsafe {
Self::vec_znx_automorphism(self, k, a, a_col, a, a_col);
vec_znx::vec_znx_automorphism(
self.ptr,
k,
a.at_mut_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
a.at_ptr(a_col, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
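// Reference sketch of the Galois automorphism X -> X^k on Z[X]/(X^n + 1),
// assuming n is a power of two and k is odd so that i -> i*k mod 2n permutes
// the coefficients; illustrative only, the backend call above is the real
// implementation.
fn automorphism_sketch(a: &[i64], k: i64) -> Vec<i64> {
    let n = a.len() as i64;
    let mut res = vec![0i64; a.len()];
    for (i, &c) in a.iter().enumerate() {
        let j = (i as i64 * k).rem_euclid(2 * n); // X^i maps to X^(i*k) mod 2n
        if j < n {
            res[j as usize] = c;
        } else {
            res[(j - n) as usize] = -c; // X^n = -1
        }
    }
    res
}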
@@ -392,7 +461,7 @@ where
self.vec_znx_rotate(-1, buf, 0, a, a_col);
} else {
switch_degree(bi, res_col, buf, a_col);
self.vec_znx_rotate_inplace(-1, buf, a_col);
<Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, -1, buf, a_col);
}
})
}
@@ -414,9 +483,9 @@ where
a.iter().enumerate().for_each(|(_, ai)| {
switch_degree(res, res_col, ai, a_col);
self.vec_znx_rotate_inplace(-1, res, res_col);
<Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, -1, res, res_col);
});
self.vec_znx_rotate_inplace(a.len() as i64, res, res_col);
<Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, a.len() as i64, res, res_col);
}
}

View File

@@ -85,26 +85,26 @@ pub trait ZnxInfos {
// pub trait ZnxSliceSize {}
// (Jay) TODO: Remove ZnxAlloc
pub trait ZnxAlloc<B: Backend>
where
Self: Sized + ZnxInfos,
{
type Scalar;
fn new(module: &Module<B>, rows: usize, cols: usize, size: usize) -> Self {
let bytes: Vec<u8> = alloc_aligned::<u8>(Self::bytes_of(module, rows, cols, size));
Self::from_bytes(module, rows, cols, size, bytes)
}
// pub trait ZnxAlloc<B: Backend>
// where
// Self: Sized + ZnxInfos,
// {
// type Scalar;
// fn new(module: &Module<B>, rows: usize, cols: usize, size: usize) -> Self {
// let bytes: Vec<u8> = alloc_aligned::<u8>(Self::bytes_of(module, rows, cols, size));
// Self::from_bytes(module, rows, cols, size, bytes)
// }
fn from_bytes(module: &Module<B>, rows: usize, cols: usize, size: usize, mut bytes: Vec<u8>) -> Self {
let mut res: Self = Self::from_bytes_borrow(module, rows, cols, size, &mut bytes);
res.znx_mut().data = bytes;
res
}
// fn from_bytes(module: &Module<B>, rows: usize, cols: usize, size: usize, mut bytes: Vec<u8>) -> Self {
// let mut res: Self = Self::from_bytes_borrow(module, rows, cols, size, &mut bytes);
// res.znx_mut().data = bytes;
// res
// }
fn from_bytes_borrow(module: &Module<B>, rows: usize, cols: usize, size: usize, bytes: &mut [u8]) -> Self;
// fn from_bytes_borrow(module: &Module<B>, rows: usize, cols: usize, size: usize, bytes: &mut [u8]) -> Self;
fn bytes_of(module: &Module<B>, rows: usize, cols: usize, size: usize) -> usize;
}
// fn bytes_of(module: &Module<B>, rows: usize, cols: usize, size: usize) -> usize;
// }
pub trait DataView {
type D;
@@ -112,11 +112,11 @@ pub trait DataView {
}
pub trait DataViewMut: DataView {
fn data_mut(&self) -> &mut Self::D;
fn data_mut(&mut self) -> &mut Self::D;
}
pub trait ZnxView: ZnxInfos + DataView<D: AsRef<[u8]>> {
type Scalar;
type Scalar: Copy;
/// Returns a non-mutable pointer to the underlying coefficients array.
fn as_ptr(&self) -> *const Self::Scalar {
@@ -177,11 +177,9 @@ pub trait ZnxViewMut: ZnxView + DataViewMut<D: AsMut<[u8]>> {
impl<T> ZnxViewMut for T where T: ZnxView + DataViewMut<D: AsMut<[u8]>> {}
use std::convert::TryFrom;
use std::num::TryFromIntError;
use std::ops::{Add, AddAssign, Div, Mul, Neg, Shl, Shr, Sub};
pub trait IntegerType:
pub trait Num:
Copy
+ std::fmt::Debug
+ Default
+ PartialEq
+ PartialOrd
@@ -190,22 +188,23 @@ pub trait IntegerType:
+ Mul<Output = Self>
+ Div<Output = Self>
+ Neg<Output = Self>
+ Shr<Output = Self>
+ Shl<Output = Self>
+ AddAssign
+ TryFrom<usize, Error = TryFromIntError>
{
const BITS: u32;
}
impl IntegerType for i64 {
impl Num for i64 {
const BITS: u32 = 64;
}
impl IntegerType for i128 {
impl Num for i128 {
const BITS: u32 = 128;
}
impl Num for f64 {
const BITS: u32 = 64;
}
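// Note: f64 can satisfy Num only because the integer-specific bounds of the
// old IntegerType trait (the Shl/Shr shifts and TryFrom<usize>) no longer
// appear in the trait, consistent with the hunk above; BITS is kept as the
// raw bit width of the type.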
pub trait ZnxZero: ZnxViewMut
where
Self: Sized,
@@ -231,79 +230,16 @@ where
}
}
pub trait ZnxRsh: ZnxZero {
fn rsh(&mut self, k: usize, log_base2k: usize, col: usize, carry: &mut [u8]) {
rsh(k, log_base2k, self, col, carry)
}
}
// Blanket implementations
impl<T> ZnxZero for T where T: ZnxViewMut {}
impl<T> ZnxRsh for T where T: ZnxZero {}
// impl<T> ZnxRsh for T where T: ZnxZero {}
pub fn rsh<V: ZnxRsh + ZnxZero>(k: usize, log_base2k: usize, a: &mut V, a_col: usize, tmp_bytes: &mut [u8])
where
V::Scalar: IntegerType,
{
let n: usize = a.n();
let size: usize = a.size();
let cols: usize = a.cols();
#[cfg(debug_assertions)]
{
assert!(
tmp_bytes.len() >= rsh_tmp_bytes::<V::Scalar>(n),
"invalid carry: carry.len()/size_ofSelf::Scalar={} < rsh_tmp_bytes({}, {})",
tmp_bytes.len() / size_of::<V::Scalar>(),
n,
size,
);
assert_alignement(tmp_bytes.as_ptr());
}
let size: usize = a.size();
let steps: usize = k / log_base2k;
a.raw_mut().rotate_right(n * steps * cols);
(0..cols).for_each(|i| {
(0..steps).for_each(|j| {
a.zero_at(i, j);
})
});
let k_rem: usize = k % log_base2k;
if k_rem != 0 {
let carry: &mut [V::Scalar] = cast_mut(tmp_bytes);
unsafe {
std::ptr::write_bytes(carry.as_mut_ptr(), 0, n * size_of::<V::Scalar>());
}
let log_base2k_t: V::Scalar = V::Scalar::try_from(log_base2k).unwrap();
let shift: V::Scalar = V::Scalar::try_from(V::Scalar::BITS as usize - k_rem).unwrap();
let k_rem_t: V::Scalar = V::Scalar::try_from(k_rem).unwrap();
(steps..size).for_each(|i| {
izip!(carry.iter_mut(), a.at_mut(a_col, i).iter_mut()).for_each(|(ci, xi)| {
*xi += *ci << log_base2k_t;
*ci = get_base_k_carry(*xi, shift);
*xi = (*xi - *ci) >> k_rem_t;
});
})
}
}
#[inline(always)]
fn get_base_k_carry<T: IntegerType>(x: T, shift: T) -> T {
(x << shift) >> shift
}
pub fn rsh_tmp_bytes<T: IntegerType>(n: usize) -> usize {
n * std::mem::size_of::<T>()
}
pub fn switch_degree<DMut: ZnxViewMut + ZnxZero, D: ZnxView>(b: &mut DMut, col_b: usize, a: &D, col_a: usize) {
pub fn switch_degree<S: Copy, DMut: ZnxViewMut<Scalar = S> + ZnxZero, D: ZnxView<Scalar = S>>(
b: &mut DMut,
col_b: usize,
a: &D,
col_a: usize,
) {
let (n_in, n_out) = (a.n(), b.n());
let (gap_in, gap_out): (usize, usize);
@@ -325,6 +261,71 @@ pub fn switch_degree<DMut: ZnxViewMut + ZnxZero, D: ZnxView>(b: &mut DMut, col_b
});
}
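// Illustrative sketch of the degree switch performed above, assuming n_in and
// n_out are powers of two; the semantics are inferred from the gap computation
// and this is not the crate's code. Downscaling keeps every (n_in/n_out)-th
// coefficient, upscaling spreads coefficients out with zero-filled gaps.
fn switch_degree_sketch(a: &[i64], n_out: usize) -> Vec<i64> {
    let n_in = a.len();
    let mut b = vec![0i64; n_out];
    if n_out <= n_in {
        let gap_in = n_in / n_out;
        for i in 0..n_out {
            b[i] = a[i * gap_in];
        }
    } else {
        let gap_out = n_out / n_in;
        for i in 0..n_in {
            b[i * gap_out] = a[i];
        }
    }
    b
}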
// (Jay) TODO: implement rsh for VecZnx, VecZnxBig
// pub trait ZnxRsh: ZnxZero {
// fn rsh(&mut self, k: usize, log_base2k: usize, col: usize, carry: &mut [u8]) {
// rsh(k, log_base2k, self, col, carry)
// }
// }
// pub fn rsh<V: ZnxRsh + ZnxZero>(k: usize, log_base2k: usize, a: &mut V, a_col: usize, tmp_bytes: &mut [u8]) {
// let n: usize = a.n();
// let size: usize = a.size();
// let cols: usize = a.cols();
// #[cfg(debug_assertions)]
// {
// assert!(
// tmp_bytes.len() >= rsh_tmp_bytes::<V::Scalar>(n),
// "invalid carry: carry.len()/size_ofSelf::Scalar={} < rsh_tmp_bytes({}, {})",
// tmp_bytes.len() / size_of::<V::Scalar>(),
// n,
// size,
// );
// assert_alignement(tmp_bytes.as_ptr());
// }
// let size: usize = a.size();
// let steps: usize = k / log_base2k;
// a.raw_mut().rotate_right(n * steps * cols);
// (0..cols).for_each(|i| {
// (0..steps).for_each(|j| {
// a.zero_at(i, j);
// })
// });
// let k_rem: usize = k % log_base2k;
// if k_rem != 0 {
// let carry: &mut [V::Scalar] = cast_mut(tmp_bytes);
// unsafe {
// std::ptr::write_bytes(carry.as_mut_ptr(), 0, n * size_of::<V::Scalar>());
// }
// let log_base2k_t: V::Scalar = V::Scalar::try_from(log_base2k).unwrap();
// let shift: V::Scalar = V::Scalar::try_from(V::Scalar::BITS as usize - k_rem).unwrap();
// let k_rem_t: V::Scalar = V::Scalar::try_from(k_rem).unwrap();
// (steps..size).for_each(|i| {
// izip!(carry.iter_mut(), a.at_mut(a_col, i).iter_mut()).for_each(|(ci, xi)| {
// *xi += *ci << log_base2k_t;
// *ci = get_base_k_carry(*xi, shift);
// *xi = (*xi - *ci) >> k_rem_t;
// });
// })
// }
// }
// #[inline(always)]
// fn get_base_k_carry<T: Num>(x: T, shift: T) -> T {
// (x << shift) >> shift
// }
// pub fn rsh_tmp_bytes<T: Num>(n: usize) -> usize {
// n * std::mem::size_of::<T>()
// }
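// A compilable reference for the carry propagation in the commented-out `rsh`
// above: right-shift by r bits a value stored as signed base-2^b limbs, most
// significant limb first. A sketch under those layout assumptions, not the
// crate's implementation.
fn rsh_limbs_sketch(limbs: &mut [i64], b: u32, r: u32) {
    assert!(0 < r && r < b && b + r < 63); // keep carry << b in i64 range
    let mut carry: i64 = 0;
    for d in limbs.iter_mut() {
        let x = *d + (carry << b); // pull in the remainder of the limb above
        carry = (x << (64 - r)) >> (64 - r); // sign-extended low r bits of x
        *d = (x - carry) >> r; // exact division by 2^r
    }
}
// E.g. with b = 8, limbs [1, 0] encode 256; shifting by r = 4 yields [0, 16], i.e. 16.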
// pub trait ZnxLayout: ZnxInfos {
// type Scalar;