mirror of https://github.com/arnaucube/poulpy.git

everything compiles. Scratchpad not yet implemented
@@ -1,5 +1,5 @@
 use base2k::{
-    Encoding, FFT64, Module, Sampling, Scalar, ScalarOps, ScalarZnxDft, ScalarZnxDftOps, VecZnx, VecZnxBig, VecZnxBigOps,
+    Encoding, FFT64, Module, Sampling, Scalar, ScalarAlloc, ScalarZnxDft, ScalarZnxDftOps, VecZnx, VecZnxBig, VecZnxBigOps,
     VecZnxDft, VecZnxDftOps, VecZnxOps, ZnxInfos, alloc_aligned,
 };
 use itertools::izip;
@@ -1,5 +1,5 @@
 use crate::ffi::znx::znx_zero_i64_ref;
-use crate::znx_base::ZnxLayout;
+use crate::znx_base::{ZnxView, ZnxViewMut};
 use crate::{VecZnx, znx_base::ZnxInfos};
 use itertools::izip;
 use rug::{Assign, Float};
@@ -59,7 +59,7 @@ pub trait Encoding {
     fn decode_coeff_i64(&self, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64;
 }
 
-impl Encoding for VecZnx {
+impl<D: AsMut<[u8]> + AsRef<[u8]>> Encoding for VecZnx<D> {
     fn encode_vec_i64(&mut self, col_i: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
         encode_vec_i64(self, col_i, log_base2k, log_k, data, log_max)
     }
@@ -81,7 +81,14 @@ impl Encoding for VecZnx {
     }
 }
 
-fn encode_vec_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
+fn encode_vec_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
+    a: &mut VecZnx<D>,
+    col_i: usize,
+    log_base2k: usize,
+    log_k: usize,
+    data: &[i64],
+    log_max: usize,
+) {
     let size: usize = (log_k + log_base2k - 1) / log_base2k;
 
     #[cfg(debug_assertions)]
@@ -132,7 +139,7 @@ fn encode_vec_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usize,
     }
 }
 
-fn decode_vec_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
+fn decode_vec_i64<D: AsMut<[u8]> + AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
     let size: usize = (log_k + log_base2k - 1) / log_base2k;
     #[cfg(debug_assertions)]
     {
@@ -160,7 +167,7 @@ fn decode_vec_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, dat
     })
 }
 
-fn decode_vec_float(a: &VecZnx, col_i: usize, log_base2k: usize, data: &mut [Float]) {
+fn decode_vec_float<D: AsMut<[u8]> + AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, data: &mut [Float]) {
     let size: usize = a.size();
     #[cfg(debug_assertions)]
     {
@@ -194,7 +201,15 @@ fn decode_vec_float(a: &VecZnx, col_i: usize, log_base2k: usize, data: &mut [Flo
     });
 }
 
-fn encode_coeff_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
+fn encode_coeff_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
+    a: &mut VecZnx<D>,
+    col_i: usize,
+    log_base2k: usize,
+    log_k: usize,
+    i: usize,
+    value: i64,
+    log_max: usize,
+) {
     let size: usize = (log_k + log_base2k - 1) / log_base2k;
 
     #[cfg(debug_assertions)]
@@ -237,7 +252,7 @@ fn encode_coeff_i64(a: &mut VecZnx, col_i: usize, log_base2k: usize, log_k: usiz
     }
 }
 
-fn decode_coeff_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
+fn decode_coeff_i64<D: AsMut<[u8]> + AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
    #[cfg(debug_assertions)]
    {
        assert!(i < a.n());
@@ -263,10 +278,9 @@ fn decode_coeff_i64(a: &VecZnx, col_i: usize, log_base2k: usize, log_k: usize, i
 
 #[cfg(test)]
 mod tests {
-    use crate::{
-        Encoding, FFT64, Module, VecZnx, VecZnxOps,
-        znx_base::{ZnxInfos, ZnxLayout},
-    };
+    use crate::vec_znx_ops::*;
+    use crate::znx_base::*;
+    use crate::{Encoding, FFT64, Module, VecZnx, znx_base::ZnxInfos};
     use itertools::izip;
     use sampling::source::Source;
 
@@ -277,7 +291,7 @@ mod tests {
         let log_base2k: usize = 17;
         let size: usize = 5;
         let log_k: usize = size * log_base2k - 5;
-        let mut a: VecZnx = module.new_vec_znx(2, size);
+        let mut a: VecZnx<_> = module.new_vec_znx(2, size);
         let mut source: Source = Source::new([0u8; 32]);
         let raw: &mut [i64] = a.raw_mut();
         raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
@@ -299,7 +313,7 @@ mod tests {
         let log_base2k: usize = 17;
         let size: usize = 5;
         let log_k: usize = size * log_base2k - 5;
-        let mut a: VecZnx = module.new_vec_znx(2, size);
+        let mut a: VecZnx<_> = module.new_vec_znx(2, size);
         let mut source = Source::new([0u8; 32]);
         let raw: &mut [i64] = a.raw_mut();
         raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
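Every hunk in the file above applies the same migration: the concrete `VecZnx` becomes `VecZnx<D>`, generic over the buffer type `D` that owns or borrows the coefficient bytes. Below is a minimal sketch of that pattern with hypothetical names (the crate's real `VecZnx` has more fields and methods, and in this commit even the decode functions carry the full `AsMut<[u8]> + AsRef<[u8]>` bound):

```rust
// Illustrative only -- not the crate's actual definition. One struct serves
// both owned (Vec<u8>) and borrowed (&mut [u8]) storage through the generic
// parameter D; the trait bounds gate read and write access.
struct VecZnxSketch<D> {
    data: D,     // backing bytes: Vec<u8>, &[u8], &mut [u8], ...
    n: usize,    // ring degree
    cols: usize, // number of columns
    size: usize, // number of base-2^k limbs per column
}

impl<D: AsRef<[u8]>> VecZnxSketch<D> {
    // Read-only access needs only AsRef.
    fn raw(&self) -> &[u8] {
        self.data.as_ref()
    }
}

impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnxSketch<D> {
    // Mutation additionally requires AsMut, matching the bound on
    // encode_vec_i64 in the diff above.
    fn raw_mut(&mut self) -> &mut [u8] {
        self.data.as_mut()
    }
}

fn main() {
    // The same methods work over owned and borrowed buffers.
    let mut owned = VecZnxSketch { data: vec![0u8; 64], n: 8, cols: 1, size: 1 };
    owned.raw_mut()[0] = 1;

    let mut backing = [0u8; 64];
    let mut borrowed = VecZnxSketch { data: &mut backing[..], n: 8, cols: 1, size: 1 };
    borrowed.raw_mut()[0] = 1;
    assert_eq!(owned.raw()[0], borrowed.raw()[0]);
}
```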
@@ -125,3 +125,29 @@ pub fn alloc_aligned<T>(size: usize) -> Vec<T> {
         DEFAULTALIGN,
     )
 }
+
+pub(crate) struct ScratchSpace {
+    // data: D,
+}
+
+impl ScratchSpace {
+    fn tmp_vec_znx_dft<D, B>(&mut self, n: usize, cols: usize, size: usize) -> VecZnxDft<D, B> {
+        todo!()
+    }
+
+    fn tmp_vec_znx_big<D, B>(&mut self, n: usize, cols: usize, size: usize) -> VecZnxBig<D, B> {
+        todo!()
+    }
+
+    fn vec_znx_big_normalize_tmp_bytes<B: Backend>(&mut self, module: &Module<B>) -> &mut [u8] {
+        todo!()
+    }
+
+    fn vmp_apply_dft_tmp_bytes<B: Backend>(&mut self, module: &Module<B>) -> &mut [u8] {
+        todo!()
+    }
+
+    fn vmp_apply_dft_to_dft_tmp_bytes<B: Backend>(&mut self, module: &Module<B>) -> &mut [u8] {
+        todo!()
+    }
+}
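The `ScratchSpace` added above is only a stub (`todo!()` everywhere), matching the commit message. One plausible direction, sketched here purely as an assumption about where this is heading, is a bump allocator over a single buffer; the real design would also need the crate's `alloc_aligned` for FFT-friendly alignment and would hand out typed `VecZnxDft`/`VecZnxBig` temporaries rather than raw slices:

```rust
// Hypothetical sketch, not crate code: a bump allocator that could back the
// stubbed ScratchSpace methods. Alignment handling is simplified.
pub struct ScratchSketch {
    buf: Vec<u8>,
    offset: usize,
}

impl ScratchSketch {
    pub fn new(capacity: usize) -> Self {
        Self { buf: vec![0u8; capacity], offset: 0 }
    }

    /// Hands out the next `len` bytes of the buffer.
    pub fn take(&mut self, len: usize) -> &mut [u8] {
        let start = self.offset;
        self.offset = start + len;
        &mut self.buf[start..start + len]
    }

    /// Rewinds the allocator between top-level operations.
    pub fn reset(&mut self) {
        self.offset = 0;
    }
}
```

Because `take` borrows `self` mutably, two temporaries cannot be live at once under this exact shape; that constraint is presumably why the stubs above return owned wrapper types (`VecZnxDft<D, B>`) instead of slices.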
@@ -1,5 +1,5 @@
-use crate::znx_base::{GetZnxBase, ZnxBase, ZnxInfos, ZnxLayout, ZnxSliceSize};
-use crate::{Backend, FFT64, Module, alloc_aligned};
+use crate::znx_base::{GetZnxBase, ZnxBase, ZnxInfos};
+use crate::{Backend, DataView, DataViewMut, FFT64, Module, ZnxView, alloc_aligned};
 use std::marker::PhantomData;
 
 /// Vector Matrix Product Prepared Matrix: a vector of [VecZnx],
@@ -8,68 +8,67 @@ use std::marker::PhantomData;
 ///
 /// [MatZnxDft] is used to permform a vector matrix product between a [VecZnx]/[VecZnxDft] and a [MatZnxDft].
 /// See the trait [MatZnxDftOps] for additional information.
-pub struct MatZnxDft<B: Backend> {
-    pub inner: ZnxBase,
-    pub cols_in: usize,
-    pub cols_out: usize,
+pub struct MatZnxDft<D, B> {
+    data: D,
+    n: usize,
+    size: usize,
+    rows: usize,
+    cols_in: usize,
+    cols_out: usize,
     _marker: PhantomData<B>,
 }
 
-impl<B: Backend> GetZnxBase for MatZnxDft<B> {
-    fn znx(&self) -> &ZnxBase {
-        &self.inner
+impl<D, B> ZnxInfos for MatZnxDft<D, B> {
+    fn cols(&self) -> usize {
+        self.cols_in
     }
 
-    fn znx_mut(&mut self) -> &mut ZnxBase {
-        &mut self.inner
+    fn rows(&self) -> usize {
+        self.rows
     }
-}
 
-impl<B: Backend> ZnxInfos for MatZnxDft<B> {}
+    fn n(&self) -> usize {
+        self.n
+    }
+
+    fn size(&self) -> usize {
+        self.size
+    }
 
-impl ZnxSliceSize for MatZnxDft<FFT64> {
     fn sl(&self) -> usize {
         self.n()
     }
 }
 
-impl ZnxLayout for MatZnxDft<FFT64> {
+impl<D, B> DataView for MatZnxDft<D, B> {
+    type D = D;
+    fn data(&self) -> &Self::D {
+        &self.data
+    }
+}
+
+impl<D, B> DataViewMut for MatZnxDft<D, B> {
+    fn data_mut(&mut self) -> &mut Self::D {
+        &mut self.data
+    }
+}
+
+impl<D: AsRef<[u8]>> ZnxView for MatZnxDft<D, FFT64> {
     type Scalar = f64;
 }
 
-impl<B: Backend> MatZnxDft<B> {
-    pub fn new(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
-        let bytes: Vec<u8> = alloc_aligned(Self::bytes_of(module, rows, cols_in, cols_out, size));
-        Self::from_bytes(module, rows, cols_in, cols_out, size, bytes)
+impl<D, B> MatZnxDft<D, B> {
+    pub(crate) fn cols_in(&self) -> usize {
+        self.cols_in
     }
 
-    pub fn from_bytes(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize, mut bytes: Vec<u8>) -> Self {
-        let mut mat: MatZnxDft<B> = Self::from_bytes_borrow(module, rows, cols_in, cols_out, size, &mut bytes);
-        mat.znx_mut().data = bytes;
-        mat
+    pub(crate) fn cols_out(&self) -> usize {
+        self.cols_out
     }
+}
 
-    pub fn from_bytes_borrow(
-        module: &Module<B>,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-        bytes: &mut [u8],
-    ) -> Self {
-        debug_assert_eq!(
-            bytes.len(),
-            Self::bytes_of(module, rows, cols_in, cols_out, size)
-        );
-        Self {
-            inner: ZnxBase::from_bytes_borrow(module.n(), rows, cols_out, size, bytes),
-            cols_in: cols_in,
-            cols_out: cols_out,
-            _marker: PhantomData,
-        }
-    }
-
-    pub fn bytes_of(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
+impl<D: From<Vec<u8>>, B: Backend> MatZnxDft<D, B> {
+    pub(crate) fn bytes_of(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
         unsafe {
             crate::ffi::vmp::bytes_of_vmp_pmat(
                 module.ptr,
@@ -79,16 +78,62 @@ impl<B: Backend> MatZnxDft<B> {
         }
     }
 
-    pub fn cols_in(&self) -> usize {
-        self.cols_in
+    pub(crate) fn new(module: &Module<B>, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
+        let data: Vec<u8> = alloc_aligned(Self::bytes_of(module, rows, cols_in, cols_out, size));
+        Self {
+            data: data.into(),
+            n: module.n(),
+            size,
+            rows,
+            cols_in,
+            cols_out,
+            _marker: PhantomData,
+        }
     }
 
-    pub fn cols_out(&self) -> usize {
-        self.cols_out
+    pub(crate) fn new_from_bytes(
+        module: &Module<B>,
+        rows: usize,
+        cols_in: usize,
+        cols_out: usize,
+        size: usize,
+        bytes: impl Into<Vec<u8>>,
+    ) -> Self {
+        let data: Vec<u8> = bytes.into();
+        assert!(data.len() == Self::bytes_of(module, rows, cols_in, cols_out, size));
+        Self {
+            data: data.into(),
+            n: module.n(),
+            size,
+            rows,
+            cols_in,
+            cols_out,
+            _marker: PhantomData,
+        }
     }
+
+    // pub fn from_bytes_borrow(
+    //     module: &Module<B>,
+    //     rows: usize,
+    //     cols_in: usize,
+    //     cols_out: usize,
+    //     size: usize,
+    //     bytes: &mut [u8],
+    // ) -> Self {
+    //     debug_assert_eq!(
+    //         bytes.len(),
+    //         Self::bytes_of(module, rows, cols_in, cols_out, size)
+    //     );
+    //     Self {
+    //         inner: ZnxBase::from_bytes_borrow(module.n(), rows, cols_out, size, bytes),
+    //         cols_in: cols_in,
+    //         cols_out: cols_out,
+    //         _marker: PhantomData,
+    //     }
    // }
 }
 
-impl MatZnxDft<FFT64> {
+impl<D: AsRef<[u8]>> MatZnxDft<D, FFT64> {
     /// Returns a copy of the backend array at index (i, j) of the [MatZnxDft].
     ///
     /// # Arguments
@@ -123,3 +168,5 @@ impl MatZnxDft<FFT64> {
         }
     }
 }
+
+pub type MatZnxDftAllocOwned<B> = MatZnxDft<Vec<u8>, B>;
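`MatZnxDft` now exposes its buffer through the `DataView`/`DataViewMut` pair instead of `GetZnxBase`. The `fn data_mut(&mut self) -> &mut Self::D` signature in the diff only resolves if `DataViewMut` is a subtrait of `DataView`, where the associated type `D` lives. A self-contained sketch of that shape, with hypothetical names:

```rust
// Sketch of the view-trait split, assuming DataViewMut: DataView as implied
// by the use of Self::D in the impl above. Not the crate's actual code.
pub trait DataViewSketch {
    type D;
    fn data(&self) -> &Self::D;
}

pub trait DataViewMutSketch: DataViewSketch {
    fn data_mut(&mut self) -> &mut Self::D;
}

struct Mat<D> {
    data: D,
}

impl<D> DataViewSketch for Mat<D> {
    type D = D;
    fn data(&self) -> &D {
        &self.data
    }
}

impl<D> DataViewMutSketch for Mat<D> {
    fn data_mut(&mut self) -> &mut D {
        &mut self.data
    }
}

fn main() {
    let mut m = Mat { data: vec![0u8; 16] };
    m.data_mut()[0] = 7;
    assert_eq!(m.data()[0], 7);
}
```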
@@ -1,20 +1,19 @@
 use crate::ffi::vec_znx_dft::vec_znx_dft_t;
 use crate::ffi::vmp;
-use crate::znx_base::{ZnxInfos, ZnxLayout};
+use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
 use crate::{
-    Backend, FFT64, MatZnxDft, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, assert_alignement, is_aligned,
+    Backend, FFT64, MatZnxDft, MatZnxDftAllocOwned, Module, ScratchSpace, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
+    VecZnxDftAlloc, VecZnxDftOps, assert_alignement, is_aligned,
 };
 
-/// This trait implements methods for vector matrix product,
-/// that is, multiplying a [VecZnx] with a [MatZnxDft].
-pub trait MatZnxDftOps<B: Backend> {
+pub trait MatZnxDftAlloc<B> {
     /// Allocates a new [MatZnxDft] with the given number of rows and columns.
     ///
     /// # Arguments
     ///
     /// * `rows`: number of rows (number of [VecZnxDft]).
     /// * `size`: number of size (number of size of each [VecZnxDft]).
-    fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDft<B>;
+    fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftAllocOwned<B>;
 
     fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
 
@@ -25,17 +24,21 @@ pub trait MatZnxDftOps<B: Backend> {
         cols_out: usize,
         size: usize,
         bytes: Vec<u8>,
-    ) -> MatZnxDft<FFT64>;
+    ) -> MatZnxDftAllocOwned<B>;
 
-    fn new_mat_znx_dft_from_bytes_borrow(
-        &self,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-        bytes: &mut [u8],
-    ) -> MatZnxDft<FFT64>;
+    // fn new_mat_znx_dft_from_bytes_borrow(
+    //     &self,
+    //     rows: usize,
+    //     cols_in: usize,
+    //     cols_out: usize,
+    //     size: usize,
+    //     bytes: &mut [u8],
+    // ) -> MatZnxDft<FFT64>;
+}
+
+/// This trait implements methods for vector matrix product,
+/// that is, multiplying a [VecZnx] with a [MatZnxDft].
+pub trait MatZnxDftOps<DataMut, Data, B: Backend> {
     /// Returns the of bytes needed as scratch space for [MatZnxDftOps::vmp_prepare_row]
     fn vmp_prepare_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize;
 
@@ -49,7 +52,14 @@ pub trait MatZnxDftOps<B: Backend> {
     /// * `buf`: scratch space, the size of buf can be obtained with [MatZnxDftOps::vmp_prepare_tmp_bytes].
     ///
     /// The size of buf can be obtained with [MatZnxDftOps::vmp_prepare_tmp_bytes].
-    fn vmp_prepare_row(&self, b: &mut MatZnxDft<B>, b_row: usize, b_col_in: usize, a: &VecZnx, tmp_bytes: &mut [u8]);
+    fn vmp_prepare_row(
+        &self,
+        b: &mut MatZnxDft<DataMut, B>,
+        b_row: usize,
+        b_col_in: usize,
+        a: &VecZnx<Data>,
+        scratch: &mut ScratchSpace,
+    );
 
     /// Returns the of bytes needed as scratch space for [MatZnxDftOps::vmp_extract_row]
     fn vmp_extract_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize;
@@ -64,11 +74,11 @@ pub trait MatZnxDftOps<B: Backend> {
     fn vmp_extract_row(
         &self,
         log_base2k: usize,
-        b: &mut VecZnx,
-        a: &MatZnxDft<B>,
+        b: &mut VecZnx<DataMut>,
+        a: &MatZnxDft<Data, B>,
         b_row: usize,
         b_col_in: usize,
-        tmp_bytes: &mut [u8],
+        scratch: &mut ScratchSpace,
     );
 
     /// Prepares the ith-row of [MatZnxDft] from a [VecZnxDft].
@@ -80,7 +90,7 @@ pub trait MatZnxDftOps<B: Backend> {
     /// * `row_i`: the index of the row to prepare.
     ///
     /// The size of buf can be obtained with [MatZnxDftOps::vmp_prepare_tmp_bytes].
-    fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<B>, b_row: usize, b_col_in: usize, a: &VecZnxDft<B>);
+    fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<DataMut, B>, b_row: usize, b_col_in: usize, a: &VecZnxDft<Data, B>);
 
     /// Extracts the ith-row of [MatZnxDft] into a [VecZnxDft].
     ///
@@ -89,7 +99,7 @@ pub trait MatZnxDftOps<B: Backend> {
     /// * `b`: the [VecZnxDft] to on which to extract the row of the [MatZnxDft].
     /// * `a`: [MatZnxDft] on which the values are encoded.
     /// * `row_i`: the index of the row to extract.
-    fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<B>, a: &MatZnxDft<B>, a_row: usize, a_col_in: usize);
+    fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<DataMut, B>, a: &MatZnxDft<Data, B>, a_row: usize, a_col_in: usize);
 
     /// Returns the size of the stratch space necessary for [MatZnxDftOps::vmp_apply_dft].
     ///
@@ -133,7 +143,7 @@ pub trait MatZnxDftOps<B: Backend> {
     /// * `a`: the left operand [VecZnx] of the vector matrix product.
     /// * `b`: the right operand [MatZnxDft] of the vector matrix product.
     /// * `buf`: scratch space, the size can be obtained with [MatZnxDftOps::vmp_apply_dft_tmp_bytes].
-    fn vmp_apply_dft(&self, c: &mut VecZnxDft<B>, a: &VecZnx, b: &MatZnxDft<B>, buf: &mut [u8]);
+    fn vmp_apply_dft(&self, c: &mut VecZnxDft<DataMut, B>, a: &VecZnx<Data>, b: &MatZnxDft<Data, B>, scratch: &mut ScratchSpace);
 
     /// Returns the size of the stratch space necessary for [MatZnxDftOps::vmp_apply_dft_to_dft].
     ///
@@ -180,16 +190,22 @@ pub trait MatZnxDftOps<B: Backend> {
     /// * `a`: the left operand [VecZnxDft] of the vector matrix product.
     /// * `b`: the right operand [MatZnxDft] of the vector matrix product.
     /// * `buf`: scratch space, the size can be obtained with [MatZnxDftOps::vmp_apply_dft_to_dft_tmp_bytes].
-    fn vmp_apply_dft_to_dft(&self, c: &mut VecZnxDft<B>, a: &VecZnxDft<B>, b: &MatZnxDft<B>, buf: &mut [u8]);
+    fn vmp_apply_dft_to_dft(
+        &self,
+        c: &mut VecZnxDft<DataMut, B>,
+        a: &VecZnxDft<Data, B>,
+        b: &MatZnxDft<Data, B>,
+        scratch: &mut ScratchSpace,
+    );
 }
 
-impl MatZnxDftOps<FFT64> for Module<FFT64> {
-    fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDft<FFT64> {
-        MatZnxDft::<FFT64>::new(self, rows, cols_in, cols_out, size)
+impl<B: Backend> MatZnxDftAlloc<B> for Module<B> {
+    fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
+        MatZnxDftAllocOwned::bytes_of(self, rows, cols_in, cols_out, size)
     }
 
-    fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
-        MatZnxDft::<FFT64>::bytes_of(self, rows, cols_in, cols_out, size)
+    fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftAllocOwned<B> {
+        MatZnxDftAllocOwned::new(self, rows, cols_in, cols_out, size)
     }
 
     fn new_mat_znx_dft_from_bytes(
@@ -199,26 +215,28 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
         cols_out: usize,
         size: usize,
         bytes: Vec<u8>,
-    ) -> MatZnxDft<FFT64> {
-        MatZnxDft::<FFT64>::from_bytes(self, rows, cols_in, cols_out, size, bytes)
-    }
-
-    fn new_mat_znx_dft_from_bytes_borrow(
-        &self,
-        rows: usize,
-        cols_in: usize,
-        cols_out: usize,
-        size: usize,
-        bytes: &mut [u8],
-    ) -> MatZnxDft<FFT64> {
-        MatZnxDft::<FFT64>::from_bytes_borrow(self, rows, cols_in, cols_out, size, bytes)
+    ) -> MatZnxDftAllocOwned<B> {
+        MatZnxDftAllocOwned::new_from_bytes(self, rows, cols_in, cols_out, size, bytes)
     }
+}
 
+impl<DataMut, Data> MatZnxDftOps<DataMut, Data, FFT64> for Module<FFT64>
+where
+    DataMut: AsMut<[u8]> + AsRef<[u8]>,
+    Data: AsRef<[u8]>,
+{
     fn vmp_prepare_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize {
-        self.bytes_of_vec_znx_dft(cols_out, size)
+        <Self as VecZnxDftAlloc<FFT64>>::bytes_of_vec_znx_dft(self, cols_out, size)
     }
 
-    fn vmp_prepare_row(&self, b: &mut MatZnxDft<FFT64>, b_row: usize, b_col_in: usize, a: &VecZnx, tmp_bytes: &mut [u8]) {
+    fn vmp_prepare_row(
+        &self,
+        b: &mut MatZnxDft<DataMut, FFT64>,
+        b_row: usize,
+        b_col_in: usize,
+        a: &VecZnx<Data>,
+        scratch: &mut ScratchSpace,
+    ) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(b.n(), self.n());
@@ -249,33 +267,36 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
                 b.size(),
                 a.size()
             );
-            assert!(tmp_bytes.len() >= self.vmp_prepare_row_tmp_bytes(a.cols(), a.size()));
-            assert!(is_aligned(tmp_bytes.as_ptr()))
+            // assert!(
+            //     tmp_bytes.len()
+            //         >= <Self as MatZnxDftOps<DataMut, Data, FFT64>>::vmp_prepare_row_tmp_bytes(self, a.cols(), a.size())
+            // );
+            // assert!(is_aligned(tmp_bytes.as_ptr()))
         }
 
         let cols_out: usize = a.cols();
        let a_size: usize = a.size();
 
-        let (tmp_bytes_a_dft, _) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, a_size));
-        let mut a_dft: VecZnxDft<FFT64> = self.new_vec_znx_dft_from_bytes_borrow(cols_out, a_size, tmp_bytes_a_dft);
+        // let (tmp_bytes_a_dft, _) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, a_size));
+        let mut a_dft = scratch.tmp_vec_znx_dft::<DataMut, _>(self.n(), cols_out, a_size);
         (0..cols_out).for_each(|i| self.vec_znx_dft(&mut a_dft, i, &a, i));
 
         Self::vmp_prepare_row_dft(&self, b, b_row, b_col_in, &a_dft);
     }
 
     fn vmp_extract_row_tmp_bytes(&self, cols_out: usize, size: usize) -> usize {
-        self.bytes_of_vec_znx_dft(cols_out, size) + self.vec_znx_big_normalize_tmp_bytes()
+        self.bytes_of_vec_znx_dft(cols_out, size)
+            + <Self as VecZnxBigOps<DataMut, Data, FFT64>>::vec_znx_big_normalize_tmp_bytes(self)
     }
 
     fn vmp_extract_row(
         &self,
         log_base2k: usize,
-        b: &mut VecZnx,
-        a: &MatZnxDft<FFT64>,
+        b: &mut VecZnx<DataMut>,
+        a: &MatZnxDft<Data, FFT64>,
         a_row: usize,
         a_col_in: usize,
-        tmp_bytes: &mut [u8],
+        scratch: &mut ScratchSpace,
     ) {
         #[cfg(debug_assertions)]
         {
@@ -307,24 +328,24 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
                 b.size(),
                 a.size()
             );
-            assert!(tmp_bytes.len() >= self.vmp_extract_row_tmp_bytes(a.cols(), a.size()));
-            assert!(is_aligned(tmp_bytes.as_ptr()))
+            // assert!(tmp_bytes.len() >= self.vmp_extract_row_tmp_bytes(a.cols(), a.size()));
+            // assert!(is_aligned(tmp_bytes.as_ptr()))
         }
 
         let cols_out: usize = b.cols();
         let size: usize = b.size();
 
-        let (bytes_a_dft, tmp_bytes) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, size));
-        let mut b_dft: VecZnxDft<FFT64> = self.new_vec_znx_dft_from_bytes_borrow(cols_out, size, bytes_a_dft);
+        // let (bytes_a_dft, tmp_bytes) = tmp_bytes.split_at_mut(self.bytes_of_vec_znx_dft(cols_out, size));
+        let mut b_dft = scratch.tmp_vec_znx_dft::<DataMut, _>(self.n(), cols_out, size);
         Self::vmp_extract_row_dft(&self, &mut b_dft, a, a_row, a_col_in);
-        let mut b_big: VecZnxBig<FFT64> = b_dft.alias_as_vec_znx_big();
+        let mut b_big = scratch.tmp_vec_znx_big(self.n(), cols_out, size);
         (0..cols_out).for_each(|i| {
-            self.vec_znx_idft_tmp_a(&mut b_big, i, &mut b_dft, i);
-            self.vec_znx_big_normalize(log_base2k, b, i, &b_big, i, tmp_bytes);
+            <Self as VecZnxDftOps<DataMut, Data, FFT64>>::vec_znx_idft_tmp_a(self, &mut b_big, i, &mut b_dft, i);
+            self.vec_znx_big_normalize(log_base2k, b, i, &b_big, i, scratch);
         });
     }
 
-    fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<FFT64>, b_row: usize, b_col_in: usize, a: &VecZnxDft<FFT64>) {
+    fn vmp_prepare_row_dft(&self, b: &mut MatZnxDft<DataMut, FFT64>, b_row: usize, b_col_in: usize, a: &VecZnxDft<Data, FFT64>) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(b.n(), self.n());
@@ -369,7 +390,7 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
         }
     }
 
-    fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<FFT64>, a: &MatZnxDft<FFT64>, a_row: usize, a_col_in: usize) {
+    fn vmp_extract_row_dft(&self, b: &mut VecZnxDft<DataMut, FFT64>, a: &MatZnxDft<Data, FFT64>, a_row: usize, a_col_in: usize) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(b.n(), self.n());
@@ -433,18 +454,13 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
         }
     }
 
-    fn vmp_apply_dft(&self, c: &mut VecZnxDft<FFT64>, a: &VecZnx, b: &MatZnxDft<FFT64>, tmp_bytes: &mut [u8]) {
-        debug_assert!(
-            tmp_bytes.len()
-                >= self.vmp_apply_dft_tmp_bytes(
-                    c.size(),
-                    a.size(),
-                    b.rows(),
-                    b.cols_in(),
-                    b.cols_out(),
-                    b.size()
-                )
-        );
+    fn vmp_apply_dft(
+        &self,
+        c: &mut VecZnxDft<DataMut, FFT64>,
+        a: &VecZnx<Data>,
+        b: &MatZnxDft<Data, FFT64>,
+        scratch: &mut ScratchSpace,
+    ) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(c.n(), self.n());
@@ -464,18 +480,18 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
                 a.cols(),
                 b.cols_in()
             );
-            assert!(
-                tmp_bytes.len()
-                    >= self.vmp_apply_dft_tmp_bytes(
-                        c.size(),
-                        a.size(),
-                        b.rows(),
-                        b.cols_in(),
-                        b.cols_out(),
-                        b.size()
-                    )
-            );
-            assert_alignement(tmp_bytes.as_ptr());
+            // assert!(
+            //     tmp_bytes.len()
+            //         >= self.vmp_apply_dft_tmp_bytes(
+            //             c.size(),
+            //             a.size(),
+            //             b.rows(),
+            //             b.cols_in(),
+            //             b.cols_out(),
+            //             b.size()
+            //         )
+            // );
+            // assert_alignement(tmp_bytes.as_ptr());
         }
         unsafe {
             vmp::vmp_apply_dft(
@@ -488,7 +504,7 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
                 b.as_ptr() as *const vmp::vmp_pmat_t,
                 (b.rows() * b.cols_in()) as u64,
                 (b.size() * b.cols_out()) as u64,
-                tmp_bytes.as_mut_ptr(),
+                scratch.vmp_apply_dft_tmp_bytes(self).as_mut_ptr(),
             )
         }
     }
@@ -515,7 +531,13 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
         }
     }
 
-    fn vmp_apply_dft_to_dft(&self, c: &mut VecZnxDft<FFT64>, a: &VecZnxDft<FFT64>, b: &MatZnxDft<FFT64>, tmp_bytes: &mut [u8]) {
+    fn vmp_apply_dft_to_dft(
+        &self,
+        c: &mut VecZnxDft<DataMut, FFT64>,
+        a: &VecZnxDft<Data, FFT64>,
+        b: &MatZnxDft<Data, FFT64>,
+        scratch: &mut ScratchSpace,
+    ) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(c.n(), self.n());
@@ -535,20 +557,20 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
                 a.cols(),
                 b.cols_in()
             );
-            assert!(
-                tmp_bytes.len()
-                    >= self.vmp_apply_dft_to_dft_tmp_bytes(
-                        c.cols(),
-                        c.size(),
-                        a.cols(),
-                        a.size(),
-                        b.rows(),
-                        b.cols_in(),
-                        b.cols_out(),
-                        b.size()
-                    )
-            );
-            assert_alignement(tmp_bytes.as_ptr());
+            // assert!(
+            //     tmp_bytes.len()
+            //         >= self.vmp_apply_dft_to_dft_tmp_bytes(
+            //             c.cols(),
+            //             c.size(),
+            //             a.cols(),
+            //             a.size(),
+            //             b.rows(),
+            //             b.cols_in(),
+            //             b.cols_out(),
+            //             b.size()
+            //         )
+            // );
+            // assert_alignement(tmp_bytes.as_ptr());
         }
         unsafe {
             vmp::vmp_apply_dft_to_dft(
@@ -560,7 +582,7 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
                 b.as_ptr() as *const vmp::vmp_pmat_t,
                 b.rows() as u64,
                 (b.size() * b.cols()) as u64,
-                tmp_bytes.as_mut_ptr(),
+                scratch.vmp_apply_dft_to_dft_tmp_bytes(self).as_mut_ptr(),
             )
         }
     }
@@ -568,9 +590,12 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
 
 #[cfg(test)]
 mod tests {
+    use crate::mat_znx_dft_ops::*;
+    use crate::vec_znx_big_ops::*;
+    use crate::vec_znx_dft_ops::*;
+    use crate::vec_znx_ops::*;
     use crate::{
-        FFT64, MatZnxDft, MatZnxDftOps, Module, Sampling, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps,
-        alloc_aligned, znx_base::ZnxLayout,
+        FFT64, MatZnxDft, MatZnxDftOps, Module, Sampling, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, alloc_aligned,
     };
     use sampling::source::Source;
 
@@ -582,16 +607,19 @@ mod tests {
         let mat_cols_in: usize = 2;
         let mat_cols_out: usize = 2;
         let mat_size: usize = 5;
-        let mut a: VecZnx = module.new_vec_znx(mat_cols_out, mat_size);
-        let mut b: VecZnx = module.new_vec_znx(mat_cols_out, mat_size);
-        let mut a_dft: VecZnxDft<FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
-        let mut a_big: VecZnxBig<FFT64> = module.new_vec_znx_big(mat_cols_out, mat_size);
-        let mut b_dft: VecZnxDft<FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
-        let mut vmpmat_0: MatZnxDft<FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
-        let mut vmpmat_1: MatZnxDft<FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
+        let mut a: VecZnx<_> = module.new_vec_znx(mat_cols_out, mat_size);
+        let mut b: VecZnx<_> = module.new_vec_znx(mat_cols_out, mat_size);
+        let mut a_dft: VecZnxDft<_, FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
+        let mut a_big: VecZnxBig<_, FFT64> = module.new_vec_znx_big(mat_cols_out, mat_size);
+        let mut b_dft: VecZnxDft<_, FFT64> = module.new_vec_znx_dft(mat_cols_out, mat_size);
+        let mut vmpmat_0: MatZnxDft<_, FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
+        let mut vmpmat_1: MatZnxDft<_, FFT64> = module.new_mat_znx_dft(mat_rows, mat_cols_in, mat_cols_out, mat_size);
 
+        // let mut tmp_bytes: Vec<u8> =
+        //     alloc_aligned(module.vmp_prepare_row_tmp_bytes(mat_cols_out, mat_size) | module.vec_znx_big_normalize_tmp_bytes());
+        let mut scratch = ScratchSpace {};
         let mut tmp_bytes: Vec<u8> =
-            alloc_aligned(module.vmp_prepare_row_tmp_bytes(mat_cols_out, mat_size) | module.vec_znx_big_normalize_tmp_bytes());
+            alloc_aligned::<u8>(<Module<FFT64> as VecZnxDftOps<Vec<u8>, Vec<u8>, _>>::vec_znx_idft_tmp_bytes(&module));
 
         for col_in in 0..mat_cols_in {
             for row_i in 0..mat_rows {
@@ -602,7 +630,7 @@ mod tests {
                     module.vec_znx_dft(&mut a_dft, col_out, &a, col_out);
                 });
 
-                module.vmp_prepare_row(&mut vmpmat_0, row_i, col_in, &a, &mut tmp_bytes);
+                module.vmp_prepare_row(&mut vmpmat_0, row_i, col_in, &a, &mut scratch);
 
                 // Checks that prepare(mat_znx_dft, a) = prepare_dft(mat_znx_dft, a_dft)
                 module.vmp_prepare_row_dft(&mut vmpmat_1, row_i, col_in, &a_dft);
@@ -613,11 +641,11 @@ mod tests {
                 assert_eq!(a_dft.raw(), b_dft.raw());
 
                 // Checks that a_big = extract(prepare_dft(mat_znx_dft, a_dft), b_big)
-                module.vmp_extract_row(log_base2k, &mut b, &vmpmat_0, row_i, col_in, &mut tmp_bytes);
+                module.vmp_extract_row(log_base2k, &mut b, &vmpmat_0, row_i, col_in, &mut scratch);
 
                 (0..mat_cols_out).for_each(|col_out| {
                     module.vec_znx_idft(&mut a_big, col_out, &a_dft, col_out, &mut tmp_bytes);
-                    module.vec_znx_big_normalize(log_base2k, &mut a, col_out, &a_big, col_out, &mut tmp_bytes);
+                    module.vec_znx_big_normalize(log_base2k, &mut a, col_out, &a_big, col_out, &mut scratch);
                 });
 
                 assert_eq!(a.raw(), b.raw());
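With `MatZnxDftOps<DataMut, Data, B>` now generic over two data parameters, a plain method call such as `self.bytes_of_vec_znx_dft(...)` can become ambiguous, which is why the impl above switches to fully qualified calls like `<Self as VecZnxDftAlloc<FFT64>>::bytes_of_vec_znx_dft(self, ...)`. A toy reproduction of the ambiguity and its fix (names here are invented):

```rust
// Toy example, not crate code: one type implementing the same generic trait
// twice forces fully qualified syntax at the call site.
trait TmpBytes<T> {
    fn tmp_bytes(&self) -> usize;
}

struct ModuleSketch;

impl TmpBytes<u32> for ModuleSketch {
    fn tmp_bytes(&self) -> usize {
        4
    }
}

impl TmpBytes<u64> for ModuleSketch {
    fn tmp_bytes(&self) -> usize {
        8
    }
}

fn main() {
    let m = ModuleSketch;
    // `m.tmp_bytes()` fails with "multiple applicable items in scope".
    // Fully qualified syntax names the exact impl, as the diff does:
    let n = <ModuleSketch as TmpBytes<u64>>::tmp_bytes(&m);
    assert_eq!(n, 8);
}
```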
@@ -33,7 +33,7 @@ impl Backend for NTT120 {
 
 pub struct Module<B: Backend> {
     pub ptr: *mut MODULE,
-    pub n: usize,
+    n: usize,
     _marker: PhantomData<B>,
 }
 
@@ -1,16 +1,24 @@
-use crate::{Backend, Module, VecZnx, znx_base::ZnxLayout};
+use crate::znx_base::ZnxViewMut;
+use crate::{Backend, Module, VecZnx};
 use rand_distr::{Distribution, Normal};
 use sampling::source::Source;
 
 pub trait Sampling {
     /// Fills the first `size` size with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
-    fn fill_uniform(&self, log_base2k: usize, a: &mut VecZnx, col_i: usize, size: usize, source: &mut Source);
-
-    /// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
-    fn add_dist_f64<D: Distribution<f64>>(
+    fn fill_uniform<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
         &self,
         log_base2k: usize,
-        a: &mut VecZnx,
+        a: &mut VecZnx<DataMut>,
+        col_i: usize,
+        size: usize,
+        source: &mut Source,
+    );
+
+    /// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
+    fn add_dist_f64<DataMut: AsMut<[u8]> + AsRef<[u8]>, D: Distribution<f64>>(
+        &self,
+        log_base2k: usize,
+        a: &mut VecZnx<DataMut>,
         col_i: usize,
         log_k: usize,
         source: &mut Source,
@@ -19,10 +27,10 @@ pub trait Sampling {
     );
 
     /// Adds a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
-    fn add_normal(
+    fn add_normal<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
         &self,
         log_base2k: usize,
-        a: &mut VecZnx,
+        a: &mut VecZnx<DataMut>,
         col_i: usize,
         log_k: usize,
         source: &mut Source,
@@ -32,22 +40,29 @@ pub trait Sampling {
 }
 
 impl<B: Backend> Sampling for Module<B> {
-    fn fill_uniform(&self, log_base2k: usize, a: &mut VecZnx, col_a: usize, size: usize, source: &mut Source) {
+    fn fill_uniform<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
+        &self,
+        log_base2k: usize,
+        a: &mut VecZnx<DataMut>,
+        col_i: usize,
+        size: usize,
+        source: &mut Source,
+    ) {
         let base2k: u64 = 1 << log_base2k;
         let mask: u64 = base2k - 1;
         let base2k_half: i64 = (base2k >> 1) as i64;
         (0..size).for_each(|j| {
-            a.at_mut(col_a, j)
+            a.at_mut(col_i, j)
                 .iter_mut()
                 .for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
         })
     }
 
-    fn add_dist_f64<D: Distribution<f64>>(
+    fn add_dist_f64<DataMut: AsMut<[u8]> + AsRef<[u8]>, D: Distribution<f64>>(
         &self,
         log_base2k: usize,
-        a: &mut VecZnx,
-        col_a: usize,
+        a: &mut VecZnx<DataMut>,
+        col_i: usize,
         log_k: usize,
         source: &mut Source,
         dist: D,
@@ -63,7 +78,7 @@ impl<B: Backend> Sampling for Module<B> {
         let log_base2k_rem: usize = log_k % log_base2k;
 
         if log_base2k_rem != 0 {
-            a.at_mut(col_a, limb).iter_mut().for_each(|a| {
+            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
                 let mut dist_f64: f64 = dist.sample(source);
                 while dist_f64.abs() > bound {
                     dist_f64 = dist.sample(source)
@@ -71,7 +86,7 @@ impl<B: Backend> Sampling for Module<B> {
                 *a += (dist_f64.round() as i64) << log_base2k_rem;
             });
         } else {
-            a.at_mut(col_a, limb).iter_mut().for_each(|a| {
+            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
                 let mut dist_f64: f64 = dist.sample(source);
                 while dist_f64.abs() > bound {
                     dist_f64 = dist.sample(source)
@@ -81,11 +96,11 @@ impl<B: Backend> Sampling for Module<B> {
         }
     }
 
-    fn add_normal(
+    fn add_normal<DataMut: AsMut<[u8]> + AsRef<[u8]>>(
         &self,
         log_base2k: usize,
-        a: &mut VecZnx,
-        col_a: usize,
+        a: &mut VecZnx<DataMut>,
+        col_i: usize,
         log_k: usize,
         source: &mut Source,
         sigma: f64,
@@ -94,7 +109,7 @@ impl<B: Backend> Sampling for Module<B> {
         self.add_dist_f64(
             log_base2k,
             a,
-            col_a,
+            col_i,
             log_k,
             source,
             Normal::new(0.0, sigma).unwrap(),
@@ -106,7 +121,9 @@ impl<B: Backend> Sampling for Module<B> {
 #[cfg(test)]
 mod tests {
     use super::Sampling;
-    use crate::{FFT64, Module, Stats, VecZnx, VecZnxOps, znx_base::ZnxLayout};
+    use crate::vec_znx_ops::*;
+    use crate::znx_base::*;
+    use crate::{FFT64, Module, Stats, VecZnx};
     use sampling::source::Source;
 
     #[test]
@@ -120,7 +137,7 @@ mod tests {
         let zero: Vec<i64> = vec![0; n];
         let one_12_sqrt: f64 = 0.28867513459481287;
         (0..cols).for_each(|col_i| {
-            let mut a: VecZnx = module.new_vec_znx(cols, size);
+            let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
             module.fill_uniform(log_base2k, &mut a, col_i, size, &mut source);
             (0..cols).for_each(|col_j| {
                 if col_j != col_i {
@@ -154,7 +171,7 @@ mod tests {
         let zero: Vec<i64> = vec![0; n];
         let k_f64: f64 = (1u64 << log_k as u64) as f64;
         (0..cols).for_each(|col_i| {
-            let mut a: VecZnx = module.new_vec_znx(cols, size);
+            let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
             module.add_normal(log_base2k, &mut a, col_i, log_k, &mut source, sigma, bound);
             (0..cols).for_each(|col_j| {
                 if col_j != col_i {
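The `fill_uniform` body above draws `u` in `[0, 2^log_base2k)` via `next_u64n` and recenters it by subtracting `2^(log_base2k-1)`. A standalone check of that arithmetic (helper name invented); the `one_12_sqrt = 0.28867...` constant in the tests is `1/sqrt(12)`, the standard deviation of a unit-width uniform distribution, which is what the sampled limbs are compared against after scaling:

```rust
// Standalone arithmetic check of the recentering trick used by fill_uniform.
fn centered(u: u64, log_base2k: usize) -> i64 {
    let base2k: u64 = 1 << log_base2k;
    let mask: u64 = base2k - 1;
    let base2k_half: i64 = (base2k >> 1) as i64;
    ((u & mask) as i64) - base2k_half
}

fn main() {
    let log_base2k = 17;
    // Output range is [-2^16, 2^16 - 1]:
    assert_eq!(centered(0, log_base2k), -(1 << 16));
    assert_eq!(centered(1 << 16, log_base2k), 0);
    assert_eq!(centered((1 << 17) - 1, log_base2k), (1 << 16) - 1);
}
```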
@@ -1,64 +1,59 @@
-use crate::znx_base::{ZnxAlloc, ZnxBase, ZnxInfos, ZnxLayout, ZnxSliceSize};
-use crate::{Backend, GetZnxBase, Module, VecZnx};
+use crate::znx_base::ZnxInfos;
+use crate::{Backend, DataView, DataViewMut, Module, ZnxView, ZnxViewMut, alloc_aligned};
 use rand::seq::SliceRandom;
 use rand_core::RngCore;
 use rand_distr::{Distribution, weighted::WeightedIndex};
 use sampling::source::Source;
 
-pub const SCALAR_ZNX_ROWS: usize = 1;
-pub const SCALAR_ZNX_SIZE: usize = 1;
+// pub const SCALAR_ZNX_ROWS: usize = 1;
+// pub const SCALAR_ZNX_SIZE: usize = 1;
 
-pub struct Scalar {
-    pub inner: ZnxBase,
+pub struct Scalar<D> {
+    data: D,
+    n: usize,
+    cols: usize,
 }
 
-impl GetZnxBase for Scalar {
-    fn znx(&self) -> &ZnxBase {
-        &self.inner
+impl<D> ZnxInfos for Scalar<D> {
+    fn cols(&self) -> usize {
+        self.cols
     }
 
-    fn znx_mut(&mut self) -> &mut ZnxBase {
-        &mut self.inner
-    }
-}
-
-impl ZnxInfos for Scalar {}
-
-impl<B: Backend> ZnxAlloc<B> for Scalar {
-    type Scalar = i64;
-
-    fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
-        Self {
-            inner: ZnxBase::from_bytes_borrow(module.n(), SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes),
-        }
+    fn rows(&self) -> usize {
+        1
     }
 
-    fn bytes_of(module: &Module<B>, _rows: usize, cols: usize, _size: usize) -> usize {
-        debug_assert_eq!(
-            _rows, SCALAR_ZNX_ROWS,
-            "rows != {} not supported for Scalar",
-            SCALAR_ZNX_ROWS
-        );
-        debug_assert_eq!(
-            _size, SCALAR_ZNX_SIZE,
-            "rows != {} not supported for Scalar",
-            SCALAR_ZNX_SIZE
-        );
-        module.n() * cols * std::mem::size_of::<self::Scalar>()
+    fn n(&self) -> usize {
+        self.n
     }
-}
 
-impl ZnxLayout for Scalar {
-    type Scalar = i64;
-}
+    fn size(&self) -> usize {
+        1
+    }
 
-impl ZnxSliceSize for Scalar {
     fn sl(&self) -> usize {
         self.n()
     }
 }
 
-impl Scalar {
+impl<D> DataView for Scalar<D> {
+    type D = D;
+    fn data(&self) -> &Self::D {
+        &self.data
+    }
+}
+
+impl<D> DataViewMut for Scalar<D> {
+    fn data_mut(&mut self) -> &mut Self::D {
+        &mut self.data
+    }
+}
+
+impl<D: AsRef<[u8]>> ZnxView for Scalar<D> {
+    type Scalar = i64;
+}
+
+impl<D: AsMut<[u8]> + AsRef<[u8]>> Scalar<D> {
@@ -76,38 +71,89 @@ impl Scalar {
         self.at_mut(col, 0).shuffle(source);
     }
 
-    pub fn alias_as_vec_znx(&self) -> VecZnx {
-        VecZnx {
-            inner: ZnxBase {
-                n: self.n(),
-                rows: 1,
-                cols: 1,
-                size: 1,
-                data: Vec::new(),
-                ptr: self.ptr() as *mut u8,
-            },
+    // pub fn alias_as_vec_znx(&self) -> VecZnx {
+    //     VecZnx {
+    //         inner: ZnxBase {
+    //             n: self.n(),
+    //             rows: 1,
+    //             cols: 1,
+    //             size: 1,
+    //             data: Vec::new(),
+    //             ptr: self.ptr() as *mut u8,
+    //         },
+    //     }
+    // }
+}
+
+impl<D: From<Vec<u8>>> Scalar<D> {
+    pub(crate) fn bytes_of<S: Sized>(n: usize, cols: usize) -> usize {
+        n * cols * size_of::<S>()
+    }
+
+    pub(crate) fn new<S: Sized>(n: usize, cols: usize) -> Self {
+        let data = alloc_aligned::<u8>(Self::bytes_of::<S>(n, cols));
+        Self {
+            data: data.into(),
+            n,
+            cols,
+        }
+    }
+
+    pub(crate) fn new_from_bytes<S: Sized>(n: usize, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
+        let data: Vec<u8> = bytes.into();
+        assert!(data.len() == Self::bytes_of::<S>(n, cols));
+        Self {
+            data: data.into(),
+            n,
+            cols,
         }
     }
 }
 
-pub trait ScalarOps {
+pub type ScalarOwned = Scalar<Vec<u8>>;
+
+pub trait ScalarAlloc {
     fn bytes_of_scalar(&self, cols: usize) -> usize;
-    fn new_scalar(&self, cols: usize) -> Scalar;
-    fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> Scalar;
-    fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar;
+    fn new_scalar(&self, cols: usize) -> ScalarOwned;
+    fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarOwned;
+    // fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar;
 }
 
-impl<B: Backend> ScalarOps for Module<B> {
+impl<B: Backend> ScalarAlloc for Module<B> {
     fn bytes_of_scalar(&self, cols: usize) -> usize {
-        Scalar::bytes_of(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE)
+        ScalarOwned::bytes_of::<i64>(self.n(), cols)
     }
-    fn new_scalar(&self, cols: usize) -> Scalar {
-        Scalar::new(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE)
+    fn new_scalar(&self, cols: usize) -> ScalarOwned {
+        ScalarOwned::new::<i64>(self.n(), cols)
     }
-    fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> Scalar {
-        Scalar::from_bytes(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes)
-    }
-    fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar {
-        Scalar::from_bytes_borrow(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes)
+    fn new_scalar_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarOwned {
+        ScalarOwned::new_from_bytes::<i64>(self.n(), cols, bytes)
     }
+    // fn new_scalar_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> Scalar {
+    //     Scalar::from_bytes_borrow(self, SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes)
+    // }
 }
 
+// impl<B: Backend> ZnxAlloc<B> for Scalar {
+//     type Scalar = i64;
+
+//     fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
+//         Self {
+//             inner: ZnxBase::from_bytes_borrow(module.n(), SCALAR_ZNX_ROWS, cols, SCALAR_ZNX_SIZE, bytes),
+//         }
+//     }

+//     fn bytes_of(module: &Module<B>, _rows: usize, cols: usize, _size: usize) -> usize {
+//         debug_assert_eq!(
+//             _rows, SCALAR_ZNX_ROWS,
+//             "rows != {} not supported for Scalar",
+//             SCALAR_ZNX_ROWS
+//         );
+//         debug_assert_eq!(
+//             _size, SCALAR_ZNX_SIZE,
+//             "rows != {} not supported for Scalar",
+//             SCALAR_ZNX_SIZE
+//         );
+//         module.n() * cols * std::mem::size_of::<self::Scalar>()
+//     }
+// }
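`Scalar` sizing no longer goes through `ZnxAlloc::bytes_of` with its `_rows`/`_size` assertions; it is simply `n * cols * size_of::<S>()`, with `S = i64` at the `ScalarAlloc` call sites. A quick numeric sanity check (helper name invented, not crate code):

```rust
// Mirrors the new Scalar::bytes_of formula; illustrative only.
fn bytes_of_scalar_sketch(n: usize, cols: usize) -> usize {
    n * cols * std::mem::size_of::<i64>()
}

fn main() {
    // A degree-1024 ring with 2 columns of i64 coefficients:
    assert_eq!(bytes_of_scalar_sketch(1024, 2), 2 * 1024 * 8);
}
```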
@@ -1,67 +1,97 @@
 use std::marker::PhantomData;
 
 use crate::ffi::svp;
-use crate::znx_base::{ZnxAlloc, ZnxBase, ZnxInfos, ZnxLayout, ZnxSliceSize};
-use crate::{Backend, FFT64, GetZnxBase, Module};
+use crate::znx_base::ZnxInfos;
+use crate::{Backend, DataView, DataViewMut, FFT64, Module, ZnxView, alloc_aligned};
 
 pub const SCALAR_ZNX_DFT_ROWS: usize = 1;
 pub const SCALAR_ZNX_DFT_SIZE: usize = 1;
 
-pub struct ScalarZnxDft<B: Backend> {
-    pub inner: ZnxBase,
-    _marker: PhantomData<B>,
+pub struct ScalarZnxDft<D, B> {
+    data: D,
+    n: usize,
+    cols: usize,
+    _phantom: PhantomData<B>,
 }
 
-impl<B: Backend> GetZnxBase for ScalarZnxDft<B> {
-    fn znx(&self) -> &ZnxBase {
-        &self.inner
+impl<D, B> ZnxInfos for ScalarZnxDft<D, B> {
+    fn cols(&self) -> usize {
+        self.cols
     }
 
-    fn znx_mut(&mut self) -> &mut ZnxBase {
-        &mut self.inner
+    fn rows(&self) -> usize {
+        1
+    }
+
+    fn n(&self) -> usize {
+        self.n
+    }
+
+    fn size(&self) -> usize {
+        1
+    }
+
+    fn sl(&self) -> usize {
+        self.n()
     }
 }
 
-impl<B: Backend> ZnxInfos for ScalarZnxDft<B> {}
+impl<D, B> DataView for ScalarZnxDft<D, B> {
+    type D = D;
 
-impl<B: Backend> ZnxAlloc<B> for ScalarZnxDft<B> {
-    type Scalar = u8;
-
-    fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
-        debug_assert_eq!(bytes.len(), Self::bytes_of(module, _rows, cols, _size));
-        Self {
-            inner: ZnxBase::from_bytes_borrow(
-                module.n(),
-                SCALAR_ZNX_DFT_ROWS,
-                cols,
-                SCALAR_ZNX_DFT_SIZE,
-                bytes,
-            ),
-            _marker: PhantomData,
-        }
-    }
-
-    fn bytes_of(module: &Module<B>, _rows: usize, cols: usize, _size: usize) -> usize {
-        debug_assert_eq!(
-            _rows, SCALAR_ZNX_DFT_ROWS,
-            "rows != {} not supported for ScalarZnxDft",
-            SCALAR_ZNX_DFT_ROWS
-        );
-        debug_assert_eq!(
-            _size, SCALAR_ZNX_DFT_SIZE,
-            "rows != {} not supported for ScalarZnxDft",
-            SCALAR_ZNX_DFT_SIZE
-        );
-        unsafe { svp::bytes_of_svp_ppol(module.ptr) as usize * cols }
+    fn data(&self) -> &Self::D {
+        &self.data
     }
 }
 
-impl ZnxLayout for ScalarZnxDft<FFT64> {
+impl<D, B> DataViewMut for ScalarZnxDft<D, B> {
+    fn data_mut(&mut self) -> &mut Self::D {
+        &mut self.data
+    }
+}
+
+impl<D: AsRef<[u8]>> ZnxView for ScalarZnxDft<D, FFT64> {
     type Scalar = f64;
 }
 
-impl ZnxSliceSize for ScalarZnxDft<FFT64> {
-    fn sl(&self) -> usize {
-        self.n() * self.cols()
+impl<D: From<Vec<u8>>, B: Backend> ScalarZnxDft<D, B> {
+    pub(crate) fn bytes_of(module: &Module<B>, cols: usize) -> usize {
+        unsafe { svp::bytes_of_svp_ppol(module.ptr) as usize * cols }
     }
+
+    pub(crate) fn new(module: &Module<B>, cols: usize) -> Self {
+        let data = alloc_aligned::<u8>(Self::bytes_of(module, cols));
+        Self {
+            data: data.into(),
+            n: module.n(),
+            cols,
+            _phantom: PhantomData,
+        }
+    }
+
+    pub(crate) fn new_from_bytes(module: &Module<B>, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
+        let data: Vec<u8> = bytes.into();
+        assert!(data.len() == Self::bytes_of(module, cols));
+        Self {
+            data: data.into(),
+            n: module.n(),
+            cols,
+            _phantom: PhantomData,
+        }
+    }
+
+    // fn from_bytes_borrow(module: &Module<B>, _rows: usize, cols: usize, _size: usize, bytes: &mut [u8]) -> Self {
+    //     debug_assert_eq!(bytes.len(), Self::bytes_of(module, _rows, cols, _size));
+    //     Self {
+    //         inner: ZnxBase::from_bytes_borrow(
+    //             module.n(),
+    //             SCALAR_ZNX_DFT_ROWS,
+    //             cols,
+    //             SCALAR_ZNX_DFT_SIZE,
+    //             bytes,
+    //         ),
+    //         _phantom: PhantomData,
+    //     }
+    // }
 }
+
+pub type ScalarZnxDftOwned<B> = ScalarZnxDft<Vec<u8>, B>;
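
The commit's recurring pattern is visible in this hunk: each znx type drops its ZnxBase wrapper and becomes generic over a data container D, with an Owned alias fixing D = Vec<u8> and borrowed views reusing the same struct over byte slices. A minimal, self-contained sketch of that pattern (Poly, PolyOwned and the FFT64 marker below are illustrative stand-ins, not the crate's types):

    use std::marker::PhantomData;

    // Toy stand-in for the crate's backend marker types; illustrative only.
    struct FFT64;

    struct Poly<D, B> {
        data: D,
        n: usize,
        _backend: PhantomData<B>,
    }

    // Owned variant: the container is a Vec<u8>, as in ScalarZnxDftOwned.
    type PolyOwned<B> = Poly<Vec<u8>, B>;

    impl<B> Poly<Vec<u8>, B> {
        fn new(n: usize, bytes_per_coeff: usize) -> Self {
            Poly { data: vec![0u8; n * bytes_per_coeff], n, _backend: PhantomData }
        }
    }

    // Borrowed variant: the same struct views caller-provided scratch bytes.
    impl<'a, B> Poly<&'a mut [u8], B> {
        fn from_bytes_borrow(data: &'a mut [u8], n: usize) -> Self {
            Poly { data, n, _backend: PhantomData }
        }
    }

    fn main() {
        let owned: PolyOwned<FFT64> = Poly::new(8, 8);
        let mut scratch = vec![0u8; 64];
        let borrowed: Poly<&mut [u8], FFT64> = Poly::from_bytes_borrow(&mut scratch, 8);
        assert_eq!(owned.n, borrowed.n);
    }

One struct thus serves both the owned allocations and the scratchpad-backed views, without duplicating the polynomial logic per container type.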
@@ -1,35 +1,52 @@
 use crate::ffi::svp::{self, svp_ppol_t};
 use crate::ffi::vec_znx_dft::vec_znx_dft_t;
-use crate::znx_base::{ZnxAlloc, ZnxInfos, ZnxLayout, ZnxSliceSize};
-use crate::{Backend, FFT64, Module, SCALAR_ZNX_DFT_ROWS, SCALAR_ZNX_DFT_SIZE, Scalar, ScalarZnxDft, VecZnx, VecZnxDft};
+use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
+use crate::{Backend, FFT64, Module, Scalar, ScalarZnxDft, ScalarZnxDftOwned, VecZnx, VecZnxDft};
 
-pub trait ScalarZnxDftOps<B: Backend> {
-    fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDft<B>;
+pub trait ScalarZnxDftAlloc<B> {
+    fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDftOwned<B>;
     fn bytes_of_scalar_znx_dft(&self, cols: usize) -> usize;
-    fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDft<B>;
-    fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<B>;
-    fn svp_prepare(&self, res: &mut ScalarZnxDft<B>, res_col: usize, a: &Scalar, a_col: usize);
-    fn svp_apply_dft(&self, res: &mut VecZnxDft<B>, res_col: usize, a: &ScalarZnxDft<B>, a_col: usize, b: &VecZnx, b_col: usize);
+    fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDftOwned<B>;
+    // fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<B>;
 }
 
-impl ScalarZnxDftOps<FFT64> for Module<FFT64> {
-    fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDft<FFT64> {
-        ScalarZnxDft::<FFT64>::new(&self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE)
+pub trait ScalarZnxDftOps<DataMut, Data, B: Backend> {
+    fn svp_prepare(&self, res: &mut ScalarZnxDft<DataMut, B>, res_col: usize, a: &Scalar<Data>, a_col: usize);
+    fn svp_apply_dft(
+        &self,
+        res: &mut VecZnxDft<DataMut, B>,
+        res_col: usize,
+        a: &ScalarZnxDft<Data, B>,
+        a_col: usize,
+        b: &VecZnx<Data>,
+        b_col: usize,
+    );
+}
+
+impl<B: Backend> ScalarZnxDftAlloc<B> for Module<B> {
+    fn new_scalar_znx_dft(&self, cols: usize) -> ScalarZnxDftOwned<B> {
+        ScalarZnxDftOwned::new(self, cols)
     }
 
     fn bytes_of_scalar_znx_dft(&self, cols: usize) -> usize {
-        ScalarZnxDft::<FFT64>::bytes_of(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE)
+        ScalarZnxDftOwned::bytes_of(self, cols)
     }
 
-    fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDft<FFT64> {
-        ScalarZnxDft::from_bytes(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE, bytes)
+    fn new_scalar_znx_dft_from_bytes(&self, cols: usize, bytes: Vec<u8>) -> ScalarZnxDftOwned<B> {
+        ScalarZnxDftOwned::new_from_bytes(self, cols, bytes)
     }
 
-    fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<FFT64> {
-        ScalarZnxDft::from_bytes_borrow(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE, bytes)
-    }
+    // fn new_scalar_znx_dft_from_bytes_borrow(&self, cols: usize, bytes: &mut [u8]) -> ScalarZnxDft<FFT64> {
+    //     ScalarZnxDft::from_bytes_borrow(self, SCALAR_ZNX_DFT_ROWS, cols, SCALAR_ZNX_DFT_SIZE, bytes)
+    // }
+}
 
-    fn svp_prepare(&self, res: &mut ScalarZnxDft<FFT64>, res_col: usize, a: &Scalar, a_col: usize) {
+impl<DataMut, Data> ScalarZnxDftOps<DataMut, Data, FFT64> for Module<FFT64>
+where
+    DataMut: AsMut<[u8]> + AsRef<[u8]>,
+    Data: AsRef<[u8]>,
+{
+    fn svp_prepare(&self, res: &mut ScalarZnxDft<DataMut, FFT64>, res_col: usize, a: &Scalar<Data>, a_col: usize) {
         unsafe {
             svp::svp_prepare(
                 self.ptr,
@@ -41,11 +58,11 @@ impl ScalarZnxDftOps<FFT64> for Module<FFT64> {
 
     fn svp_apply_dft(
         &self,
-        res: &mut VecZnxDft<FFT64>,
+        res: &mut VecZnxDft<DataMut, FFT64>,
         res_col: usize,
-        a: &ScalarZnxDft<FFT64>,
+        a: &ScalarZnxDft<Data, FFT64>,
         a_col: usize,
-        b: &VecZnx,
+        b: &VecZnx<Data>,
         b_col: usize,
     ) {
         unsafe {
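
The old ScalarZnxDftOps trait is split in two here: allocation (ScalarZnxDftAlloc) stays generic over the backend only, while the operations trait gains separate DataMut and Data parameters so that mutable outputs and read-only inputs can have different ownership. A toy sketch of the same split, under made-up names (Ctx, BufAlloc, BufOps are not the crate's types):

    struct Ctx {
        n: usize,
    }

    trait BufAlloc {
        fn new_buf(&self, cols: usize) -> Vec<f64>;
    }

    trait BufOps<DataMut, Data: ?Sized> {
        // res and a may have different ownership (owned Vec vs borrowed slice),
        // which is why the trait takes two data-type parameters.
        fn scale(&self, res: &mut DataMut, a: &Data, k: f64);
    }

    impl BufAlloc for Ctx {
        fn new_buf(&self, cols: usize) -> Vec<f64> {
            vec![0.0; self.n * cols]
        }
    }

    impl BufOps<Vec<f64>, [f64]> for Ctx {
        fn scale(&self, res: &mut Vec<f64>, a: &[f64], k: f64) {
            for (r, x) in res.iter_mut().zip(a) {
                *r = k * x;
            }
        }
    }

    fn main() {
        let ctx = Ctx { n: 4 };
        let a = [1.0, 2.0, 3.0, 4.0];
        let mut res = ctx.new_buf(1);
        ctx.scale(&mut res, &a[..], 2.0);
        assert_eq!(res, vec![2.0, 4.0, 6.0, 8.0]);
    }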
@@ -9,7 +9,7 @@ pub trait Stats {
     fn std(&self, col_i: usize, log_base2k: usize) -> f64;
 }
 
-impl Stats for VecZnx {
+impl<D: AsMut<[u8]> + AsRef<[u8]>> Stats for VecZnx<D> {
     fn std(&self, col_i: usize, log_base2k: usize) -> f64 {
         let prec: u32 = (self.size() * log_base2k) as u32;
         let mut data: Vec<Float> = (0..self.n()).map(|_| Float::with_val(prec, 0)).collect();
|
|||||||
use crate::Backend;
|
|
||||||
use crate::DataView;
|
use crate::DataView;
|
||||||
use crate::DataViewMut;
|
use crate::DataViewMut;
|
||||||
use crate::Module;
|
|
||||||
use crate::ZnxView;
|
|
||||||
use crate::alloc_aligned;
|
use crate::alloc_aligned;
|
||||||
use crate::assert_alignement;
|
use crate::assert_alignement;
|
||||||
use crate::cast_mut;
|
use crate::cast_mut;
|
||||||
use crate::ffi::znx;
|
use crate::ffi::znx;
|
||||||
use crate::znx_base::{GetZnxBase, ZnxAlloc, ZnxBase, ZnxInfos, ZnxRsh, ZnxZero, switch_degree};
|
use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut, switch_degree};
|
||||||
use std::{cmp::min, fmt};
|
use std::{cmp::min, fmt};
|
||||||
|
|
||||||
// pub const VEC_ZNX_ROWS: usize = 1;
|
// pub const VEC_ZNX_ROWS: usize = 1;
|
||||||
@@ -59,7 +56,7 @@ impl<D> DataView for VecZnx<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D> DataViewMut for VecZnx<D> {
|
impl<D> DataViewMut for VecZnx<D> {
|
||||||
fn data_mut(&self) -> &mut Self::D {
|
fn data_mut(&mut self) -> &mut Self::D {
|
||||||
&mut self.data
|
&mut self.data
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -84,7 +81,7 @@ impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnx<D> {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.inner.size -= k / log_base2k;
|
self.size -= k / log_base2k;
|
||||||
|
|
||||||
let k_rem: usize = k % log_base2k;
|
let k_rem: usize = k % log_base2k;
|
||||||
|
|
||||||
@@ -97,7 +94,7 @@ impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnx<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Switches degree of from `a.n()` to `self.n()` into `self`
|
/// Switches degree of from `a.n()` to `self.n()` into `self`
|
||||||
pub fn switch_degree<Data: AsRef<[u8]>>(&mut self, col: usize, a: &Data, col_a: usize) {
|
pub fn switch_degree<Data: AsRef<[u8]>>(&mut self, col: usize, a: &VecZnx<Data>, col_a: usize) {
|
||||||
switch_degree(self, col_a, a, col)
|
switch_degree(self, col_a, a, col)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -161,7 +158,7 @@ fn normalize_tmp_bytes(n: usize) -> usize {
|
|||||||
n * std::mem::size_of::<i64>()
|
n * std::mem::size_of::<i64>()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn normalize<D: AsMut<[u8]>>(log_base2k: usize, a: &mut VecZnx<D>, a_col: usize, tmp_bytes: &mut [u8]) {
|
fn normalize<D: AsMut<[u8]> + AsRef<[u8]>>(log_base2k: usize, a: &mut VecZnx<D>, a_col: usize, tmp_bytes: &mut [u8]) {
|
||||||
let n: usize = a.n();
|
let n: usize = a.n();
|
||||||
|
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
|
|||||||
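
Across these files the signature fn data_mut(&self) -> &mut Self::D is corrected to take &mut self. The original form cannot compile in safe Rust: an exclusive borrow of a field has to be derived from an exclusive borrow of the whole value. A minimal sketch of the corrected pattern (Holder and DataViewMutLike are illustrative, not the crate's DataViewMut):

    struct Holder {
        data: Vec<u8>,
    }

    trait DataViewMutLike {
        type D;
        // Taking `&mut self` is what makes returning `&mut Self::D` sound:
        // the exclusive borrow of the field is derived from an exclusive
        // borrow of the whole struct.
        fn data_mut(&mut self) -> &mut Self::D;
    }

    impl DataViewMutLike for Holder {
        type D = Vec<u8>;
        fn data_mut(&mut self) -> &mut Self::D {
            &mut self.data
        }
    }

    fn main() {
        let mut h = Holder { data: vec![0; 4] };
        h.data_mut()[0] = 7;
        assert_eq!(h.data[0], 7);
    }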
@@ -1,11 +1,11 @@
 use crate::ffi::vec_znx_big;
-use crate::znx_base::{GetZnxBase, ZnxAlloc, ZnxBase, ZnxInfos, ZnxView};
+use crate::znx_base::{ZnxInfos, ZnxView};
 use crate::{Backend, DataView, DataViewMut, FFT64, Module, alloc_aligned};
 use std::marker::PhantomData;
 
 const VEC_ZNX_BIG_ROWS: usize = 1;
 
-/// VecZnxBig is Backend dependent, denoted with backend generic `B`
+/// VecZnxBig is `Backend` dependent, denoted with backend generic `B`
 pub struct VecZnxBig<D, B> {
     data: D,
     n: usize,
@@ -44,7 +44,7 @@ impl<D, B> DataView for VecZnxBig<D, B> {
 }
 
 impl<D, B> DataViewMut for VecZnxBig<D, B> {
-    fn data_mut(&self) -> &mut Self::D {
+    fn data_mut(&mut self) -> &mut Self::D {
         &mut self.data
     }
 }
@@ -1,6 +1,6 @@
 use crate::ffi::vec_znx;
-use crate::znx_base::{ZnxAlloc, ZnxInfos, ZnxView, ZnxViewMut};
+use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
-use crate::{Backend, DataView, FFT64, Module, VecZnx, VecZnxBig, VecZnxBigOwned, VecZnxOps, assert_alignement};
+use crate::{Backend, DataView, FFT64, Module, ScratchSpace, VecZnx, VecZnxBig, VecZnxBigOwned, VecZnxOps, assert_alignement};
 
 pub trait VecZnxBigAlloc<B> {
     /// Allocates a vector Z[X]/(X^N+1) that stores not normalized values.
@@ -79,13 +79,13 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
         b_col: usize,
     );
 
-    /// Subtracts `a` to `b` and stores the result on `b`.
+    /// Subtracts `a` from `b` and stores the result on `b`.
     fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnxBig<Data, B>, a_col: usize);
 
-    /// Subtracts `b` to `a` and stores the result on `b`.
+    /// Subtracts `b` from `a` and stores the result on `b`.
     fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnxBig<Data, B>, a_col: usize);
 
-    /// Subtracts `b` to `a` and stores the result on `c`.
+    /// Subtracts `b` from `a` and stores the result on `c`.
     fn vec_znx_big_sub_small_a(
         &self,
         res: &mut VecZnxBig<DataMut, B>,
@@ -96,10 +96,10 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
         b_col: usize,
     );
 
-    /// Subtracts `a` to `b` and stores the result on `b`.
+    /// Subtracts `a` from `res` and stores the result on `res`.
     fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
 
-    /// Subtracts `b` to `a` and stores the result on `c`.
+    /// Subtracts `b` from `a` and stores the result on `c`.
     fn vec_znx_big_sub_small_b(
         &self,
         res: &mut VecZnxBig<DataMut, B>,
@@ -110,7 +110,7 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
         b_col: usize,
     );
 
-    /// Subtracts `b` to `a` and stores the result on `b`.
+    /// Subtracts `res` from `a` and stores the result on `res`.
    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<DataMut, B>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
 
     /// Returns the minimum number of bytes to apply [VecZnxBigOps::vec_znx_big_normalize].
@@ -129,7 +129,7 @@ pub trait VecZnxBigOps<DataMut, Data, B> {
         res_col: usize,
         a: &VecZnxBig<Data, B>,
         a_col: usize,
-        tmp_bytes: &mut [u8],
+        scratch: &mut ScratchSpace,
     );
 
     /// Applies the automorphism X^i -> X^ik on `a` and stores the result on `b`.
@@ -160,7 +160,7 @@ impl VecZnxBigAlloc<FFT64> for Module<FFT64> {
     // }
 
     fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize {
-        VecZnxBig::bytes_of(self, cols, size)
+        VecZnxBigOwned::bytes_of(self, cols, size)
     }
 }
 
@@ -208,8 +208,24 @@ where
         a: &VecZnxBig<Data, FFT64>,
         a_col: usize,
     ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_big_add(self, res, res_col, a, a_col, res, res_col);
+            vec_znx::vec_znx_add(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+            )
         }
     }
 
@@ -245,7 +261,6 @@ where
         }
     }
 
-    //(Jay)TODO: check whether definitions sub_ab, sub_ba make sense to you
     fn vec_znx_big_sub_ab_inplace(
         &self,
         res: &mut VecZnxBig<DataMut, FFT64>,
@@ -253,8 +268,24 @@ where
         a: &VecZnxBig<Data, FFT64>,
         a_col: usize,
     ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_big_sub(self, res, res_col, a, a_col, res, res_col);
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
         }
     }
 
@@ -265,8 +296,24 @@ where
         a: &VecZnxBig<Data, FFT64>,
         a_col: usize,
     ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_big_sub(self, res, res_col, res, res_col, a, a_col);
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+            )
         }
     }
 
@@ -309,8 +356,24 @@ where
         a: &VecZnx<Data>,
         a_col: usize,
     ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_big_sub_small_b(self, res, res_col, res, res_col, a, a_col);
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+            )
         }
     }
 
@@ -353,8 +416,24 @@ where
         a: &VecZnx<Data>,
         a_col: usize,
     ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_big_sub_small_a(self, res, res_col, a, a_col, res, res_col);
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
         }
     }
 
@@ -391,11 +470,29 @@ where
     }
 
     fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<DataMut, FFT64>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
-        Self::vec_znx_big_add_small(self, res, res_col, res, res_col, a, a_col);
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
+        unsafe {
+            vec_znx::vec_znx_add(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
+        }
     }
 
     fn vec_znx_big_normalize_tmp_bytes(&self) -> usize {
-        Self::vec_znx_normalize_tmp_bytes(self)
+        <Self as VecZnxOps<DataMut, Data>>::vec_znx_normalize_tmp_bytes(self)
     }
 
     fn vec_znx_big_normalize(
@@ -405,14 +502,16 @@ where
         res_col: usize,
         a: &VecZnxBig<Data, FFT64>,
         a_col: usize,
-        tmp_bytes: &mut [u8],
+        scratch: &mut ScratchSpace,
     ) {
         #[cfg(debug_assertions)]
         {
             assert_eq!(a.n(), self.n());
             assert_eq!(res.n(), self.n());
-            assert!(tmp_bytes.len() >= Self::vec_znx_normalize_tmp_bytes(&self));
-            assert_alignement(tmp_bytes.as_ptr());
+            //(Jay)Note: This is calling VezZnxOps::vec_znx_normalize_tmp_bytes and not VecZnxBigOps::vec_znx_big_normalize_tmp_bytes.
+            // In the FFT backend the tmp sizes are same but will be different in the NTT backend
+            // assert!(tmp_bytes.len() >= <Self as VecZnxOps<DataMut, Data>>::vec_znx_normalize_tmp_bytes(&self));
+            // assert_alignement(tmp_bytes.as_ptr());
         }
         unsafe {
             vec_znx::vec_znx_normalize_base2k(
@@ -424,7 +523,7 @@ where
                 a.at_ptr(a_col, 0),
                 a.size() as u64,
                 a.sl() as u64,
-                tmp_bytes.as_mut_ptr(),
+                scratch.vec_znx_big_normalize_tmp_bytes(self).as_mut_ptr(),
             );
         }
     }
@@ -457,8 +556,21 @@ where
     }
 
     fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<DataMut, FFT64>, a_col: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_big_automorphism(self, k, a, a_col, a, a_col);
+            vec_znx::vec_znx_automorphism(
+                self.ptr,
+                k,
+                a.at_mut_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
         }
    }
 }
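
The reworded doc comments above pin down the operand order of the two in-place subtraction flavours. A coefficient-level sketch of what sub_ab ("res minus a") and sub_ba ("a minus res") compute, using plain i64 slices in place of polynomial columns:

    // Coefficient-level view of the two in-place subtraction flavours,
    // mirroring vec_znx_big_sub_ab_inplace / vec_znx_big_sub_ba_inplace.
    fn sub_ab_inplace(res: &mut [i64], a: &[i64]) {
        // res = res - a
        for (r, x) in res.iter_mut().zip(a) {
            *r -= x;
        }
    }

    fn sub_ba_inplace(res: &mut [i64], a: &[i64]) {
        // res = a - res
        for (r, x) in res.iter_mut().zip(a) {
            *r = x - *r;
        }
    }

    fn main() {
        let a = [5, 5, 5];
        let mut r1 = [1, 2, 3];
        let mut r2 = [1, 2, 3];
        sub_ab_inplace(&mut r1, &a);
        sub_ba_inplace(&mut r2, &a);
        assert_eq!(r1, [-4, -3, -2]);
        assert_eq!(r2, [4, 3, 2]);
    }

In the FFT64 impls above, the same distinction shows up only in which pointer is passed as the first and which as the second input of vec_znx::vec_znx_sub.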
@@ -1,11 +1,12 @@
 use std::marker::PhantomData;
 
 use crate::ffi::vec_znx_dft;
-use crate::znx_base::{ZnxAlloc, ZnxInfos};
+use crate::znx_base::ZnxInfos;
 use crate::{Backend, DataView, DataViewMut, FFT64, Module, ZnxView, alloc_aligned};
 
 const VEC_ZNX_DFT_ROWS: usize = 1;
 
+// VecZnxDft is `Backend` dependent denoted with generic `B`
 pub struct VecZnxDft<D, B> {
     data: D,
     n: usize,
@@ -44,7 +45,7 @@ impl<D, B> DataView for VecZnxDft<D, B> {
 }
 
 impl<D, B> DataViewMut for VecZnxDft<D, B> {
-    fn data_mut(&self) -> &mut Self::D {
+    fn data_mut(&mut self) -> &mut Self::D {
         &mut self.data
     }
 }
@@ -84,6 +85,18 @@ impl<D: From<Vec<u8>>, B: Backend> VecZnxDft<D, B> {
 
 pub type VecZnxDftOwned<B> = VecZnxDft<Vec<u8>, B>;
 
+impl<'a, D: ?Sized, B> VecZnxDft<&'a mut D, B> {
+    pub(crate) fn from_mut_slice(data: &'a mut D, n: usize, cols: usize, size: usize) -> Self {
+        Self {
+            data,
+            n,
+            cols,
+            size,
+            _phantom: PhantomData,
+        }
+    }
+}
+
 // impl<B: Backend> ZnxAlloc<B> for VecZnxDft<B> {
 //     type Scalar = u8;
@@ -1,7 +1,5 @@
 use crate::VecZnxDftOwned;
-use crate::ffi::vec_znx_big;
-use crate::ffi::vec_znx_dft;
-use crate::znx_base::ZnxAlloc;
+use crate::ffi::{vec_znx_big, vec_znx_dft};
 use crate::znx_base::ZnxInfos;
 use crate::{FFT64, Module, VecZnx, VecZnxBig, VecZnxDft, ZnxView, ZnxViewMut, ZnxZero, assert_alignement};
 use std::cmp::min;
@@ -82,7 +80,7 @@ impl VecZnxDftAlloc<FFT64> for Module<FFT64> {
     // }
 
     fn bytes_of_vec_znx_dft(&self, cols: usize, size: usize) -> usize {
-        VecZnxDft::bytes_of(&self, cols, size)
+        VecZnxDftOwned::bytes_of(&self, cols, size)
     }
 }
 
@@ -156,10 +154,10 @@ where
         #[cfg(debug_assertions)]
         {
             assert!(
-                tmp_bytes.len() >= Self::vec_znx_idft_tmp_bytes(self),
+                tmp_bytes.len() >= <Self as VecZnxDftOps<DataMut, DataMut, FFT64>>::vec_znx_idft_tmp_bytes(self),
                 "invalid tmp_bytes: tmp_bytes.len()={} < self.vec_znx_idft_tmp_bytes()={}",
                 tmp_bytes.len(),
-                Self::vec_znx_idft_tmp_bytes(self)
+                <Self as VecZnxDftOps<DataMut, DataMut, FFT64>>::vec_znx_idft_tmp_bytes(self)
             );
             assert_alignement(tmp_bytes.as_ptr())
         }
@@ -86,10 +86,14 @@ pub trait VecZnxOps<DataMut, Data> {
     );
 
     /// Subtracts the selected column of `a` from the selected column of `res` inplace.
+    ///
+    /// res[res_col] -= a[a_col]
     fn vec_znx_sub_ab_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
 
-    // /// Subtracts the selected column of `a` from the selected column of `res` and negates the selected column of `res`.
-    // fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
+    /// Subtracts the selected column of `res` from the selected column of `a` and inplace mutates `res`
+    ///
+    /// res[res_col] = a[a_col] - res[res_col]
+    fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
 
     // Negates the selected column of `a` and stores the result in `res_col` of `res`.
     fn vec_znx_negate(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize);
@@ -136,15 +140,15 @@ pub trait VecZnxOps<DataMut, Data> {
 impl<B: Backend> VecZnxAlloc for Module<B> {
     //(Jay)TODO: One must define the Scalar generic param here.
     fn new_vec_znx(&self, cols: usize, size: usize) -> VecZnxOwned {
-        VecZnxOwned::new(self.n(), cols, size)
+        VecZnxOwned::new::<i64>(self.n(), cols, size)
     }
 
     fn bytes_of_vec_znx(&self, cols: usize, size: usize) -> usize {
-        VecZnxOwned::bytes_of(self.n(), cols, size)
+        VecZnxOwned::bytes_of::<i64>(self.n(), cols, size)
     }
 
     fn new_vec_znx_from_bytes(&self, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxOwned {
-        VecZnxOwned::new_from_bytes(self.n(), cols, size, bytes)
+        VecZnxOwned::new_from_bytes::<i64>(self.n(), cols, size, bytes)
     }
 }
 
@@ -170,7 +174,7 @@ where
     {
         assert_eq!(a.n(), self.n());
         assert_eq!(res.n(), self.n());
-        assert!(tmp_bytes.len() >= Self::vec_znx_normalize_tmp_bytes(&self));
+        assert!(tmp_bytes.len() >= <Self as VecZnxOps<DataMut, Data>>::vec_znx_normalize_tmp_bytes(&self));
         assert_alignement(tmp_bytes.as_ptr());
     }
     unsafe {
@@ -190,16 +194,8 @@ where
 
     fn vec_znx_normalize_inplace(&self, log_base2k: usize, a: &mut VecZnx<DataMut>, a_col: usize, tmp_bytes: &mut [u8]) {
         unsafe {
-            let a_ptr: *mut VecZnx = a as *mut VecZnx;
-            Self::vec_znx_normalize(
-                self,
-                log_base2k,
-                &mut *a_ptr,
-                a_col,
-                &*a_ptr,
-                a_col,
-                tmp_bytes,
-            );
+            let a_ptr: *const VecZnx<_> = a;
+            Self::vec_znx_normalize(self, log_base2k, a, a_col, &*a_ptr, a_col, tmp_bytes);
         }
     }
 
@@ -236,8 +232,24 @@ where
     }
 
     fn vec_znx_add_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_add(&self, res, res_col, a, a_col, res, res_col);
+            vec_znx::vec_znx_add(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+            )
         }
     }
 
@@ -274,18 +286,48 @@ where
     }
 
     fn vec_znx_sub_ab_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
         unsafe {
-            let res_ptr: *mut VecZnx = res as *mut VecZnx;
-            Self::vec_znx_sub(self, res, res_col, a, a_col, res, res_col);
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
         }
     }
 
-    // fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx, res_col: usize, a: &VecZnx, a_col: usize) {
-    //     unsafe {
-    //         let res_ptr: *mut VecZnx = res as *mut VecZnx;
-    //         Self::vec_znx_sub(self, &mut *res_ptr, res_col, &*res_ptr, res_col, a, a_col);
-    //     }
-    // }
+    fn vec_znx_sub_ba_inplace(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
+        unsafe {
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                res.at_ptr(res_col, 0),
+                res.size() as u64,
+                res.sl() as u64,
+            )
+        }
+    }
 
     fn vec_znx_negate(&self, res: &mut VecZnx<DataMut>, res_col: usize, a: &VecZnx<Data>, a_col: usize) {
         #[cfg(debug_assertions)]
@@ -308,7 +350,8 @@ where
 
     fn vec_znx_negate_inplace(&self, a: &mut VecZnx<DataMut>, a_col: usize) {
         unsafe {
-            Self::vec_znx_negate(self, a, a_col, a, a_col);
+            let a_ref: *const VecZnx<_> = a;
+            Self::vec_znx_negate(self, a, a_col, a_ref.as_ref().unwrap(), a_col);
         }
     }
 
@@ -333,8 +376,21 @@ where
     }
 
     fn vec_znx_rotate_inplace(&self, k: i64, a: &mut VecZnx<DataMut>, a_col: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_rotate(self, k, a, a_col, a, a_col);
+            vec_znx::vec_znx_rotate(
+                self.ptr,
+                k,
+                a.at_mut_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
         }
     }
 
@@ -359,8 +415,21 @@ where
     }
 
     fn vec_znx_automorphism_inplace(&self, k: i64, a: &mut VecZnx<DataMut>, a_col: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+        }
         unsafe {
-            Self::vec_znx_automorphism(self, k, a, a_col, a, a_col);
+            vec_znx::vec_znx_automorphism(
+                self.ptr,
+                k,
+                a.at_mut_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                a.at_ptr(a_col, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
         }
     }
 
@@ -392,7 +461,7 @@ where
                 self.vec_znx_rotate(-1, buf, 0, a, a_col);
             } else {
                 switch_degree(bi, res_col, buf, a_col);
-                self.vec_znx_rotate_inplace(-1, buf, a_col);
+                <Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, -1, buf, a_col);
             }
         })
     }
@@ -414,9 +483,9 @@ where
 
         a.iter().enumerate().for_each(|(_, ai)| {
             switch_degree(res, res_col, ai, a_col);
-            self.vec_znx_rotate_inplace(-1, res, res_col);
+            <Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, -1, res, res_col);
        });
 
-        self.vec_znx_rotate_inplace(a.len() as i64, res, res_col);
+        <Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, a.len() as i64, res, res_col);
     }
 }
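
Several call sites above switch from self.vec_znx_rotate_inplace(...) to the fully qualified form <Self as VecZnxOps<DataMut, Data>>::vec_znx_rotate_inplace(self, ...). With the ops traits now generic over their data parameters, the bare method call can match more than one impl, and the qualified syntax pins the type parameters. A toy reproduction of the ambiguity and its resolution (Ctx and Ops are made-up names, not the crate's):

    trait Ops<T> {
        fn run(&self) -> &'static str;
    }

    struct Ctx;

    impl Ops<i64> for Ctx {
        fn run(&self) -> &'static str {
            "i64 impl"
        }
    }

    impl Ops<f64> for Ctx {
        fn run(&self) -> &'static str {
            "f64 impl"
        }
    }

    fn main() {
        let ctx = Ctx;
        // `ctx.run()` alone is ambiguous here; the qualified form selects the
        // impl, analogous to <Self as VecZnxOps<DataMut, Data>>::...(self, ...).
        assert_eq!(<Ctx as Ops<i64>>::run(&ctx), "i64 impl");
        assert_eq!(<Ctx as Ops<f64>>::run(&ctx), "f64 impl");
    }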
@@ -85,26 +85,26 @@ pub trait ZnxInfos {
 // pub trait ZnxSliceSize {}
 
 //(Jay) TODO: Remove ZnxAlloc
-pub trait ZnxAlloc<B: Backend>
-where
-    Self: Sized + ZnxInfos,
-{
-    type Scalar;
-    fn new(module: &Module<B>, rows: usize, cols: usize, size: usize) -> Self {
-        let bytes: Vec<u8> = alloc_aligned::<u8>(Self::bytes_of(module, rows, cols, size));
-        Self::from_bytes(module, rows, cols, size, bytes)
-    }
-
-    fn from_bytes(module: &Module<B>, rows: usize, cols: usize, size: usize, mut bytes: Vec<u8>) -> Self {
-        let mut res: Self = Self::from_bytes_borrow(module, rows, cols, size, &mut bytes);
-        res.znx_mut().data = bytes;
-        res
-    }
-
-    fn from_bytes_borrow(module: &Module<B>, rows: usize, cols: usize, size: usize, bytes: &mut [u8]) -> Self;
-
-    fn bytes_of(module: &Module<B>, rows: usize, cols: usize, size: usize) -> usize;
-}
+// pub trait ZnxAlloc<B: Backend>
+// where
+//     Self: Sized + ZnxInfos,
+// {
+//     type Scalar;
+//     fn new(module: &Module<B>, rows: usize, cols: usize, size: usize) -> Self {
+//         let bytes: Vec<u8> = alloc_aligned::<u8>(Self::bytes_of(module, rows, cols, size));
+//         Self::from_bytes(module, rows, cols, size, bytes)
+//     }
+
+//     fn from_bytes(module: &Module<B>, rows: usize, cols: usize, size: usize, mut bytes: Vec<u8>) -> Self {
+//         let mut res: Self = Self::from_bytes_borrow(module, rows, cols, size, &mut bytes);
+//         res.znx_mut().data = bytes;
+//         res
+//     }
+
+//     fn from_bytes_borrow(module: &Module<B>, rows: usize, cols: usize, size: usize, bytes: &mut [u8]) -> Self;
+
+//     fn bytes_of(module: &Module<B>, rows: usize, cols: usize, size: usize) -> usize;
+// }
 
 pub trait DataView {
     type D;
@@ -112,11 +112,11 @@ pub trait DataView {
 }
 
 pub trait DataViewMut: DataView {
-    fn data_mut(&self) -> &mut Self::D;
+    fn data_mut(&mut self) -> &mut Self::D;
 }
 
 pub trait ZnxView: ZnxInfos + DataView<D: AsRef<[u8]>> {
-    type Scalar;
+    type Scalar: Copy;
 
     /// Returns a non-mutable pointer to the underlying coefficients array.
     fn as_ptr(&self) -> *const Self::Scalar {
@@ -177,11 +177,9 @@ pub trait ZnxViewMut: ZnxView + DataViewMut<D: AsMut<[u8]>> {
 impl<T> ZnxViewMut for T where T: ZnxView + DataViewMut<D: AsMut<[u8]>> {}
 
 use std::convert::TryFrom;
-use std::num::TryFromIntError;
 use std::ops::{Add, AddAssign, Div, Mul, Neg, Shl, Shr, Sub};
-pub trait IntegerType:
+pub trait Num:
     Copy
-    + std::fmt::Debug
     + Default
     + PartialEq
     + PartialOrd
@@ -190,22 +188,23 @@ pub trait IntegerType:
     + Mul<Output = Self>
     + Div<Output = Self>
     + Neg<Output = Self>
-    + Shr<Output = Self>
-    + Shl<Output = Self>
     + AddAssign
-    + TryFrom<usize, Error = TryFromIntError>
 {
     const BITS: u32;
 }
 
-impl IntegerType for i64 {
+impl Num for i64 {
     const BITS: u32 = 64;
 }
 
-impl IntegerType for i128 {
+impl Num for i128 {
     const BITS: u32 = 128;
 }
 
+impl Num for f64 {
+    const BITS: u32 = 64;
+}
+
 pub trait ZnxZero: ZnxViewMut
 where
     Self: Sized,
@@ -231,79 +230,16 @@ where
     }
 }
 
-pub trait ZnxRsh: ZnxZero {
-    fn rsh(&mut self, k: usize, log_base2k: usize, col: usize, carry: &mut [u8]) {
-        rsh(k, log_base2k, self, col, carry)
-    }
-}
-
 // Blanket implementations
 impl<T> ZnxZero for T where T: ZnxViewMut {}
-impl<T> ZnxRsh for T where T: ZnxZero {}
+// impl<T> ZnxRsh for T where T: ZnxZero {}
 
-pub fn rsh<V: ZnxRsh + ZnxZero>(k: usize, log_base2k: usize, a: &mut V, a_col: usize, tmp_bytes: &mut [u8])
-where
-    V::Scalar: IntegerType,
-{
-    let n: usize = a.n();
-    let size: usize = a.size();
-    let cols: usize = a.cols();
-
-    #[cfg(debug_assertions)]
-    {
-        assert!(
-            tmp_bytes.len() >= rsh_tmp_bytes::<V::Scalar>(n),
-            "invalid carry: carry.len()/size_ofSelf::Scalar={} < rsh_tmp_bytes({}, {})",
-            tmp_bytes.len() / size_of::<V::Scalar>(),
-            n,
-            size,
-        );
-        assert_alignement(tmp_bytes.as_ptr());
-    }
-
-    let size: usize = a.size();
-    let steps: usize = k / log_base2k;
-
-    a.raw_mut().rotate_right(n * steps * cols);
-    (0..cols).for_each(|i| {
-        (0..steps).for_each(|j| {
-            a.zero_at(i, j);
-        })
-    });
-
-    let k_rem: usize = k % log_base2k;
-
-    if k_rem != 0 {
-        let carry: &mut [V::Scalar] = cast_mut(tmp_bytes);
-
-        unsafe {
-            std::ptr::write_bytes(carry.as_mut_ptr(), 0, n * size_of::<V::Scalar>());
-        }
-
-        let log_base2k_t: V::Scalar = V::Scalar::try_from(log_base2k).unwrap();
-        let shift: V::Scalar = V::Scalar::try_from(V::Scalar::BITS as usize - k_rem).unwrap();
-        let k_rem_t: V::Scalar = V::Scalar::try_from(k_rem).unwrap();
-
-        (steps..size).for_each(|i| {
-            izip!(carry.iter_mut(), a.at_mut(a_col, i).iter_mut()).for_each(|(ci, xi)| {
-                *xi += *ci << log_base2k_t;
-                *ci = get_base_k_carry(*xi, shift);
-                *xi = (*xi - *ci) >> k_rem_t;
-            });
-        })
-    }
-}
-
-#[inline(always)]
-fn get_base_k_carry<T: IntegerType>(x: T, shift: T) -> T {
-    (x << shift) >> shift
-}
-
-pub fn rsh_tmp_bytes<T: IntegerType>(n: usize) -> usize {
-    n * std::mem::size_of::<T>()
-}
-
-pub fn switch_degree<DMut: ZnxViewMut + ZnxZero, D: ZnxView>(b: &mut DMut, col_b: usize, a: &D, col_a: usize) {
+pub fn switch_degree<S: Copy, DMut: ZnxViewMut<Scalar = S> + ZnxZero, D: ZnxView<Scalar = S>>(
+    b: &mut DMut,
+    col_b: usize,
+    a: &D,
+    col_a: usize,
+) {
     let (n_in, n_out) = (a.n(), b.n());
     let (gap_in, gap_out): (usize, usize);
 
@@ -325,6 +261,71 @@ pub fn switch_degree<DMut: ZnxViewMut + ZnxZero, D: ZnxView>(b: &mut DMut, col_b
     });
 }
 
+// (Jay)TODO: implement rsh for VecZnx, VecZnxBig
+// pub trait ZnxRsh: ZnxZero {
+//     fn rsh(&mut self, k: usize, log_base2k: usize, col: usize, carry: &mut [u8]) {
+//         rsh(k, log_base2k, self, col, carry)
+//     }
+// }
+// pub fn rsh<V: ZnxRsh + ZnxZero>(k: usize, log_base2k: usize, a: &mut V, a_col: usize, tmp_bytes: &mut [u8]) {
+//     let n: usize = a.n();
+//     let size: usize = a.size();
+//     let cols: usize = a.cols();
+
+//     #[cfg(debug_assertions)]
+//     {
+//         assert!(
+//             tmp_bytes.len() >= rsh_tmp_bytes::<V::Scalar>(n),
+//             "invalid carry: carry.len()/size_ofSelf::Scalar={} < rsh_tmp_bytes({}, {})",
+//             tmp_bytes.len() / size_of::<V::Scalar>(),
+//             n,
+//             size,
+//         );
+//         assert_alignement(tmp_bytes.as_ptr());
+//     }
+
+//     let size: usize = a.size();
+//     let steps: usize = k / log_base2k;
+
+//     a.raw_mut().rotate_right(n * steps * cols);
+//     (0..cols).for_each(|i| {
+//         (0..steps).for_each(|j| {
+//             a.zero_at(i, j);
+//         })
+//     });
+
+//     let k_rem: usize = k % log_base2k;
+
+//     if k_rem != 0 {
+//         let carry: &mut [V::Scalar] = cast_mut(tmp_bytes);
+
+//         unsafe {
+//             std::ptr::write_bytes(carry.as_mut_ptr(), 0, n * size_of::<V::Scalar>());
+//         }
+
+//         let log_base2k_t: V::Scalar = V::Scalar::try_from(log_base2k).unwrap();
+//         let shift: V::Scalar = V::Scalar::try_from(V::Scalar::BITS as usize - k_rem).unwrap();
+//         let k_rem_t: V::Scalar = V::Scalar::try_from(k_rem).unwrap();
+
+//         (steps..size).for_each(|i| {
+//             izip!(carry.iter_mut(), a.at_mut(a_col, i).iter_mut()).for_each(|(ci, xi)| {
+//                 *xi += *ci << log_base2k_t;
+//                 *ci = get_base_k_carry(*xi, shift);
+//                 *xi = (*xi - *ci) >> k_rem_t;
+//             });
+//         })
+//     }
+// }
+
+// #[inline(always)]
+// fn get_base_k_carry<T: Num>(x: T, shift: T) -> T {
+//     (x << shift) >> shift
+// }
+
+// pub fn rsh_tmp_bytes<T: Num>(n: usize) -> usize {
+//     n * std::mem::size_of::<T>()
+// }
+
 // pub trait ZnxLayout: ZnxInfos {
 //     type Scalar;
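
The rsh routine moved into comments above (pending reimplementation for VecZnx and VecZnxBig) shifts a base-2^k decomposition right by k bits, and its core step is the carry extraction (x << shift) >> shift, which sign-extends the low k_rem bits of each coefficient. A small worked sketch of that identity, assuming i64 coefficients as in the commented code:

    // (x << shift) >> shift sign-extends the low k_rem bits of x, so x splits
    // into a small signed remainder plus a part divisible by 2^k_rem.
    fn get_base_k_carry(x: i64, k_rem: u32) -> i64 {
        let shift = 64 - k_rem;
        (x << shift) >> shift
    }

    fn main() {
        let k_rem = 4; // extract the signed low 4 bits
        for &x in &[23i64, -23, 7, -8] {
            let c = get_base_k_carry(x, k_rem);
            // x = (x - c) + c, with (x - c) divisible by 2^k_rem
            assert_eq!((x - c) % (1 << k_rem), 0);
            println!("x = {x:>3}: remainder {c:>2}, shifted part {}", (x - c) >> k_rem);
        }
    }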
@@ -7,8 +7,8 @@ use crate::{
     parameters::Parameters,
 };
 use base2k::{
-    Module, Scalar, ScalarOps, ScalarZnxDft, ScalarZnxDftOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, MatZnxDft,
-    MatZnxDftOps, assert_alignement,
+    MatZnxDft, MatZnxDftOps, Module, Scalar, ScalarAlloc, ScalarZnxDft, ScalarZnxDftOps, VecZnx, VecZnxBig, VecZnxBigOps,
+    VecZnxDft, VecZnxDftOps, VecZnxOps, assert_alignement,
 };
 use sampling::source::Source;
 use std::collections::HashMap;