Crates.io (#76)

* crates re-organisation

* fixed typo in layout & added test for vmp_apply

* updated dependencies
Jean-Philippe Bossuat
2025-08-18 11:16:27 +02:00
committed by GitHub
parent dce4d82706
commit a1de248567
415 changed files with 32933 additions and 1889 deletions


@@ -0,0 +1,204 @@
use itertools::izip;
use rug::{Assign, Float};
use crate::{
api::{ZnxInfos, ZnxView, ZnxViewMut, ZnxZero},
layouts::{DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef},
};
impl<D: DataMut> VecZnx<D> {
pub fn encode_vec_i64(&mut self, basek: usize, col: usize, k: usize, data: &[i64], log_max: usize) {
let size: usize = k.div_ceil(basek);
#[cfg(debug_assertions)]
{
let a: VecZnx<&mut [u8]> = self.to_mut();
assert!(
size <= a.size(),
"invalid argument k.div_ceil(basek)={} > a.size()={}",
size,
a.size()
);
assert!(col < a.cols());
assert!(data.len() <= a.n())
}
let data_len: usize = data.len();
let mut a: VecZnx<&mut [u8]> = self.to_mut();
let k_rem: usize = basek - (k % basek);
// Zero the coefficients of the `col`-th column
(0..a.size()).for_each(|i| {
a.zero_at(col, i);
});
// If 2^{log_max} * 2^{k_rem} < 2^{63}-1, then we can simply copy
// the values onto the last limb.
// Else we decompose the values in base 2^{basek}.
if log_max + k_rem < 63 || k_rem == basek {
a.at_mut(col, size - 1)[..data_len].copy_from_slice(&data[..data_len]);
} else {
let mask: i64 = (1 << basek) - 1;
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size)
.rev()
.enumerate()
.for_each(|(i, i_rev)| {
let shift: usize = i * basek;
izip!(a.at_mut(col, i_rev).iter_mut(), data.iter()).for_each(|(y, x)| *y = (x >> shift) & mask);
})
}
// Case where k % basek != 0.
if k_rem != basek {
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size).rev().for_each(|i| {
a.at_mut(col, i)[..data_len]
.iter_mut()
.for_each(|x| *x <<= k_rem);
})
}
}
pub fn encode_coeff_i64(&mut self, basek: usize, col: usize, k: usize, idx: usize, data: i64, log_max: usize) {
let size: usize = k.div_ceil(basek);
#[cfg(debug_assertions)]
{
let a: VecZnx<&mut [u8]> = self.to_mut();
assert!(idx < a.n());
assert!(
size <= a.size(),
"invalid argument k.div_ceil(basek)={} > a.size()={}",
size,
a.size()
);
assert!(col < a.cols());
}
let k_rem: usize = basek - (k % basek);
let mut a: VecZnx<&mut [u8]> = self.to_mut();
(0..a.size()).for_each(|j| a.at_mut(col, j)[idx] = 0);
// If 2^{log_max} * 2^{k_rem} < 2^{63}-1, then we can simply copy
// the value onto the last limb.
// Else we decompose the value in base 2^{basek}.
if log_max + k_rem < 63 || k_rem == basek {
a.at_mut(col, size - 1)[idx] = data;
} else {
let mask: i64 = (1 << basek) - 1;
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size)
.rev()
.enumerate()
.for_each(|(j, j_rev)| {
a.at_mut(col, j_rev)[idx] = (data >> (j * basek)) & mask;
})
}
// Case where k % basek != 0.
if k_rem != basek {
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size).rev().for_each(|j| {
a.at_mut(col, j)[idx] <<= k_rem;
})
}
}
}
impl<D: DataRef> VecZnx<D> {
pub fn decode_vec_i64(&self, basek: usize, col: usize, k: usize, data: &mut [i64]) {
let size: usize = k.div_ceil(basek);
#[cfg(debug_assertions)]
{
let a: VecZnx<&[u8]> = self.to_ref();
assert!(
data.len() >= a.n(),
"invalid data: data.len()={} < a.n()={}",
data.len(),
a.n()
);
assert!(col < a.cols());
assert!(
size <= a.size(),
"invalid argument k.div_ceil(basek)={} > a.size()={}",
size,
a.size()
);
}
let a: VecZnx<&[u8]> = self.to_ref();
data[..a.n()].copy_from_slice(a.at(col, 0));
let rem: usize = basek - (k % basek);
if k < basek {
data.iter_mut().for_each(|x| *x >>= rem);
} else {
(1..size).for_each(|i| {
if i == size - 1 && rem != basek {
let k_rem: usize = basek - rem;
izip!(a.at(col, i).iter(), data.iter_mut()).for_each(|(x, y)| {
*y = (*y << k_rem) + (x >> rem);
});
} else {
izip!(a.at(col, i).iter(), data.iter_mut()).for_each(|(x, y)| {
*y = (*y << basek) + x;
});
}
})
}
}
pub fn decode_coeff_i64(&self, basek: usize, col: usize, k: usize, idx: usize) -> i64 {
#[cfg(debug_assertions)]
{
let a: VecZnx<&[u8]> = self.to_ref();
assert!(idx < a.n());
assert!(col < a.cols());
assert!(k.div_ceil(basek) <= a.size());
}
let a: VecZnx<&[u8]> = self.to_ref();
let size: usize = k.div_ceil(basek);
let mut res: i64 = 0;
let rem: usize = basek - (k % basek);
(0..size).for_each(|j| {
let x: i64 = a.at(col, j)[idx];
if j == size - 1 && rem != basek {
let k_rem: usize = basek - rem;
res = (res << k_rem) + (x >> rem);
} else {
res = (res << basek) + x;
}
});
res
}
pub fn decode_vec_float(&self, basek: usize, col: usize, data: &mut [Float]) {
#[cfg(debug_assertions)]
{
let a: VecZnx<&[u8]> = self.to_ref();
assert!(
data.len() >= a.n(),
"invalid data: data.len()={} < a.n()={}",
data.len(),
a.n()
);
assert!(col < a.cols());
}
let a: VecZnx<&[u8]> = self.to_ref();
let size: usize = a.size();
let prec: u32 = (basek * size) as u32;
// 2^{basek}
let base = Float::with_val(prec, 1u64 << basek);
// y[i] = sum_j x[j][i] * 2^{-basek*(j+1)}
(0..size).for_each(|i| {
if i == 0 {
izip!(a.at(col, size - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
y.assign(*x);
*y /= &base;
});
} else {
izip!(a.at(col, size - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
*y += Float::with_val(prec, *x);
*y /= &base;
});
}
});
}
}
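
A minimal round-trip sketch for the encoders above, assuming the VecZnxOwned alias and alloc defined later in this commit (names are taken from this diff; the enclosing crate path is not shown):

let n: usize = 8;
let (basek, k): (usize, usize) = (16, 64); // k % basek == 0, so the plain-copy path is taken
let mut a = VecZnxOwned::alloc(n, 1, k.div_ceil(basek));
let want: Vec<i64> = (0..n as i64).collect();
a.encode_vec_i64(basek, 0, k, &want, 3); // log_max = 3 since all values < 2^3
let mut got = vec![0i64; n];
a.decode_vec_i64(basek, 0, k, &mut got);
assert_eq!(got, want);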


@@ -0,0 +1,304 @@
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, FillUniform, Reset, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Data, DataMut, DataRef, ReaderFrom, ToOwnedDeep, VecZnx, WriterTo},
source::Source,
};
use std::fmt;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rand::RngCore;
#[derive(PartialEq, Eq, Clone)]
pub struct MatZnx<D: Data> {
data: D,
n: usize,
size: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
}
impl<D: DataRef> ToOwnedDeep for MatZnx<D> {
type Owned = MatZnx<Vec<u8>>;
fn to_owned_deep(&self) -> Self::Owned {
MatZnx {
data: self.data.as_ref().to_vec(),
n: self.n,
size: self.size,
rows: self.rows,
cols_in: self.cols_in,
cols_out: self.cols_out,
}
}
}
impl<D: DataRef> fmt::Debug for MatZnx<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: Data> ZnxInfos for MatZnx<D> {
fn cols(&self) -> usize {
self.cols_in
}
fn rows(&self) -> usize {
self.rows
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
self.size
}
}
impl<D: Data> ZnxSliceSize for MatZnx<D> {
fn sl(&self) -> usize {
self.n() * self.cols_out()
}
}
impl<D: Data> DataView for MatZnx<D> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data> DataViewMut for MatZnx<D> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: DataRef> ZnxView for MatZnx<D> {
type Scalar = i64;
}
impl<D: Data> MatZnx<D> {
pub fn cols_in(&self) -> usize {
self.cols_in
}
pub fn cols_out(&self) -> usize {
self.cols_out
}
}
impl MatZnx<Vec<u8>> {
pub fn alloc_bytes(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
rows * cols_in * VecZnx::<Vec<u8>>::alloc_bytes(n, cols_out, size)
}
pub fn alloc(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned(Self::alloc_bytes(n, rows, cols_in, cols_out, size));
Self {
data,
n,
size,
rows,
cols_in,
cols_out,
}
}
pub fn from_bytes(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::alloc_bytes(n, rows, cols_in, cols_out, size));
Self {
data,
n,
size,
rows,
cols_in,
cols_out,
}
}
}
impl<D: DataRef> MatZnx<D> {
pub fn at(&self, row: usize, col: usize) -> VecZnx<&[u8]> {
#[cfg(debug_assertions)]
{
assert!(row < self.rows(), "rows: {} >= {}", row, self.rows());
assert!(col < self.cols_in(), "cols: {} >= {}", col, self.cols_in());
}
let self_ref: MatZnx<&[u8]> = self.to_ref();
let nb_bytes: usize = VecZnx::<Vec<u8>>::alloc_bytes(self.n, self.cols_out, self.size);
let start: usize = nb_bytes * self.cols() * row + col * nb_bytes;
let end: usize = start + nb_bytes;
VecZnx {
data: &self_ref.data[start..end],
n: self.n,
cols: self.cols_out,
size: self.size,
max_size: self.size,
}
}
}
impl<D: DataMut> MatZnx<D> {
pub fn at_mut(&mut self, row: usize, col: usize) -> VecZnx<&mut [u8]> {
#[cfg(debug_assertions)]
{
assert!(row < self.rows(), "rows: {} >= {}", row, self.rows());
assert!(col < self.cols_in(), "cols: {} >= {}", col, self.cols_in());
}
let n: usize = self.n();
let cols_out: usize = self.cols_out();
let cols_in: usize = self.cols_in();
let size: usize = self.size();
let self_ref: MatZnx<&mut [u8]> = self.to_mut();
let nb_bytes: usize = VecZnx::<Vec<u8>>::alloc_bytes(n, cols_out, size);
let start: usize = nb_bytes * cols_in * row + col * nb_bytes;
let end: usize = start + nb_bytes;
VecZnx {
data: &mut self_ref.data[start..end],
n,
cols: cols_out,
size,
max_size: size,
}
}
}
impl<D: DataMut> FillUniform for MatZnx<D> {
fn fill_uniform(&mut self, source: &mut Source) {
source.fill_bytes(self.data.as_mut());
}
}
impl<D: DataMut> Reset for MatZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.size = 0;
self.rows = 0;
self.cols_in = 0;
self.cols_out = 0;
}
}
pub type MatZnxOwned = MatZnx<Vec<u8>>;
pub type MatZnxMut<'a> = MatZnx<&'a mut [u8]>;
pub type MatZnxRef<'a> = MatZnx<&'a [u8]>;
pub trait MatZnxToRef {
fn to_ref(&self) -> MatZnx<&[u8]>;
}
impl<D: DataRef> MatZnxToRef for MatZnx<D> {
fn to_ref(&self) -> MatZnx<&[u8]> {
MatZnx {
data: self.data.as_ref(),
n: self.n,
rows: self.rows,
cols_in: self.cols_in,
cols_out: self.cols_out,
size: self.size,
}
}
}
pub trait MatZnxToMut {
fn to_mut(&mut self) -> MatZnx<&mut [u8]>;
}
impl<D: DataMut> MatZnxToMut for MatZnx<D> {
fn to_mut(&mut self) -> MatZnx<&mut [u8]> {
MatZnx {
data: self.data.as_mut(),
n: self.n,
rows: self.rows,
cols_in: self.cols_in,
cols_out: self.cols_out,
size: self.size,
}
}
}
impl<D: Data> MatZnx<D> {
pub fn from_data(data: D, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
Self {
data,
n,
rows,
cols_in,
cols_out,
size,
}
}
}
impl<D: DataMut> ReaderFrom for MatZnx<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.n = reader.read_u64::<LittleEndian>()? as usize;
self.size = reader.read_u64::<LittleEndian>()? as usize;
self.rows = reader.read_u64::<LittleEndian>()? as usize;
self.cols_in = reader.read_u64::<LittleEndian>()? as usize;
self.cols_out = reader.read_u64::<LittleEndian>()? as usize;
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
let buf: &mut [u8] = self.data.as_mut();
if buf.len() != len {
return Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
format!("self.data.len()={} != read len={}", buf.len(), len),
));
}
reader.read_exact(&mut buf[..len])?;
Ok(())
}
}
impl<D: DataRef> WriterTo for MatZnx<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.n as u64)?;
writer.write_u64::<LittleEndian>(self.size as u64)?;
writer.write_u64::<LittleEndian>(self.rows as u64)?;
writer.write_u64::<LittleEndian>(self.cols_in as u64)?;
writer.write_u64::<LittleEndian>(self.cols_out as u64)?;
let buf: &[u8] = self.data.as_ref();
writer.write_u64::<LittleEndian>(buf.len() as u64)?;
writer.write_all(buf)?;
Ok(())
}
}
impl<D: DataRef> fmt::Display for MatZnx<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"MatZnx(n={}, rows={}, cols_in={}, cols_out={}, size={})",
self.n, self.rows, self.cols_in, self.cols_out, self.size
)?;
for row_i in 0..self.rows {
writeln!(f, "Row {}:", row_i)?;
for col_i in 0..self.cols_in {
writeln!(f, "cols_in {}:", col_i)?;
writeln!(f, "{}:", self.at(row_i, col_i))?;
}
}
Ok(())
}
}
impl<D: DataMut> ZnxZero for MatZnx<D> {
fn zero(&mut self) {
self.raw_mut().fill(0)
}
fn zero_at(&mut self, i: usize, j: usize) {
self.at_mut(i, j).zero();
}
}
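
The accessors above address rows * cols_in contiguous VecZnx blocks of cols_out columns each, i.e. at(row, col) starts at byte offset (row * cols_in + col) * VecZnx::alloc_bytes(n, cols_out, size). A small sketch under that reading:

let mut m = MatZnxOwned::alloc(8, 2, 1, 2, 3); // n=8, rows=2, cols_in=1, cols_out=2, size=3
m.at_mut(1, 0).at_mut(0, 0)[0] = 42; // write into the (row=1, col_in=0) block
assert_eq!(m.at(1, 0).at(0, 0)[0], 42);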


@@ -0,0 +1,32 @@
mod encoding;
mod mat_znx;
mod module;
mod scalar_znx;
mod scratch;
mod serialization;
mod stats;
mod svp_ppol;
mod vec_znx;
mod vec_znx_big;
mod vec_znx_dft;
mod vmp_pmat;
pub use mat_znx::*;
pub use module::*;
pub use scalar_znx::*;
pub use scratch::*;
pub use serialization::*;
pub use svp_ppol::*;
pub use vec_znx::*;
pub use vec_znx_big::*;
pub use vec_znx_dft::*;
pub use vmp_pmat::*;
pub trait Data = PartialEq + Eq + Sized;
pub trait DataRef = Data + AsRef<[u8]>;
pub trait DataMut = DataRef + AsMut<[u8]>;
pub trait ToOwnedDeep {
type Owned;
fn to_owned_deep(&self) -> Self::Owned;
}
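
`pub trait Data = ...` is a trait alias, which is unstable, so this module presumably builds on nightly under #![feature(trait_alias)]. On stable Rust the usual equivalent shape is a supertrait plus a blanket impl (a sketch, not this crate's code):

pub trait Data: PartialEq + Eq + Sized {}
impl<T: PartialEq + Eq + Sized> Data for T {}

The blanket-impl form differs slightly from a true alias (downstream types cannot opt out), but bounds such as D: DataMut read the same.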


@@ -0,0 +1,103 @@
use std::{fmt::Display, marker::PhantomData, ptr::NonNull};
use rand_distr::num_traits::Zero;
use crate::GALOISGENERATOR;
#[allow(clippy::missing_safety_doc)]
pub trait Backend: Sized {
type ScalarBig: Copy + Zero + Display;
type ScalarPrep: Copy + Zero + Display;
type Handle: 'static;
fn layout_prep_word_count() -> usize;
fn layout_big_word_count() -> usize;
unsafe fn destroy(handle: NonNull<Self::Handle>);
}
pub struct Module<B: Backend> {
ptr: NonNull<B::Handle>,
n: u64,
_marker: PhantomData<B>,
}
impl<B: Backend> Module<B> {
/// Construct from a raw pointer managed elsewhere.
/// SAFETY: `ptr` must be non-null and remain valid for the lifetime of this Module.
#[inline]
#[allow(clippy::missing_safety_doc)]
pub unsafe fn from_raw_parts(ptr: *mut B::Handle, n: u64) -> Self {
Self {
ptr: NonNull::new(ptr).expect("null module ptr"),
n,
_marker: PhantomData,
}
}
#[allow(clippy::missing_safety_doc)]
#[inline]
pub unsafe fn ptr(&self) -> *mut <B as Backend>::Handle {
self.ptr.as_ptr()
}
#[inline]
pub fn n(&self) -> usize {
self.n as usize
}
#[inline]
pub fn as_mut_ptr(&self) -> *mut B::Handle {
self.ptr.as_ptr()
}
#[inline]
pub fn log_n(&self) -> usize {
(usize::BITS - (self.n() - 1).leading_zeros()) as _
}
#[inline]
pub fn cyclotomic_order(&self) -> u64 {
(self.n() << 1) as _
}
/// Returns GALOISGENERATOR^{|generator|} * sign(generator) (mod 2n).
#[inline]
pub fn galois_element(&self, generator: i64) -> i64 {
if generator == 0 {
return 1;
}
((mod_exp_u64(GALOISGENERATOR, generator.unsigned_abs() as usize) & (self.cyclotomic_order() - 1)) as i64)
* generator.signum()
}
/// Returns gal_el^{-1} (mod 2n).
#[inline]
pub fn galois_element_inv(&self, gal_el: i64) -> i64 {
if gal_el == 0 {
panic!("cannot invert 0")
}
((mod_exp_u64(
gal_el.unsigned_abs(),
(self.cyclotomic_order() - 1) as usize,
) & (self.cyclotomic_order() - 1)) as i64)
* gal_el.signum()
}
}
impl<B: Backend> Drop for Module<B> {
fn drop(&mut self) {
unsafe { B::destroy(self.ptr) }
}
}
pub fn mod_exp_u64(x: u64, e: usize) -> u64 {
let mut y: u64 = 1;
let mut x_pow: u64 = x;
let mut exp = e;
while exp > 0 {
if exp & 1 == 1 {
y = y.wrapping_mul(x_pow);
}
x_pow = x_pow.wrapping_mul(x_pow);
exp >>= 1;
}
y
}
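
Worked check of the masked-inversion trick in galois_element_inv: for a power-of-two modulus 2n, wrapping arithmetic modulo 2^64 is exact after masking, and x^(2n-1) & (2n-1) yields x^{-1} mod 2n:

let two_n: u64 = 16; // n = 8
let x: u64 = 5;
let x_inv = mod_exp_u64(x, (two_n - 1) as usize) & (two_n - 1);
assert_eq!(x_inv, 13);
assert_eq!((x * x_inv) & (two_n - 1), 1); // 5 * 13 = 65 ≡ 1 (mod 16)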


@@ -0,0 +1,247 @@
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rand::seq::SliceRandom;
use rand_core::RngCore;
use rand_distr::{Distribution, weighted::WeightedIndex};
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, FillUniform, Reset, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Data, DataMut, DataRef, ReaderFrom, ToOwnedDeep, VecZnx, WriterTo},
source::Source,
};
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct ScalarZnx<D: Data> {
pub data: D,
pub n: usize,
pub cols: usize,
}
impl<D: DataRef> ToOwnedDeep for ScalarZnx<D> {
type Owned = ScalarZnx<Vec<u8>>;
fn to_owned_deep(&self) -> Self::Owned {
ScalarZnx {
data: self.data.as_ref().to_vec(),
n: self.n,
cols: self.cols,
}
}
}
impl<D: Data> ZnxInfos for ScalarZnx<D> {
fn cols(&self) -> usize {
self.cols
}
fn rows(&self) -> usize {
1
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
1
}
}
impl<D: Data> ZnxSliceSize for ScalarZnx<D> {
fn sl(&self) -> usize {
self.n()
}
}
impl<D: Data> DataView for ScalarZnx<D> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data> DataViewMut for ScalarZnx<D> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: DataRef> ZnxView for ScalarZnx<D> {
type Scalar = i64;
}
impl<D: DataMut> ScalarZnx<D> {
pub fn fill_ternary_prob(&mut self, col: usize, prob: f64, source: &mut Source) {
let choices: [i64; 3] = [-1, 0, 1];
let weights: [f64; 3] = [prob / 2.0, 1.0 - prob, prob / 2.0];
let dist: WeightedIndex<f64> = WeightedIndex::new(weights).unwrap();
self.at_mut(col, 0)
.iter_mut()
.for_each(|x: &mut i64| *x = choices[dist.sample(source)]);
}
pub fn fill_ternary_hw(&mut self, col: usize, hw: usize, source: &mut Source) {
assert!(hw <= self.n());
self.at_mut(col, 0)[..hw]
.iter_mut()
.for_each(|x: &mut i64| *x = (((source.next_u32() & 1) as i64) << 1) - 1);
self.at_mut(col, 0).shuffle(source);
}
pub fn fill_binary_prob(&mut self, col: usize, prob: f64, source: &mut Source) {
let choices: [i64; 2] = [0, 1];
let weights: [f64; 2] = [1.0 - prob, prob];
let dist: WeightedIndex<f64> = WeightedIndex::new(weights).unwrap();
self.at_mut(col, 0)
.iter_mut()
.for_each(|x: &mut i64| *x = choices[dist.sample(source)]);
}
pub fn fill_binary_hw(&mut self, col: usize, hw: usize, source: &mut Source) {
assert!(hw <= self.n());
self.at_mut(col, 0)[..hw]
.iter_mut()
.for_each(|x: &mut i64| *x = (source.next_u32() & 1) as i64);
self.at_mut(col, 0).shuffle(source);
}
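/// Inferred semantics of the sampler below (not stated in this diff): each
/// `block_size` chunk receives at most one coefficient set to 1, with the
/// position drawn from `block_size + 1` equally likely outcomes via
/// `next_u64n` (mask-and-reject), the extra outcome leaving the block zero.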
pub fn fill_binary_block(&mut self, col: usize, block_size: usize, source: &mut Source) {
assert!(self.n().is_multiple_of(block_size));
let max_idx: u64 = (block_size + 1) as u64;
let mask_idx: u64 = (1 << ((u64::BITS - max_idx.leading_zeros()) as u64)) - 1;
for block in self.at_mut(col, 0).chunks_mut(block_size) {
let idx: usize = source.next_u64n(max_idx, mask_idx) as usize;
if idx != block_size {
block[idx] = 1;
}
}
}
}
impl ScalarZnx<Vec<u8>> {
pub fn alloc_bytes(n: usize, cols: usize) -> usize {
n * cols * size_of::<i64>()
}
pub fn alloc(n: usize, cols: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(Self::alloc_bytes(n, cols));
Self { data, n, cols }
}
pub fn from_bytes(n: usize, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::alloc_bytes(n, cols));
Self { data, n, cols }
}
}
impl<D: DataMut> ZnxZero for ScalarZnx<D> {
fn zero(&mut self) {
self.raw_mut().fill(0)
}
fn zero_at(&mut self, i: usize, j: usize) {
self.at_mut(i, j).fill(0);
}
}
impl<D: DataMut> FillUniform for ScalarZnx<D> {
fn fill_uniform(&mut self, source: &mut Source) {
source.fill_bytes(self.data.as_mut());
}
}
impl<D: DataMut> Reset for ScalarZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
}
}
pub type ScalarZnxOwned = ScalarZnx<Vec<u8>>;
impl<D: Data> ScalarZnx<D> {
pub fn from_data(data: D, n: usize, cols: usize) -> Self {
Self { data, n, cols }
}
}
pub trait ScalarZnxToRef {
fn to_ref(&self) -> ScalarZnx<&[u8]>;
}
impl<D: DataRef> ScalarZnxToRef for ScalarZnx<D> {
fn to_ref(&self) -> ScalarZnx<&[u8]> {
ScalarZnx {
data: self.data.as_ref(),
n: self.n,
cols: self.cols,
}
}
}
pub trait ScalarZnxToMut {
fn to_mut(&mut self) -> ScalarZnx<&mut [u8]>;
}
impl<D: DataMut> ScalarZnxToMut for ScalarZnx<D> {
fn to_mut(&mut self) -> ScalarZnx<&mut [u8]> {
ScalarZnx {
data: self.data.as_mut(),
n: self.n,
cols: self.cols,
}
}
}
impl<D: DataRef> ScalarZnx<D> {
pub fn as_vec_znx(&self) -> VecZnx<&[u8]> {
VecZnx {
data: self.data.as_ref(),
n: self.n,
cols: self.cols,
size: 1,
max_size: 1,
}
}
}
impl<D: DataMut> ScalarZnx<D> {
pub fn as_vec_znx_mut(&mut self) -> VecZnx<&mut [u8]> {
VecZnx {
data: self.data.as_mut(),
n: self.n,
cols: self.cols,
size: 1,
max_size: 1,
}
}
}
impl<D: DataMut> ReaderFrom for ScalarZnx<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.n = reader.read_u64::<LittleEndian>()? as usize;
self.cols = reader.read_u64::<LittleEndian>()? as usize;
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
let buf: &mut [u8] = self.data.as_mut();
if buf.len() != len {
return Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
format!("self.data.len()={} != read len={}", buf.len(), len),
));
}
reader.read_exact(&mut buf[..len])?;
Ok(())
}
}
impl<D: DataRef> WriterTo for ScalarZnx<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.n as u64)?;
writer.write_u64::<LittleEndian>(self.cols as u64)?;
let buf: &[u8] = self.data.as_ref();
writer.write_u64::<LittleEndian>(buf.len() as u64)?;
writer.write_all(buf)?;
Ok(())
}
}


@@ -0,0 +1,13 @@
use std::marker::PhantomData;
use crate::layouts::Backend;
pub struct ScratchOwned<B: Backend> {
pub data: Vec<u8>,
pub _phantom: PhantomData<B>,
}
pub struct Scratch<B: Backend> {
pub _phantom: PhantomData<B>,
pub data: [u8],
}
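
Scratch is a dynamically sized type: the unsized [u8] field must come last, and values are only ever handled behind references such as &mut Scratch<B>, presumably carved out of ScratchOwned.data.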


@@ -0,0 +1,9 @@
use std::io::{Read, Result, Write};
pub trait WriterTo {
fn write_to<W: Write>(&self, writer: &mut W) -> Result<()>;
}
pub trait ReaderFrom {
fn read_from<R: Read>(&mut self, reader: &mut R) -> Result<()>;
}


@@ -0,0 +1,32 @@
use rug::{
Float,
float::Round,
ops::{AddAssignRound, DivAssignRound, SubAssignRound},
};
use crate::{
api::ZnxInfos,
layouts::{DataRef, VecZnx},
};
impl<D: DataRef> VecZnx<D> {
pub fn std(&self, basek: usize, col: usize) -> f64 {
let prec: u32 = (self.size() * basek) as u32;
let mut data: Vec<Float> = (0..self.n()).map(|_| Float::with_val(prec, 0)).collect();
self.decode_vec_float(basek, col, &mut data);
// std = sqrt(sum((xi - avg)^2) / n)
let mut avg: Float = Float::with_val(prec, 0);
data.iter().for_each(|x| {
avg.add_assign_round(x, Round::Nearest);
});
avg.div_assign_round(Float::with_val(prec, data.len()), Round::Nearest);
data.iter_mut().for_each(|x| {
x.sub_assign_round(&avg, Round::Nearest);
});
let mut std: Float = Float::with_val(prec, 0);
data.iter().for_each(|x| std += x * x);
std.div_assign_round(Float::with_val(prec, data.len()), Round::Nearest);
std = std.sqrt();
std.to_f64()
}
}
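
A trivial usage sketch (VecZnxOwned and the ZnxZero trait come from the layouts in this commit; an explicitly zeroed column has standard deviation exactly 0):

let mut a = VecZnxOwned::alloc(8, 1, 4);
a.zero(); // ZnxZero
assert_eq!(a.std(16, 0), 0.0);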


@@ -0,0 +1,156 @@
use std::marker::PhantomData;
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, ZnxInfos, ZnxSliceSize, ZnxView},
layouts::{Backend, Data, DataMut, DataRef, ReaderFrom, WriterTo},
oep::SvpPPolAllocBytesImpl,
};
#[derive(PartialEq, Eq)]
pub struct SvpPPol<D: Data, B: Backend> {
pub data: D,
pub n: usize,
pub cols: usize,
pub _phantom: PhantomData<B>,
}
impl<D: Data, B: Backend> ZnxSliceSize for SvpPPol<D, B> {
fn sl(&self) -> usize {
B::layout_prep_word_count() * self.n()
}
}
impl<D: DataRef, B: Backend> ZnxView for SvpPPol<D, B> {
type Scalar = B::ScalarPrep;
}
impl<D: Data, B: Backend> ZnxInfos for SvpPPol<D, B> {
fn cols(&self) -> usize {
self.cols
}
fn rows(&self) -> usize {
1
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
1
}
}
impl<D: Data, B: Backend> DataView for SvpPPol<D, B> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data, B: Backend> DataViewMut for SvpPPol<D, B> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: Data + From<Vec<u8>>, B: Backend> SvpPPol<D, B>
where
B: SvpPPolAllocBytesImpl<B>,
{
pub fn alloc(n: usize, cols: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(B::svp_ppol_alloc_bytes_impl(n, cols));
Self {
data: data.into(),
n,
cols,
_phantom: PhantomData,
}
}
pub fn from_bytes(n: usize, cols: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == B::svp_ppol_alloc_bytes_impl(n, cols));
Self {
data: data.into(),
n,
cols,
_phantom: PhantomData,
}
}
}
pub type SvpPPolOwned<B> = SvpPPol<Vec<u8>, B>;
pub trait SvpPPolToRef<B: Backend> {
fn to_ref(&self) -> SvpPPol<&[u8], B>;
}
impl<D: DataRef, B: Backend> SvpPPolToRef<B> for SvpPPol<D, B> {
fn to_ref(&self) -> SvpPPol<&[u8], B> {
SvpPPol {
data: self.data.as_ref(),
n: self.n,
cols: self.cols,
_phantom: PhantomData,
}
}
}
pub trait SvpPPolToMut<B: Backend> {
fn to_mut(&mut self) -> SvpPPol<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> SvpPPolToMut<B> for SvpPPol<D, B> {
fn to_mut(&mut self) -> SvpPPol<&mut [u8], B> {
SvpPPol {
data: self.data.as_mut(),
n: self.n,
cols: self.cols,
_phantom: PhantomData,
}
}
}
impl<D: Data, B: Backend> SvpPPol<D, B> {
pub fn from_data(data: D, n: usize, cols: usize) -> Self {
Self {
data,
n,
cols,
_phantom: PhantomData,
}
}
}
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
impl<D: DataMut, B: Backend> ReaderFrom for SvpPPol<D, B> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.n = reader.read_u64::<LittleEndian>()? as usize;
self.cols = reader.read_u64::<LittleEndian>()? as usize;
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
let buf: &mut [u8] = self.data.as_mut();
if buf.len() != len {
return Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
format!("self.data.len()={} != read len={}", buf.len(), len),
));
}
reader.read_exact(&mut buf[..len])?;
Ok(())
}
}
impl<D: DataRef, B: Backend> WriterTo for SvpPPol<D, B> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.n as u64)?;
writer.write_u64::<LittleEndian>(self.cols as u64)?;
let buf: &[u8] = self.data.as_ref();
writer.write_u64::<LittleEndian>(buf.len() as u64)?;
writer.write_all(buf)?;
Ok(())
}
}


@@ -0,0 +1,255 @@
use std::fmt;
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, FillUniform, Reset, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Data, DataMut, DataRef, ReaderFrom, ToOwnedDeep, WriterTo},
source::Source,
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rand::RngCore;
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct VecZnx<D: Data> {
pub data: D,
pub n: usize,
pub cols: usize,
pub size: usize,
pub max_size: usize,
}
impl<D: DataRef> ToOwnedDeep for VecZnx<D> {
type Owned = VecZnx<Vec<u8>>;
fn to_owned_deep(&self) -> Self::Owned {
VecZnx {
data: self.data.as_ref().to_vec(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
}
}
}
impl<D: DataRef> fmt::Debug for VecZnx<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: Data> ZnxInfos for VecZnx<D> {
fn cols(&self) -> usize {
self.cols
}
fn rows(&self) -> usize {
1
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
self.size
}
}
impl<D: Data> ZnxSliceSize for VecZnx<D> {
fn sl(&self) -> usize {
self.n() * self.cols()
}
}
impl<D: Data> DataView for VecZnx<D> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data> DataViewMut for VecZnx<D> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: DataRef> ZnxView for VecZnx<D> {
type Scalar = i64;
}
impl VecZnx<Vec<u8>> {
pub fn rsh_scratch_space(n: usize) -> usize {
n * std::mem::size_of::<i64>()
}
}
impl<D: DataMut> ZnxZero for VecZnx<D> {
fn zero(&mut self) {
self.raw_mut().fill(0)
}
fn zero_at(&mut self, i: usize, j: usize) {
self.at_mut(i, j).fill(0);
}
}
impl VecZnx<Vec<u8>> {
pub fn alloc_bytes(n: usize, cols: usize, size: usize) -> usize {
n * cols * size * size_of::<i64>()
}
pub fn alloc(n: usize, cols: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(Self::alloc_bytes(n, cols, size));
Self {
data,
n,
cols,
size,
max_size: size,
}
}
pub fn from_bytes(n: usize, cols: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == Self::alloc_bytes(n, cols, size));
Self {
data,
n,
cols,
size,
max_size: size,
}
}
}
impl<D: Data> VecZnx<D> {
pub fn from_data(data: D, n: usize, cols: usize, size: usize) -> Self {
Self {
data,
n,
cols,
size,
max_size: size,
}
}
}
impl<D: DataRef> fmt::Display for VecZnx<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"VecZnx(n={}, cols={}, size={})",
self.n, self.cols, self.size
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
for (i, &coeff) in coeffs.iter().take(show_count).enumerate() {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
}
if coeffs.len() > max_show {
write!(f, ", ... ({} more)", coeffs.len() - max_show)?;
}
writeln!(f, "]")?;
}
}
Ok(())
}
}
impl<D: DataMut> FillUniform for VecZnx<D> {
fn fill_uniform(&mut self, source: &mut Source) {
source.fill_bytes(self.data.as_mut());
}
}
impl<D: DataMut> Reset for VecZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
self.size = 0;
self.max_size = 0;
}
}
pub type VecZnxOwned = VecZnx<Vec<u8>>;
pub type VecZnxMut<'a> = VecZnx<&'a mut [u8]>;
pub type VecZnxRef<'a> = VecZnx<&'a [u8]>;
pub trait VecZnxToRef {
fn to_ref(&self) -> VecZnx<&[u8]>;
}
impl<D: DataRef> VecZnxToRef for VecZnx<D> {
fn to_ref(&self) -> VecZnx<&[u8]> {
VecZnx {
data: self.data.as_ref(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
}
}
}
pub trait VecZnxToMut {
fn to_mut(&mut self) -> VecZnx<&mut [u8]>;
}
impl<D: DataMut> VecZnxToMut for VecZnx<D> {
fn to_mut(&mut self) -> VecZnx<&mut [u8]> {
VecZnx {
data: self.data.as_mut(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
}
}
}
impl<D: DataMut> ReaderFrom for VecZnx<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.n = reader.read_u64::<LittleEndian>()? as usize;
self.cols = reader.read_u64::<LittleEndian>()? as usize;
self.size = reader.read_u64::<LittleEndian>()? as usize;
self.max_size = reader.read_u64::<LittleEndian>()? as usize;
let len: usize = reader.read_u64::<LittleEndian>()? as usize;
let buf: &mut [u8] = self.data.as_mut();
if buf.len() != len {
return Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
format!("self.data.len()={} != read len={}", buf.len(), len),
));
}
reader.read_exact(&mut buf[..len])?;
Ok(())
}
}
impl<D: DataRef> WriterTo for VecZnx<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.n as u64)?;
writer.write_u64::<LittleEndian>(self.cols as u64)?;
writer.write_u64::<LittleEndian>(self.size as u64)?;
writer.write_u64::<LittleEndian>(self.max_size as u64)?;
let buf: &[u8] = self.data.as_ref();
writer.write_u64::<LittleEndian>(buf.len() as u64)?;
writer.write_all(buf)?;
Ok(())
}
}
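
A hedged round-trip sketch for the WriterTo/ReaderFrom pair above; note that read_from does not resize, so the destination must be pre-allocated with a matching buffer length:

let a = VecZnxOwned::alloc(8, 2, 3);
let mut bytes: Vec<u8> = Vec::new();
a.write_to(&mut bytes).unwrap();
let mut b = VecZnxOwned::alloc(8, 2, 3);
b.read_from(&mut bytes.as_slice()).unwrap();
assert_eq!(a, b);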


@@ -0,0 +1,189 @@
use std::marker::PhantomData;
use rand_distr::num_traits::Zero;
use std::fmt;
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Backend, Data, DataMut, DataRef},
oep::VecZnxBigAllocBytesImpl,
};
#[derive(PartialEq, Eq)]
pub struct VecZnxBig<D: Data, B: Backend> {
pub data: D,
pub n: usize,
pub cols: usize,
pub size: usize,
pub max_size: usize,
pub _phantom: PhantomData<B>,
}
impl<D: Data, B: Backend> ZnxSliceSize for VecZnxBig<D, B> {
fn sl(&self) -> usize {
B::layout_big_word_count() * self.n() * self.cols()
}
}
impl<D: DataRef, B: Backend> ZnxView for VecZnxBig<D, B> {
type Scalar = B::ScalarBig;
}
impl<D: Data, B: Backend> ZnxInfos for VecZnxBig<D, B> {
fn cols(&self) -> usize {
self.cols
}
fn rows(&self) -> usize {
1
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
self.size
}
}
impl<D: Data, B: Backend> DataView for VecZnxBig<D, B> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data, B: Backend> DataViewMut for VecZnxBig<D, B> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: DataMut, B: Backend> ZnxZero for VecZnxBig<D, B>
where
Self: ZnxViewMut,
<Self as ZnxView>::Scalar: Zero + Copy,
{
fn zero(&mut self) {
self.raw_mut().fill(<Self as ZnxView>::Scalar::zero())
}
fn zero_at(&mut self, i: usize, j: usize) {
self.at_mut(i, j).fill(<Self as ZnxView>::Scalar::zero());
}
}
impl<D: DataRef + From<Vec<u8>>, B: Backend> VecZnxBig<D, B>
where
B: VecZnxBigAllocBytesImpl<B>,
{
pub fn alloc(n: usize, cols: usize, size: usize) -> Self {
let data = alloc_aligned::<u8>(B::vec_znx_big_alloc_bytes_impl(n, cols, size));
Self {
data: data.into(),
n,
cols,
size,
max_size: size,
_phantom: PhantomData,
}
}
pub fn from_bytes(n: usize, cols: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == B::vec_znx_big_alloc_bytes_impl(n, cols, size));
Self {
data: data.into(),
n,
cols,
size,
max_size: size,
_phantom: PhantomData,
}
}
}
impl<D: Data, B: Backend> VecZnxBig<D, B> {
pub fn from_data(data: D, n: usize, cols: usize, size: usize) -> Self {
Self {
data,
n,
cols,
size,
max_size: size,
_phantom: PhantomData,
}
}
}
pub type VecZnxBigOwned<B> = VecZnxBig<Vec<u8>, B>;
pub trait VecZnxBigToRef<B: Backend> {
fn to_ref(&self) -> VecZnxBig<&[u8], B>;
}
impl<D: DataRef, B: Backend> VecZnxBigToRef<B> for VecZnxBig<D, B> {
fn to_ref(&self) -> VecZnxBig<&[u8], B> {
VecZnxBig {
data: self.data.as_ref(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
_phantom: std::marker::PhantomData,
}
}
}
pub trait VecZnxBigToMut<B: Backend> {
fn to_mut(&mut self) -> VecZnxBig<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> VecZnxBigToMut<B> for VecZnxBig<D, B> {
fn to_mut(&mut self) -> VecZnxBig<&mut [u8], B> {
VecZnxBig {
data: self.data.as_mut(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
_phantom: std::marker::PhantomData,
}
}
}
impl<D: DataRef, B: Backend> fmt::Display for VecZnxBig<D, B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"VecZnxBig(n={}, cols={}, size={})",
self.n, self.cols, self.size
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
for (i, &coeff) in coeffs.iter().take(show_count).enumerate() {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
}
if coeffs.len() > max_show {
write!(f, ", ... ({} more)", coeffs.len() - max_show)?;
}
writeln!(f, "]")?;
}
}
Ok(())
}
}


@@ -0,0 +1,206 @@
use std::{fmt, marker::PhantomData};
use rand_distr::num_traits::Zero;
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero},
layouts::{Backend, Data, DataMut, DataRef, VecZnxBig},
oep::VecZnxBigAllocBytesImpl,
};
#[derive(PartialEq, Eq)]
pub struct VecZnxDft<D: Data, B: Backend> {
pub data: D,
pub n: usize,
pub cols: usize,
pub size: usize,
pub max_size: usize,
pub _phantom: PhantomData<B>,
}
impl<D: Data, B: Backend> ZnxSliceSize for VecZnxDft<D, B> {
fn sl(&self) -> usize {
B::layout_prep_word_count() * self.n() * self.cols()
}
}
impl<D: DataRef, B: Backend> ZnxView for VecZnxDft<D, B> {
type Scalar = B::ScalarPrep;
}
impl<D: Data, B: Backend> VecZnxDft<D, B> {
pub fn into_big(self) -> VecZnxBig<D, B> {
VecZnxBig::<D, B>::from_data(self.data, self.n, self.cols, self.size)
}
}
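// Note: `into_big` reinterprets the same buffer as a big-layout vector; this
// matches `alloc` below sizing the DFT layout via `vec_znx_big_alloc_bytes_impl`,
// which appears to assume the prep and big layouts occupy the same byte count.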
impl<D: Data, B: Backend> ZnxInfos for VecZnxDft<D, B> {
fn cols(&self) -> usize {
self.cols
}
fn rows(&self) -> usize {
1
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
self.size
}
}
impl<D: Data, B: Backend> DataView for VecZnxDft<D, B> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data, B: Backend> DataViewMut for VecZnxDft<D, B> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: DataRef, B: Backend> VecZnxDft<D, B> {
pub fn max_size(&self) -> usize {
self.max_size
}
}
impl<D: DataMut, B: Backend> VecZnxDft<D, B> {
pub fn set_size(&mut self, size: usize) {
assert!(size <= self.max_size);
self.size = size
}
}
impl<D: DataMut, B: Backend> ZnxZero for VecZnxDft<D, B>
where
Self: ZnxViewMut,
<Self as ZnxView>::Scalar: Zero + Copy,
{
fn zero(&mut self) {
self.raw_mut().fill(<Self as ZnxView>::Scalar::zero())
}
fn zero_at(&mut self, i: usize, j: usize) {
self.at_mut(i, j).fill(<Self as ZnxView>::Scalar::zero());
}
}
impl<D: DataRef + From<Vec<u8>>, B: Backend> VecZnxDft<D, B>
where
B: VecZnxBigAllocBytesImpl<B>,
{
pub fn alloc(n: usize, cols: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned::<u8>(B::vec_znx_big_alloc_bytes_impl(n, cols, size));
Self {
data: data.into(),
n,
cols,
size,
max_size: size,
_phantom: PhantomData,
}
}
pub fn from_bytes(n: usize, cols: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == B::vec_znx_big_alloc_bytes_impl(n, cols, size));
Self {
data: data.into(),
n,
cols,
size,
max_size: size,
_phantom: PhantomData,
}
}
}
pub type VecZnxDftOwned<B> = VecZnxDft<Vec<u8>, B>;
impl<D: Data, B: Backend> VecZnxDft<D, B> {
pub fn from_data(data: D, n: usize, cols: usize, size: usize) -> Self {
Self {
data,
n,
cols,
size,
max_size: size,
_phantom: PhantomData,
}
}
}
pub trait VecZnxDftToRef<B: Backend> {
fn to_ref(&self) -> VecZnxDft<&[u8], B>;
}
impl<D: DataRef, B: Backend> VecZnxDftToRef<B> for VecZnxDft<D, B> {
fn to_ref(&self) -> VecZnxDft<&[u8], B> {
VecZnxDft {
data: self.data.as_ref(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
_phantom: std::marker::PhantomData,
}
}
}
pub trait VecZnxDftToMut<B: Backend> {
fn to_mut(&mut self) -> VecZnxDft<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> VecZnxDftToMut<B> for VecZnxDft<D, B> {
fn to_mut(&mut self) -> VecZnxDft<&mut [u8], B> {
VecZnxDft {
data: self.data.as_mut(),
n: self.n,
cols: self.cols,
size: self.size,
max_size: self.max_size,
_phantom: std::marker::PhantomData,
}
}
}
impl<D: DataRef, B: Backend> fmt::Display for VecZnxDft<D, B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"VecZnxDft(n={}, cols={}, size={})",
self.n, self.cols, self.size
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
for (i, &coeff) in coeffs.iter().take(show_count).enumerate() {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
}
if coeffs.len() > max_show {
write!(f, ", ... ({} more)", coeffs.len() - max_show)?;
}
writeln!(f, "]")?;
}
}
Ok(())
}
}


@@ -0,0 +1,151 @@
use std::marker::PhantomData;
use crate::{
alloc_aligned,
api::{DataView, DataViewMut, ZnxInfos, ZnxView},
layouts::{Backend, Data, DataMut, DataRef},
oep::VmpPMatAllocBytesImpl,
};
#[derive(PartialEq, Eq)]
pub struct VmpPMat<D: Data, B: Backend> {
data: D,
n: usize,
size: usize,
rows: usize,
cols_in: usize,
cols_out: usize,
_phantom: PhantomData<B>,
}
impl<D: DataRef, B: Backend> ZnxView for VmpPMat<D, B> {
type Scalar = B::ScalarPrep;
}
impl<D: Data, B: Backend> ZnxInfos for VmpPMat<D, B> {
fn cols(&self) -> usize {
self.cols_in
}
fn rows(&self) -> usize {
self.rows
}
fn n(&self) -> usize {
self.n
}
fn size(&self) -> usize {
self.size
}
}
impl<D: Data, B: Backend> DataView for VmpPMat<D, B> {
type D = D;
fn data(&self) -> &Self::D {
&self.data
}
}
impl<D: Data, B: Backend> DataViewMut for VmpPMat<D, B> {
fn data_mut(&mut self) -> &mut Self::D {
&mut self.data
}
}
impl<D: Data, B: Backend> VmpPMat<D, B> {
pub fn cols_in(&self) -> usize {
self.cols_in
}
pub fn cols_out(&self) -> usize {
self.cols_out
}
}
impl<D: DataRef + From<Vec<u8>>, B: Backend> VmpPMat<D, B>
where
B: VmpPMatAllocBytesImpl<B>,
{
pub fn alloc(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
let data: Vec<u8> = alloc_aligned(B::vmp_pmat_alloc_bytes_impl(
n, rows, cols_in, cols_out, size,
));
Self {
data: data.into(),
n,
size,
rows,
cols_in,
cols_out,
_phantom: PhantomData,
}
}
pub fn from_bytes(n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
let data: Vec<u8> = bytes.into();
assert!(data.len() == B::vmp_pmat_alloc_bytes_impl(n, rows, cols_in, cols_out, size));
Self {
data: data.into(),
n,
size,
rows,
cols_in,
cols_out,
_phantom: PhantomData,
}
}
}
pub type VmpPMatOwned<B> = VmpPMat<Vec<u8>, B>;
pub type VmpPMatRef<'a, B> = VmpPMat<&'a [u8], B>;
pub trait VmpPMatToRef<B: Backend> {
fn to_ref(&self) -> VmpPMat<&[u8], B>;
}
impl<D: DataRef, B: Backend> VmpPMatToRef<B> for VmpPMat<D, B> {
fn to_ref(&self) -> VmpPMat<&[u8], B> {
VmpPMat {
data: self.data.as_ref(),
n: self.n,
rows: self.rows,
cols_in: self.cols_in,
cols_out: self.cols_out,
size: self.size,
_phantom: std::marker::PhantomData,
}
}
}
pub trait VmpPMatToMut<B: Backend> {
fn to_mut(&mut self) -> VmpPMat<&mut [u8], B>;
}
impl<D: DataMut, B: Backend> VmpPMatToMut<B> for VmpPMat<D, B> {
fn to_mut(&mut self) -> VmpPMat<&mut [u8], B> {
VmpPMat {
data: self.data.as_mut(),
n: self.n,
rows: self.rows,
cols_in: self.cols_in,
cols_out: self.cols_out,
size: self.size,
_phantom: std::marker::PhantomData,
}
}
}
impl<D: Data, B: Backend> VmpPMat<D, B> {
pub fn from_data(data: D, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> Self {
Self {
data,
n,
rows,
cols_in,
cols_out,
size,
_phantom: PhantomData,
}
}
}
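
VmpPMat mirrors the MatZnx shape (rows x cols_in blocks of cols_out columns) but stores backend-prepared ScalarPrep words, presumably the prepared-matrix operand of the vmp_apply path exercised by the new test mentioned in the commit message.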