mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 21:26:41 +01:00)
Remove Zn (replaced by VecZnx), add more cross-base2k ops & tests
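For orientation, a minimal sketch of what the replacement means at the byte level. `Zn::bytes_of` in the deleted layout below is `n * cols * size` i64 words, and `VecZnx::bytes_of` takes the same `(n, cols, size)` parameters, so a buffer shaped for the removed `Zn` presumably maps onto a `VecZnx` of the same shape; the helper here is ours, for illustration only.

```rust
// Illustration only: the i64-limb buffer size behind the removed Zn layout
// (n * cols * size limbs), which a same-shape VecZnx is expected to reuse.
fn limb_bytes(n: usize, cols: usize, size: usize) -> usize {
    n * cols * size * std::mem::size_of::<i64>()
}

fn main() {
    // A buffer formerly sized by Zn::bytes_of(32, 2, 3) holds 32 * 2 * 3 limbs.
    assert_eq!(limb_bytes(32, 2, 3), 32 * 2 * 3 * 8);
}
```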
@@ -6,7 +6,6 @@ mod vec_znx;
 mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
-mod zn;
 
 pub use convolution::*;
 pub use module::*;
@@ -16,4 +15,3 @@ pub use vec_znx::*;
 pub use vec_znx_big::*;
 pub use vec_znx_dft::*;
 pub use vmp_pmat::*;
-pub use zn::*;

@@ -1,6 +1,6 @@
 use crate::{
     api::{ModuleN, SvpPPolBytesOf, VecZnxBigBytesOf, VecZnxDftBytesOf, VmpPMatBytesOf},
-    layouts::{Backend, MatZnx, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, Zn},
+    layouts::{Backend, MatZnx, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
 };
 
 /// Allocates a new [crate::layouts::ScratchOwned] of `size` aligned bytes.
@@ -69,11 +69,6 @@ where
         (SvpPPol::from_data(take_slice, module.n(), cols), rem_slice)
     }
 
-    fn take_zn(&mut self, n: usize, cols: usize, size: usize) -> (Zn<&mut [u8]>, &mut Self) {
-        let (take_slice, rem_slice) = self.take_slice(Zn::bytes_of(n, cols, size));
-        (Zn::from_data(take_slice, n, cols, size), rem_slice)
-    }
-
     fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
         let (take_slice, rem_slice) = self.take_slice(VecZnx::bytes_of(n, cols, size));
         (VecZnx::from_data(take_slice, n, cols, size), rem_slice)

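Each `take_*` helper above follows the same pattern: reserve `T::bytes_of(n, cols, size)` bytes from the scratch buffer, wrap them with `T::from_data(...)`, and hand back the remainder. A standalone miniature of that split, with illustrative names and sizes rather than the crate's API:

```rust
// Split a mutable byte buffer into a reserved prefix (to be reinterpreted
// as a layout) and the remaining scratch space.
fn take_bytes(scratch: &mut [u8], nbytes: usize) -> (&mut [u8], &mut [u8]) {
    assert!(scratch.len() >= nbytes, "scratch too small");
    scratch.split_at_mut(nbytes)
}

fn main() {
    let mut scratch = vec![0u8; 1 << 10];
    // e.g. reserve room for n=8, cols=1, size=2 worth of i64 limbs:
    let nbytes = 8 * 1 * 2 * std::mem::size_of::<i64>();
    let (taken, rest) = take_bytes(&mut scratch, nbytes);
    assert_eq!(taken.len(), nbytes);
    assert_eq!(rest.len(), 1024 - nbytes);
}
```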
@@ -1,58 +0,0 @@
-use crate::{
-    layouts::{Backend, Scratch, ZnToMut},
-    reference::zn::zn_normalize_tmp_bytes,
-    source::Source,
-};
-
-pub trait ZnNormalizeTmpBytes {
-    fn zn_normalize_tmp_bytes(&self, n: usize) -> usize {
-        zn_normalize_tmp_bytes(n)
-    }
-}
-
-pub trait ZnNormalizeInplace<B: Backend> {
-    /// Normalizes the selected column of `a`.
-    fn zn_normalize_inplace<R>(&self, n: usize, base2k: usize, res: &mut R, res_col: usize, scratch: &mut Scratch<B>)
-    where
-        R: ZnToMut;
-}
-
-pub trait ZnFillUniform {
-    /// Fills the first `size` size with uniform values in \[-2^{base2k-1}, 2^{base2k-1}\]
-    fn zn_fill_uniform<R>(&self, n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
-    where
-        R: ZnToMut;
-}
-
-#[allow(clippy::too_many_arguments)]
-pub trait ZnFillNormal {
-    fn zn_fill_normal<R>(
-        &self,
-        n: usize,
-        base2k: usize,
-        res: &mut R,
-        res_col: usize,
-        k: usize,
-        source: &mut Source,
-        sigma: f64,
-        bound: f64,
-    ) where
-        R: ZnToMut;
-}
-
-#[allow(clippy::too_many_arguments)]
-pub trait ZnAddNormal {
-    /// Adds a discrete normal vector scaled by 2^{-k} with the provided standard deviation and bounded to \[-bound, bound\].
-    fn zn_add_normal<R>(
-        &self,
-        n: usize,
-        base2k: usize,
-        res: &mut R,
-        res_col: usize,
-        k: usize,
-        source: &mut Source,
-        sigma: f64,
-        bound: f64,
-    ) where
-        R: ZnToMut;
-}

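The `ZnFillUniform` contract above asks for limbs drawn uniformly from the centered range \[-2^{base2k-1}, 2^{base2k-1}\]. A self-contained sketch of one way to produce such a value, mirroring the mask-and-sign-extend trick used by the `FillUniform` impl further down in this commit; the helper and its RNG argument are ours, not the crate's `Source` API:

```rust
// Draw a centered base2k-bit value: mask base2k random bits, then
// sign-extend them back down (assumes 0 < base2k < 64).
fn sample_centered(base2k: usize, mut next_u64: impl FnMut() -> u64) -> i64 {
    let r = next_u64() & ((1u64 << base2k) - 1);
    ((r << (64 - base2k)) as i64) >> (64 - base2k)
}

fn main() {
    // 0xFFF (all 12 bits set) sign-extends to -1; 0x7FF stays 2047.
    assert_eq!(sample_centered(12, || 0xFFF), -1);
    assert_eq!(sample_centered(12, || 0x7FF), 2047);
}
```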
@@ -5,4 +5,3 @@ mod vec_znx;
 mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
-mod zn;

@@ -1,81 +0,0 @@
-use crate::{
-    api::{ZnAddNormal, ZnFillNormal, ZnFillUniform, ZnNormalizeInplace, ZnNormalizeTmpBytes},
-    layouts::{Backend, Module, Scratch, ZnToMut},
-    oep::{ZnAddNormalImpl, ZnFillNormalImpl, ZnFillUniformImpl, ZnNormalizeInplaceImpl, ZnNormalizeTmpBytesImpl},
-    source::Source,
-};
-
-impl<B> ZnNormalizeTmpBytes for Module<B>
-where
-    B: Backend + ZnNormalizeTmpBytesImpl<B>,
-{
-    fn zn_normalize_tmp_bytes(&self, n: usize) -> usize {
-        B::zn_normalize_tmp_bytes_impl(n)
-    }
-}
-
-impl<B> ZnNormalizeInplace<B> for Module<B>
-where
-    B: Backend + ZnNormalizeInplaceImpl<B>,
-{
-    fn zn_normalize_inplace<A>(&self, n: usize, base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
-    where
-        A: ZnToMut,
-    {
-        B::zn_normalize_inplace_impl(n, base2k, a, a_col, scratch)
-    }
-}
-
-impl<B> ZnFillUniform for Module<B>
-where
-    B: Backend + ZnFillUniformImpl<B>,
-{
-    fn zn_fill_uniform<R>(&self, n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
-    where
-        R: ZnToMut,
-    {
-        B::zn_fill_uniform_impl(n, base2k, res, res_col, source);
-    }
-}
-
-impl<B> ZnFillNormal for Module<B>
-where
-    B: Backend + ZnFillNormalImpl<B>,
-{
-    fn zn_fill_normal<R>(
-        &self,
-        n: usize,
-        base2k: usize,
-        res: &mut R,
-        res_col: usize,
-        k: usize,
-        source: &mut Source,
-        sigma: f64,
-        bound: f64,
-    ) where
-        R: ZnToMut,
-    {
-        B::zn_fill_normal_impl(n, base2k, res, res_col, k, source, sigma, bound);
-    }
-}
-
-impl<B> ZnAddNormal for Module<B>
-where
-    B: Backend + ZnAddNormalImpl<B>,
-{
-    fn zn_add_normal<R>(
-        &self,
-        n: usize,
-        base2k: usize,
-        res: &mut R,
-        res_col: usize,
-        k: usize,
-        source: &mut Source,
-        sigma: f64,
-        bound: f64,
-    ) where
-        R: ZnToMut,
-    {
-        B::zn_add_normal_impl(n, base2k, res, res_col, k, source, sigma, bound);
-    }
-}

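A self-contained miniature of the forwarding pattern in the deleted impls above (all names illustrative): the public trait is implemented once for `Module<B>` and simply delegates to a backend-level `*Impl` trait, which is the role the `Zn*Impl` extension points played.

```rust
// Miniature of the Module<B> -> backend forwarding pattern.
struct Module<B> {
    _backend: std::marker::PhantomData<B>,
}

// Backend-side extension point.
trait FillUniformImpl {
    fn fill_uniform_impl(buf: &mut [i64]);
}

// Public API.
trait FillUniform {
    fn fill_uniform(&self, buf: &mut [i64]);
}

// One blanket impl forwards every call to whichever backend B is plugged in.
impl<B: FillUniformImpl> FillUniform for Module<B> {
    fn fill_uniform(&self, buf: &mut [i64]) {
        B::fill_uniform_impl(buf)
    }
}

struct RefBackend;

impl FillUniformImpl for RefBackend {
    fn fill_uniform_impl(buf: &mut [i64]) {
        buf.fill(1);
    }
}

fn main() {
    let module: Module<RefBackend> = Module {
        _backend: std::marker::PhantomData,
    };
    let mut buf = [0i64; 4];
    module.fill_uniform(&mut buf);
    assert_eq!(buf, [1, 1, 1, 1]);
}
```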
@@ -2,7 +2,7 @@ use itertools::izip;
 use rug::{Assign, Float};
 
 use crate::{
-    layouts::{DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef, Zn, ZnToMut, ZnToRef, ZnxInfos, ZnxView, ZnxViewMut},
+    layouts::{DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
     reference::znx::{
         ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStepInplace, ZnxRef, ZnxZero,
         get_carry_i128, get_digit_i128, znx_zero_ref,
@@ -245,90 +245,6 @@ impl<D: DataRef> VecZnx<D> {
     }
 }
 
-impl<D: DataMut> Zn<D> {
-    pub fn encode_i64(&mut self, base2k: usize, k: usize, data: i64) {
-        let size: usize = k.div_ceil(base2k);
-
-        #[cfg(debug_assertions)]
-        {
-            let a: Zn<&mut [u8]> = self.to_mut();
-            assert!(
-                size <= a.size(),
-                "invalid argument k.div_ceil(base2k)={} > a.size()={}",
-                size,
-                a.size()
-            );
-        }
-
-        let mut a: Zn<&mut [u8]> = self.to_mut();
-        let a_size = a.size();
-
-        for j in 0..a_size {
-            a.at_mut(0, j)[0] = 0
-        }
-
-        a.at_mut(0, size - 1)[0] = data;
-
-        let mut carry: Vec<i64> = vec![0i64; 1];
-        let k_rem: usize = (base2k - (k % base2k)) % base2k;
-
-        for j in (0..size).rev() {
-            let slice = &mut a.at_mut(0, j)[..1];
-
-            if j == size - 1 {
-                ZnxRef::znx_normalize_first_step_inplace(base2k, k_rem, slice, &mut carry);
-            } else if j == 0 {
-                ZnxRef::znx_normalize_final_step_inplace(base2k, k_rem, slice, &mut carry);
-            } else {
-                ZnxRef::znx_normalize_middle_step_inplace(base2k, k_rem, slice, &mut carry);
-            }
-        }
-    }
-}
-
-impl<D: DataRef> Zn<D> {
-    pub fn decode_i64(&self, base2k: usize, k: usize) -> i64 {
-        let a: Zn<&[u8]> = self.to_ref();
-        let size: usize = k.div_ceil(base2k);
-        let mut res: i64 = 0;
-        let rem: usize = base2k - (k % base2k);
-        (0..size).for_each(|j| {
-            let x: i64 = a.at(0, j)[0];
-            if j == size - 1 && rem != base2k {
-                let k_rem: usize = (base2k - rem) % base2k;
-                let scale: i64 = 1 << rem as i64;
-                res = (res << k_rem) + div_round(x, scale);
-            } else {
-                res = (res << base2k) + x;
-            }
-        });
-        res
-    }
-
-    pub fn decode_float(&self, base2k: usize) -> Float {
-        let a: Zn<&[u8]> = self.to_ref();
-        let size: usize = a.size();
-        let prec: u32 = (base2k * size) as u32;
-
-        // 2^{base2k}
-        let base: Float = Float::with_val(prec, (1 << base2k) as f64);
-        let mut res: Float = Float::with_val(prec, (1 << base2k) as f64);
-
-        // y[i] = sum x[j][i] * 2^{-base2k*j}
-        (0..size).for_each(|i| {
-            if i == 0 {
-                res.assign(a.at(0, size - i - 1)[0]);
-                res /= &base;
-            } else {
-                res += Float::with_val(prec, a.at(0, size - i - 1)[0]);
-                res /= &base;
-            }
-        });
-
-        res
-    }
-}
-
 #[inline]
 pub fn div_round(a: i64, b: i64) -> i64 {
     assert!(b != 0, "division by zero");

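For intuition, a self-contained round-trip of the limb convention used by `encode_i64`/`decode_i64` above: limbs are stored most-significant first and recomposed with shifts by `base2k`. The `div_round` body is truncated in this hunk, so the rounding helper below assumes round-half-away-from-zero semantics and is ours, not the crate's.

```rust
// Recompose an i64 from base2k limbs, most-significant limb first,
// mirroring the shift-and-add loop of decode_i64 (full final limb case).
fn decode(base2k: usize, limbs: &[i64]) -> i64 {
    limbs.iter().fold(0i64, |acc, &x| (acc << base2k) + x)
}

// Assumed rounding helper (round half away from zero); illustration only.
fn div_round(a: i64, b: i64) -> i64 {
    assert!(b != 0, "division by zero");
    let (q, r) = (a / b, a % b);
    if 2 * r.abs() >= b.abs() { q + r.signum() * b.signum() } else { q }
}

fn main() {
    let base2k = 12;
    let limbs = [3i64, -57, 801];
    assert_eq!(decode(base2k, &limbs), (((3 << 12) - 57) << 12) + 801);
    // Rounded division as used for a partially filled final limb.
    assert_eq!(div_round(7, 2), 4);
    assert_eq!(div_round(-7, 2), -4);
}
```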
@@ -10,7 +10,6 @@ mod vec_znx;
 mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
-mod zn;
 mod znx_base;
 
 pub use mat_znx::*;
@@ -24,7 +23,6 @@ pub use vec_znx::*;
 pub use vec_znx_big::*;
 pub use vec_znx_dft::*;
 pub use vmp_pmat::*;
-pub use zn::*;
 pub use znx_base::*;
 
 pub trait Data = PartialEq + Eq + Sized + Default;

@@ -1,273 +0,0 @@
-use std::{
-    fmt,
-    hash::{DefaultHasher, Hasher},
-};
-
-use crate::{
-    alloc_aligned,
-    layouts::{
-        Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, ToOwnedDeep, WriterTo, ZnxInfos,
-        ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
-    },
-    source::Source,
-};
-
-use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
-use rand::RngCore;
-
-#[repr(C)]
-#[derive(PartialEq, Eq, Clone, Copy, Hash)]
-pub struct Zn<D: Data> {
-    pub data: D,
-    pub n: usize,
-    pub cols: usize,
-    pub size: usize,
-    pub max_size: usize,
-}
-
-impl<D: DataRef> DigestU64 for Zn<D> {
-    fn digest_u64(&self) -> u64 {
-        let mut h: DefaultHasher = DefaultHasher::new();
-        h.write(self.data.as_ref());
-        h.write_usize(self.n);
-        h.write_usize(self.cols);
-        h.write_usize(self.size);
-        h.write_usize(self.max_size);
-        h.finish()
-    }
-}
-
-impl<D: DataRef> ToOwnedDeep for Zn<D> {
-    type Owned = Zn<Vec<u8>>;
-    fn to_owned_deep(&self) -> Self::Owned {
-        Zn {
-            data: self.data.as_ref().to_vec(),
-            n: self.n,
-            cols: self.cols,
-            size: self.size,
-            max_size: self.max_size,
-        }
-    }
-}
-
-impl<D: DataRef> fmt::Debug for Zn<D> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl<D: Data> ZnxInfos for Zn<D> {
-    fn cols(&self) -> usize {
-        self.cols
-    }
-
-    fn rows(&self) -> usize {
-        1
-    }
-
-    fn n(&self) -> usize {
-        self.n
-    }
-
-    fn size(&self) -> usize {
-        self.size
-    }
-}
-
-impl<D: Data> ZnxSliceSize for Zn<D> {
-    fn sl(&self) -> usize {
-        self.n() * self.cols()
-    }
-}
-
-impl<D: Data> DataView for Zn<D> {
-    type D = D;
-    fn data(&self) -> &Self::D {
-        &self.data
-    }
-}
-
-impl<D: Data> DataViewMut for Zn<D> {
-    fn data_mut(&mut self) -> &mut Self::D {
-        &mut self.data
-    }
-}
-
-impl<D: DataRef> ZnxView for Zn<D> {
-    type Scalar = i64;
-}
-
-impl Zn<Vec<u8>> {
-    pub fn rsh_tmp_bytes(n: usize) -> usize {
-        n * std::mem::size_of::<i64>()
-    }
-}
-
-impl<D: DataMut> ZnxZero for Zn<D> {
-    fn zero(&mut self) {
-        self.raw_mut().fill(0)
-    }
-    fn zero_at(&mut self, i: usize, j: usize) {
-        self.at_mut(i, j).fill(0);
-    }
-}
-
-impl Zn<Vec<u8>> {
-    pub fn bytes_of(n: usize, cols: usize, size: usize) -> usize {
-        n * cols * size * size_of::<i64>()
-    }
-
-    pub fn alloc(n: usize, cols: usize, size: usize) -> Self {
-        let data: Vec<u8> = alloc_aligned::<u8>(Self::bytes_of(n, cols, size));
-        Self {
-            data,
-            n,
-            cols,
-            size,
-            max_size: size,
-        }
-    }
-
-    pub fn from_bytes<Scalar: Sized>(n: usize, cols: usize, size: usize, bytes: impl Into<Vec<u8>>) -> Self {
-        let data: Vec<u8> = bytes.into();
-        assert!(data.len() == Self::bytes_of(n, cols, size));
-        Self {
-            data,
-            n,
-            cols,
-            size,
-            max_size: size,
-        }
-    }
-}
-
-impl<D: Data> Zn<D> {
-    pub fn from_data(data: D, n: usize, cols: usize, size: usize) -> Self {
-        Self {
-            data,
-            n,
-            cols,
-            size,
-            max_size: size,
-        }
-    }
-}
-
-impl<D: DataRef> fmt::Display for Zn<D> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        writeln!(
-            f,
-            "Zn(n={}, cols={}, size={})",
-            self.n, self.cols, self.size
-        )?;
-
-        for col in 0..self.cols {
-            writeln!(f, "Column {col}:")?;
-            for size in 0..self.size {
-                let coeffs = self.at(col, size);
-                write!(f, " Size {size}: [")?;
-
-                let max_show = 100;
-                let show_count = coeffs.len().min(max_show);
-
-                for (i, &coeff) in coeffs.iter().take(show_count).enumerate() {
-                    if i > 0 {
-                        write!(f, ", ")?;
-                    }
-                    write!(f, "{coeff}")?;
-                }
-
-                if coeffs.len() > max_show {
-                    write!(f, ", ... ({} more)", coeffs.len() - max_show)?;
-                }
-
-                writeln!(f, "]")?;
-            }
-        }
-        Ok(())
-    }
-}
-
-impl<D: DataMut> FillUniform for Zn<D> {
-    fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
-        match log_bound {
-            64 => source.fill_bytes(self.data.as_mut()),
-            0 => panic!("invalid log_bound, cannot be zero"),
-            _ => {
-                let mask: u64 = (1u64 << log_bound) - 1;
-                for x in self.raw_mut().iter_mut() {
-                    let r = source.next_u64() & mask;
-                    *x = ((r << (64 - log_bound)) as i64) >> (64 - log_bound);
-                }
-            }
-        }
-    }
-}
-
-pub type ZnOwned = Zn<Vec<u8>>;
-pub type ZnMut<'a> = Zn<&'a mut [u8]>;
-pub type ZnRef<'a> = Zn<&'a [u8]>;
-
-pub trait ZnToRef {
-    fn to_ref(&self) -> Zn<&[u8]>;
-}
-
-impl<D: DataRef> ZnToRef for Zn<D> {
-    fn to_ref(&self) -> Zn<&[u8]> {
-        Zn {
-            data: self.data.as_ref(),
-            n: self.n,
-            cols: self.cols,
-            size: self.size,
-            max_size: self.max_size,
-        }
-    }
-}
-
-pub trait ZnToMut {
-    fn to_mut(&mut self) -> Zn<&mut [u8]>;
-}
-
-impl<D: DataMut> ZnToMut for Zn<D> {
-    fn to_mut(&mut self) -> Zn<&mut [u8]> {
-        Zn {
-            data: self.data.as_mut(),
-            n: self.n,
-            cols: self.cols,
-            size: self.size,
-            max_size: self.max_size,
-        }
-    }
-}
-
-impl<D: DataMut> ReaderFrom for Zn<D> {
-    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
-        self.n = reader.read_u64::<LittleEndian>()? as usize;
-        self.cols = reader.read_u64::<LittleEndian>()? as usize;
-        self.size = reader.read_u64::<LittleEndian>()? as usize;
-        self.max_size = reader.read_u64::<LittleEndian>()? as usize;
-        let len: usize = reader.read_u64::<LittleEndian>()? as usize;
-        let buf: &mut [u8] = self.data.as_mut();
-        if buf.len() != len {
-            return Err(std::io::Error::new(
-                std::io::ErrorKind::UnexpectedEof,
-                format!("self.data.len()={} != read len={}", buf.len(), len),
-            ));
-        }
-        reader.read_exact(&mut buf[..len])?;
-        Ok(())
-    }
-}
-
-impl<D: DataRef> WriterTo for Zn<D> {
-    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
-        writer.write_u64::<LittleEndian>(self.n as u64)?;
-        writer.write_u64::<LittleEndian>(self.cols as u64)?;
-        writer.write_u64::<LittleEndian>(self.size as u64)?;
-        writer.write_u64::<LittleEndian>(self.max_size as u64)?;
-        let buf: &[u8] = self.data.as_ref();
-        writer.write_u64::<LittleEndian>(buf.len() as u64)?;
-        writer.write_all(buf)?;
-        Ok(())
-    }
-}

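The `WriterTo`/`ReaderFrom` pair above fixes a simple framing: five little-endian u64 header words (`n`, `cols`, `size`, `max_size`, data length) followed by the raw limb bytes. A standalone byte-level check of that framing, using only `std` and no crate types:

```rust
// Emit the header in the same field order write_to uses above: each field
// as a little-endian u64, with the raw data bytes appended afterwards.
fn write_header(out: &mut Vec<u8>, n: u64, cols: u64, size: u64, max_size: u64, len: u64) {
    for v in [n, cols, size, max_size, len] {
        out.extend_from_slice(&v.to_le_bytes());
    }
}

fn main() {
    // n=8, cols=2, size=3 with i64 limbs => 8 * 2 * 3 * 8 data bytes.
    let data = vec![0u8; 8 * 2 * 3 * 8];
    let mut out = Vec::new();
    write_header(&mut out, 8, 2, 3, 3, data.len() as u64);
    out.extend_from_slice(&data);
    assert_eq!(out.len(), 5 * 8 + data.len());
}
```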
@@ -5,7 +5,6 @@ mod vec_znx;
 mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
-mod zn;
 
 pub use module::*;
 pub use scratch::*;
@@ -14,4 +13,3 @@ pub use vec_znx::*;
 pub use vec_znx_big::*;
 pub use vec_znx_dft::*;
 pub use vmp_pmat::*;
-pub use zn::*;

@@ -1,70 +0,0 @@
-use crate::{
-    layouts::{Backend, Scratch, ZnToMut},
-    source::Source,
-};
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See [poulpy-backend/src/cpu_fft64_ref/zn.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/zn.rs) for reference implementation.
-/// * See [crate::api::ZnNormalizeTmpBytes] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait ZnNormalizeTmpBytesImpl<B: Backend> {
-    fn zn_normalize_tmp_bytes_impl(n: usize) -> usize;
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See [poulpy-backend/src/cpu_fft64_ref/zn.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/zn.rs) for reference implementation.
-/// * See [crate::api::ZnNormalizeInplace] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait ZnNormalizeInplaceImpl<B: Backend> {
-    fn zn_normalize_inplace_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, scratch: &mut Scratch<B>)
-    where
-        R: ZnToMut;
-}
-
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See [poulpy-backend/src/cpu_fft64_ref/zn.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/zn.rs) for reference implementation.
-/// * See [crate::api::ZnFillUniform] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait ZnFillUniformImpl<B: Backend> {
-    fn zn_fill_uniform_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
-    where
-        R: ZnToMut;
-}
-
-#[allow(clippy::too_many_arguments)]
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See [poulpy-backend/src/cpu_fft64_ref/zn.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/zn.rs) for reference implementation.
-/// * See [crate::api::ZnFillNormal] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait ZnFillNormalImpl<B: Backend> {
-    fn zn_fill_normal_impl<R>(
-        n: usize,
-        base2k: usize,
-        res: &mut R,
-        res_col: usize,
-        k: usize,
-        source: &mut Source,
-        sigma: f64,
-        bound: f64,
-    ) where
-        R: ZnToMut;
-}
-
-#[allow(clippy::too_many_arguments)]
-/// # THIS TRAIT IS AN OPEN EXTENSION POINT (unsafe)
-/// * See [poulpy-backend/src/cpu_fft64_ref/zn.rs](https://github.com/phantomzone-org/poulpy/blob/main/poulpy-backend/src/cpu_fft64_ref/zn.rs) for reference implementation.
-/// * See [crate::api::ZnAddNormal] for corresponding public API.
-/// # Safety [crate::doc::backend_safety] for safety contract.
-pub unsafe trait ZnAddNormalImpl<B: Backend> {
-    fn zn_add_normal_impl<R>(
-        n: usize,
-        base2k: usize,
-        res: &mut R,
-        res_col: usize,
-        k: usize,
-        source: &mut Source,
-        sigma: f64,
-        bound: f64,
-    ) where
-        R: ZnToMut;
-}

@@ -1,4 +1,3 @@
 pub mod fft64;
 pub mod vec_znx;
-pub mod zn;
 pub mod znx;

@@ -53,6 +53,8 @@ pub fn vec_znx_normalize<R, A, ZNXARI>(
     let res_size: usize = res.size();
     let a_size: usize = a.size();
 
+    let carry = &mut carry[..2 * n];
+
     if res_base2k == a_base2k {
         if a_size > res_size {
             for j in (res_size..a_size).rev() {

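The wider scratch slice above (`carry[..2 * n]`) and the `res_base2k == a_base2k` branch concern normalizing between vectors with different limb widths. Purely as an illustration of what a cross-base2k normalize must achieve, and not the library's streaming algorithm, the same integer can be re-expressed with another limb width; this sketch goes through i128 for clarity:

```rust
// Recompose an integer from base2k limbs (most-significant first).
fn decode(base2k: usize, limbs: &[i64]) -> i128 {
    limbs.iter().fold(0i128, |acc, &x| (acc << base2k) + x as i128)
}

// Re-express the integer with `size` limbs of `base2k` bits, keeping each
// digit centered and pushing the remainder upward as a carry.
fn encode(base2k: usize, size: usize, mut v: i128) -> Vec<i64> {
    let base = 1i128 << base2k;
    let half = base >> 1;
    let mut out = vec![0i64; size];
    for limb in out.iter_mut().rev() {
        let mut digit = v.rem_euclid(base);
        if digit >= half {
            digit -= base; // centered digit in [-2^(base2k-1), 2^(base2k-1))
        }
        *limb = digit as i64;
        v = (v - digit) >> base2k;
    }
    out
}

fn main() {
    let x = decode(12, &[3, -57, 801]); // value held in base-2^12 limbs
    let y = encode(18, 2, x);           // same value, base-2^18 limbs
    assert_eq!(decode(18, &y), x);
}
```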
@@ -1,5 +0,0 @@
-mod normalization;
-mod sampling;
-
-pub use normalization::*;
-pub use sampling::*;

@@ -1,72 +0,0 @@
-use crate::{
-    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, ZnNormalizeInplace, ZnNormalizeTmpBytes},
-    layouts::{Backend, Module, ScratchOwned, Zn, ZnToMut, ZnxInfos, ZnxView, ZnxViewMut},
-    reference::znx::{ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStepInplace, ZnxRef},
-    source::Source,
-};
-
-pub fn zn_normalize_tmp_bytes(n: usize) -> usize {
-    n * size_of::<i64>()
-}
-
-pub fn zn_normalize_inplace<R, ARI>(n: usize, base2k: usize, res: &mut R, res_col: usize, carry: &mut [i64])
-where
-    R: ZnToMut,
-    ARI: ZnxNormalizeFirstStepInplace + ZnxNormalizeFinalStepInplace + ZnxNormalizeMiddleStepInplace,
-{
-    let mut res: Zn<&mut [u8]> = res.to_mut();
-
-    #[cfg(debug_assertions)]
-    {
-        assert_eq!(carry.len(), res.n());
-    }
-
-    let res_size: usize = res.size();
-
-    for j in (0..res_size).rev() {
-        let out = &mut res.at_mut(res_col, j)[..n];
-
-        if j == res_size - 1 {
-            ARI::znx_normalize_first_step_inplace(base2k, 0, out, carry);
-        } else if j == 0 {
-            ARI::znx_normalize_final_step_inplace(base2k, 0, out, carry);
-        } else {
-            ARI::znx_normalize_middle_step_inplace(base2k, 0, out, carry);
-        }
-    }
-}
-
-pub fn test_zn_normalize_inplace<B: Backend>(module: &Module<B>)
-where
-    Module<B>: ZnNormalizeInplace<B> + ZnNormalizeTmpBytes,
-    ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
-{
-    let mut source: Source = Source::new([0u8; 32]);
-    let cols: usize = 2;
-    let base2k: usize = 12;
-
-    let n = 33;
-
-    let mut carry: Vec<i64> = vec![0i64; zn_normalize_tmp_bytes(n)];
-
-    let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(module.zn_normalize_tmp_bytes(module.n()));
-
-    for res_size in [1, 2, 6, 11] {
-        let mut res_0: Zn<Vec<u8>> = Zn::alloc(n, cols, res_size);
-        let mut res_1: Zn<Vec<u8>> = Zn::alloc(n, cols, res_size);
-
-        res_0
-            .raw_mut()
-            .iter_mut()
-            .for_each(|x| *x = source.next_i32() as i64);
-        res_1.raw_mut().copy_from_slice(res_0.raw());
-
-        // Reference
-        for i in 0..cols {
-            zn_normalize_inplace::<_, ZnxRef>(n, base2k, &mut res_0, i, &mut carry);
-            module.zn_normalize_inplace(n, base2k, &mut res_1, i, scratch.borrow());
-        }
-
-        assert_eq!(res_0.raw(), res_1.raw());
-    }
-}

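For intuition, a self-contained model of what the first/middle/final normalize steps achieve together in `zn_normalize_inplace` above (semantics assumed from the surrounding code, not the crate's exact helpers): walk the limbs from least significant (index `size - 1`) to most significant, keep a centered digit modulo 2^base2k in each limb, and push the remainder upward as a carry.

```rust
// Balanced base-2^base2k normalization over a single column of limbs,
// stored most-significant first (so iteration runs in reverse).
fn normalize_limbs(base2k: usize, limbs: &mut [i64]) {
    let base = 1i64 << base2k;
    let half = base >> 1;
    let mut carry = 0i64;
    for x in limbs.iter_mut().rev() {
        let v = *x + carry;
        // centered digit in [-2^(base2k-1), 2^(base2k-1))
        let mut digit = v.rem_euclid(base);
        if digit >= half {
            digit -= base;
        }
        carry = (v - digit) >> base2k;
        *x = digit;
    }
}

fn main() {
    let base2k = 12;
    // 5000 = 1 * 4096 + 904, so the limbs normalize to [1, 904].
    let mut limbs = [0i64, 5000];
    normalize_limbs(base2k, &mut limbs);
    assert_eq!(limbs, [1, 904]);
}
```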
@@ -1,75 +0,0 @@
-use crate::{
-    layouts::{Zn, ZnToMut, ZnxInfos, ZnxViewMut},
-    reference::znx::{znx_add_normal_f64_ref, znx_fill_normal_f64_ref, znx_fill_uniform_ref},
-    source::Source,
-};
-
-pub fn zn_fill_uniform<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
-where
-    R: ZnToMut,
-{
-    let mut res: Zn<&mut [u8]> = res.to_mut();
-    for j in 0..res.size() {
-        znx_fill_uniform_ref(base2k, &mut res.at_mut(res_col, j)[..n], source)
-    }
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn zn_fill_normal<R>(
-    n: usize,
-    base2k: usize,
-    res: &mut R,
-    res_col: usize,
-    k: usize,
-    source: &mut Source,
-    sigma: f64,
-    bound: f64,
-) where
-    R: ZnToMut,
-{
-    let mut res: Zn<&mut [u8]> = res.to_mut();
-    assert!(
-        (bound.log2().ceil() as i64) < 64,
-        "invalid bound: ceil(log2(bound))={} > 63",
-        (bound.log2().ceil() as i64)
-    );
-
-    let limb: usize = k.div_ceil(base2k) - 1;
-    let scale: f64 = (1 << ((limb + 1) * base2k - k)) as f64;
-    znx_fill_normal_f64_ref(
-        &mut res.at_mut(res_col, limb)[..n],
-        sigma * scale,
-        bound * scale,
-        source,
-    )
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn zn_add_normal<R>(
-    n: usize,
-    base2k: usize,
-    res: &mut R,
-    res_col: usize,
-    k: usize,
-    source: &mut Source,
-    sigma: f64,
-    bound: f64,
-) where
-    R: ZnToMut,
-{
-    let mut res: Zn<&mut [u8]> = res.to_mut();
-    assert!(
-        (bound.log2().ceil() as i64) < 64,
-        "invalid bound: ceil(log2(bound))={} > 63",
-        (bound.log2().ceil() as i64)
-    );
-
-    let limb: usize = k.div_ceil(base2k) - 1;
-    let scale: f64 = (1 << ((limb + 1) * base2k - k)) as f64;
-    znx_add_normal_f64_ref(
-        &mut res.at_mut(res_col, limb)[..n],
-        sigma * scale,
-        bound * scale,
-        source,
-    )
-}

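A worked check of the limb and scale arithmetic shared by `zn_fill_normal` and `zn_add_normal` above: noise targeted at precision `k` lands in limb `k.div_ceil(base2k) - 1` and is pre-scaled by `2^((limb + 1) * base2k - k)` so it sits at the intended bit position. The function name here is ours.

```rust
// Compute the target limb index and the sigma/bound scaling factor used
// when writing noise at precision k into base2k limbs.
fn noise_placement(base2k: usize, k: usize) -> (usize, f64) {
    let limb = k.div_ceil(base2k) - 1;
    let scale = (1u64 << ((limb + 1) * base2k - k)) as f64;
    (limb, scale)
}

fn main() {
    // base2k = 12, k = 30: the noise goes into limb 2, scaled by 2^6.
    assert_eq!(noise_placement(12, 30), (2, 64.0));
    // k aligned to a limb boundary needs no rescaling.
    assert_eq!(noise_placement(12, 24), (1, 1.0));
}
```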