updated sampling traits

This commit is contained in:
Jean-Philippe Bossuat
2025-05-06 11:30:55 +02:00
parent ffa363804b
commit 08e81f50c9
22 changed files with 251 additions and 2778 deletions

View File

@@ -1,7 +1,7 @@
use base2k::{
Encoding, FFT64, Module, Sampling, Scalar, ScalarAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScratchOwned,
VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps,
VecZnxOps, ZnxInfos,
AddNormal, Encoding, FFT64, FillUniform, Module, Scalar, ScalarAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps,
ScratchOwned, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc,
VecZnxDftOps, VecZnxOps, ZnxInfos,
};
use itertools::izip;
use sampling::source::Source;
@@ -36,7 +36,7 @@ fn main() {
);
// Fill the second column with random values: ct = (0, a)
module.fill_uniform(log_base2k, &mut ct, 1, ct_size, &mut source);
ct.fill_uniform(log_base2k, 1, ct_size, &mut source);
let mut buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct_size);
@@ -88,9 +88,8 @@ fn main() {
// Add noise to ct[0]
// ct[0] <- ct[0] + e
module.add_normal(
ct.add_normal(
log_base2k,
&mut ct,
0, // Selects the first column of ct (ct[0])
log_base2k * ct_size, // Scaling of the noise: 2^{-log_base2k * limbs}
&mut source,

View File

@@ -151,7 +151,7 @@ impl<D: AsRef<[u8]>> MatZnxDft<D, FFT64> {
}
}
pub type MatZnxDftAllocOwned<B> = MatZnxDft<Vec<u8>, B>;
pub type MatZnxDftOwned<B> = MatZnxDft<Vec<u8>, B>;
pub trait MatZnxDftToRef<B: Backend> {
fn to_ref(&self) -> MatZnxDft<&[u8], B>;

View File

@@ -2,7 +2,7 @@ use crate::ffi::vec_znx_dft::vec_znx_dft_t;
use crate::ffi::vmp;
use crate::znx_base::{ZnxInfos, ZnxView, ZnxViewMut};
use crate::{
Backend, FFT64, MatZnxDft, MatZnxDftAllocOwned, MatZnxDftToMut, MatZnxDftToRef, Module, Scratch, VecZnxDft, VecZnxDftToMut,
Backend, FFT64, MatZnxDft, MatZnxDftOwned, MatZnxDftToMut, MatZnxDftToRef, Module, Scratch, VecZnxDft, VecZnxDftToMut,
VecZnxDftToRef,
};
@@ -13,7 +13,7 @@ pub trait MatZnxDftAlloc<B: Backend> {
///
/// * `rows`: number of rows (number of [VecZnxDft]).
/// * `size`: number of size (number of size of each [VecZnxDft]).
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftAllocOwned<B>;
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftOwned<B>;
fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize;
@@ -24,7 +24,7 @@ pub trait MatZnxDftAlloc<B: Backend> {
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> MatZnxDftAllocOwned<B>;
) -> MatZnxDftOwned<B>;
}
pub trait MatZnxDftScratch {
@@ -103,11 +103,11 @@ pub trait MatZnxDftOps<BACKEND: Backend> {
impl<B: Backend> MatZnxDftAlloc<B> for Module<B> {
fn bytes_of_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
MatZnxDftAllocOwned::bytes_of(self, rows, cols_in, cols_out, size)
MatZnxDftOwned::bytes_of(self, rows, cols_in, cols_out, size)
}
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftAllocOwned<B> {
MatZnxDftAllocOwned::new(self, rows, cols_in, cols_out, size)
fn new_mat_znx_dft(&self, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> MatZnxDftOwned<B> {
MatZnxDftOwned::new(self, rows, cols_in, cols_out, size)
}
fn new_mat_znx_dft_from_bytes(
@@ -117,8 +117,8 @@ impl<B: Backend> MatZnxDftAlloc<B> for Module<B> {
cols_out: usize,
size: usize,
bytes: Vec<u8>,
) -> MatZnxDftAllocOwned<B> {
MatZnxDftAllocOwned::new_from_bytes(self, rows, cols_in, cols_out, size, bytes)
) -> MatZnxDftOwned<B> {
MatZnxDftOwned::new_from_bytes(self, rows, cols_in, cols_out, size, bytes)
}
}
@@ -305,8 +305,8 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
#[cfg(test)]
mod tests {
use crate::{
Encoding, FFT64, MatZnxDft, MatZnxDftOps, Module, Sampling, ScratchOwned, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc,
VecZnxBigOps, VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, ZnxInfos, ZnxView, ZnxViewMut,
Encoding, FFT64, FillUniform, MatZnxDft, MatZnxDftOps, Module, ScratchOwned, VecZnx, VecZnxAlloc, VecZnxBig,
VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, ZnxInfos, ZnxView, ZnxViewMut,
};
use sampling::source::Source;
@@ -329,7 +329,7 @@ mod tests {
for row_i in 0..mat_rows {
let mut source: Source = Source::new([0u8; 32]);
(0..mat_cols_out).for_each(|col_out| {
module.fill_uniform(log_base2k, &mut a, col_out, mat_size, &mut source);
a.fill_uniform(log_base2k, col_out, mat_size, &mut source);
module.vec_znx_dft(&mut a_dft, col_out, &a, col_out);
});
module.vmp_prepare_row(&mut mat, row_i, col_in, &a_dft);

View File

@@ -1,47 +1,53 @@
use crate::znx_base::ZnxViewMut;
use crate::{Backend, Module, VecZnx, VecZnxToMut};
use crate::{FFT64, VecZnx, VecZnxBig, VecZnxBigToMut, VecZnxToMut};
use rand_distr::{Distribution, Normal};
use sampling::source::Source;
pub trait Sampling {
pub trait FillUniform {
/// Fills the first `size` size with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
fn fill_uniform<A>(&self, log_base2k: usize, a: &mut A, col_i: usize, size: usize, source: &mut Source)
where
A: VecZnxToMut;
fn fill_uniform(&mut self, log_base2k: usize, col_i: usize, size: usize, source: &mut Source);
}
/// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
fn add_dist_f64<A, D: Distribution<f64>>(
&self,
pub trait FillDistF64 {
fn fill_dist_f64<D: Distribution<f64>>(
&mut self,
log_base2k: usize,
a: &mut A,
col_i: usize,
log_k: usize,
source: &mut Source,
dist: D,
bound: f64,
) where
A: VecZnxToMut;
);
}
/// Adds a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
fn add_normal<A>(
&self,
pub trait AddDistF64 {
/// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
fn add_dist_f64<D: Distribution<f64>>(
&mut self,
log_base2k: usize,
a: &mut A,
col_i: usize,
log_k: usize,
source: &mut Source,
sigma: f64,
dist: D,
bound: f64,
) where
A: VecZnxToMut;
);
}
impl<B: Backend> Sampling for Module<B> {
fn fill_uniform<A>(&self, log_base2k: usize, a: &mut A, col_i: usize, size: usize, source: &mut Source)
where
A: VecZnxToMut,
{
let mut a: VecZnx<&mut [u8]> = a.to_mut();
pub trait FillNormal {
/// Fills the selected column with a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
fn fill_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64);
}
pub trait AddNormal {
/// Adds a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
fn add_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64);
}
impl<T> FillUniform for VecZnx<T>
where
VecZnx<T>: VecZnxToMut,
{
fn fill_uniform(&mut self, log_base2k: usize, col_i: usize, size: usize, source: &mut Source) {
let mut a: VecZnx<&mut [u8]> = self.to_mut();
let base2k: u64 = 1 << log_base2k;
let mask: u64 = base2k - 1;
let base2k_half: i64 = (base2k >> 1) as i64;
@@ -51,20 +57,65 @@ impl<B: Backend> Sampling for Module<B> {
.for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
})
}
}
fn add_dist_f64<A, D: Distribution<f64>>(
&self,
impl<T> FillDistF64 for VecZnx<T>
where
VecZnx<T>: VecZnxToMut,
{
fn fill_dist_f64<D: Distribution<f64>>(
&mut self,
log_base2k: usize,
a: &mut A,
col_i: usize,
log_k: usize,
source: &mut Source,
dist: D,
bound: f64,
) where
A: VecZnxToMut,
{
let mut a: VecZnx<&mut [u8]> = a.to_mut();
) {
let mut a: VecZnx<&mut [u8]> = self.to_mut();
assert!(
(bound.log2().ceil() as i64) < 64,
"invalid bound: ceil(log2(bound))={} > 63",
(bound.log2().ceil() as i64)
);
let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
let log_base2k_rem: usize = log_k % log_base2k;
if log_base2k_rem != 0 {
a.at_mut(col_i, limb).iter_mut().for_each(|a| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source)
}
*a = (dist_f64.round() as i64) << log_base2k_rem;
});
} else {
a.at_mut(col_i, limb).iter_mut().for_each(|a| {
let mut dist_f64: f64 = dist.sample(source);
while dist_f64.abs() > bound {
dist_f64 = dist.sample(source)
}
*a = dist_f64.round() as i64
});
}
}
}
impl<T> AddDistF64 for VecZnx<T>
where
VecZnx<T>: VecZnxToMut,
{
fn add_dist_f64<D: Distribution<f64>>(
&mut self,
log_base2k: usize,
col_i: usize,
log_k: usize,
source: &mut Source,
dist: D,
bound: f64,
) {
let mut a: VecZnx<&mut [u8]> = self.to_mut();
assert!(
(bound.log2().ceil() as i64) < 64,
"invalid bound: ceil(log2(bound))={} > 63",
@@ -92,14 +143,149 @@ impl<B: Backend> Sampling for Module<B> {
});
}
}
}
fn add_normal<A>(&self, log_base2k: usize, a: &mut A, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64)
where
A: VecZnxToMut,
{
impl<T> FillNormal for VecZnx<T>
where
VecZnx<T>: VecZnxToMut,
{
fn fill_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
self.fill_dist_f64(
log_base2k,
col_i,
log_k,
source,
Normal::new(0.0, sigma).unwrap(),
bound,
);
}
}
impl<T> AddNormal for VecZnx<T>
where
VecZnx<T>: VecZnxToMut,
{
fn add_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
self.add_dist_f64(
log_base2k,
col_i,
log_k,
source,
Normal::new(0.0, sigma).unwrap(),
bound,
);
}
}
impl<T> FillDistF64 for VecZnxBig<T, FFT64>
where
    VecZnxBig<T, FFT64>: VecZnxBigToMut<FFT64>,
{
    /// Overwrites the limb holding the 2^{-log_k} scale of column `col_i` with
    /// samples drawn from `dist`, rejection-sampled into \[-bound, bound\].
    fn fill_dist_f64<D: Distribution<f64>>(
        &mut self,
        log_base2k: usize,
        col_i: usize,
        log_k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) {
        let mut data: VecZnxBig<&mut [u8], FFT64> = self.to_mut();
        assert!(
            (bound.log2().ceil() as i64) < 64,
            "invalid bound: ceil(log2(bound))={} > 63",
            (bound.log2().ceil() as i64)
        );
        // Limb index covering precision log_k, and the intra-limb shift needed
        // when log_k is not a multiple of log_base2k.
        let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
        let shift: usize = log_k % log_base2k;
        // Rejection sampling: redraw until the sample lands inside [-bound, bound].
        let mut draw = || loop {
            let candidate: f64 = dist.sample(source);
            if candidate.abs() <= bound {
                break candidate.round() as i64;
            }
        };
        // A shift of zero is the identity, so both alignment cases collapse here.
        data.at_mut(col_i, limb)
            .iter_mut()
            .for_each(|coeff| *coeff = draw() << shift);
    }
}
impl<T> AddDistF64 for VecZnxBig<T, FFT64>
where
    VecZnxBig<T, FFT64>: VecZnxBigToMut<FFT64>,
{
    /// Adds samples drawn from `dist`, rejection-sampled into \[-bound, bound\],
    /// onto the limb holding the 2^{-log_k} scale of column `col_i`.
    fn add_dist_f64<D: Distribution<f64>>(
        &mut self,
        log_base2k: usize,
        col_i: usize,
        log_k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) {
        let mut data: VecZnxBig<&mut [u8], FFT64> = self.to_mut();
        assert!(
            (bound.log2().ceil() as i64) < 64,
            "invalid bound: ceil(log2(bound))={} > 63",
            (bound.log2().ceil() as i64)
        );
        // Limb index covering precision log_k, and the intra-limb shift needed
        // when log_k is not a multiple of log_base2k.
        let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
        let shift: usize = log_k % log_base2k;
        // Rejection sampling: redraw until the sample lands inside [-bound, bound].
        let mut draw = || loop {
            let candidate: f64 = dist.sample(source);
            if candidate.abs() <= bound {
                break candidate.round() as i64;
            }
        };
        // A shift of zero is the identity, so both alignment cases collapse here.
        data.at_mut(col_i, limb)
            .iter_mut()
            .for_each(|coeff| *coeff += draw() << shift);
    }
}
impl<T> FillNormal for VecZnxBig<T, FFT64>
where
VecZnxBig<T, FFT64>: VecZnxBigToMut<FFT64>,
{
fn fill_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
self.fill_dist_f64(
log_base2k,
col_i,
log_k,
source,
Normal::new(0.0, sigma).unwrap(),
bound,
);
}
}
impl<T> AddNormal for VecZnxBig<T, FFT64>
where
VecZnxBig<T, FFT64>: VecZnxBigToMut<FFT64>,
{
fn add_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
self.add_dist_f64(
log_base2k,
a,
col_i,
log_k,
source,
@@ -111,14 +297,16 @@ impl<B: Backend> Sampling for Module<B> {
#[cfg(test)]
mod tests {
use super::Sampling;
use std::fmt::Display;
use super::{AddNormal, FillUniform};
use crate::vec_znx_ops::*;
use crate::znx_base::*;
use crate::{FFT64, Module, Stats, VecZnx};
use sampling::source::Source;
#[test]
fn fill_uniform() {
fn vec_znx_fill_uniform() {
let n: usize = 4096;
let module: Module<FFT64> = Module::<FFT64>::new(n);
let log_base2k: usize = 17;
@@ -129,7 +317,7 @@ mod tests {
let one_12_sqrt: f64 = 0.28867513459481287;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
module.fill_uniform(log_base2k, &mut a, col_i, size, &mut source);
a.fill_uniform(log_base2k, col_i, size, &mut source);
(0..cols).for_each(|col_j| {
if col_j != col_i {
(0..size).for_each(|limb_i| {
@@ -149,7 +337,7 @@ mod tests {
}
#[test]
fn add_normal() {
fn vec_znx_add_normal() {
let n: usize = 4096;
let module: Module<FFT64> = Module::<FFT64>::new(n);
let log_base2k: usize = 17;
@@ -163,7 +351,7 @@ mod tests {
let k_f64: f64 = (1u64 << log_k as u64) as f64;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
module.add_normal(log_base2k, &mut a, col_i, log_k, &mut source, sigma, bound);
a.add_normal(log_base2k, col_i, log_k, &mut source, sigma, bound);
(0..cols).for_each(|col_j| {
if col_j != col_i {
(0..size).for_each(|limb_i| {