mirror of
https://github.com/arnaucube/poulpy.git
synced 2026-02-10 13:16:44 +01:00
Ref. + AVX code & generic tests + benches (#85)
This commit is contained in:
committed by
GitHub
parent
99b9e3e10e
commit
56dbd29c59
177
poulpy-hal/src/reference/vec_znx/add.rs
Normal file
177
poulpy-hal/src/reference/vec_znx/add.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{ModuleNew, VecZnxAdd, VecZnxAddInplace},
|
||||
layouts::{Backend, FillUniform, Module, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxAdd, ZnxAddInplace, ZnxCopy, ZnxZero},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
pub fn vec_znx_add<R, A, B, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
B: VecZnxToRef,
|
||||
ZNXARI: ZnxAdd + ZnxCopy + ZnxZero,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let b: VecZnx<&[u8]> = b.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(a.n(), res.n());
|
||||
assert_eq!(b.n(), res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size: usize = a.size();
|
||||
let b_size: usize = b.size();
|
||||
|
||||
if a_size <= b_size {
|
||||
let sum_size: usize = a_size.min(res_size);
|
||||
let cpy_size: usize = b_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_add(res.at_mut(res_col, j), a.at(a_col, j), b.at(b_col, j));
|
||||
}
|
||||
|
||||
for j in sum_size..cpy_size {
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), b.at(b_col, j));
|
||||
}
|
||||
|
||||
for j in cpy_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
} else {
|
||||
let sum_size: usize = b_size.min(res_size);
|
||||
let cpy_size: usize = a_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_add(res.at_mut(res_col, j), a.at(a_col, j), b.at(b_col, j));
|
||||
}
|
||||
|
||||
for j in sum_size..cpy_size {
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in cpy_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_add_inplace<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxAddInplace,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(a.n(), res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size: usize = a.size();
|
||||
|
||||
let sum_size: usize = a_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_add_inplace(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_add<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxAdd + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_add::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxAdd + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut c: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_add(&mut c, i, &a, i, &b, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_add_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxAddInplace + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_add_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxAddInplace + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_add_inplace(&mut b, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
57
poulpy-hal/src/reference/vec_znx/add_scalar.rs
Normal file
57
poulpy-hal/src/reference/vec_znx/add_scalar.rs
Normal file
@@ -0,0 +1,57 @@
|
||||
use crate::{
|
||||
layouts::{ScalarZnx, ScalarZnxToRef, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxAdd, ZnxAddInplace, ZnxCopy, ZnxZero},
|
||||
};
|
||||
|
||||
pub fn vec_znx_add_scalar<R, A, B, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize, b_limb: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: ScalarZnxToRef,
|
||||
B: VecZnxToRef,
|
||||
ZNXARI: ZnxAdd + ZnxCopy + ZnxZero,
|
||||
{
|
||||
let a: ScalarZnx<&[u8]> = a.to_ref();
|
||||
let b: VecZnx<&[u8]> = b.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
let min_size: usize = b.size().min(res.size());
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(
|
||||
b_limb < min_size,
|
||||
"b_limb: {} > min_size: {}",
|
||||
b_limb,
|
||||
min_size
|
||||
);
|
||||
}
|
||||
|
||||
for j in 0..min_size {
|
||||
if j == b_limb {
|
||||
ZNXARI::znx_add(res.at_mut(res_col, j), a.at(a_col, 0), b.at(b_col, j));
|
||||
} else {
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), b.at(b_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
for j in min_size..res.size() {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_add_scalar_inplace<R, A, ZNXARI>(res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: ScalarZnxToRef,
|
||||
ZNXARI: ZnxAddInplace,
|
||||
{
|
||||
let a: ScalarZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(res_limb < res.size());
|
||||
}
|
||||
|
||||
ZNXARI::znx_add_inplace(res.at_mut(res_col, res_limb), a.at(a_col, 0));
|
||||
}
|
||||
150
poulpy-hal/src/reference/vec_znx/automorphism.rs
Normal file
150
poulpy-hal/src/reference/vec_znx/automorphism.rs
Normal file
@@ -0,0 +1,150 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{
|
||||
ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAutomorphism, VecZnxAutomorphismInplace,
|
||||
VecZnxAutomorphismInplaceTmpBytes,
|
||||
},
|
||||
layouts::{Backend, FillUniform, Module, ScratchOwned, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxAutomorphism, ZnxCopy, ZnxZero},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
/// Scratch size (in bytes) required by the in-place automorphism:
/// one temporary row of `n` signed 64-bit coefficients.
pub fn vec_znx_automorphism_inplace_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
pub fn vec_znx_automorphism<R, A, ZNXARI>(p: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxAutomorphism + ZnxZero,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
use crate::layouts::ZnxInfos;
|
||||
|
||||
assert_eq!(a.n(), res.n());
|
||||
}
|
||||
|
||||
let min_size: usize = res.size().min(a.size());
|
||||
|
||||
for j in 0..min_size {
|
||||
ZNXARI::znx_automorphism(p, res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in min_size..res.size() {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_automorphism_inplace<R, ZNXARI>(p: i64, res: &mut R, res_col: usize, tmp: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
ZNXARI: ZnxAutomorphism + ZnxCopy,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(res.n(), tmp.len());
|
||||
}
|
||||
for j in 0..res.size() {
|
||||
ZNXARI::znx_automorphism(p, tmp, res.at(res_col, j));
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), tmp);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_automorphism<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxAutomorphism + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_automorphism::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxAutomorphism + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_automorphism(-7, &mut res, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_automorphism_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxAutomorphismInplace<B> + VecZnxAutomorphismInplaceTmpBytes + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_automorphism_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxAutomorphismInplace<B> + ModuleNew<B> + VecZnxAutomorphismInplaceTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch = ScratchOwned::alloc(module.vec_znx_automorphism_inplace_tmp_bytes());
|
||||
|
||||
// Fill a with random i64
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_automorphism_inplace(-7, &mut res, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
32
poulpy-hal/src/reference/vec_znx/copy.rs
Normal file
32
poulpy-hal/src/reference/vec_znx/copy.rs
Normal file
@@ -0,0 +1,32 @@
|
||||
use crate::{
|
||||
layouts::{VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxCopy, ZnxZero},
|
||||
};
|
||||
|
||||
pub fn vec_znx_copy<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxCopy + ZnxZero,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(res.n(), a.n())
|
||||
}
|
||||
|
||||
let res_size = res.size();
|
||||
let a_size = a.size();
|
||||
|
||||
let min_size = res_size.min(a_size);
|
||||
|
||||
for j in 0..min_size {
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in min_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
49
poulpy-hal/src/reference/vec_znx/merge_rings.rs
Normal file
49
poulpy-hal/src/reference/vec_znx/merge_rings.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
use crate::{
|
||||
layouts::{VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos},
|
||||
reference::{
|
||||
vec_znx::{vec_znx_rotate_inplace, vec_znx_switch_ring},
|
||||
znx::{ZnxCopy, ZnxRotate, ZnxSwitchRing, ZnxZero},
|
||||
},
|
||||
};
|
||||
|
||||
/// Scratch size (in bytes) required by ring merging: one temporary row of
/// `n` signed 64-bit coefficients, where `n` is the output ring degree.
pub fn vec_znx_merge_rings_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
pub fn vec_znx_merge_rings<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &[A], a_col: usize, tmp: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxCopy + ZnxSwitchRing + ZnxRotate + ZnxZero,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
let (n_out, n_in) = (res.n(), a[0].to_ref().n());
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(tmp.len(), res.n());
|
||||
|
||||
debug_assert!(
|
||||
n_out > n_in,
|
||||
"invalid a: output ring degree should be greater"
|
||||
);
|
||||
a[1..].iter().for_each(|ai| {
|
||||
debug_assert_eq!(
|
||||
ai.to_ref().n(),
|
||||
n_in,
|
||||
"invalid input a: all VecZnx must have the same degree"
|
||||
)
|
||||
});
|
||||
|
||||
assert!(n_out.is_multiple_of(n_in));
|
||||
assert_eq!(a.len(), n_out / n_in);
|
||||
}
|
||||
|
||||
a.iter().for_each(|ai| {
|
||||
vec_znx_switch_ring::<_, _, ZNXARI>(&mut res, res_col, ai, a_col);
|
||||
vec_znx_rotate_inplace::<_, ZNXARI>(-1, &mut res, res_col, tmp);
|
||||
});
|
||||
|
||||
vec_znx_rotate_inplace::<_, ZNXARI>(a.len() as i64, &mut res, res_col, tmp);
|
||||
}
|
||||
31
poulpy-hal/src/reference/vec_znx/mod.rs
Normal file
31
poulpy-hal/src/reference/vec_znx/mod.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
// Reference (portable, backend-agnostic) implementations of the VecZnx
// operations, one submodule per operation family.
mod add;
mod add_scalar;
mod automorphism;
mod copy;
mod merge_rings;
mod mul_xp_minus_one;
mod negate;
mod normalize;
mod rotate;
mod sampling;
mod shift;
mod split_ring;
mod sub;
mod sub_scalar;
mod switch_ring;

// Re-export every submodule flat under `reference::vec_znx::*`.
pub use add::*;
pub use add_scalar::*;
pub use automorphism::*;
pub use copy::*;
pub use merge_rings::*;
pub use mul_xp_minus_one::*;
pub use negate::*;
pub use normalize::*;
pub use rotate::*;
pub use sampling::*;
pub use shift::*;
pub use split_ring::*;
pub use sub::*;
pub use sub_scalar::*;
pub use switch_ring::*;
136
poulpy-hal/src/reference/vec_znx/mul_xp_minus_one.rs
Normal file
136
poulpy-hal/src/reference/vec_znx/mul_xp_minus_one.rs
Normal file
@@ -0,0 +1,136 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{
|
||||
ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace,
|
||||
VecZnxMulXpMinusOneInplaceTmpBytes,
|
||||
},
|
||||
layouts::{Backend, FillUniform, Module, ScratchOwned, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::{
|
||||
vec_znx::{vec_znx_rotate, vec_znx_sub_ab_inplace},
|
||||
znx::{ZnxNegate, ZnxRotate, ZnxSubABInplace, ZnxSubBAInplace, ZnxZero},
|
||||
},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
/// Scratch size (in bytes) required by the in-place (X^p - 1) product:
/// one temporary row of `n` signed 64-bit coefficients.
pub fn vec_znx_mul_xp_minus_one_inplace_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
/// Computes `res = (X^p - 1) * a` column-wise.
///
/// Implemented as two passes over `res`: first `res = X^p * a` (rotation),
/// then `res -= a`. The call order is significant: the subtraction reads the
/// untouched input `a`, so the rotation must be written to `res` first.
pub fn vec_znx_mul_xp_minus_one<R, A, ZNXARI>(p: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
where
    R: VecZnxToMut,
    A: VecZnxToRef,
    ZNXARI: ZnxRotate + ZnxZero + ZnxSubABInplace,
{
    vec_znx_rotate::<_, _, ZNXARI>(p, res, res_col, a, a_col);
    vec_znx_sub_ab_inplace::<_, _, ZNXARI>(res, res_col, a, a_col);
}
pub fn vec_znx_mul_xp_minus_one_inplace<R, ZNXARI>(p: i64, res: &mut R, res_col: usize, tmp: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
ZNXARI: ZnxRotate + ZnxNegate + ZnxSubBAInplace,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(res.n(), tmp.len());
|
||||
}
|
||||
for j in 0..res.size() {
|
||||
ZNXARI::znx_rotate(p, tmp, res.at(res_col, j));
|
||||
ZNXARI::znx_sub_ba_inplace(res.at_mut(res_col, j), tmp);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_mul_xp_minus_one<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxMulXpMinusOne + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_mul_xp_minus_one::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxMulXpMinusOne + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_mul_xp_minus_one(-7, &mut res, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_mul_xp_minus_one_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxMulXpMinusOneInplace<B> + VecZnxMulXpMinusOneInplaceTmpBytes + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_mul_xp_minus_one_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxMulXpMinusOneInplace<B> + ModuleNew<B> + VecZnxMulXpMinusOneInplaceTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch = ScratchOwned::alloc(module.vec_znx_mul_xp_minus_one_inplace_tmp_bytes());
|
||||
|
||||
// Fill a with random i64
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_mul_xp_minus_one_inplace(-7, &mut res, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
131
poulpy-hal/src/reference/vec_znx/negate.rs
Normal file
131
poulpy-hal/src/reference/vec_znx/negate.rs
Normal file
@@ -0,0 +1,131 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{ModuleNew, VecZnxNegate, VecZnxNegateInplace},
|
||||
layouts::{Backend, FillUniform, Module, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxNegate, ZnxNegateInplace, ZnxZero},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
pub fn vec_znx_negate<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxNegate + ZnxZero,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(a.n(), res.n());
|
||||
}
|
||||
|
||||
let min_size: usize = res.size().min(a.size());
|
||||
|
||||
for j in 0..min_size {
|
||||
ZNXARI::znx_negate(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in min_size..res.size() {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_negate_inplace<R, ZNXARI>(res: &mut R, res_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
ZNXARI: ZnxNegateInplace,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
for j in 0..res.size() {
|
||||
ZNXARI::znx_negate_inplace(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_negate<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxNegate + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_negate::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxNegate + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_negate(&mut b, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_negate_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxNegateInplace + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_negate_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxNegateInplace + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_negate_inplace(&mut a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
193
poulpy-hal/src/reference/vec_znx/normalize.rs
Normal file
193
poulpy-hal/src/reference/vec_znx/normalize.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
|
||||
layouts::{Backend, FillUniform, Module, ScratchOwned, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{
|
||||
ZnxNormalizeFinalStep, ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly,
|
||||
ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStep, ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace,
|
||||
ZnxZero,
|
||||
},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
/// Scratch size (in bytes) required by normalization: one carry row of `n`
/// signed 64-bit coefficients.
pub fn vec_znx_normalize_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
pub fn vec_znx_normalize<R, A, ZNXARI>(basek: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, carry: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxZero
|
||||
+ ZnxNormalizeFirstStepCarryOnly
|
||||
+ ZnxNormalizeMiddleStepCarryOnly
|
||||
+ ZnxNormalizeMiddleStep
|
||||
+ ZnxNormalizeFinalStep
|
||||
+ ZnxNormalizeFirstStep,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(carry.len() >= res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size = a.size();
|
||||
|
||||
if a_size > res_size {
|
||||
for j in (res_size..a_size).rev() {
|
||||
if j == a_size - 1 {
|
||||
ZNXARI::znx_normalize_first_step_carry_only(basek, 0, a.at(a_col, j), carry);
|
||||
} else {
|
||||
ZNXARI::znx_normalize_middle_step_carry_only(basek, 0, a.at(a_col, j), carry);
|
||||
}
|
||||
}
|
||||
|
||||
for j in (1..res_size).rev() {
|
||||
ZNXARI::znx_normalize_middle_step(basek, 0, res.at_mut(res_col, j), a.at(a_col, j), carry);
|
||||
}
|
||||
|
||||
ZNXARI::znx_normalize_final_step(basek, 0, res.at_mut(res_col, 0), a.at(a_col, 0), carry);
|
||||
} else {
|
||||
for j in (0..a_size).rev() {
|
||||
if j == a_size - 1 {
|
||||
ZNXARI::znx_normalize_first_step(basek, 0, res.at_mut(res_col, j), a.at(a_col, j), carry);
|
||||
} else if j == 0 {
|
||||
ZNXARI::znx_normalize_final_step(basek, 0, res.at_mut(res_col, j), a.at(a_col, j), carry);
|
||||
} else {
|
||||
ZNXARI::znx_normalize_middle_step(basek, 0, res.at_mut(res_col, j), a.at(a_col, j), carry);
|
||||
}
|
||||
}
|
||||
|
||||
for j in a_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_normalize_inplace<R: VecZnxToMut, ZNXARI>(basek: usize, res: &mut R, res_col: usize, carry: &mut [i64])
|
||||
where
|
||||
ZNXARI: ZnxNormalizeFirstStepInplace + ZnxNormalizeMiddleStepInplace + ZnxNormalizeFinalStepInplace,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(carry.len() >= res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
|
||||
for j in (0..res_size).rev() {
|
||||
if j == res_size - 1 {
|
||||
ZNXARI::znx_normalize_first_step_inplace(basek, 0, res.at_mut(res_col, j), carry);
|
||||
} else if j == 0 {
|
||||
ZNXARI::znx_normalize_final_step_inplace(basek, 0, res.at_mut(res_col, j), carry);
|
||||
} else {
|
||||
ZNXARI::znx_normalize_middle_step_inplace(basek, 0, res.at_mut(res_col, j), carry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_normalize<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxNormalize<B> + ModuleNew<B> + VecZnxNormalizeTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_normalize::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxNormalize<B> + ModuleNew<B> + VecZnxNormalizeTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let basek: usize = 50;
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(module.vec_znx_normalize_tmp_bytes());
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_normalize(basek, &mut res, i, &a, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_normalize_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxNormalizeInplace<B> + ModuleNew<B> + VecZnxNormalizeTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_normalize_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxNormalizeInplace<B> + ModuleNew<B> + VecZnxNormalizeTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let basek: usize = 50;
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
|
||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(module.vec_znx_normalize_tmp_bytes());
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_normalize_inplace(basek, &mut a, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
148
poulpy-hal/src/reference/vec_znx/rotate.rs
Normal file
148
poulpy-hal/src/reference/vec_znx/rotate.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes},
|
||||
layouts::{Backend, FillUniform, Module, ScratchOwned, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxCopy, ZnxRotate, ZnxZero},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
/// Scratch-space requirement, in bytes, of [`vec_znx_rotate_inplace`]:
/// one `i64` per coefficient of a single limb.
pub fn vec_znx_rotate_inplace_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
|
||||
|
||||
pub fn vec_znx_rotate<R, A, ZNXARI>(p: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxRotate + ZnxZero,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(res.n(), a.n())
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size: usize = a.size();
|
||||
|
||||
let min_size: usize = res_size.min(a_size);
|
||||
|
||||
for j in 0..min_size {
|
||||
ZNXARI::znx_rotate(p, res.at_mut(res_col, j), a.at(a_col, j))
|
||||
}
|
||||
|
||||
for j in min_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_rotate_inplace<R, ZNXARI>(p: i64, res: &mut R, res_col: usize, tmp: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
ZNXARI: ZnxRotate + ZnxCopy,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(res.n(), tmp.len());
|
||||
}
|
||||
for j in 0..res.size() {
|
||||
ZNXARI::znx_rotate(p, tmp, res.at(res_col, j));
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), tmp);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_rotate<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxRotate + ModuleNew<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_rotate::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxRotate + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_rotate(-7, &mut res, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_rotate_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxRotateInplace<B> + VecZnxRotateInplaceTmpBytes + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_rotate_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxRotateInplace<B> + ModuleNew<B> + VecZnxRotateInplaceTmpBytes,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch = ScratchOwned::alloc(module.vec_znx_rotate_inplace_tmp_bytes());
|
||||
|
||||
// Fill a with random i64
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_rotate_inplace(-7, &mut res, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
64
poulpy-hal/src/reference/vec_znx/sampling.rs
Normal file
64
poulpy-hal/src/reference/vec_znx/sampling.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use crate::{
|
||||
layouts::{VecZnx, VecZnxToMut, ZnxInfos, ZnxViewMut},
|
||||
reference::znx::{znx_add_normal_f64_ref, znx_fill_normal_f64_ref, znx_fill_uniform_ref},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
pub fn vec_znx_fill_uniform_ref<R>(basek: usize, res: &mut R, res_col: usize, source: &mut Source)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
for j in 0..res.size() {
|
||||
znx_fill_uniform_ref(basek, res.at_mut(res_col, j), source)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_fill_normal_ref<R>(
|
||||
basek: usize,
|
||||
res: &mut R,
|
||||
res_col: usize,
|
||||
k: usize,
|
||||
sigma: f64,
|
||||
bound: f64,
|
||||
source: &mut Source,
|
||||
) where
|
||||
R: VecZnxToMut,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
assert!(
|
||||
(bound.log2().ceil() as i64) < 64,
|
||||
"invalid bound: ceil(log2(bound))={} > 63",
|
||||
(bound.log2().ceil() as i64)
|
||||
);
|
||||
|
||||
let limb: usize = k.div_ceil(basek) - 1;
|
||||
let scale: f64 = (1 << ((limb + 1) * basek - k)) as f64;
|
||||
znx_fill_normal_f64_ref(
|
||||
res.at_mut(res_col, limb),
|
||||
sigma * scale,
|
||||
bound * scale,
|
||||
source,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn vec_znx_add_normal_ref<R>(basek: usize, res: &mut R, res_col: usize, k: usize, sigma: f64, bound: f64, source: &mut Source)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
assert!(
|
||||
(bound.log2().ceil() as i64) < 64,
|
||||
"invalid bound: ceil(log2(bound))={} > 63",
|
||||
(bound.log2().ceil() as i64)
|
||||
);
|
||||
|
||||
let limb: usize = k.div_ceil(basek) - 1;
|
||||
let scale: f64 = (1 << ((limb + 1) * basek - k)) as f64;
|
||||
znx_add_normal_f64_ref(
|
||||
res.at_mut(res_col, limb),
|
||||
sigma * scale,
|
||||
bound * scale,
|
||||
source,
|
||||
)
|
||||
}
|
||||
672
poulpy-hal/src/reference/vec_znx/shift.rs
Normal file
672
poulpy-hal/src/reference/vec_znx/shift.rs
Normal file
@@ -0,0 +1,672 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxLsh, VecZnxLshInplace, VecZnxRsh, VecZnxRshInplace},
|
||||
layouts::{Backend, FillUniform, Module, ScratchOwned, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::{
|
||||
vec_znx::vec_znx_copy,
|
||||
znx::{
|
||||
ZnxCopy, ZnxNormalizeFinalStep, ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly,
|
||||
ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStep, ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace,
|
||||
ZnxZero,
|
||||
},
|
||||
},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
/// Scratch-space requirement, in bytes, of the left-shift routines:
/// one `i64` carry slot per coefficient of a limb.
pub fn vec_znx_lsh_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
|
||||
|
||||
pub fn vec_znx_lsh_inplace<R, ZNXARI>(basek: usize, k: usize, res: &mut R, res_col: usize, carry: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
ZNXARI: ZnxZero
|
||||
+ ZnxCopy
|
||||
+ ZnxNormalizeFirstStepInplace
|
||||
+ ZnxNormalizeMiddleStepInplace
|
||||
+ ZnxNormalizeFirstStepInplace
|
||||
+ ZnxNormalizeFinalStepInplace,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
let n: usize = res.n();
|
||||
let cols: usize = res.cols();
|
||||
let size: usize = res.size();
|
||||
let steps: usize = k / basek;
|
||||
let k_rem: usize = k % basek;
|
||||
|
||||
if steps >= size {
|
||||
for j in 0..size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Inplace shift of limbs by a k/basek
|
||||
if steps > 0 {
|
||||
let start: usize = n * res_col;
|
||||
let end: usize = start + n;
|
||||
let slice_size: usize = n * cols;
|
||||
let res_raw: &mut [i64] = res.raw_mut();
|
||||
|
||||
(0..size - steps).for_each(|j| {
|
||||
let (lhs, rhs) = res_raw.split_at_mut(slice_size * (j + steps));
|
||||
ZNXARI::znx_copy(
|
||||
&mut lhs[start + j * slice_size..end + j * slice_size],
|
||||
&rhs[start..end],
|
||||
);
|
||||
});
|
||||
|
||||
for j in size - steps..size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
// Inplace normalization with left shift of k % basek
|
||||
if !k.is_multiple_of(basek) {
|
||||
for j in (0..size - steps).rev() {
|
||||
if j == size - steps - 1 {
|
||||
ZNXARI::znx_normalize_first_step_inplace(basek, k_rem, res.at_mut(res_col, j), carry);
|
||||
} else if j == 0 {
|
||||
ZNXARI::znx_normalize_final_step_inplace(basek, k_rem, res.at_mut(res_col, j), carry);
|
||||
} else {
|
||||
ZNXARI::znx_normalize_middle_step_inplace(basek, k_rem, res.at_mut(res_col, j), carry);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_lsh<R, A, ZNXARI>(basek: usize, k: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, carry: &mut [i64])
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxZero + ZnxNormalizeFirstStep + ZnxNormalizeMiddleStep + ZnxNormalizeFirstStep + ZnxCopy + ZnxNormalizeFinalStep,
|
||||
{
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size = a.size();
|
||||
let steps: usize = k / basek;
|
||||
let k_rem: usize = k % basek;
|
||||
|
||||
if steps >= res_size.min(a_size) {
|
||||
for j in 0..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let min_size: usize = a_size.min(res_size) - steps;
|
||||
|
||||
// Simply a left shifted normalization of limbs
|
||||
// by k/basek and intra-limb by basek - k%basek
|
||||
if !k.is_multiple_of(basek) {
|
||||
for j in (0..min_size).rev() {
|
||||
if j == min_size - 1 {
|
||||
ZNXARI::znx_normalize_first_step(
|
||||
basek,
|
||||
k_rem,
|
||||
res.at_mut(res_col, j),
|
||||
a.at(a_col, j + steps),
|
||||
carry,
|
||||
);
|
||||
} else if j == 0 {
|
||||
ZNXARI::znx_normalize_final_step(
|
||||
basek,
|
||||
k_rem,
|
||||
res.at_mut(res_col, j),
|
||||
a.at(a_col, j + steps),
|
||||
carry,
|
||||
);
|
||||
} else {
|
||||
ZNXARI::znx_normalize_middle_step(
|
||||
basek,
|
||||
k_rem,
|
||||
res.at_mut(res_col, j),
|
||||
a.at(a_col, j + steps),
|
||||
carry,
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If k % basek = 0, then this is simply a copy.
|
||||
for j in (0..min_size).rev() {
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), a.at(a_col, j + steps));
|
||||
}
|
||||
}
|
||||
|
||||
// Zeroes bottom
|
||||
for j in min_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
/// Scratch-space requirement, in bytes, of the right-shift routines:
/// one `i64` carry slot per coefficient of a limb.
pub fn vec_znx_rsh_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
|
||||
|
||||
/// Right-shifts column `res_col` of `res` by `k` bits in place (base `2^basek`
/// limb layout). When `k % basek != 0`, the shift is decomposed as an extra
/// one-limb right shift followed by a `basek - k_rem` left-shifting
/// normalization pass, so the output comes out normalized.
///
/// `carry` must be a scratch slice of length `res.n()`.
pub fn vec_znx_rsh_inplace<R, ZNXARI>(basek: usize, k: usize, res: &mut R, res_col: usize, carry: &mut [i64])
where
    R: VecZnxToMut,
    ZNXARI: ZnxZero
        + ZnxCopy
        + ZnxNormalizeFirstStepCarryOnly
        + ZnxNormalizeMiddleStepCarryOnly
        + ZnxNormalizeMiddleStep
        + ZnxNormalizeMiddleStepInplace
        + ZnxNormalizeFirstStepInplace
        + ZnxNormalizeFinalStepInplace,
{
    let mut res: VecZnx<&mut [u8]> = res.to_mut();
    let n: usize = res.n();
    let cols: usize = res.cols();
    let size: usize = res.size();

    let mut steps: usize = k / basek;
    let k_rem: usize = k % basek;

    // Shift by zero bits: nothing to do.
    if k == 0 {
        return;
    }

    // Shifting by at least `size` limbs clears the whole column.
    if steps >= size {
        for j in 0..size {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
        }
        return;
    }

    // Raw-slice coordinates of this column inside each limb slab.
    let start: usize = n * res_col;
    let end: usize = start + n;
    let slice_size: usize = n * cols;

    if !k.is_multiple_of(basek) {
        // We rsh by an additional basek and then lsh by basek-k.
        // Allows to re-use efficient normalization code, avoids
        // overflows & produces output that is normalized.
        steps += 1;

        // Limbs that would fall outside of the limbs of res are discarded,
        // but the carry still needs to be computed.
        (size - steps..size).rev().for_each(|j| {
            if j == size - 1 {
                ZNXARI::znx_normalize_first_step_carry_only(basek, basek - k_rem, res.at(res_col, j), carry);
            } else {
                ZNXARI::znx_normalize_middle_step_carry_only(basek, basek - k_rem, res.at(res_col, j), carry);
            }
        });

        // Continues with shifted normalization: limb j-steps normalizes into limb j.
        let res_raw: &mut [i64] = res.raw_mut();
        (steps..size).rev().for_each(|j| {
            let (lhs, rhs) = res_raw.split_at_mut(slice_size * j);
            let rhs_slice: &mut [i64] = &mut rhs[start..end];
            let lhs_slice: &[i64] = &lhs[(j - steps) * slice_size + start..(j - steps) * slice_size + end];
            ZNXARI::znx_normalize_middle_step(basek, basek - k_rem, rhs_slice, lhs_slice, carry);
        });

        // Propagates carry on the rest of the limbs of res.
        for j in (0..steps).rev() {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
            if j == 0 {
                ZNXARI::znx_normalize_final_step_inplace(basek, basek - k_rem, res.at_mut(res_col, j), carry);
            } else {
                ZNXARI::znx_normalize_middle_step_inplace(basek, basek - k_rem, res.at_mut(res_col, j), carry);
            }
        }
    } else {
        // Shift by multiples of basek: a plain limb-wise move, no renormalization.
        let res_raw: &mut [i64] = res.raw_mut();
        (steps..size).rev().for_each(|j| {
            let (lhs, rhs) = res_raw.split_at_mut(slice_size * j);
            ZNXARI::znx_copy(
                &mut rhs[start..end],
                &lhs[(j - steps) * slice_size + start..(j - steps) * slice_size + end],
            );
        });

        // Zeroes the top
        (0..steps).for_each(|j| {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
        });
    }
}
|
||||
|
||||
/// Writes into column `res_col` of `res` the right shift by `k` bits of column
/// `a_col` of `a` (base `2^basek` limb layout). When `k % basek != 0`, the
/// shift is decomposed as an extra one-limb right shift followed by a
/// `basek - k_rem` left-shifting normalization pass, so the output comes out
/// normalized. `k == 0` degenerates to a copy.
///
/// `carry` must be a scratch slice of length `res.n()`.
pub fn vec_znx_rsh<R, A, ZNXARI>(basek: usize, k: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, carry: &mut [i64])
where
    R: VecZnxToMut,
    A: VecZnxToRef,
    ZNXARI: ZnxZero
        + ZnxCopy
        + ZnxNormalizeFirstStepCarryOnly
        + ZnxNormalizeMiddleStepCarryOnly
        + ZnxNormalizeFirstStep
        + ZnxNormalizeMiddleStep
        + ZnxNormalizeMiddleStepInplace
        + ZnxNormalizeFirstStepInplace
        + ZnxNormalizeFinalStepInplace,
{
    let mut res: VecZnx<&mut [u8]> = res.to_mut();
    let a: VecZnx<&[u8]> = a.to_ref();

    let res_size: usize = res.size();
    let a_size: usize = a.size();

    let mut steps: usize = k / basek;
    let k_rem: usize = k % basek;

    // Shift by zero bits: plain copy.
    if k == 0 {
        vec_znx_copy::<_, _, ZNXARI>(&mut res, res_col, &a, a_col);
        return;
    }

    // Shifting by at least `res_size` limbs clears the whole destination column.
    if steps >= res_size {
        for j in 0..res_size {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
        }
        return;
    }

    if !k.is_multiple_of(basek) {
        // We rsh by an additional basek and then lsh by basek-k.
        // Allows to re-use efficient normalization code, avoids
        // overflows & produces output that is normalized.
        steps += 1;

        // All limbs of a that are moved outside of the limbs of res are discarded,
        // but the carry still needs to be computed.
        for j in (res_size..a_size + steps).rev() {
            if j == a_size + steps - 1 {
                ZNXARI::znx_normalize_first_step_carry_only(basek, basek - k_rem, a.at(a_col, j - steps), carry);
            } else {
                ZNXARI::znx_normalize_middle_step_carry_only(basek, basek - k_rem, a.at(a_col, j - steps), carry);
            }
        }

        // Avoids overflow of limbs of res.
        let min_size: usize = res_size.min(a_size + steps);

        // Zeroes lower limbs of res if a_size + steps < res_size.
        (min_size..res_size).for_each(|j| {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
        });

        // Continues with shifted normalization.
        for j in (steps..min_size).rev() {
            // Case if no limb of a was previously discarded.
            if res_size.saturating_sub(steps) >= a_size && j == min_size - 1 {
                ZNXARI::znx_normalize_first_step(
                    basek,
                    basek - k_rem,
                    res.at_mut(res_col, j),
                    a.at(a_col, j - steps),
                    carry,
                );
            } else {
                ZNXARI::znx_normalize_middle_step(
                    basek,
                    basek - k_rem,
                    res.at_mut(res_col, j),
                    a.at(a_col, j - steps),
                    carry,
                );
            }
        }

        // Propagates carry on the rest of the limbs of res.
        for j in (0..steps).rev() {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
            if j == 0 {
                ZNXARI::znx_normalize_final_step_inplace(basek, basek - k_rem, res.at_mut(res_col, j), carry);
            } else {
                ZNXARI::znx_normalize_middle_step_inplace(basek, basek - k_rem, res.at_mut(res_col, j), carry);
            }
        }
    } else {
        let min_size: usize = res_size.min(a_size + steps);

        // Zeroes the top
        (0..steps).for_each(|j| {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
        });

        // Shift a into res, up to the maximum
        for j in (steps..min_size).rev() {
            ZNXARI::znx_copy(res.at_mut(res_col, j), a.at(a_col, j - steps));
        }

        // Zeroes bottom if a_size + steps < res_size
        (min_size..res_size).for_each(|j| {
            ZNXARI::znx_zero(res.at_mut(res_col, j));
        });
    }
}
|
||||
|
||||
pub fn bench_vec_znx_lsh_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: ModuleNew<B> + VecZnxLshInplace<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_lsh_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxLshInplace<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let basek: usize = 50;
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(n * size_of::<i64>());
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_lsh_inplace(basek, basek - 1, &mut b, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_lsh<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxLsh<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_lsh::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxLsh<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let basek: usize = 50;
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(n * size_of::<i64>());
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_lsh(basek, basek - 1, &mut res, i, &a, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_rsh_inplace<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxRshInplace<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_rsh_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxRshInplace<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let basek: usize = 50;
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(n * size_of::<i64>());
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_rsh_inplace(basek, basek - 1, &mut b, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_rsh<B: Backend>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
Module<B>: VecZnxRsh<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_rsh::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxRsh<B> + ModuleNew<B>,
|
||||
ScratchOwned<B>: ScratchOwnedAlloc<B> + ScratchOwnedBorrow<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let basek: usize = 50;
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(n * size_of::<i64>());
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
res.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_rsh(basek, basek - 1, &mut res, i, &a, i, scratch.borrow());
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::{
        layouts::{FillUniform, VecZnx, ZnxView},
        reference::{
            vec_znx::{
                vec_znx_copy, vec_znx_lsh, vec_znx_lsh_inplace, vec_znx_normalize_inplace, vec_znx_rsh, vec_znx_rsh_inplace,
                vec_znx_sub_ab_inplace,
            },
            znx::ZnxRef,
        },
        source::Source,
    };

    /// Cross-checks the in-place and out-of-place left shift against each
    /// other for every bit shift k in [0, 256).
    #[test]
    fn test_vec_znx_lsh() {
        let n: usize = 8;
        let cols: usize = 2;
        let size: usize = 7;

        let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
        let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
        let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);

        let mut source: Source = Source::new([0u8; 32]);

        // Carry scratch: one i64 per coefficient of a limb.
        let mut carry: Vec<i64> = vec![0i64; n];

        let basek: usize = 50;

        for k in 0..256 {
            a.fill_uniform(50, &mut source);

            // Normalize the input, then seed the reference with a copy of it.
            for i in 0..cols {
                vec_znx_normalize_inplace::<_, ZnxRef>(basek, &mut a, i, &mut carry);
                vec_znx_copy::<_, _, ZnxRef>(&mut res_ref, i, &a, i);
            }

            // Reference path: in-place lsh. Test path: out-of-place lsh followed
            // by a normalization so both sides are in normalized form.
            for i in 0..cols {
                vec_znx_lsh_inplace::<_, ZnxRef>(basek, k, &mut res_ref, i, &mut carry);
                vec_znx_lsh::<_, _, ZnxRef>(basek, k, &mut res_test, i, &a, i, &mut carry);
                vec_znx_normalize_inplace::<_, ZnxRef>(basek, &mut res_test, i, &mut carry);
            }

            assert_eq!(res_ref, res_test);
        }
    }

    /// Cross-checks the in-place and out-of-place right shift: rsh then lsh by
    /// the same k should round-trip when no precision is lost, and otherwise
    /// leave only small (noise-bounded) residue.
    #[test]
    fn test_vec_znx_rsh() {
        let n: usize = 8;
        let cols: usize = 2;

        let res_size: usize = 7;

        let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
        let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);

        // Carry scratch: one i64 per coefficient of a limb.
        let mut carry: Vec<i64> = vec![0i64; n];

        let basek: usize = 50;

        let mut source: Source = Source::new([0u8; 32]);

        let zero: Vec<i64> = vec![0i64; n];

        // Exercise inputs smaller than, equal to, and larger than the output.
        for a_size in [res_size - 1, res_size, res_size + 1] {
            let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);

            for k in 0..res_size * basek {
                a.fill_uniform(50, &mut source);

                // Normalize the input, then seed the reference with a copy of it.
                for i in 0..cols {
                    vec_znx_normalize_inplace::<_, ZnxRef>(basek, &mut a, i, &mut carry);
                    vec_znx_copy::<_, _, ZnxRef>(&mut res_ref, i, &a, i);
                }

                // Pre-fill res_test with garbage to catch limbs the rsh fails to overwrite.
                res_test.fill_uniform(50, &mut source);

                for j in 0..cols {
                    vec_znx_rsh_inplace::<_, ZnxRef>(basek, k, &mut res_ref, j, &mut carry);
                    vec_znx_rsh::<_, _, ZnxRef>(basek, k, &mut res_test, j, &a, j, &mut carry);
                }

                // Undo the shift so both results are comparable against `a`.
                for j in 0..cols {
                    vec_znx_lsh_inplace::<_, ZnxRef>(basek, k, &mut res_ref, j, &mut carry);
                    vec_znx_lsh_inplace::<_, ZnxRef>(basek, k, &mut res_test, j, &mut carry);
                }

                // Case where res has enough to fully store a right shifted without any loss
                // In this case we can check exact equality.
                if a_size + k.div_ceil(basek) <= res_size {
                    assert_eq!(res_ref, res_test);

                    for i in 0..cols {
                        for j in 0..a_size {
                            assert_eq!(res_ref.at(i, j), a.at(i, j), "r0 {} {}", i, j);
                            assert_eq!(res_test.at(i, j), a.at(i, j), "r1 {} {}", i, j);
                        }

                        // Limbs past the input must have come out zero.
                        for j in a_size..res_size {
                            assert_eq!(res_ref.at(i, j), zero, "r0 {} {}", i, j);
                            assert_eq!(res_test.at(i, j), zero, "r1 {} {}", i, j);
                        }
                    }
                // Some loss occures, either because a initially has more precision than res
                // or because the storage of the right shift of a requires more precision than
                // res.
                } else {
                    for j in 0..cols {
                        // Residue = result - input, normalized, then bounded statistically.
                        vec_znx_sub_ab_inplace::<_, _, ZnxRef>(&mut res_ref, j, &a, j);
                        vec_znx_sub_ab_inplace::<_, _, ZnxRef>(&mut res_test, j, &a, j);

                        vec_znx_normalize_inplace::<_, ZnxRef>(basek, &mut res_ref, j, &mut carry);
                        vec_znx_normalize_inplace::<_, ZnxRef>(basek, &mut res_test, j, &mut carry);

                        // NOTE(review): very loose bound (k*basek bits); presumably only
                        // meant to catch gross corruption — confirm intended tightness.
                        assert!(res_ref.std(basek, j).log2() - (k as f64) <= (k * basek) as f64);
                        assert!(res_test.std(basek, j).log2() - (k as f64) <= (k * basek) as f64);
                    }
                }
            }
        }
    }
}
|
||||
62
poulpy-hal/src/reference/vec_znx/split_ring.rs
Normal file
62
poulpy-hal/src/reference/vec_znx/split_ring.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
use crate::{
|
||||
layouts::{VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxRotate, ZnxSwitchRing, ZnxZero},
|
||||
};
|
||||
|
||||
/// Scratch-space requirement, in bytes, of [`vec_znx_split_ring`]:
/// one `i64` per coefficient of an input limb.
pub fn vec_znx_split_ring_tmp_bytes(n: usize) -> usize {
    size_of::<i64>() * n
}
|
||||
|
||||
/// Splits a polynomial of `Z[X]/(X^{n_in}+1)` into `n_in / n_out` polynomials
/// of the smaller ring `Z[X]/(X^{n_out}+1)`, one per entry of `res`.
///
/// For output `i > 0`, the input is first rotated by `-i` into `tmp` before
/// the ring-switch, so each output captures a distinct coset of coefficients.
///
/// # Arguments
/// * `res` - destination vectors; all must share the same (smaller) degree.
/// * `res_col` - column written in every destination.
/// * `a` - source vector of the larger ring.
/// * `a_col` - column read from the source.
/// * `tmp` - scratch buffer of `a.n()` coefficients
///   (see [`vec_znx_split_ring_tmp_bytes`]).
///
/// # Panics
/// In debug builds: if `tmp` is mis-sized, if the output degree is not a
/// strict divisor of the input degree, if the destinations disagree on
/// degree, or if `res.len() != n_in / n_out`.
pub fn vec_znx_split_ring<R, A, ZNXARI>(res: &mut [R], res_col: usize, a: &A, a_col: usize, tmp: &mut [i64])
where
    R: VecZnxToMut,
    A: VecZnxToRef,
    ZNXARI: ZnxSwitchRing + ZnxRotate + ZnxZero,
{
    let a: VecZnx<&[u8]> = a.to_ref();
    let a_size = a.size();

    // NOTE(review): res[0] is indexed unconditionally, so an empty `res`
    // panics even in release builds — presumably a documented precondition.
    let (n_in, n_out) = (a.n(), res[0].to_mut().n());

    #[cfg(debug_assertions)]
    {
        assert_eq!(tmp.len(), a.n());

        assert!(
            n_out < n_in,
            "invalid a: output ring degree should be smaller"
        );

        // Every destination must have the same degree as res[0].
        res[1..].iter_mut().for_each(|bi| {
            assert_eq!(
                bi.to_mut().n(),
                n_out,
                "invalid input a: all VecZnx must have the same degree"
            )
        });

        // Exactly one output per coset of the degree reduction.
        assert!(n_in.is_multiple_of(n_out));
        assert_eq!(res.len(), n_in / n_out);
    }

    res.iter_mut().enumerate().for_each(|(i, bi)| {
        let mut bi: VecZnx<&mut [u8]> = bi.to_mut();

        // Only limbs present in both source and destination are produced.
        let min_size = bi.size().min(a_size);

        if i == 0 {
            // First output: no rotation needed, switch rings directly.
            for j in 0..min_size {
                ZNXARI::znx_switch_ring(bi.at_mut(res_col, j), a.at(a_col, j));
            }
        } else {
            // Rotate by -i into scratch, then fold into the smaller ring.
            for j in 0..min_size {
                ZNXARI::znx_rotate(-(i as i64), tmp, a.at(a_col, j));
                ZNXARI::znx_switch_ring(bi.at_mut(res_col, j), tmp);
            }
        }

        // Clear destination limbs with no source counterpart.
        for j in min_size..bi.size() {
            ZNXARI::znx_zero(bi.at_mut(res_col, j));
        }
    })
}
|
||||
250
poulpy-hal/src/reference/vec_znx/sub.rs
Normal file
250
poulpy-hal/src/reference/vec_znx/sub.rs
Normal file
@@ -0,0 +1,250 @@
|
||||
use std::hint::black_box;
|
||||
|
||||
use criterion::{BenchmarkId, Criterion};
|
||||
|
||||
use crate::{
|
||||
api::{ModuleNew, VecZnxSub, VecZnxSubABInplace, VecZnxSubBAInplace},
|
||||
layouts::{Backend, FillUniform, Module, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
oep::{ModuleNewImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl},
|
||||
reference::znx::{ZnxCopy, ZnxNegate, ZnxNegateInplace, ZnxSub, ZnxSubABInplace, ZnxSubBAInplace, ZnxZero},
|
||||
source::Source,
|
||||
};
|
||||
|
||||
pub fn vec_znx_sub<R, A, B, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
B: VecZnxToRef,
|
||||
ZNXARI: ZnxSub + ZnxNegate + ZnxZero + ZnxCopy,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let b: VecZnx<&[u8]> = b.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(a.n(), res.n());
|
||||
assert_eq!(b.n(), res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size: usize = a.size();
|
||||
let b_size: usize = b.size();
|
||||
|
||||
if a_size <= b_size {
|
||||
let sum_size: usize = a_size.min(res_size);
|
||||
let cpy_size: usize = b_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_sub(res.at_mut(res_col, j), a.at(a_col, j), b.at(b_col, j));
|
||||
}
|
||||
|
||||
for j in sum_size..cpy_size {
|
||||
ZNXARI::znx_negate(res.at_mut(res_col, j), b.at(b_col, j));
|
||||
}
|
||||
|
||||
for j in cpy_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
} else {
|
||||
let sum_size: usize = b_size.min(res_size);
|
||||
let cpy_size: usize = a_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_sub(res.at_mut(res_col, j), a.at(a_col, j), b.at(b_col, j));
|
||||
}
|
||||
|
||||
for j in sum_size..cpy_size {
|
||||
ZNXARI::znx_copy(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in cpy_size..res_size {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_sub_ab_inplace<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxSubABInplace,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(a.n(), res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size: usize = a.size();
|
||||
|
||||
let sum_size: usize = a_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_sub_ab_inplace(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_sub_ba_inplace<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxSubBAInplace + ZnxNegateInplace,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert_eq!(a.n(), res.n());
|
||||
}
|
||||
|
||||
let res_size: usize = res.size();
|
||||
let a_size: usize = a.size();
|
||||
|
||||
let sum_size: usize = a_size.min(res_size);
|
||||
|
||||
for j in 0..sum_size {
|
||||
ZNXARI::znx_sub_ba_inplace(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in sum_size..res_size {
|
||||
ZNXARI::znx_negate_inplace(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_sub<B>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
B: Backend + ModuleNewImpl<B> + VecZnxSubImpl<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_sub::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxSub + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut c: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_sub(&mut c, i, &a, i, &b, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2],));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_sub_ab_inplace<B>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
B: Backend + ModuleNewImpl<B> + VecZnxSubABInplaceImpl<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_sub_ab_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxSubABInplace + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_sub_ab_inplace(&mut b, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn bench_vec_znx_sub_ba_inplace<B>(c: &mut Criterion, label: &str)
|
||||
where
|
||||
B: Backend + ModuleNewImpl<B> + VecZnxSubBAInplaceImpl<B>,
|
||||
{
|
||||
let group_name: String = format!("vec_znx_sub_ba_inplace::{}", label);
|
||||
|
||||
let mut group = c.benchmark_group(group_name);
|
||||
|
||||
fn runner<B: Backend>(params: [usize; 3]) -> impl FnMut()
|
||||
where
|
||||
Module<B>: VecZnxSubBAInplace + ModuleNew<B>,
|
||||
{
|
||||
let n: usize = 1 << params[0];
|
||||
let cols: usize = params[1];
|
||||
let size: usize = params[2];
|
||||
|
||||
let module: Module<B> = Module::<B>::new(n as u64);
|
||||
|
||||
let mut source: Source = Source::new([0u8; 32]);
|
||||
|
||||
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
|
||||
|
||||
// Fill a with random i64
|
||||
a.fill_uniform(50, &mut source);
|
||||
b.fill_uniform(50, &mut source);
|
||||
|
||||
move || {
|
||||
for i in 0..cols {
|
||||
module.vec_znx_sub_ba_inplace(&mut b, i, &a, i);
|
||||
}
|
||||
black_box(());
|
||||
}
|
||||
}
|
||||
|
||||
for params in [[10, 2, 2], [11, 2, 4], [12, 2, 8], [13, 2, 16], [14, 2, 32]] {
|
||||
let id: BenchmarkId = BenchmarkId::from_parameter(format!("{}x({}x{})", 1 << params[0], params[1], params[2]));
|
||||
let mut runner = runner::<B>(params);
|
||||
group.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
58
poulpy-hal/src/reference/vec_znx/sub_scalar.rs
Normal file
58
poulpy-hal/src/reference/vec_znx/sub_scalar.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use crate::layouts::{ScalarZnxToRef, VecZnxToMut, VecZnxToRef};
|
||||
use crate::{
|
||||
layouts::{ScalarZnx, VecZnx, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::znx::{ZnxSub, ZnxSubABInplace, ZnxZero},
|
||||
};
|
||||
|
||||
pub fn vec_znx_sub_scalar<R, A, B, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize, b: &B, b_col: usize, b_limb: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: ScalarZnxToRef,
|
||||
B: VecZnxToRef,
|
||||
ZNXARI: ZnxSub + ZnxZero,
|
||||
{
|
||||
let a: ScalarZnx<&[u8]> = a.to_ref();
|
||||
let b: VecZnx<&[u8]> = b.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
let min_size: usize = b.size().min(res.size());
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(
|
||||
b_limb < min_size,
|
||||
"b_limb: {} > min_size: {}",
|
||||
b_limb,
|
||||
min_size
|
||||
);
|
||||
}
|
||||
|
||||
for j in 0..min_size {
|
||||
if j == b_limb {
|
||||
ZNXARI::znx_sub(res.at_mut(res_col, j), b.at(b_col, j), a.at(a_col, 0));
|
||||
} else {
|
||||
res.at_mut(res_col, j).copy_from_slice(b.at(b_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
for j in min_size..res.size() {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vec_znx_sub_scalar_inplace<R, A, ZNXARI>(res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: ScalarZnxToRef,
|
||||
ZNXARI: ZnxSubABInplace,
|
||||
{
|
||||
let a: ScalarZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
assert!(res_limb < res.size());
|
||||
}
|
||||
|
||||
ZNXARI::znx_sub_ab_inplace(res.at_mut(res_col, res_limb), a.at(a_col, 0));
|
||||
}
|
||||
37
poulpy-hal/src/reference/vec_znx/switch_ring.rs
Normal file
37
poulpy-hal/src/reference/vec_znx/switch_ring.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
use crate::{
|
||||
layouts::{VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxView, ZnxViewMut},
|
||||
reference::{
|
||||
vec_znx::vec_znx_copy,
|
||||
znx::{ZnxCopy, ZnxSwitchRing, ZnxZero},
|
||||
},
|
||||
};
|
||||
|
||||
/// Maps between negacyclic rings by changing the polynomial degree.
|
||||
/// Up: Z[X]/(X^N+1) -> Z[X]/(X^{2^d N}+1) via X ↦ X^{2^d}
|
||||
/// Down: Z[X]/(X^N+1) -> Z[X]/(X^{N/2^d}+1) by folding indices.
|
||||
pub fn vec_znx_switch_ring<R, A, ZNXARI>(res: &mut R, res_col: usize, a: &A, a_col: usize)
|
||||
where
|
||||
R: VecZnxToMut,
|
||||
A: VecZnxToRef,
|
||||
ZNXARI: ZnxCopy + ZnxSwitchRing + ZnxZero,
|
||||
{
|
||||
let a: VecZnx<&[u8]> = a.to_ref();
|
||||
let mut res: VecZnx<&mut [u8]> = res.to_mut();
|
||||
|
||||
let (n_in, n_out) = (a.n(), res.n());
|
||||
|
||||
if n_in == n_out {
|
||||
vec_znx_copy::<_, _, ZNXARI>(&mut res, res_col, &a, a_col);
|
||||
return;
|
||||
}
|
||||
|
||||
let min_size: usize = a.size().min(res.size());
|
||||
|
||||
for j in 0..min_size {
|
||||
ZNXARI::znx_switch_ring(res.at_mut(res_col, j), a.at(a_col, j));
|
||||
}
|
||||
|
||||
for j in min_size..res.size() {
|
||||
ZNXARI::znx_zero(res.at_mut(res_col, j));
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user