Ref. + AVX code & generic tests + benches (#85)

This commit is contained in:
Jean-Philippe Bossuat
2025-09-15 16:16:11 +02:00
committed by GitHub
parent 99b9e3e10e
commit 56dbd29c59
286 changed files with 27797 additions and 7270 deletions

View File

@@ -1,18 +1,16 @@
use rand_distr::Distribution;
use crate::{
api::{
VecZnxBigAdd, VecZnxBigAddDistF64, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace,
VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigFillDistF64,
VecZnxBigFillNormal, VecZnxBigFromBytes, VecZnxBigNegateInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxBigSub, VecZnxBigSubABInplace, VecZnxBigSubBAInplace, VecZnxBigSubSmallA, VecZnxBigSubSmallAInplace,
VecZnxBigSubSmallB, VecZnxBigSubSmallBInplace,
VecZnxBigAdd, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes,
VecZnxBigFromBytes, VecZnxBigFromSmall, VecZnxBigNegate, VecZnxBigNegateInplace, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxBigSub, VecZnxBigSubABInplace, VecZnxBigSubBAInplace, VecZnxBigSubSmallA,
VecZnxBigSubSmallAInplace, VecZnxBigSubSmallB, VecZnxBigSubSmallBInplace,
},
layouts::{Backend, Module, Scratch, VecZnxBigOwned, VecZnxBigToMut, VecZnxBigToRef, VecZnxToMut, VecZnxToRef},
oep::{
VecZnxBigAddDistF64Impl, VecZnxBigAddImpl, VecZnxBigAddInplaceImpl, VecZnxBigAddNormalImpl, VecZnxBigAddSmallImpl,
VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
VecZnxBigAutomorphismInplaceImpl, VecZnxBigFillDistF64Impl, VecZnxBigFillNormalImpl, VecZnxBigFromBytesImpl,
VecZnxBigAddImpl, VecZnxBigAddInplaceImpl, VecZnxBigAddNormalImpl, VecZnxBigAddSmallImpl, VecZnxBigAddSmallInplaceImpl,
VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl, VecZnxBigAutomorphismInplaceImpl,
VecZnxBigAutomorphismInplaceTmpBytesImpl, VecZnxBigFromBytesImpl, VecZnxBigFromSmallImpl, VecZnxBigNegateImpl,
VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl, VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl,
VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl, VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl,
VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
@@ -20,6 +18,19 @@ use crate::{
source::Source,
};
// Blanket impl: exposes `VecZnxBigFromSmall` on any `Module<B>` whose backend
// provides the corresponding OEP entry point `VecZnxBigFromSmallImpl`.
impl<B> VecZnxBigFromSmall<B> for Module<B>
where
B: Backend + VecZnxBigFromSmallImpl<B>,
{
/// Lifts column `a_col` of the small vector `a` into column `res_col` of the
/// big vector `res`. Semantics are entirely delegated to the backend.
// NOTE(review): unlike the sibling impls in this file (e.g. `vec_znx_big_negate`),
// `self` is NOT forwarded to the backend here — confirm that
// `vec_znx_big_from_small_impl` intentionally takes no module argument.
fn vec_znx_big_from_small<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
where
R: VecZnxBigToMut<B>,
A: VecZnxToRef,
{
B::vec_znx_big_from_small_impl(res, res_col, a, a_col);
}
}
impl<B> VecZnxBigAlloc<B> for Module<B>
where
B: Backend + VecZnxBigAllocImpl<B>,
@@ -47,24 +58,6 @@ where
}
}
// Blanket impl: exposes `VecZnxBigAddDistF64` on any `Module<B>` whose backend
// provides `VecZnxBigAddDistF64Impl`. (This impl is removed by this commit.)
impl<B> VecZnxBigAddDistF64<B> for Module<B>
where
B: Backend + VecZnxBigAddDistF64Impl<B>,
{
/// Adds `f64` samples drawn from `dist` via `source` onto column `res_col`
/// of `res`, delegating to `B::add_dist_f64_impl`.
// NOTE(review): the exact roles of `basek`, `k`, and how `bound` clips the
// samples are defined by the backend — not visible from this file.
fn vec_znx_big_add_dist_f64<R: VecZnxBigToMut<B>, D: Distribution<f64>>(
&self,
basek: usize,
res: &mut R,
res_col: usize,
k: usize,
source: &mut Source,
dist: D,
bound: f64,
) {
B::add_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
}
}
impl<B> VecZnxBigAddNormal<B> for Module<B>
where
B: Backend + VecZnxBigAddNormalImpl<B>,
@@ -83,42 +76,6 @@ where
}
}
// Blanket impl: exposes `VecZnxBigFillDistF64` on any `Module<B>` whose backend
// provides `VecZnxBigFillDistF64Impl`. (This impl is removed by this commit.)
impl<B> VecZnxBigFillDistF64<B> for Module<B>
where
B: Backend + VecZnxBigFillDistF64Impl<B>,
{
/// Overwrites column `res_col` of `res` with `f64` samples drawn from `dist`
/// via `source`, delegating to `B::fill_dist_f64_impl`.
/// "Fill" (overwrite) vs. "add" (accumulate) mirrors the sibling
/// `vec_znx_big_add_dist_f64` — same parameter list, different write mode.
// NOTE(review): `basek`/`k`/`bound` semantics are backend-defined.
fn vec_znx_big_fill_dist_f64<R: VecZnxBigToMut<B>, D: Distribution<f64>>(
&self,
basek: usize,
res: &mut R,
res_col: usize,
k: usize,
source: &mut Source,
dist: D,
bound: f64,
) {
B::fill_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
}
}
// Blanket impl: exposes `VecZnxBigFillNormal` on any `Module<B>` whose backend
// provides `VecZnxBigFillNormalImpl`. (This impl is removed by this commit.)
impl<B> VecZnxBigFillNormal<B> for Module<B>
where
B: Backend + VecZnxBigFillNormalImpl<B>,
{
/// Overwrites column `res_col` of `res` with normally-distributed samples
/// (standard deviation `sigma`), delegating to `B::fill_normal_impl`.
/// Specialized variant of `vec_znx_big_fill_dist_f64`: the distribution is
/// fixed to a normal parameterized by `sigma` instead of a generic `dist`.
// NOTE(review): `basek`/`k`/`bound` semantics are backend-defined.
fn vec_znx_big_fill_normal<R: VecZnxBigToMut<B>>(
&self,
basek: usize,
res: &mut R,
res_col: usize,
k: usize,
source: &mut Source,
sigma: f64,
bound: f64,
) {
B::fill_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
}
}
impl<B> VecZnxBigAdd<B> for Module<B>
where
B: Backend + VecZnxBigAddImpl<B>,
@@ -267,6 +224,19 @@ where
}
}
// Blanket impl: exposes `VecZnxBigNegate` on any `Module<B>` whose backend
// provides `VecZnxBigNegateImpl`. (Added by this commit, alongside the
// pre-existing in-place variant `VecZnxBigNegateInplace` below.)
impl<B> VecZnxBigNegate<B> for Module<B>
where
B: Backend + VecZnxBigNegateImpl<B>,
{
/// Writes the negation of column `a_col` of `a` into column `res_col` of
/// `res`; the module (`self`) and all arguments are forwarded verbatim to
/// `B::vec_znx_big_negate_impl`.
fn vec_znx_big_negate<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
where
R: VecZnxBigToMut<B>,
A: VecZnxBigToRef<B>,
{
B::vec_znx_big_negate_impl(self, res, res_col, a, a_col);
}
}
impl<B> VecZnxBigNegateInplace<B> for Module<B>
where
B: Backend + VecZnxBigNegateInplaceImpl<B>,
@@ -321,14 +291,23 @@ where
}
}
// Blanket impl: exposes `VecZnxBigAutomorphismInplaceTmpBytes` (scratch-size
// query for the in-place automorphism) on any `Module<B>` whose backend
// provides `VecZnxBigAutomorphismInplaceTmpBytesImpl`.
// NOTE(review): the trait is used non-generically here (`…TmpBytes`, not
// `…TmpBytes<B>`) while its Impl counterpart is generic — confirm intentional.
impl<B> VecZnxBigAutomorphismInplaceTmpBytes for Module<B>
where
B: Backend + VecZnxBigAutomorphismInplaceTmpBytesImpl<B>,
{
/// Returns the number of scratch bytes required by
/// `vec_znx_big_automorphism_inplace`, as reported by the backend.
fn vec_znx_big_automorphism_inplace_tmp_bytes(&self) -> usize {
B::vec_znx_big_automorphism_inplace_tmp_bytes_impl(self)
}
}
// Blanket impl: exposes `VecZnxBigAutomorphismInplace` on any `Module<B>`
// whose backend provides `VecZnxBigAutomorphismInplaceImpl`.
// NOTE(review): this span is a rendered diff with the +/- markers stripped —
// it contains BOTH the pre-change and post-change versions of the signature
// and of the delegation call. Only the `scratch`-taking form (the second of
// each pair) belongs in the post-commit file; as written, the duplicated
// lines are not valid Rust.
impl<B> VecZnxBigAutomorphismInplace<B> for Module<B>
where
B: Backend + VecZnxBigAutomorphismInplaceImpl<B>,
{
// Old signature (removed by this commit): no scratch parameter.
fn vec_znx_big_automorphism_inplace<A>(&self, k: i64, a: &mut A, a_col: usize)
// New signature (added by this commit): threads `scratch` through, sized via
// `vec_znx_big_automorphism_inplace_tmp_bytes` above.
fn vec_znx_big_automorphism_inplace<A>(&self, k: i64, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
where
A: VecZnxBigToMut<B>,
{
// Old call (removed) followed by new call (added) — same diff artifact.
B::vec_znx_big_automorphism_inplace_impl(self, k, a, a_col);
B::vec_znx_big_automorphism_inplace_impl(self, k, a, a_col, scratch);
}
}