Add cross-basek normalization (#90)

* added cross_basek_normalization

* updated method signatures to take layouts

* fixed cross-base normalization

fix #91
fix #93
Jean-Philippe Bossuat
2025-09-30 14:40:10 +02:00
committed by GitHub
parent 4da790ea6a
commit 37e13b965c
216 changed files with 12481 additions and 7745 deletions
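
The headline change: normalization no longer assumes the input and output share a single `basek`; the `vec_znx_normalize` and `vec_znx_big_normalize` hunks below split the parameter into `res_basek` and `a_basek`. A minimal per-coefficient sketch of what such a cross-base re-decomposition involves, with illustrative names and none of the library's limb layouts, precision tracking, or scratch handling:

// Standalone model of cross-base2k normalization for a single coefficient:
// re-express a value given as balanced base-2^{a_k} limbs as balanced
// base-2^{res_k} limbs. Illustrative only.
fn recompose(limbs: &[i64], k: usize) -> i128 {
    // Most-significant limb first: value = sum_i limbs[i] * 2^{k*(len-1-i)}.
    limbs.iter().fold(0i128, |acc, &l| (acc << k) + l as i128)
}

fn decompose(mut v: i128, k: usize, size: usize) -> Vec<i64> {
    // Balanced digits in [-2^{k-1}, 2^{k-1}), least-significant digit last.
    let half = 1i128 << (k - 1);
    let full = 1i128 << k;
    let mut out = vec![0i64; size];
    for limb in out.iter_mut().rev() {
        let mut d = v & (full - 1);
        if d >= half {
            d -= full;
        }
        *limb = d as i64;
        v = (v - d) >> k;
    }
    // Assumes size * k bits suffice; the real code instead tracks precision
    // per limb and drops what does not fit.
    out
}

fn cross_base_normalize(a: &[i64], a_k: usize, res_k: usize, res_size: usize) -> Vec<i64> {
    decompose(recompose(a, a_k), res_k, res_size)
}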

View File

@@ -1,11 +1,11 @@
 use crate::{
     api::{
-        ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeLike, TakeMatZnx, TakeScalarZnx,
-        TakeSlice, TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice, TakeVmpPMat,
+        ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeMatZnx, TakeScalarZnx, TakeSlice,
+        TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice, TakeVmpPMat,
     },
-    layouts::{Backend, DataRef, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
+    layouts::{Backend, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
     oep::{
-        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeLikeImpl, TakeMatZnxImpl,
+        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeMatZnxImpl,
         TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl,
         TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl,
     },
@@ -156,80 +156,3 @@ where
         B::take_mat_znx_impl(self, n, rows, cols_in, cols_out, size)
     }
 }
-
-impl<'a, B: Backend, D> TakeLike<'a, B, ScalarZnx<D>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, ScalarZnx<D>, Output = ScalarZnx<&'a mut [u8]>>,
-    D: DataRef,
-{
-    type Output = ScalarZnx<&'a mut [u8]>;
-    fn take_like(&'a mut self, template: &ScalarZnx<D>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
-
-impl<'a, B: Backend, D> TakeLike<'a, B, SvpPPol<D, B>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, SvpPPol<D, B>, Output = SvpPPol<&'a mut [u8], B>>,
-    D: DataRef,
-{
-    type Output = SvpPPol<&'a mut [u8], B>;
-    fn take_like(&'a mut self, template: &SvpPPol<D, B>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
-
-impl<'a, B: Backend, D> TakeLike<'a, B, VecZnx<D>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, VecZnx<D>, Output = VecZnx<&'a mut [u8]>>,
-    D: DataRef,
-{
-    type Output = VecZnx<&'a mut [u8]>;
-    fn take_like(&'a mut self, template: &VecZnx<D>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
-
-impl<'a, B: Backend, D> TakeLike<'a, B, VecZnxBig<D, B>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, VecZnxBig<D, B>, Output = VecZnxBig<&'a mut [u8], B>>,
-    D: DataRef,
-{
-    type Output = VecZnxBig<&'a mut [u8], B>;
-    fn take_like(&'a mut self, template: &VecZnxBig<D, B>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
-
-impl<'a, B: Backend, D> TakeLike<'a, B, VecZnxDft<D, B>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, VecZnxDft<D, B>, Output = VecZnxDft<&'a mut [u8], B>>,
-    D: DataRef,
-{
-    type Output = VecZnxDft<&'a mut [u8], B>;
-    fn take_like(&'a mut self, template: &VecZnxDft<D, B>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
-
-impl<'a, B: Backend, D> TakeLike<'a, B, MatZnx<D>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, MatZnx<D>, Output = MatZnx<&'a mut [u8]>>,
-    D: DataRef,
-{
-    type Output = MatZnx<&'a mut [u8]>;
-    fn take_like(&'a mut self, template: &MatZnx<D>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
-
-impl<'a, B: Backend, D> TakeLike<'a, B, VmpPMat<D, B>> for Scratch<B>
-where
-    B: TakeLikeImpl<'a, B, VmpPMat<D, B>, Output = VmpPMat<&'a mut [u8], B>>,
-    D: DataRef,
-{
-    type Output = VmpPMat<&'a mut [u8], B>;
-    fn take_like(&'a mut self, template: &VmpPMat<D, B>) -> (Self::Output, &'a mut Self) {
-        B::take_like_impl(self, template)
-    }
-}
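
With the blanket `TakeLike` impls removed, temporaries can no longer be borrowed by cloning a template's geometry; callers go through the concrete `Take*` traits (`TakeVecZnx`, `TakeMatZnx`, etc.) and spell the dimensions out. A standalone model of the underlying split-off-scratch pattern these traits follow, with illustrative names rather than the crate's API:

// Split a byte arena into a borrowed buffer plus the remaining scratch,
// so temporaries never allocate. Illustrative only.
struct Arena<'a>(&'a mut [u8]);

impl<'a> Arena<'a> {
    fn take(self, bytes: usize) -> (&'a mut [u8], Arena<'a>) {
        let (head, tail) = self.0.split_at_mut(bytes);
        (head, Arena(tail))
    }
}

Returning the remainder alongside the borrowed buffer is what lets successive `take_*` calls chain without aliasing.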

View File

@@ -5,8 +5,8 @@ use crate::{
         VecZnxLshInplace, VecZnxLshTmpBytes, VecZnxMergeRings, VecZnxMergeRingsTmpBytes, VecZnxMulXpMinusOne,
         VecZnxMulXpMinusOneInplace, VecZnxMulXpMinusOneInplaceTmpBytes, VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize,
         VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes,
-        VecZnxRsh, VecZnxRshInplace, VecZnxRshTmpBytes, VecZnxSplitRing, VecZnxSplitRingTmpBytes, VecZnxSub, VecZnxSubABInplace,
-        VecZnxSubBAInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
+        VecZnxRsh, VecZnxRshInplace, VecZnxRshTmpBytes, VecZnxSplitRing, VecZnxSplitRingTmpBytes, VecZnxSub, VecZnxSubInplace,
+        VecZnxSubNegateInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
     oep::{
@@ -17,7 +17,7 @@ use crate::{
         VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
         VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
         VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
-        VecZnxSplitRingTmpBytesImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarImpl,
+        VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
         VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
     },
     source::Source,
@@ -36,12 +36,21 @@ impl<B> VecZnxNormalize<B> for Module<B>
 where
     B: Backend + VecZnxNormalizeImpl<B>,
 {
-    fn vec_znx_normalize<R, A>(&self, basek: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
-    where
+    #[allow(clippy::too_many_arguments)]
+    fn vec_znx_normalize<R, A>(
+        &self,
+        res_basek: usize,
+        res: &mut R,
+        res_col: usize,
+        a_basek: usize,
+        a: &A,
+        a_col: usize,
+        scratch: &mut Scratch<B>,
+    ) where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        B::vec_znx_normalize_impl(self, basek, res, res_col, a, a_col, scratch)
+        B::vec_znx_normalize_impl(self, res_basek, res, res_col, a_basek, a, a_col, scratch)
     }
 }
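
A hedged usage sketch of the new signature (value construction elided; bases and columns are illustrative): the input's limbs are read in base 2^12 and the result is produced in base 2^18, which before this change would have required both sides to share one `basek`:

use crate::{
    api::VecZnxNormalize,
    layouts::{Backend, Module, Scratch, VecZnxToMut, VecZnxToRef},
};

fn renormalize<B, R, A>(module: &Module<B>, res: &mut R, a: &A, scratch: &mut Scratch<B>)
where
    B: Backend,
    Module<B>: VecZnxNormalize<B>,
    R: VecZnxToMut,
    A: VecZnxToRef,
{
    // res_basek = 18, a_basek = 12, column 0 on both sides (illustrative).
    module.vec_znx_normalize(18, res, 0, 12, a, 0, scratch);
}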
@@ -49,11 +58,11 @@ impl<B> VecZnxNormalizeInplace<B> for Module<B>
 where
     B: Backend + VecZnxNormalizeInplaceImpl<B>,
 {
-    fn vec_znx_normalize_inplace<A>(&self, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
+    fn vec_znx_normalize_inplace<A>(&self, base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
     where
         A: VecZnxToMut,
     {
-        B::vec_znx_normalize_inplace_impl(self, basek, a, a_col, scratch)
+        B::vec_znx_normalize_inplace_impl(self, base2k, a, a_col, scratch)
     }
 }
@@ -125,29 +134,29 @@ where
     }
 }
 
-impl<B> VecZnxSubABInplace for Module<B>
+impl<B> VecZnxSubInplace for Module<B>
 where
-    B: Backend + VecZnxSubABInplaceImpl<B>,
+    B: Backend + VecZnxSubInplaceImpl<B>,
 {
-    fn vec_znx_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_sub_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        B::vec_znx_sub_ab_inplace_impl(self, res, res_col, a, a_col)
+        B::vec_znx_sub_inplace_impl(self, res, res_col, a, a_col)
     }
 }
 
-impl<B> VecZnxSubBAInplace for Module<B>
+impl<B> VecZnxSubNegateInplace for Module<B>
 where
-    B: Backend + VecZnxSubBAInplaceImpl<B>,
+    B: Backend + VecZnxSubNegateInplaceImpl<B>,
 {
-    fn vec_znx_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_sub_negate_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        B::vec_znx_sub_ba_inplace_impl(self, res, res_col, a, a_col)
+        B::vec_znx_sub_negate_inplace_impl(self, res, res_col, a, a_col)
     }
 }
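
Naming note on the rename above, assuming it preserves the old operand order (the receiver was the `a` operand of `sub_ab` and the `b` operand of `sub_ba`): `sub_inplace` computes `res -= a`, while `sub_negate_inplace` computes the negated difference `res = a - res`. A coefficient-level model:

// Model of the two in-place subtractions; the semantics mapping is an
// assumption, not stated in this diff.
fn sub_inplace(res: &mut [i64], a: &[i64]) {
    for (r, &x) in res.iter_mut().zip(a) {
        *r -= x; // old vec_znx_sub_ab_inplace
    }
}

fn sub_negate_inplace(res: &mut [i64], a: &[i64]) {
    for (r, &x) in res.iter_mut().zip(a) {
        *r = x - *r; // old vec_znx_sub_ba_inplace, i.e. the negated difference
    }
}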
@@ -227,7 +236,7 @@ where
 {
     fn vec_znx_lsh<R, A>(
         &self,
-        basek: usize,
+        base2k: usize,
         k: usize,
         res: &mut R,
         res_col: usize,
@@ -238,7 +247,7 @@
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        B::vec_znx_lsh_inplace_impl(self, basek, k, res, res_col, a, a_col, scratch);
+        B::vec_znx_lsh_impl(self, base2k, k, res, res_col, a, a_col, scratch);
     }
 }
@@ -248,7 +257,7 @@
 {
     fn vec_znx_rsh<R, A>(
         &self,
-        basek: usize,
+        base2k: usize,
         k: usize,
         res: &mut R,
         res_col: usize,
@@ -259,7 +268,7 @@
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        B::vec_znx_rsh_inplace_impl(self, basek, k, res, res_col, a, a_col, scratch);
+        B::vec_znx_rsh_impl(self, base2k, k, res, res_col, a, a_col, scratch);
     }
 }
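
Besides the `base2k` rename, these two hunks also fix the dispatch: the out-of-place `vec_znx_lsh`/`vec_znx_rsh` previously forwarded to the `*_inplace_impl` backend hooks and now call `vec_znx_lsh_impl`/`vec_znx_rsh_impl`. Semantically the shifts scale the represented value by 2^k; a whole-limb sketch of the left shift (illustrative only, the real kernels also handle sub-limb shifts and renormalize via scratch):

// Left shift by k bits on a base-2^base2k limb vector, most-significant
// limb first, covering only the case where k is a multiple of base2k.
fn lsh_limbs(limbs: &[i64], base2k: usize, k: usize) -> Vec<i64> {
    assert_eq!(k % base2k, 0, "sketch covers whole-limb shifts only");
    let s = k / base2k;
    assert!(s <= limbs.len());
    let mut out = limbs.to_vec();
    // Drop the top s limbs, zero the vacated low-order positions.
    out.rotate_left(s);
    let len = out.len();
    for l in &mut out[len - s..] {
        *l = 0;
    }
    out
}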
@@ -267,11 +276,11 @@ impl<B> VecZnxLshInplace<B> for Module<B>
 where
     B: Backend + VecZnxLshInplaceImpl<B>,
 {
-    fn vec_znx_lsh_inplace<A>(&self, basek: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
+    fn vec_znx_lsh_inplace<A>(&self, base2k: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
     where
         A: VecZnxToMut,
     {
-        B::vec_znx_lsh_inplace_impl(self, basek, k, a, a_col, scratch)
+        B::vec_znx_lsh_inplace_impl(self, base2k, k, a, a_col, scratch)
     }
 }
@@ -279,11 +288,11 @@ impl<B> VecZnxRshInplace<B> for Module<B>
 where
     B: Backend + VecZnxRshInplaceImpl<B>,
 {
-    fn vec_znx_rsh_inplace<A>(&self, basek: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
+    fn vec_znx_rsh_inplace<A>(&self, base2k: usize, k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
     where
         A: VecZnxToMut,
     {
-        B::vec_znx_rsh_inplace_impl(self, basek, k, a, a_col, scratch)
+        B::vec_znx_rsh_inplace_impl(self, base2k, k, a, a_col, scratch)
     }
 }
@@ -463,11 +472,11 @@ impl<B> VecZnxFillUniform for Module<B>
 where
     B: Backend + VecZnxFillUniformImpl<B>,
 {
-    fn vec_znx_fill_uniform<R>(&self, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn vec_znx_fill_uniform<R>(&self, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: VecZnxToMut,
     {
-        B::vec_znx_fill_uniform_impl(self, basek, res, res_col, source);
+        B::vec_znx_fill_uniform_impl(self, base2k, res, res_col, source);
     }
 }
@@ -477,7 +486,7 @@ where
 {
     fn vec_znx_fill_normal<R>(
         &self,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -487,7 +496,7 @@
     ) where
         R: VecZnxToMut,
     {
-        B::vec_znx_fill_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
+        B::vec_znx_fill_normal_impl(self, base2k, res, res_col, k, source, sigma, bound);
     }
 }
@@ -497,7 +506,7 @@
 {
     fn vec_znx_add_normal<R>(
         &self,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -507,6 +516,6 @@
     ) where
         R: VecZnxToMut,
     {
-        B::vec_znx_add_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
+        B::vec_znx_add_normal_impl(self, base2k, res, res_col, k, source, sigma, bound);
     }
 }

View File

@@ -3,17 +3,17 @@ use crate::{
         VecZnxBigAdd, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace, VecZnxBigAlloc,
         VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes,
         VecZnxBigFromBytes, VecZnxBigFromSmall, VecZnxBigNegate, VecZnxBigNegateInplace, VecZnxBigNormalize,
-        VecZnxBigNormalizeTmpBytes, VecZnxBigSub, VecZnxBigSubABInplace, VecZnxBigSubBAInplace, VecZnxBigSubSmallA,
-        VecZnxBigSubSmallAInplace, VecZnxBigSubSmallB, VecZnxBigSubSmallBInplace,
+        VecZnxBigNormalizeTmpBytes, VecZnxBigSub, VecZnxBigSubInplace, VecZnxBigSubNegateInplace, VecZnxBigSubSmallA,
+        VecZnxBigSubSmallB, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace,
     },
     layouts::{Backend, Module, Scratch, VecZnxBigOwned, VecZnxBigToMut, VecZnxBigToRef, VecZnxToMut, VecZnxToRef},
     oep::{
         VecZnxBigAddImpl, VecZnxBigAddInplaceImpl, VecZnxBigAddNormalImpl, VecZnxBigAddSmallImpl, VecZnxBigAddSmallInplaceImpl,
         VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl, VecZnxBigAutomorphismInplaceImpl,
         VecZnxBigAutomorphismInplaceTmpBytesImpl, VecZnxBigFromBytesImpl, VecZnxBigFromSmallImpl, VecZnxBigNegateImpl,
-        VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl, VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl,
-        VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl, VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl,
-        VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
+        VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl, VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubImpl,
+        VecZnxBigSubInplaceImpl, VecZnxBigSubNegateInplaceImpl, VecZnxBigSubSmallAImpl, VecZnxBigSubSmallBImpl,
+        VecZnxBigSubSmallInplaceImpl, VecZnxBigSubSmallNegateInplaceImpl,
     },
     source::Source,
 };
@@ -64,7 +64,7 @@ where
 {
     fn vec_znx_big_add_normal<R: VecZnxBigToMut<B>>(
         &self,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -72,7 +72,7 @@
         sigma: f64,
         bound: f64,
     ) {
-        B::add_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
+        B::add_normal_impl(self, base2k, res, res_col, k, source, sigma, bound);
     }
 }
@@ -144,29 +144,29 @@ where
     }
 }
 
-impl<B> VecZnxBigSubABInplace<B> for Module<B>
+impl<B> VecZnxBigSubInplace<B> for Module<B>
 where
-    B: Backend + VecZnxBigSubABInplaceImpl<B>,
+    B: Backend + VecZnxBigSubInplaceImpl<B>,
 {
-    fn vec_znx_big_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<B>,
         A: VecZnxBigToRef<B>,
     {
-        B::vec_znx_big_sub_ab_inplace_impl(self, res, res_col, a, a_col);
+        B::vec_znx_big_sub_inplace_impl(self, res, res_col, a, a_col);
     }
 }
 
-impl<B> VecZnxBigSubBAInplace<B> for Module<B>
+impl<B> VecZnxBigSubNegateInplace<B> for Module<B>
 where
-    B: Backend + VecZnxBigSubBAInplaceImpl<B>,
+    B: Backend + VecZnxBigSubNegateInplaceImpl<B>,
 {
-    fn vec_znx_big_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_negate_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<B>,
         A: VecZnxBigToRef<B>,
     {
-        B::vec_znx_big_sub_ba_inplace_impl(self, res, res_col, a, a_col);
+        B::vec_znx_big_sub_negate_inplace_impl(self, res, res_col, a, a_col);
     }
 }
@@ -184,16 +184,16 @@ where
     }
 }
 
-impl<B> VecZnxBigSubSmallAInplace<B> for Module<B>
+impl<B> VecZnxBigSubSmallInplace<B> for Module<B>
 where
-    B: Backend + VecZnxBigSubSmallAInplaceImpl<B>,
+    B: Backend + VecZnxBigSubSmallInplaceImpl<B>,
 {
-    fn vec_znx_big_sub_small_a_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<B>,
         A: VecZnxToRef,
     {
-        B::vec_znx_big_sub_small_a_inplace_impl(self, res, res_col, a, a_col);
+        B::vec_znx_big_sub_small_inplace_impl(self, res, res_col, a, a_col);
     }
 }
@@ -211,16 +211,16 @@ where
     }
 }
 
-impl<B> VecZnxBigSubSmallBInplace<B> for Module<B>
+impl<B> VecZnxBigSubSmallNegateInplace<B> for Module<B>
 where
-    B: Backend + VecZnxBigSubSmallBInplaceImpl<B>,
+    B: Backend + VecZnxBigSubSmallNegateInplaceImpl<B>,
 {
-    fn vec_znx_big_sub_small_b_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_negate_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<B>,
         A: VecZnxToRef,
     {
-        B::vec_znx_big_sub_small_b_inplace_impl(self, res, res_col, a, a_col);
+        B::vec_znx_big_sub_small_negate_inplace_impl(self, res, res_col, a, a_col);
     }
 }
@@ -264,9 +264,10 @@ where
 {
     fn vec_znx_big_normalize<R, A>(
         &self,
-        basek: usize,
+        res_basek: usize,
         res: &mut R,
         res_col: usize,
+        a_basek: usize,
         a: &A,
         a_col: usize,
         scratch: &mut Scratch<B>,
@@ -274,7 +275,7 @@
     ) where
         R: VecZnxToMut,
        A: VecZnxBigToRef<B>,
     {
-        B::vec_znx_big_normalize_impl(self, basek, res, res_col, a, a_col, scratch);
+        B::vec_znx_big_normalize_impl(self, res_basek, res, res_col, a_basek, a, a_col, scratch);
     }
 }
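
The same split applies to `vec_znx_big_normalize`: the big-coefficient accumulator is read in its own base and folded into a result with a different one. A hedged wrapper sketch (paths as in this file; bases and columns illustrative):

use crate::{
    api::VecZnxBigNormalize,
    layouts::{Backend, Module, Scratch, VecZnxBigToRef, VecZnxToMut},
};

fn fold_big<B, R, A>(module: &Module<B>, res: &mut R, acc: &A, scratch: &mut Scratch<B>)
where
    B: Backend,
    Module<B>: VecZnxBigNormalize<B>,
    R: VecZnxToMut,
    A: VecZnxBigToRef<B>,
{
    // res_basek = 20, a_basek = 12 (illustrative), column 0 on both sides.
    module.vec_znx_big_normalize(20, res, 0, 12, acc, 0, scratch);
}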

View File

@@ -1,7 +1,7 @@
 use crate::{
     api::{
         VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy,
-        VecZnxDftFromBytes, VecZnxDftSub, VecZnxDftSubABInplace, VecZnxDftSubBAInplace, VecZnxDftZero, VecZnxIdftApply,
+        VecZnxDftFromBytes, VecZnxDftSub, VecZnxDftSubInplace, VecZnxDftSubNegateInplace, VecZnxDftZero, VecZnxIdftApply,
         VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
     },
     layouts::{
@@ -10,7 +10,7 @@ use crate::{
     },
     oep::{
         VecZnxDftAddImpl, VecZnxDftAddInplaceImpl, VecZnxDftAllocBytesImpl, VecZnxDftAllocImpl, VecZnxDftApplyImpl,
-        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubABInplaceImpl, VecZnxDftSubBAInplaceImpl, VecZnxDftSubImpl,
+        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubImpl, VecZnxDftSubInplaceImpl, VecZnxDftSubNegateInplaceImpl,
         VecZnxDftZeroImpl, VecZnxIdftApplyConsumeImpl, VecZnxIdftApplyImpl, VecZnxIdftApplyTmpAImpl, VecZnxIdftApplyTmpBytesImpl,
     },
 };
@@ -143,29 +143,29 @@ where
     }
 }
 
-impl<B> VecZnxDftSubABInplace<B> for Module<B>
+impl<B> VecZnxDftSubInplace<B> for Module<B>
 where
-    B: Backend + VecZnxDftSubABInplaceImpl<B>,
+    B: Backend + VecZnxDftSubInplaceImpl<B>,
 {
-    fn vec_znx_dft_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_dft_sub_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<B>,
         A: VecZnxDftToRef<B>,
     {
-        B::vec_znx_dft_sub_ab_inplace_impl(self, res, res_col, a, a_col);
+        B::vec_znx_dft_sub_inplace_impl(self, res, res_col, a, a_col);
     }
 }
 
-impl<B> VecZnxDftSubBAInplace<B> for Module<B>
+impl<B> VecZnxDftSubNegateInplace<B> for Module<B>
 where
-    B: Backend + VecZnxDftSubBAInplaceImpl<B>,
+    B: Backend + VecZnxDftSubNegateInplaceImpl<B>,
 {
-    fn vec_znx_dft_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_dft_sub_negate_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<B>,
         A: VecZnxDftToRef<B>,
     {
-        B::vec_znx_dft_sub_ba_inplace_impl(self, res, res_col, a, a_col);
+        B::vec_znx_dft_sub_negate_inplace_impl(self, res, res_col, a, a_col);
     }
 }

View File

@@ -18,11 +18,11 @@ impl<B> ZnNormalizeInplace<B> for Module<B>
 where
     B: Backend + ZnNormalizeInplaceImpl<B>,
 {
-    fn zn_normalize_inplace<A>(&self, n: usize, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
+    fn zn_normalize_inplace<A>(&self, n: usize, base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
     where
         A: ZnToMut,
     {
-        B::zn_normalize_inplace_impl(n, basek, a, a_col, scratch)
+        B::zn_normalize_inplace_impl(n, base2k, a, a_col, scratch)
     }
 }
@@ -30,11 +30,11 @@ impl<B> ZnFillUniform for Module<B>
 where
     B: Backend + ZnFillUniformImpl<B>,
 {
-    fn zn_fill_uniform<R>(&self, n: usize, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn zn_fill_uniform<R>(&self, n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: ZnToMut,
     {
-        B::zn_fill_uniform_impl(n, basek, res, res_col, source);
+        B::zn_fill_uniform_impl(n, base2k, res, res_col, source);
     }
 }
@@ -45,7 +45,7 @@ where
     fn zn_fill_normal<R>(
         &self,
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -55,7 +55,7 @@
     ) where
         R: ZnToMut,
     {
-        B::zn_fill_normal_impl(n, basek, res, res_col, k, source, sigma, bound);
+        B::zn_fill_normal_impl(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }
@@ -66,7 +66,7 @@ where
     fn zn_add_normal<R>(
         &self,
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -76,6 +76,6 @@
     ) where
         R: ZnToMut,
     {
-        B::zn_add_normal_impl(n, basek, res, res_col, k, source, sigma, bound);
+        B::zn_add_normal_impl(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }
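
For reference, what "normalize" means throughout these hunks: propagate carries so every limb ends up balanced in [-2^(base2k-1), 2^(base2k-1)). A standalone single-column model (most-significant limb first; illustrative, not the backend kernel):

fn normalize_inplace(limbs: &mut [i64], base2k: usize) {
    let half = 1i64 << (base2k - 1);
    let full = 1i64 << base2k;
    let mut carry = 0i64;
    for l in limbs.iter_mut().rev() {
        let v = *l + carry;
        // Centered remainder modulo 2^base2k.
        let mut r = v.rem_euclid(full);
        if r >= half {
            r -= full;
        }
        *l = r;
        carry = (v - r) >> base2k;
    }
    // Any carry past the most-significant limb is dropped in this sketch.
    let _ = carry;
}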