Mirror of https://github.com/arnaucube/poulpy.git
Crates io (#76)

* crates re-organisation
* fixed typo in layout & added test for vmp_apply
* updated dependencies
committed by GitHub
parent dce4d82706
commit a1de248567

poulpy-hal/src/delegates/mod.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
mod module;
mod scratch;
mod svp_ppol;
mod vec_znx;
mod vec_znx_big;
mod vec_znx_dft;
mod vmp_pmat;

poulpy-hal/src/delegates/module.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
use crate::{
    api::ModuleNew,
    layouts::{Backend, Module},
    oep::ModuleNewImpl,
};

impl<B> ModuleNew<B> for Module<B>
where
    B: Backend + ModuleNewImpl<B>,
{
    fn new(n: u64) -> Self {
        B::new_impl(n)
    }
}
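
Every file in this commit follows the same delegation pattern shown in module.rs: a public api::* trait is implemented once for Module<B> (or Scratch<B>/ScratchOwned<B>) by forwarding to a matching oep::*Impl extension point on the backend B. The self-contained toy below mirrors that shape to show why a backend only has to implement the *Impl side; the names mirror the real ones, but this is a sketch, not the actual poulpy-hal API.

// Minimal, self-contained sketch of the delegation pattern above.
// Toy stand-ins for poulpy-hal's Backend / Module / ModuleNew / ModuleNewImpl.
use std::marker::PhantomData;

trait Backend {}

struct Module<B: Backend> {
    n: u64,
    _phantom: PhantomData<B>,
}

// Public API trait (the api::* side).
trait ModuleNew<B: Backend> {
    fn new(n: u64) -> Self;
}

// Backend open extension point (the oep::*Impl side).
trait ModuleNewImpl<B: Backend> {
    fn new_impl(n: u64) -> Module<B>;
}

// The blanket delegate: any backend providing the *Impl gets the API for free.
impl<B> ModuleNew<B> for Module<B>
where
    B: Backend + ModuleNewImpl<B>,
{
    fn new(n: u64) -> Self {
        B::new_impl(n)
    }
}

// A hypothetical backend wiring in its implementation.
struct ToyBackend;
impl Backend for ToyBackend {}
impl ModuleNewImpl<ToyBackend> for ToyBackend {
    fn new_impl(n: u64) -> Module<ToyBackend> {
        Module { n, _phantom: PhantomData }
    }
}

fn main() {
    let module: Module<ToyBackend> = Module::new(1024);
    assert_eq!(module.n, 1024);
}

The blanket impl is what keeps the API crate backend-agnostic: adding a backend is purely additive and requires no change to the api::* traits.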

poulpy-hal/src/delegates/scratch.rs (new file, 235 lines)
@@ -0,0 +1,235 @@
use crate::{
    api::{
        ScratchAvailable, ScratchFromBytes, ScratchOwnedAlloc, ScratchOwnedBorrow, TakeLike, TakeMatZnx, TakeScalarZnx,
        TakeSlice, TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice, TakeVmpPMat,
    },
    layouts::{Backend, DataRef, MatZnx, ScalarZnx, Scratch, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
    oep::{
        ScratchAvailableImpl, ScratchFromBytesImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeLikeImpl, TakeMatZnxImpl,
        TakeScalarZnxImpl, TakeSliceImpl, TakeSvpPPolImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl, TakeVecZnxDftSliceImpl,
        TakeVecZnxImpl, TakeVecZnxSliceImpl, TakeVmpPMatImpl,
    },
};

impl<B> ScratchOwnedAlloc<B> for ScratchOwned<B>
where
    B: Backend + ScratchOwnedAllocImpl<B>,
{
    fn alloc(size: usize) -> Self {
        B::scratch_owned_alloc_impl(size)
    }
}

impl<B> ScratchOwnedBorrow<B> for ScratchOwned<B>
where
    B: Backend + ScratchOwnedBorrowImpl<B>,
{
    fn borrow(&mut self) -> &mut Scratch<B> {
        B::scratch_owned_borrow_impl(self)
    }
}

impl<B> ScratchFromBytes<B> for Scratch<B>
where
    B: Backend + ScratchFromBytesImpl<B>,
{
    fn from_bytes(data: &mut [u8]) -> &mut Scratch<B> {
        B::scratch_from_bytes_impl(data)
    }
}

impl<B> ScratchAvailable for Scratch<B>
where
    B: Backend + ScratchAvailableImpl<B>,
{
    fn available(&self) -> usize {
        B::scratch_available_impl(self)
    }
}

impl<B> TakeSlice for Scratch<B>
where
    B: Backend + TakeSliceImpl<B>,
{
    fn take_slice<T>(&mut self, len: usize) -> (&mut [T], &mut Self) {
        B::take_slice_impl(self, len)
    }
}

impl<B> TakeScalarZnx for Scratch<B>
where
    B: Backend + TakeScalarZnxImpl<B>,
{
    fn take_scalar_znx(&mut self, n: usize, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
        B::take_scalar_znx_impl(self, n, cols)
    }
}

impl<B> TakeSvpPPol<B> for Scratch<B>
where
    B: Backend + TakeSvpPPolImpl<B>,
{
    fn take_svp_ppol(&mut self, n: usize, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self) {
        B::take_svp_ppol_impl(self, n, cols)
    }
}

impl<B> TakeVecZnx for Scratch<B>
where
    B: Backend + TakeVecZnxImpl<B>,
{
    fn take_vec_znx(&mut self, n: usize, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
        B::take_vec_znx_impl(self, n, cols, size)
    }
}

impl<B> TakeVecZnxSlice for Scratch<B>
where
    B: Backend + TakeVecZnxSliceImpl<B>,
{
    fn take_vec_znx_slice(&mut self, len: usize, n: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
        B::take_vec_znx_slice_impl(self, len, n, cols, size)
    }
}

impl<B> TakeVecZnxBig<B> for Scratch<B>
where
    B: Backend + TakeVecZnxBigImpl<B>,
{
    fn take_vec_znx_big(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self) {
        B::take_vec_znx_big_impl(self, n, cols, size)
    }
}

impl<B> TakeVecZnxDft<B> for Scratch<B>
where
    B: Backend + TakeVecZnxDftImpl<B>,
{
    fn take_vec_znx_dft(&mut self, n: usize, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self) {
        B::take_vec_znx_dft_impl(self, n, cols, size)
    }
}

impl<B> TakeVecZnxDftSlice<B> for Scratch<B>
where
    B: Backend + TakeVecZnxDftSliceImpl<B>,
{
    fn take_vec_znx_dft_slice(
        &mut self,
        len: usize,
        n: usize,
        cols: usize,
        size: usize,
    ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self) {
        B::take_vec_znx_dft_slice_impl(self, len, n, cols, size)
    }
}

impl<B> TakeVmpPMat<B> for Scratch<B>
where
    B: Backend + TakeVmpPMatImpl<B>,
{
    fn take_vmp_pmat(
        &mut self,
        n: usize,
        rows: usize,
        cols_in: usize,
        cols_out: usize,
        size: usize,
    ) -> (VmpPMat<&mut [u8], B>, &mut Self) {
        B::take_vmp_pmat_impl(self, n, rows, cols_in, cols_out, size)
    }
}

impl<B> TakeMatZnx for Scratch<B>
where
    B: Backend + TakeMatZnxImpl<B>,
{
    fn take_mat_znx(
        &mut self,
        n: usize,
        rows: usize,
        cols_in: usize,
        cols_out: usize,
        size: usize,
    ) -> (MatZnx<&mut [u8]>, &mut Self) {
        B::take_mat_znx_impl(self, n, rows, cols_in, cols_out, size)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, ScalarZnx<D>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, ScalarZnx<D>, Output = ScalarZnx<&'a mut [u8]>>,
    D: DataRef,
{
    type Output = ScalarZnx<&'a mut [u8]>;
    fn take_like(&'a mut self, template: &ScalarZnx<D>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, SvpPPol<D, B>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, SvpPPol<D, B>, Output = SvpPPol<&'a mut [u8], B>>,
    D: DataRef,
{
    type Output = SvpPPol<&'a mut [u8], B>;
    fn take_like(&'a mut self, template: &SvpPPol<D, B>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, VecZnx<D>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, VecZnx<D>, Output = VecZnx<&'a mut [u8]>>,
    D: DataRef,
{
    type Output = VecZnx<&'a mut [u8]>;
    fn take_like(&'a mut self, template: &VecZnx<D>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, VecZnxBig<D, B>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, VecZnxBig<D, B>, Output = VecZnxBig<&'a mut [u8], B>>,
    D: DataRef,
{
    type Output = VecZnxBig<&'a mut [u8], B>;
    fn take_like(&'a mut self, template: &VecZnxBig<D, B>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, VecZnxDft<D, B>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, VecZnxDft<D, B>, Output = VecZnxDft<&'a mut [u8], B>>,
    D: DataRef,
{
    type Output = VecZnxDft<&'a mut [u8], B>;
    fn take_like(&'a mut self, template: &VecZnxDft<D, B>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, MatZnx<D>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, MatZnx<D>, Output = MatZnx<&'a mut [u8]>>,
    D: DataRef,
{
    type Output = MatZnx<&'a mut [u8]>;
    fn take_like(&'a mut self, template: &MatZnx<D>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}

impl<'a, B: Backend, D> TakeLike<'a, B, VmpPMat<D, B>> for Scratch<B>
where
    B: TakeLikeImpl<'a, B, VmpPMat<D, B>, Output = VmpPMat<&'a mut [u8], B>>,
    D: DataRef,
{
    type Output = VmpPMat<&'a mut [u8], B>;
    fn take_like(&'a mut self, template: &VmpPMat<D, B>) -> (Self::Output, &'a mut Self) {
        B::take_like_impl(self, template)
    }
}
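
Each take_* method above carves an object out of the scratch arena and returns it together with the remaining scratch, so callers thread a borrow chain through successive allocations. Below is a sketch of that calling pattern, generic over any backend that wires up the needed *Impl points; the sizes are illustrative and the snippet has not been compiled against the crate.

// Sketch: carving a slice and a VecZnx out of one scratch arena.
use poulpy_hal::{
    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, TakeSlice, TakeVecZnx},
    layouts::{Backend, ScratchOwned},
    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeSliceImpl, TakeVecZnxImpl},
};

fn use_scratch<B>(n: usize)
where
    B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B> + TakeSliceImpl<B> + TakeVecZnxImpl<B>,
{
    // One up-front allocation backs everything taken below (size is illustrative).
    let mut owned: ScratchOwned<B> = ScratchOwned::alloc(1 << 20);
    let scratch = owned.borrow();

    // Each take_* splits the arena: the carved item, plus the remaining scratch.
    let (coeffs, scratch) = scratch.take_slice::<i64>(n);
    let (tmp, _rest) = scratch.take_vec_znx(n, 2, 3); // n, cols, size (illustrative)

    // `coeffs` and `tmp` borrow disjoint regions of the same arena.
    let _ = (coeffs, tmp);
}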

poulpy-hal/src/delegates/svp_ppol.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
use crate::{
    api::{SvpApply, SvpApplyInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPPolFromBytes, SvpPrepare},
    layouts::{Backend, Module, ScalarZnxToRef, SvpPPolOwned, SvpPPolToMut, SvpPPolToRef, VecZnxDftToMut, VecZnxDftToRef},
    oep::{SvpApplyImpl, SvpApplyInplaceImpl, SvpPPolAllocBytesImpl, SvpPPolAllocImpl, SvpPPolFromBytesImpl, SvpPrepareImpl},
};

impl<B> SvpPPolFromBytes<B> for Module<B>
where
    B: Backend + SvpPPolFromBytesImpl<B>,
{
    fn svp_ppol_from_bytes(&self, n: usize, cols: usize, bytes: Vec<u8>) -> SvpPPolOwned<B> {
        B::svp_ppol_from_bytes_impl(n, cols, bytes)
    }
}

impl<B> SvpPPolAlloc<B> for Module<B>
where
    B: Backend + SvpPPolAllocImpl<B>,
{
    fn svp_ppol_alloc(&self, n: usize, cols: usize) -> SvpPPolOwned<B> {
        B::svp_ppol_alloc_impl(n, cols)
    }
}

impl<B> SvpPPolAllocBytes for Module<B>
where
    B: Backend + SvpPPolAllocBytesImpl<B>,
{
    fn svp_ppol_alloc_bytes(&self, n: usize, cols: usize) -> usize {
        B::svp_ppol_alloc_bytes_impl(n, cols)
    }
}

impl<B> SvpPrepare<B> for Module<B>
where
    B: Backend + SvpPrepareImpl<B>,
{
    fn svp_prepare<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: SvpPPolToMut<B>,
        A: ScalarZnxToRef,
    {
        B::svp_prepare_impl(self, res, res_col, a, a_col);
    }
}

impl<B> SvpApply<B> for Module<B>
where
    B: Backend + SvpApplyImpl<B>,
{
    fn svp_apply<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: SvpPPolToRef<B>,
        C: VecZnxDftToRef<B>,
    {
        B::svp_apply_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> SvpApplyInplace<B> for Module<B>
where
    B: Backend + SvpApplyInplaceImpl,
{
    fn svp_apply_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: SvpPPolToRef<B>,
    {
        B::svp_apply_inplace_impl(self, res, res_col, a, a_col);
    }
}
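
The intended flow here is prepare-then-apply: svp_prepare converts a ScalarZnx into the backend's prepared form, after which svp_apply performs the product against a DFT vector. A hedged sketch, assuming (as the naming suggests) that SvpPPolOwned implements the SvpPPolToMut/SvpPPolToRef conversion traits; it has not been compiled against the crate.

// Sketch: res[0] = prepared(s)[0] * a[0] in the DFT domain.
use poulpy_hal::{
    api::{SvpApply, SvpPPolAlloc, SvpPrepare},
    layouts::{Backend, Module, ScalarZnxToRef, VecZnxDftToMut, VecZnxDftToRef},
    oep::{SvpApplyImpl, SvpPPolAllocImpl, SvpPrepareImpl},
};

fn scalar_times_vector<B, S, A, R>(module: &Module<B>, n: usize, s: &S, a: &A, res: &mut R)
where
    B: Backend + SvpPPolAllocImpl<B> + SvpPrepareImpl<B> + SvpApplyImpl<B>,
    S: ScalarZnxToRef,
    A: VecZnxDftToRef<B>,
    R: VecZnxDftToMut<B>,
{
    // Convert the scalar polynomial into the backend's prepared form.
    let mut ppol = module.svp_ppol_alloc(n, 1);
    module.svp_prepare(&mut ppol, 0, s, 0);

    // Product in the DFT domain, column 0 throughout.
    module.svp_apply(res, 0, &ppol, 0, a, 0);
}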

poulpy-hal/src/delegates/vec_znx.rs (new file, 414 lines)
@@ -0,0 +1,414 @@
use crate::{
    api::{
        VecZnxAdd, VecZnxAddDistF64, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism,
        VecZnxAutomorphismInplace, VecZnxCopy, VecZnxFillDistF64, VecZnxFillNormal, VecZnxFillUniform, VecZnxLshInplace,
        VecZnxMerge, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize,
        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSplit,
        VecZnxSub, VecZnxSubABInplace, VecZnxSubBAInplace, VecZnxSubScalarInplace, VecZnxSwithcDegree,
    },
    layouts::{Backend, Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
    oep::{
        VecZnxAddDistF64Impl, VecZnxAddImpl, VecZnxAddInplaceImpl, VecZnxAddNormalImpl, VecZnxAddScalarInplaceImpl,
        VecZnxAutomorphismImpl, VecZnxAutomorphismInplaceImpl, VecZnxCopyImpl, VecZnxFillDistF64Impl, VecZnxFillNormalImpl,
        VecZnxFillUniformImpl, VecZnxLshInplaceImpl, VecZnxMergeImpl, VecZnxMulXpMinusOneImpl, VecZnxMulXpMinusOneInplaceImpl,
        VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl, VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl,
        VecZnxRotateImpl, VecZnxRotateInplaceImpl, VecZnxRshInplaceImpl, VecZnxSplitImpl, VecZnxSubABInplaceImpl,
        VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarInplaceImpl, VecZnxSwithcDegreeImpl,
    },
    source::Source,
};

impl<B> VecZnxNormalizeTmpBytes for Module<B>
where
    B: Backend + VecZnxNormalizeTmpBytesImpl<B>,
{
    fn vec_znx_normalize_tmp_bytes(&self, n: usize) -> usize {
        B::vec_znx_normalize_tmp_bytes_impl(self, n)
    }
}

impl<B> VecZnxNormalize<B> for Module<B>
where
    B: Backend + VecZnxNormalizeImpl<B>,
{
    fn vec_znx_normalize<R, A>(&self, basek: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_normalize_impl(self, basek, res, res_col, a, a_col, scratch)
    }
}

impl<B> VecZnxNormalizeInplace<B> for Module<B>
where
    B: Backend + VecZnxNormalizeInplaceImpl<B>,
{
    fn vec_znx_normalize_inplace<A>(&self, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<B>)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_normalize_inplace_impl(self, basek, a, a_col, scratch)
    }
}

impl<B> VecZnxAdd for Module<B>
where
    B: Backend + VecZnxAddImpl<B>,
{
    fn vec_znx_add<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
        C: VecZnxToRef,
    {
        B::vec_znx_add_impl(self, res, res_col, a, a_col, b, b_col)
    }
}

impl<B> VecZnxAddInplace for Module<B>
where
    B: Backend + VecZnxAddInplaceImpl<B>,
{
    fn vec_znx_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_add_inplace_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxAddScalarInplace for Module<B>
where
    B: Backend + VecZnxAddScalarInplaceImpl<B>,
{
    fn vec_znx_add_scalar_inplace<R, A>(&self, res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
    {
        B::vec_znx_add_scalar_inplace_impl(self, res, res_col, res_limb, a, a_col)
    }
}

impl<B> VecZnxSub for Module<B>
where
    B: Backend + VecZnxSubImpl<B>,
{
    fn vec_znx_sub<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
        C: VecZnxToRef,
    {
        B::vec_znx_sub_impl(self, res, res_col, a, a_col, b, b_col)
    }
}

impl<B> VecZnxSubABInplace for Module<B>
where
    B: Backend + VecZnxSubABInplaceImpl<B>,
{
    fn vec_znx_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_sub_ab_inplace_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxSubBAInplace for Module<B>
where
    B: Backend + VecZnxSubBAInplaceImpl<B>,
{
    fn vec_znx_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_sub_ba_inplace_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxSubScalarInplace for Module<B>
where
    B: Backend + VecZnxSubScalarInplaceImpl<B>,
{
    fn vec_znx_sub_scalar_inplace<R, A>(&self, res: &mut R, res_col: usize, res_limb: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: ScalarZnxToRef,
    {
        B::vec_znx_sub_scalar_inplace_impl(self, res, res_col, res_limb, a, a_col)
    }
}

impl<B> VecZnxNegate for Module<B>
where
    B: Backend + VecZnxNegateImpl<B>,
{
    fn vec_znx_negate<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_negate_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxNegateInplace for Module<B>
where
    B: Backend + VecZnxNegateInplaceImpl<B>,
{
    fn vec_znx_negate_inplace<A>(&self, a: &mut A, a_col: usize)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_negate_inplace_impl(self, a, a_col)
    }
}

impl<B> VecZnxLshInplace for Module<B>
where
    B: Backend + VecZnxLshInplaceImpl<B>,
{
    fn vec_znx_lsh_inplace<A>(&self, basek: usize, k: usize, a: &mut A)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_lsh_inplace_impl(self, basek, k, a)
    }
}

impl<B> VecZnxRshInplace for Module<B>
where
    B: Backend + VecZnxRshInplaceImpl<B>,
{
    fn vec_znx_rsh_inplace<A>(&self, basek: usize, k: usize, a: &mut A)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_rsh_inplace_impl(self, basek, k, a)
    }
}

impl<B> VecZnxRotate for Module<B>
where
    B: Backend + VecZnxRotateImpl<B>,
{
    fn vec_znx_rotate<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_rotate_impl(self, k, res, res_col, a, a_col)
    }
}

impl<B> VecZnxRotateInplace for Module<B>
where
    B: Backend + VecZnxRotateInplaceImpl<B>,
{
    fn vec_znx_rotate_inplace<A>(&self, k: i64, a: &mut A, a_col: usize)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_rotate_inplace_impl(self, k, a, a_col)
    }
}

impl<B> VecZnxAutomorphism for Module<B>
where
    B: Backend + VecZnxAutomorphismImpl<B>,
{
    fn vec_znx_automorphism<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_automorphism_impl(self, k, res, res_col, a, a_col)
    }
}

impl<B> VecZnxAutomorphismInplace for Module<B>
where
    B: Backend + VecZnxAutomorphismInplaceImpl<B>,
{
    fn vec_znx_automorphism_inplace<A>(&self, k: i64, a: &mut A, a_col: usize)
    where
        A: VecZnxToMut,
    {
        B::vec_znx_automorphism_inplace_impl(self, k, a, a_col)
    }
}

impl<B> VecZnxMulXpMinusOne for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneImpl<B>,
{
    fn vec_znx_mul_xp_minus_one<R, A>(&self, p: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_mul_xp_minus_one_impl(self, p, res, res_col, a, a_col);
    }
}

impl<B> VecZnxMulXpMinusOneInplace for Module<B>
where
    B: Backend + VecZnxMulXpMinusOneInplaceImpl<B>,
{
    fn vec_znx_mul_xp_minus_one_inplace<R>(&self, p: i64, res: &mut R, res_col: usize)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_mul_xp_minus_one_inplace_impl(self, p, res, res_col);
    }
}

impl<B> VecZnxSplit<B> for Module<B>
where
    B: Backend + VecZnxSplitImpl<B>,
{
    fn vec_znx_split<R, A>(&self, res: &mut [R], res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_split_impl(self, res, res_col, a, a_col, scratch)
    }
}

impl<B> VecZnxMerge for Module<B>
where
    B: Backend + VecZnxMergeImpl<B>,
{
    fn vec_znx_merge<R, A>(&self, res: &mut R, res_col: usize, a: &[A], a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_merge_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxSwithcDegree for Module<B>
where
    B: Backend + VecZnxSwithcDegreeImpl<B>,
{
    fn vec_znx_switch_degree<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_switch_degree_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxCopy for Module<B>
where
    B: Backend + VecZnxCopyImpl<B>,
{
    fn vec_znx_copy<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxToMut,
        A: VecZnxToRef,
    {
        B::vec_znx_copy_impl(self, res, res_col, a, a_col)
    }
}

impl<B> VecZnxFillUniform for Module<B>
where
    B: Backend + VecZnxFillUniformImpl<B>,
{
    fn vec_znx_fill_uniform<R>(&self, basek: usize, res: &mut R, res_col: usize, k: usize, source: &mut Source)
    where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_uniform_impl(self, basek, res, res_col, k, source);
    }
}

impl<B> VecZnxFillDistF64 for Module<B>
where
    B: Backend + VecZnxFillDistF64Impl<B>,
{
    fn vec_znx_fill_dist_f64<R, D: rand::prelude::Distribution<f64>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
    }
}

impl<B> VecZnxAddDistF64 for Module<B>
where
    B: Backend + VecZnxAddDistF64Impl<B>,
{
    fn vec_znx_add_dist_f64<R, D: rand::prelude::Distribution<f64>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_add_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
    }
}

impl<B> VecZnxFillNormal for Module<B>
where
    B: Backend + VecZnxFillNormalImpl<B>,
{
    fn vec_znx_fill_normal<R>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_fill_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}

impl<B> VecZnxAddNormal for Module<B>
where
    B: Backend + VecZnxAddNormalImpl<B>,
{
    fn vec_znx_add_normal<R>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) where
        R: VecZnxToMut,
    {
        B::vec_znx_add_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}
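
A typical combination of these delegates is arithmetic followed by renormalization to the base-2^basek limb representation, with the backend queried for the temporary space it needs. A sketch under the same caveats as above (column indices fixed to 0 for brevity; not compiled against the crate):

// Sketch: res[0] = a[0] + b[0], then renormalize res in place.
use poulpy_hal::{
    api::{ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxAdd, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
    layouts::{Backend, Module, ScratchOwned, VecZnxToMut, VecZnxToRef},
    oep::{
        ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, VecZnxAddImpl, VecZnxNormalizeInplaceImpl,
        VecZnxNormalizeTmpBytesImpl,
    },
};

fn add_and_normalize<B, R, A, C>(module: &Module<B>, n: usize, basek: usize, res: &mut R, a: &A, b: &C)
where
    B: Backend
        + ScratchOwnedAllocImpl<B>
        + ScratchOwnedBorrowImpl<B>
        + VecZnxAddImpl<B>
        + VecZnxNormalizeTmpBytesImpl<B>
        + VecZnxNormalizeInplaceImpl<B>,
    R: VecZnxToMut,
    A: VecZnxToRef,
    C: VecZnxToRef,
{
    module.vec_znx_add(res, 0, a, 0, b, 0);

    // Normalization needs temporary space; ask the backend how much.
    let mut scratch = ScratchOwned::<B>::alloc(module.vec_znx_normalize_tmp_bytes(n));
    module.vec_znx_normalize_inplace(basek, res, 0, scratch.borrow());
}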

poulpy-hal/src/delegates/vec_znx_big.rs (new file, 334 lines)
@@ -0,0 +1,334 @@
use rand_distr::Distribution;

use crate::{
    api::{
        VecZnxBigAdd, VecZnxBigAddDistF64, VecZnxBigAddInplace, VecZnxBigAddNormal, VecZnxBigAddSmall, VecZnxBigAddSmallInplace,
        VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigFillDistF64,
        VecZnxBigFillNormal, VecZnxBigFromBytes, VecZnxBigNegateInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
        VecZnxBigSub, VecZnxBigSubABInplace, VecZnxBigSubBAInplace, VecZnxBigSubSmallA, VecZnxBigSubSmallAInplace,
        VecZnxBigSubSmallB, VecZnxBigSubSmallBInplace,
    },
    layouts::{Backend, Module, Scratch, VecZnxBigOwned, VecZnxBigToMut, VecZnxBigToRef, VecZnxToMut, VecZnxToRef},
    oep::{
        VecZnxBigAddDistF64Impl, VecZnxBigAddImpl, VecZnxBigAddInplaceImpl, VecZnxBigAddNormalImpl, VecZnxBigAddSmallImpl,
        VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
        VecZnxBigAutomorphismInplaceImpl, VecZnxBigFillDistF64Impl, VecZnxBigFillNormalImpl, VecZnxBigFromBytesImpl,
        VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl, VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl,
        VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl, VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl,
        VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
    },
    source::Source,
};

impl<B> VecZnxBigAlloc<B> for Module<B>
where
    B: Backend + VecZnxBigAllocImpl<B>,
{
    fn vec_znx_big_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxBigOwned<B> {
        B::vec_znx_big_alloc_impl(n, cols, size)
    }
}

impl<B> VecZnxBigFromBytes<B> for Module<B>
where
    B: Backend + VecZnxBigFromBytesImpl<B>,
{
    fn vec_znx_big_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxBigOwned<B> {
        B::vec_znx_big_from_bytes_impl(n, cols, size, bytes)
    }
}

impl<B> VecZnxBigAllocBytes for Module<B>
where
    B: Backend + VecZnxBigAllocBytesImpl<B>,
{
    fn vec_znx_big_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize {
        B::vec_znx_big_alloc_bytes_impl(n, cols, size)
    }
}

impl<B> VecZnxBigAddDistF64<B> for Module<B>
where
    B: Backend + VecZnxBigAddDistF64Impl<B>,
{
    fn vec_znx_big_add_dist_f64<R: VecZnxBigToMut<B>, D: Distribution<f64>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) {
        B::add_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
    }
}

impl<B> VecZnxBigAddNormal<B> for Module<B>
where
    B: Backend + VecZnxBigAddNormalImpl<B>,
{
    fn vec_znx_big_add_normal<R: VecZnxBigToMut<B>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) {
        B::add_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}

impl<B> VecZnxBigFillDistF64<B> for Module<B>
where
    B: Backend + VecZnxBigFillDistF64Impl<B>,
{
    fn vec_znx_big_fill_dist_f64<R: VecZnxBigToMut<B>, D: Distribution<f64>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
    ) {
        B::fill_dist_f64_impl(self, basek, res, res_col, k, source, dist, bound);
    }
}

impl<B> VecZnxBigFillNormal<B> for Module<B>
where
    B: Backend + VecZnxBigFillNormalImpl<B>,
{
    fn vec_znx_big_fill_normal<R: VecZnxBigToMut<B>>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        k: usize,
        source: &mut Source,
        sigma: f64,
        bound: f64,
    ) {
        B::fill_normal_impl(self, basek, res, res_col, k, source, sigma, bound);
    }
}

impl<B> VecZnxBigAdd<B> for Module<B>
where
    B: Backend + VecZnxBigAddImpl<B>,
{
    fn vec_znx_big_add<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_add_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigAddInplace<B> for Module<B>
where
    B: Backend + VecZnxBigAddInplaceImpl<B>,
{
    fn vec_znx_big_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_add_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigAddSmall<B> for Module<B>
where
    B: Backend + VecZnxBigAddSmallImpl<B>,
{
    fn vec_znx_big_add_small<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxToRef,
    {
        B::vec_znx_big_add_small_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigAddSmallInplace<B> for Module<B>
where
    B: Backend + VecZnxBigAddSmallInplaceImpl<B>,
{
    fn vec_znx_big_add_small_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_big_add_small_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSub<B> for Module<B>
where
    B: Backend + VecZnxBigSubImpl<B>,
{
    fn vec_znx_big_sub<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigSubABInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubABInplaceImpl<B>,
{
    fn vec_znx_big_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_ab_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSubBAInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubBAInplaceImpl<B>,
{
    fn vec_znx_big_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_ba_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSubSmallA<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallAImpl<B>,
{
    fn vec_znx_big_sub_small_a<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
        C: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_sub_small_a_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigSubSmallAInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallAInplaceImpl<B>,
{
    fn vec_znx_big_sub_small_a_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_big_sub_small_a_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigSubSmallB<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallBImpl<B>,
{
    fn vec_znx_big_sub_small_b<R, A, C>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &C, b_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
        C: VecZnxToRef,
    {
        B::vec_znx_big_sub_small_b_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxBigSubSmallBInplace<B> for Module<B>
where
    B: Backend + VecZnxBigSubSmallBInplaceImpl<B>,
{
    fn vec_znx_big_sub_small_b_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_big_sub_small_b_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigNegateInplace<B> for Module<B>
where
    B: Backend + VecZnxBigNegateInplaceImpl<B>,
{
    fn vec_znx_big_negate_inplace<A>(&self, a: &mut A, a_col: usize)
    where
        A: VecZnxBigToMut<B>,
    {
        B::vec_znx_big_negate_inplace_impl(self, a, a_col);
    }
}

impl<B> VecZnxBigNormalizeTmpBytes for Module<B>
where
    B: Backend + VecZnxBigNormalizeTmpBytesImpl<B>,
{
    fn vec_znx_big_normalize_tmp_bytes(&self, n: usize) -> usize {
        B::vec_znx_big_normalize_tmp_bytes_impl(self, n)
    }
}

impl<B> VecZnxBigNormalize<B> for Module<B>
where
    B: Backend + VecZnxBigNormalizeImpl<B>,
{
    fn vec_znx_big_normalize<R, A>(
        &self,
        basek: usize,
        res: &mut R,
        res_col: usize,
        a: &A,
        a_col: usize,
        scratch: &mut Scratch<B>,
    ) where
        R: VecZnxToMut,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_normalize_impl(self, basek, res, res_col, a, a_col, scratch);
    }
}

impl<B> VecZnxBigAutomorphism<B> for Module<B>
where
    B: Backend + VecZnxBigAutomorphismImpl<B>,
{
    fn vec_znx_big_automorphism<R, A>(&self, k: i64, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxBigToRef<B>,
    {
        B::vec_znx_big_automorphism_impl(self, k, res, res_col, a, a_col);
    }
}

impl<B> VecZnxBigAutomorphismInplace<B> for Module<B>
where
    B: Backend + VecZnxBigAutomorphismInplaceImpl<B>,
{
    fn vec_znx_big_automorphism_inplace<A>(&self, k: i64, a: &mut A, a_col: usize)
    where
        A: VecZnxBigToMut<B>,
    {
        B::vec_znx_big_automorphism_inplace_impl(self, k, a, a_col);
    }
}
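
VecZnxBig acts as the unreduced accumulator type: small vectors can be added into it without renormalizing each time, and vec_znx_big_normalize folds the result back into a small VecZnx at the end. A sketch of that accumulate-then-fold pattern, assuming the accumulator type implements both the VecZnxBigToMut and VecZnxBigToRef conversions (not compiled against the crate):

// Sketch: acc[0] += a[0] (unreduced), then res[0] = normalize(acc[0]).
use poulpy_hal::{
    api::{VecZnxBigAddSmallInplace, VecZnxBigNormalize},
    layouts::{Backend, Module, Scratch, VecZnxBigToMut, VecZnxBigToRef, VecZnxToMut, VecZnxToRef},
    oep::{VecZnxBigAddSmallInplaceImpl, VecZnxBigNormalizeImpl},
};

fn accumulate_and_fold<B, Acc, A, R>(
    module: &Module<B>,
    basek: usize,
    acc: &mut Acc,
    a: &A,
    res: &mut R,
    scratch: &mut Scratch<B>,
) where
    B: Backend + VecZnxBigAddSmallInplaceImpl<B> + VecZnxBigNormalizeImpl<B>,
    Acc: VecZnxBigToMut<B> + VecZnxBigToRef<B>,
    A: VecZnxToRef,
    R: VecZnxToMut,
{
    // Add without carry propagation; limbs may exceed the basek range here.
    module.vec_znx_big_add_small_inplace(acc, 0, a, 0);

    // Fold the accumulator back into a normalized small VecZnx.
    module.vec_znx_big_normalize(basek, res, 0, &*acc, 0, scratch);
}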

poulpy-hal/src/delegates/vec_znx_dft.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
use crate::{
    api::{
        VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftCopy, VecZnxDftFromBytes,
        VecZnxDftFromVecZnx, VecZnxDftSub, VecZnxDftSubABInplace, VecZnxDftSubBAInplace, VecZnxDftToVecZnxBig,
        VecZnxDftToVecZnxBigConsume, VecZnxDftToVecZnxBigTmpA, VecZnxDftToVecZnxBigTmpBytes, VecZnxDftZero,
    },
    layouts::{
        Backend, Data, Module, Scratch, VecZnxBig, VecZnxBigToMut, VecZnxDft, VecZnxDftOwned, VecZnxDftToMut, VecZnxDftToRef,
        VecZnxToRef,
    },
    oep::{
        VecZnxDftAddImpl, VecZnxDftAddInplaceImpl, VecZnxDftAllocBytesImpl, VecZnxDftAllocImpl, VecZnxDftCopyImpl,
        VecZnxDftFromBytesImpl, VecZnxDftFromVecZnxImpl, VecZnxDftSubABInplaceImpl, VecZnxDftSubBAInplaceImpl, VecZnxDftSubImpl,
        VecZnxDftToVecZnxBigConsumeImpl, VecZnxDftToVecZnxBigImpl, VecZnxDftToVecZnxBigTmpAImpl,
        VecZnxDftToVecZnxBigTmpBytesImpl, VecZnxDftZeroImpl,
    },
};

impl<B> VecZnxDftFromBytes<B> for Module<B>
where
    B: Backend + VecZnxDftFromBytesImpl<B>,
{
    fn vec_znx_dft_from_bytes(&self, n: usize, cols: usize, size: usize, bytes: Vec<u8>) -> VecZnxDftOwned<B> {
        B::vec_znx_dft_from_bytes_impl(n, cols, size, bytes)
    }
}

impl<B> VecZnxDftAllocBytes for Module<B>
where
    B: Backend + VecZnxDftAllocBytesImpl<B>,
{
    fn vec_znx_dft_alloc_bytes(&self, n: usize, cols: usize, size: usize) -> usize {
        B::vec_znx_dft_alloc_bytes_impl(n, cols, size)
    }
}

impl<B> VecZnxDftAlloc<B> for Module<B>
where
    B: Backend + VecZnxDftAllocImpl<B>,
{
    fn vec_znx_dft_alloc(&self, n: usize, cols: usize, size: usize) -> VecZnxDftOwned<B> {
        B::vec_znx_dft_alloc_impl(n, cols, size)
    }
}

impl<B> VecZnxDftToVecZnxBigTmpBytes for Module<B>
where
    B: Backend + VecZnxDftToVecZnxBigTmpBytesImpl<B>,
{
    fn vec_znx_dft_to_vec_znx_big_tmp_bytes(&self, n: usize) -> usize {
        B::vec_znx_dft_to_vec_znx_big_tmp_bytes_impl(self, n)
    }
}

impl<B> VecZnxDftToVecZnxBig<B> for Module<B>
where
    B: Backend + VecZnxDftToVecZnxBigImpl<B>,
{
    fn vec_znx_dft_to_vec_znx_big<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_to_vec_znx_big_impl(self, res, res_col, a, a_col, scratch);
    }
}

impl<B> VecZnxDftToVecZnxBigTmpA<B> for Module<B>
where
    B: Backend + VecZnxDftToVecZnxBigTmpAImpl<B>,
{
    fn vec_znx_dft_to_vec_znx_big_tmp_a<R, A>(&self, res: &mut R, res_col: usize, a: &mut A, a_col: usize)
    where
        R: VecZnxBigToMut<B>,
        A: VecZnxDftToMut<B>,
    {
        B::vec_znx_dft_to_vec_znx_big_tmp_a_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxDftToVecZnxBigConsume<B> for Module<B>
where
    B: Backend + VecZnxDftToVecZnxBigConsumeImpl<B>,
{
    fn vec_znx_dft_to_vec_znx_big_consume<D: Data>(&self, a: VecZnxDft<D, B>) -> VecZnxBig<D, B>
    where
        VecZnxDft<D, B>: VecZnxDftToMut<B>,
    {
        B::vec_znx_dft_to_vec_znx_big_consume_impl(self, a)
    }
}

impl<B> VecZnxDftFromVecZnx<B> for Module<B>
where
    B: Backend + VecZnxDftFromVecZnxImpl<B>,
{
    fn vec_znx_dft_from_vec_znx<R, A>(&self, step: usize, offset: usize, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxToRef,
    {
        B::vec_znx_dft_from_vec_znx_impl(self, step, offset, res, res_col, a, a_col);
    }
}

impl<B> VecZnxDftAdd<B> for Module<B>
where
    B: Backend + VecZnxDftAddImpl<B>,
{
    fn vec_znx_dft_add<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
        D: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_add_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxDftAddInplace<B> for Module<B>
where
    B: Backend + VecZnxDftAddInplaceImpl<B>,
{
    fn vec_znx_dft_add_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_add_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxDftSub<B> for Module<B>
where
    B: Backend + VecZnxDftSubImpl<B>,
{
    fn vec_znx_dft_sub<R, A, D>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize, b: &D, b_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
        D: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_sub_impl(self, res, res_col, a, a_col, b, b_col);
    }
}

impl<B> VecZnxDftSubABInplace<B> for Module<B>
where
    B: Backend + VecZnxDftSubABInplaceImpl<B>,
{
    fn vec_znx_dft_sub_ab_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_sub_ab_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxDftSubBAInplace<B> for Module<B>
where
    B: Backend + VecZnxDftSubBAInplaceImpl<B>,
{
    fn vec_znx_dft_sub_ba_inplace<R, A>(&self, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_sub_ba_inplace_impl(self, res, res_col, a, a_col);
    }
}

impl<B> VecZnxDftCopy<B> for Module<B>
where
    B: Backend + VecZnxDftCopyImpl<B>,
{
    fn vec_znx_dft_copy<R, A>(&self, step: usize, offset: usize, res: &mut R, res_col: usize, a: &A, a_col: usize)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
    {
        B::vec_znx_dft_copy_impl(self, step, offset, res, res_col, a, a_col);
    }
}

impl<B> VecZnxDftZero<B> for Module<B>
where
    B: Backend + VecZnxDftZeroImpl<B>,
{
    fn vec_znx_dft_zero<R>(&self, res: &mut R)
    where
        R: VecZnxDftToMut<B>,
    {
        B::vec_znx_dft_zero_impl(self, res);
    }
}
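
These delegates cover the forward transform (vec_znx_dft_from_vec_znx), the inverse (the vec_znx_dft_to_vec_znx_big* family), and DFT-domain arithmetic. The consume variant takes the DFT vector by value rather than by reference, which suggests it reuses the buffer's storage for the big result. A sketch of a forward/inverse round trip, assuming (from the naming) that VecZnxDftOwned is the owned-data alias of VecZnxDft and implements the To{Mut,Ref} conversions; the step/offset semantics are backend-defined and the values below are illustrative:

// Sketch: forward-transform one column of `a`, then recover it as a big vector.
use poulpy_hal::{
    api::{VecZnxDftAlloc, VecZnxDftFromVecZnx, VecZnxDftToVecZnxBigConsume},
    layouts::{Backend, Module, VecZnxToRef},
    oep::{VecZnxDftAllocImpl, VecZnxDftFromVecZnxImpl, VecZnxDftToVecZnxBigConsumeImpl},
};

fn dft_roundtrip<B, A>(module: &Module<B>, n: usize, a: &A)
where
    B: Backend + VecZnxDftAllocImpl<B> + VecZnxDftFromVecZnxImpl<B> + VecZnxDftToVecZnxBigConsumeImpl<B>,
    A: VecZnxToRef,
{
    // One column, three limbs (illustrative sizes).
    let mut a_dft = module.vec_znx_dft_alloc(n, 1, 3);

    // Forward transform with step = 1, offset = 0 (illustrative).
    module.vec_znx_dft_from_vec_znx(1, 0, &mut a_dft, 0, a, 0);

    // Inverse; consumes `a_dft`, presumably reusing its storage.
    let _a_big = module.vec_znx_dft_to_vec_znx_big_consume(a_dft);
}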

poulpy-hal/src/delegates/vmp_pmat.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
use crate::{
    api::{
        VmpApply, VmpApplyAdd, VmpApplyAddTmpBytes, VmpApplyTmpBytes, VmpPMatAlloc, VmpPMatAllocBytes, VmpPMatFromBytes,
        VmpPrepare, VmpPrepareTmpBytes,
    },
    layouts::{Backend, MatZnxToRef, Module, Scratch, VecZnxDftToMut, VecZnxDftToRef, VmpPMatOwned, VmpPMatToMut, VmpPMatToRef},
    oep::{
        VmpApplyAddImpl, VmpApplyAddTmpBytesImpl, VmpApplyImpl, VmpApplyTmpBytesImpl, VmpPMatAllocBytesImpl, VmpPMatAllocImpl,
        VmpPMatFromBytesImpl, VmpPMatPrepareImpl, VmpPrepareTmpBytesImpl,
    },
};

impl<B> VmpPMatAlloc<B> for Module<B>
where
    B: Backend + VmpPMatAllocImpl<B>,
{
    fn vmp_pmat_alloc(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> VmpPMatOwned<B> {
        B::vmp_pmat_alloc_impl(n, rows, cols_in, cols_out, size)
    }
}

impl<B> VmpPMatAllocBytes for Module<B>
where
    B: Backend + VmpPMatAllocBytesImpl<B>,
{
    fn vmp_pmat_alloc_bytes(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
        B::vmp_pmat_alloc_bytes_impl(n, rows, cols_in, cols_out, size)
    }
}

impl<B> VmpPMatFromBytes<B> for Module<B>
where
    B: Backend + VmpPMatFromBytesImpl<B>,
{
    fn vmp_pmat_from_bytes(
        &self,
        n: usize,
        rows: usize,
        cols_in: usize,
        cols_out: usize,
        size: usize,
        bytes: Vec<u8>,
    ) -> VmpPMatOwned<B> {
        B::vmp_pmat_from_bytes_impl(n, rows, cols_in, cols_out, size, bytes)
    }
}

impl<B> VmpPrepareTmpBytes for Module<B>
where
    B: Backend + VmpPrepareTmpBytesImpl<B>,
{
    fn vmp_prepare_tmp_bytes(&self, n: usize, rows: usize, cols_in: usize, cols_out: usize, size: usize) -> usize {
        B::vmp_prepare_tmp_bytes_impl(self, n, rows, cols_in, cols_out, size)
    }
}

impl<B> VmpPrepare<B> for Module<B>
where
    B: Backend + VmpPMatPrepareImpl<B>,
{
    fn vmp_prepare<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<B>)
    where
        R: VmpPMatToMut<B>,
        A: MatZnxToRef,
    {
        B::vmp_prepare_impl(self, res, a, scratch)
    }
}

impl<B> VmpApplyTmpBytes for Module<B>
where
    B: Backend + VmpApplyTmpBytesImpl<B>,
{
    fn vmp_apply_tmp_bytes(
        &self,
        n: usize,
        res_size: usize,
        a_size: usize,
        b_rows: usize,
        b_cols_in: usize,
        b_cols_out: usize,
        b_size: usize,
    ) -> usize {
        B::vmp_apply_tmp_bytes_impl(
            self, n, res_size, a_size, b_rows, b_cols_in, b_cols_out, b_size,
        )
    }
}

impl<B> VmpApply<B> for Module<B>
where
    B: Backend + VmpApplyImpl<B>,
{
    fn vmp_apply<R, A, C>(&self, res: &mut R, a: &A, b: &C, scratch: &mut Scratch<B>)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
        C: VmpPMatToRef<B>,
    {
        B::vmp_apply_impl(self, res, a, b, scratch);
    }
}

impl<B> VmpApplyAddTmpBytes for Module<B>
where
    B: Backend + VmpApplyAddTmpBytesImpl<B>,
{
    fn vmp_apply_add_tmp_bytes(
        &self,
        n: usize,
        res_size: usize,
        a_size: usize,
        b_rows: usize,
        b_cols_in: usize,
        b_cols_out: usize,
        b_size: usize,
    ) -> usize {
        B::vmp_apply_add_tmp_bytes_impl(
            self, n, res_size, a_size, b_rows, b_cols_in, b_cols_out, b_size,
        )
    }
}

impl<B> VmpApplyAdd<B> for Module<B>
where
    B: Backend + VmpApplyAddImpl<B>,
{
    fn vmp_apply_add<R, A, C>(&self, res: &mut R, a: &A, b: &C, scale: usize, scratch: &mut Scratch<B>)
    where
        R: VecZnxDftToMut<B>,
        A: VecZnxDftToRef<B>,
        C: VmpPMatToRef<B>,
    {
        B::vmp_apply_add_impl(self, res, a, b, scale, scratch);
    }
}
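
The commit message mentions a new test for vmp_apply; the expected call sequence is visible from the signatures above: size a scratch via the *_tmp_bytes queries, vmp_prepare the matrix once into the backend layout, then vmp_apply for the vector-matrix product in the DFT domain. A sketch under the same Owned-implements-To{Mut,Ref} assumption as earlier (not compiled against the crate):

// Sketch: res = a x prepared(mat), with scratch sized for the larger step.
use poulpy_hal::{
    api::{
        ScratchOwnedAlloc, ScratchOwnedBorrow, VmpApply, VmpApplyTmpBytes, VmpPMatAlloc, VmpPrepare,
        VmpPrepareTmpBytes,
    },
    layouts::{Backend, MatZnxToRef, Module, ScratchOwned, VecZnxDftToMut, VecZnxDftToRef},
    oep::{
        ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, VmpApplyImpl, VmpApplyTmpBytesImpl, VmpPMatAllocImpl,
        VmpPMatPrepareImpl, VmpPrepareTmpBytesImpl,
    },
};

fn vmp_pipeline<B, M, A, R>(
    module: &Module<B>,
    n: usize,
    rows: usize,
    cols_in: usize,
    cols_out: usize,
    size: usize,
    mat: &M,
    a: &A,
    a_size: usize,
    res: &mut R,
    res_size: usize,
) where
    B: Backend
        + VmpPMatAllocImpl<B>
        + VmpPrepareTmpBytesImpl<B>
        + VmpPMatPrepareImpl<B>
        + VmpApplyTmpBytesImpl<B>
        + VmpApplyImpl<B>
        + ScratchOwnedAllocImpl<B>
        + ScratchOwnedBorrowImpl<B>,
    M: MatZnxToRef,
    A: VecZnxDftToRef<B>,
    R: VecZnxDftToMut<B>,
{
    // Size the scratch for the larger of the two steps.
    let tmp = module
        .vmp_prepare_tmp_bytes(n, rows, cols_in, cols_out, size)
        .max(module.vmp_apply_tmp_bytes(n, res_size, a_size, rows, cols_in, cols_out, size));
    let mut scratch = ScratchOwned::<B>::alloc(tmp);

    // One-time preparation of the matrix into the backend's layout...
    let mut pmat = module.vmp_pmat_alloc(n, rows, cols_in, cols_out, size);
    module.vmp_prepare(&mut pmat, mat, scratch.borrow());

    // ...then the vector-matrix product in the DFT domain.
    module.vmp_apply(res, a, &pmat, scratch.borrow());
}

vmp_apply_add takes the same operands plus a scale parameter and, per its name, presumably accumulates into res rather than overwriting it.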