Mirror of https://github.com/arnaucube/poulpy.git, synced 2026-02-10 13:16:44 +01:00
wip
@@ -2,8 +2,8 @@ use std::marker::PhantomData;
 use poulpy_core::layouts::{Base2K, GLWE, GLWEInfos, GLWEPlaintextLayout, LWEInfos, Rank, TorusPrecision};
-use poulpy_core::{TakeGLWEPt, layouts::prepared::GLWESecretPrepared};
-use poulpy_hal::api::VecZnxBigAllocBytes;
+use poulpy_core::{TakeGLWEPlaintext, layouts::prepared::GLWESecretPrepared};
+use poulpy_hal::api::VecZnxBigBytesOf;
 #[cfg(test)]
 use poulpy_hal::api::{
 ScratchAvailable, TakeVecZnx, VecZnxAddInplace, VecZnxAddNormal, VecZnxFillUniform, VecZnxNormalize, VecZnxSub,
@@ -12,8 +12,8 @@ use poulpy_hal::api::{
 use poulpy_hal::source::Source;
 use poulpy_hal::{
 api::{
-TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftAllocBytes,
-VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
+TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftApply,
+VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
 },
 layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
 };
@@ -83,7 +83,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintBlocks<D, T> {
 scratch: &mut Scratch<BE>,
 ) where
 S: DataRef,
-Module<BE>: VecZnxDftAllocBytes
+Module<BE>: VecZnxDftBytesOf
 + VecZnxBigNormalize<BE>
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
@@ -96,7 +96,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintBlocks<D, T> {
 + VecZnxAddNormal
 + VecZnxNormalize<BE>
 + VecZnxSub,
-Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPt<BE>,
+Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPlaintext<BE>,
 {
 use poulpy_core::layouts::GLWEPlaintextLayout;
@@ -136,7 +136,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
 + VecZnxBigAddInplace<BE>
 + VecZnxBigAddSmallInplace<BE>
 + VecZnxBigNormalize<BE>,
-Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
+Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
 {
 #[cfg(debug_assertions)]
 {
@@ -175,8 +175,8 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
 scratch: &mut Scratch<BE>,
 ) -> Vec<f64>
 where
-Module<BE>: VecZnxDftAllocBytes
-+ VecZnxBigAllocBytes
+Module<BE>: VecZnxDftBytesOf
++ VecZnxBigBytesOf
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
 + VecZnxIdftApplyConsume<BE>
@@ -186,7 +186,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits + ToBits> FheUintBlocks<D, T> {
 + VecZnxNormalizeTmpBytes
 + VecZnxSubInplace
 + VecZnxNormalizeInplace<BE>,
-Scratch<BE>: TakeGLWEPt<BE> + TakeVecZnxDft<BE> + TakeVecZnxBig<BE>,
+Scratch<BE>: TakeGLWEPlaintext<BE> + TakeVecZnxDft<BE> + TakeVecZnxBig<BE>,
 {
 #[cfg(debug_assertions)]
 {

@@ -14,8 +14,8 @@ use poulpy_hal::{
 use poulpy_hal::{
 api::{
 ScratchAvailable, SvpApplyDftToDftInplace, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
-VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes,
-VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
+VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigBytesOf,
+VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
 VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
 VecZnxSubInplace, VmpPrepare,
 },
@@ -123,7 +123,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits, BE: Backend> FheUintBlocksPrep<D,
 ) where
 S: DataRef,
 Module<BE>: VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<BE>
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
@@ -190,8 +190,8 @@ impl<D: DataRef, T: UnsignedInteger + ToBits> FheUintBlocksPrepDebug<D, T> {
 #[allow(dead_code)]
 pub(crate) fn noise<S: DataRef, BE: Backend>(&self, module: &Module<BE>, sk: &GLWESecretPrepared<S, BE>, want: T)
 where
-Module<BE>: VecZnxDftAllocBytes
-+ VecZnxBigAllocBytes
+Module<BE>: VecZnxDftBytesOf
++ VecZnxBigBytesOf
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
 + VecZnxIdftApplyConsume<BE>

@@ -1,6 +1,6 @@
 use itertools::Itertools;
 use poulpy_core::{
-GLWEOperations, TakeGLWECtSlice, TakeGLWEPt, glwe_packing,
+GLWEOperations, TakeGLWEPlaintext, TakeGLWESlice, glwe_packing,
 layouts::{
 GLWE, GLWEInfos, GLWEPlaintextLayout, LWEInfos, TorusPrecision,
 prepared::{AutomorphismKeyPrepared, GLWESecretPrepared},
@@ -11,7 +11,7 @@ use poulpy_hal::{
 ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
 VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
 VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
-VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
 VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
 VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd,
 VmpApplyDftToDftTmpBytes,
@@ -39,7 +39,7 @@ impl<D: DataMut, T: UnsignedInteger> FheUintWord<D, T> {
 Module<BE>: VecZnxSub
 + VecZnxCopy
 + VecZnxNegateInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxAddInplace
 + VmpApplyDftToDftTmpBytes
 + VecZnxNormalizeTmpBytes
@@ -62,7 +62,7 @@ impl<D: DataMut, T: UnsignedInteger> FheUintWord<D, T> {
 + VecZnxAutomorphismInplace<BE>
 + VecZnxBigSubSmallNegateInplace<BE>
 + VecZnxRotate,
-Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWECtSlice,
+Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWESlice,
 {
 // Repacks the GLWE ciphertexts bits
 let gap: usize = module.n() / T::WORD_SIZE;
@@ -109,7 +109,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintWord<D, T> {
 scratch: &mut Scratch<BE>,
 ) where
 Module<BE>: VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<BE>
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
@@ -122,7 +122,7 @@ impl<D: DataMut, T: UnsignedInteger + ToBits> FheUintWord<D, T> {
 + VecZnxAddNormal
 + VecZnxNormalize<BE>
 + VecZnxSub,
-Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPt<BE>,
+Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGLWEPlaintext<BE>,
 {
 #[cfg(debug_assertions)]
 {
@@ -167,7 +167,7 @@ impl<D: DataRef, T: UnsignedInteger + FromBits> FheUintWord<D, T> {
 + VecZnxBigAddInplace<BE>
 + VecZnxBigAddSmallInplace<BE>
 + VecZnxBigNormalize<BE>,
-Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
+Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
 {
 #[cfg(debug_assertions)]
 {

@@ -1,6 +1,6 @@
 use itertools::Itertools;
 use poulpy_core::{
-GLWEExternalProductInplace, GLWEOperations, TakeGLWECtSlice,
+GLWEExternalProductInplace, GLWEOperations, TakeGLWESlice,
 layouts::{
 GLWE, GLWEToMut, LWEInfos,
 prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
@@ -49,7 +49,7 @@ impl<C: BitCircuitInfo, const N: usize, T: UnsignedInteger, BE: Backend> Circuit
 where
 Self: GetBitCircuitInfo<T>,
 Module<BE>: Cmux<BE> + VecZnxCopy,
-Scratch<BE>: TakeGLWECtSlice,
+Scratch<BE>: TakeGLWESlice,
 {
 fn execute<O>(
 &self,

@@ -9,7 +9,7 @@ use crate::tfhe::{
 },
 };
 use poulpy_core::{
-TakeGGSW, TakeGLWECt,
+TakeGGSW, TakeGLWE,
 layouts::{
 GLWESecret, GLWEToLWEKeyLayout, GLWEToLWESwitchingKey, LWE, LWESecret,
 prepared::{GLWEToLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
@@ -17,10 +17,10 @@ use poulpy_core::{
 };
 use poulpy_hal::{
 api::{
-ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
+ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx,
 TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
 VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
+VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize,
 VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPrepare,
 },
@@ -77,7 +77,7 @@ impl<BRA: BlindRotationAlgo> BDDKey<Vec<u8>, Vec<u8>, BRA> {
 Module<BE>: SvpApplyDftToDft<BE>
 + VecZnxIdftApplyTmpA<BE>
 + VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<BE>
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
@@ -92,7 +92,7 @@ impl<BRA: BlindRotationAlgo> BDDKey<Vec<u8>, Vec<u8>, BRA> {
 + VecZnxSub
 + SvpPrepare<BE>
 + VecZnxSwitchRing
-+ SvpPPolAllocBytes
++ SvpPPolBytesOf
 + SvpPPolAlloc<BE>
 + VecZnxAutomorphism
 + VecZnxAutomorphismInplace<BE>,
@@ -157,7 +157,7 @@ where
 BE: Backend,
 Module<BE>: VmpPrepare<BE>
 + VecZnxRotate
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VmpApplyDftToDft<BE>
@@ -168,7 +168,7 @@ where
 + VecZnxBigNormalize<BE>
 + VecZnxNormalize<BE>
 + VecZnxNormalizeTmpBytes,
-Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWECt + TakeVecZnx + TakeGGSW,
+Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWE + TakeVecZnx + TakeGGSW,
 CircuitBootstrappingKeyPrepared<CBT, BRA, BE>: CirtuitBootstrappingExecute<BE>,
 {
 fn prepare(
@@ -206,7 +206,7 @@ where
 BE: Backend,
 Module<BE>: VmpPrepare<BE>
 + VecZnxRotate
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VmpApplyDftToDft<BE>
@@ -217,7 +217,7 @@ where
 + VecZnxBigNormalize<BE>
 + VecZnxNormalize<BE>
 + VecZnxNormalizeTmpBytes,
-Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWECt + TakeVecZnx + TakeGGSW,
+Scratch<BE>: ScratchAvailable + TakeVecZnxDft<BE> + TakeGLWE + TakeVecZnx + TakeGGSW,
 CircuitBootstrappingKeyPrepared<CBT, BRA, BE>: CirtuitBootstrappingExecute<BE>,
 {
 fn prepare(

@@ -2,7 +2,7 @@ use std::time::Instant;
 use poulpy_backend::FFT64Ref;
 use poulpy_core::{
-TakeGGSW, TakeGLWEPt,
+TakeGGSW, TakeGLWEPlaintext,
 layouts::{
 GGSWCiphertextLayout, GLWELayout, GLWESecret, LWEInfos, LWESecret,
 prepared::{GLWESecretPrepared, PrepareAlloc},
@@ -11,11 +11,11 @@ use poulpy_core::{
 use poulpy_hal::{
 api::{
 ModuleNew, ScratchAvailable, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace,
-SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
+SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft,
 VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
 VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
-VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
+VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
 VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
 VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
@@ -51,7 +51,7 @@ where
 Module<BE>: ModuleNew<BE> + SvpPPolAlloc<BE> + SvpPrepare<BE> + VmpPMatAlloc<BE>,
 ScratchOwned<BE>: ScratchOwnedAlloc<BE> + ScratchOwnedBorrow<BE>,
 Module<BE>: VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<BE>
 + VecZnxDftApply<BE>
 + SvpApplyDftToDftInplace<BE>
@@ -68,16 +68,16 @@ where
 Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeGGSW + TakeScalarZnx + TakeSlice,
 Module<BE>: VecZnxCopy + VecZnxNegateInplace + VmpApplyDftToDftTmpBytes + VmpApplyDftToDft<BE> + VmpApplyDftToDftAdd<BE>,
 Module<BE>: VecZnxBigAddInplace<BE> + VecZnxBigAddSmallInplace<BE> + VecZnxBigNormalize<BE>,
-Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPt<BE>,
+Scratch<BE>: TakeVecZnxDft<BE> + TakeVecZnxBig<BE> + TakeGLWEPlaintext<BE>,
 Module<BE>: VecZnxAutomorphism
 + VecZnxSwitchRing
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxIdftApplyTmpA<BE>
 + SvpApplyDftToDft<BE>
 + VecZnxBigAlloc<BE>
 + VecZnxDftAlloc<BE>
 + VecZnxBigNormalizeTmpBytes
-+ SvpPPolAllocBytes
++ SvpPPolBytesOf
 + VecZnxRotateInplace<BE>
 + VecZnxBigAutomorphismInplace<BE>
 + VecZnxRshInplace<BE>
@@ -85,7 +85,7 @@ where
 + VecZnxAutomorphismInplace<BE>
 + VecZnxBigSubSmallNegateInplace<BE>
 + VecZnxRotateInplaceTmpBytes
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxDftAddInplace<BE>
 + VecZnxRotate
 + ZnFillUniform

@@ -1,9 +1,9 @@
 use itertools::izip;
 use poulpy_hal::{
 api::{
-ScratchAvailable, SvpApplyDftToDft, SvpPPolAllocBytes, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
-TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
-VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
+ScratchAvailable, SvpApplyDftToDft, SvpPPolBytesOf, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice,
+TakeVecZnxSlice, VecZnxAddInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
+VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
 VecZnxDftSubInplace, VecZnxDftZero, VecZnxIdftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes,
 VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
 VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
@@ -12,7 +12,7 @@ use poulpy_hal::{
 };
 use poulpy_core::{
-Distribution, GLWEOperations, TakeGLWECt,
+Distribution, GLWEOperations, TakeGLWE,
 layouts::{GGSWInfos, GLWE, GLWEInfos, GLWEToMut, LWE, LWECiphertextToRef, LWEInfos},
 };
@@ -31,10 +31,10 @@ pub fn cggi_blind_rotate_scratch_space<B: Backend, OUT, GGSW>(
 where
 OUT: GLWEInfos,
 GGSW: GGSWInfos,
-Module<B>: VecZnxDftAllocBytes
+Module<B>: VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxNormalizeTmpBytes
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxIdftApplyTmpBytes
 + VecZnxBigNormalizeTmpBytes,
 {
@@ -43,10 +43,10 @@ where
 if block_size > 1 {
 let cols: usize = (brk_infos.rank() + 1).into();
 let dnum: usize = brk_infos.dnum().into();
-let acc_dft: usize = module.vec_znx_dft_bytes_of(cols, dnum) * extension_factor;
-let acc_big: usize = module.vec_znx_big_bytes_of(1, brk_size);
-let vmp_res: usize = module.vec_znx_dft_bytes_of(cols, brk_size) * extension_factor;
-let vmp_xai: usize = module.vec_znx_dft_bytes_of(1, brk_size);
+let acc_dft: usize = module.bytes_of_vec_znx_dft(cols, dnum) * extension_factor;
+let acc_big: usize = module.bytes_of_vec_znx_big(1, brk_size);
+let vmp_res: usize = module.bytes_of_vec_znx_dft(cols, brk_size) * extension_factor;
+let vmp_xai: usize = module.bytes_of_vec_znx_dft(1, brk_size);
 let acc_dft_add: usize = vmp_res;
 let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(brk_size, dnum, dnum, 2, 2, brk_size); // GGSW product: (1 x 2) x (2 x 2)
 let acc: usize = if extension_factor > 1 {
@@ -67,9 +67,9 @@
 impl<D: DataRef, B: Backend> BlincRotationExecute<B> for BlindRotationKeyPrepared<D, CGGI, B>
 where
-Module<B>: VecZnxBigAllocBytes
-+ VecZnxDftAllocBytes
-+ SvpPPolAllocBytes
+Module<B>: VecZnxBigBytesOf
++ VecZnxDftBytesOf
++ SvpPPolBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VecZnxIdftApplyTmpBytes
@@ -129,9 +129,9 @@ fn execute_block_binary_extended<DataRes, DataIn, DataBrk, B: Backend>(
 DataRes: DataMut,
 DataIn: DataRef,
 DataBrk: DataRef,
-Module<B>: VecZnxBigAllocBytes
-+ VecZnxDftAllocBytes
-+ SvpPPolAllocBytes
+Module<B>: VecZnxBigBytesOf
++ VecZnxDftBytesOf
++ SvpPPolBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VecZnxIdftApplyTmpBytes
@@ -296,9 +296,9 @@ fn execute_block_binary<DataRes, DataIn, DataBrk, B: Backend>(
 DataRes: DataMut,
 DataIn: DataRef,
 DataBrk: DataRef,
-Module<B>: VecZnxBigAllocBytes
-+ VecZnxDftAllocBytes
-+ SvpPPolAllocBytes
+Module<B>: VecZnxBigBytesOf
++ VecZnxDftBytesOf
++ SvpPPolBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VecZnxIdftApplyTmpBytes
@@ -418,9 +418,9 @@ fn execute_standard<DataRes, DataIn, DataBrk, B: Backend>(
 DataRes: DataMut,
 DataIn: DataRef,
 DataBrk: DataRef,
-Module<B>: VecZnxBigAllocBytes
-+ VecZnxDftAllocBytes
-+ SvpPPolAllocBytes
+Module<B>: VecZnxBigBytesOf
++ VecZnxDftBytesOf
++ SvpPPolBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VecZnxIdftApplyTmpBytes

@@ -1,9 +1,8 @@
 use poulpy_hal::{
 api::{
 ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
-VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
-VmpPMatAlloc, VmpPrepare,
+VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
+VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VmpPMatAlloc, VmpPrepare,
 },
 layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxView, ZnxViewMut},
 source::Source,
@@ -47,7 +46,7 @@ impl BlindRotationKey<Vec<u8>, CGGI> {
 pub fn generate_from_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
 A: GGSWInfos,
-Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
 {
 GGSW::encrypt_sk_scratch_space(module, infos)
 }
@@ -56,7 +55,7 @@ impl BlindRotationKey<Vec<u8>, CGGI> {
 impl<D: DataMut, B: Backend> BlindRotationKeyEncryptSk<B> for BlindRotationKey<D, CGGI>
 where
 Module<B>: VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<B>
 + VecZnxDftApply<B>
 + SvpApplyDftToDftInplace<B>
@@ -149,7 +148,7 @@ impl BlindRotationKeyCompressed<Vec<u8>, CGGI> {
 pub fn generate_from_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
 A: GGSWInfos,
-Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
+Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf,
 {
 GGSWCompressed::encrypt_sk_scratch_space(module, infos)
 }
@@ -169,7 +168,7 @@ impl<D: DataMut> BlindRotationKeyCompressed<D, CGGI> {
 DataSkGLWE: DataRef,
 DataSkLWE: DataRef,
 Module<B>: VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<B>
 + VecZnxDftApply<B>
 + SvpApplyDftToDftInplace<B>

@@ -1,9 +1,9 @@
 use poulpy_hal::{
 api::{
-ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
 SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
-VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
-VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftSubInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
+VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAdd, VecZnxDftAddInplace,
+VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftSubInplace, VecZnxDftZero, VecZnxFillUniform, VecZnxIdftApply,
 VecZnxIdftApplyConsume, VecZnxIdftApplyTmpBytes, VecZnxMulXpMinusOneInplace, VecZnxNormalize, VecZnxNormalizeInplace,
 VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxSub, VecZnxSubInplace,
 VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal,
@@ -29,9 +29,9 @@ use poulpy_core::layouts::{
 pub fn test_blind_rotation<B>(module: &Module<B>, n_lwe: usize, block_size: usize, extension_factor: usize)
 where
-Module<B>: VecZnxBigAllocBytes
-+ VecZnxDftAllocBytes
-+ SvpPPolAllocBytes
+Module<B>: VecZnxBigBytesOf
++ VecZnxDftBytesOf
++ SvpPPolBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VecZnxIdftApplyTmpBytes

@@ -3,9 +3,9 @@ use std::collections::HashMap;
 use poulpy_hal::{
 api::{
 ScratchAvailable, TakeMatZnx, TakeSlice, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, TakeVecZnxDftSlice, TakeVecZnxSlice,
-VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
 VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
-VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
+VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
 VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace,
 VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft,
 VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
@@ -15,7 +15,7 @@ use poulpy_hal::{
 };
 use poulpy_core::{
-GLWEOperations, TakeGGLWE, TakeGLWECt,
+GLWEOperations, TakeGGLWE, TakeGLWE,
 layouts::{Dsize, GGLWECiphertextLayout, GGSWInfos, GLWEInfos, LWEInfos},
 };
@@ -44,7 +44,7 @@ where
 + VecZnxNegateInplace
 + VecZnxCopy
 + VecZnxSubInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VmpApplyDftToDft<B>
@@ -56,7 +56,7 @@ where
 + VecZnxAutomorphismInplace<B>
 + VecZnxBigSubSmallNegateInplace<B>
 + VecZnxRotateInplaceTmpBytes
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxDftAddInplace<B>
 + VecZnxRotate
 + VecZnxNormalize<B>,
@@ -145,7 +145,7 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
 + VecZnxNegateInplace
 + VecZnxCopy
 + VecZnxSubInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VmpApplyDftToDft<B>
@@ -156,7 +156,7 @@ pub fn circuit_bootstrap_core<DRes, DLwe, DBrk, BRA: BlindRotationAlgo, B>(
 + VecZnxBigNormalize<B>
 + VecZnxAutomorphismInplace<B>
 + VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxDftAddInplace<B>
 + VecZnxRotateInplaceTmpBytes
 + VecZnxRotate
@@ -286,7 +286,7 @@ fn post_process<DataRes, DataA, B: Backend>(
 + VecZnxNegateInplace
 + VecZnxCopy
 + VecZnxSubInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxBigNormalizeTmpBytes
 + VmpApplyDftToDft<B>

@@ -7,9 +7,9 @@ use std::collections::HashMap;
 use poulpy_hal::{
 api::{
-ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
+ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx,
 TakeSvpPPol, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
-VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
+VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
 VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
 VecZnxSwitchRing, VmpPMatAlloc, VmpPrepare,
 },
@@ -78,7 +78,7 @@ where
 Module<B>: SvpApplyDftToDft<B>
 + VecZnxIdftApplyTmpA<B>
 + VecZnxAddScalarInplace
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<B>
 + VecZnxDftApply<B>
 + SvpApplyDftToDftInplace<B>
@@ -93,7 +93,7 @@ where
 + VecZnxSub
 + SvpPrepare<B>
 + VecZnxSwitchRing
-+ SvpPPolAllocBytes
++ SvpPPolBytesOf
 + SvpPPolAlloc<B>
 + VecZnxAutomorphism,
 Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeSvpPPol<B> + TakeVecZnxBig<B>,

@@ -2,11 +2,11 @@ use std::time::Instant;
 use poulpy_hal::{
 api::{
-ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes,
+ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolBytesOf,
 SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxAutomorphismInplace,
-VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAllocBytes, VecZnxBigAutomorphismInplace,
+VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigAutomorphismInplace, VecZnxBigBytesOf,
 VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftAddInplace,
-VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
+VecZnxDftAlloc, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxFillUniform, VecZnxIdftApplyConsume,
 VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
 VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRshInplace, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes, VmpPMatAlloc, VmpPrepare, ZnAddNormal, ZnFillUniform,
@@ -45,7 +45,7 @@ where
 Module<B>: VecZnxFillUniform
 + VecZnxAddNormal
 + VecZnxNormalizeInplace<B>
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<B>
 + VecZnxDftApply<B>
 + SvpApplyDftToDftInplace<B>
@@ -58,7 +58,7 @@ where
 + VecZnxAddScalarInplace
 + VecZnxAutomorphism
 + VecZnxSwitchRing
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxIdftApplyTmpA<B>
 + SvpApplyDftToDft<B>
 + VecZnxBigAddInplace<B>
@@ -73,7 +73,7 @@ where
 + VmpApplyDftToDftTmpBytes
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
-+ SvpPPolAllocBytes
++ SvpPPolBytesOf
 + VecZnxRotateInplace<B>
 + VecZnxBigAutomorphismInplace<B>
 + VecZnxRshInplace<B>
@@ -83,7 +83,7 @@ where
 + VecZnxAutomorphismInplace<B>
 + VecZnxBigSubSmallNegateInplace<B>
 + VecZnxRotateInplaceTmpBytes
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxDftAddInplace<B>
 + VecZnxRotate
 + ZnFillUniform
@@ -267,7 +267,7 @@ where
 Module<B>: VecZnxFillUniform
 + VecZnxAddNormal
 + VecZnxNormalizeInplace<B>
-+ VecZnxDftAllocBytes
++ VecZnxDftBytesOf
 + VecZnxBigNormalize<B>
 + VecZnxDftApply<B>
 + SvpApplyDftToDftInplace<B>
@@ -280,7 +280,7 @@ where
 + VecZnxAddScalarInplace
 + VecZnxAutomorphism
 + VecZnxSwitchRing
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxIdftApplyTmpA<B>
 + SvpApplyDftToDft<B>
 + VecZnxBigAddInplace<B>
@@ -295,7 +295,7 @@ where
 + VmpApplyDftToDftTmpBytes
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
-+ SvpPPolAllocBytes
++ SvpPPolBytesOf
 + VecZnxRotateInplace<B>
 + VecZnxBigAutomorphismInplace<B>
 + VecZnxRotateInplaceTmpBytes
@@ -305,7 +305,7 @@ where
 + VecZnxCopy
 + VecZnxAutomorphismInplace<B>
 + VecZnxBigSubSmallNegateInplace<B>
-+ VecZnxBigAllocBytes
++ VecZnxBigBytesOf
 + VecZnxDftAddInplace<B>
 + VecZnxRotate
 + ZnFillUniform