Mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 13:16:44 +01:00)

Commit: wip

@@ -1,8 +1,8 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume,
-        VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
 };
@@ -61,7 +61,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
             + VecZnxAutomorphismInplace<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -149,7 +149,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
             + VecZnxAutomorphismInplace<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

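Note on the recurring pattern in the hunks above and throughout this commit: call-site bounds such as `Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx` shrink to a single `Scratch<B>: ScratchAvailable` (or disappear entirely), and the matching `Take*` imports are dropped. The standalone sketch below is not poulpy code — every trait and type name in it is made up — but it shows one common Rust shape for this kind of consolidation: an umbrella trait with a blanket impl, so one named bound still carries several narrow scratch capabilities.

// Hypothetical sketch (made-up names, not poulpy's API).
trait TakeBufA { fn take_buf_a(&mut self, len: usize) -> Vec<i64>; }
trait TakeBufB { fn take_buf_b(&mut self, len: usize) -> Vec<i64>; }
trait Available { fn available(&self) -> usize; }

// Umbrella trait: anything providing all three capabilities gets it for free.
trait ScratchAll: TakeBufA + TakeBufB + Available {}
impl<T: TakeBufA + TakeBufB + Available> ScratchAll for T {}

struct Scratch { bytes_left: usize }
impl TakeBufA for Scratch { fn take_buf_a(&mut self, len: usize) -> Vec<i64> { vec![0; len] } }
impl TakeBufB for Scratch { fn take_buf_b(&mut self, len: usize) -> Vec<i64> { vec![0; len] } }
impl Available for Scratch { fn available(&self) -> usize { self.bytes_left } }

// Before: fn keyswitch_demo<S: Available + TakeBufA + TakeBufB>(s: &mut S) ...
// After:  a single named bound carries the same capabilities.
fn keyswitch_demo<S: ScratchAll>(s: &mut S) -> usize {
    let _tmp = s.take_buf_a(8); // grab a temporary buffer from scratch
    s.available()
}

fn main() {
    let mut s = Scratch { bytes_left: 1 << 20 };
    println!("available after take: {}", keyswitch_demo(&mut s));
}
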
@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
-        VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
-        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume,
+        VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+        VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
@@ -83,7 +83,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             + VecZnxDftAddInplace<B>
             + VecZnxIdftApplyTmpA<B>
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B> + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -151,7 +151,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
             + VecZnxDftAddInplace<B>
             + VecZnxIdftApplyTmpA<B>
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B> + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         // Keyswitch the j-th row of the col 0
         (0..self.dnum().into()).for_each(|row_i| {

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-        VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace,
-        VecZnxBigSubSmallNegateInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
-        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
+        VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+        VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig},
 };
@@ -56,7 +56,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxAutomorphismInplace<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         self.keyswitch(module, lhs, &rhs.key, scratch);
         (0..(self.rank() + 1).into()).for_each(|i| {
@@ -82,7 +82,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxAutomorphismInplace<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         self.keyswitch_inplace(module, &rhs.key, scratch);
         (0..(self.rank() + 1).into()).for_each(|i| {
@@ -109,7 +109,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxBigAutomorphismInplace<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -150,7 +150,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxBigAutomorphismInplace<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -193,7 +193,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxBigSubSmallInplace<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -235,7 +235,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxBigSubSmallInplace<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -278,7 +278,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxBigSubSmallNegateInplace<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -320,7 +320,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
             + VecZnxBigSubSmallNegateInplace<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,16 +1,13 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
-        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+        VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 };
 
-use crate::{
-    TakeGLWE,
-    layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared},
-};
+use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, Rank, prepared::GLWEToLWESwitchingKeyPrepared};
 
 impl LWE<Vec<u8>> {
     pub fn from_glwe_scratch_space<B: Backend, OUT, IN, KEY>(
@@ -80,7 +77,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWE + TakeVecZnx,
+        Scratch<B>:,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,16 +1,13 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
         VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
 };
 
-use crate::{
-    TakeGLWE,
-    layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared},
-};
+use crate::layouts::{GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWE, LWEInfos, prepared::LWEToGLWESwitchingKeyPrepared};
 
 impl GLWE<Vec<u8>> {
     pub fn from_lwe_scratch_space<B: Backend, OUT, IN, KEY>(
@@ -62,7 +59,7 @@ impl<D: DataMut> GLWE<D> {
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>
             + VecZnxNormalizeTmpBytes,
-        Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWE + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,7 +1,7 @@
 use poulpy_hal::{
     api::{
-        SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize,
-        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
+        SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch},
 };
@@ -33,7 +33,7 @@ impl<DataSelf: DataRef> GLWE<DataSelf> {
             + VecZnxBigAddInplace<B>
             + VecZnxBigAddSmallInplace<B>
             + VecZnxBigNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B>,
+        Scratch<B>:,
     {
         #[cfg(debug_assertions)]
         {

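Side note on a detail several hunks in this commit rely on (including the one above): some where-clause predicates are left with an empty bound list, e.g. `Scratch<B>:,`. That is legal Rust — a where-clause predicate may carry zero bounds — so the code still compiles and the predicate can be repopulated later without touching call sites. The standalone sketch below (types unrelated to poulpy) just demonstrates that this syntax is accepted.

use std::marker::PhantomData;

struct Scratch<B>(PhantomData<B>);

fn noop<B>(_s: &mut Scratch<B>)
where
    Scratch<B>:, // empty bound list: syntactically valid, semantically a no-op
{
}

fn main() {
    let mut s: Scratch<u8> = Scratch(PhantomData);
    noop(&mut s);
    println!("empty where-clause predicate compiles");
}
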
@@ -5,7 +5,6 @@ use poulpy_hal::{
 };
 
 use crate::{
-    TakeGLWESecret,
     encryption::compressed::gglwe_ksk::GGLWEKeyCompressedEncryptSk,
     layouts::{
         GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos,
@@ -41,7 +40,7 @@ pub trait GGLWEAutomorphismKeyCompressedEncryptSk<B: Backend> {
 impl<B: Backend> GGLWEAutomorphismKeyCompressedEncryptSk<B> for Module<B>
 where
     Module<B>: GGLWEKeyCompressedEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + SvpPPolBytesOf + VecZnxAutomorphism,
-    Scratch<B>: TakeGLWESecret + ScratchAvailable,
+    Scratch<B>: ScratchAvailable,
 {
     fn gglwe_automorphism_key_compressed_encrypt_sk<R, S>(
         &self,

@@ -8,7 +8,6 @@ use poulpy_hal::{
 };
 
 use crate::{
-    TakeGLWEPlaintext,
     encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
     layouts::{
         GGLWE, GGLWEInfos, LWEInfos,
@@ -67,7 +66,7 @@ where
         + VecZnxDftBytesOf
         + VecZnxAddScalarInplace
         + ZnNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPlaintext<B> + ScratchAvailable,
+    Scratch<B>: ScratchAvailable,
 {
     fn gglwe_compressed_encrypt_sk<R, P, S>(
         &self,

@@ -1,13 +1,10 @@
 use poulpy_hal::{
-    api::{
-        ScratchAvailable, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing,
-    },
+    api::{ScratchAvailable, SvpPPolBytesOf, SvpPrepare, VecZnxDftBytesOf, VecZnxNormalizeTmpBytes, VecZnxSwitchRing},
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,
 };
 
 use crate::{
-    TakeGLWESecretPrepared,
     encryption::compressed::gglwe_ct::GGLWECompressedEncryptSk,
     layouts::{
         Degree, GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos,
@@ -68,7 +65,7 @@ where
         + VecZnxDftBytesOf
         + VecZnxSwitchRing
         + SvpPrepare<B>,
-    Scratch<B>: ScratchAvailable + TakeScalarZnx + TakeGLWESecretPrepared<B>,
+    Scratch<B>: ScratchAvailable,
 {
     fn gglwe_key_compressed_encrypt_sk<R, SI, SO>(
         &self,

@@ -1,14 +1,13 @@
 use poulpy_hal::{
     api::{
-        SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigBytesOf, VecZnxBigNormalize,
-        VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
+        SvpApplyDftToDft, SvpPPolBytesOf, SvpPrepare, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
 };
 
 use crate::{
-    TakeGLWESecret, TakeGLWESecretPrepared,
     encryption::compressed::gglwe_ksk::GGLWEKeyCompressedEncryptSk,
     layouts::{
         GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, LWEInfos, Rank, TensorKey,
@@ -47,7 +46,7 @@ where
         + VecZnxIdftApplyTmpA<B>
         + VecZnxBigNormalize<B>
         + SvpPrepare<B>,
-    Scratch<B>: TakeGLWESecretPrepared<B> + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeGLWESecret,
+    Scratch<B>:,
 {
     fn gglwe_tensor_key_encrypt_sk<R, S>(
         &self,

@@ -5,7 +5,6 @@ use poulpy_hal::{
 };
 
 use crate::{
-    TakeGLWEPlaintext,
     encryption::{SIGMA, glwe_ct::GLWEEncryptSkInternal},
     layouts::{
         GGSW, GGSWInfos, GLWEInfos, LWEInfos,
@@ -42,7 +41,7 @@ pub trait GGSWCompressedEncryptSk<B: Backend> {
 impl<B: Backend> GGSWCompressedEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPlaintext<B>,
+    Scratch<B>:,
 {
     fn ggsw_compressed_encrypt_sk<R, P, S>(
         &self,

@@ -1,19 +1,16 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply,
-        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
+        VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
+        VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, Module, Scratch},
     source::Source,
 };
 
-use crate::{
-    TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{
-        AutomorphismKey, AutomorphismKeyToMut, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, GLWESwitchingKey, LWEInfos,
-    },
+use crate::layouts::{
+    AutomorphismKey, AutomorphismKeyToMut, GGLWEInfos, GLWEInfos, GLWESecret, GLWESecretToRef, GLWESwitchingKey, LWEInfos,
 };
 
 impl AutomorphismKey<Vec<u8>> {
@@ -27,7 +24,7 @@ impl AutomorphismKey<Vec<u8>> {
             infos.rank_out(),
             "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
         );
-        GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of(&infos.glwe_layout())
+        GLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())
     }
 
     pub fn encrypt_pk_scratch_space<BE: Backend, A>(module: &Module<BE>, _infos: &A) -> usize
@@ -97,7 +94,7 @@ where
         + VecZnxSwitchRing
         + SvpPPolBytesOf
         + VecZnxAutomorphism,
-    Scratch<BE>: TakeVecZnxDft<BE> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<BE>,
+    Scratch<BE>: ScratchAvailable,
 {
     fn gglwe_automorphism_key_encrypt_sk<A, B>(
         &self,

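The hunk above also swaps `GLWESecret::bytes_of(&infos.glwe_layout())` for `GLWESecret::bytes_of_from_infos(module, &infos.glwe_layout())`, i.e. the byte count reserved for the temporary secret now depends on the module as well as the layout infos. poulpy's real formula is not visible in this diff; the standalone sketch below is only a hypothetical illustration (made-up `ModuleInfo` / `LayoutInfo` types) of scratch accounting that combines a module-owned ring degree with layout-owned dimensions.

// Hypothetical sketch (made-up names, not poulpy's actual formula).
const WORD: usize = std::mem::size_of::<i64>();

struct ModuleInfo { n: usize }                  // ring degree, owned by the module
struct LayoutInfo { cols: usize, size: usize }  // shape, owned by the layout infos

// Byte count of a (cols x size) vector of degree-n polynomials over i64.
fn bytes_of_from_infos(module: &ModuleInfo, infos: &LayoutInfo) -> usize {
    module.n * infos.cols * infos.size * WORD
}

fn main() {
    let module = ModuleInfo { n: 1 << 12 };
    let infos = LayoutInfo { cols: 3, size: 6 };
    // 4096 * 3 * 6 * 8 = 589824 bytes for this temporary.
    println!("{} bytes", bytes_of_from_infos(&module, &infos));
}
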
@@ -1,14 +1,10 @@
 use poulpy_hal::{
-    api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes,
-    },
+    api::{ScratchAvailable, VecZnxAddScalarInplace, VecZnxDftBytesOf, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes},
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, ScalarZnxToRef, Scratch, ZnxZero},
     source::Source,
 };
 
 use crate::{
-    TakeGLWEPlaintext,
     encryption::glwe_ct::GLWEEncryptSk,
     layouts::{
         GGLWE, GGLWEInfos, GGLWEToMut, GLWE, GLWEPlaintext, LWEInfos,
@@ -23,7 +19,7 @@ impl GGLWE<Vec<u8>> {
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxNormalizeTmpBytes,
     {
         GLWE::encrypt_sk_scratch_space(module, &infos.glwe_layout())
-            + (GLWEPlaintext::bytes_of(&infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
+            + (GLWEPlaintext::bytes_of_from_infos(module, &infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
     }
 
     pub fn encrypt_pk_scratch_space<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
@@ -52,7 +48,7 @@ pub trait GGLWEEncryptSk<B: Backend> {
 impl<B: Backend> GGLWEEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSk<B> + VecZnxNormalizeTmpBytes + VecZnxDftBytesOf + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+    Scratch<B>: ScratchAvailable,
 {
     fn gglwe_encrypt_sk<R, P, S>(
         &self,

@@ -1,17 +1,15 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
-        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubInplace, VecZnxSwitchRing,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
+        VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
+        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,
 };
 
-use crate::{
-    TakeGLWESecretPrepared,
-    layouts::{Degree, GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, prepared::GLWESecretPrepared},
+use crate::layouts::{
+    Degree, GGLWE, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, prepared::GLWESecretPrepared,
 };
 
 impl GLWESwitchingKey<Vec<u8>> {
@@ -61,7 +59,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
             + SvpPrepare<B>
             + VecZnxSwitchRing
             + SvpPPolBytesOf,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,19 +1,16 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx,
-        TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigBytesOf,
-        VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
+        SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
+        VecZnxAddScalarInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
+        VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,
 };
 
-use crate::{
-    TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{
-        Degree, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, Rank, TensorKey, prepared::GLWESecretPrepared,
-    },
+use crate::layouts::{
+    Degree, GGLWEInfos, GLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, Rank, TensorKey, prepared::GLWESecretPrepared,
 };
 
 impl TensorKey<Vec<u8>> {
@@ -59,8 +56,7 @@ impl<DataSelf: DataMut> TensorKey<DataSelf> {
             + SvpPrepare<B>
             + VecZnxSwitchRing
             + SvpPPolBytesOf,
-        Scratch<B>:
-            TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B> + TakeVecZnxBig<B>,
+        Scratch<B>:,
     {
         #[cfg(debug_assertions)]
         {

@@ -5,7 +5,7 @@ use poulpy_hal::{
 };
 
 use crate::{
-    SIGMA, TakeGLWEPlaintext,
+    SIGMA,
     encryption::glwe_ct::GLWEEncryptSkInternal,
     layouts::{
         GGSW, GGSWInfos, GGSWToMut, GLWE, GLWEInfos, LWEInfos,
@@ -45,7 +45,7 @@ pub trait GGSWEncryptSk<B: Backend> {
 impl<B: Backend> GGSWEncryptSk<B> for Module<B>
 where
     Module<B>: GLWEEncryptSkInternal<B> + VecZnxAddScalarInplace + VecZnxNormalizeInplace<B>,
-    Scratch<B>: TakeGLWEPlaintext<B>,
+    Scratch<B>:,
 {
     fn ggsw_encrypt_sk<R, P, S>(
         &self,

@@ -1,9 +1,9 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeSvpPPol,
-        TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace,
-        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
+        ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace,
+        VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply,
+        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
+        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, VecZnxToMut, ZnxInfos, ZnxZero},
     source::Source,
@@ -331,7 +331,7 @@ where
         + VecZnxBigAddNormal<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>,
-    Scratch<B>: TakeSvpPPol<B> + TakeScalarZnx + TakeVecZnxDft<B>,
+    Scratch<B>:,
 {
     fn glwe_encrypt_pk_internal<R, P, K>(
         &self,
@@ -453,7 +453,7 @@ where
         + VecZnxAddNormal
         + VecZnxNormalize<B>
         + VecZnxSub,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+    Scratch<B>: ScratchAvailable,
 {
     fn glwe_encrypt_sk_internal<R, P, S>(
         &self,

@@ -1,19 +1,16 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
-        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
+        VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
+        VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
     source::Source,
 };
 
-use crate::{
-    TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{
-        GGLWEInfos, GLWESecret, GLWESwitchingKey, GLWEToLWESwitchingKey, LWEInfos, LWESecret, Rank, prepared::GLWESecretPrepared,
-    },
+use crate::layouts::{
+    GGLWEInfos, GLWESecret, GLWESwitchingKey, GLWEToLWESwitchingKey, LWEInfos, LWESecret, Rank, prepared::GLWESecretPrepared,
 };
 
 impl GLWEToLWESwitchingKey<Vec<u8>> {
@@ -58,7 +55,7 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
             + SvpPrepare<B>
             + VecZnxSwitchRing
             + SvpPPolBytesOf,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,20 +1,16 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
-        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
+        SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace,
+        VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxFillUniform,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
+        VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
     source::Source,
 };
 
-use crate::{
-    TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{
-        Degree, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWESwitchingKey, Rank,
-        prepared::GLWESecretPrepared,
-    },
+use crate::layouts::{
+    Degree, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWESwitchingKey, Rank, prepared::GLWESecretPrepared,
 };
 
 impl LWESwitchingKey<Vec<u8>> {
@@ -75,7 +71,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
             + SvpPrepare<B>
             + VecZnxSwitchRing
             + SvpPPolBytesOf,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
+        Scratch<B>:,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,18 +1,15 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
-        VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply,
-        VecZnxDftBytesOf, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
+        ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolBytesOf, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
+        VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
+        VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
+        VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
     source::Source,
 };
 
-use crate::{
-    TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{Degree, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank},
-};
+use crate::layouts::{Degree, GGLWEInfos, GLWESecret, GLWESwitchingKey, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank};
 
 impl LWEToGLWESwitchingKey<Vec<u8>> {
     pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
@@ -61,7 +58,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
             + SvpPrepare<B>
             + VecZnxSwitchRing
             + SvpPPolBytesOf,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeScalarZnx + TakeGLWESecretPrepared<B>,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,8 +1,7 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
@@ -56,7 +55,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         self.key.external_product(module, &lhs.key, rhs, scratch);
     }
@@ -76,7 +75,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         self.key.external_product_inplace(module, rhs, scratch);
     }

@@ -1,8 +1,7 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
 };
@@ -61,7 +60,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {
@@ -119,7 +118,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
             + VecZnxIdftApplyConsume<B>
             + VecZnxBigNormalize<B>
             + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+        Scratch<B>: ScratchAvailable,
     {
         #[cfg(debug_assertions)]
         {

@@ -1,47 +1,116 @@
 use poulpy_hal::{
-    api::{
-        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
-    },
+    api::ScratchAvailable,
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
 };
 
-use crate::layouts::{GGSW, GGSWInfos, GLWE, GLWEInfos, prepared::GGSWPrepared};
+use crate::{
+    GLWEExternalProduct, ScratchTakeCore,
+    layouts::{
+        GGSW, GGSWInfos, GGSWToMut, GGSWToRef, GLWEInfos, LWEInfos,
+        prepared::{GGSWPrepared, GGSWPreparedToRef},
+    },
+};
 
-impl GGSW<Vec<u8>> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        in_infos: &IN,
-        ggsw_infos: &GGSW,
-    ) -> usize
+pub trait GGSWExternalProduct<BE: Backend>
+where
+    Self: GLWEExternalProduct<BE>,
+{
+    fn ggsw_external_product_tmp_bytes<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
     where
-        OUT: GGSWInfos,
-        IN: GGSWInfos,
-        GGSW: GGSWInfos,
-        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        R: GGSWInfos,
+        A: GGSWInfos,
+        B: GGSWInfos,
     {
-        GLWE::external_product_scratch_space(
-            module,
-            &out_infos.glwe_layout(),
-            &in_infos.glwe_layout(),
-            ggsw_infos,
-        )
+        self.glwe_external_product_scratch_space(res_infos, a_infos, b_infos)
     }
 
-    pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
-        module: &Module<B>,
-        out_infos: &OUT,
-        ggsw_infos: &GGSW,
+    fn ggsw_external_product<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        A: GGSWToRef,
+        B: GGSWPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let a: &GGSW<&[u8]> = &a.to_ref();
+        let b: &GGSWPrepared<&[u8], BE> = &b.to_ref();
+
+        assert_eq!(
+            res.rank(),
+            a.rank(),
+            "res rank: {} != a rank: {}",
+            res.rank(),
+            a.rank()
+        );
+        assert_eq!(
+            res.rank(),
+            b.rank(),
+            "res rank: {} != b rank: {}",
+            res.rank(),
+            b.rank()
+        );
+
+        assert!(scratch.available() >= self.ggsw_external_product_tmp_bytes(res, a, b));
+
+        let min_dnum: usize = res.dnum().min(a.dnum()).into();
+
+        for row in 0..min_dnum {
+            for col in 0..(res.rank() + 1).into() {
+                self.glwe_external_product(&mut res.at_mut(row, col), &a.at(row, col), b, scratch);
+            }
+        }
+
+        for row in min_dnum..res.dnum().into() {
+            for col in 0..(res.rank() + 1).into() {
+                res.at_mut(row, col).data.zero();
+            }
+        }
+    }
+
+    fn ggsw_external_product_inplace<R, A>(&self, res: &mut R, a: &A, scratch: &mut Scratch<BE>)
+    where
+        R: GGSWToMut,
+        A: GGSWPreparedToRef<BE>,
+        Scratch<BE>: ScratchTakeCore<BE>,
+    {
+        let res: &mut GGSW<&mut [u8]> = &mut res.to_mut();
+        let a: &GGSWPrepared<&[u8], BE> = &a.to_ref();
+
+        assert_eq!(res.n(), self.n() as u32);
+        assert_eq!(a.n(), self.n() as u32);
+        assert_eq!(
+            res.rank(),
+            a.rank(),
+            "res rank: {} != a rank: {}",
+            res.rank(),
+            a.rank()
+        );
+
+        for row in 0..res.dnum().into() {
+            for col in 0..(res.rank() + 1).into() {
+                self.glwe_external_product_inplace(&mut res.at_mut(row, col), a, scratch);
+            }
+        }
+    }
+}
+
+impl<BE: Backend> GGSWExternalProduct<BE> for Module<BE> where Self: GLWEExternalProduct<BE> {}
+
+impl GGSW<Vec<u8>> {
+    pub fn external_product_tmp_bytes<R, A, B, M, BE: Backend>(
+        &self,
+        module: &M,
+        res_infos: &R,
+        a_infos: &A,
+        b_infos: &B,
     ) -> usize
     where
-        OUT: GGSWInfos,
-        GGSW: GGSWInfos,
-        Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
+        R: GGSWInfos,
+        A: GGSWInfos,
+        B: GGSWInfos,
+        M: GGSWExternalProduct<BE>,
     {
-        GLWE::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), ggsw_infos)
+        module.ggsw_external_product_tmp_bytes(res_infos, a_infos, b_infos)
     }
 }
@@ -52,54 +121,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         lhs: &GGSW<DataLhs>,
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftBytesOf
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxDftApply<B>
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
-    {
-        #[cfg(debug_assertions)]
-        {
-            use crate::layouts::LWEInfos;
-
-            assert_eq!(lhs.n(), self.n());
-            assert_eq!(rhs.n(), self.n());
-
-            assert_eq!(
-                self.rank(),
-                lhs.rank(),
-                "ggsw_out rank: {} != ggsw_in rank: {}",
-                self.rank(),
-                lhs.rank()
-            );
-            assert_eq!(
-                self.rank(),
-                rhs.rank(),
-                "ggsw_in rank: {} != ggsw_apply rank: {}",
-                self.rank(),
-                rhs.rank()
-            );
-
-            assert!(scratch.available() >= GGSW::external_product_scratch_space(module, self, lhs, rhs))
-        }
-
-        let min_dnum: usize = self.dnum().min(lhs.dnum()).into();
-
-        (0..(self.rank() + 1).into()).for_each(|col_i| {
-            (0..min_dnum).for_each(|row_j| {
-                self.at_mut(row_j, col_i)
-                    .external_product(module, &lhs.at(row_j, col_i), rhs, scratch);
-            });
-            (min_dnum..self.dnum().into()).for_each(|row_i| {
-                self.at_mut(row_i, col_i).data.zero();
-            });
-        });
+    ) {
     }
 
     pub fn external_product_inplace<DataRhs: DataRef, B: Backend>(
@@ -107,37 +129,6 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
         module: &Module<B>,
         rhs: &GGSWPrepared<DataRhs, B>,
         scratch: &mut Scratch<B>,
-    ) where
-        Module<B>: VecZnxDftBytesOf
-            + VmpApplyDftToDftTmpBytes
-            + VecZnxNormalizeTmpBytes
-            + VecZnxDftApply<B>
-            + VmpApplyDftToDft<B>
-            + VmpApplyDftToDftAdd<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxBigNormalize<B>
-            + VecZnxNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
-    {
-        #[cfg(debug_assertions)]
-        {
-            use crate::layouts::LWEInfos;
-
-            assert_eq!(rhs.n(), self.n());
-            assert_eq!(
-                self.rank(),
-                rhs.rank(),
-                "ggsw_out rank: {} != ggsw_apply: {}",
-                self.rank(),
-                rhs.rank()
-            );
-        }
-
-        (0..(self.rank() + 1).into()).for_each(|col_i| {
-            (0..self.dnum().into()).for_each(|row_j| {
-                self.at_mut(row_j, col_i)
-                    .external_product_inplace(module, rhs, scratch);
-            });
-        });
+    ) {
     }
 }

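The hunk above replaces the inherent `GGSW::external_product*` methods with a `GGSWExternalProduct` trait whose method bodies are written once against `GLWEExternalProduct`, followed by the empty blanket impl `impl<BE: Backend> GGSWExternalProduct<BE> for Module<BE> where Self: GLWEExternalProduct<BE> {}`. The standalone sketch below (made-up names, not poulpy's API) shows that shape in miniature: a trait with default method bodies expressed through a smaller core trait, activated for every implementor by an empty blanket impl.

// Hypothetical sketch (made-up names, not poulpy's types).
trait CoreOp {
    fn row_op(&self, row: usize) -> u64;
}

// Higher-level trait: default bodies are written purely in terms of CoreOp,
// so implementors provide no code of their own.
trait MatrixOp: CoreOp {
    fn all_rows(&self, rows: usize) -> u64 {
        (0..rows).map(|r| self.row_op(r)).sum()
    }
}
impl<T: CoreOp> MatrixOp for T {} // empty blanket impl, mirroring the diff

struct Module { scale: u64 }
impl CoreOp for Module {
    fn row_op(&self, row: usize) -> u64 { self.scale * row as u64 }
}

fn main() {
    let m = Module { scale: 3 };
    // all_rows comes "for free" from the blanket impl.
    println!("{}", m.all_rows(4)); // 3 * (0 + 1 + 2 + 3) = 18
}
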
@@ -1,58 +1,59 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ScratchAvailable, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
|
ModuleN, ScratchTakeBasic, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
|
||||||
VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig},
|
layouts::{Backend, DataMut, DataViewMut, Module, Scratch, VecZnx, VecZnxBig},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
ScratchTakeCore,
|
ScratchTakeCore,
|
||||||
layouts::{
|
layouts::{
|
||||||
GGSWInfos, GGSWToRef, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, GetDegree, LWEInfos,
|
GGSWInfos, GLWE, GLWEInfos, GLWEToMut, GLWEToRef, LWEInfos,
|
||||||
prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
|
prepared::{GGSWPrepared, GGSWPreparedToRef},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
impl<DataSelf: DataMut> GLWE<DataSelf> {
|
impl<DataSelf: DataMut> GLWE<DataSelf> {
|
||||||
pub fn external_product_scratch_space<OUT, IN, GGSW, B: Backend>(
|
pub fn external_product_scratch_space<R, A, B, BE: Backend>(
|
||||||
module: Module<B>,
|
module: Module<BE>,
|
||||||
out_infos: &OUT,
|
res_infos: &R,
|
||||||
in_infos: &IN,
|
a_infos: &A,
|
||||||
ggsw_infos: &GGSW,
|
b_infos: &B,
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
OUT: GLWEInfos,
|
R: GLWEInfos,
|
||||||
IN: GLWEInfos,
|
A: GLWEInfos,
|
||||||
GGSW: GGSWInfos,
|
B: GGSWInfos,
|
||||||
Module<B>: GLWEExternalProduct<B>,
|
Module<BE>: GLWEExternalProduct<BE>,
|
||||||
{
|
{
|
||||||
module.glwe_external_product_scratch_space(out_infos, in_infos, ggsw_infos)
|
module.glwe_external_product_scratch_space(res_infos, a_infos, b_infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn external_product<L, R, B: Backend>(&mut self, module: &Module<B>, lhs: &L, rhs: &R, scratch: &mut Scratch<B>)
|
pub fn external_product<A, B, BE: Backend>(&mut self, module: &Module<BE>, a: &A, b: &B, scratch: &mut Scratch<BE>)
|
||||||
where
|
where
|
||||||
L: GLWEToRef,
|
A: GLWEToRef,
|
||||||
R: GGSWToRef,
|
B: GGSWPreparedToRef<BE>,
|
||||||
Module<B>: GLWEExternalProduct<B>,
|
Module<BE>: GLWEExternalProduct<BE>,
|
||||||
Scratch<B>: ScratchTakeCore<B>,
|
Scratch<BE>: ScratchTakeCore<BE>,
|
||||||
{
|
{
|
||||||
-module.glwe_external_product(self, lhs, rhs, scratch);
+module.glwe_external_product(self, a, b, scratch);
 }

-pub fn external_product_inplace<R, B: Backend>(&mut self, module: &Module<B>, rhs: &R, scratch: &mut Scratch<B>)
+pub fn external_product_inplace<A, BE: Backend>(&mut self, module: &Module<BE>, a: &A, scratch: &mut Scratch<BE>)
 where
-R: GGSWToRef,
+A: GGSWPreparedToRef<BE>,
-Module<B>: GLWEExternalProduct<B>,
+Module<BE>: GLWEExternalProduct<BE>,
-Scratch<B>: ScratchTakeCore<B>,
+Scratch<BE>: ScratchTakeCore<BE>,
 {
-module.glwe_external_product_inplace(self, rhs, scratch);
+module.glwe_external_product_inplace(self, a, scratch);
 }
 }

 pub trait GLWEExternalProduct<BE: Backend>
 where
-Self: GetDegree
+Self: Sized
++ ModuleN
 + VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxNormalizeTmpBytes
@@ -61,52 +62,48 @@ where
 + VmpApplyDftToDftAdd<BE>
 + VecZnxIdftApplyConsume<BE>
 + VecZnxBigNormalize<BE>
-+ VecZnxNormalize<BE>
-+ VecZnxDftBytesOf
-+ VmpApplyDftToDftTmpBytes
-+ VecZnxNormalizeTmpBytes,
++ VecZnxNormalize<BE>,
 {
-#[allow(clippy::too_many_arguments)]
-fn glwe_external_product_scratch_space<OUT, IN, GGSW>(&self, out_infos: &OUT, in_infos: &IN, ggsw_infos: &GGSW) -> usize
+fn glwe_external_product_scratch_space<R, A, B>(&self, res_infos: &R, a_infos: &A, b_infos: &B) -> usize
 where
-OUT: GLWEInfos,
+R: GLWEInfos,
-IN: GLWEInfos,
+A: GLWEInfos,
-GGSW: GGSWInfos,
+B: GGSWInfos,
 {
-let in_size: usize = in_infos
+let in_size: usize = a_infos
 .k()
-.div_ceil(ggsw_infos.base2k())
+.div_ceil(b_infos.base2k())
-.div_ceil(ggsw_infos.dsize().into()) as usize;
+.div_ceil(b_infos.dsize().into()) as usize;
-let out_size: usize = out_infos.size();
+let out_size: usize = res_infos.size();
-let ggsw_size: usize = ggsw_infos.size();
+let ggsw_size: usize = b_infos.size();
-let res_dft: usize = self.bytes_of_vec_znx_dft((ggsw_infos.rank() + 1).into(), ggsw_size);
+let res_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank() + 1).into(), ggsw_size);
-let a_dft: usize = self.bytes_of_vec_znx_dft((ggsw_infos.rank() + 1).into(), in_size);
+let a_dft: usize = self.bytes_of_vec_znx_dft((b_infos.rank() + 1).into(), in_size);
 let vmp: usize = self.vmp_apply_dft_to_dft_tmp_bytes(
 out_size,
 in_size,
 in_size, // rows
-(ggsw_infos.rank() + 1).into(), // cols in
+(b_infos.rank() + 1).into(), // cols in
-(ggsw_infos.rank() + 1).into(), // cols out
+(b_infos.rank() + 1).into(), // cols out
 ggsw_size,
 );
 let normalize_big: usize = self.vec_znx_normalize_tmp_bytes();

-if in_infos.base2k() == ggsw_infos.base2k() {
+if a_infos.base2k() == b_infos.base2k() {
 res_dft + a_dft + (vmp | normalize_big)
 } else {
-let normalize_conv: usize = VecZnx::bytes_of(self.n().into(), (ggsw_infos.rank() + 1).into(), in_size);
+let normalize_conv: usize = VecZnx::bytes_of(self.n(), (b_infos.rank() + 1).into(), in_size);
 res_dft + ((a_dft + normalize_conv + (self.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
 }
 }
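The scratch sizing above sums the byte counts of temporaries that are live at the same time (`res_dft`, `a_dft`) and combines the sizes of temporaries that reuse the same region with `|`. The sketch below only mirrors that arithmetic; reading `|` as integer bitwise OR used as a cheap upper bound on the maximum of two non-overlapping uses is an assumption on my part, and the concrete byte counts are made-up placeholders, not poulpy values.

```rust
// Sketch of the size arithmetic in glwe_external_product_scratch_space.
// Assumption: `|` is bitwise OR on usize, which never underestimates the
// maximum of its operands (a | b >= max(a, b)), so it is a valid, cheap
// bound for two temporaries that are not alive simultaneously.
fn external_product_scratch_bound(res_dft: usize, a_dft: usize, vmp: usize, normalize_big: usize) -> usize {
    // res_dft and a_dft coexist, so they are added; vmp and the big
    // normalization buffer reuse the same region, so a bound on their
    // maximum is enough.
    res_dft + a_dft + (vmp | normalize_big)
}

fn main() {
    let (res_dft, a_dft, vmp, normalize_big) = (1usize << 14, 1 << 13, 3 << 12, 1 << 12);
    let bound = external_product_scratch_bound(res_dft, a_dft, vmp, normalize_big);
    assert!(vmp | normalize_big >= vmp.max(normalize_big));
    assert!(bound >= res_dft + a_dft + vmp.max(normalize_big));
    println!("scratch bound: {bound} bytes");
}
```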

-fn glwe_external_product_inplace<R, D>(&self, res: &mut R, ggsw: &D, scratch: &mut Scratch<BE>)
+fn glwe_external_product_inplace<R, D>(&self, res: &mut R, a: &D, scratch: &mut Scratch<BE>)
 where
 R: GLWEToMut,
-D: GGSWCiphertextPreparedToRef<BE>,
+D: GGSWPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
-let rhs: &GGSWPrepared<&[u8], BE> = &ggsw.to_ref();
+let rhs: &GGSWPrepared<&[u8], BE> = &a.to_ref();

 let basek_in: usize = res.base2k().into();
 let basek_ggsw: usize = rhs.base2k().into();
@@ -124,8 +121,8 @@ where
 let dsize: usize = rhs.dsize().into();
 let a_size: usize = (res.size() * basek_in).div_ceil(basek_ggsw);

-let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(res.n().into(), cols, rhs.size()); // Todo optimise
+let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, rhs.size()); // Todo optimise
-let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(res.n().into(), cols, a_size.div_ceil(dsize));
+let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, a_size.div_ceil(dsize));
 a_dft.data_mut().fill(0);

 if basek_in == basek_ggsw {
@@ -153,7 +150,7 @@ where
 }
 }
 } else {
-let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n().into(), cols, a_size);
+let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self, cols, a_size);

 for j in 0..cols {
 self.vec_znx_normalize(
@@ -211,7 +208,7 @@ where
 where
 R: GLWEToMut,
 A: GLWEToRef,
-D: GGSWCiphertextPreparedToRef<BE>,
+D: GGSWPreparedToRef<BE>,
 Scratch<BE>: ScratchTakeCore<BE>,
 {
 let res: &mut GLWE<&mut [u8]> = &mut res.to_mut();
@@ -239,8 +236,8 @@ where

 let a_size: usize = (lhs.size() * basek_in).div_ceil(basek_ggsw);

-let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), cols, rhs.size()); // Todo optimise
+let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self, cols, rhs.size()); // Todo optimise
-let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), cols, a_size.div_ceil(dsize));
+let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self, cols, a_size.div_ceil(dsize));
 a_dft.data_mut().fill(0);

 if basek_in == basek_ggsw {
@@ -268,7 +265,7 @@ where
 }
 }
 } else {
-let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self.n().into(), cols, a_size);
+let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(self, cols, a_size);

 for j in 0..cols {
 self.vec_znx_normalize(
@@ -324,7 +321,7 @@ where
 }

 impl<BE: Backend> GLWEExternalProduct<BE> for Module<BE> where
-Self: GetDegree
+Self: ModuleN
 + VecZnxDftBytesOf
 + VmpApplyDftToDftTmpBytes
 + VecZnxNormalizeTmpBytes

@@ -2,17 +2,17 @@ use std::collections::HashMap;

 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
-VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
-VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
-VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace,
-VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace,
+VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy, VecZnxDftApply,
+VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace, VecZnxNormalize,
+VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
+VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };

 use crate::{
-GLWEOperations, TakeGLWE,
+GLWEOperations,
 layouts::{GGLWEInfos, GLWE, GLWEInfos, LWEInfos, prepared::AutomorphismKeyPrepared},
 };

@@ -43,12 +43,12 @@ impl Accumulator {
 /// * `base2k`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
 /// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus.
 /// * `rank`: rank of the GLWE ciphertext.
-pub fn alloc<A>(infos: &A) -> Self
+pub fn alloc<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
 where
 A: GLWEInfos,
 {
 Self {
-data: GLWE::alloc_from_infos(infos),
+data: GLWE::alloc_from_infos(module, infos),
 value: false,
 control: false,
 }
@@ -66,13 +66,13 @@ impl GLWEPacker {
 /// and N GLWE ciphertext can be packed. With `log_batch=2` all coefficients
 /// which are multiples of X^{N/4} are packed. Meaning that N/4 ciphertexts
 /// can be packed.
-pub fn new<A>(infos: &A, log_batch: usize) -> Self
+pub fn new<A, B: Backend>(module: Module<B>, infos: &A, log_batch: usize) -> Self
 where
 A: GLWEInfos,
 {
 let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
 let log_n: usize = infos.n().log2();
-(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(infos)));
+(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(module, infos)));
 Self {
 accumulators,
 log_batch,
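The doc-comment and the accumulator loop in the hunk above determine the packer's sizing: with ring degree N = 2^log_n, packing the coefficients that are multiples of X^(N / 2^log_batch) leaves room for N >> log_batch ciphertexts, and the tree of combine steps uses log_n - log_batch accumulator levels. The standalone sketch below only reproduces that counting; the function name is hypothetical and not part of the poulpy API.

```rust
// Plain counting sketch for the GLWEPacker sizing described above.
fn packer_capacity(log_n: usize, log_batch: usize) -> (usize, usize) {
    assert!(log_batch <= log_n);
    let packable = 1usize << (log_n - log_batch); // N / 2^log_batch ciphertexts fit
    let accumulators = log_n - log_batch;         // one accumulator per combine level
    (packable, accumulators)
}

fn main() {
    // Example: N = 2^10 and log_batch = 2 -> N/4 = 256 ciphertexts, 8 levels,
    // matching the "N/4 ciphertexts can be packed" remark in the doc-comment.
    let (packable, levels) = packer_capacity(10, 2);
    assert_eq!((packable, levels), (256, 8));
    println!("can pack {packable} ciphertexts using {levels} accumulators");
}
```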
@@ -142,7 +142,7 @@ impl GLWEPacker {
 + VecZnxBigAutomorphismInplace<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 assert!(
 (self.counter as u32) < self.accumulators[0].data.n(),
@@ -217,7 +217,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
 + VecZnxBigAutomorphismInplace<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 let log_n: usize = module.log_n();

@@ -274,7 +274,7 @@ where
 KEY: GGLWEInfos,
 Module<B>: VecZnxDftBytesOf + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWE::bytes_of(out_infos)
+GLWE::bytes_of_from_infos(module, out_infos)
 + (GLWE::rsh_scratch_space(module.n()) | GLWE::automorphism_inplace_scratch_space(module, out_infos, key_infos))
 }

@@ -310,7 +310,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
 + VecZnxBigAutomorphismInplace<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWE,
+Scratch<B>: ScratchAvailable,
 {
 let log_n: usize = acc.data.n().log2();
 let a: &mut GLWE<Vec<u8>> = &mut acc.data;
@@ -426,7 +426,7 @@ pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
 + VecZnxBigSubSmallNegateInplace<B>
 + VecZnxRotate
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {
@@ -493,7 +493,7 @@ fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
 + VecZnxBigSubSmallNegateInplace<B>
 + VecZnxRotate
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
+Scratch<B>: ScratchAvailable,
 {
 // Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t))
 // We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)

@@ -2,15 +2,14 @@ use std::collections::HashMap;

 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
-VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize,
-VecZnxNormalizeTmpBytes, VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
+VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx},
 };

 use crate::{
-TakeGLWE,
 layouts::{Base2K, GGLWEInfos, GLWE, GLWEInfos, GLWELayout, LWEInfos, prepared::AutomorphismKeyPrepared},
 operations::GLWEOperations,
 };
@@ -87,7 +86,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
 + VecZnxCopy
 + VecZnxNormalizeTmpBytes
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 self.copy(module, lhs);
 self.trace_inplace(module, start, end, auto_keys, scratch);
@@ -114,7 +113,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
 + VecZnxRshInplace<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 let basek_ksk: Base2K = auto_keys
 .get(auto_keys.keys().next().unwrap())

@@ -1,7 +1,7 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
+VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
 VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
@@ -57,7 +57,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 self.key.keyswitch(module, &lhs.key, rhs, scratch);
 }
@@ -79,7 +79,7 @@ impl<DataSelf: DataMut> AutomorphismKey<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 self.key.keyswitch_inplace(module, &rhs.key, scratch);
 }
@@ -130,7 +130,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {
@@ -201,7 +201,7 @@ impl<DataSelf: DataMut> GLWESwitchingKey<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {

@@ -1,9 +1,8 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigBytesOf,
-VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf,
-VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
-VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy,
+VecZnxDftAddInplace, VecZnxDftApply, VecZnxDftBytesOf, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
+VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
 };
@@ -130,7 +129,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxIdftApplyTmpA<B>
 + VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {
@@ -172,7 +171,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
 + VecZnxDftAddInplace<B>
 + VecZnxIdftApplyTmpA<B>
 + VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 (0..lhs.dnum().into()).for_each(|row_i| {
 // Key-switch column 0, i.e.
@@ -206,7 +205,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
 + VecZnxDftAddInplace<B>
 + VecZnxIdftApplyTmpA<B>
 + VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 (0..self.dnum().into()).for_each(|row_i| {
 // Key-switch column 0, i.e.
@@ -235,7 +234,7 @@ impl<DataSelf: DataMut> GGSW<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxIdftApplyTmpA<B>
 + VecZnxNormalize<B>,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 let basek_in: usize = self.base2k().into();
 let basek_tsk: usize = tsk.base2k().into();

@@ -1,7 +1,7 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftApply,
+VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
 VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
@@ -163,7 +163,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {
@@ -206,7 +206,7 @@ impl<DataSelf: DataMut> GLWE<DataSelf> {
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes,
-Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {
@@ -254,7 +254,7 @@ impl<D: DataRef> GLWE<D> {
 + VecZnxBigAddSmallInplace<B>
 + VecZnxBigNormalize<B>
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>:,
 {
 if rhs.dsize() == 1 {
 return keyswitch_vmp_one_digit(
@@ -300,7 +300,7 @@ where
 + VecZnxIdftApplyConsume<B>
 + VecZnxBigAddSmallInplace<B>
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>:,
 {
 let cols: usize = a.cols();

@@ -347,7 +347,7 @@ where
 + VecZnxIdftApplyConsume<B>
 + VecZnxBigAddSmallInplace<B>
 + VecZnxNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
+Scratch<B>:,
 {
 let cols: usize = a.cols();
 let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);

@@ -1,16 +1,13 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-VecZnxCopy, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
-VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftApply,
+VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 };

-use crate::{
-TakeGLWE,
-layouts::{GGLWEInfos, GLWE, GLWELayout, LWE, LWEInfos, Rank, TorusPrecision, prepared::LWESwitchingKeyPrepared},
-};
+use crate::layouts::{GGLWEInfos, GLWE, GLWELayout, LWE, LWEInfos, Rank, TorusPrecision, prepared::LWESwitchingKeyPrepared};

 impl LWE<Vec<u8>> {
 pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
@@ -51,8 +48,8 @@ impl LWE<Vec<u8>> {
 rank: Rank(1),
 };

-let glwe_in: usize = GLWE::bytes_of(&glwe_in_infos);
+let glwe_in: usize = GLWE::bytes_of_from_infos(module, &glwe_in_infos);
-let glwe_out: usize = GLWE::bytes_of(&glwe_out_infos);
+let glwe_out: usize = GLWE::bytes_of_from_infos(module, &glwe_out_infos);
 let ks: usize = GLWE::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, key_infos);

 glwe_in + glwe_out + ks
@@ -81,7 +78,7 @@ impl<DLwe: DataMut> LWE<DLwe> {
 + VecZnxNormalize<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxCopy,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+Scratch<B>: ScratchAvailable,
 {
 #[cfg(debug_assertions)]
 {

@@ -138,31 +138,31 @@ where
 }

 impl AutomorphismKeyCompressed<Vec<u8>> {
-pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
 where
 A: GGLWEInfos,
-Module<B>: AutomorphismKeyCompressedAlloc,
+M: AutomorphismKeyCompressedAlloc,
 {
 module.alloc_automorphism_key_compressed_from_infos(infos)
 }

-pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
+pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
 where
-Module<B>: AutomorphismKeyCompressedAlloc,
+M: AutomorphismKeyCompressedAlloc,
 {
 module.alloc_automorphism_key_compressed(base2k, k, rank, dnum, dsize)
 }

-pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
 where
 A: GGLWEInfos,
-Module<B>: AutomorphismKeyCompressedAlloc,
+M: AutomorphismKeyCompressedAlloc,
 {
 module.bytes_of_automorphism_key_compressed_from_infos(infos)
 }

-pub fn bytes_of<B: Backend>(
+pub fn bytes_of<M>(
-module: Module<B>,
+module: &M,
 base2k: Base2K,
 k: TorusPrecision,
 rank: Rank,
@@ -170,7 +170,7 @@ impl AutomorphismKeyCompressed<Vec<u8>> {
 dsize: Dsize,
 ) -> usize
 where
-Module<B>: AutomorphismKeyCompressedAlloc,
+M: AutomorphismKeyCompressedAlloc,
 {
 module.bytes_of_automorphism_key_compressed(base2k, k, rank, dnum, dsize)
 }
@@ -210,10 +210,10 @@ impl<D: DataMut> AutomorphismKey<D>
 where
 Self: SetAutomorphismGaloisElement,
 {
-pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
+pub fn decompress<O, M>(&mut self, module: &M, other: &O)
 where
 O: AutomorphismKeyCompressedToRef + GetAutomorphismGaloisElement,
-Module<B>: AutomorphismKeyDecompress,
+M: AutomorphismKeyDecompress,
 {
 module.decompress_automorphism_key(self, other);
 }
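This hunk and those that follow apply one recurring refactor to the compressed-key helpers: functions that previously demanded the concrete `Module<B>` (sometimes taken by value, sometimes by reference) now accept any `&M` whose trait bound provides the allocation or decompression, and the bounds are tightened to the matching trait. The sketch below reproduces only the shape of that change on made-up types; `KeyAlloc`, `FakeModule`, and `Key` are hypothetical stand-ins, not poulpy APIs.

```rust
// Shape of the Module<B> -> generic M refactor, on invented types.
trait KeyAlloc {
    fn alloc_key(&self, k: usize) -> Key;
}

struct Key {
    k: usize,
}

struct FakeModule;

impl KeyAlloc for FakeModule {
    fn alloc_key(&self, k: usize) -> Key {
        Key { k }
    }
}

impl Key {
    // Before (pattern removed by the diff): the helper required the concrete
    // module handle, e.g. `fn alloc<B: Backend>(module: Module<B>, k: usize)`.
    // After: any borrowed handle implementing the allocation trait works.
    pub fn alloc<M: KeyAlloc>(module: &M, k: usize) -> Self {
        module.alloc_key(k)
    }
}

fn main() {
    let module = FakeModule;
    let key = Key::alloc(&module, 54);
    assert_eq!(key.k, 54);
    println!("allocated key with k = {}", key.k);
}
```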
|
||||||
|
|||||||
@@ -186,16 +186,16 @@ where
|
|||||||
impl<B: Backend> GGLWECompressedAlloc for Module<B> where Self: GetDegree {}
|
impl<B: Backend> GGLWECompressedAlloc for Module<B> where Self: GetDegree {}
|
||||||
|
|
||||||
impl GGLWECompressed<Vec<u8>> {
|
impl GGLWECompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GGLWECompressedAlloc,
|
M: GGLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_gglwe_compressed_from_infos(infos)
|
module.alloc_gglwe_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(
|
pub fn alloc<M>(
|
||||||
module: &Module<B>,
|
module: &M,
|
||||||
base2k: Base2K,
|
base2k: Base2K,
|
||||||
k: TorusPrecision,
|
k: TorusPrecision,
|
||||||
rank_in: Rank,
|
rank_in: Rank,
|
||||||
@@ -204,21 +204,21 @@ impl GGLWECompressed<Vec<u8>> {
|
|||||||
dsize: Dsize,
|
dsize: Dsize,
|
||||||
) -> Self
|
) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: GGLWECompressedAlloc,
|
M: GGLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_gglwe_compressed(base2k, k, rank_in, rank_out, dnum, dsize)
|
module.alloc_gglwe_compressed(base2k, k, rank_in, rank_out, dnum, dsize)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GGLWECompressedAlloc,
|
M: GGLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_gglwe_compressed_from_infos(infos)
|
module.bytes_of_gglwe_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn byte_of<B: Backend>(
|
pub fn byte_of<M>(
|
||||||
module: &Module<B>,
|
module: &M,
|
||||||
base2k: Base2K,
|
base2k: Base2K,
|
||||||
k: TorusPrecision,
|
k: TorusPrecision,
|
||||||
rank_in: Rank,
|
rank_in: Rank,
|
||||||
@@ -226,7 +226,7 @@ impl GGLWECompressed<Vec<u8>> {
|
|||||||
dsize: Dsize,
|
dsize: Dsize,
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: GGLWECompressedAlloc,
|
M: GGLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_gglwe_compressed(base2k, k, rank_in, dnum, dsize)
|
module.bytes_of_gglwe_compressed(base2k, k, rank_in, dnum, dsize)
|
||||||
}
|
}
|
||||||
@@ -315,10 +315,10 @@ where
|
|||||||
impl<B: Backend> GGLWEDecompress for Module<B> where Self: VecZnxFillUniform + VecZnxCopy {}
|
impl<B: Backend> GGLWEDecompress for Module<B> where Self: VecZnxFillUniform + VecZnxCopy {}
|
||||||
|
|
||||||
impl<D: DataMut> GGLWE<D> {
|
impl<D: DataMut> GGLWE<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: GGLWECompressedToRef,
|
O: GGLWECompressedToRef,
|
||||||
Module<B>: GGLWEDecompress,
|
M: GGLWEDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_gglwe(self, other);
|
module.decompress_gglwe(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -135,16 +135,16 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GLWESwitchingKeyCompressed<Vec<u8>> {
|
impl GLWESwitchingKeyCompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GLWESwitchingKeyCompressedAlloc,
|
M: GLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_glwe_switching_key_compressed_from_infos(infos)
|
module.alloc_glwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(
|
pub fn alloc<M>(
|
||||||
module: &Module<B>,
|
module: &M,
|
||||||
base2k: Base2K,
|
base2k: Base2K,
|
||||||
k: TorusPrecision,
|
k: TorusPrecision,
|
||||||
rank_in: Rank,
|
rank_in: Rank,
|
||||||
@@ -153,21 +153,21 @@ impl GLWESwitchingKeyCompressed<Vec<u8>> {
|
|||||||
dsize: Dsize,
|
dsize: Dsize,
|
||||||
) -> Self
|
) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: GLWESwitchingKeyCompressedAlloc,
|
M: GLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_glwe_switching_key_compressed(base2k, k, rank_in, rank_out, dnum, dsize)
|
module.alloc_glwe_switching_key_compressed(base2k, k, rank_in, rank_out, dnum, dsize)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GLWESwitchingKeyCompressedAlloc,
|
M: GLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_glwe_switching_key_compressed_from_infos(infos)
|
module.bytes_of_glwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(
|
pub fn bytes_of<M>(
|
||||||
module: &Module<B>,
|
module: &M,
|
||||||
base2k: Base2K,
|
base2k: Base2K,
|
||||||
k: TorusPrecision,
|
k: TorusPrecision,
|
||||||
rank_in: Rank,
|
rank_in: Rank,
|
||||||
@@ -175,7 +175,7 @@ impl GLWESwitchingKeyCompressed<Vec<u8>> {
|
|||||||
dsize: Dsize,
|
dsize: Dsize,
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: GLWESwitchingKeyCompressedAlloc,
|
M: GLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_glwe_switching_key_compressed(base2k, k, rank_in, dnum, dsize)
|
module.bytes_of_glwe_switching_key_compressed(base2k, k, rank_in, dnum, dsize)
|
||||||
}
|
}
|
||||||
@@ -216,10 +216,10 @@ where
|
|||||||
impl<B: Backend> GLWESwitchingKeyDecompress for Module<B> where Self: GGLWEDecompress {}
|
impl<B: Backend> GLWESwitchingKeyDecompress for Module<B> where Self: GGLWEDecompress {}
|
||||||
|
|
||||||
impl<D: DataMut> GLWESwitchingKey<D> {
|
impl<D: DataMut> GLWESwitchingKey<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: GLWESwitchingKeyCompressedToRef,
|
O: GLWESwitchingKeyCompressedToRef,
|
||||||
Module<B>: GGLWEDecompress,
|
M: GLWESwitchingKeyDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_glwe_switching_key(self, other);
|
module.decompress_glwe_switching_key(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -140,31 +140,31 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl TensorKeyCompressed<Vec<u8>> {
|
impl TensorKeyCompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: TensorKeyCompressedAlloc,
|
M: TensorKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_tensor_key_compressed_from_infos(infos)
|
module.alloc_tensor_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
|
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: TensorKeyCompressedAlloc,
|
M: TensorKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_tensor_key_compressed(base2k, k, rank, dnum, dsize)
|
module.alloc_tensor_key_compressed(base2k, k, rank, dnum, dsize)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: TensorKeyCompressedAlloc,
|
M: TensorKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_tensor_key_compressed_from_infos(infos)
|
module.bytes_of_tensor_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(
|
pub fn bytes_of<M>(
|
||||||
module: Module<B>,
|
module: &M,
|
||||||
base2k: Base2K,
|
base2k: Base2K,
|
||||||
k: TorusPrecision,
|
k: TorusPrecision,
|
||||||
rank: Rank,
|
rank: Rank,
|
||||||
@@ -172,7 +172,7 @@ impl TensorKeyCompressed<Vec<u8>> {
|
|||||||
dsize: Dsize,
|
dsize: Dsize,
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: TensorKeyCompressedAlloc,
|
M: TensorKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_tensor_key_compressed(base2k, k, rank, dnum, dsize)
|
module.bytes_of_tensor_key_compressed(base2k, k, rank, dnum, dsize)
|
||||||
}
|
}
|
||||||
@@ -243,10 +243,10 @@ where
|
|||||||
impl<B: Backend> TensorKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
impl<B: Backend> TensorKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||||
|
|
||||||
impl<D: DataMut> TensorKey<D> {
|
impl<D: DataMut> TensorKey<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: TensorKeyCompressedToRef,
|
O: TensorKeyCompressedToRef,
|
||||||
Module<B>: GLWESwitchingKeyDecompress,
|
M: TensorKeyDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_tensor_key(self, other);
|
module.decompress_tensor_key(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -170,31 +170,31 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GGSWCompressed<Vec<u8>> {
|
impl GGSWCompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGSWInfos,
|
A: GGSWInfos,
|
||||||
Module<B>: GGSWCompressedAlloc,
|
M: GGSWCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_ggsw_compressed_from_infos(infos)
|
module.alloc_ggsw_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
|
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: GGSWCompressedAlloc,
|
M: GGSWCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_ggsw_compressed(base2k, k, rank, dnum, dsize)
|
module.alloc_ggsw_compressed(base2k, k, rank, dnum, dsize)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGSWInfos,
|
A: GGSWInfos,
|
||||||
Module<B>: GGSWCompressedAlloc,
|
M: GGSWCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_ggsw_compressed_key_from_infos(infos)
|
module.bytes_of_ggsw_compressed_key_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(
|
pub fn bytes_of<M>(
|
||||||
module: &Module<B>,
|
module: &M,
|
||||||
base2k: Base2K,
|
base2k: Base2K,
|
||||||
k: TorusPrecision,
|
k: TorusPrecision,
|
||||||
rank: Rank,
|
rank: Rank,
|
||||||
@@ -202,7 +202,7 @@ impl GGSWCompressed<Vec<u8>> {
|
|||||||
dsize: Dsize,
|
dsize: Dsize,
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: GGSWCompressedAlloc,
|
M: GGSWCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_ggsw_compressed(base2k, k, rank, dnum, dsize)
|
module.bytes_of_ggsw_compressed(base2k, k, rank, dnum, dsize)
|
||||||
}
|
}
|
||||||
@@ -290,10 +290,10 @@ where
|
|||||||
impl<B: Backend> GGSWDecompress for Module<B> where Self: GGSWDecompress {}
|
impl<B: Backend> GGSWDecompress for Module<B> where Self: GGSWDecompress {}
|
||||||
|
|
||||||
impl<D: DataMut> GGSW<D> {
|
impl<D: DataMut> GGSW<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: GGSWCompressedToRef,
|
O: GGSWCompressedToRef,
|
||||||
Module<B>: GGSWDecompress,
|
M: GGSWDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_ggsw(self, other);
|
module.decompress_ggsw(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -106,32 +106,32 @@ where
|
|||||||
impl<B: Backend> GLWECompressedAlloc for Module<B> where Self: GetDegree {}
|
impl<B: Backend> GLWECompressedAlloc for Module<B> where Self: GetDegree {}
|
||||||
|
|
||||||
impl GLWECompressed<Vec<u8>> {
|
impl GLWECompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GLWEInfos,
|
A: GLWEInfos,
|
||||||
Module<B>: GLWECompressedAlloc,
|
M: GLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_glwe_compressed_from_infos(infos)
|
module.alloc_glwe_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
|
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: GLWECompressedAlloc,
|
M: GLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_glwe_compressed(base2k, k, rank)
|
module.alloc_glwe_compressed(base2k, k, rank)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GLWEInfos,
|
A: GLWEInfos,
|
||||||
Module<B>: GLWECompressedAlloc,
|
M: GLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_glwe_compressed_from_infos(infos)
|
module.bytes_of_glwe_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision) -> usize
|
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: GLWECompressedAlloc,
|
M: GLWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_glwe_compressed(base2k, k)
|
module.bytes_of_glwe_compressed(base2k, k)
|
||||||
}
|
}
|
||||||
@@ -196,10 +196,10 @@ where
|
|||||||
impl<B: Backend> GLWEDecompress for Module<B> where Self: GetDegree + VecZnxFillUniform + VecZnxCopy {}
|
impl<B: Backend> GLWEDecompress for Module<B> where Self: GetDegree + VecZnxFillUniform + VecZnxCopy {}
|
||||||
|
|
||||||
impl<D: DataMut> GLWE<D> {
|
impl<D: DataMut> GLWE<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: GLWECompressedToRef + GLWEInfos,
|
O: GLWECompressedToRef + GLWEInfos,
|
||||||
Module<B>: GLWEDecompress,
|
M: GLWEDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_glwe(self, other);
|
module.decompress_glwe(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -150,32 +150,32 @@ where
|
|||||||
impl<B: Backend> GLWEToLWESwitchingKeyCompressedAlloc for Module<B> where Self: GLWESwitchingKeyCompressedAlloc {}
|
impl<B: Backend> GLWEToLWESwitchingKeyCompressedAlloc for Module<B> where Self: GLWESwitchingKeyCompressedAlloc {}
|
||||||
|
|
||||||
impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
|
impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GLWEToLWESwitchingKeyCompressedAlloc,
|
M: GLWEToLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_glwe_to_lwe_switching_key_compressed_from_infos(infos)
|
module.alloc_glwe_to_lwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
|
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: GLWEToLWESwitchingKeyCompressedAlloc,
|
M: GLWEToLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_glwe_to_lwe_switching_key_compressed(base2k, k, rank_in, dnum)
|
module.alloc_glwe_to_lwe_switching_key_compressed(base2k, k, rank_in, dnum)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GLWEToLWESwitchingKeyCompressedAlloc,
|
M: GLWEToLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_glwe_to_lwe_switching_key_compressed_from_infos(infos)
|
module.bytes_of_glwe_to_lwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_in: Rank) -> usize
|
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_in: Rank) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: GLWEToLWESwitchingKeyCompressedAlloc,
|
M: GLWEToLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_glwe_to_lwe_switching_key_compressed(base2k, k, rank_in, dnum)
|
module.bytes_of_glwe_to_lwe_switching_key_compressed(base2k, k, rank_in, dnum)
|
||||||
}
|
}
|
||||||
@@ -197,10 +197,10 @@ where
|
|||||||
impl<B: Backend> GLWEToLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
impl<B: Backend> GLWEToLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||||
|
|
||||||
impl<D: DataMut> GLWEToLWESwitchingKey<D> {
|
impl<D: DataMut> GLWEToLWESwitchingKey<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: GLWEToLWESwitchingKeyCompressedToRef,
|
O: GLWEToLWESwitchingKeyCompressedToRef,
|
||||||
Module<B>: GLWEToLWESwitchingKeyDecompress,
|
M: GLWEToLWESwitchingKeyDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_glwe_to_lwe_switching_key(self, other);
|
module.decompress_glwe_to_lwe_switching_key(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -91,33 +91,35 @@ pub trait LWECompressedAlloc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<B: Backend> LWECompressedAlloc for Module<B>{}
|
||||||
|
|
||||||
impl LWECompressed<Vec<u8>> {
|
impl LWECompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: LWEInfos,
|
A: LWEInfos,
|
||||||
Module<B>: LWECompressedAlloc,
|
M: LWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_lwe_compressed_from_infos(infos)
|
module.alloc_lwe_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision) -> Self
|
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: LWECompressedAlloc,
|
M: LWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_lwe_compressed(base2k, k)
|
module.alloc_lwe_compressed(base2k, k)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: LWEInfos,
|
A: LWEInfos,
|
||||||
Module<B>: LWECompressedAlloc,
|
M: LWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_lwe_compressed_from_infos(infos)
|
module.bytes_of_lwe_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision) -> usize
|
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: LWECompressedAlloc,
|
M: LWECompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_lwe_compressed(base2k, k)
|
module.bytes_of_lwe_compressed(base2k, k)
|
||||||
}
|
}
|
||||||
@@ -174,10 +176,10 @@ where
|
|||||||
impl<B: Backend> LWEDecompress for Module<B> where Self: ZnFillUniform {}
|
impl<B: Backend> LWEDecompress for Module<B> where Self: ZnFillUniform {}
|
||||||
|
|
||||||
impl<D: DataMut> LWE<D> {
|
impl<D: DataMut> LWE<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: LWECompressedToRef,
|
O: LWECompressedToRef,
|
||||||
Module<B>: LWEDecompress,
|
M: LWEDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_lwe(self, other);
|
module.decompress_lwe(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -147,33 +147,35 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<B: Backend> LWESwitchingKeyCompressedAlloc for Module<B> where Self: GLWESwitchingKeyCompressedAlloc{}
|
||||||
|
|
||||||
impl LWESwitchingKeyCompressed<Vec<u8>> {
|
impl LWESwitchingKeyCompressed<Vec<u8>> {
|
||||||
pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
|
pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: LWESwitchingKeyCompressedAlloc,
|
M: LWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_lwe_switching_key_compressed_from_infos(infos)
|
module.alloc_lwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
|
pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: LWESwitchingKeyCompressedAlloc,
|
M: LWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_lwe_switching_key_compressed(base2k, k, dnum)
|
module.alloc_lwe_switching_key_compressed(base2k, k, dnum)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: LWESwitchingKeyCompressedAlloc,
|
M: LWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_lwe_switching_key_compressed_from_infos(infos)
|
module.bytes_of_lwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
|
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: LWESwitchingKeyCompressedAlloc,
|
M: LWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_lwe_switching_key_compressed(base2k, k, dnum)
|
module.bytes_of_lwe_switching_key_compressed(base2k, k, dnum)
|
||||||
}
|
}
|
||||||
@@ -195,10 +197,10 @@ where
|
|||||||
impl<B: Backend> LWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
impl<B: Backend> LWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||||
|
|
||||||
impl<D: DataMut> LWESwitchingKey<D> {
|
impl<D: DataMut> LWESwitchingKey<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: LWESwitchingKeyCompressedToRef,
|
O: LWESwitchingKeyCompressedToRef,
|
||||||
Module<B>: LWESwitchingKeyDecompress,
|
M: LWESwitchingKeyDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_lwe_switching_key(self, other);
|
module.decompress_lwe_switching_key(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -140,32 +140,32 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
|
impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
|
||||||
pub fn alloc<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
|
pub fn alloc<A, M>(module: &M, infos: &A) -> Self
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: LWEToGLWESwitchingKeyCompressedAlloc,
|
M: LWEToGLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_lwe_to_glwe_switching_key_compressed_from_infos(infos)
|
module.alloc_lwe_to_glwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn alloc_with<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
|
pub fn alloc_with<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
|
||||||
where
|
where
|
||||||
Module<B>: LWEToGLWESwitchingKeyCompressedAlloc,
|
M: LWEToGLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.alloc_lwe_to_glwe_switching_key_compressed(base2k, k, rank_out, dnum)
|
module.alloc_lwe_to_glwe_switching_key_compressed(base2k, k, rank_out, dnum)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
|
pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: LWEToGLWESwitchingKeyCompressedAlloc,
|
M: LWEToGLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_lwe_to_glwe_switching_key_compressed_from_infos(infos)
|
module.bytes_of_lwe_to_glwe_switching_key_compressed_from_infos(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
|
pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: LWEToGLWESwitchingKeyCompressedAlloc,
|
M: LWEToGLWESwitchingKeyCompressedAlloc,
|
||||||
{
|
{
|
||||||
module.bytes_of_lwe_to_glwe_switching_key_compressed(base2k, k, dnum)
|
module.bytes_of_lwe_to_glwe_switching_key_compressed(base2k, k, dnum)
|
||||||
}
|
}
|
||||||
@@ -187,10 +187,10 @@ where
|
|||||||
impl<B: Backend> LWEToGLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
impl<B: Backend> LWEToGLWESwitchingKeyDecompress for Module<B> where Self: GLWESwitchingKeyDecompress {}
|
||||||
|
|
||||||
impl<D: DataMut> LWEToGLWESwitchingKey<D> {
|
impl<D: DataMut> LWEToGLWESwitchingKey<D> {
|
||||||
pub fn decompress<O, B: Backend>(&mut self, module: &Module<B>, other: &O)
|
pub fn decompress<O, M>(&mut self, module: &M, other: &O)
|
||||||
where
|
where
|
||||||
O: LWEToGLWESwitchingKeyCompressedToRef,
|
O: LWEToGLWESwitchingKeyCompressedToRef,
|
||||||
Module<B>: LWEToGLWESwitchingKeyDecompress,
|
M: LWEToGLWESwitchingKeyDecompress,
|
||||||
{
|
{
|
||||||
module.decompress_lwe_to_glwe_switching_key(self, other);
|
module.decompress_lwe_to_glwe_switching_key(self, other);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -188,46 +188,32 @@ where
 }

 impl AutomorphismKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: AutomorphismKeyAlloc,
+        M: AutomorphismKeyAlloc,
     {
         module.alloc_automorphism_key_from_infos(infos)
     }

-    pub fn alloc_with<B: Backend>(
-        module: &Module<B>,
-        base2k: Base2K,
-        k: TorusPrecision,
-        rank: Rank,
-        dnum: Dnum,
-        dsize: Dsize,
-    ) -> Self
+    pub fn alloc_with<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
     where
-        Module<B>: AutomorphismKeyAlloc,
+        M: AutomorphismKeyAlloc,
     {
         module.alloc_automorphism_key(base2k, k, rank, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: AutomorphismKeyAlloc,
+        M: AutomorphismKeyAlloc,
     {
         module.bytes_of_automorphism_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(
-        module: &Module<B>,
-        base2k: Base2K,
-        k: TorusPrecision,
-        rank: Rank,
-        dnum: Dnum,
-        dsize: Dsize,
-    ) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
     where
-        Module<B>: AutomorphismKeyAlloc,
+        M: AutomorphismKeyAlloc,
     {
         module.bytes_of_automorphism_key(base2k, k, rank, dnum, dsize)
     }
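A minimal caller-side sketch of the relaxed bound introduced above, assuming only the items visible in this hunk (imports from the surrounding crate are assumed; the helper name is hypothetical): a helper no longer has to name a Backend, any `M: AutomorphismKeyAlloc` works.

    // Sketch only: the bound moves from `Module<B>: AutomorphismKeyAlloc` to a free generic M.
    fn make_automorphism_key<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> AutomorphismKey<Vec<u8>>
    where
        M: AutomorphismKeyAlloc,
    {
        AutomorphismKey::alloc_with(module, base2k, k, rank, dnum, dsize)
    }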
@@ -296,39 +296,31 @@ where
 impl<B: Backend> GGLWEAlloc for Module<B> where Self: GetDegree {}

 impl GGLWE<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: GGLWEAlloc,
+        M: GGLWEAlloc,
     {
         module.alloc_glwe_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(
-        module: Module<B>,
-        base2k: Base2K,
-        k: TorusPrecision,
-        rank_in: Rank,
-        rank_out: Rank,
-        dnum: Dnum,
-        dsize: Dsize,
-    ) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, rank_out: Rank, dnum: Dnum, dsize: Dsize) -> Self
     where
-        Module<B>: GGLWEAlloc,
+        M: GGLWEAlloc,
     {
         module.alloc_gglwe(base2k, k, rank_in, rank_out, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: GGLWEAlloc,
+        M: GGLWEAlloc,
     {
         module.bytes_of_gglwe_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(
-        module: Module<B>,
+    pub fn bytes_of<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
@@ -337,7 +329,7 @@ impl GGLWE<Vec<u8>> {
         dsize: Dsize,
     ) -> usize
     where
-        Module<B>: GGLWEAlloc,
+        M: GGLWEAlloc,
     {
         module.bytes_of_gglwe(base2k, k, rank_in, rank_out, dnum, dsize)
     }
@@ -227,16 +227,16 @@ where
 impl<B: Backend> GLWESwitchingKeyAlloc for Module<B> where Self: GGLWEAlloc {}

 impl GLWESwitchingKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: GLWESwitchingKeyAlloc,
+        M: GLWESwitchingKeyAlloc,
     {
         module.alloc_glwe_switching_key_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(
-        module: &Module<B>,
+    pub fn alloc<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
@@ -245,21 +245,21 @@ impl GLWESwitchingKey<Vec<u8>> {
         dsize: Dsize,
     ) -> Self
     where
-        Module<B>: GLWESwitchingKeyAlloc,
+        M: GLWESwitchingKeyAlloc,
     {
         module.alloc_glwe_switching_key(base2k, k, rank_in, rank_out, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: GLWESwitchingKeyAlloc,
+        M: GLWESwitchingKeyAlloc,
     {
         module.bytes_of_glwe_switching_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(
-        module: &Module<B>,
+    pub fn bytes_of<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
@@ -268,7 +268,7 @@ impl GLWESwitchingKey<Vec<u8>> {
         dsize: Dsize,
     ) -> usize
     where
-        Module<B>: GLWESwitchingKeyAlloc,
+        M: GLWESwitchingKeyAlloc,
     {
         module.bytes_of_glwe_switching_key(base2k, k, rank_in, rank_out, dnum, dsize)
     }
@@ -188,39 +188,32 @@ where
 impl<B: Backend> TensorKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}

 impl TensorKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: TensorKeyAlloc,
+        M: TensorKeyAlloc,
     {
         module.alloc_tensor_key_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
     where
-        Module<B>: TensorKeyAlloc,
+        M: TensorKeyAlloc,
     {
         module.alloc_tensor_key(base2k, k, rank, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: TensorKeyAlloc,
+        M: TensorKeyAlloc,
     {
         module.bytes_of_tensor_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(
-        module: Module<B>,
-        base2k: Base2K,
-        k: TorusPrecision,
-        rank: Rank,
-        dnum: Dnum,
-        dsize: Dsize,
-    ) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
     where
-        Module<B>: TensorKeyAlloc,
+        M: TensorKeyAlloc,
     {
         module.bytes_of_tensor_key(base2k, k, rank, dnum, dsize)
     }
@@ -239,39 +239,32 @@ where
 }

 impl GGSW<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGSWInfos,
-        Module<B>: GGSWAlloc,
+        M: GGSWAlloc,
     {
         module.alloc_ggsw_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
     where
-        Module<B>: GGSWAlloc,
+        M: GGSWAlloc,
     {
         module.alloc_ggsw(base2k, k, rank, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGSWInfos,
-        Module<B>: GGSWAlloc,
+        M: GGSWAlloc,
     {
         module.bytes_of_ggsw_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(
-        module: Module<B>,
-        base2k: Base2K,
-        k: TorusPrecision,
-        rank: Rank,
-        dnum: Dnum,
-        dsize: Dsize,
-    ) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
     where
-        Module<B>: GGSWAlloc,
+        M: GGSWAlloc,
     {
         module.bytes_of_ggsw(base2k, k, rank, dnum, dsize)
     }
@@ -188,32 +188,32 @@ where
 impl<B: Backend> GLWEAlloc for Module<B> where Self: GetDegree {}

 impl GLWE<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GLWEInfos,
-        Module<B>: GLWEAlloc,
+        M: GLWEAlloc,
     {
         module.alloc_glwe_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
     where
-        Module<B>: GLWEAlloc,
+        M: GLWEAlloc,
     {
         module.alloc_glwe(base2k, k, rank)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: GLWEAlloc,
+        M: GLWEAlloc,
     {
         module.bytes_of_glwe_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
     where
-        Module<B>: GLWEAlloc,
+        M: GLWEAlloc,
     {
         module.bytes_of_glwe(base2k, k, rank)
     }
@@ -122,33 +122,35 @@ where
     }
 }

+impl<B: Backend> GLWEPublicKeyAlloc for Module<B> where Self: GetDegree {}

 impl GLWEPublicKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GLWEInfos,
-        Module<B>: GLWEPublicKeyAlloc,
+        M: GLWEPublicKeyAlloc,
     {
         module.alloc_glwe_public_key_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
     where
-        Module<B>: GLWEPublicKeyAlloc,
+        M: GLWEPublicKeyAlloc,
     {
         module.alloc_glwe_public_key(base2k, k, rank)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: GLWEPublicKeyAlloc,
+        M: GLWEPublicKeyAlloc,
     {
         module.bytes_of_glwe_public_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
     where
-        Module<B>: GLWEPublicKeyAlloc,
+        M: GLWEPublicKeyAlloc,
     {
         module.bytes_of_glwe_public_key(base2k, k, rank)
     }
@@ -116,33 +116,35 @@ where
     }
 }

+impl<B: Backend> GLWEPlaintextAlloc for Module<B> where Self: GetDegree {}

 impl GLWEPlaintext<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GLWEInfos,
-        Module<B>: GLWEPlaintextAlloc,
+        M: GLWEPlaintextAlloc,
     {
         module.alloc_glwe_plaintext_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> Self
     where
-        Module<B>: GLWEPlaintextAlloc,
+        M: GLWEPlaintextAlloc,
     {
         module.alloc_glwe_plaintext(base2k, k)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: GLWEPlaintextAlloc,
+        M: GLWEPlaintextAlloc,
     {
         module.bytes_of_glwe_plaintext_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> usize
     where
-        Module<B>: GLWEPlaintextAlloc,
+        M: GLWEPlaintextAlloc,
     {
         module.bytes_of_glwe_plaintext(base2k, k)
     }
@@ -100,33 +100,35 @@ where
     }
 }

+impl<B: Backend> GLWESecretAlloc for Module<B> where Self: GetDegree {}

 impl GLWESecret<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GLWEInfos,
-        Module<B>: GLWESecretAlloc,
+        M: GLWESecretAlloc,
     {
         module.alloc_glwe_secret_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, rank: Rank) -> Self
+    pub fn alloc<M>(module: &M, rank: Rank) -> Self
     where
-        Module<B>: GLWESecretAlloc,
+        M: GLWESecretAlloc,
     {
         module.alloc_glwe_secret(rank)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GLWEInfos,
-        Module<B>: GLWESecretAlloc,
+        M: GLWESecretAlloc,
     {
         module.bytes_of_glwe_secret_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: Module<B>, rank: Rank) -> usize
+    pub fn bytes_of<M>(module: &M, rank: Rank) -> usize
     where
-        Module<B>: GLWESecretAlloc,
+        M: GLWESecretAlloc,
     {
         module.bytes_of_glwe_secret(rank)
     }
@@ -185,33 +185,35 @@ where
     }
 }

+impl<B: Backend> GLWEToLWESwitchingKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}

 impl GLWEToLWESwitchingKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: GLWEToLWESwitchingKeyAlloc,
+        M: GLWEToLWESwitchingKeyAlloc,
     {
         module.alloc_glwe_to_lwe_switching_key_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
     where
-        Module<B>: GLWEToLWESwitchingKeyAlloc,
+        M: GLWEToLWESwitchingKeyAlloc,
     {
         module.alloc_glwe_to_lwe_switching_key(base2k, k, rank_in, dnum)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: GLWEToLWESwitchingKeyAlloc,
+        M: GLWEToLWESwitchingKeyAlloc,
     {
         module.bytes_of_glwe_to_lwe_switching_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize
     where
-        Module<B>: GLWEToLWESwitchingKeyAlloc,
+        M: GLWEToLWESwitchingKeyAlloc,
     {
         module.bytes_of_glwe_to_lwe_switching_key(base2k, k, rank_in, dnum)
     }
@@ -153,33 +153,35 @@ pub trait LWEAlloc {
     }
 }

+impl<B: Backend> LWEAlloc for Module<B> {}

 impl LWE<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: LWEInfos,
-        Module<B>: LWEAlloc,
+        M: LWEAlloc,
     {
         module.alloc_lwe_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: &Module<B>, n: Degree, base2k: Base2K, k: TorusPrecision) -> Self
+    pub fn alloc<M>(module: &M, n: Degree, base2k: Base2K, k: TorusPrecision) -> Self
     where
-        Module<B>: LWEAlloc,
+        M: LWEAlloc,
     {
         module.alloc_lwe(n, base2k, k)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: LWEInfos,
-        Module<B>: LWEAlloc,
+        M: LWEAlloc,
     {
         module.bytes_of_lwe_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: &Module<B>, n: Degree, base2k: Base2K, k: TorusPrecision) -> usize
+    pub fn bytes_of<M>(module: &M, n: Degree, base2k: Base2K, k: TorusPrecision) -> usize
     where
-        Module<B>: LWEAlloc,
+        M: LWEAlloc,
     {
         module.bytes_of_lwe(n, base2k, k)
     }
@@ -161,32 +161,32 @@ where
 impl<B: Backend> LWESwitchingKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}

 impl LWESwitchingKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: LWESwitchingKeyAlloc,
+        M: LWESwitchingKeyAlloc,
     {
         module.alloc_lwe_switching_key_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
     where
-        Module<B>: LWESwitchingKeyAlloc,
+        M: LWESwitchingKeyAlloc,
     {
         module.alloc_lwe_switching_key(base2k, k, dnum)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: LWESwitchingKeyAlloc,
+        M: LWESwitchingKeyAlloc,
     {
         module.bytes_of_glwe_switching_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
     where
-        Module<B>: LWESwitchingKeyAlloc,
+        M: LWESwitchingKeyAlloc,
     {
         module.bytes_of_lwe_switching_key(base2k, k, dnum)
     }
@@ -72,14 +72,18 @@ pub trait LWEPlaintextAlloc {
 impl<B: Backend> LWEPlaintextAlloc for Module<B> {}

 impl LWEPlaintext<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: LWEInfos,
+        M: LWEPlaintextAlloc,
     {
         module.alloc_lwe_plaintext_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: Module<B>, base2k: Base2K, k: TorusPrecision) -> Self {
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision) -> Self
+    where
+        M: LWEPlaintextAlloc,
+    {
         module.alloc_lwe_plaintext(base2k, k)
     }
 }
@@ -25,9 +25,9 @@ pub trait LWESecretAlloc {
 impl<B: Backend> LWESecretAlloc for Module<B> {}

 impl LWESecret<Vec<u8>> {
-    pub fn alloc<B: Backend>(module: &Module<B>, n: Degree) -> Self
+    pub fn alloc<M>(module: &M, n: Degree) -> Self
     where
-        Module<B>: LWESecretAlloc,
+        M: LWESecretAlloc,
     {
         module.alloc_lwe_secret(n)
     }
@@ -188,32 +188,32 @@ where
 impl<B: Backend> LWEToGLWESwitchingKeyAlloc for Module<B> where Self: GLWESwitchingKeyAlloc {}

 impl LWEToGLWESwitchingKey<Vec<u8>> {
-    pub fn alloc_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: LWEToGLWESwitchingKeyAlloc,
+        M: LWEToGLWESwitchingKeyAlloc,
     {
         module.alloc_lwe_to_glwe_switching_key_from_infos(infos)
     }

-    pub fn alloc<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self
     where
-        Module<B>: LWEToGLWESwitchingKeyAlloc,
+        M: LWEToGLWESwitchingKeyAlloc,
     {
         module.alloc_lwe_to_glwe_switching_key(base2k, k, rank_out, dnum)
     }

-    pub fn bytes_of_from_infos<A, B: Backend>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: LWEToGLWESwitchingKeyAlloc,
+        M: LWEToGLWESwitchingKeyAlloc,
     {
         module.bytes_of_lwe_to_glwe_switching_key_from_infos(infos)
     }

-    pub fn bytes_of<B: Backend>(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_out: Rank) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, rank_out: Rank) -> usize
     where
-        Module<B>: LWEToGLWESwitchingKeyAlloc,
+        M: LWEToGLWESwitchingKeyAlloc,
     {
         module.bytes_of_lwe_to_glwe_switching_key(base2k, k, rank_out, dnum)
     }
@@ -145,32 +145,32 @@ where
 impl<B: Backend> AutomorphismKeyPreparedAlloc<B> for Module<B> where Module<B>: GLWESwitchingKeyPreparedAlloc<B> {}

 impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
-        Module<B>: AutomorphismKeyPreparedAlloc<B>,
+        M: AutomorphismKeyPreparedAlloc<B>,
     {
         module.alloc_automorphism_key_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> Self
     where
-        Module<B>: AutomorphismKeyPreparedAlloc<B>,
+        M: AutomorphismKeyPreparedAlloc<B>,
     {
         module.alloc_automorphism_key_prepared(base2k, k, rank, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
-        Module<B>: AutomorphismKeyPreparedAlloc<B>,
+        M: AutomorphismKeyPreparedAlloc<B>,
     {
         module.bytes_of_automorphism_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
     where
-        Module<B>: AutomorphismKeyPreparedAlloc<B>,
+        M: AutomorphismKeyPreparedAlloc<B>,
     {
         module.bytes_of_automorphism_key_prepared(base2k, k, rank, dnum, dsize)
     }
@@ -200,19 +200,19 @@ where
 impl<B: Backend> PrepareAutomorphismKey<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}

 impl<B: Backend> AutomorphismKeyPrepared<Vec<u8>, B> {
-    pub fn prepare_tmp_bytes(&self, module: &Module<B>) -> usize
+    pub fn prepare_tmp_bytes<M>(&self, module: &M) -> usize
     where
-        Module<B>: GLWESwitchingKeyPrepare<B>,
+        M: PrepareAutomorphismKey<B>,
     {
         module.prepare_automorphism_key_tmp_bytes(self)
     }
 }

 impl<D: DataMut, B: Backend> AutomorphismKeyPrepared<D, B> {
-    pub fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+    pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: AutomorphismKeyToRef + GetAutomorphismGaloisElement,
-        Module<B>: PrepareAutomorphismKey<B>,
+        M: PrepareAutomorphismKey<B>,
     {
         module.prepare_automorphism_key(self, other, scratch);
     }
@@ -151,45 +151,50 @@ where

 impl<B: Backend> GGLWEPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}

-impl<B: Backend> GGLWEPrepared<Vec<u8>, B>
-where
-    Module<B>: GGLWEPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> GGLWEPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
+        M: GGLWEPreparedAlloc<B>,
     {
         module.alloc_gglwe_prepared_from_infos(infos)
     }

-    pub fn alloc(
-        module: &Module<B>,
+    pub fn alloc<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
         rank_out: Rank,
         dnum: Dnum,
         dsize: Dsize,
-    ) -> Self {
+    ) -> Self
+    where
+        M: GGLWEPreparedAlloc<B>,
+    {
         module.alloc_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: GGLWEPreparedAlloc<B>,
     {
         module.bytes_of_gglwe_prepared_from_infos(infos)
     }

-    pub fn bytes_of(
-        module: &Module<B>,
+    pub fn bytes_of<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
         rank_out: Rank,
         dnum: Dnum,
         dsize: Dsize,
-    ) -> usize {
+    ) -> usize
+    where
+        M: GGLWEPreparedAlloc<B>,
+    {
         module.bytes_of_gglwe_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
     }
 }
@@ -230,22 +235,20 @@ where

 impl<B: Backend> GGLWEPrepare<B> for Module<B> where Self: GetDegree + VmpPrepareTmpBytes + VmpPrepare<B> {}

-impl<D: DataMut, B: Backend> GGLWEPrepared<D, B>
-where
-    Module<B>: GGLWEPrepare<B>,
-{
-    pub fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+impl<D: DataMut, B: Backend> GGLWEPrepared<D, B> {
+    pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: GGLWEToRef,
+        M: GGLWEPrepare<B>,
     {
         module.prepare_gglwe(self, other, scratch);
     }
 }

 impl<B: Backend> GGLWEPrepared<Vec<u8>, B> {
-    pub fn prepare_tmp_bytes(&self, module: &Module<B>) -> usize
+    pub fn prepare_tmp_bytes<M>(&self, module: &M) -> usize
     where
-        Module<B>: GGLWEPrepare<B>,
+        M: GGLWEPrepare<B>,
     {
         module.prepare_gglwe_tmp_bytes(self)
     }
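A hedged sketch of driving the prepare step above, assuming a scratch buffer sized from `prepare_tmp_bytes` is already available; only items visible in these hunks are used and the wrapper name is hypothetical.

    fn prepare_gglwe_into<D, O, M, B>(dst: &mut GGLWEPrepared<D, B>, module: &M, src: &O, scratch: &mut Scratch<B>)
    where
        D: DataMut,
        B: Backend,
        O: GGLWEToRef,
        M: GGLWEPrepare<B>,
    {
        // Forwards to GGLWEPrepare::prepare_gglwe through the generic M bound.
        dst.prepare(module, src, scratch);
    }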
@@ -136,47 +136,52 @@ where
     }
 }

-impl<B: Backend> GLWESwitchingKeyPreparedAlloc<B> for Module<B> where Module<B>: GGLWEPreparedAlloc<B> {}
+impl<B: Backend> GLWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GGLWEPreparedAlloc<B> {}

-impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: GLWESwitchingKeyPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
+        M: GLWESwitchingKeyPreparedAlloc<B>,
     {
         module.alloc_glwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn alloc(
-        module: &Module<B>,
+    pub fn alloc<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
         rank_out: Rank,
         dnum: Dnum,
         dsize: Dsize,
-    ) -> Self {
+    ) -> Self
+    where
+        M: GLWESwitchingKeyPreparedAlloc<B>,
+    {
         module.alloc_glwe_switching_key_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: GLWESwitchingKeyPreparedAlloc<B>,
     {
         module.bytes_of_glwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(
-        module: &Module<B>,
+    pub fn bytes_of<M>(
+        module: &M,
         base2k: Base2K,
         k: TorusPrecision,
         rank_in: Rank,
         rank_out: Rank,
         dnum: Dnum,
         dsize: Dsize,
-    ) -> usize {
+    ) -> usize
+    where
+        M: GLWESwitchingKeyPreparedAlloc<B>,
+    {
         module.bytes_of_glwe_switching_key_prepared(base2k, k, rank_in, rank_out, dnum, dsize)
     }
 }
@@ -206,21 +211,21 @@ where
 impl<B: Backend> GLWESwitchingKeyPrepare<B> for Module<B> where Self: GGLWEPrepare<B> {}

 impl<D: DataMut, B: Backend> GLWESwitchingKeyPrepared<D, B> {
-    pub fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+    pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: GLWESwitchingKeyToRef + GLWESwtichingKeyGetMetaData,
-        Module<B>: GLWESwitchingKeyPrepare<B>,
+        M: GLWESwitchingKeyPrepare<B>,
     {
         module.prepare_glwe_switching(self, other, scratch);
     }
 }

-impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: GLWESwitchingKeyPrepare<B>,
-{
-    pub fn prepare_tmp_bytes(&self, module: &Module<B>) -> usize {
-        module.prepare_gglwe_tmp_bytes(self)
+impl<B: Backend> GLWESwitchingKeyPrepared<Vec<u8>, B> {
+    pub fn prepare_tmp_bytes<M>(&self, module: &M) -> usize
+    where
+        M: GLWESwitchingKeyPrepare<B>,
+    {
+        module.prepare_glwe_switching_key_tmp_bytes(self)
     }
 }

@@ -114,29 +114,34 @@ where

 impl<B: Backend> TensorKeyPreparedAlloc<B> for Module<B> where Module<B>: GLWESwitchingKeyPreparedAlloc<B> {}

-impl<B: Backend> TensorKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: TensorKeyPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> TensorKeyPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
+        M: TensorKeyPreparedAlloc<B>,
     {
         module.alloc_tensor_key_prepared_from_infos(infos)
     }

-    pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self {
+    pub fn alloc_with<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self
+    where
+        M: TensorKeyPreparedAlloc<B>,
+    {
         module.alloc_tensor_key_prepared(base2k, k, dnum, dsize, rank)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: TensorKeyPreparedAlloc<B>,
     {
         module.bytes_of_tensor_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize {
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank, dnum: Dnum, dsize: Dsize) -> usize
+    where
+        M: TensorKeyPreparedAlloc<B>,
+    {
         module.bytes_of_tensor_key_prepared(base2k, k, rank, dnum, dsize)
     }
 }
@@ -190,27 +195,23 @@ where
     }
 }

-impl<B: Backend> TensorKeyPrepare<B> for Module<B> where Module<B>: GLWESwitchingKeyPrepare<B> {}
+impl<B: Backend> TensorKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}

-impl<B: Backend> TensorKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: TensorKeyPrepare<B>,
-{
-    fn prepare_tmp_bytes<A>(&self, module: &Module<B>, infos: &A) -> usize
+impl<B: Backend> TensorKeyPrepared<Vec<u8>, B> {
+    fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: TensorKeyPrepare<B>,
     {
         module.prepare_tensor_key_tmp_bytes(infos)
     }
 }

-impl<D: DataMut, B: Backend> TensorKeyPrepared<D, B>
-where
-    Module<B>: TensorKeyPrepare<B>,
-{
-    fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+impl<D: DataMut, B: Backend> TensorKeyPrepared<D, B> {
+    fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: TensorKeyToRef,
+        M: TensorKeyPrepare<B>,
     {
         module.prepare_tensor_key(self, other, scratch);
     }
@@ -135,31 +135,36 @@ where
     }
 }

-impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}
+impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Self: GetDegree + VmpPMatAlloc<B> + VmpPMatBytesOf {}

-impl<B: Backend> GGSWPrepared<Vec<u8>, B>
-where
-    Module<B>: GGSWPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGSWInfos,
+        M: GGSWPreparedAlloc<B>,
     {
         module.alloc_ggsw_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self {
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self
+    where
+        M: GGSWPreparedAlloc<B>,
+    {
         module.alloc_ggsw_prepared(base2k, k, dnum, dsize, rank)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGSWInfos,
+        M: GGSWPreparedAlloc<B>,
     {
         module.bytes_of_ggsw_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize {
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize
+    where
+        M: GGSWPreparedAlloc<B>,
+    {
         module.bytes_of_ggsw_prepared(base2k, k, dnum, dsize, rank)
     }
 }
@@ -204,25 +209,21 @@ where

 impl<B: Backend> GGSWPrepare<B> for Module<B> where Self: GetDegree + VmpPrepareTmpBytes + VmpPrepare<B> {}

-impl<B: Backend> GGSWPrepared<Vec<u8>, B>
-where
-    Module<B>: GGSWPrepare<B>,
-{
-    pub fn prepare_tmp_bytes<A>(&self, module: Module<B>, infos: &A) -> usize
+impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
+    pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A) -> usize
     where
         A: GGSWInfos,
+        M: GGSWPrepare<B>,
     {
         module.ggsw_prepare_tmp_bytes(infos)
     }
 }

-impl<D: DataMut, B: Backend> GGSWPrepared<D, B>
-where
-    Module<B>: GGSWPrepare<B>,
-{
-    pub fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+impl<D: DataMut, B: Backend> GGSWPrepared<D, B> {
+    pub fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: GGSWToRef,
+        M: GGSWPrepare<B>,
     {
         module.ggsw_prepare(self, other, scratch);
     }
@@ -243,11 +244,11 @@ impl<D: DataMut, B: Backend> GGSWPreparedToMut<B> for GGSWPrepared<D, B> {
     }
 }

-pub trait GGSWCiphertextPreparedToRef<B: Backend> {
+pub trait GGSWPreparedToRef<B: Backend> {
     fn to_ref(&self) -> GGSWPrepared<&[u8], B>;
 }

-impl<D: DataRef, B: Backend> GGSWCiphertextPreparedToRef<B> for GGSWPrepared<D, B> {
+impl<D: DataRef, B: Backend> GGSWPreparedToRef<B> for GGSWPrepared<D, B> {
     fn to_ref(&self) -> GGSWPrepared<&[u8], B> {
         GGSWPrepared {
             base2k: self.base2k,
@@ -84,29 +84,34 @@ where

 impl<B: Backend> GLWEPublicKeyPreparedAlloc<B> for Module<B> where Self: VecZnxDftAlloc<B> + VecZnxDftBytesOf {}

-impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: GLWEPublicKeyPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GLWEInfos,
+        M: GLWEPublicKeyPreparedAlloc<B>,
     {
         module.alloc_glwe_public_key_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self {
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
+    where
+        M: GLWEPublicKeyPreparedAlloc<B>,
+    {
         module.alloc_glwe_public_key_prepared(base2k, k, rank)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GLWEInfos,
+        M: GLWEPublicKeyPreparedAlloc<B>,
     {
         module.bytes_of_glwe_public_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
+    where
+        M: GLWEPublicKeyPreparedAlloc<B>,
+    {
         module.bytes_of_glwe_public_key_prepared(base2k, k, rank)
     }
 }
@@ -141,13 +146,11 @@ where

 impl<B: Backend> GLWEPublicKeyPrepare<B> for Module<B> where Self: GetDegree + VecZnxDftApply<B> {}

-impl<D: DataMut, B: Backend> GLWEPublicKeyPrepared<D, B>
-where
-    Module<B>: GLWEPublicKeyPrepare<B>,
-{
-    pub fn prepare<O>(&mut self, module: &Module<B>, other: &O)
+impl<D: DataMut, B: Backend> GLWEPublicKeyPrepared<D, B> {
+    pub fn prepare<O, M>(&mut self, module: &M, other: &O)
     where
         O: GLWEPublicKeyToRef + GetDist,
+        M: GLWEPublicKeyPrepare<B>,
     {
         module.prepare_glwe_public_key(self, other);
     }
@@ -77,29 +77,34 @@ where

 impl<B: Backend> GLWESecretPreparedAlloc<B> for Module<B> where Self: GetDegree + SvpPPolBytesOf + SvpPPolAlloc<B> {}

-impl<B: Backend> GLWESecretPrepared<Vec<u8>, B>
-where
-    Module<B>: GLWESecretPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> GLWESecretPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GLWEInfos,
+        M: GLWESecretPreparedAlloc<B>,
     {
         module.alloc_glwe_secret_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, rank: Rank) -> Self {
+    pub fn alloc<M>(module: &M, rank: Rank) -> Self
+    where
+        M: GLWESecretPreparedAlloc<B>,
+    {
         module.alloc_glwe_secret_prepared(rank)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GLWEInfos,
+        M: GLWESecretPreparedAlloc<B>,
     {
         module.bytes_of_glwe_secret_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, rank: Rank) -> usize {
+    pub fn bytes_of<M>(module: &M, rank: Rank) -> usize
+    where
+        M: GLWESecretPreparedAlloc<B>,
+    {
         module.bytes_of_glwe_secret(rank)
     }
 }
@@ -138,6 +143,16 @@ where

 impl<B: Backend> GLWESecretPrepare<B> for Module<B> where Self: SvpPrepare<B> {}

+impl<D: DataMut, B: Backend> GLWESecretPrepared<D, B> {
+    pub fn prepare<M, O>(&mut self, module: &M, other: &O)
+    where
+        M: GLWESecretPrepare<B>,
+        O: GLWESecretToRef + GetDist,
+    {
+        module.prepare_glwe_secret(self, other);
+    }
+}
+
 pub trait GLWESecretPreparedToRef<B: Backend> {
     fn to_ref(&self) -> GLWESecretPrepared<&[u8], B>;
 }
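A sketch of using the newly added secret-preparation helper, assuming the caller already holds a sampled secret implementing `GLWESecretToRef + GetDist`; only items visible in this hunk are used and the wrapper name is hypothetical.

    fn prepare_glwe_secret_into<D, O, M, B>(dst: &mut GLWESecretPrepared<D, B>, module: &M, sk: &O)
    where
        D: DataMut,
        B: Backend,
        M: GLWESecretPrepare<B>,
        O: GLWESecretToRef + GetDist,
    {
        // Wraps GLWESecretPrepared::prepare, which forwards to GLWESecretPrepare::prepare_glwe_secret.
        dst.prepare(module, sk);
    }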
@@ -105,31 +105,36 @@ where
     }
 }

-impl<B: Backend> GLWEToLWESwitchingKeyPreparedAlloc<B> for Module<B> where Module<B>: GLWESwitchingKeyPreparedAlloc<B> {}
+impl<B: Backend> GLWEToLWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GLWESwitchingKeyPreparedAlloc<B> {}

-impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: GLWEToLWESwitchingKeyPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
+        M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
     {
         module.alloc_glwe_to_lwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self {
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> Self
+    where
+        M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
+    {
         module.alloc_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
     {
         module.bytes_of_glwe_to_lwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize {
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_in: Rank, dnum: Dnum) -> usize
+    where
+        M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
+    {
         module.bytes_of_glwe_to_lwe_switching_key_prepared(base2k, k, rank_in, dnum)
     }
 }
@@ -157,20 +162,20 @@ where
|
|||||||
impl<B: Backend> GLWEToLWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}
|
impl<B: Backend> GLWEToLWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}
|
||||||
|
|
||||||
impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
|
impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
|
||||||
pub fn prepare_tmp_bytes<A>(&self, module: &Module<B>, infos: &A)
|
pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
|
||||||
where
|
where
|
||||||
A: GGLWEInfos,
|
A: GGLWEInfos,
|
||||||
Module<B>: GLWEToLWESwitchingKeyPrepare<B>,
|
M: GLWEToLWESwitchingKeyPrepare<B>,
|
||||||
{
|
{
|
||||||
module.prepare_glwe_to_lwe_switching_key_tmp_bytes(infos);
|
module.prepare_glwe_to_lwe_switching_key_tmp_bytes(infos);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
|
impl<D: DataMut, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
|
||||||
fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
|
fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
|
||||||
where
|
where
|
||||||
O: GLWEToLWESwitchingKeyToRef,
|
O: GLWEToLWESwitchingKeyToRef,
|
||||||
Module<B>: GLWEToLWESwitchingKeyPrepare<B>,
|
M: GLWEToLWESwitchingKeyPrepare<B>,
|
||||||
{
|
{
|
||||||
module.prepare_glwe_to_lwe_switching_key(self, other, scratch);
|
module.prepare_glwe_to_lwe_switching_key(self, other, scratch);
|
||||||
}
|
}
|
||||||
|
|||||||
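For illustration, a caller-side sketch of the new generic allocation API above. This is a hypothetical helper, not code from this commit, and it assumes the `GLWEToLWESwitchingKeyPrepared*` items and `GGLWEInfos` are imported from poulpy_core/poulpy_hal as elsewhere in this diff:

    // Hypothetical helper: with the change above, any module implementing the
    // *PreparedAlloc trait can allocate the key; the signature no longer
    // requires a concrete `&Module<B>`.
    fn alloc_glwe_to_lwe_ksk<B, M, A>(module: &M, infos: &A) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B>
    where
        B: Backend,
        M: GLWEToLWESwitchingKeyPreparedAlloc<B>,
        A: GGLWEInfos,
    {
        GLWEToLWESwitchingKeyPrepared::<Vec<u8>, B>::alloc_from_infos(module, infos)
    }
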
@@ -116,29 +116,34 @@ where

 impl<B: Backend> LWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GLWESwitchingKeyPreparedAlloc<B> {}

-impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: LWESwitchingKeyPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
+        M: LWESwitchingKeyPreparedAlloc<B>,
     {
         module.alloc_lwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self {
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> Self
+    where
+        M: LWESwitchingKeyPreparedAlloc<B>,
+    {
         module.alloc_lwe_switching_key_prepared(base2k, k, dnum)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: LWESwitchingKeyPreparedAlloc<B>,
     {
         module.bytes_of_lwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize {
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, dnum: Dnum) -> usize
+    where
+        M: LWESwitchingKeyPreparedAlloc<B>,
+    {
         module.bytes_of_lwe_switching_key_prepared(base2k, k, dnum)
     }
 }
@@ -165,20 +170,20 @@ where
 impl<B: Backend> LWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}

 impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
-    pub fn prepare_tmp_bytes<A>(&self, module: &Module<B>, infos: &A)
+    pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
     where
         A: GGLWEInfos,
-        Module<B>: LWESwitchingKeyPrepare<B>,
+        M: LWESwitchingKeyPrepare<B>,
     {
         module.prepare_lwe_switching_key_tmp_bytes(infos);
     }
 }

 impl<D: DataMut, B: Backend> LWESwitchingKeyPrepared<D, B> {
-    fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+    fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: LWESwitchingKeyToRef,
-        Module<B>: LWESwitchingKeyPrepare<B>,
+        M: LWESwitchingKeyPrepare<B>,
     {
         module.prepare_lwe_switching_key(self, other, scratch);
     }

@@ -114,29 +114,28 @@ where

 impl<B: Backend> LWEToGLWESwitchingKeyPreparedAlloc<B> for Module<B> where Self: GLWESwitchingKeyPreparedAlloc<B> {}

-impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>
-where
-    Module<B>: LWEToGLWESwitchingKeyPreparedAlloc<B>,
-{
-    pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
+impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B>{
+    pub fn alloc_from_infos<A, M>(module: &M, infos: &A) -> Self
     where
         A: GGLWEInfos,
+        M: LWEToGLWESwitchingKeyPreparedAlloc<B>,
     {
         module.alloc_lwe_to_glwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self {
+    pub fn alloc<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> Self where M: LWEToGLWESwitchingKeyPreparedAlloc<B>, {
         module.alloc_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
     }

-    pub fn bytes_of_from_infos<A>(module: &Module<B>, infos: &A) -> usize
+    pub fn bytes_of_from_infos<A, M>(module: &M, infos: &A) -> usize
     where
         A: GGLWEInfos,
+        M: LWEToGLWESwitchingKeyPreparedAlloc<B>,
     {
         module.bytes_of_lwe_to_glwe_switching_key_prepared_from_infos(infos)
     }

-    pub fn bytes_of(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize {
+    pub fn bytes_of<M>(module: &M, base2k: Base2K, k: TorusPrecision, rank_out: Rank, dnum: Dnum) -> usize where M: LWEToGLWESwitchingKeyPreparedAlloc<B>,{
         module.bytes_of_lwe_to_glwe_switching_key_prepared(base2k, k, rank_out, dnum)
     }
 }
@@ -164,20 +163,20 @@ where
 impl<B: Backend> LWEToGLWESwitchingKeyPrepare<B> for Module<B> where Self: GLWESwitchingKeyPrepare<B> {}

 impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
-    pub fn prepare_tmp_bytes<A>(&self, module: &Module<B>, infos: &A)
+    pub fn prepare_tmp_bytes<A, M>(&self, module: &M, infos: &A)
     where
         A: GGLWEInfos,
-        Module<B>: LWEToGLWESwitchingKeyPrepare<B>,
+        M: LWEToGLWESwitchingKeyPrepare<B>,
     {
         module.prepare_lwe_to_glwe_switching_key_tmp_bytes(infos);
     }
 }

 impl<D: DataMut, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
-    fn prepare<O>(&mut self, module: &Module<B>, other: &O, scratch: &mut Scratch<B>)
+    fn prepare<O, M>(&mut self, module: &M, other: &O, scratch: &mut Scratch<B>)
     where
         O: LWEToGLWESwitchingKeyToRef,
-        Module<B>: LWEToGLWESwitchingKeyPrepare<B>,
+        M: LWEToGLWESwitchingKeyPrepare<B>,
     {
         module.prepare_lwe_to_glwe_switching_key(self, other, scratch);
     }

@@ -22,4 +22,4 @@ pub use encryption::SIGMA;

 pub use scratch::*;

-pub mod tests;
+// pub mod tests;

@@ -5,7 +5,7 @@ use poulpy_hal::{
         VecZnxSubScalarInplace,
     },
     layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, ZnxZero},
-    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
+    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };

 use crate::layouts::{GGLWE, GGLWEInfos, GLWE, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};
@@ -30,13 +30,13 @@ impl<D: DataRef> GGLWE<D> {
             + VecZnxBigNormalize<B>
             + VecZnxNormalizeTmpBytes
             + VecZnxSubScalarInplace,
-        B: Backend + TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
+        B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
     {
         let dsize: usize = self.dsize().into();
         let base2k: usize = self.base2k().into();

         let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_scratch_space(module, self));
-        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(self);
+        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);

         (0..self.rank_in().into()).for_each(|col_i| {
             (0..self.dnum().into()).for_each(|row_i| {

@@ -6,7 +6,7 @@ use poulpy_hal::{
         VecZnxSubInplace,
     },
     layouts::{Backend, DataRef, Module, ScalarZnx, ScratchOwned, VecZnxBig, VecZnxDft, ZnxZero},
-    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
+    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };

 use crate::layouts::{GGSW, GGSWInfos, GLWE, GLWEInfos, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};
@@ -36,14 +36,14 @@ impl<D: DataRef> GGSW<D> {
             + VecZnxIdftApplyTmpA<B>
             + VecZnxAddScalarInplace
             + VecZnxSubInplace,
-        B: Backend + TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
+        B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
         F: Fn(usize) -> f64,
     {
         let base2k: usize = self.base2k().into();
         let dsize: usize = self.dsize().into();

-        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(self);
-        let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(self);
+        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);
+        let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);
         let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(1, self.size());
         let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());

@@ -109,13 +109,13 @@ impl<D: DataRef> GGSW<D> {
             + VecZnxIdftApplyTmpA<B>
             + VecZnxAddScalarInplace
             + VecZnxSubInplace,
-        B: Backend + TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
+        B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
     {
         let base2k: usize = self.base2k().into();
         let dsize: usize = self.dsize().into();

-        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(self);
-        let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(self);
+        let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);
+        let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);
         let mut pt_dft: VecZnxDft<Vec<u8>, B> = module.vec_znx_dft_alloc(1, self.size());
         let mut pt_big: VecZnxBig<Vec<u8>, B> = module.vec_znx_big_alloc(1, self.size());

@@ -1,11 +1,11 @@
 use poulpy_hal::{
     api::{
-        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddInplace,
-        VecZnxBigAddSmallInplace, VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume,
-        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSubInplace,
+        ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxBigAddInplace, VecZnxBigAddSmallInplace,
+        VecZnxBigBytesOf, VecZnxBigNormalize, VecZnxDftApply, VecZnxDftBytesOf, VecZnxIdftApplyConsume, VecZnxNormalizeInplace,
+        VecZnxNormalizeTmpBytes, VecZnxSubInplace,
     },
     layouts::{Backend, DataRef, Module, Scratch, ScratchOwned},
-    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxBigImpl, TakeVecZnxDftImpl},
+    oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };

 use crate::layouts::{GLWE, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};
@@ -30,9 +30,9 @@ impl<D: DataRef> GLWE<D> {
             + VecZnxBigAddInplace<B>
             + VecZnxBigAddSmallInplace<B>
             + VecZnxBigNormalize<B>,
-        Scratch<B>: TakeVecZnxDft<B> + TakeVecZnxBig<B>,
+        Scratch<B>:,
     {
-        let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(self);
+        let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc_from_infos(module, self);
         self.decrypt(module, &mut pt_have, sk_prepared, scratch);
         module.vec_znx_sub_inplace(&mut pt_have.data, 0, &pt_want.data, 0);
         module.vec_znx_normalize_inplace(self.base2k().into(), &mut pt_have.data, 0, scratch);
@@ -59,7 +59,7 @@ impl<D: DataRef> GLWE<D> {
             + VecZnxNormalizeTmpBytes
             + VecZnxSubInplace
             + VecZnxNormalizeInplace<B>,
-        B: Backend + TakeVecZnxDftImpl<B> + TakeVecZnxBigImpl<B> + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
+        B: Backend + ScratchOwnedAllocImpl<B> + ScratchOwnedBorrowImpl<B>,
     {
         let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWE::decrypt_scratch_space(module, self));
         let noise_have: f64 = self.noise(module, sk_prepared, pt_want, scratch.borrow());

@@ -1,13 +1,13 @@
 use poulpy_hal::{
-    api::{ScratchAvailable, ScratchTakeBasic},
-    layouts::{Backend, Module, Scratch},
+    api::{ModuleN, ScratchAvailable, ScratchTakeBasic, SvpPPolBytesOf, VecZnxDftBytesOf, VmpPMatBytesOf},
+    layouts::{Backend, Scratch},
 };

 use crate::{
     dist::Distribution,
     layouts::{
-        AutomorphismKey, Degree, GGLWE, GGLWEInfos, GGSW, GGSWInfos, GLWE, GLWEInfos, GLWEPlaintext, GLWEPublicKey, GLWESecret,
-        GLWESwitchingKey, GetDegree, Rank, TensorKey,
+        AutomorphismKey, GGLWE, GGLWEInfos, GGSW, GGSWInfos, GLWE, GLWEInfos, GLWEPlaintext, GLWEPublicKey, GLWESecret,
+        GLWESwitchingKey, Rank, TensorKey,
         prepared::{
             AutomorphismKeyPrepared, GGLWEPrepared, GGSWPrepared, GLWEPublicKeyPrepared, GLWESecretPrepared,
             GLWESwitchingKeyPrepared, TensorKeyPrepared,
@@ -17,12 +17,14 @@ use crate::{

 pub trait ScratchTakeCore<B: Backend>
 where
-    Self: ScratchTakeBasic<B> + ScratchAvailable,
+    Self: ScratchTakeBasic + ScratchAvailable,
 {
-    fn take_glwe_ct<A>(&mut self, module: &Module<B>, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
+    fn take_glwe_ct<A, M>(&mut self, module: &M, infos: &A) -> (GLWE<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
+        M: ModuleN,
     {
+        assert_eq!(module.n() as u32, infos.n());
         let (data, scratch) = self.take_vec_znx(module, (infos.rank() + 1).into(), infos.size());
         (
             GLWE {
@@ -34,25 +36,28 @@ where
         )
     }

-    fn take_glwe_ct_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GLWE<&mut [u8]>>, &mut Self)
+    fn take_glwe_ct_slice<A, M>(&mut self, module: &M, size: usize, infos: &A) -> (Vec<GLWE<&mut [u8]>>, &mut Self)
     where
         A: GLWEInfos,
+        M: ModuleN,
     {
-        let mut scratch: &mut Scratch<B> = self;
+        let mut scratch: &mut Self = self;
         let mut cts: Vec<GLWE<&mut [u8]>> = Vec::with_capacity(size);
         for _ in 0..size {
-            let (ct, new_scratch) = scratch.take_glwe_ct(infos);
+            let (ct, new_scratch) = scratch.take_glwe_ct(module, infos);
             scratch = new_scratch;
             cts.push(ct);
         }
         (cts, scratch)
     }

-    fn take_glwe_pt<A>(&mut self, infos: &A) -> (GLWEPlaintext<&mut [u8]>, &mut Self)
+    fn take_glwe_pt<A, M>(&mut self, module: &M, infos: &A) -> (GLWEPlaintext<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
+        M: ModuleN,
     {
-        let (data, scratch) = self.take_vec_znx(infos.n().into(), 1, infos.size());
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_vec_znx(module, 1, infos.size());
         (
             GLWEPlaintext {
                 k: infos.k(),
@@ -63,12 +68,14 @@ where
         )
     }

-    fn take_gglwe<A>(&mut self, infos: &A) -> (GGLWE<&mut [u8]>, &mut Self)
+    fn take_gglwe<A, M>(&mut self, module: &M, infos: &A) -> (GGLWE<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN,
     {
+        assert_eq!(module.n() as u32, infos.n());
         let (data, scratch) = self.take_mat_znx(
-            infos.n().into(),
+            module,
             infos.dnum().0.div_ceil(infos.dsize().0) as usize,
             infos.rank_in().into(),
             (infos.rank_out() + 1).into(),
@@ -85,12 +92,14 @@ where
         )
     }

-    fn take_gglwe_prepared<A>(&mut self, infos: &A) -> (GGLWEPrepared<&mut [u8], B>, &mut Self)
+    fn take_gglwe_prepared<A, M>(&mut self, module: &M, infos: &A) -> (GGLWEPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN + VmpPMatBytesOf,
     {
+        assert_eq!(module.n() as u32, infos.n());
         let (data, scratch) = self.take_vmp_pmat(
-            infos.n().into(),
+            module,
             infos.dnum().into(),
             infos.rank_in().into(),
             (infos.rank_out() + 1).into(),
@@ -107,12 +116,14 @@ where
         )
     }

-    fn take_ggsw<A>(&mut self, infos: &A) -> (GGSW<&mut [u8]>, &mut Self)
+    fn take_ggsw<A, M>(&mut self, module: &M, infos: &A) -> (GGSW<&mut [u8]>, &mut Self)
     where
         A: GGSWInfos,
+        M: ModuleN,
     {
+        assert_eq!(module.n() as u32, infos.n());
         let (data, scratch) = self.take_mat_znx(
-            infos.n().into(),
+            module,
             infos.dnum().into(),
             (infos.rank() + 1).into(),
             (infos.rank() + 1).into(),
@@ -129,12 +140,14 @@ where
         )
     }

-    fn take_ggsw_prepared<A>(&mut self, infos: &A) -> (GGSWPrepared<&mut [u8], B>, &mut Self)
+    fn take_ggsw_prepared<A, M>(&mut self, module: &M, infos: &A) -> (GGSWPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGSWInfos,
+        M: ModuleN + VmpPMatBytesOf,
     {
+        assert_eq!(module.n() as u32, infos.n());
         let (data, scratch) = self.take_vmp_pmat(
-            infos.n().into(),
+            module,
             infos.dnum().into(),
             (infos.rank() + 1).into(),
             (infos.rank() + 1).into(),
@@ -151,25 +164,33 @@ where
         )
     }

-    fn take_ggsw_prepared_slice<A>(&mut self, size: usize, infos: &A) -> (Vec<GGSWPrepared<&mut [u8], B>>, &mut Self)
+    fn take_ggsw_prepared_slice<A, M>(
+        &mut self,
+        module: &M,
+        size: usize,
+        infos: &A,
+    ) -> (Vec<GGSWPrepared<&mut [u8], B>>, &mut Self)
     where
         A: GGSWInfos,
+        M: ModuleN + VmpPMatBytesOf,
     {
-        let mut scratch: &mut Scratch<B> = self;
+        let mut scratch: &mut Self = self;
         let mut cts: Vec<GGSWPrepared<&mut [u8], B>> = Vec::with_capacity(size);
         for _ in 0..size {
-            let (ct, new_scratch) = scratch.take_ggsw_prepared(infos);
+            let (ct, new_scratch) = scratch.take_ggsw_prepared(module, infos);
             scratch = new_scratch;
             cts.push(ct)
         }
         (cts, scratch)
     }

-    fn take_glwe_pk<A>(&mut self, infos: &A) -> (GLWEPublicKey<&mut [u8]>, &mut Self)
+    fn take_glwe_pk<A, M>(&mut self, module: &M, infos: &A) -> (GLWEPublicKey<&mut [u8]>, &mut Self)
     where
         A: GLWEInfos,
+        M: ModuleN,
     {
-        let (data, scratch) = self.take_vec_znx(infos.n().into(), (infos.rank() + 1).into(), infos.size());
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_vec_znx(module, (infos.rank() + 1).into(), infos.size());
         (
             GLWEPublicKey {
                 k: infos.k(),
@@ -181,11 +202,13 @@ where
         )
     }

-    fn take_glwe_pk_prepared<A>(&mut self, infos: &A) -> (GLWEPublicKeyPrepared<&mut [u8], B>, &mut Self)
+    fn take_glwe_pk_prepared<A, M>(&mut self, module: &M, infos: &A) -> (GLWEPublicKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GLWEInfos,
+        M: ModuleN + VecZnxDftBytesOf,
     {
-        let (data, scratch) = self.take_vec_znx_dft(infos.n().into(), (infos.rank() + 1).into(), infos.size());
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_vec_znx_dft(module, (infos.rank() + 1).into(), infos.size());
         (
             GLWEPublicKeyPrepared {
                 k: infos.k(),
@@ -197,8 +220,11 @@ where
         )
     }

-    fn take_glwe_secret(&mut self, n: Degree, rank: Rank) -> (GLWESecret<&mut [u8]>, &mut Self) {
-        let (data, scratch) = self.take_scalar_znx(n.into(), rank.into());
+    fn take_glwe_secret<M>(&mut self, module: &M, rank: Rank) -> (GLWESecret<&mut [u8]>, &mut Self)
+    where
+        M: ModuleN,
+    {
+        let (data, scratch) = self.take_scalar_znx(module, rank.into());
         (
             GLWESecret {
                 data,
@@ -208,8 +234,11 @@ where
         )
     }

-    fn take_glwe_secret_prepared(&mut self, n: Degree, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self) {
-        let (data, scratch) = self.take_svp_ppol(n.into(), rank.into());
+    fn take_glwe_secret_prepared<M>(&mut self, module: &M, rank: Rank) -> (GLWESecretPrepared<&mut [u8], B>, &mut Self)
+    where
+        M: ModuleN + SvpPPolBytesOf,
+    {
+        let (data, scratch) = self.take_svp_ppol(module, rank.into());
         (
             GLWESecretPrepared {
                 data,
@@ -219,11 +248,13 @@ where
         )
     }

-    fn take_glwe_switching_key<A>(&mut self, infos: &A) -> (GLWESwitchingKey<&mut [u8]>, &mut Self)
+    fn take_glwe_switching_key<A, M>(&mut self, module: &M, infos: &A) -> (GLWESwitchingKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN,
     {
-        let (data, scratch) = self.take_gglwe(infos);
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_gglwe(module, infos);
         (
             GLWESwitchingKey {
                 key: data,
@@ -234,11 +265,17 @@ where
         )
     }

-    fn take_gglwe_switching_key_prepared<A>(&mut self, infos: &A) -> (GLWESwitchingKeyPrepared<&mut [u8], B>, &mut Self)
+    fn take_gglwe_switching_key_prepared<A, M>(
+        &mut self,
+        module: &M,
+        infos: &A,
+    ) -> (GLWESwitchingKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN + VmpPMatBytesOf,
     {
-        let (data, scratch) = self.take_gglwe_prepared(infos);
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_gglwe_prepared(module, infos);
         (
             GLWESwitchingKeyPrepared {
                 key: data,
@@ -249,26 +286,36 @@ where
         )
     }

-    fn take_gglwe_automorphism_key<A>(&mut self, infos: &A) -> (AutomorphismKey<&mut [u8]>, &mut Self)
+    fn take_gglwe_automorphism_key<A, M>(&mut self, module: &M, infos: &A) -> (AutomorphismKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN,
     {
-        let (data, scratch) = self.take_glwe_switching_key(infos);
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_glwe_switching_key(module, infos);
         (AutomorphismKey { key: data, p: 0 }, scratch)
     }

-    fn take_gglwe_automorphism_key_prepared<A>(&mut self, infos: &A) -> (AutomorphismKeyPrepared<&mut [u8], B>, &mut Self)
+    fn take_gglwe_automorphism_key_prepared<A, M>(
+        &mut self,
+        module: &M,
+        infos: &A,
+    ) -> (AutomorphismKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN + VmpPMatBytesOf,
     {
-        let (data, scratch) = self.take_gglwe_switching_key_prepared(infos);
+        assert_eq!(module.n() as u32, infos.n());
+        let (data, scratch) = self.take_gglwe_switching_key_prepared(module, infos);
         (AutomorphismKeyPrepared { key: data, p: 0 }, scratch)
     }

-    fn take_tensor_key<A>(&mut self, infos: &A) -> (TensorKey<&mut [u8]>, &mut Self)
+    fn take_tensor_key<A, M>(&mut self, module: &M, infos: &A) -> (TensorKey<&mut [u8]>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN,
     {
+        assert_eq!(module.n() as u32, infos.n());
         assert_eq!(
             infos.rank_in(),
             infos.rank_out(),
@@ -277,28 +324,30 @@ where
         let mut keys: Vec<GLWESwitchingKey<&mut [u8]>> = Vec::new();
         let pairs: usize = (((infos.rank_out().0 + 1) * infos.rank_out().0) >> 1).max(1) as usize;

-        let mut scratch: &mut Scratch<B> = self;
+        let mut scratch: &mut Self = self;

         let mut ksk_infos: crate::layouts::GGLWECiphertextLayout = infos.gglwe_layout();
         ksk_infos.rank_in = Rank(1);

         if pairs != 0 {
-            let (gglwe, s) = scratch.take_glwe_switching_key(&ksk_infos);
+            let (gglwe, s) = scratch.take_glwe_switching_key(module, &ksk_infos);
             scratch = s;
             keys.push(gglwe);
         }
         for _ in 1..pairs {
-            let (gglwe, s) = scratch.take_glwe_switching_key(&ksk_infos);
+            let (gglwe, s) = scratch.take_glwe_switching_key(module, &ksk_infos);
             scratch = s;
             keys.push(gglwe);
         }
         (TensorKey { keys }, scratch)
     }

-    fn take_gglwe_tensor_key_prepared<A>(&mut self, infos: &A) -> (TensorKeyPrepared<&mut [u8], B>, &mut Self)
+    fn take_gglwe_tensor_key_prepared<A, M>(&mut self, module: &M, infos: &A) -> (TensorKeyPrepared<&mut [u8], B>, &mut Self)
     where
         A: GGLWEInfos,
+        M: ModuleN + VmpPMatBytesOf,
     {
+        assert_eq!(module.n() as u32, infos.n());
         assert_eq!(
             infos.rank_in(),
             infos.rank_out(),
@@ -308,18 +357,18 @@ where
         let mut keys: Vec<GLWESwitchingKeyPrepared<&mut [u8], B>> = Vec::new();
         let pairs: usize = (((infos.rank_out().0 + 1) * infos.rank_out().0) >> 1).max(1) as usize;

-        let mut scratch: &mut Scratch<B> = self;
+        let mut scratch: &mut Self = self;

         let mut ksk_infos: crate::layouts::GGLWECiphertextLayout = infos.gglwe_layout();
         ksk_infos.rank_in = Rank(1);

         if pairs != 0 {
-            let (gglwe, s) = scratch.take_gglwe_switching_key_prepared(&ksk_infos);
+            let (gglwe, s) = scratch.take_gglwe_switching_key_prepared(module, &ksk_infos);
             scratch = s;
             keys.push(gglwe);
         }
         for _ in 1..pairs {
-            let (gglwe, s) = scratch.take_gglwe_switching_key_prepared(&ksk_infos);
+            let (gglwe, s) = scratch.take_gglwe_switching_key_prepared(module, &ksk_infos);
             scratch = s;
             keys.push(gglwe);
         }
@@ -327,4 +376,4 @@ where
     }
 }

-impl<B: Backend> ScratchTakeCore<B> for Scratch<B> where Self: ScratchTakeBasic<B> + ScratchAvailable {}
+impl<B: Backend> ScratchTakeCore<B> for Scratch<B> where Self: ScratchTakeBasic + ScratchAvailable {}

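For illustration, a minimal sketch of the new `ScratchTakeCore` calling convention, where every take_* method now receives the module and checks its degree against the layout infos. This is a hypothetical helper, not code from this commit; the trait and type imports (poulpy_core's `ScratchTakeCore`/`GLWEInfos`, poulpy_hal's `ModuleN`/`Backend`) are assumed from the imports used in this file:

    // Hypothetical helper: borrows a temporary GLWE ciphertext from the scratch
    // arena; the assert added in take_glwe_ct panics if module.n() != infos.n().
    fn with_tmp_glwe<S, B, M, A>(scratch: &mut S, module: &M, infos: &A)
    where
        S: ScratchTakeCore<B>,
        B: Backend,
        M: ModuleN,
        A: GLWEInfos,
    {
        let (tmp_ct, rest) = scratch.take_glwe_ct(module, infos);
        // `tmp_ct` has rank + 1 columns and infos.size() limbs; `rest` is the
        // remainder of the arena, usable for further take_* calls.
        let _ = (tmp_ct, rest);
    }
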
@@ -4,3 +4,7 @@ use crate::layouts::Backend;
 pub trait ModuleNew<B: Backend> {
     fn new(n: u64) -> Self;
 }
+
+pub trait ModuleN {
+    fn n(&self) -> usize;
+}

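For illustration, a minimal sketch of what the new `ModuleN` trait enables: code that only needs the ring degree can be written against any `M: ModuleN` rather than a concrete `Module<B>`. This is a hypothetical helper, not code from this commit:

    use poulpy_hal::api::ModuleN;

    // Hypothetical helper: counts i64 coefficients for `cols` polynomials of
    // `size` limbs, needing nothing from the module except its degree n.
    fn coeff_count<M: ModuleN>(module: &M, cols: usize, size: usize) -> usize {
        module.n() * cols * size
    }
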
@@ -1,6 +1,6 @@
 use crate::{
-    api::{SvpPPolBytesOf, VecZnxBigBytesOf, VecZnxDftBytesOf, VmpPMatBytesOf},
-    layouts::{Backend, MatZnx, Module, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
+    api::{ModuleN, SvpPPolBytesOf, VecZnxBigBytesOf, VecZnxDftBytesOf, VmpPMatBytesOf},
+    layouts::{Backend, MatZnx, ScalarZnx, Scratch, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, VmpPMat},
 };

 /// Allocates a new [crate::layouts::ScratchOwned] of `size` aligned bytes.
@@ -28,11 +28,14 @@ pub trait TakeSlice {
     fn take_slice<T>(&mut self, len: usize) -> (&mut [T], &mut Self);
 }

-pub trait ScratchTakeBasic<B: Backend>
+pub trait ScratchTakeBasic
 where
     Self: TakeSlice,
 {
-    fn take_scalar_znx(&mut self, module: &Module<B>, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self) {
+    fn take_scalar_znx<M>(&mut self, module: &M, cols: usize) -> (ScalarZnx<&mut [u8]>, &mut Self)
+    where
+        M: ModuleN,
+    {
         let (take_slice, rem_slice) = self.take_slice(ScalarZnx::bytes_of(module.n(), cols));
         (
             ScalarZnx::from_data(take_slice, module.n(), cols),
@@ -40,15 +43,18 @@ where
         )
     }

-    fn take_svp_ppol(&mut self, module: &Module<B>, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self)
+    fn take_svp_ppol<M, B: Backend>(&mut self, module: &M, cols: usize) -> (SvpPPol<&mut [u8], B>, &mut Self)
     where
-        Module<B>: SvpPPolBytesOf,
+        M: SvpPPolBytesOf + ModuleN,
     {
         let (take_slice, rem_slice) = self.take_slice(module.bytes_of_svp_ppol(cols));
         (SvpPPol::from_data(take_slice, module.n(), cols), rem_slice)
     }

-    fn take_vec_znx(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self) {
+    fn take_vec_znx<M>(&mut self, module: &M, cols: usize, size: usize) -> (VecZnx<&mut [u8]>, &mut Self)
+    where
+        M: ModuleN,
+    {
         let (take_slice, rem_slice) = self.take_slice(VecZnx::bytes_of(module.n(), cols, size));
         (
             VecZnx::from_data(take_slice, module.n(), cols, size),
@@ -56,9 +62,9 @@ where
         )
     }

-    fn take_vec_znx_big(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self)
+    fn take_vec_znx_big<M, B: Backend>(&mut self, module: &M, cols: usize, size: usize) -> (VecZnxBig<&mut [u8], B>, &mut Self)
     where
-        Module<B>: VecZnxBigBytesOf,
+        M: VecZnxBigBytesOf + ModuleN,
     {
         let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vec_znx_big(cols, size));
         (
@@ -67,9 +73,9 @@ where
         )
     }

-    fn take_vec_znx_dft(&mut self, module: &Module<B>, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self)
+    fn take_vec_znx_dft<M, B: Backend>(&mut self, module: &M, cols: usize, size: usize) -> (VecZnxDft<&mut [u8], B>, &mut Self)
     where
-        Module<B>: VecZnxDftBytesOf,
+        M: VecZnxDftBytesOf + ModuleN,
     {
         let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vec_znx_dft(cols, size));

@@ -79,15 +85,15 @@ where
         )
     }

-    fn take_vec_znx_dft_slice(
+    fn take_vec_znx_dft_slice<M, B: Backend>(
         &mut self,
-        module: &Module<B>,
+        module: &M,
         len: usize,
         cols: usize,
         size: usize,
     ) -> (Vec<VecZnxDft<&mut [u8], B>>, &mut Self)
     where
-        Module<B>: VecZnxDftBytesOf,
+        M: VecZnxDftBytesOf + ModuleN,
     {
         let mut scratch: &mut Self = self;
         let mut slice: Vec<VecZnxDft<&mut [u8], B>> = Vec::with_capacity(len);
@@ -99,13 +105,10 @@ where
         (slice, scratch)
     }

-    fn take_vec_znx_slice(
-        &mut self,
-        module: &Module<B>,
-        len: usize,
-        cols: usize,
-        size: usize,
-    ) -> (Vec<VecZnx<&mut [u8]>>, &mut Self) {
+    fn take_vec_znx_slice<M>(&mut self, module: &M, len: usize, cols: usize, size: usize) -> (Vec<VecZnx<&mut [u8]>>, &mut Self)
+    where
+        M: ModuleN,
+    {
         let mut scratch: &mut Self = self;
         let mut slice: Vec<VecZnx<&mut [u8]>> = Vec::with_capacity(len);
         for _ in 0..len {
@@ -116,16 +119,16 @@ where
         (slice, scratch)
     }

-    fn take_vmp_pmat(
+    fn take_vmp_pmat<M, B: Backend>(
         &mut self,
-        module: &Module<B>,
+        module: &M,
         rows: usize,
         cols_in: usize,
         cols_out: usize,
         size: usize,
     ) -> (VmpPMat<&mut [u8], B>, &mut Self)
     where
-        Module<B>: VmpPMatBytesOf,
+        M: VmpPMatBytesOf + ModuleN,
     {
         let (take_slice, rem_slice) = self.take_slice(module.bytes_of_vmp_pmat(rows, cols_in, cols_out, size));
         (
@@ -134,14 +137,17 @@ where
         )
     }

-    fn take_mat_znx(
+    fn take_mat_znx<M>(
         &mut self,
-        module: &Module<B>,
+        module: &M,
         rows: usize,
         cols_in: usize,
         cols_out: usize,
         size: usize,
-    ) -> (MatZnx<&mut [u8]>, &mut Self) {
+    ) -> (MatZnx<&mut [u8]>, &mut Self)
+    where
+        M: ModuleN,
+    {
         let (take_slice, rem_slice) = self.take_slice(MatZnx::bytes_of(module.n(), rows, cols_in, cols_out, size));
         (
             MatZnx::from_data(take_slice, module.n(), rows, cols_in, cols_out, size),

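For illustration, a sketch of the reworked `ScratchTakeBasic` API: the take_* methods are now generic over any `M: ModuleN` (plus the relevant *BytesOf trait where needed), so scratch carving no longer depends on a concrete `Module<B>`. This is a hypothetical helper, not code from this commit:

    use poulpy_hal::api::{ModuleN, ScratchTakeBasic};

    // Hypothetical helper: carves two disjoint VecZnx temporaries out of the
    // same scratch arena, using only the module's ring degree.
    fn take_two_vec_znx<S, M>(scratch: &mut S, module: &M, cols: usize, size: usize)
    where
        S: ScratchTakeBasic,
        M: ModuleN,
    {
        let (a, rest) = scratch.take_vec_znx(module, cols, size);
        let (b, _rest) = rest.take_vec_znx(module, cols, size);
        // `a` and `b` are independent mutable views over the scratch buffer.
        let _ = (a, b);
    }
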
@@ -1,5 +1,5 @@
 use itertools::Itertools;
-use poulpy_core::layouts::prepared::GGSWCiphertextPreparedToRef;
+use poulpy_core::layouts::prepared::GGSWPreparedToRef;
 use poulpy_hal::layouts::{Backend, DataMut, DataRef, Module, Scratch};

 use crate::tfhe::bdd_arithmetic::{
@@ -60,15 +60,11 @@ pub fn eval_bdd_2w_to_1w<R: DataMut, A: DataRef, B: DataRef, T: UnsignedInteger,
     }

     // Collects inputs into a single array
-    let inputs: Vec<&dyn GGSWCiphertextPreparedToRef<BE>> = a
+    let inputs: Vec<&dyn GGSWPreparedToRef<BE>> = a
         .blocks
         .iter()
-        .map(|x| x as &dyn GGSWCiphertextPreparedToRef<BE>)
-        .chain(
-            b.blocks
-                .iter()
-                .map(|x| x as &dyn GGSWCiphertextPreparedToRef<BE>),
-        )
+        .map(|x| x as &dyn GGSWPreparedToRef<BE>)
+        .chain(b.blocks.iter().map(|x| x as &dyn GGSWPreparedToRef<BE>))
         .collect_vec();

     // Evaluates out[i] = circuit[i](a, b)

@@ -3,7 +3,7 @@ use poulpy_core::{
     GLWEExternalProductInplace, GLWEOperations, TakeGLWESlice,
     layouts::{
         GLWE, GLWEToMut, LWEInfos,
-        prepared::{GGSWCiphertextPreparedToRef, GGSWPrepared},
+        prepared::{GGSWPrepared, GGSWPreparedToRef},
     },
 };
 use poulpy_hal::{
@@ -39,7 +39,7 @@ where
         &self,
         module: &Module<BE>,
         out: &mut [GLWE<O>],
-        inputs: &[&dyn GGSWCiphertextPreparedToRef<BE>],
+        inputs: &[&dyn GGSWPreparedToRef<BE>],
         scratch: &mut Scratch<BE>,
     ) where
         O: DataMut;
@@ -55,7 +55,7 @@ where
         &self,
         module: &Module<BE>,
         out: &mut [GLWE<O>],
-        inputs: &[&dyn GGSWCiphertextPreparedToRef<BE>],
+        inputs: &[&dyn GGSWPreparedToRef<BE>],
         scratch: &mut Scratch<BE>,
     ) where
         O: DataMut,