Add cross-basek normalization (#90)

* Added cross_basek_normalization

* Updated method signatures to take layouts instead of individual parameters

* Fixed cross-base normalization

fix #91
fix #93
Jean-Philippe Bossuat
2025-09-30 14:40:10 +02:00
committed by GitHub
parent 4da790ea6a
commit 37e13b965c
216 changed files with 12481 additions and 7745 deletions
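The refactor applied throughout the files below is always the same: allocation helpers that used to take loose `usize` parameters (`basek`, `k`, `rows`, `digits`, `rank`, ...) now either take a layout descriptor (`alloc(module, &infos)` where `infos` implements the layout-info traits) or strongly typed wrappers (`alloc_with(module, base2k, k, ...)`). A minimal self-contained sketch of that pattern, using stand-in types rather than the actual poulpy API, is:

```rust
// Stand-in newtype wrappers mirroring Base2K, TorusPrecision and Rank.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Base2K(u32);
#[derive(Clone, Copy, PartialEq, Eq)]
struct TorusPrecision(u32);
#[derive(Clone, Copy, PartialEq, Eq)]
struct Rank(u32);

// A layout-info trait: anything that can describe its own parameters.
trait GlweLayout {
    fn base2k(&self) -> Base2K;
    fn k(&self) -> TorusPrecision;
    fn rank(&self) -> Rank;
    // Number of limbs needed to hold k bits in base-2^base2k digits.
    fn size(&self) -> usize {
        self.k().0.div_ceil(self.base2k().0) as usize
    }
}

struct Ciphertext {
    limbs: usize,
    cols: usize,
}

impl Ciphertext {
    // New-style entry point: read every parameter from a layout descriptor.
    fn alloc<A: GlweLayout>(infos: &A) -> Self {
        Self::alloc_with(infos.base2k(), infos.k(), infos.rank())
    }
    // The old parameter list survives behind `_with`, but now as typed wrappers.
    fn alloc_with(base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self {
        Ciphertext {
            limbs: k.0.div_ceil(base2k.0) as usize,
            cols: rank.0 as usize + 1,
        }
    }
}

// Any existing object that knows its own layout can be passed directly.
struct Params {
    base2k: Base2K,
    k: TorusPrecision,
    rank: Rank,
}
impl GlweLayout for Params {
    fn base2k(&self) -> Base2K { self.base2k }
    fn k(&self) -> TorusPrecision { self.k }
    fn rank(&self) -> Rank { self.rank }
}

fn main() {
    let params = Params { base2k: Base2K(16), k: TorusPrecision(54), rank: Rank(2) };
    assert_eq!(params.size(), 4); // ceil(54 / 16)
    let ct = Ciphertext::alloc(&params);
    assert_eq!(ct.limbs, 4);
    assert_eq!(ct.cols, 3); // rank + 1
}
```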

View File

@@ -1,10 +1,10 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
GGLWEAutomorphismKey, Infos,
Base2K, Degree, Digits, GGLWEAutomorphismKey, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
};
@@ -14,61 +14,107 @@ pub struct GGLWEAutomorphismKeyPrepared<D: Data, B: Backend> {
pub(crate) p: i64,
}
impl<B: Backend> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
GGLWEAutomorphismKeyPrepared::<Vec<u8>, B> {
key: GGLWESwitchingKeyPrepared::alloc(module, basek, k, rows, digits, rank, rank),
p: 0,
}
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
GGLWESwitchingKeyPrepared::bytes_of(module, basek, k, rows, digits, rank, rank)
}
}
impl<D: Data, B: Backend> Infos for GGLWEAutomorphismKeyPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
self.key.inner()
}
fn basek(&self) -> usize {
self.key.basek()
}
fn k(&self) -> usize {
self.key.k()
}
}
impl<D: Data, B: Backend> GGLWEAutomorphismKeyPrepared<D, B> {
pub fn p(&self) -> i64 {
self.p
}
}
pub fn digits(&self) -> usize {
self.key.digits()
impl<D: Data, B: Backend> LWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
fn n(&self) -> Degree {
self.key.n()
}
pub fn rank(&self) -> usize {
self.key.rank()
fn base2k(&self) -> Base2K {
self.key.base2k()
}
pub fn rank_in(&self) -> usize {
fn k(&self) -> TorusPrecision {
self.key.k()
}
fn size(&self) -> usize {
self.key.size()
}
}
impl<D: Data, B: Backend> GLWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWEAutomorphismKeyPrepared<D, B> {
fn rank_in(&self) -> Rank {
self.key.rank_in()
}
pub fn rank_out(&self) -> usize {
fn rank_out(&self) -> Rank {
self.key.rank_out()
}
fn digits(&self) -> Digits {
self.key.digits()
}
fn rows(&self) -> Rows {
self.key.rows()
}
}
impl<B: Backend> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
assert_eq!(
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared"
);
GGLWEAutomorphismKeyPrepared::<Vec<u8>, B> {
key: GGLWESwitchingKeyPrepared::alloc(module, infos),
p: 0,
}
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
GGLWEAutomorphismKeyPrepared {
key: GGLWESwitchingKeyPrepared::alloc_with(module, base2k, k, rows, digits, rank, rank),
p: 0,
}
}
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
assert_eq!(
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
}
pub fn alloc_bytes_with(
module: &Module<B>,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank: Rank,
) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, digits, rank, rank)
}
}
impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGLWEAutomorphismKey<DR>> for GGLWEAutomorphismKeyPrepared<D, B>
@@ -86,14 +132,7 @@ where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
let mut atk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> = GGLWEAutomorphismKeyPrepared::alloc(
module,
self.basek(),
self.k(),
self.rows(),
self.digits(),
self.rank(),
);
let mut atk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> = GGLWEAutomorphismKeyPrepared::alloc(module, self);
atk_prepared.prepare(module, self, scratch);
atk_prepared
}
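In this file the old `Infos` trait (inner/basek/k) is replaced by the layered traits `LWEInfos`, `GLWEInfos`, and `GGLWELayoutInfos`, each implemented by forwarding to the wrapped switching key, with `rank()` defined as the output rank. A rough self-contained sketch of that layering, with simplified stand-in names rather than the real trait definitions:

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Rank(u32);

// Lowest layer: properties every LWE-like object has.
trait LweInfos {
    fn k_bits(&self) -> u32;
}

// GLWE layer adds a rank.
trait GlweInfos: LweInfos {
    fn rank(&self) -> Rank;
}

// GGLWE layer distinguishes input and output rank; `rank()` reports the output rank.
trait GgLweInfos: GlweInfos {
    fn rank_in(&self) -> Rank;
    fn rank_out(&self) -> Rank;
}

// A wrapper key forwards everything to its inner key, in the same spirit as
// GGLWEAutomorphismKeyPrepared forwarding to GGLWESwitchingKeyPrepared.
struct InnerKey { k_bits: u32, rank_in: Rank, rank_out: Rank }
struct AutomorphismKey { inner: InnerKey, p: i64 }

impl LweInfos for AutomorphismKey {
    fn k_bits(&self) -> u32 { self.inner.k_bits }
}
impl GlweInfos for AutomorphismKey {
    fn rank(&self) -> Rank { self.rank_out() } // GLWE rank == output rank
}
impl GgLweInfos for AutomorphismKey {
    fn rank_in(&self) -> Rank { self.inner.rank_in }
    fn rank_out(&self) -> Rank { self.inner.rank_out }
}

fn main() {
    let key = AutomorphismKey {
        inner: InnerKey { k_bits: 54, rank_in: Rank(2), rank_out: Rank(2) },
        p: -5,
    };
    // An automorphism key requires rank_in == rank_out, as asserted in `alloc`.
    assert_eq!(key.rank_in(), key.rank_out());
    assert_eq!(key.rank(), Rank(2));
    let _ = key.p;
}
```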

View File

@@ -1,115 +1,262 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, ZnxInfos},
oep::VmpPMatAllocBytesImpl,
};
use crate::layouts::{
GGLWECiphertext, Infos,
Base2K, BuildError, Degree, Digits, GGLWECiphertext, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
prepared::{Prepare, PrepareAlloc},
};
#[derive(PartialEq, Eq)]
pub struct GGLWECiphertextPrepared<D: Data, B: Backend> {
pub(crate) data: VmpPMat<D, B>,
pub(crate) basek: usize,
pub(crate) k: usize,
pub(crate) digits: usize,
pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K,
pub(crate) digits: Digits,
}
impl<D: Data, B: Backend> LWEInfos for GGLWECiphertextPrepared<D, B> {
fn n(&self) -> Degree {
Degree(self.data.n() as u32)
}
fn base2k(&self) -> Base2K {
self.base2k
}
fn k(&self) -> TorusPrecision {
self.k
}
fn size(&self) -> usize {
self.data.size()
}
}
impl<D: Data, B: Backend> GLWEInfos for GGLWECiphertextPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWECiphertextPrepared<D, B> {
fn rank_in(&self) -> Rank {
Rank(self.data.cols_in() as u32)
}
fn rank_out(&self) -> Rank {
Rank(self.data.cols_out() as u32 - 1)
}
fn digits(&self) -> Digits {
self.digits
}
fn rows(&self) -> Rows {
Rows(self.data.rows() as u32)
}
}
pub struct GGLWECiphertextPreparedBuilder<D: Data, B: Backend> {
data: Option<VmpPMat<D, B>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
digits: Option<Digits>,
}
impl<D: Data, B: Backend> GGLWECiphertextPrepared<D, B> {
#[inline]
pub fn builder() -> GGLWECiphertextPreparedBuilder<D, B> {
GGLWECiphertextPreparedBuilder {
data: None,
base2k: None,
k: None,
digits: None,
}
}
}
impl<B: Backend> GGLWECiphertextPreparedBuilder<Vec<u8>, B> {
#[inline]
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGLWELayoutInfos,
B: VmpPMatAllocBytesImpl<B>,
{
self.data = Some(VmpPMat::alloc(
infos.n().into(),
infos.rows().into(),
infos.rank_in().into(),
(infos.rank_out() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.digits = Some(infos.digits());
self
}
}
impl<D: Data, B: Backend> GGLWECiphertextPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VmpPMat<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn digits(mut self, digits: Digits) -> Self {
self.digits = Some(digits);
self
}
pub fn build(self) -> Result<GGLWECiphertextPrepared<D, B>, BuildError> {
let data: VmpPMat<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let digits: Digits = self.digits.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if digits == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGLWECiphertextPrepared {
data,
base2k,
k,
digits,
})
}
}
impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
#[allow(clippy::too_many_arguments)]
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()");
Self::alloc_with(
module,
infos.base2k(),
infos.k(),
infos.rows(),
infos.digits(),
infos.rank_in(),
infos.rank_out(),
)
}
pub fn alloc_with(
module: &Module<B>,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank_in: Rank,
rank_out: Rank,
) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
let size: usize = k.div_ceil(basek);
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size > digits,
"invalid gglwe: ceil(k/basek): {} <= digits: {}",
size,
digits
size as u32 > digits.0,
"invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
digits.0
);
assert!(
rows * digits <= size,
"invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
rows,
digits,
size
rows.0 * digits.0 <= size as u32,
"invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
rows.0,
digits.0,
);
Self {
data: module.vmp_pmat_alloc(rows, rank_in, rank_out + 1, size),
basek,
data: module.vmp_pmat_alloc(rows.into(), rank_in.into(), (rank_out + 1).into(), size),
k,
base2k,
digits,
}
}
#[allow(clippy::too_many_arguments)]
pub fn bytes_of(
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()");
Self::alloc_bytes_with(
module,
infos.base2k(),
infos.k(),
infos.rows(),
infos.digits(),
infos.rank_in(),
infos.rank_out(),
)
}
pub fn alloc_bytes_with(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank_in: Rank,
rank_out: Rank,
) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
let size: usize = k.div_ceil(basek);
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size > digits,
"invalid gglwe: ceil(k/basek): {} <= digits: {}",
size,
digits
size as u32 > digits.0,
"invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
digits.0
);
assert!(
rows * digits <= size,
"invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
rows,
digits,
size
rows.0 * digits.0 <= size as u32,
"invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
rows.0,
digits.0,
);
module.vmp_pmat_alloc_bytes(rows, rank_in, rank_out + 1, rows)
}
}
impl<D: Data, B: Backend> Infos for GGLWECiphertextPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
&self.data
}
fn basek(&self) -> usize {
self.basek
}
fn k(&self) -> usize {
self.k
}
}
impl<D: Data, B: Backend> GGLWECiphertextPrepared<D, B> {
pub fn rank(&self) -> usize {
self.data.cols_out() - 1
}
pub fn digits(&self) -> usize {
self.digits
}
pub fn rank_in(&self) -> usize {
self.data.cols_in()
}
pub fn rank_out(&self) -> usize {
self.data.cols_out() - 1
module.vmp_pmat_alloc_bytes(rows.into(), rank_in.into(), (rank_out + 1).into(), size)
}
}
@@ -119,8 +266,8 @@ where
{
fn prepare(&mut self, module: &Module<B>, other: &GGLWECiphertext<DR>, scratch: &mut Scratch<B>) {
module.vmp_prepare(&mut self.data, &other.data, scratch);
self.basek = other.basek;
self.k = other.k;
self.base2k = other.base2k;
self.digits = other.digits;
}
}
@@ -130,15 +277,7 @@ where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWECiphertextPrepared<Vec<u8>, B> {
let mut atk_prepared: GGLWECiphertextPrepared<Vec<u8>, B> = GGLWECiphertextPrepared::alloc(
module,
self.basek(),
self.k(),
self.rows(),
self.digits(),
self.rank_in(),
self.rank_out(),
);
let mut atk_prepared: GGLWECiphertextPrepared<Vec<u8>, B> = GGLWECiphertextPrepared::alloc(module, self);
atk_prepared.prepare(module, self, scratch);
atk_prepared
}
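The allocation guards above enforce two relations between the new quantities: the limb count is size = ceil(k / base2k), the decomposition must leave room for the digits (size > digits), and the gadget rows must fit (rows * digits <= size). A small standalone check of those inequalities, mirroring the asserts but not calling the library:

```rust
// size = ceil(k / base2k): number of base-2^base2k limbs needed for k bits of precision.
fn limb_count(k: u32, base2k: u32) -> usize {
    k.div_ceil(base2k) as usize
}

// The same sanity checks the diff performs before allocating a VmpPMat.
fn check_gglwe_layout(k: u32, base2k: u32, rows: u32, digits: u32) -> Result<usize, String> {
    let size = limb_count(k, base2k);
    if size as u32 <= digits {
        return Err(format!("ceil(k/base2k) = {size} <= digits = {digits}"));
    }
    if rows * digits > size as u32 {
        return Err(format!("rows * digits = {} > ceil(k/base2k) = {size}", rows * digits));
    }
    Ok(size)
}

fn main() {
    // k = 54 bits, base2k = 18: size = 3 limbs.
    assert_eq!(limb_count(54, 18), 3);
    // rows = 3, digits = 1: 3 * 1 <= 3 and 3 > 1, so the layout is accepted.
    assert_eq!(check_gglwe_layout(54, 18, 3, 1), Ok(3));
    // rows = 2, digits = 2: 2 * 2 = 4 > 3, rejected.
    assert!(check_gglwe_layout(54, 18, 2, 2).is_err());
}
```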

View File

@@ -1,10 +1,10 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
GGLWESwitchingKey, Infos,
Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
prepared::{GGLWECiphertextPrepared, Prepare, PrepareAlloc},
};
@@ -15,75 +15,103 @@ pub struct GGLWESwitchingKeyPrepared<D: Data, B: Backend> {
pub(crate) sk_out_n: usize, // Degree of sk_out
}
impl<D: Data, B: Backend> LWEInfos for GGLWESwitchingKeyPrepared<D, B> {
fn n(&self) -> Degree {
self.key.n()
}
fn base2k(&self) -> Base2K {
self.key.base2k()
}
fn k(&self) -> TorusPrecision {
self.key.k()
}
fn size(&self) -> usize {
self.key.size()
}
}
impl<D: Data, B: Backend> GLWEInfos for GGLWESwitchingKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWESwitchingKeyPrepared<D, B> {
fn rank_in(&self) -> Rank {
self.key.rank_in()
}
fn rank_out(&self) -> Rank {
self.key.rank_out()
}
fn digits(&self) -> Digits {
self.key.digits()
}
fn rows(&self) -> Rows {
self.key.rows()
}
}
impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
#[allow(clippy::too_many_arguments)]
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
debug_assert_eq!(module.n() as u32, infos.n(), "module.n() != infos.n()");
GGLWESwitchingKeyPrepared::<Vec<u8>, B> {
key: GGLWECiphertextPrepared::alloc(module, basek, k, rows, digits, rank_in, rank_out),
key: GGLWECiphertextPrepared::alloc(module, infos),
sk_in_n: 0,
sk_out_n: 0,
}
}
#[allow(clippy::too_many_arguments)]
pub fn bytes_of(
pub fn alloc_with(
module: &Module<B>,
basek: usize,
k: usize,
rows: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank_in: Rank,
rank_out: Rank,
) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
GGLWESwitchingKeyPrepared::<Vec<u8>, B> {
key: GGLWECiphertextPrepared::alloc_with(module, base2k, k, rows, digits, rank_in, rank_out),
sk_in_n: 0,
sk_out_n: 0,
}
}
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
debug_assert_eq!(module.n() as u32, infos.n(), "module.n() != infos.n()");
GGLWECiphertextPrepared::alloc_bytes(module, infos)
}
pub fn alloc_bytes_with(
module: &Module<B>,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank_in: Rank,
rank_out: Rank,
) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
GGLWECiphertextPrepared::bytes_of(module, basek, k, rows, digits, rank_in, rank_out)
}
}
impl<D: Data, B: Backend> Infos for GGLWESwitchingKeyPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
self.key.inner()
}
fn basek(&self) -> usize {
self.key.basek()
}
fn k(&self) -> usize {
self.key.k()
}
}
impl<D: Data, B: Backend> GGLWESwitchingKeyPrepared<D, B> {
pub fn rank(&self) -> usize {
self.key.data.cols_out() - 1
}
pub fn rank_in(&self) -> usize {
self.key.data.cols_in()
}
pub fn rank_out(&self) -> usize {
self.key.data.cols_out() - 1
}
pub fn digits(&self) -> usize {
self.key.digits()
}
pub fn sk_degree_in(&self) -> usize {
self.sk_in_n
}
pub fn sk_degree_out(&self) -> usize {
self.sk_out_n
GGLWECiphertextPrepared::alloc_bytes_with(module, base2k, k, rows, digits, rank_in, rank_out)
}
}
@@ -103,15 +131,7 @@ where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
let mut atk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, B> = GGLWESwitchingKeyPrepared::alloc(
module,
self.basek(),
self.k(),
self.rows(),
self.digits(),
self.rank_in(),
self.rank_out(),
);
let mut atk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, B> = GGLWESwitchingKeyPrepared::alloc(module, self);
atk_prepared.prepare(module, self, scratch);
atk_prepared
}
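A recurring simplification in these files is in `prepare_alloc`: because the unprepared key now implements the same layout traits as its prepared counterpart, allocation can take `self` directly instead of re-listing basek/k/rows/digits/ranks. A toy version of that prepare / prepare_alloc pair, with hypothetical types rather than the poulpy traits:

```rust
trait Layout {
    fn limbs(&self) -> usize;
    fn cols(&self) -> usize;
}

// "Unprepared" key in coefficient representation.
struct Key { coeffs: Vec<i64>, limbs: usize, cols: usize }
// "Prepared" key in whatever representation the backend multiplies fastest.
struct KeyPrepared { data: Vec<i64>, limbs: usize, cols: usize }

impl Layout for Key {
    fn limbs(&self) -> usize { self.limbs }
    fn cols(&self) -> usize { self.cols }
}

impl KeyPrepared {
    // Allocation is driven entirely by a layout descriptor...
    fn alloc<A: Layout>(infos: &A) -> Self {
        KeyPrepared {
            data: vec![0; infos.limbs() * infos.cols()],
            limbs: infos.limbs(),
            cols: infos.cols(),
        }
    }
    // ...and `prepare` fills it from the unprepared key (here: a trivial copy).
    fn prepare(&mut self, other: &Key) {
        self.data.copy_from_slice(&other.coeffs);
    }
}

impl Key {
    // prepare_alloc = alloc from `self`'s own layout, then prepare into it.
    fn prepare_alloc(&self) -> KeyPrepared {
        let mut prepared = KeyPrepared::alloc(self);
        prepared.prepare(self);
        prepared
    }
}

fn main() {
    let key = Key { coeffs: vec![1; 12], limbs: 3, cols: 4 };
    let prepared = key.prepare_alloc();
    assert_eq!(prepared.data.len(), prepared.limbs * prepared.cols);
}
```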

View File

@@ -1,10 +1,10 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
GGLWETensorKey, Infos,
Base2K, Degree, Digits, GGLWELayoutInfos, GGLWETensorKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
};
@@ -13,61 +13,126 @@ pub struct GGLWETensorKeyPrepared<D: Data, B: Backend> {
pub(crate) keys: Vec<GGLWESwitchingKeyPrepared<D, B>>,
}
impl<D: Data, B: Backend> LWEInfos for GGLWETensorKeyPrepared<D, B> {
fn n(&self) -> Degree {
self.keys[0].n()
}
fn base2k(&self) -> Base2K {
self.keys[0].base2k()
}
fn k(&self) -> TorusPrecision {
self.keys[0].k()
}
fn size(&self) -> usize {
self.keys[0].size()
}
}
impl<D: Data, B: Backend> GLWEInfos for GGLWETensorKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWETensorKeyPrepared<D, B> {
fn rank_in(&self) -> Rank {
self.rank_out()
}
fn rank_out(&self) -> Rank {
self.keys[0].rank_out()
}
fn digits(&self) -> Digits {
self.keys[0].digits()
}
fn rows(&self) -> Rows {
self.keys[0].rows()
}
}
impl<B: Backend> GGLWETensorKeyPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
assert_eq!(
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for GGLWETensorKeyPrepared"
);
Self::alloc_with(
module,
infos.base2k(),
infos.k(),
infos.rows(),
infos.digits(),
infos.rank_out(),
)
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
let mut keys: Vec<GGLWESwitchingKeyPrepared<Vec<u8>, B>> = Vec::new();
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
(0..pairs).for_each(|_| {
keys.push(GGLWESwitchingKeyPrepared::alloc(
module, basek, k, rows, digits, 1, rank,
keys.push(GGLWESwitchingKeyPrepared::alloc_with(
module,
base2k,
k,
rows,
digits,
Rank(1),
rank,
));
});
Self { keys }
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
assert_eq!(
infos.rank_in(),
infos.rank_out(),
"rank_in != rank_out is not supported for GGLWETensorKey"
);
let rank_out: usize = infos.rank_out().into();
let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1);
pairs
* GGLWESwitchingKeyPrepared::alloc_bytes_with(
module,
infos.base2k(),
infos.k(),
infos.rows(),
infos.digits(),
Rank(1),
infos.rank_out(),
)
}
pub fn alloc_bytes_with(
module: &Module<B>,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank: Rank,
) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
pairs * GGLWESwitchingKeyPrepared::bytes_of(module, basek, k, rows, digits, 1, rank)
}
}
impl<D: Data, B: Backend> Infos for GGLWETensorKeyPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
self.keys[0].inner()
}
fn basek(&self) -> usize {
self.keys[0].basek()
}
fn k(&self) -> usize {
self.keys[0].k()
}
}
impl<D: Data, B: Backend> GGLWETensorKeyPrepared<D, B> {
pub fn rank(&self) -> usize {
self.keys[0].rank()
}
pub fn rank_in(&self) -> usize {
self.keys[0].rank_in()
}
pub fn rank_out(&self) -> usize {
self.keys[0].rank_out()
}
pub fn digits(&self) -> usize {
self.keys[0].digits()
let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
pairs * GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, digits, Rank(1), rank)
}
}
@@ -77,7 +142,7 @@ impl<D: DataMut, B: Backend> GGLWETensorKeyPrepared<D, B> {
if i > j {
std::mem::swap(&mut i, &mut j);
};
let rank: usize = self.rank();
let rank: usize = self.rank_out().into();
&mut self.keys[i * rank + j - (i * (i + 1) / 2)]
}
}
@@ -88,7 +153,7 @@ impl<D: DataRef, B: Backend> GGLWETensorKeyPrepared<D, B> {
if i > j {
std::mem::swap(&mut i, &mut j);
};
let rank: usize = self.rank();
let rank: usize = self.rank_out().into();
&self.keys[i * rank + j - (i * (i + 1) / 2)]
}
}
@@ -116,14 +181,7 @@ where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWETensorKeyPrepared<Vec<u8>, B> {
let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> = GGLWETensorKeyPrepared::alloc(
module,
self.basek(),
self.k(),
self.rows(),
self.digits(),
self.rank(),
);
let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> = GGLWETensorKeyPrepared::alloc(module, self);
tsk_prepared.prepare(module, self, scratch);
tsk_prepared
}
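GGLWETensorKeyPrepared stores one switching key per unordered pair of GLWE columns, hence pairs = ((rank + 1) * rank) / 2 (clamped to at least 1), and `at(i, j)` maps a pair to its slot with an upper-triangular index. A standalone check of that indexing, using the same formulas in a toy harness:

```rust
// Number of keys stored for a tensor key of the given rank:
// one per unordered pair (i, j) with 0 <= i <= j < rank.
fn pair_count(rank: u32) -> usize {
    (((rank + 1) * rank) >> 1).max(1) as usize
}

// Upper-triangular index used by GGLWETensorKeyPrepared::at / at_mut.
fn pair_index(mut i: usize, mut j: usize, rank: usize) -> usize {
    if i > j {
        std::mem::swap(&mut i, &mut j);
    }
    i * rank + j - (i * (i + 1) / 2)
}

fn main() {
    let rank = 3usize;
    assert_eq!(pair_count(rank as u32), 6); // (3 + 1) * 3 / 2

    // Every pair maps to a distinct slot in 0..pair_count, and (i, j) == (j, i).
    let mut seen = vec![false; pair_count(rank as u32)];
    for i in 0..rank {
        for j in i..rank {
            let idx = pair_index(i, j, rank);
            assert_eq!(idx, pair_index(j, i, rank));
            assert!(!seen[idx]);
            seen[idx] = true;
        }
    }
    assert!(seen.into_iter().all(|b| b));
}
```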

View File

@@ -1,99 +1,261 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, ZnxInfos},
oep::VmpPMatAllocBytesImpl,
};
use crate::layouts::{
GGSWCiphertext, Infos,
Base2K, BuildError, Degree, Digits, GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
prepared::{Prepare, PrepareAlloc},
};
#[derive(PartialEq, Eq)]
pub struct GGSWCiphertextPrepared<D: Data, B: Backend> {
pub(crate) data: VmpPMat<D, B>,
pub(crate) basek: usize,
pub(crate) k: usize,
pub(crate) digits: usize,
pub(crate) k: TorusPrecision,
pub(crate) base2k: Base2K,
pub(crate) digits: Digits,
}
impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
let size: usize = k.div_ceil(basek);
debug_assert!(digits > 0, "invalid ggsw: `digits` == 0");
impl<D: Data, B: Backend> LWEInfos for GGSWCiphertextPrepared<D, B> {
fn n(&self) -> Degree {
Degree(self.data.n() as u32)
}
fn base2k(&self) -> Base2K {
self.base2k
}
fn k(&self) -> TorusPrecision {
self.k
}
fn size(&self) -> usize {
self.data.size()
}
}
impl<D: Data, B: Backend> GLWEInfos for GGSWCiphertextPrepared<D, B> {
fn rank(&self) -> Rank {
Rank(self.data.cols_out() as u32 - 1)
}
}
impl<D: Data, B: Backend> GGSWInfos for GGSWCiphertextPrepared<D, B> {
fn digits(&self) -> Digits {
self.digits
}
fn rows(&self) -> Rows {
Rows(self.data.rows() as u32)
}
}
pub struct GGSWCiphertextPreparedBuilder<D: Data, B: Backend> {
data: Option<VmpPMat<D, B>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
digits: Option<Digits>,
}
impl<D: Data, B: Backend> GGSWCiphertextPrepared<D, B> {
#[inline]
pub fn builder() -> GGSWCiphertextPreparedBuilder<D, B> {
GGSWCiphertextPreparedBuilder {
data: None,
base2k: None,
k: None,
digits: None,
}
}
}
impl<B: Backend> GGSWCiphertextPreparedBuilder<Vec<u8>, B> {
#[inline]
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGSWInfos,
B: VmpPMatAllocBytesImpl<B>,
{
debug_assert!(
size > digits,
"invalid ggsw: ceil(k/basek): {} <= digits: {}",
size,
digits
infos.size() as u32 > infos.digits().0,
"invalid ggsw: ceil(k/base2k): {} <= digits: {}",
infos.size(),
infos.digits()
);
assert!(
rows * digits <= size,
"invalid ggsw: rows: {} * digits:{} > ceil(k/basek): {}",
rows,
infos.rows().0 * infos.digits().0 <= infos.size() as u32,
"invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {}",
infos.rows(),
infos.digits(),
infos.size(),
);
self.data = Some(VmpPMat::alloc(
infos.n().into(),
infos.rows().into(),
(infos.rank() + 1).into(),
(infos.rank() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.digits = Some(infos.digits());
self
}
}
impl<D: Data, B: Backend> GGSWCiphertextPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VmpPMat<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn digits(mut self, digits: Digits) -> Self {
self.digits = Some(digits);
self
}
pub fn build(self) -> Result<GGSWCiphertextPrepared<D, B>, BuildError> {
let data: VmpPMat<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let digits: Digits = self.digits.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if digits == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGSWCiphertextPrepared {
data,
base2k,
k,
digits,
})
}
}
impl<B: Backend> GGSWCiphertextPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGSWInfos,
Module<B>: VmpPMatAlloc<B>,
{
Self::alloc_with(
module,
infos.base2k(),
infos.k(),
infos.rows(),
infos.digits(),
infos.rank(),
)
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size as u32 > digits.0,
"invalid ggsw: ceil(k/base2k): {size} <= digits: {}",
digits.0
);
assert!(
rows.0 * digits.0 <= size as u32,
"invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {size}",
rows.0,
digits.0,
);
Self {
data: module.vmp_pmat_alloc(rows, rank + 1, rank + 1, k.div_ceil(basek)),
basek,
data: module.vmp_pmat_alloc(
rows.into(),
(rank + 1).into(),
(rank + 1).into(),
k.0.div_ceil(base2k.0) as usize,
),
k,
base2k,
digits,
}
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGSWInfos,
Module<B>: VmpPMatAllocBytes,
{
Self::alloc_bytes_with(
module,
infos.base2k(),
infos.k(),
infos.rows(),
infos.digits(),
infos.rank(),
)
}
pub fn alloc_bytes_with(
module: &Module<B>,
base2k: Base2K,
k: TorusPrecision,
rows: Rows,
digits: Digits,
rank: Rank,
) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
let size: usize = k.div_ceil(basek);
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size > digits,
"invalid ggsw: ceil(k/basek): {} <= digits: {}",
size,
digits
size as u32 > digits.0,
"invalid ggsw: ceil(k/base2k): {size} <= digits: {}",
digits.0
);
assert!(
rows * digits <= size,
"invalid ggsw: rows: {} * digits:{} > ceil(k/basek): {}",
rows,
digits,
size
rows.0 * digits.0 <= size as u32,
"invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {size}",
rows.0,
digits.0,
);
module.vmp_pmat_alloc_bytes(rows, rank + 1, rank + 1, size)
}
}
impl<D: Data, B: Backend> Infos for GGSWCiphertextPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
&self.data
}
fn basek(&self) -> usize {
self.basek
}
fn k(&self) -> usize {
self.k
}
}
impl<D: Data, B: Backend> GGSWCiphertextPrepared<D, B> {
pub fn rank(&self) -> usize {
self.data.cols_out() - 1
}
pub fn digits(&self) -> usize {
self.digits
module.vmp_pmat_alloc_bytes(rows.into(), (rank + 1).into(), (rank + 1).into(), size)
}
}
@@ -110,7 +272,7 @@ where
fn prepare(&mut self, module: &Module<B>, other: &GGSWCiphertext<DR>, scratch: &mut Scratch<B>) {
module.vmp_prepare(&mut self.data, &other.data, scratch);
self.k = other.k;
self.basek = other.basek;
self.base2k = other.base2k;
self.digits = other.digits;
}
}
@@ -120,14 +282,7 @@ where
Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGSWCiphertextPrepared<Vec<u8>, B> {
let mut ggsw_prepared: GGSWCiphertextPrepared<Vec<u8>, B> = GGSWCiphertextPrepared::alloc(
module,
self.basek(),
self.k(),
self.rows(),
self.digits(),
self.rank(),
);
let mut ggsw_prepared: GGSWCiphertextPrepared<Vec<u8>, B> = GGSWCiphertextPrepared::alloc(module, self);
ggsw_prepared.prepare(module, self, scratch);
ggsw_prepared
}
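The new builders follow a fail-fast pattern: optional fields collected with setters (or all at once via `layout`), then `build()` reports missing fields first and zero values second before constructing the struct (note that in the diff the `digits == 0` branch also returns `ZeroBase2K`). A simplified stand-in for that validation flow, not the actual poulpy builder:

```rust
// Simplified stand-in for the BuildError variants used by the new builders.
#[derive(Debug, PartialEq, Eq)]
enum BuildError {
    MissingData,
    MissingBase2K,
    MissingK,
    ZeroBase2K,
    ZeroTorusPrecision,
}

struct CiphertextPrepared { data: Vec<u8>, base2k: u32, k: u32 }

#[derive(Default)]
struct Builder {
    data: Option<Vec<u8>>,
    base2k: Option<u32>,
    k: Option<u32>,
}

impl Builder {
    fn data(mut self, data: Vec<u8>) -> Self { self.data = Some(data); self }
    fn base2k(mut self, base2k: u32) -> Self { self.base2k = Some(base2k); self }
    fn k(mut self, k: u32) -> Self { self.k = Some(k); self }

    // Same shape as the diff's build(): "missing field" errors first,
    // then "zero value" sanity checks, then construct.
    fn build(self) -> Result<CiphertextPrepared, BuildError> {
        let data = self.data.ok_or(BuildError::MissingData)?;
        let base2k = self.base2k.ok_or(BuildError::MissingBase2K)?;
        let k = self.k.ok_or(BuildError::MissingK)?;
        if base2k == 0 {
            return Err(BuildError::ZeroBase2K);
        }
        if k == 0 {
            return Err(BuildError::ZeroTorusPrecision);
        }
        Ok(CiphertextPrepared { data, base2k, k })
    }
}

fn main() {
    // A missing field is reported before any value check.
    assert_eq!(Builder::default().base2k(16).k(54).build().err(), Some(BuildError::MissingData));
    // A fully specified builder succeeds.
    let ct = Builder::default().data(vec![0u8; 32]).base2k(16).k(54).build().unwrap();
    assert_eq!((ct.base2k, ct.k, ct.data.len()), (16, 54, 32));
}
```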

View File

@@ -1,177 +0,0 @@
use poulpy_hal::{
api::{FillUniform, Reset, VecZnxCopy, VecZnxFillUniform},
layouts::{Backend, Data, DataMut, DataRef, Module, ReaderFrom, VecZnx, WriterTo},
};
use crate::layouts::{GLWECiphertext, Infos, compressed::Decompress};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
#[derive(PartialEq, Eq, Clone)]
pub struct GLWECiphertextCompressed<D: Data> {
pub(crate) data: VecZnx<D>,
pub(crate) basek: usize,
pub(crate) k: usize,
pub(crate) rank: usize,
pub(crate) seed: [u8; 32],
}
impl<D: DataRef> fmt::Debug for GLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl<D: DataRef> fmt::Display for GLWECiphertextCompressed<D> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"GLWECiphertextCompressed: basek={} k={} rank={} seed={:?}: {}",
self.basek(),
self.k(),
self.rank,
self.seed,
self.data
)
}
}
impl<D: DataMut> Reset for GLWECiphertextCompressed<D> {
fn reset(&mut self) {
self.data.reset();
self.basek = 0;
self.k = 0;
self.rank = 0;
self.seed = [0u8; 32];
}
}
impl<D: DataMut> FillUniform for GLWECiphertextCompressed<D> {
fn fill_uniform(&mut self, source: &mut Source) {
self.data.fill_uniform(source);
}
}
impl<D: Data> Infos for GLWECiphertextCompressed<D> {
type Inner = VecZnx<D>;
fn inner(&self) -> &Self::Inner {
&self.data
}
fn basek(&self) -> usize {
self.basek
}
fn k(&self) -> usize {
self.k
}
}
impl<D: Data> GLWECiphertextCompressed<D> {
pub fn rank(&self) -> usize {
self.rank
}
}
impl GLWECiphertextCompressed<Vec<u8>> {
pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
Self {
data: VecZnx::alloc(n, 1, k.div_ceil(basek)),
basek,
k,
rank,
seed: [0u8; 32],
}
}
pub fn bytes_of(n: usize, basek: usize, k: usize) -> usize {
GLWECiphertext::bytes_of(n, basek, k, 1)
}
}
impl<D: DataMut> ReaderFrom for GLWECiphertextCompressed<D> {
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
self.k = reader.read_u64::<LittleEndian>()? as usize;
self.basek = reader.read_u64::<LittleEndian>()? as usize;
self.rank = reader.read_u64::<LittleEndian>()? as usize;
reader.read_exact(&mut self.seed)?;
self.data.read_from(reader)
}
}
impl<D: DataRef> WriterTo for GLWECiphertextCompressed<D> {
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_u64::<LittleEndian>(self.k as u64)?;
writer.write_u64::<LittleEndian>(self.basek as u64)?;
writer.write_u64::<LittleEndian>(self.rank as u64)?;
writer.write_all(&self.seed)?;
self.data.write_to(writer)
}
}
impl<D: DataMut, B: Backend, DR: DataRef> Decompress<B, GLWECiphertextCompressed<DR>> for GLWECiphertext<D> {
fn decompress(&mut self, module: &Module<B>, other: &GLWECiphertextCompressed<DR>)
where
Module<B>: VecZnxCopy + VecZnxFillUniform,
{
#[cfg(debug_assertions)]
{
use poulpy_hal::api::ZnxInfos;
assert_eq!(
self.n(),
other.data.n(),
"invalid receiver: self.n()={} != other.n()={}",
self.n(),
other.data.n()
);
assert_eq!(
self.size(),
other.size(),
"invalid receiver: self.size()={} != other.size()={}",
self.size(),
other.size()
);
assert_eq!(
self.rank(),
other.rank(),
"invalid receiver: self.rank()={} != other.rank()={}",
self.rank(),
other.rank()
);
}
let mut source: Source = Source::new(other.seed);
self.decompress_internal(module, other, &mut source);
}
}
impl<D: DataMut> GLWECiphertext<D> {
pub(crate) fn decompress_internal<DataOther, B: Backend>(
&mut self,
module: &Module<B>,
other: &GLWECiphertextCompressed<DataOther>,
source: &mut Source,
) where
DataOther: DataRef,
Module<B>: VecZnxCopy + VecZnxFillUniform,
{
#[cfg(debug_assertions)]
{
assert_eq!(self.rank(), other.rank())
}
let k: usize = other.k;
let basek: usize = other.basek;
let cols: usize = other.rank() + 1;
module.vec_znx_copy(&mut self.data, 0, &other.data, 0);
(1..cols).for_each(|i| {
module.vec_znx_fill_uniform(basek, &mut self.data, i, k, source);
});
self.basek = basek;
self.k = k;
}
}
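The deleted GLWECiphertextCompressed kept only one polynomial column plus a 32-byte seed; decompression copied that column into the output and refilled the remaining rank columns uniformly from the seed. A toy illustration of that seed-expansion idea, with a simple xorshift stand-in for the library's `Source` rather than the real sampler:

```rust
// Minimal xorshift64 stand-in for the seeded `Source` used by the library.
struct Source(u64);

impl Source {
    fn new(seed: u64) -> Self {
        Source(seed.max(1)) // xorshift must not start at 0
    }
    fn next_u64(&mut self) -> u64 {
        let mut x = self.0;
        x ^= x << 13;
        x ^= x >> 7;
        x ^= x << 17;
        self.0 = x;
        x
    }
}

// Compressed form: only the body column and the seed of the mask.
struct Compressed { body: Vec<u64>, seed: u64 }
// Expanded form: the body followed by `rank` pseudorandom mask columns.
struct Expanded { cols: Vec<Vec<u64>> }

fn decompress(c: &Compressed, rank: usize) -> Expanded {
    let mut source = Source::new(c.seed);
    let mut cols = vec![c.body.clone()]; // column 0 is copied verbatim
    for _ in 0..rank {
        cols.push((0..c.body.len()).map(|_| source.next_u64()).collect());
    }
    Expanded { cols }
}

fn main() {
    let c = Compressed { body: vec![7, 8, 9, 10], seed: 0x1234_5678 };
    let a = decompress(&c, 2);
    let b = decompress(&c, 2);
    assert_eq!(a.cols[0], c.body);
    // The mask is reproducible from the seed alone, which is what makes
    // shipping (body, seed) instead of all rank + 1 columns sound.
    assert_eq!(a.cols[1], b.cols[1]);
    assert_eq!(a.cols.len(), 3);
}
```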

View File

@@ -1,12 +1,13 @@
use poulpy_hal::{
api::{VecZnxDftAlloc, VecZnxDftAllocBytes, VecZnxDftApply},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VecZnxDft},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VecZnxDft, ZnxInfos},
oep::VecZnxDftAllocBytesImpl,
};
use crate::{
dist::Distribution,
layouts::{
GLWEPublicKey, Infos,
Base2K, BuildError, Degree, GLWEInfos, GLWEPublicKey, LWEInfos, Rank, TorusPrecision,
prepared::{Prepare, PrepareAlloc},
},
};
@@ -14,51 +15,157 @@ use crate::{
#[derive(PartialEq, Eq)]
pub struct GLWEPublicKeyPrepared<D: Data, B: Backend> {
pub(crate) data: VecZnxDft<D, B>,
pub(crate) basek: usize,
pub(crate) k: usize,
pub(crate) base2k: Base2K,
pub(crate) k: TorusPrecision,
pub(crate) dist: Distribution,
}
impl<D: Data, B: Backend> Infos for GLWEPublicKeyPrepared<D, B> {
type Inner = VecZnxDft<D, B>;
fn inner(&self) -> &Self::Inner {
&self.data
impl<D: Data, B: Backend> LWEInfos for GLWEPublicKeyPrepared<D, B> {
fn base2k(&self) -> Base2K {
self.base2k
}
fn basek(&self) -> usize {
self.basek
}
fn k(&self) -> usize {
fn k(&self) -> TorusPrecision {
self.k
}
fn size(&self) -> usize {
self.data.size()
}
fn n(&self) -> Degree {
Degree(self.data.n() as u32)
}
}
impl<D: Data, B: Backend> GLWEInfos for GLWEPublicKeyPrepared<D, B> {
fn rank(&self) -> Rank {
Rank(self.data.cols() as u32 - 1)
}
}
pub struct GLWEPublicKeyPreparedBuilder<D: Data, B: Backend> {
data: Option<VecZnxDft<D, B>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
}
impl<D: Data, B: Backend> GLWEPublicKeyPrepared<D, B> {
pub fn rank(&self) -> usize {
self.cols() - 1
#[inline]
pub fn builder() -> GLWEPublicKeyPreparedBuilder<D, B> {
GLWEPublicKeyPreparedBuilder {
data: None,
base2k: None,
k: None,
}
}
}
impl<B: Backend> GLWEPublicKeyPreparedBuilder<Vec<u8>, B> {
#[inline]
pub fn layout<A>(mut self, layout: &A) -> Self
where
A: GLWEInfos,
B: VecZnxDftAllocBytesImpl<B>,
{
self.data = Some(VecZnxDft::alloc(
layout.n().into(),
(layout.rank() + 1).into(),
layout.size(),
));
self.base2k = Some(layout.base2k());
self.k = Some(layout.k());
self
}
}
impl<D: Data, B: Backend> GLWEPublicKeyPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VecZnxDft<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
pub fn build(self) -> Result<GLWEPublicKeyPrepared<D, B>, BuildError> {
let data: VecZnxDft<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GLWEPublicKeyPrepared {
data,
base2k,
k,
dist: Distribution::NONE,
})
}
}
impl<B: Backend> GLWEPublicKeyPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GLWEInfos,
Module<B>: VecZnxDftAlloc<B>,
{
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()");
Self::alloc_with(module, infos.base2k(), infos.k(), infos.rank())
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self
where
Module<B>: VecZnxDftAlloc<B>,
{
Self {
data: module.vec_znx_dft_alloc(rank + 1, k.div_ceil(basek)),
basek,
data: module.vec_znx_dft_alloc((rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
base2k,
k,
dist: Distribution::NONE,
}
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GLWEInfos,
Module<B>: VecZnxDftAllocBytes,
{
debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()");
Self::alloc_bytes_with(module, infos.base2k(), infos.k(), infos.rank())
}
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize
where
Module<B>: VecZnxDftAllocBytes,
{
module.vec_znx_dft_alloc_bytes(rank + 1, k.div_ceil(basek))
module.vec_znx_dft_alloc_bytes((rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
}
}
@@ -67,8 +174,7 @@ where
Module<B>: VecZnxDftAlloc<B> + VecZnxDftApply<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GLWEPublicKeyPrepared<Vec<u8>, B> {
let mut pk_prepared: GLWEPublicKeyPrepared<Vec<u8>, B> =
GLWEPublicKeyPrepared::alloc(module, self.basek(), self.k(), self.rank());
let mut pk_prepared: GLWEPublicKeyPrepared<Vec<u8>, B> = GLWEPublicKeyPrepared::alloc(module, self);
pk_prepared.prepare(module, self, scratch);
pk_prepared
}
@@ -85,11 +191,11 @@ where
assert_eq!(self.size(), other.size());
}
(0..self.cols()).for_each(|i| {
(0..(self.rank() + 1).into()).for_each(|i| {
module.vec_znx_dft_apply(1, 0, &mut self.data, i, &other.data, i);
});
self.k = other.k;
self.basek = other.basek;
self.k = other.k();
self.base2k = other.base2k();
self.dist = other.dist;
}
}
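GLWEPublicKeyPrepared holds the public key already converted into the backend's DFT representation; `prepare` walks all rank + 1 columns and applies `vec_znx_dft_apply` to each. A schematic column-by-column conversion loop, with a placeholder transform standing in for the real negacyclic DFT:

```rust
// Placeholder for the backend's per-column conversion (the real code calls
// module.vec_znx_dft_apply on each column).
fn to_prepared_domain(column: &[i64]) -> Vec<i64> {
    // Any invertible per-column transform works for the illustration; a prefix sum keeps it simple.
    column
        .iter()
        .scan(0i64, |acc, &x| {
            *acc += x;
            Some(*acc)
        })
        .collect()
}

struct PublicKey { cols: Vec<Vec<i64>> }          // rank + 1 columns, coefficient domain
struct PublicKeyPrepared { cols: Vec<Vec<i64>> }  // same columns, prepared domain

fn prepare(pk: &PublicKey) -> PublicKeyPrepared {
    PublicKeyPrepared {
        // Every one of the rank + 1 columns is converted independently.
        cols: pk.cols.iter().map(|c| to_prepared_domain(c)).collect(),
    }
}

fn main() {
    let pk = PublicKey { cols: vec![vec![1, 2, 3], vec![4, 5, 6]] }; // rank = 1
    let prepared = prepare(&pk);
    assert_eq!(prepared.cols.len(), pk.cols.len());
    assert_eq!(prepared.cols[0], vec![1, 3, 6]);
}
```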

View File

@@ -6,7 +6,7 @@ use poulpy_hal::{
use crate::{
dist::Distribution,
layouts::{
GLWESecret,
Base2K, Degree, GLWEInfos, GLWESecret, LWEInfos, Rank, TorusPrecision,
prepared::{Prepare, PrepareAlloc},
},
};
@@ -16,36 +16,72 @@ pub struct GLWESecretPrepared<D: Data, B: Backend> {
pub(crate) dist: Distribution,
}
impl<D: Data, B: Backend> LWEInfos for GLWESecretPrepared<D, B> {
fn base2k(&self) -> Base2K {
Base2K(0)
}
fn k(&self) -> TorusPrecision {
TorusPrecision(0)
}
fn n(&self) -> Degree {
Degree(self.data.n() as u32)
}
fn size(&self) -> usize {
self.data.size()
}
}
impl<D: Data, B: Backend> GLWEInfos for GLWESecretPrepared<D, B> {
fn rank(&self) -> Rank {
Rank(self.data.cols() as u32)
}
}
impl<B: Backend> GLWESecretPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, rank: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GLWEInfos,
Module<B>: SvpPPolAlloc<B>,
{
assert_eq!(module.n() as u32, infos.n());
Self::alloc_with(module, infos.rank())
}
pub fn alloc_with(module: &Module<B>, rank: Rank) -> Self
where
Module<B>: SvpPPolAlloc<B>,
{
Self {
data: module.svp_ppol_alloc(rank),
data: module.svp_ppol_alloc(rank.into()),
dist: Distribution::NONE,
}
}
pub fn bytes_of(module: &Module<B>, rank: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GLWEInfos,
Module<B>: SvpPPolAllocBytes,
{
assert_eq!(module.n() as u32, infos.n());
Self::alloc_bytes_with(module, infos.rank())
}
pub fn alloc_bytes_with(module: &Module<B>, rank: Rank) -> usize
where
Module<B>: SvpPPolAllocBytes,
{
module.svp_ppol_alloc_bytes(rank)
module.svp_ppol_alloc_bytes(rank.into())
}
}
impl<D: Data, B: Backend> GLWESecretPrepared<D, B> {
pub fn n(&self) -> usize {
self.data.n()
pub fn n(&self) -> Degree {
Degree(self.data.n() as u32)
}
pub fn log_n(&self) -> usize {
self.data.log_n()
}
pub fn rank(&self) -> usize {
self.data.cols()
pub fn rank(&self) -> Rank {
Rank(self.data.cols() as u32)
}
}
@@ -54,7 +90,7 @@ where
Module<B>: SvpPrepare<B> + SvpPPolAlloc<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut poulpy_hal::layouts::Scratch<B>) -> GLWESecretPrepared<Vec<u8>, B> {
let mut sk_dft: GLWESecretPrepared<Vec<u8>, B> = GLWESecretPrepared::alloc(module, self.rank());
let mut sk_dft: GLWESecretPrepared<Vec<u8>, B> = GLWESecretPrepared::alloc(module, self);
sk_dft.prepare(module, self, scratch);
sk_dft
}
@@ -65,7 +101,7 @@ where
Module<B>: SvpPrepare<B>,
{
fn prepare(&mut self, module: &Module<B>, other: &GLWESecret<DR>, _scratch: &mut poulpy_hal::layouts::Scratch<B>) {
(0..self.rank()).for_each(|i| {
(0..self.rank().into()).for_each(|i| {
module.svp_prepare(&mut self.data, i, &other.data, i);
});
self.dist = other.dist

View File

@@ -1,65 +1,115 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
GLWEToLWESwitchingKey, Infos,
Base2K, Degree, Digits, GGLWELayoutInfos, GLWEInfos, GLWEToLWESwitchingKey, LWEInfos, Rank, Rows, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
};
#[derive(PartialEq, Eq)]
pub struct GLWEToLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWESwitchingKeyPrepared<D, B>);
impl<D: Data, B: Backend> Infos for GLWEToLWESwitchingKeyPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
self.0.inner()
impl<D: Data, B: Backend> LWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
fn base2k(&self) -> Base2K {
self.0.base2k()
}
fn basek(&self) -> usize {
self.0.basek()
}
fn k(&self) -> usize {
fn k(&self) -> TorusPrecision {
self.0.k()
}
fn n(&self) -> Degree {
self.0.n()
}
fn size(&self) -> usize {
self.0.size()
}
}
impl<D: Data, B: Backend> GLWEToLWESwitchingKeyPrepared<D, B> {
pub fn digits(&self) -> usize {
self.0.digits()
impl<D: Data, B: Backend> GLWEInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
pub fn rank(&self) -> usize {
self.0.rank()
}
pub fn rank_in(&self) -> usize {
impl<D: Data, B: Backend> GGLWELayoutInfos for GLWEToLWESwitchingKeyPrepared<D, B> {
fn rank_in(&self) -> Rank {
self.0.rank_in()
}
pub fn rank_out(&self) -> usize {
fn digits(&self) -> Digits {
self.0.digits()
}
fn rank_out(&self) -> Rank {
self.0.rank_out()
}
fn rows(&self) -> Rows {
self.0.rows()
}
}
impl<B: Backend> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, rank_in: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
debug_assert_eq!(
infos.digits().0,
1,
"digits > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
Self(GGLWESwitchingKeyPrepared::alloc(module, infos))
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_in: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
Self(GGLWESwitchingKeyPrepared::alloc(
module, basek, k, rows, 1, rank_in, 1,
Self(GGLWESwitchingKeyPrepared::alloc_with(
module,
base2k,
k,
rows,
Digits(1),
rank_in,
Rank(1),
))
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
debug_assert_eq!(
infos.digits().0,
1,
"digits > 1 is not supported for GLWEToLWESwitchingKeyPrepared"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
}
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_in: Rank) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, rank_in, 1)
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, Digits(1), rank_in, Rank(1))
}
}
@@ -68,13 +118,7 @@ where
Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> {
let mut ksk_prepared: GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> = GLWEToLWESwitchingKeyPrepared::alloc(
module,
self.0.basek(),
self.0.k(),
self.0.rows(),
self.0.rank_in(),
);
let mut ksk_prepared: GLWEToLWESwitchingKeyPrepared<Vec<u8>, B> = GLWEToLWESwitchingKeyPrepared::alloc(module, self);
ksk_prepared.prepare(module, self, scratch);
ksk_prepared
}

View File

@@ -1,65 +1,124 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
Infos, LWESwitchingKey,
Base2K, Degree, Digits, GGLWELayoutInfos, GLWEInfos, LWEInfos, LWESwitchingKey, Rank, Rows, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
};
#[derive(PartialEq, Eq)]
pub struct LWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWESwitchingKeyPrepared<D, B>);
impl<D: Data, B: Backend> Infos for LWESwitchingKeyPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
self.0.inner()
impl<D: Data, B: Backend> LWEInfos for LWESwitchingKeyPrepared<D, B> {
fn base2k(&self) -> Base2K {
self.0.base2k()
}
fn basek(&self) -> usize {
self.0.basek()
}
fn k(&self) -> usize {
fn k(&self) -> TorusPrecision {
self.0.k()
}
fn n(&self) -> Degree {
self.0.n()
}
fn size(&self) -> usize {
self.0.size()
}
}
impl<D: Data, B: Backend> GLWEInfos for LWESwitchingKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
impl<D: Data, B: Backend> LWESwitchingKeyPrepared<D, B> {
pub fn digits(&self) -> usize {
impl<D: Data, B: Backend> GGLWELayoutInfos for LWESwitchingKeyPrepared<D, B> {
fn digits(&self) -> Digits {
self.0.digits()
}
pub fn rank(&self) -> usize {
self.0.rank()
}
pub fn rank_in(&self) -> usize {
fn rank_in(&self) -> Rank {
self.0.rank_in()
}
pub fn rank_out(&self) -> usize {
fn rank_out(&self) -> Rank {
self.0.rank_out()
}
fn rows(&self) -> Rows {
self.0.rows()
}
}
impl<B: Backend> LWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
debug_assert_eq!(
infos.digits().0,
1,
"digits > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
Self(GGLWESwitchingKeyPrepared::alloc(module, infos))
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
Self(GGLWESwitchingKeyPrepared::alloc(
module, basek, k, rows, 1, 1, 1,
Self(GGLWESwitchingKeyPrepared::alloc_with(
module,
base2k,
k,
rows,
Digits(1),
Rank(1),
Rank(1),
))
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
debug_assert_eq!(
infos.digits().0,
1,
"digits > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWESwitchingKey"
);
debug_assert_eq!(
infos.rank_out().0,
1,
"rank_out > 1 is not supported for LWESwitchingKey"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
}
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, 1, 1)
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, Digits(1), Rank(1), Rank(1))
}
}
@@ -68,8 +127,7 @@ where
Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> LWESwitchingKeyPrepared<Vec<u8>, B> {
let mut ksk_prepared: LWESwitchingKeyPrepared<Vec<u8>, B> =
LWESwitchingKeyPrepared::alloc(module, self.0.basek(), self.0.k(), self.0.rows());
let mut ksk_prepared: LWESwitchingKeyPrepared<Vec<u8>, B> = LWESwitchingKeyPrepared::alloc(module, self);
ksk_prepared.prepare(module, self, scratch);
ksk_prepared
}

View File

@@ -1,10 +1,10 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
};
use crate::layouts::{
Infos, LWEToGLWESwitchingKey,
Base2K, Degree, Digits, GGLWELayoutInfos, GLWEInfos, LWEInfos, LWEToGLWESwitchingKey, Rank, Rows, TorusPrecision,
prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
};
@@ -12,55 +12,105 @@ use crate::layouts::{
#[derive(PartialEq, Eq)]
pub struct LWEToGLWESwitchingKeyPrepared<D: Data, B: Backend>(pub(crate) GGLWESwitchingKeyPrepared<D, B>);
impl<D: Data, B: Backend> Infos for LWEToGLWESwitchingKeyPrepared<D, B> {
type Inner = VmpPMat<D, B>;
fn inner(&self) -> &Self::Inner {
self.0.inner()
impl<D: Data, B: Backend> LWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
fn base2k(&self) -> Base2K {
self.0.base2k()
}
fn basek(&self) -> usize {
self.0.basek()
}
fn k(&self) -> usize {
fn k(&self) -> TorusPrecision {
self.0.k()
}
fn n(&self) -> Degree {
self.0.n()
}
fn size(&self) -> usize {
self.0.size()
}
}
impl<D: Data, B: Backend> LWEToGLWESwitchingKeyPrepared<D, B> {
pub fn digits(&self) -> usize {
impl<D: Data, B: Backend> GLWEInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
fn rank(&self) -> Rank {
self.rank_out()
}
}
impl<D: Data, B: Backend> GGLWELayoutInfos for LWEToGLWESwitchingKeyPrepared<D, B> {
fn digits(&self) -> Digits {
self.0.digits()
}
pub fn rank(&self) -> usize {
self.0.rank()
}
pub fn rank_in(&self) -> usize {
fn rank_in(&self) -> Rank {
self.0.rank_in()
}
pub fn rank_out(&self) -> usize {
fn rank_out(&self) -> Rank {
self.0.rank_out()
}
fn rows(&self) -> Rows {
self.0.rows()
}
}
impl<B: Backend> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, rank_out: usize) -> Self
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAlloc<B>,
{
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.digits().0,
1,
"digits > 1 is not supported for LWEToGLWESwitchingKey"
);
Self(GGLWESwitchingKeyPrepared::alloc(module, infos))
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_out: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
Self(GGLWESwitchingKeyPrepared::alloc(
module, basek, k, rows, 1, 1, rank_out,
Self(GGLWESwitchingKeyPrepared::alloc_with(
module,
base2k,
k,
rows,
Digits(1),
Rank(1),
rank_out,
))
}
pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_out: usize) -> usize
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGLWELayoutInfos,
Module<B>: VmpPMatAllocBytes,
{
debug_assert_eq!(
infos.rank_in().0,
1,
"rank_in > 1 is not supported for LWEToGLWESwitchingKey"
);
debug_assert_eq!(
infos.digits().0,
1,
"digits > 1 is not supported for LWEToGLWESwitchingKey"
);
GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
}
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_out: Rank) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
GGLWESwitchingKeyPrepared::<Vec<u8>, B>::bytes_of(module, basek, k, rows, digits, 1, rank_out)
GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, Digits(1), Rank(1), rank_out)
}
}
@@ -69,13 +119,7 @@ where
Module<B>: VmpPrepare<B> + VmpPMatAlloc<B>,
{
fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> {
let mut ksk_prepared: LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> = LWEToGLWESwitchingKeyPrepared::alloc(
module,
self.0.basek(),
self.0.k(),
self.0.rows(),
self.0.rank_out(),
);
let mut ksk_prepared: LWEToGLWESwitchingKeyPrepared<Vec<u8>, B> = LWEToGLWESwitchingKeyPrepared::alloc(module, self);
ksk_prepared.prepare(module, self, scratch);
ksk_prepared
}
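The three LWE-related wrappers above restrict the general GGLWE layout: LWESwitchingKeyPrepared requires rank_in = rank_out = 1 and digits = 1, GLWEToLWESwitchingKeyPrepared fixes rank_out = 1, and LWEToGLWESwitchingKeyPrepared fixes rank_in = 1. A small standalone guard capturing those restrictions, illustrative rather than the library's debug_asserts:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum KeyKind {
    LweToLwe,  // LWESwitchingKey: 1 -> 1, no digit decomposition
    GlweToLwe, // GLWEToLWESwitchingKey: rank_in -> 1
    LweToGlwe, // LWEToGLWESwitchingKey: 1 -> rank_out
}

#[derive(Clone, Copy, Debug)]
struct Layout { rank_in: u32, rank_out: u32, digits: u32 }

// Mirrors the debug_assert guards in the three prepared wrappers.
fn check(kind: KeyKind, l: Layout) -> Result<(), String> {
    if l.digits != 1 {
        return Err(format!("{kind:?}: digits > 1 is not supported (got {})", l.digits));
    }
    if matches!(kind, KeyKind::LweToLwe | KeyKind::LweToGlwe) && l.rank_in != 1 {
        return Err(format!("{kind:?}: rank_in must be 1 (got {})", l.rank_in));
    }
    if matches!(kind, KeyKind::LweToLwe | KeyKind::GlweToLwe) && l.rank_out != 1 {
        return Err(format!("{kind:?}: rank_out must be 1 (got {})", l.rank_out));
    }
    Ok(())
}

fn main() {
    assert!(check(KeyKind::LweToLwe, Layout { rank_in: 1, rank_out: 1, digits: 1 }).is_ok());
    assert!(check(KeyKind::GlweToLwe, Layout { rank_in: 3, rank_out: 1, digits: 1 }).is_ok());
    assert!(check(KeyKind::LweToGlwe, Layout { rank_in: 1, rank_out: 2, digits: 1 }).is_ok());
    // A GLWE-to-LWE key with rank_out = 2 violates the wrapper's contract.
    assert!(check(KeyKind::GlweToLwe, Layout { rank_in: 3, rank_out: 2, digits: 1 }).is_err());
}
```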