This commit is contained in:
Pro7ech
2025-10-13 23:49:09 +02:00
parent d32b4738c3
commit 0533cdff8a
27 changed files with 776 additions and 2022 deletions


@@ -1,11 +1,10 @@
use poulpy_hal::{
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare, VmpPrepareTmpBytes},
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, VmpPMatToMut, VmpPMatToRef, ZnxInfos},
oep::VmpPMatAllocBytesImpl,
};
use crate::layouts::{
Base2K, BuildError, Degree, Dnum, Dsize, GGSW, GGSWInfos, GGSWToMut, GGSWToRef, GLWEInfos, LWEInfos, Rank, TorusPrecision,
Base2K, Degree, Dnum, Dsize, GGSW, GGSWInfos, GGSWToRef, GLWEInfos, GetDegree, LWEInfos, Rank, TorusPrecision,
};
#[derive(PartialEq, Eq)]
@@ -50,143 +49,18 @@ impl<D: Data, B: Backend> GGSWInfos for GGSWPrepared<D, B> {
}
}
pub struct GGSWCiphertextPreparedBuilder<D: Data, B: Backend> {
data: Option<VmpPMat<D, B>>,
base2k: Option<Base2K>,
k: Option<TorusPrecision>,
dsize: Option<Dsize>,
}
impl<D: Data, B: Backend> GGSWPrepared<D, B> {
#[inline]
pub fn builder() -> GGSWCiphertextPreparedBuilder<D, B> {
GGSWCiphertextPreparedBuilder {
data: None,
base2k: None,
k: None,
dsize: None,
}
}
}
impl<B: Backend> GGSWCiphertextPreparedBuilder<Vec<u8>, B> {
#[inline]
pub fn layout<A>(mut self, infos: &A) -> Self
where
A: GGSWInfos,
B: VmpPMatAllocBytesImpl<B>,
{
debug_assert!(
infos.size() as u32 > infos.dsize().0,
"invalid ggsw: ceil(k/base2k): {} <= dsize: {}",
infos.size(),
infos.dsize()
);
assert!(
infos.dnum().0 * infos.dsize().0 <= infos.size() as u32,
"invalid ggsw: dnum: {} * dsize:{} > ceil(k/base2k): {}",
infos.dnum(),
infos.dsize(),
infos.size(),
);
self.data = Some(VmpPMat::alloc(
infos.n().into(),
infos.dnum().into(),
(infos.rank() + 1).into(),
(infos.rank() + 1).into(),
infos.size(),
));
self.base2k = Some(infos.base2k());
self.k = Some(infos.k());
self.dsize = Some(infos.dsize());
self
}
}
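A concrete reading of the two checks above: with k = 54 bits and base2k = 14, size = ceil(54/14) = 4 limbs, so the layout accepts dsize < 4 and requires dnum * dsize <= 4. A minimal standalone check (values are illustrative, not taken from this commit):

fn main() {
    let (k, base2k, dnum, dsize) = (54u32, 14u32, 4u32, 1u32);
    let size = k.div_ceil(base2k); // ceil(54 / 14) = 4
    assert!(size > dsize); // mirrors the debug_assert in `layout`
    assert!(dnum * dsize <= size); // mirrors the assert in `layout`
}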
impl<D: Data, B: Backend> GGSWCiphertextPreparedBuilder<D, B> {
#[inline]
pub fn data(mut self, data: VmpPMat<D, B>) -> Self {
self.data = Some(data);
self
}
#[inline]
pub fn base2k(mut self, base2k: Base2K) -> Self {
self.base2k = Some(base2k);
self
}
#[inline]
pub fn k(mut self, k: TorusPrecision) -> Self {
self.k = Some(k);
self
}
#[inline]
pub fn dsize(mut self, dsize: Dsize) -> Self {
self.dsize = Some(dsize);
self
}
pub fn build(self) -> Result<GGSWPrepared<D, B>, BuildError> {
let data: VmpPMat<D, B> = self.data.ok_or(BuildError::MissingData)?;
let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
let dsize: Dsize = self.dsize.ok_or(BuildError::MissingDigits)?;
if base2k == 0_u32 {
return Err(BuildError::ZeroBase2K);
}
if dsize == 0_u32 {
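// Likely a copy-paste slip: a dsize-specific error variant would be
// expected here instead of reusing BuildError::ZeroBase2K.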
return Err(BuildError::ZeroBase2K);
}
if k == 0_u32 {
return Err(BuildError::ZeroTorusPrecision);
}
if data.n() == 0 {
return Err(BuildError::ZeroDegree);
}
if data.cols() == 0 {
return Err(BuildError::ZeroCols);
}
if data.size() == 0 {
return Err(BuildError::ZeroLimbs);
}
Ok(GGSWPrepared {
data,
base2k,
k,
dsize,
})
}
}
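For context, a sketch of how the removed builder chained; FFT64 stands in for any concrete Backend, pmat is a VmpPMat allocated elsewhere, and the public tuple-struct constructors (Base2K(..), etc.) are assumptions:

let prepared: GGSWPrepared<Vec<u8>, FFT64> = GGSWPrepared::builder()
    .data(pmat) // a pre-allocated VmpPMat<Vec<u8>, FFT64>
    .base2k(Base2K(14))
    .k(TorusPrecision(54))
    .dsize(Dsize(1))
    .build()
    .expect("all fields set and non-zero");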
impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGSWInfos,
Module<B>: VmpPMatAlloc<B>,
{
Self::alloc_with(
module,
infos.base2k(),
infos.k(),
infos.dnum(),
infos.dsize(),
infos.rank(),
)
}
pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self
where
Module<B>: VmpPMatAlloc<B>,
{
pub trait GGSWPreparedAlloc<B: Backend>
where
Self: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes,
{
fn ggsw_prepared_alloc(
&self,
base2k: Base2K,
k: TorusPrecision,
dnum: Dnum,
dsize: Dsize,
rank: Rank,
) -> GGSWPrepared<Vec<u8>, B> {
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size as u32 > dsize.0,
@@ -201,8 +75,8 @@ impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
dsize.0,
);
Self {
data: module.vmp_pmat_alloc(
GGSWPrepared {
data: self.vmp_pmat_alloc(
dnum.into(),
(rank + 1).into(),
(rank + 1).into(),
@@ -214,13 +88,12 @@ impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
}
}
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
fn ggsw_prepared_alloc_from_infos<A>(&self, infos: &A) -> GGSWPrepared<Vec<u8>, B>
where
A: GGSWInfos,
Module<B>: VmpPMatAllocBytes,
{
Self::alloc_bytes_with(
module,
assert_eq!(self.n(), infos.n());
self.ggsw_prepared_alloc(
infos.base2k(),
infos.k(),
infos.dnum(),
@@ -229,10 +102,7 @@ impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
)
}
pub fn alloc_bytes_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize
where
Module<B>: VmpPMatAllocBytes,
{
fn ggsw_prepared_alloc_bytes(&self, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> usize {
let size: usize = k.0.div_ceil(base2k.0) as usize;
debug_assert!(
size as u32 > dsize.0,
@@ -247,7 +117,57 @@ impl<B: Backend> GGSWPrepared<Vec<u8>, B> {
dsize.0,
);
module.vmp_pmat_alloc_bytes(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
self.vmp_pmat_alloc_bytes(dnum.into(), (rank + 1).into(), (rank + 1).into(), size)
}
fn ggsw_prepared_alloc_bytes_from_infos<A>(&self, infos: &A) -> usize
where
A: GGSWInfos,
{
assert_eq!(self.n(), infos.n());
self.ggsw_prepared_alloc_bytes(
infos.base2k(),
infos.k(),
infos.dnum(),
infos.dsize(),
infos.rank(),
)
}
}
impl<B: Backend> GGSWPreparedAlloc<B> for Module<B> where Module<B>: GetDegree + VmpPMatAlloc<B> + VmpPMatAllocBytes {}
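With this blanket impl, allocation goes through the module itself. A hedged usage sketch (module construction is backend-specific and elided; FFT64 and the parameter values are illustrative assumptions):

let prepared: GGSWPrepared<Vec<u8>, FFT64> =
    module.ggsw_prepared_alloc(Base2K(14), TorusPrecision(54), Dnum(4), Dsize(1), Rank(2));
// Or mirror the layout of an existing ciphertext `ct`; this asserts that
// the ring degree of `ct` matches that of the module:
let prepared2 = module.ggsw_prepared_alloc_from_infos(&ct);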
impl<B: Backend> GGSWPrepared<Vec<u8>, B>
where
Module<B>: GGSWPreparedAlloc<B>,
{
pub fn alloc_from_infos<A>(module: &Module<B>, infos: &A) -> Self
where
A: GGSWInfos,
{
module.ggsw_prepared_alloc_from_infos(infos)
}
pub fn alloc(module: &Module<B>, base2k: Base2K, k: TorusPrecision, dnum: Dnum, dsize: Dsize, rank: Rank) -> Self {
module.ggsw_prepared_alloc(base2k, k, dnum, dsize, rank)
}
pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
where
A: GGSWInfos,
{
module.ggsw_prepared_alloc_bytes_from_infos(infos)
}
pub fn alloc_bytes_with(
module: &Module<B>,
base2k: Base2K,
k: TorusPrecision,
dnum: Dnum,
dsize: Dsize,
rank: Rank,
) -> usize {
module.ggsw_prepared_alloc_bytes(base2k, k, dnum, dsize, rank)
}
}
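These inherent functions only forward to the trait, so the associated-function style keeps working; a short sketch under the same assumptions as above:

let bytes: usize = GGSWPrepared::<Vec<u8>, FFT64>::alloc_bytes(&module, &ct);
let prepared = GGSWPrepared::<Vec<u8>, FFT64>::alloc_from_infos(&module, &ct);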
@@ -257,20 +177,15 @@ impl<D: DataRef, B: Backend> GGSWPrepared<D, B> {
}
}
pub trait GGSWPrepareTmpBytes {
fn ggsw_prepare_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGSWInfos;
}
impl<B: Backend> GGSWPrepareTmpBytes for Module<B>
pub trait GGSWPrepare<B: Backend>
where
Module<B>: VmpPrepareTmpBytes,
Self: GetDegree + VmpPrepareTmpBytes + VmpPrepare<B>,
{
fn ggsw_prepare_tmp_bytes<A>(&self, infos: &A) -> usize
where
A: GGSWInfos,
{
assert_eq!(self.n(), infos.n());
self.vmp_prepare_tmp_bytes(
infos.dnum().into(),
(infos.rank() + 1).into(),
@@ -278,31 +193,6 @@ where
infos.size(),
)
}
}
impl<B: Backend> GGSWPrepared<Vec<u8>, B>
where
Module<B>: GGSWPrepareTmpBytes,
{
pub fn prepare_tmp_bytes<A>(&self, module: &Module<B>, infos: &A) -> usize
where
A: GGSWInfos,
{
module.ggsw_prepare_tmp_bytes(self)
}
}
pub trait GGSWPrepare<B: Backend> {
fn ggsw_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GGSWPreparedToMut<B>,
O: GGSWToRef;
}
impl<B: Backend> GGSWPrepare<B> for Module<B>
where
Module<B>: VmpPrepare<B>,
{
fn ggsw_prepare<R, O>(&self, res: &mut R, other: &O, scratch: &mut Scratch<B>)
where
R: GGSWPreparedToMut<B>,
@@ -310,6 +200,8 @@ where
{
let mut res: GGSWPrepared<&mut [u8], B> = res.to_mut();
let other: GGSW<&[u8]> = other.to_ref();
assert_eq!(res.n(), self.n());
assert_eq!(other.n(), self.n());
assert_eq!(res.k, other.k);
assert_eq!(res.base2k, other.base2k);
assert_eq!(res.dsize, other.dsize);
@@ -317,6 +209,20 @@ where
}
}
impl<B: Backend> GGSWPrepare<B> for Module<B> where Self: GetDegree + VmpPrepareTmpBytes + VmpPrepare<B> {}
impl<B: Backend> GGSWPrepared<Vec<u8>, B>
where
Module<B>: GGSWPrepare<B>,
{
pub fn prepare_tmp_bytes<A>(&self, module: &Module<B>, infos: &A) -> usize
where
A: GGSWInfos,
{
module.ggsw_prepare_tmp_bytes(infos)
}
}
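End to end, the prepare path under the new traits reads as in the sketch below, assuming `ct` is a GGSW ciphertext whose degree matches the module; scratch allocation is backend-specific and elided:

let mut prepared = module.ggsw_prepared_alloc_from_infos(&ct);
let tmp_bytes: usize = module.ggsw_prepare_tmp_bytes(&ct);
// ... obtain a Scratch<B> backed by at least `tmp_bytes` bytes ...
module.ggsw_prepare(&mut prepared, &ct, scratch);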
impl<D: DataMut, B: Backend> GGSWPrepared<D, B>
where
Module<B>: GGSWPrepare<B>,
@@ -329,35 +235,6 @@ where
}
}
pub trait GGSWPrepareAlloc<B: Backend> {
fn ggsw_prepare_alloc<O>(&self, other: &O, scratch: &mut Scratch<B>) -> GGSWPrepared<Vec<u8>, B>
where
O: GGSWToRef;
}
impl<B: Backend> GGSWPrepareAlloc<B> for Module<B>
where
Module<B>: GGSWPrepare<B>,
{
fn ggsw_prepare_alloc<O>(&self, other: &O, scratch: &mut Scratch<B>) -> GGSWPrepared<Vec<u8>, B>
where
O: GGSWToRef,
{
let mut ct_prepared: GGSWPrepared<Vec<u8>, B> = GGSWPrepared::alloc(self, other);
self.ggsw_prepare(&mut ct_prepared, other, scratch);
ct_prepared
}
}
impl<D: DataRef> GGSW<D> {
fn prepare_alloc<B: Backend>(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGSWPrepared<Vec<u8>, B>
where
Module<B>: GGSWPrepareAlloc<B>,
{
module.ggsw_prepare_alloc(self, scratch)
}
}
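With GGSWPrepareAlloc gone, the alloc-and-prepare convenience can be reproduced caller-side; the helper below is a sketch, not part of the crate, and the extra GGSWInfos bound is an assumption needed for the infos-based allocation:

fn prepare_alloc<B: Backend, O: GGSWToRef + GGSWInfos>(
    module: &Module<B>,
    ct: &O,
    scratch: &mut Scratch<B>,
) -> GGSWPrepared<Vec<u8>, B>
where
    Module<B>: GGSWPreparedAlloc<B> + GGSWPrepare<B>,
{
    let mut prepared = module.ggsw_prepared_alloc_from_infos(ct);
    module.ggsw_prepare(&mut prepared, ct, scratch);
    prepared
}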
pub trait GGSWPreparedToMut<B: Backend> {
fn to_mut(&mut self) -> GGSWPrepared<&mut [u8], B>;
}
@@ -383,7 +260,7 @@ impl<D: DataRef, B: Backend> GGSWCiphertextPreparedToRef<B> for GGSWPrepared<D,
base2k: self.base2k,
k: self.k,
dsize: self.dsize,
data: self.data.to_mut(),
data: self.data.to_ref(),
}
}
}
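After the fix above, both borrowing directions behave as expected; a short sketch (the to_ref method name on the ToRef trait is assumed, mirroring to_mut):

let view: GGSWPrepared<&[u8], FFT64> = prepared.to_ref();
// A mutable view via the companion trait:
let view_mut: GGSWPrepared<&mut [u8], FFT64> = prepared.to_mut();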