Add BDD Arithmetic (#98)

* Added some circuit, evaluation + some layouts
* Refactor + memory reduction
* Rows -> Dnum, Digits -> Dsize
* fix #96 + glwe_packing (indirectly CBT)
* clippy
Committed by GitHub
Parent: 37e13b965c
Commit: 6357a05509
@@ -4,16 +4,16 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
         VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNegateInplace, VecZnxNormalize,
-        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
-        VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNegateInplace,
+        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace,
+        VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
 
 use crate::{
     GLWEOperations, TakeGLWECt,
-    layouts::{GGLWELayoutInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared},
+    layouts::{GGLWEInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared},
 };
 
 /// [GLWEPacker] enables on-the-fly GLWE packing
@@ -93,7 +93,7 @@ impl GLWEPacker {
     pub fn scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GLWEInfos,
-        KEY: GGLWELayoutInfos,
+        KEY: GGLWEInfos,
         Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         pack_core_scratch_space(module, out_infos, key_infos)
@@ -180,7 +180,7 @@ impl GLWEPacker {
     fn pack_core_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
         OUT: GLWEInfos,
-        KEY: GGLWELayoutInfos,
+        KEY: GGLWEInfos,
         Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
         combine_scratch_space(module, out_infos, key_infos)
@@ -271,7 +271,7 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
 fn combine_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
 where
     OUT: GLWEInfos,
-    KEY: GGLWELayoutInfos,
+    KEY: GGLWEInfos,
     Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
 {
     GLWECiphertext::alloc_bytes(out_infos)
@@ -390,3 +390,165 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
         acc.value = true;
     }
 }
+
+/// Packs [x_0: GLWE(m_0), x_1: GLWE(m_1), ..., x_i: GLWE(m_i)]
+/// to [0: GLWE(m_0 * X^x_0 + m_1 * X^x_1 + ... + m_i * X^x_i)]
+pub fn glwe_packing<D: DataMut, ATK, B: Backend>(
+    module: &Module<B>,
+    cts: &mut HashMap<usize, &mut GLWECiphertext<D>>,
+    log_gap_out: usize,
+    auto_keys: &HashMap<i64, GGLWEAutomorphismKeyPrepared<ATK, B>>,
+    scratch: &mut Scratch<B>,
+) where
+    ATK: DataRef,
+    Module<B>: VecZnxRotateInplace<B>
+        + VecZnxNormalizeInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxSwitchRing
+        + VecZnxBigAutomorphismInplace<B>
+        + VecZnxRshInplace<B>
+        + VecZnxDftCopy<B>
+        + VecZnxIdftApplyTmpA<B>
+        + VecZnxSub
+        + VecZnxAddInplace
+        + VecZnxNegateInplace
+        + VecZnxCopy
+        + VecZnxSubInplace
+        + VecZnxDftAllocBytes
+        + VmpApplyDftToDftTmpBytes
+        + VecZnxBigNormalizeTmpBytes
+        + VmpApplyDftToDft<B>
+        + VmpApplyDftToDftAdd<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
+        + VecZnxBigAddSmallInplace<B>
+        + VecZnxBigNormalize<B>
+        + VecZnxAutomorphismInplace<B>
+        + VecZnxBigSubSmallNegateInplace<B>
+        + VecZnxRotate
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
+{
+    #[cfg(debug_assertions)]
+    {
+        assert!(*cts.keys().max().unwrap() < module.n())
+    }
+
+    let log_n: usize = module.log_n();
+
+    (0..log_n - log_gap_out).for_each(|i| {
+        let t: usize = (1 << log_n).min(1 << (log_n - 1 - i));
+
+        let auto_key: &GGLWEAutomorphismKeyPrepared<ATK, B> = if i == 0 {
+            auto_keys.get(&-1).unwrap()
+        } else {
+            auto_keys.get(&module.galois_element(1 << (i - 1))).unwrap()
+        };
+
+        (0..t).for_each(|j| {
+            let mut a: Option<&mut GLWECiphertext<D>> = cts.remove(&j);
+            let mut b: Option<&mut GLWECiphertext<D>> = cts.remove(&(j + t));
+
+            pack_internal(module, &mut a, &mut b, i, auto_key, scratch);
+
+            if let Some(a) = a {
+                cts.insert(j, a);
+            } else if let Some(b) = b {
+                cts.insert(j, b);
+            }
+        });
+    });
+}
+
+#[allow(clippy::too_many_arguments)]
+fn pack_internal<A: DataMut, D: DataMut, DataAK: DataRef, B: Backend>(
+    module: &Module<B>,
+    a: &mut Option<&mut GLWECiphertext<A>>,
+    b: &mut Option<&mut GLWECiphertext<D>>,
+    i: usize,
+    auto_key: &GGLWEAutomorphismKeyPrepared<DataAK, B>,
+    scratch: &mut Scratch<B>,
+) where
+    Module<B>: VecZnxRotateInplace<B>
+        + VecZnxNormalizeInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxBigAutomorphismInplace<B>
+        + VecZnxRshInplace<B>
+        + VecZnxDftCopy<B>
+        + VecZnxIdftApplyTmpA<B>
+        + VecZnxSub
+        + VecZnxAddInplace
+        + VecZnxNegateInplace
+        + VecZnxCopy
+        + VecZnxSubInplace
+        + VecZnxDftAllocBytes
+        + VmpApplyDftToDftTmpBytes
+        + VecZnxBigNormalizeTmpBytes
+        + VmpApplyDftToDft<B>
+        + VmpApplyDftToDftAdd<B>
+        + VecZnxDftApply<B>
+        + VecZnxIdftApplyConsume<B>
+        + VecZnxBigAddSmallInplace<B>
+        + VecZnxBigNormalize<B>
+        + VecZnxAutomorphismInplace<B>
+        + VecZnxBigSubSmallNegateInplace<B>
+        + VecZnxRotate
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnx + TakeVecZnxDft<B> + ScratchAvailable,
+{
+    // Goal is to evaluate: a = a + b*X^t + phi(a - b*X^t)
+    // We also use the identity: AUTO(a * X^t, g) = -X^t * AUTO(a, g)
+    // where t = 2^(log_n - i - 1) and g = 5^{2^(i - 1)}
+    // Different cases depending on whether a and/or b are zero.
+    //
+    // The implicit RSH without modulus switch introduces an extra I(X) * Q/2 term on decryption.
+    // The RSH is necessary so that the scaling of the plaintext remains constant.
+    // It is however OK to do it here because coefficients are eventually
+    // either mapped to garbage or to twice their value, which makes I(X) vanish
+    // since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
+    if let Some(a) = a.as_deref_mut() {
+        let t: i64 = 1 << (a.n().log2() - i - 1);
+
+        if let Some(b) = b.as_deref_mut() {
+            let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(a);
+
+            // a = a * X^-t
+            a.rotate_inplace(module, -t, scratch_1);
+
+            // tmp_b = a * X^-t - b
+            tmp_b.sub(module, a, b);
+            tmp_b.rsh(module, 1, scratch_1);
+
+            // a = a * X^-t + b
+            a.add_inplace(module, b);
+            a.rsh(module, 1, scratch_1);
+
+            tmp_b.normalize_inplace(module, scratch_1);
+
+            // tmp_b = phi(a * X^-t - b)
+            tmp_b.automorphism_inplace(module, auto_key, scratch_1);
+
+            // a = a * X^-t + b - phi(a * X^-t - b)
+            a.sub_inplace_ab(module, &tmp_b);
+            a.normalize_inplace(module, scratch_1);
+
+            // a = a + b * X^t - phi(a * X^-t - b) * X^t
+            //   = a + b * X^t - phi(a * X^-t - b) * (-phi(X^t))
+            //   = a + b * X^t + phi(a - b * X^t)
+            a.rotate_inplace(module, t, scratch_1);
+        } else {
+            a.rsh(module, 1, scratch);
+            // a = a + phi(a)
+            a.automorphism_add_inplace(module, auto_key, scratch);
+        }
+    } else if let Some(b) = b.as_deref_mut() {
+        let t: i64 = 1 << (b.n().log2() - i - 1);
+
+        let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(b);
+        tmp_b.rotate(module, t, b);
+        tmp_b.rsh(module, 1, scratch_1);
+
+        // b = b * X^t - phi(b * X^t)
+        b.automorphism_sub_negate(module, &tmp_b, auto_key, scratch_1);
+    }
+}
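
The tree recursion that glwe_packing performs is easiest to see on plaintexts. The sketch below is an illustration only, not the poulpy API: plain i64 coefficient vectors in Z[X]/(X^N + 1) stand in for GLWE ciphertexts, the helpers rotate/automorphism/add/sub are hypothetical stand-ins for the homomorphic operations, the Galois element is taken as -1 at the first level and 5^(2^(i - 1)) mod 2N afterwards (as stated in the pack_internal comment), and the per-level rsh is skipped so everything stays in exact integer arithmetic, which leaves an extra factor 2^log_n on the packed result.

// Plaintext-only sketch of the packing recursion (hypothetical helpers, not poulpy API).
use std::collections::HashMap;

const LOG_N: usize = 3;
const N: usize = 1 << LOG_N;

// Multiply by X^k in Z[X]/(X^N + 1): exponents are taken mod 2N and X^N = -1.
fn rotate(a: &[i64; N], k: i64) -> [i64; N] {
    let mut out = [0i64; N];
    for (i, &c) in a.iter().enumerate() {
        let e = (i as i64 + k).rem_euclid(2 * N as i64) as usize;
        if e < N { out[e] += c } else { out[e - N] -= c }
    }
    out
}

// Apply the automorphism phi_g: X -> X^g (g odd, taken mod 2N).
fn automorphism(a: &[i64; N], g: i64) -> [i64; N] {
    let mut out = [0i64; N];
    for (i, &c) in a.iter().enumerate() {
        let e = (i as i64 * g).rem_euclid(2 * N as i64) as usize;
        if e < N { out[e] += c } else { out[e - N] -= c }
    }
    out
}

fn add(a: &[i64; N], b: &[i64; N]) -> [i64; N] {
    let mut out = *a;
    for k in 0..N { out[k] += b[k]; }
    out
}

fn sub(a: &[i64; N], b: &[i64; N]) -> [i64; N] {
    let mut out = *a;
    for k in 0..N { out[k] -= b[k]; }
    out
}

fn main() {
    // Slot j holds the constant j + 1; after packing (log_gap_out = 0), coefficient j of the
    // single remaining polynomial should hold 2^LOG_N * (j + 1), the factor 2^LOG_N appearing
    // because the per-level halving (rsh) is skipped in this plaintext sketch.
    let mut cts: HashMap<usize, [i64; N]> = HashMap::new();
    for j in 0..N {
        let mut m = [0i64; N];
        m[0] = (j + 1) as i64;
        cts.insert(j, m);
    }

    for i in 0..LOG_N {
        // t = N / 2^(i + 1); the min(N, ..) in the library code never binds for these levels.
        let t = 1usize << (LOG_N - 1 - i);
        // Galois element: -1 at the first level, then 5^(2^(i - 1)) mod 2N.
        let g: i64 = if i == 0 {
            -1
        } else {
            (0..i - 1).fold(5i64, |g, _| (g * g).rem_euclid(2 * N as i64))
        };
        for j in 0..t {
            let a = cts.remove(&j).unwrap();
            let b = cts.remove(&(j + t)).unwrap();
            let b_rot = rotate(&b, t as i64);
            // merged = a + b * X^t + phi(a - b * X^t), i.e. pack_internal without the rsh.
            let merged = add(&add(&a, &b_rot), &automorphism(&sub(&a, &b_rot), g));
            cts.insert(j, merged);
        }
    }

    let packed = cts[&0];
    let expected: [i64; N] = core::array::from_fn(|k| (1i64 << LOG_N) * (k as i64 + 1));
    assert_eq!(packed, expected);
    println!("packed (scaled by 2^LOG_N): {:?}", packed);
}

At each level the surviving coefficients are exactly doubled and the merged pair is re-inserted at slot j, mirroring how glwe_packing puts the result back into the HashMap; the library instead divides by 2 with rsh at every level so the plaintext scaling stays constant.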
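
The rotate-then-automorphism trick in pack_internal rests on the identity AUTO(a * X^t, g) = -X^t * AUTO(a, g), with t = 2^(log_n - i - 1) and g = -1 for i = 0 or g = 5^(2^(i - 1)) for i >= 1. Below is a minimal, self-contained numerical check of that identity on plain coefficient vectors (hypothetical helpers again, not library code):

// Numerical check of AUTO(a * X^t, g) = -X^t * AUTO(a, g) in Z[X]/(X^N + 1).
// Plain coefficient vectors only; not library code.

const LOG_N: usize = 4;
const N: usize = 1 << LOG_N;

// Multiply by X^k: exponents taken mod 2N, with X^N = -1.
fn rotate(a: &[i64; N], k: i64) -> [i64; N] {
    let mut out = [0i64; N];
    for (i, &c) in a.iter().enumerate() {
        let e = (i as i64 + k).rem_euclid(2 * N as i64) as usize;
        if e < N { out[e] += c } else { out[e - N] -= c }
    }
    out
}

// Automorphism X -> X^g (g odd).
fn automorphism(a: &[i64; N], g: i64) -> [i64; N] {
    let mut out = [0i64; N];
    for (i, &c) in a.iter().enumerate() {
        let e = (i as i64 * g).rem_euclid(2 * N as i64) as usize;
        if e < N { out[e] += c } else { out[e - N] -= c }
    }
    out
}

fn main() {
    // Arbitrary test polynomial.
    let a: [i64; N] = core::array::from_fn(|i| (3 * i + 1) as i64);

    for i in 0..LOG_N {
        let t = 1i64 << (LOG_N - 1 - i);
        // g = -1 at level 0, 5^(2^(i - 1)) mod 2N afterwards; in both cases
        // phi_g(X^t) = -X^t, which is exactly what the identity needs.
        let g: i64 = if i == 0 {
            -1
        } else {
            (0..i - 1).fold(5i64, |g, _| (g * g).rem_euclid(2 * N as i64))
        };
        let lhs = automorphism(&rotate(&a, t), g); // AUTO(a * X^t, g)
        let rhs = rotate(&automorphism(&a, g), t).map(|c| -c); // -X^t * AUTO(a, g)
        assert_eq!(lhs, rhs, "identity fails at level i = {i}");
    }
    println!("AUTO(a * X^t, g) = -X^t * AUTO(a, g) holds at every level");
}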