mirror of https://github.com/arnaucube/poulpy.git
synced 2026-02-10 05:06:44 +01:00

Add cross-basek normalization (#90)

* added cross_basek_normalization
* updated method signatures to take layouts
* fixed cross-base normalization

fix #91
fix #93

committed by GitHub
parent 4da790ea6a, commit 37e13b965c
.github/workflows/ci.yml (vendored, 2 changes)

@@ -36,7 +36,7 @@ jobs:
         run: cargo build --all-targets

       - name: Clippy (deny warnings)
-        run: cargo clippy --workspace --all-targets --all-features -- -D warnings
+        run: cargo clippy --workspace --all-targets --all-features

       - name: rustfmt (check only)
         run: cargo fmt --all --check
Cargo.lock (generated, 1 change)

@@ -403,6 +403,7 @@ name = "poulpy-schemes"
 version = "0.1.1"
 dependencies = [
  "byteorder",
+ "criterion",
  "itertools 0.14.0",
  "poulpy-backend",
  "poulpy-core",
@@ -3,7 +3,7 @@ use poulpy_backend::cpu_spqlios::FFT64Spqlios;
 use poulpy_hal::{
     api::{
         ModuleNew, ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPrepare, VecZnxAddNormal,
-        VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace,
+        VecZnxBigAddSmallInplace, VecZnxBigAlloc, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace,
         VecZnxDftAlloc, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyTmpA, VecZnxNormalizeInplace,
     },
     layouts::{Module, ScalarZnx, ScratchOwned, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, ZnxInfos},
@@ -12,10 +12,10 @@ use poulpy_hal::{

 fn main() {
     let n: usize = 16;
-    let basek: usize = 18;
+    let base2k: usize = 18;
     let ct_size: usize = 3;
     let msg_size: usize = 2;
-    let log_scale: usize = msg_size * basek - 5;
+    let log_scale: usize = msg_size * base2k - 5;
     let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(n as u64);

     let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::<FFT64Spqlios>::alloc(module.vec_znx_big_normalize_tmp_bytes());
@@ -41,7 +41,7 @@ fn main() {
     );

     // Fill the second column with random values: ct = (0, a)
-    module.vec_znx_fill_uniform(basek, &mut ct, 1, &mut source);
+    module.vec_znx_fill_uniform(base2k, &mut ct, 1, &mut source);

     let mut buf_dft: VecZnxDft<Vec<u8>, FFT64Spqlios> = module.vec_znx_dft_alloc(1, ct_size);

@@ -70,11 +70,11 @@ fn main() {
     let mut want: Vec<i64> = vec![0; n];
     want.iter_mut()
         .for_each(|x| *x = source.next_u64n(16, 15) as i64);
-    m.encode_vec_i64(basek, 0, log_scale, &want, 4);
-    module.vec_znx_normalize_inplace(basek, &mut m, 0, scratch.borrow());
+    m.encode_vec_i64(base2k, 0, log_scale, &want);
+    module.vec_znx_normalize_inplace(base2k, &mut m, 0, scratch.borrow());

     // m - BIG(ct[1] * s)
-    module.vec_znx_big_sub_small_b_inplace(
+    module.vec_znx_big_sub_small_negate_inplace(
         &mut buf_big,
         0, // Selects the first column of the receiver
         &m,
@@ -84,9 +84,10 @@ fn main() {
     // Normalizes back to VecZnx
     // ct[0] <- m - BIG(c1 * s)
     module.vec_znx_big_normalize(
-        basek,
+        base2k,
         &mut ct,
         0, // Selects the first column of ct (ct[0])
+        base2k,
         &buf_big,
         0, // Selects the first column of buf_big
         scratch.borrow(),
@@ -95,10 +96,10 @@ fn main() {
     // Add noise to ct[0]
     // ct[0] <- ct[0] + e
     module.vec_znx_add_normal(
-        basek,
+        base2k,
         &mut ct,
         0, // Selects the first column of ct (ct[0])
-        basek * ct_size, // Scaling of the noise: 2^{-basek * limbs}
+        base2k * ct_size, // Scaling of the noise: 2^{-base2k * limbs}
         &mut source,
         3.2, // Standard deviation
         3.2 * 6.0, // Truncation bound
@@ -125,12 +126,12 @@ fn main() {

     // m + e <- BIG(ct[1] * s + ct[0])
     let mut res = VecZnx::alloc(module.n(), 1, ct_size);
-    module.vec_znx_big_normalize(basek, &mut res, 0, &buf_big, 0, scratch.borrow());
+    module.vec_znx_big_normalize(base2k, &mut res, 0, base2k, &buf_big, 0, scratch.borrow());

     // have = m * 2^{log_scale} + e
     let mut have: Vec<i64> = vec![i64::default(); n];
-    res.decode_vec_i64(basek, 0, ct_size * basek, &mut have);
-    let scale: f64 = (1 << (res.size() * basek - log_scale)) as f64;
+    res.decode_vec_i64(base2k, 0, ct_size * base2k, &mut have);
+    let scale: f64 = (1 << (res.size() * base2k - log_scale)) as f64;
     izip!(want.iter(), have.iter())
         .enumerate()
         .for_each(|(i, (a, b))| {
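The normalization calls above now pass the limb base twice, once for the output and once for the input, so a value accumulated in one base 2^k can be normalized directly into a different base. As a rough per-coefficient mental model, here is a minimal scalar sketch of such a re-encoding; recompose and decompose are hypothetical helpers for illustration, not the library's API, and the centered digit range is the sketch's own convention:

    // Recompose signed limbs (most significant first) in base 2^k into an integer.
    fn recompose(limbs: &[i64], k: usize) -> i128 {
        limbs.iter().fold(0i128, |acc, &l| (acc << k) + l as i128)
    }

    // Decompose an integer into n_limbs centered digits in base 2^k (MSB first).
    fn decompose(mut v: i128, k: usize, n_limbs: usize) -> Vec<i64> {
        let base = 1i128 << k;
        let half = base >> 1;
        let mut out = vec![0i64; n_limbs];
        for limb in out.iter_mut().rev() {
            let mut d = v & (base - 1); // low k bits, in [0, 2^k)
            if d >= half {
                d -= base; // center into [-2^(k-1), 2^(k-1))
            }
            *limb = d as i64;
            v = (v - d) >> k; // exact: v - d is divisible by 2^k
        }
        out
    }

    fn main() {
        let a = [3i64, -70_000, 123]; // base-2^18 limbs
        let v = recompose(&a, 18);
        let b = decompose(v, 12, 5); // same value, base-2^12 limbs
        assert_eq!(recompose(&b, 12), v);
    }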
@@ -7,7 +7,7 @@ use poulpy_hal::{
         fft64::{
             reim::{
                 ReimAdd, ReimAddInplace, ReimAddMul, ReimCopy, ReimDFTExecute, ReimFFTTable, ReimFromZnx, ReimIFFTTable, ReimMul,
-                ReimMulInplace, ReimNegate, ReimNegateInplace, ReimSub, ReimSubABInplace, ReimSubBAInplace, ReimToZnx,
+                ReimMulInplace, ReimNegate, ReimNegateInplace, ReimSub, ReimSubInplace, ReimSubNegateInplace, ReimToZnx,
                 ReimToZnxInplace, ReimZero, reim_copy_ref, reim_zero_ref,
             },
             reim4::{
@@ -15,10 +15,11 @@ use poulpy_hal::{
             },
         },
         znx::{
-            ZnxAdd, ZnxAddInplace, ZnxAutomorphism, ZnxCopy, ZnxNegate, ZnxNegateInplace, ZnxNormalizeFinalStep,
+            ZnxAdd, ZnxAddInplace, ZnxAutomorphism, ZnxCopy, ZnxExtractDigitAddMul, ZnxMulAddPowerOfTwo, ZnxMulPowerOfTwo,
+            ZnxMulPowerOfTwoInplace, ZnxNegate, ZnxNegateInplace, ZnxNormalizeDigit, ZnxNormalizeFinalStep,
             ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly, ZnxNormalizeFirstStepInplace,
             ZnxNormalizeMiddleStep, ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace, ZnxRotate, ZnxSub,
-            ZnxSubABInplace, ZnxSubBAInplace, ZnxSwitchRing, ZnxZero, znx_copy_ref, znx_rotate, znx_zero_ref,
+            ZnxSubInplace, ZnxSubNegateInplace, ZnxSwitchRing, ZnxZero, znx_copy_ref, znx_rotate, znx_zero_ref,
         },
     },
 };
@@ -27,8 +28,8 @@ use crate::cpu_fft64_avx::{
     FFT64Avx,
     reim::{
         ReimFFTAvx, ReimIFFTAvx, reim_add_avx2_fma, reim_add_inplace_avx2_fma, reim_addmul_avx2_fma, reim_from_znx_i64_bnd50_fma,
-        reim_mul_avx2_fma, reim_mul_inplace_avx2_fma, reim_negate_avx2_fma, reim_negate_inplace_avx2_fma,
-        reim_sub_ab_inplace_avx2_fma, reim_sub_avx2_fma, reim_sub_ba_inplace_avx2_fma, reim_to_znx_i64_inplace_bnd63_avx2_fma,
+        reim_mul_avx2_fma, reim_mul_inplace_avx2_fma, reim_negate_avx2_fma, reim_negate_inplace_avx2_fma, reim_sub_avx2_fma,
+        reim_sub_inplace_avx2_fma, reim_sub_negate_inplace_avx2_fma, reim_to_znx_i64_inplace_bnd63_avx2_fma,
     },
     reim_to_znx_i64_bnd63_avx2_fma,
     reim4::{
@@ -36,11 +37,12 @@ use crate::cpu_fft64_avx::{
         reim4_vec_mat1col_product_avx, reim4_vec_mat2cols_2ndcol_product_avx, reim4_vec_mat2cols_product_avx,
     },
     znx_avx::{
-        znx_add_avx, znx_add_inplace_avx, znx_automorphism_avx, znx_negate_avx, znx_negate_inplace_avx,
-        znx_normalize_final_step_avx, znx_normalize_final_step_inplace_avx, znx_normalize_first_step_avx,
-        znx_normalize_first_step_carry_only_avx, znx_normalize_first_step_inplace_avx, znx_normalize_middle_step_avx,
-        znx_normalize_middle_step_carry_only_avx, znx_normalize_middle_step_inplace_avx, znx_sub_ab_inplace_avx, znx_sub_avx,
-        znx_sub_ba_inplace_avx, znx_switch_ring_avx,
+        znx_add_avx, znx_add_inplace_avx, znx_automorphism_avx, znx_extract_digit_addmul_avx, znx_mul_add_power_of_two_avx,
+        znx_mul_power_of_two_avx, znx_mul_power_of_two_inplace_avx, znx_negate_avx, znx_negate_inplace_avx,
+        znx_normalize_digit_avx, znx_normalize_final_step_avx, znx_normalize_final_step_inplace_avx,
+        znx_normalize_first_step_avx, znx_normalize_first_step_carry_only_avx, znx_normalize_first_step_inplace_avx,
+        znx_normalize_middle_step_avx, znx_normalize_middle_step_carry_only_avx, znx_normalize_middle_step_inplace_avx,
+        znx_sub_avx, znx_sub_inplace_avx, znx_sub_negate_inplace_avx, znx_switch_ring_avx,
     },
 };

@@ -131,20 +133,20 @@ impl ZnxSub for FFT64Avx {
     }
 }

-impl ZnxSubABInplace for FFT64Avx {
+impl ZnxSubInplace for FFT64Avx {
     #[inline(always)]
-    fn znx_sub_ab_inplace(res: &mut [i64], a: &[i64]) {
+    fn znx_sub_inplace(res: &mut [i64], a: &[i64]) {
         unsafe {
-            znx_sub_ab_inplace_avx(res, a);
+            znx_sub_inplace_avx(res, a);
         }
     }
 }

-impl ZnxSubBAInplace for FFT64Avx {
+impl ZnxSubNegateInplace for FFT64Avx {
     #[inline(always)]
-    fn znx_sub_ba_inplace(res: &mut [i64], a: &[i64]) {
+    fn znx_sub_negate_inplace(res: &mut [i64], a: &[i64]) {
         unsafe {
-            znx_sub_ba_inplace_avx(res, a);
+            znx_sub_negate_inplace_avx(res, a);
         }
     }
 }
@@ -183,6 +185,33 @@ impl ZnxNegateInplace for FFT64Avx {
     }
 }

+impl ZnxMulAddPowerOfTwo for FFT64Avx {
+    #[inline(always)]
+    fn znx_muladd_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
+        unsafe {
+            znx_mul_add_power_of_two_avx(k, res, a);
+        }
+    }
+}
+
+impl ZnxMulPowerOfTwo for FFT64Avx {
+    #[inline(always)]
+    fn znx_mul_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
+        unsafe {
+            znx_mul_power_of_two_avx(k, res, a);
+        }
+    }
+}
+
+impl ZnxMulPowerOfTwoInplace for FFT64Avx {
+    #[inline(always)]
+    fn znx_mul_power_of_two_inplace(k: i64, res: &mut [i64]) {
+        unsafe {
+            znx_mul_power_of_two_inplace_avx(k, res);
+        }
+    }
+}
+
 impl ZnxRotate for FFT64Avx {
     #[inline(always)]
     fn znx_rotate(p: i64, res: &mut [i64], src: &[i64]) {
@@ -208,72 +237,90 @@ impl ZnxSwitchRing for FFT64Avx {

 impl ZnxNormalizeFinalStep for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_final_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+    fn znx_normalize_final_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_final_step_avx(basek, lsh, x, a, carry);
+            znx_normalize_final_step_avx(base2k, lsh, x, a, carry);
         }
     }
 }

 impl ZnxNormalizeFinalStepInplace for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_final_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+    fn znx_normalize_final_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_final_step_inplace_avx(basek, lsh, x, carry);
+            znx_normalize_final_step_inplace_avx(base2k, lsh, x, carry);
         }
     }
 }

 impl ZnxNormalizeFirstStep for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_first_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+    fn znx_normalize_first_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_first_step_avx(basek, lsh, x, a, carry);
+            znx_normalize_first_step_avx(base2k, lsh, x, a, carry);
         }
     }
 }

 impl ZnxNormalizeFirstStepCarryOnly for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_first_step_carry_only(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+    fn znx_normalize_first_step_carry_only(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_first_step_carry_only_avx(basek, lsh, x, carry);
+            znx_normalize_first_step_carry_only_avx(base2k, lsh, x, carry);
         }
     }
 }

 impl ZnxNormalizeFirstStepInplace for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_first_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+    fn znx_normalize_first_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_first_step_inplace_avx(basek, lsh, x, carry);
+            znx_normalize_first_step_inplace_avx(base2k, lsh, x, carry);
         }
     }
 }

 impl ZnxNormalizeMiddleStep for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_middle_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+    fn znx_normalize_middle_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_middle_step_avx(basek, lsh, x, a, carry);
+            znx_normalize_middle_step_avx(base2k, lsh, x, a, carry);
         }
     }
 }

 impl ZnxNormalizeMiddleStepCarryOnly for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_middle_step_carry_only(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+    fn znx_normalize_middle_step_carry_only(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_middle_step_carry_only_avx(basek, lsh, x, carry);
+            znx_normalize_middle_step_carry_only_avx(base2k, lsh, x, carry);
         }
     }
 }

 impl ZnxNormalizeMiddleStepInplace for FFT64Avx {
     #[inline(always)]
-    fn znx_normalize_middle_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+    fn znx_normalize_middle_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
         unsafe {
-            znx_normalize_middle_step_inplace_avx(basek, lsh, x, carry);
+            znx_normalize_middle_step_inplace_avx(base2k, lsh, x, carry);
         }
     }
 }
+
+impl ZnxExtractDigitAddMul for FFT64Avx {
+    #[inline(always)]
+    fn znx_extract_digit_addmul(base2k: usize, lsh: usize, res: &mut [i64], src: &mut [i64]) {
+        unsafe {
+            znx_extract_digit_addmul_avx(base2k, lsh, res, src);
+        }
+    }
+}
+
+impl ZnxNormalizeDigit for FFT64Avx {
+    #[inline(always)]
+    fn znx_normalize_digit(base2k: usize, res: &mut [i64], src: &mut [i64]) {
+        unsafe {
+            znx_normalize_digit_avx(base2k, res, src);
+        }
+    }
+}
@@ -346,20 +393,20 @@ impl ReimSub for FFT64Avx {
     }
 }

-impl ReimSubABInplace for FFT64Avx {
+impl ReimSubInplace for FFT64Avx {
     #[inline(always)]
-    fn reim_sub_ab_inplace(res: &mut [f64], a: &[f64]) {
+    fn reim_sub_inplace(res: &mut [f64], a: &[f64]) {
         unsafe {
-            reim_sub_ab_inplace_avx2_fma(res, a);
+            reim_sub_inplace_avx2_fma(res, a);
         }
     }
 }

-impl ReimSubBAInplace for FFT64Avx {
+impl ReimSubNegateInplace for FFT64Avx {
     #[inline(always)]
-    fn reim_sub_ba_inplace(res: &mut [f64], a: &[f64]) {
+    fn reim_sub_negate_inplace(res: &mut [f64], a: &[f64]) {
         unsafe {
-            reim_sub_ba_inplace_avx2_fma(res, a);
+            reim_sub_negate_inplace_avx2_fma(res, a);
         }
     }
 }
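Throughout the backend, the old ab/ba suffixes give way to names that state the operand order directly. A minimal scalar sketch of the intended semantics, inferred from the doc comments on the VecZnxBig variants further down (an illustration, not the crate's reference kernels): sub_inplace subtracts the argument from the receiver, and sub_negate_inplace is that subtraction followed by a negation.

    // res <- res - a (old znx_sub_ab_inplace)
    fn sub_inplace(res: &mut [i64], a: &[i64]) {
        for (r, &x) in res.iter_mut().zip(a) {
            *r -= x;
        }
    }

    // res <- a - res (old znx_sub_ba_inplace)
    fn sub_negate_inplace(res: &mut [i64], a: &[i64]) {
        for (r, &x) in res.iter_mut().zip(a) {
            *r = x - *r;
        }
    }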
@@ -88,7 +88,7 @@ pub fn reim_sub_avx2_fma(res: &mut [f64], a: &[f64], b: &[f64]) {
 /// # Safety
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 #[target_feature(enable = "avx2,fma")]
-pub fn reim_sub_ab_inplace_avx2_fma(res: &mut [f64], a: &[f64]) {
+pub fn reim_sub_inplace_avx2_fma(res: &mut [f64], a: &[f64]) {
     #[cfg(debug_assertions)]
     {
         assert_eq!(a.len(), res.len());
@@ -115,7 +115,7 @@ pub fn reim_sub_ab_inplace_avx2_fma(res: &mut [f64], a: &[f64]) {
 /// # Safety
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 #[target_feature(enable = "avx2,fma")]
-pub fn reim_sub_ba_inplace_avx2_fma(res: &mut [f64], a: &[f64]) {
+pub fn reim_sub_negate_inplace_avx2_fma(res: &mut [f64], a: &[f64]) {
     #[cfg(debug_assertions)]
     {
         assert_eq!(a.len(), res.len());
@@ -253,9 +253,6 @@ fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8])
             (take_slice, rem_slice)
         }
     } else {
-        panic!(
-            "Attempted to take {} from scratch with {} aligned bytes left",
-            take_len, aligned_len,
-        );
+        panic!("Attempted to take {take_len} from scratch with {aligned_len} aligned bytes left");
     }
 }
@@ -5,15 +5,15 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_fft64_avx::FFT64Avx,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vec_znx_add => poulpy_hal::test_suite::vec_znx::test_vec_znx_add,
         test_vec_znx_add_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_add_inplace,
         test_vec_znx_add_scalar => poulpy_hal::test_suite::vec_znx::test_vec_znx_add_scalar,
         test_vec_znx_add_scalar_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_add_scalar_inplace,
         test_vec_znx_sub => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub,
-        test_vec_znx_sub_ab_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_ab_inplace,
-        test_vec_znx_sub_ba_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_ba_inplace,
+        test_vec_znx_sub_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_inplace,
+        test_vec_znx_sub_negate_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_negate_inplace,
         test_vec_znx_sub_scalar => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_scalar,
         test_vec_znx_sub_scalar_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_scalar_inplace,
         test_vec_znx_rsh => poulpy_hal::test_suite::vec_znx::test_vec_znx_rsh,
@@ -41,7 +41,7 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_fft64_avx::FFT64Avx,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_svp_apply_dft_to_dft => poulpy_hal::test_suite::svp::test_svp_apply_dft_to_dft,
         test_svp_apply_dft_to_dft_inplace => poulpy_hal::test_suite::svp::test_svp_apply_dft_to_dft_inplace,
@@ -53,20 +53,20 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_fft64_avx::FFT64Avx,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vec_znx_big_add => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add,
         test_vec_znx_big_add_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add_inplace,
         test_vec_znx_big_add_small => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add_small,
         test_vec_znx_big_add_small_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add_small_inplace,
         test_vec_znx_big_sub => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub,
-        test_vec_znx_big_sub_ab_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_ab_inplace,
+        test_vec_znx_big_sub_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_inplace,
         test_vec_znx_big_automorphism => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_automorphism,
         test_vec_znx_big_automorphism_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_automorphism_inplace,
         test_vec_znx_big_negate => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_negate,
         test_vec_znx_big_negate_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_negate_inplace,
         test_vec_znx_big_normalize => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_normalize,
-        test_vec_znx_big_sub_ba_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_ba_inplace,
+        test_vec_znx_big_sub_negate_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_negate_inplace,
         test_vec_znx_big_sub_small_a => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_small_a,
         test_vec_znx_big_sub_small_a_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_small_a_inplace,
         test_vec_znx_big_sub_small_b => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_small_b,
@@ -79,13 +79,13 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_fft64_avx::FFT64Avx,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vec_znx_dft_add => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_add,
         test_vec_znx_dft_add_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_add_inplace,
         test_vec_znx_dft_sub => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub,
-        test_vec_znx_dft_sub_ab_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_ab_inplace,
-        test_vec_znx_dft_sub_ba_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_ba_inplace,
+        test_vec_znx_dft_sub_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_inplace,
+        test_vec_znx_dft_sub_negate_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_negate_inplace,
         test_vec_znx_idft_apply => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_idft_apply,
         test_vec_znx_idft_apply_consume => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_idft_apply_consume,
         test_vec_znx_idft_apply_tmpa => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_idft_apply_tmpa,
@@ -97,7 +97,7 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_fft64_avx::FFT64Avx,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vmp_apply_dft_to_dft => poulpy_hal::test_suite::vmp::test_vmp_apply_dft_to_dft,
         test_vmp_apply_dft_to_dft_add => poulpy_hal::test_suite::vmp::test_vmp_apply_dft_to_dft_add,
@@ -1,7 +1,8 @@
 use poulpy_hal::{
     api::{
-        TakeSlice, VecZnxAutomorphismInplaceTmpBytes, VecZnxMergeRingsTmpBytes, VecZnxMulXpMinusOneInplaceTmpBytes,
-        VecZnxNormalizeTmpBytes, VecZnxRotateInplaceTmpBytes, VecZnxSplitRingTmpBytes,
+        TakeSlice, VecZnxAutomorphismInplaceTmpBytes, VecZnxLshTmpBytes, VecZnxMergeRingsTmpBytes,
+        VecZnxMulXpMinusOneInplaceTmpBytes, VecZnxNormalizeTmpBytes, VecZnxRotateInplaceTmpBytes, VecZnxRshTmpBytes,
+        VecZnxSplitRingTmpBytes,
     },
     layouts::{Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
     oep::{
@@ -12,7 +13,7 @@ use poulpy_hal::{
         VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
         VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
         VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
-        VecZnxSplitRingTmpBytesImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarImpl,
+        VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
         VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
     },
     reference::vec_znx::{
@@ -23,7 +24,7 @@ use poulpy_hal::{
         vec_znx_mul_xp_minus_one_inplace_tmp_bytes, vec_znx_negate, vec_znx_negate_inplace, vec_znx_normalize,
         vec_znx_normalize_inplace, vec_znx_normalize_tmp_bytes, vec_znx_rotate, vec_znx_rotate_inplace,
         vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
-        vec_znx_split_ring_tmp_bytes, vec_znx_sub, vec_znx_sub_ab_inplace, vec_znx_sub_ba_inplace, vec_znx_sub_scalar,
+        vec_znx_split_ring_tmp_bytes, vec_znx_sub, vec_znx_sub_inplace, vec_znx_sub_negate_inplace, vec_znx_sub_scalar,
         vec_znx_sub_scalar_inplace, vec_znx_switch_ring,
     },
     source::Source,
@@ -43,9 +44,10 @@ where
 {
     fn vec_znx_normalize_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        res_basek: usize,
         res: &mut R,
         res_col: usize,
+        a_basek: usize,
         a: &A,
         a_col: usize,
         scratch: &mut Scratch<Self>,
@@ -54,7 +56,7 @@ where
         A: VecZnxToRef,
     {
         let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_normalize::<R, A, Self>(basek, res, res_col, a, a_col, carry);
+        vec_znx_normalize::<R, A, Self>(res_basek, res, res_col, a_basek, a, a_col, carry);
     }
 }

@@ -64,7 +66,7 @@ where
 {
     fn vec_znx_normalize_inplace_impl<R>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         scratch: &mut Scratch<Self>,
@@ -72,7 +74,7 @@ where
         R: VecZnxToMut,
     {
         let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_normalize_inplace::<R, Self>(basek, res, res_col, carry);
+        vec_znx_normalize_inplace::<R, Self>(base2k, res, res_col, carry);
     }
 }

@@ -143,23 +145,23 @@ unsafe impl VecZnxSubImpl<Self> for FFT64Avx {
     }
 }

-unsafe impl VecZnxSubABInplaceImpl<Self> for FFT64Avx {
-    fn vec_znx_sub_ab_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxSubInplaceImpl<Self> for FFT64Avx {
+    fn vec_znx_sub_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        vec_znx_sub_ab_inplace::<R, A, Self>(res, res_col, a, a_col);
+        vec_znx_sub_inplace::<R, A, Self>(res, res_col, a, a_col);
     }
 }

-unsafe impl VecZnxSubBAInplaceImpl<Self> for FFT64Avx {
-    fn vec_znx_sub_ba_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxSubNegateInplaceImpl<Self> for FFT64Avx {
+    fn vec_znx_sub_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        vec_znx_sub_ba_inplace::<R, A, Self>(res, res_col, a, a_col);
+        vec_znx_sub_negate_inplace::<R, A, Self>(res, res_col, a, a_col);
     }
 }

@@ -234,9 +236,9 @@ where
     Module<Self>: VecZnxNormalizeTmpBytes,
     Scratch<Self>: TakeSlice,
 {
-    fn vec_znx_lsh_inplace_impl<R, A>(
+    fn vec_znx_lsh_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         res: &mut R,
         res_col: usize,
@@ -247,8 +249,8 @@ where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_lsh::<_, _, Self>(basek, k, res, res_col, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_lsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_lsh::<_, _, Self>(base2k, k, res, res_col, a, a_col, carry);
     }
 }

@@ -259,7 +261,7 @@ where
 {
     fn vec_znx_lsh_inplace_impl<A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         a: &mut A,
         a_col: usize,
@@ -267,8 +269,8 @@ where
     ) where
         A: VecZnxToMut,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_lsh_inplace::<_, Self>(basek, k, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_lsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_lsh_inplace::<_, Self>(base2k, k, a, a_col, carry);
     }
 }

@@ -277,9 +279,9 @@ where
     Module<Self>: VecZnxNormalizeTmpBytes,
     Scratch<Self>: TakeSlice,
 {
-    fn vec_znx_rsh_inplace_impl<R, A>(
+    fn vec_znx_rsh_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         res: &mut R,
         res_col: usize,
@@ -290,8 +292,8 @@ where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_rsh::<_, _, Self>(basek, k, res, res_col, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_rsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_rsh::<_, _, Self>(base2k, k, res, res_col, a, a_col, carry);
     }
 }

@@ -302,7 +304,7 @@ where
 {
     fn vec_znx_rsh_inplace_impl<A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         a: &mut A,
         a_col: usize,
@@ -310,8 +312,8 @@ where
     ) where
         A: VecZnxToMut,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_rsh_inplace::<_, Self>(basek, k, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_rsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_rsh_inplace::<_, Self>(base2k, k, a, a_col, carry);
     }
 }

@@ -495,18 +497,18 @@ unsafe impl VecZnxCopyImpl<Self> for FFT64Avx {
 }

 unsafe impl VecZnxFillUniformImpl<Self> for FFT64Avx {
-    fn vec_znx_fill_uniform_impl<R>(_module: &Module<Self>, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn vec_znx_fill_uniform_impl<R>(_module: &Module<Self>, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: VecZnxToMut,
     {
-        vec_znx_fill_uniform_ref(basek, res, res_col, source)
+        vec_znx_fill_uniform_ref(base2k, res, res_col, source)
     }
 }

 unsafe impl VecZnxFillNormalImpl<Self> for FFT64Avx {
     fn vec_znx_fill_normal_impl<R>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -516,14 +518,14 @@ unsafe impl VecZnxFillNormalImpl<Self> for FFT64Avx {
     ) where
         R: VecZnxToMut,
     {
-        vec_znx_fill_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_fill_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }

 unsafe impl VecZnxAddNormalImpl<Self> for FFT64Avx {
     fn vec_znx_add_normal_impl<R>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -533,6 +535,6 @@ unsafe impl VecZnxAddNormalImpl<Self> for FFT64Avx {
     ) where
         R: VecZnxToMut,
     {
-        vec_znx_add_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_add_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }
@@ -10,15 +10,15 @@ use poulpy_hal::{
         VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
         VecZnxBigAutomorphismInplaceImpl, VecZnxBigAutomorphismInplaceTmpBytesImpl, VecZnxBigFromBytesImpl,
         VecZnxBigFromSmallImpl, VecZnxBigNegateImpl, VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl,
-        VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl, VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl,
-        VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
+        VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubImpl, VecZnxBigSubInplaceImpl, VecZnxBigSubNegateInplaceImpl,
+        VecZnxBigSubSmallAImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallInplaceImpl, VecZnxBigSubSmallNegateInplaceImpl,
     },
     reference::{
         fft64::vec_znx_big::{
             vec_znx_big_add, vec_znx_big_add_inplace, vec_znx_big_add_normal_ref, vec_znx_big_add_small,
             vec_znx_big_add_small_inplace, vec_znx_big_automorphism, vec_znx_big_automorphism_inplace,
             vec_znx_big_automorphism_inplace_tmp_bytes, vec_znx_big_negate, vec_znx_big_negate_inplace, vec_znx_big_normalize,
-            vec_znx_big_normalize_tmp_bytes, vec_znx_big_sub, vec_znx_big_sub_ab_inplace, vec_znx_big_sub_ba_inplace,
+            vec_znx_big_normalize_tmp_bytes, vec_znx_big_sub, vec_znx_big_sub_inplace, vec_znx_big_sub_negate_inplace,
             vec_znx_big_sub_small_a, vec_znx_big_sub_small_a_inplace, vec_znx_big_sub_small_b, vec_znx_big_sub_small_b_inplace,
         },
         znx::{znx_copy_ref, znx_zero_ref},
@@ -76,7 +76,7 @@ unsafe impl VecZnxBigFromSmallImpl<Self> for FFT64Avx {
 unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Avx {
     fn add_normal_impl<R: VecZnxBigToMut<Self>>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -84,7 +84,7 @@ unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Avx {
         sigma: f64,
         bound: f64,
     ) {
-        vec_znx_big_add_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_big_add_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }

@@ -167,25 +167,25 @@ unsafe impl VecZnxBigSubImpl<Self> for FFT64Avx {
     }
 }

-unsafe impl VecZnxBigSubABInplaceImpl<Self> for FFT64Avx {
+unsafe impl VecZnxBigSubInplaceImpl<Self> for FFT64Avx {
     /// Subtracts `a` from `b` and stores the result on `b`.
-    fn vec_znx_big_sub_ab_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxBigToRef<Self>,
     {
-        vec_znx_big_sub_ab_inplace(res, res_col, a, a_col);
+        vec_znx_big_sub_inplace(res, res_col, a, a_col);
     }
 }

-unsafe impl VecZnxBigSubBAInplaceImpl<Self> for FFT64Avx {
+unsafe impl VecZnxBigSubNegateInplaceImpl<Self> for FFT64Avx {
     /// Subtracts `b` from `a` and stores the result on `b`.
-    fn vec_znx_big_sub_ba_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxBigToRef<Self>,
     {
-        vec_znx_big_sub_ba_inplace(res, res_col, a, a_col);
+        vec_znx_big_sub_negate_inplace(res, res_col, a, a_col);
     }
 }

@@ -208,9 +208,9 @@ unsafe impl VecZnxBigSubSmallAImpl<Self> for FFT64Avx {
     }
 }

-unsafe impl VecZnxBigSubSmallAInplaceImpl<Self> for FFT64Avx {
+unsafe impl VecZnxBigSubSmallInplaceImpl<Self> for FFT64Avx {
     /// Subtracts `a` from `res` and stores the result on `res`.
-    fn vec_znx_big_sub_small_a_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxToRef,
@@ -238,9 +238,9 @@ unsafe impl VecZnxBigSubSmallBImpl<Self> for FFT64Avx {
     }
 }

-unsafe impl VecZnxBigSubSmallBInplaceImpl<Self> for FFT64Avx {
+unsafe impl VecZnxBigSubSmallNegateInplaceImpl<Self> for FFT64Avx {
     /// Subtracts `res` from `a` and stores the result on `res`.
-    fn vec_znx_big_sub_small_b_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxToRef,
@@ -280,9 +280,10 @@ where
 {
     fn vec_znx_big_normalize_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        res_basek: usize,
         res: &mut R,
         res_col: usize,
+        a_basek: usize,
         a: &A,
         a_col: usize,
         scratch: &mut Scratch<Self>,
@@ -291,7 +292,7 @@ where
         A: VecZnxBigToRef<Self>,
     {
         let (carry, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_big_normalize(basek, res, res_col, a, a_col, carry);
+        vec_znx_big_normalize(res_basek, res, res_col, a_basek, a, a_col, carry);
     }
 }

@@ -326,7 +327,7 @@ where
     ) where
         R: VecZnxBigToMut<Self>,
     {
-        let (tmp, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes() / size_of::<i64>());
+        let (tmp, _) = scratch.take_slice(module.vec_znx_big_automorphism_inplace_tmp_bytes() / size_of::<i64>());
         vec_znx_big_automorphism_inplace(p, res, res_col, tmp);
     }
 }
@@ -5,12 +5,12 @@ use poulpy_hal::{
     },
     oep::{
         VecZnxDftAddImpl, VecZnxDftAddInplaceImpl, VecZnxDftAllocBytesImpl, VecZnxDftAllocImpl, VecZnxDftApplyImpl,
-        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubABInplaceImpl, VecZnxDftSubBAInplaceImpl, VecZnxDftSubImpl,
+        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubImpl, VecZnxDftSubInplaceImpl, VecZnxDftSubNegateInplaceImpl,
         VecZnxDftZeroImpl, VecZnxIdftApplyConsumeImpl, VecZnxIdftApplyImpl, VecZnxIdftApplyTmpAImpl, VecZnxIdftApplyTmpBytesImpl,
     },
     reference::fft64::vec_znx_dft::{
-        vec_znx_dft_add, vec_znx_dft_add_inplace, vec_znx_dft_apply, vec_znx_dft_copy, vec_znx_dft_sub,
-        vec_znx_dft_sub_ab_inplace, vec_znx_dft_sub_ba_inplace, vec_znx_dft_zero, vec_znx_idft_apply, vec_znx_idft_apply_consume,
+        vec_znx_dft_add, vec_znx_dft_add_inplace, vec_znx_dft_apply, vec_znx_dft_copy, vec_znx_dft_sub, vec_znx_dft_sub_inplace,
+        vec_znx_dft_sub_negate_inplace, vec_znx_dft_zero, vec_znx_idft_apply, vec_znx_idft_apply_consume,
         vec_znx_idft_apply_tmpa,
     },
 };
@@ -139,23 +139,23 @@ unsafe impl VecZnxDftSubImpl<Self> for FFT64Avx {
     }
 }

-unsafe impl VecZnxDftSubABInplaceImpl<Self> for FFT64Avx {
-    fn vec_znx_dft_sub_ab_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxDftSubInplaceImpl<Self> for FFT64Avx {
+    fn vec_znx_dft_sub_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<Self>,
         A: VecZnxDftToRef<Self>,
     {
-        vec_znx_dft_sub_ab_inplace(res, res_col, a, a_col);
+        vec_znx_dft_sub_inplace(res, res_col, a, a_col);
     }
 }

-unsafe impl VecZnxDftSubBAInplaceImpl<Self> for FFT64Avx {
-    fn vec_znx_dft_sub_ba_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxDftSubNegateInplaceImpl<Self> for FFT64Avx {
+    fn vec_znx_dft_sub_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<Self>,
         A: VecZnxDftToRef<Self>,
     {
-        vec_znx_dft_sub_ba_inplace(res, res_col, a, a_col);
+        vec_znx_dft_sub_negate_inplace(res, res_col, a, a_col);
     }
 }

@@ -18,21 +18,21 @@ unsafe impl ZnNormalizeInplaceImpl<Self> for FFT64Avx
 where
     Self: TakeSliceImpl<Self>,
 {
-    fn zn_normalize_inplace_impl<R>(n: usize, basek: usize, res: &mut R, res_col: usize, scratch: &mut Scratch<Self>)
+    fn zn_normalize_inplace_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, scratch: &mut Scratch<Self>)
     where
         R: ZnToMut,
     {
         let (carry, _) = scratch.take_slice(n);
-        zn_normalize_inplace::<R, FFT64Avx>(n, basek, res, res_col, carry);
+        zn_normalize_inplace::<R, FFT64Avx>(n, base2k, res, res_col, carry);
     }
 }

 unsafe impl ZnFillUniformImpl<Self> for FFT64Avx {
-    fn zn_fill_uniform_impl<R>(n: usize, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn zn_fill_uniform_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: ZnToMut,
     {
-        zn_fill_uniform(n, basek, res, res_col, source);
+        zn_fill_uniform(n, base2k, res, res_col, source);
     }
 }

@@ -40,7 +40,7 @@ unsafe impl ZnFillNormalImpl<Self> for FFT64Avx {
     #[allow(clippy::too_many_arguments)]
     fn zn_fill_normal_impl<R>(
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -50,7 +50,7 @@ unsafe impl ZnFillNormalImpl<Self> for FFT64Avx {
     ) where
         R: ZnToMut,
     {
-        zn_fill_normal(n, basek, res, res_col, k, source, sigma, bound);
+        zn_fill_normal(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }

@@ -58,7 +58,7 @@ unsafe impl ZnAddNormalImpl<Self> for FFT64Avx {
     #[allow(clippy::too_many_arguments)]
     fn zn_add_normal_impl<R>(
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -68,6 +68,6 @@ unsafe impl ZnAddNormalImpl<Self> for FFT64Avx {
     ) where
         R: ZnToMut,
     {
-        zn_add_normal(n, basek, res, res_col, k, source, sigma, bound);
+        zn_add_normal(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }
@@ -1,5 +1,6 @@
 mod add;
 mod automorphism;
+mod mul;
 mod neg;
 mod normalization;
 mod sub;
@@ -7,6 +8,7 @@ mod switch_ring;
 
 pub(crate) use add::*;
 pub(crate) use automorphism::*;
+pub(crate) use mul::*;
 pub(crate) use neg::*;
 pub(crate) use normalization::*;
 pub(crate) use sub::*;
poulpy-backend/src/cpu_fft64_avx/znx_avx/mul.rs (new file, 318 lines)
@@ -0,0 +1,318 @@
+/// Multiply/divide by a power of two with rounding matching [poulpy_hal::reference::znx::znx_mul_power_of_two_ref].
+///
+/// # Safety
+/// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
+/// all inputs must have the same length and must not alias.
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+pub unsafe fn znx_mul_power_of_two_avx(k: i64, res: &mut [i64], a: &[i64]) {
+    #[cfg(debug_assertions)]
+    {
+        assert_eq!(res.len(), a.len());
+    }
+
+    use core::arch::x86_64::{
+        __m128i, __m256i, _mm_cvtsi32_si128, _mm256_add_epi64, _mm256_and_si256, _mm256_cmpgt_epi64, _mm256_loadu_si256,
+        _mm256_or_si256, _mm256_set1_epi64x, _mm256_setzero_si256, _mm256_sll_epi64, _mm256_srl_epi64, _mm256_srli_epi64,
+        _mm256_storeu_si256, _mm256_sub_epi64,
+    };
+
+    let n: usize = res.len();
+
+    if n == 0 {
+        return;
+    }
+
+    if k == 0 {
+        use poulpy_hal::reference::znx::znx_copy_ref;
+        znx_copy_ref(res, a);
+        return;
+    }
+
+    let span: usize = n >> 2; // number of 256-bit chunks
+
+    unsafe {
+        let mut rr: *mut __m256i = res.as_mut_ptr() as *mut __m256i;
+        let mut aa: *const __m256i = a.as_ptr() as *const __m256i;
+
+        if k > 0 {
+            // Left shift by k (variable count).
+            #[cfg(debug_assertions)]
+            {
+                debug_assert!(k <= 63);
+            }
+            let cnt128: __m128i = _mm_cvtsi32_si128(k as i32);
+            for _ in 0..span {
+                let x: __m256i = _mm256_loadu_si256(aa);
+                let y: __m256i = _mm256_sll_epi64(x, cnt128);
+                _mm256_storeu_si256(rr, y);
+                rr = rr.add(1);
+                aa = aa.add(1);
+            }
+
+            // tail
+            if !n.is_multiple_of(4) {
+                use poulpy_hal::reference::znx::znx_mul_power_of_two_ref;
+
+                znx_mul_power_of_two_ref(k, &mut res[span << 2..], &a[span << 2..]);
+            }
+            return;
+        }
+
+        // k < 0 => arithmetic right shift with rounding:
+        // for each x:
+        //   sign_bit = (x >> 63) & 1
+        //   bias = (1 << (kp - 1)) - sign_bit
+        //   t = x + bias
+        //   y = t >> kp (arithmetic)
+        let kp = -k;
+        #[cfg(debug_assertions)]
+        {
+            debug_assert!((1..=63).contains(&kp));
+        }
+
+        let cnt_right: __m128i = _mm_cvtsi32_si128(kp as i32);
+        let bias_base: __m256i = _mm256_set1_epi64x(1_i64 << (kp - 1));
+        let top_mask: __m256i = _mm256_set1_epi64x(-1_i64 << (64 - kp)); // high kp bits
+        let zero: __m256i = _mm256_setzero_si256();
+
+        for _ in 0..span {
+            let x = _mm256_loadu_si256(aa);
+
+            // bias = (1 << (kp - 1)) - sign_bit
+            let sign_bit_x: __m256i = _mm256_srli_epi64(x, 63);
+            let bias: __m256i = _mm256_sub_epi64(bias_base, sign_bit_x);
+
+            // t = x + bias
+            let t: __m256i = _mm256_add_epi64(x, bias);
+
+            // logical shift
+            let lsr: __m256i = _mm256_srl_epi64(t, cnt_right);
+
+            // sign extension
+            let neg: __m256i = _mm256_cmpgt_epi64(zero, t);
+            let fill: __m256i = _mm256_and_si256(neg, top_mask);
+            let y: __m256i = _mm256_or_si256(lsr, fill);
+
+            _mm256_storeu_si256(rr, y);
+            rr = rr.add(1);
+            aa = aa.add(1);
+        }
+    }
+
+    // tail
+    if !n.is_multiple_of(4) {
+        use poulpy_hal::reference::znx::znx_mul_power_of_two_ref;
+
+        znx_mul_power_of_two_ref(k, &mut res[span << 2..], &a[span << 2..]);
+    }
+}
+
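The `k < 0` branch above is the SIMD form of a standard round-half-away-from-zero shift. The same arithmetic written out as a scalar model (a sketch; ignores wraparound at the extremes of `i64`):

fn div_pow2_round_sketch(x: i64, kp: u32) -> i64 {
    let sign_bit = (x >> 63) & 1; // 1 when x is negative, else 0
    let bias = (1i64 << (kp - 1)) - sign_bit;
    (x + bias) >> kp // arithmetic shift, i.e. floor division by 2^kp
}

// e.g. div_pow2_round_sketch(3, 1) == 2 and div_pow2_round_sketch(-3, 1) == -2,
// so exact halves round away from zero in both directions.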
+/// Multiply/divide inplace by a power of two with rounding matching [poulpy_hal::reference::znx::znx_mul_power_of_two_inplace_ref].
+///
+/// # Safety
+/// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
+/// all inputs must have the same length and must not alias.
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+pub unsafe fn znx_mul_power_of_two_inplace_avx(k: i64, res: &mut [i64]) {
+    use core::arch::x86_64::{
+        __m128i, __m256i, _mm_cvtsi32_si128, _mm256_add_epi64, _mm256_and_si256, _mm256_cmpgt_epi64, _mm256_loadu_si256,
+        _mm256_or_si256, _mm256_set1_epi64x, _mm256_setzero_si256, _mm256_sll_epi64, _mm256_srl_epi64, _mm256_srli_epi64,
+        _mm256_storeu_si256, _mm256_sub_epi64,
+    };
+
+    let n: usize = res.len();
+
+    if n == 0 {
+        return;
+    }
+
+    if k == 0 {
+        return;
+    }
+
+    let span: usize = n >> 2; // number of 256-bit chunks
+
+    unsafe {
+        let mut rr: *mut __m256i = res.as_mut_ptr() as *mut __m256i;
+
+        if k > 0 {
+            // Left shift by k (variable count).
+            #[cfg(debug_assertions)]
+            {
+                debug_assert!(k <= 63);
+            }
+            let cnt128: __m128i = _mm_cvtsi32_si128(k as i32);
+            for _ in 0..span {
+                let x: __m256i = _mm256_loadu_si256(rr);
+                let y: __m256i = _mm256_sll_epi64(x, cnt128);
+                _mm256_storeu_si256(rr, y);
+                rr = rr.add(1);
+            }
+
+            // tail
+            if !n.is_multiple_of(4) {
+                use poulpy_hal::reference::znx::znx_mul_power_of_two_inplace_ref;
+                znx_mul_power_of_two_inplace_ref(k, &mut res[span << 2..]);
+            }
+            return;
+        }
+
+        // k < 0 => arithmetic right shift with rounding:
+        // for each x:
+        //   sign_bit = (x >> 63) & 1
+        //   bias = (1 << (kp - 1)) - sign_bit
+        //   t = x + bias
+        //   y = t >> kp (arithmetic)
+        let kp = -k;
+        #[cfg(debug_assertions)]
+        {
+            debug_assert!((1..=63).contains(&kp));
+        }
+
+        let cnt_right: __m128i = _mm_cvtsi32_si128(kp as i32);
+        let bias_base: __m256i = _mm256_set1_epi64x(1_i64 << (kp - 1));
+        let top_mask: __m256i = _mm256_set1_epi64x(-1_i64 << (64 - kp)); // high kp bits
+        let zero: __m256i = _mm256_setzero_si256();
+
+        for _ in 0..span {
+            let x = _mm256_loadu_si256(rr);
+
+            // bias = (1 << (kp - 1)) - sign_bit
+            let sign_bit_x: __m256i = _mm256_srli_epi64(x, 63);
+            let bias: __m256i = _mm256_sub_epi64(bias_base, sign_bit_x);
+
+            // t = x + bias
+            let t: __m256i = _mm256_add_epi64(x, bias);
+
+            // logical shift
+            let lsr: __m256i = _mm256_srl_epi64(t, cnt_right);
+
+            // sign extension
+            let neg: __m256i = _mm256_cmpgt_epi64(zero, t);
+            let fill: __m256i = _mm256_and_si256(neg, top_mask);
+            let y: __m256i = _mm256_or_si256(lsr, fill);
+
+            _mm256_storeu_si256(rr, y);
+            rr = rr.add(1);
+        }
+    }
+
+    // tail
+    if !n.is_multiple_of(4) {
+        use poulpy_hal::reference::znx::znx_mul_power_of_two_inplace_ref;
+        znx_mul_power_of_two_inplace_ref(k, &mut res[span << 2..]);
+    }
+}
+
+/// Multiply/divide by a power of two and add on the result with rounding matching [poulpy_hal::reference::znx::znx_mul_add_power_of_two_ref].
+///
+/// # Safety
+/// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
+/// all inputs must have the same length and must not alias.
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+pub unsafe fn znx_mul_add_power_of_two_avx(k: i64, res: &mut [i64], a: &[i64]) {
+    #[cfg(debug_assertions)]
+    {
+        assert_eq!(res.len(), a.len());
+    }
+
+    use core::arch::x86_64::{
+        __m128i, __m256i, _mm_cvtsi32_si128, _mm256_add_epi64, _mm256_and_si256, _mm256_cmpgt_epi64, _mm256_loadu_si256,
+        _mm256_or_si256, _mm256_set1_epi64x, _mm256_setzero_si256, _mm256_sll_epi64, _mm256_srl_epi64, _mm256_srli_epi64,
+        _mm256_storeu_si256, _mm256_sub_epi64,
+    };
+
+    let n: usize = res.len();
+
+    if n == 0 {
+        return;
+    }
+
+    if k == 0 {
+        use crate::cpu_fft64_avx::znx_avx::znx_add_inplace_avx;
+
+        znx_add_inplace_avx(res, a);
+        return;
+    }
+
+    let span: usize = n >> 2; // number of 256-bit chunks
+
+    unsafe {
+        let mut rr: *mut __m256i = res.as_mut_ptr() as *mut __m256i;
+        let mut aa: *const __m256i = a.as_ptr() as *const __m256i;
+
+        if k > 0 {
+            // Left shift by k (variable count).
+            #[cfg(debug_assertions)]
+            {
+                debug_assert!(k <= 63);
+            }
+            let cnt128: __m128i = _mm_cvtsi32_si128(k as i32);
+            for _ in 0..span {
+                let x: __m256i = _mm256_loadu_si256(aa);
+                let y: __m256i = _mm256_loadu_si256(rr);
+                _mm256_storeu_si256(rr, _mm256_add_epi64(y, _mm256_sll_epi64(x, cnt128)));
+                rr = rr.add(1);
+                aa = aa.add(1);
+            }
+
+            // tail
+            if !n.is_multiple_of(4) {
+                use poulpy_hal::reference::znx::znx_mul_add_power_of_two_ref;
+
+                znx_mul_add_power_of_two_ref(k, &mut res[span << 2..], &a[span << 2..]);
+            }
+            return;
+        }
+
+        // k < 0 => arithmetic right shift with rounding:
+        // for each x:
+        //   sign_bit = (x >> 63) & 1
+        //   bias = (1 << (kp - 1)) - sign_bit
+        //   t = x + bias
+        //   y = t >> kp (arithmetic)
+        let kp = -k;
+        #[cfg(debug_assertions)]
+        {
+            debug_assert!((1..=63).contains(&kp));
+        }
+
+        let cnt_right: __m128i = _mm_cvtsi32_si128(kp as i32);
+        let bias_base: __m256i = _mm256_set1_epi64x(1_i64 << (kp - 1));
+        let top_mask: __m256i = _mm256_set1_epi64x(-1_i64 << (64 - kp)); // high kp bits
+        let zero: __m256i = _mm256_setzero_si256();
+
+        for _ in 0..span {
+            let x: __m256i = _mm256_loadu_si256(aa);
+            let y: __m256i = _mm256_loadu_si256(rr);
+
+            // bias = (1 << (kp - 1)) - sign_bit
+            let sign_bit_x: __m256i = _mm256_srli_epi64(x, 63);
+            let bias: __m256i = _mm256_sub_epi64(bias_base, sign_bit_x);
+
+            // t = x + bias
+            let t: __m256i = _mm256_add_epi64(x, bias);
+
+            // logical shift
+            let lsr: __m256i = _mm256_srl_epi64(t, cnt_right);
+
+            // sign extension
+            let neg: __m256i = _mm256_cmpgt_epi64(zero, t);
+            let fill: __m256i = _mm256_and_si256(neg, top_mask);
+            let out: __m256i = _mm256_or_si256(lsr, fill);
+
+            _mm256_storeu_si256(rr, _mm256_add_epi64(y, out));
+            rr = rr.add(1);
+            aa = aa.add(1);
+        }
+    }
+
+    // tail
+    if !n.is_multiple_of(4) {
+        use poulpy_hal::reference::znx::znx_mul_add_power_of_two_ref;
+        znx_mul_add_power_of_two_ref(k, &mut res[span << 2..], &a[span << 2..]);
+    }
+}
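A hypothetical call site for the functions in this file (the wrapper name is an assumption; only `znx_mul_power_of_two_avx` and the `_ref` fallback come from the source): detect AVX2 at runtime, otherwise take the portable reference path.

fn mul_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
    #[cfg(target_arch = "x86_64")]
    if is_x86_feature_detected!("avx2") {
        // Safety: AVX2 support was just checked; the slices have equal length.
        unsafe { znx_mul_power_of_two_avx(k, res, a) };
        return;
    }
    poulpy_hal::reference::znx::znx_mul_power_of_two_ref(k, res, a);
}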
@@ -6,14 +6,14 @@ use std::arch::x86_64::__m256i;
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-fn normalize_consts_avx(basek: usize) -> (__m256i, __m256i, __m256i, __m256i) {
+fn normalize_consts_avx(base2k: usize) -> (__m256i, __m256i, __m256i, __m256i) {
     use std::arch::x86_64::_mm256_set1_epi64x;
 
-    assert!((1..=63).contains(&basek));
-    let mask_k: i64 = ((1u64 << basek) - 1) as i64; // 0..k-1 bits set
-    let sign_k: i64 = (1u64 << (basek - 1)) as i64; // bit k-1
-    let topmask: i64 = (!0u64 << (64 - basek)) as i64; // top k bits set
-    let sh_k: __m256i = _mm256_set1_epi64x(basek as i64);
+    assert!((1..=63).contains(&base2k));
+    let mask_k: i64 = ((1u64 << base2k) - 1) as i64; // 0..k-1 bits set
+    let sign_k: i64 = (1u64 << (base2k - 1)) as i64; // bit k-1
+    let topmask: i64 = (!0u64 << (64 - base2k)) as i64; // top k bits set
+    let sh_k: __m256i = _mm256_set1_epi64x(base2k as i64);
     (
         _mm256_set1_epi64x(mask_k), // mask_k_vec
         _mm256_set1_epi64x(sign_k), // sign_k_vec
@@ -46,14 +46,14 @@ fn get_digit_avx(x: __m256i, mask_k: __m256i, sign_k: __m256i) -> __m256i {
 unsafe fn get_carry_avx(
     x: __m256i,
     digit: __m256i,
-    basek: __m256i,    // _mm256_set1_epi64x(k)
+    base2k: __m256i,   // _mm256_set1_epi64x(k)
     top_mask: __m256i, // (!0 << (64 - k)) broadcast
 ) -> __m256i {
     use std::arch::x86_64::{
         __m256i, _mm256_and_si256, _mm256_cmpgt_epi64, _mm256_or_si256, _mm256_setzero_si256, _mm256_srlv_epi64, _mm256_sub_epi64,
     };
     let diff: __m256i = _mm256_sub_epi64(x, digit);
-    let lsr: __m256i = _mm256_srlv_epi64(diff, basek); // logical >>
+    let lsr: __m256i = _mm256_srlv_epi64(diff, base2k); // logical >>
     let neg: __m256i = _mm256_cmpgt_epi64(_mm256_setzero_si256(), diff); // 0xFFFF.. where v<0
     let fill: __m256i = _mm256_and_si256(neg, top_mask); // top k bits if negative
     _mm256_or_si256(lsr, fill)
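`get_digit_avx` and `get_carry_avx` together split each 64-bit lane x as x = digit + (carry << base2k), with the digit centered. The scalar model the lane-wise ops mirror (same shift tricks as the comments below):

fn digit_and_carry_sketch(base2k: usize, x: i64) -> (i64, i64) {
    // Keep the low base2k bits and sign-extend bit base2k - 1:
    // digit ends up in [-2^(base2k-1), 2^(base2k-1)).
    let digit = (x << (64 - base2k)) >> (64 - base2k);
    // x - digit is a multiple of 2^base2k, so the shift below is exact.
    let carry = (x - digit) >> base2k;
    (digit, carry)
}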
@@ -61,13 +61,121 @@ unsafe fn get_carry_avx(
 
 /// # Safety
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
-/// all inputs must have the same length and must not alias.
+/// `res` and `src` must have the same length and must not alias.
+#[cfg(target_arch = "x86_64")]
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_first_step_carry_only_avx(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+pub fn znx_extract_digit_addmul_avx(base2k: usize, lsh: usize, res: &mut [i64], src: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert!(lsh < basek);
+        assert_eq!(res.len(), src.len());
+        assert!(lsh < base2k);
+    }
+
+    use std::arch::x86_64::{
+        __m256i, _mm256_add_epi64, _mm256_loadu_si256, _mm256_set1_epi64x, _mm256_sllv_epi64, _mm256_storeu_si256,
+    };
+
+    let n: usize = res.len();
+    let span: usize = n >> 2;
+
+    unsafe {
+        let mut rr: *mut __m256i = res.as_mut_ptr() as *mut __m256i;
+        let mut ss: *mut __m256i = src.as_mut_ptr() as *mut __m256i;
+
+        // constants for digit/carry extraction
+        let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
+        let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
+
+        for _ in 0..span {
+            // load source & extract digit/carry
+            let sv: __m256i = _mm256_loadu_si256(ss);
+            let digit_256: __m256i = get_digit_avx(sv, mask, sign);
+            let carry_256: __m256i = get_carry_avx(sv, digit_256, basek_vec, top_mask);
+
+            // res += (digit << lsh)
+            let rv: __m256i = _mm256_loadu_si256(rr);
+            let madd: __m256i = _mm256_sllv_epi64(digit_256, lsh_v);
+            let sum: __m256i = _mm256_add_epi64(rv, madd);
+
+            _mm256_storeu_si256(rr, sum);
+            _mm256_storeu_si256(ss, carry_256);
+
+            rr = rr.add(1);
+            ss = ss.add(1);
+        }
+    }
+
+    // tail (scalar)
+    if !n.is_multiple_of(4) {
+        use poulpy_hal::reference::znx::znx_extract_digit_addmul_ref;
+
+        let off: usize = span << 2;
+        znx_extract_digit_addmul_ref(base2k, lsh, &mut res[off..], &mut src[off..]);
+    }
+}
+
+/// # Safety
+/// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
+/// `res` and `src` must have the same length and must not alias.
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+pub fn znx_normalize_digit_avx(base2k: usize, res: &mut [i64], src: &mut [i64]) {
+    #[cfg(debug_assertions)]
+    {
+        assert_eq!(res.len(), src.len());
+    }
+
+    use std::arch::x86_64::{__m256i, _mm256_add_epi64, _mm256_loadu_si256, _mm256_storeu_si256};
+
+    let n: usize = res.len();
+    let span: usize = n >> 2;
+
+    unsafe {
+        // Pointers to 256-bit lanes
+        let mut rr: *mut __m256i = res.as_mut_ptr() as *mut __m256i;
+        let mut ss: *mut __m256i = src.as_mut_ptr() as *mut __m256i;
+
+        // Constants for digit/carry extraction
+        let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
+
+        for _ in 0..span {
+            // Load res lane
+            let rv: __m256i = _mm256_loadu_si256(rr);
+
+            // Extract digit and carry from res
+            let digit_256: __m256i = get_digit_avx(rv, mask, sign);
+            let carry_256: __m256i = get_carry_avx(rv, digit_256, basek_vec, top_mask);
+
+            // src += carry
+            let sv: __m256i = _mm256_loadu_si256(ss);
+            let sum: __m256i = _mm256_add_epi64(sv, carry_256);
+
+            _mm256_storeu_si256(ss, sum);
+            _mm256_storeu_si256(rr, digit_256);
+
+            rr = rr.add(1);
+            ss = ss.add(1);
+        }
+    }
+
+    // scalar tail
+    if !n.is_multiple_of(4) {
+        use poulpy_hal::reference::znx::znx_normalize_digit_ref;
+
+        let off = span << 2;
+        znx_normalize_digit_ref(base2k, &mut res[off..], &mut src[off..]);
+    }
+}
+
+/// # Safety
+/// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
+/// all inputs must have the same length and must not alias.
+#[target_feature(enable = "avx2")]
+pub fn znx_normalize_first_step_carry_only_avx(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+    #[cfg(debug_assertions)]
+    {
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_loadu_si256, _mm256_storeu_si256};
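Scalar models of the two helpers introduced above, inferred from the AVX bodies (sketches of the per-coefficient semantics, reusing `digit_and_carry_sketch` from earlier; not the `_ref` implementations themselves):

// znx_extract_digit_addmul: pull one digit out of src, leave the carry in
// src, and accumulate the (shifted) digit into res.
fn extract_digit_addmul_sketch(base2k: usize, lsh: usize, res: &mut i64, src: &mut i64) {
    let (digit, carry) = digit_and_carry_sketch(base2k, *src);
    *res += digit << lsh;
    *src = carry;
}

// znx_normalize_digit: reduce res to a centered digit, pushing its carry
// into src.
fn normalize_digit_sketch(base2k: usize, res: &mut i64, src: &mut i64) {
    let (digit, carry) = digit_and_carry_sketch(base2k, *res);
    *src += carry;
    *res = digit;
}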
@@ -81,19 +189,19 @@ pub fn znx_normalize_first_step_carry_only_avx(basek: usize, lsh: usize, x: &[i6
         let mut cc: *mut __m256i = carry.as_ptr() as *mut __m256i;
 
         let (mask, sign, basek_vec, top_mask) = if lsh == 0 {
-            normalize_consts_avx(basek)
+            normalize_consts_avx(base2k)
         } else {
-            normalize_consts_avx(basek - lsh)
+            normalize_consts_avx(base2k - lsh)
         };
 
         for _ in 0..span {
-            let xx_256: __m256i = _mm256_loadu_si256(xx);
+            let xv: __m256i = _mm256_loadu_si256(xx);
 
-            // (x << (64 - basek)) >> (64 - basek)
-            let digit_256: __m256i = get_digit_avx(xx_256, mask, sign);
+            // (x << (64 - base2k)) >> (64 - base2k)
+            let digit_256: __m256i = get_digit_avx(xv, mask, sign);
 
-            // (x - digit) >> basek
-            let carry_256: __m256i = get_carry_avx(xx_256, digit_256, basek_vec, top_mask);
+            // (x - digit) >> base2k
+            let carry_256: __m256i = get_carry_avx(xv, digit_256, basek_vec, top_mask);
 
             _mm256_storeu_si256(cc, carry_256);
 
@@ -106,7 +214,7 @@ pub fn znx_normalize_first_step_carry_only_avx(basek: usize, lsh: usize, x: &[i6
     if !x.len().is_multiple_of(4) {
         use poulpy_hal::reference::znx::znx_normalize_first_step_carry_only_ref;
 
-        znx_normalize_first_step_carry_only_ref(basek, lsh, &x[span << 2..], &mut carry[span << 2..]);
+        znx_normalize_first_step_carry_only_ref(base2k, lsh, &x[span << 2..], &mut carry[span << 2..]);
     }
 }
 
@@ -114,11 +222,11 @@ pub fn znx_normalize_first_step_carry_only_avx(basek: usize, lsh: usize, x: &[i6
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_first_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+pub fn znx_normalize_first_step_inplace_avx(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert!(lsh < basek);
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_loadu_si256, _mm256_set1_epi64x, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -132,16 +240,16 @@ pub fn znx_normalize_first_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
         let mut cc: *mut __m256i = carry.as_ptr() as *mut __m256i;
 
         if lsh == 0 {
-            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek);
+            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
 
             for _ in 0..span {
-                let xx_256: __m256i = _mm256_loadu_si256(xx);
+                let xv: __m256i = _mm256_loadu_si256(xx);
 
-                // (x << (64 - basek)) >> (64 - basek)
-                let digit_256: __m256i = get_digit_avx(xx_256, mask, sign);
+                // (x << (64 - base2k)) >> (64 - base2k)
+                let digit_256: __m256i = get_digit_avx(xv, mask, sign);
 
-                // (x - digit) >> basek
-                let carry_256: __m256i = get_carry_avx(xx_256, digit_256, basek_vec, top_mask);
+                // (x - digit) >> base2k
+                let carry_256: __m256i = get_carry_avx(xv, digit_256, basek_vec, top_mask);
 
                 _mm256_storeu_si256(xx, digit_256);
                 _mm256_storeu_si256(cc, carry_256);
@@ -150,18 +258,18 @@ pub fn znx_normalize_first_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
                 cc = cc.add(1);
             }
         } else {
-            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek - lsh);
+            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
             for _ in 0..span {
-                let xx_256: __m256i = _mm256_loadu_si256(xx);
+                let xv: __m256i = _mm256_loadu_si256(xx);
 
-                // (x << (64 - basek)) >> (64 - basek)
-                let digit_256: __m256i = get_digit_avx(xx_256, mask, sign);
+                // (x << (64 - base2k)) >> (64 - base2k)
+                let digit_256: __m256i = get_digit_avx(xv, mask, sign);
 
-                // (x - digit) >> basek
-                let carry_256: __m256i = get_carry_avx(xx_256, digit_256, basek_vec, top_mask);
+                // (x - digit) >> base2k
+                let carry_256: __m256i = get_carry_avx(xv, digit_256, basek_vec, top_mask);
 
                 _mm256_storeu_si256(xx, _mm256_sllv_epi64(digit_256, lsh_v));
                 _mm256_storeu_si256(cc, carry_256);
@@ -176,7 +284,7 @@ pub fn znx_normalize_first_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
     if !x.len().is_multiple_of(4) {
         use poulpy_hal::reference::znx::znx_normalize_first_step_inplace_ref;
 
-        znx_normalize_first_step_inplace_ref(basek, lsh, &mut x[span << 2..], &mut carry[span << 2..]);
+        znx_normalize_first_step_inplace_ref(base2k, lsh, &mut x[span << 2..], &mut carry[span << 2..]);
     }
 }
 
@@ -184,12 +292,12 @@ pub fn znx_normalize_first_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_first_step_avx(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+pub fn znx_normalize_first_step_avx(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert_eq!(a.len(), carry.len());
-        assert!(lsh < basek);
+        assert_eq!(x.len(), a.len());
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_loadu_si256, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -204,16 +312,16 @@ pub fn znx_normalize_first_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         let mut cc: *mut __m256i = carry.as_ptr() as *mut __m256i;
 
         if lsh == 0 {
-            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek);
+            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
 
             for _ in 0..span {
-                let aa_256: __m256i = _mm256_loadu_si256(aa);
+                let av: __m256i = _mm256_loadu_si256(aa);
 
-                // (x << (64 - basek)) >> (64 - basek)
-                let digit_256: __m256i = get_digit_avx(aa_256, mask, sign);
+                // (x << (64 - base2k)) >> (64 - base2k)
+                let digit_256: __m256i = get_digit_avx(av, mask, sign);
 
-                // (x - digit) >> basek
-                let carry_256: __m256i = get_carry_avx(aa_256, digit_256, basek_vec, top_mask);
+                // (x - digit) >> base2k
+                let carry_256: __m256i = get_carry_avx(av, digit_256, basek_vec, top_mask);
 
                 _mm256_storeu_si256(xx, digit_256);
                 _mm256_storeu_si256(cc, carry_256);
@@ -225,18 +333,18 @@ pub fn znx_normalize_first_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         } else {
             use std::arch::x86_64::_mm256_set1_epi64x;
 
-            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek - lsh);
+            let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
             for _ in 0..span {
-                let aa_256: __m256i = _mm256_loadu_si256(aa);
+                let av: __m256i = _mm256_loadu_si256(aa);
 
-                // (x << (64 - basek)) >> (64 - basek)
-                let digit_256: __m256i = get_digit_avx(aa_256, mask, sign);
+                // (x << (64 - base2k)) >> (64 - base2k)
+                let digit_256: __m256i = get_digit_avx(av, mask, sign);
 
-                // (x - digit) >> basek
-                let carry_256: __m256i = get_carry_avx(aa_256, digit_256, basek_vec, top_mask);
+                // (x - digit) >> base2k
+                let carry_256: __m256i = get_carry_avx(av, digit_256, basek_vec, top_mask);
 
                 _mm256_storeu_si256(xx, _mm256_sllv_epi64(digit_256, lsh_v));
                 _mm256_storeu_si256(cc, carry_256);
@@ -253,7 +361,7 @@ pub fn znx_normalize_first_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         use poulpy_hal::reference::znx::znx_normalize_first_step_ref;
 
         znx_normalize_first_step_ref(
-            basek,
+            base2k,
             lsh,
             &mut x[span << 2..],
             &a[span << 2..],
@@ -266,11 +374,11 @@ pub fn znx_normalize_first_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_middle_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+pub fn znx_normalize_middle_step_inplace_avx(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert!(lsh < basek);
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_add_epi64, _mm256_loadu_si256, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -279,7 +387,7 @@ pub fn znx_normalize_middle_step_inplace_avx(basek: usize, lsh: usize, x: &mut [
 
     let span: usize = n >> 2;
 
-    let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek);
+    let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
 
     unsafe {
         let mut xx: *mut __m256i = x.as_mut_ptr() as *mut __m256i;
@@ -287,13 +395,13 @@ pub fn znx_normalize_middle_step_inplace_avx(basek: usize, lsh: usize, x: &mut [
 
         if lsh == 0 {
             for _ in 0..span {
-                let xx_256: __m256i = _mm256_loadu_si256(xx);
-                let cc_256: __m256i = _mm256_loadu_si256(cc);
+                let xv: __m256i = _mm256_loadu_si256(xx);
+                let cv: __m256i = _mm256_loadu_si256(cc);
 
-                let d0: __m256i = get_digit_avx(xx_256, mask, sign);
-                let c0: __m256i = get_carry_avx(xx_256, d0, basek_vec, top_mask);
+                let d0: __m256i = get_digit_avx(xv, mask, sign);
+                let c0: __m256i = get_carry_avx(xv, d0, basek_vec, top_mask);
 
-                let s: __m256i = _mm256_add_epi64(d0, cc_256);
+                let s: __m256i = _mm256_add_epi64(d0, cv);
                 let x1: __m256i = get_digit_avx(s, mask, sign);
                 let c1: __m256i = get_carry_avx(s, x1, basek_vec, top_mask);
                 let cout: __m256i = _mm256_add_epi64(c0, c1);
@@ -307,20 +415,20 @@ pub fn znx_normalize_middle_step_inplace_avx(basek: usize, lsh: usize, x: &mut [
         } else {
             use std::arch::x86_64::_mm256_set1_epi64x;
 
-            let (mask_lsh, sign_lsh, basek_vec_lsh, top_mask_lsh) = normalize_consts_avx(basek - lsh);
+            let (mask_lsh, sign_lsh, basek_vec_lsh, top_mask_lsh) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
             for _ in 0..span {
-                let xx_256: __m256i = _mm256_loadu_si256(xx);
-                let cc_256: __m256i = _mm256_loadu_si256(cc);
+                let xv: __m256i = _mm256_loadu_si256(xx);
+                let cv: __m256i = _mm256_loadu_si256(cc);
 
-                let d0: __m256i = get_digit_avx(xx_256, mask_lsh, sign_lsh);
-                let c0: __m256i = get_carry_avx(xx_256, d0, basek_vec_lsh, top_mask_lsh);
+                let d0: __m256i = get_digit_avx(xv, mask_lsh, sign_lsh);
+                let c0: __m256i = get_carry_avx(xv, d0, basek_vec_lsh, top_mask_lsh);
 
                 let d0_lsh: __m256i = _mm256_sllv_epi64(d0, lsh_v);
 
-                let s: __m256i = _mm256_add_epi64(d0_lsh, cc_256);
+                let s: __m256i = _mm256_add_epi64(d0_lsh, cv);
                 let x1: __m256i = get_digit_avx(s, mask, sign);
                 let c1: __m256i = get_carry_avx(s, x1, basek_vec, top_mask);
                 let cout: __m256i = _mm256_add_epi64(c0, c1);
@@ -337,7 +445,7 @@ pub fn znx_normalize_middle_step_inplace_avx(basek: usize, lsh: usize, x: &mut [
     if !x.len().is_multiple_of(4) {
         use poulpy_hal::reference::znx::znx_normalize_middle_step_inplace_ref;
 
-        znx_normalize_middle_step_inplace_ref(basek, lsh, &mut x[span << 2..], &mut carry[span << 2..]);
+        znx_normalize_middle_step_inplace_ref(base2k, lsh, &mut x[span << 2..], &mut carry[span << 2..]);
     }
 }
 
@@ -345,11 +453,11 @@ pub fn znx_normalize_middle_step_inplace_avx(basek: usize, lsh: usize, x: &mut [
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_middle_step_carry_only_avx(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+pub fn znx_normalize_middle_step_carry_only_avx(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert!(lsh < basek);
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_add_epi64, _mm256_loadu_si256, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -358,7 +466,7 @@ pub fn znx_normalize_middle_step_carry_only_avx(basek: usize, lsh: usize, x: &[i
 
     let span: usize = n >> 2;
 
-    let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek);
+    let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
 
     unsafe {
         let mut xx: *const __m256i = x.as_ptr() as *const __m256i;
@@ -366,13 +474,13 @@ pub fn znx_normalize_middle_step_carry_only_avx(basek: usize, lsh: usize, x: &[i
 
         if lsh == 0 {
             for _ in 0..span {
-                let xx_256: __m256i = _mm256_loadu_si256(xx);
-                let cc_256: __m256i = _mm256_loadu_si256(cc);
+                let xv: __m256i = _mm256_loadu_si256(xx);
+                let cv: __m256i = _mm256_loadu_si256(cc);
 
-                let d0: __m256i = get_digit_avx(xx_256, mask, sign);
-                let c0: __m256i = get_carry_avx(xx_256, d0, basek_vec, top_mask);
+                let d0: __m256i = get_digit_avx(xv, mask, sign);
+                let c0: __m256i = get_carry_avx(xv, d0, basek_vec, top_mask);
 
-                let s: __m256i = _mm256_add_epi64(d0, cc_256);
+                let s: __m256i = _mm256_add_epi64(d0, cv);
                 let x1: __m256i = get_digit_avx(s, mask, sign);
                 let c1: __m256i = get_carry_avx(s, x1, basek_vec, top_mask);
                 let cout: __m256i = _mm256_add_epi64(c0, c1);
@@ -385,20 +493,20 @@ pub fn znx_normalize_middle_step_carry_only_avx(basek: usize, lsh: usize, x: &[i
         } else {
             use std::arch::x86_64::_mm256_set1_epi64x;
 
-            let (mask_lsh, sign_lsh, basek_vec_lsh, top_mask_lsh) = normalize_consts_avx(basek - lsh);
+            let (mask_lsh, sign_lsh, basek_vec_lsh, top_mask_lsh) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
             for _ in 0..span {
-                let xx_256: __m256i = _mm256_loadu_si256(xx);
-                let cc_256: __m256i = _mm256_loadu_si256(cc);
+                let xv: __m256i = _mm256_loadu_si256(xx);
+                let cv: __m256i = _mm256_loadu_si256(cc);
 
-                let d0: __m256i = get_digit_avx(xx_256, mask_lsh, sign_lsh);
-                let c0: __m256i = get_carry_avx(xx_256, d0, basek_vec_lsh, top_mask_lsh);
+                let d0: __m256i = get_digit_avx(xv, mask_lsh, sign_lsh);
+                let c0: __m256i = get_carry_avx(xv, d0, basek_vec_lsh, top_mask_lsh);
 
                 let d0_lsh: __m256i = _mm256_sllv_epi64(d0, lsh_v);
 
-                let s: __m256i = _mm256_add_epi64(d0_lsh, cc_256);
+                let s: __m256i = _mm256_add_epi64(d0_lsh, cv);
                 let x1: __m256i = get_digit_avx(s, mask, sign);
                 let c1: __m256i = get_carry_avx(s, x1, basek_vec, top_mask);
                 let cout: __m256i = _mm256_add_epi64(c0, c1);
@@ -414,7 +522,7 @@ pub fn znx_normalize_middle_step_carry_only_avx(basek: usize, lsh: usize, x: &[i
     if !x.len().is_multiple_of(4) {
         use poulpy_hal::reference::znx::znx_normalize_middle_step_carry_only_ref;
 
-        znx_normalize_middle_step_carry_only_ref(basek, lsh, &x[span << 2..], &mut carry[span << 2..]);
+        znx_normalize_middle_step_carry_only_ref(base2k, lsh, &x[span << 2..], &mut carry[span << 2..]);
     }
 }
 
@@ -422,12 +530,12 @@ pub fn znx_normalize_middle_step_carry_only_avx(basek: usize, lsh: usize, x: &[i
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_middle_step_avx(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+pub fn znx_normalize_middle_step_avx(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert_eq!(a.len(), carry.len());
-        assert!(lsh < basek);
+        assert_eq!(x.len(), a.len());
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_add_epi64, _mm256_loadu_si256, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -436,7 +544,7 @@ pub fn znx_normalize_middle_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
 
     let span: usize = n >> 2;
 
-    let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(basek);
+    let (mask, sign, basek_vec, top_mask) = normalize_consts_avx(base2k);
 
     unsafe {
         let mut xx: *mut __m256i = x.as_mut_ptr() as *mut __m256i;
@@ -445,13 +553,13 @@ pub fn znx_normalize_middle_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
 
         if lsh == 0 {
             for _ in 0..span {
-                let aa_256: __m256i = _mm256_loadu_si256(aa);
-                let cc_256: __m256i = _mm256_loadu_si256(cc);
+                let av: __m256i = _mm256_loadu_si256(aa);
+                let cv: __m256i = _mm256_loadu_si256(cc);
 
-                let d0: __m256i = get_digit_avx(aa_256, mask, sign);
-                let c0: __m256i = get_carry_avx(aa_256, d0, basek_vec, top_mask);
+                let d0: __m256i = get_digit_avx(av, mask, sign);
+                let c0: __m256i = get_carry_avx(av, d0, basek_vec, top_mask);
 
-                let s: __m256i = _mm256_add_epi64(d0, cc_256);
+                let s: __m256i = _mm256_add_epi64(d0, cv);
                 let x1: __m256i = get_digit_avx(s, mask, sign);
                 let c1: __m256i = get_carry_avx(s, x1, basek_vec, top_mask);
                 let cout: __m256i = _mm256_add_epi64(c0, c1);
@@ -466,20 +574,20 @@ pub fn znx_normalize_middle_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         } else {
             use std::arch::x86_64::_mm256_set1_epi64x;
 
-            let (mask_lsh, sign_lsh, basek_vec_lsh, top_mask_lsh) = normalize_consts_avx(basek - lsh);
+            let (mask_lsh, sign_lsh, basek_vec_lsh, top_mask_lsh) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
             for _ in 0..span {
-                let aa_256: __m256i = _mm256_loadu_si256(aa);
-                let cc_256: __m256i = _mm256_loadu_si256(cc);
+                let av: __m256i = _mm256_loadu_si256(aa);
+                let cv: __m256i = _mm256_loadu_si256(cc);
 
-                let d0: __m256i = get_digit_avx(aa_256, mask_lsh, sign_lsh);
-                let c0: __m256i = get_carry_avx(aa_256, d0, basek_vec_lsh, top_mask_lsh);
+                let d0: __m256i = get_digit_avx(av, mask_lsh, sign_lsh);
+                let c0: __m256i = get_carry_avx(av, d0, basek_vec_lsh, top_mask_lsh);
 
                 let d0_lsh: __m256i = _mm256_sllv_epi64(d0, lsh_v);
 
-                let s: __m256i = _mm256_add_epi64(d0_lsh, cc_256);
+                let s: __m256i = _mm256_add_epi64(d0_lsh, cv);
                 let x1: __m256i = get_digit_avx(s, mask, sign);
                 let c1: __m256i = get_carry_avx(s, x1, basek_vec, top_mask);
                 let cout: __m256i = _mm256_add_epi64(c0, c1);
@@ -498,7 +606,7 @@ pub fn znx_normalize_middle_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         use poulpy_hal::reference::znx::znx_normalize_middle_step_ref;
 
         znx_normalize_middle_step_ref(
-            basek,
+            base2k,
             lsh,
             &mut x[span << 2..],
             &a[span << 2..],
@@ -511,11 +619,11 @@ pub fn znx_normalize_middle_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_final_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+pub fn znx_normalize_final_step_inplace_avx(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert!(lsh < basek);
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_add_epi64, _mm256_loadu_si256, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -524,7 +632,7 @@ pub fn znx_normalize_final_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
 
     let span: usize = n >> 2;
 
-    let (mask, sign, _, _) = normalize_consts_avx(basek);
+    let (mask, sign, _, _) = normalize_consts_avx(base2k);
 
     unsafe {
         let mut xx: *mut __m256i = x.as_mut_ptr() as *mut __m256i;
@@ -547,7 +655,7 @@ pub fn znx_normalize_final_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
         } else {
             use std::arch::x86_64::_mm256_set1_epi64x;
 
-            let (mask_lsh, sign_lsh, _, _) = normalize_consts_avx(basek - lsh);
+            let (mask_lsh, sign_lsh, _, _) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
@@ -573,7 +681,7 @@ pub fn znx_normalize_final_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
     if !x.len().is_multiple_of(4) {
         use poulpy_hal::reference::znx::znx_normalize_final_step_inplace_ref;
 
-        znx_normalize_final_step_inplace_ref(basek, lsh, &mut x[span << 2..], &mut carry[span << 2..]);
+        znx_normalize_final_step_inplace_ref(base2k, lsh, &mut x[span << 2..], &mut carry[span << 2..]);
     }
 }
 
@@ -581,12 +689,12 @@ pub fn znx_normalize_final_step_inplace_avx(basek: usize, lsh: usize, x: &mut [i
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_normalize_final_step_avx(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+pub fn znx_normalize_final_step_avx(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(x.len(), carry.len());
-        assert_eq!(a.len(), carry.len());
-        assert!(lsh < basek);
+        assert_eq!(x.len(), a.len());
+        assert!(x.len() <= carry.len());
+        assert!(lsh < base2k);
     }
 
     use std::arch::x86_64::{_mm256_add_epi64, _mm256_loadu_si256, _mm256_sllv_epi64, _mm256_storeu_si256};
@@ -595,7 +703,7 @@ pub fn znx_normalize_final_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
 
     let span: usize = n >> 2;
 
-    let (mask, sign, _, _) = normalize_consts_avx(basek);
+    let (mask, sign, _, _) = normalize_consts_avx(base2k);
 
     unsafe {
         let mut xx: *mut __m256i = x.as_mut_ptr() as *mut __m256i;
@@ -620,7 +728,7 @@ pub fn znx_normalize_final_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         } else {
             use std::arch::x86_64::_mm256_set1_epi64x;
 
-            let (mask_lsh, sign_lsh, _, _) = normalize_consts_avx(basek - lsh);
+            let (mask_lsh, sign_lsh, _, _) = normalize_consts_avx(base2k - lsh);
 
             let lsh_v: __m256i = _mm256_set1_epi64x(lsh as i64);
 
@@ -647,7 +755,7 @@ pub fn znx_normalize_final_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
         use poulpy_hal::reference::znx::znx_normalize_final_step_ref;
 
         znx_normalize_final_step_ref(
-            basek,
+            base2k,
             lsh,
             &mut x[span << 2..],
             &a[span << 2..],
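How the first/middle/final step functions are meant to compose — a sketch of a driver, under the assumptions that limb 0 holds the most-significant digits, `lsh` is 0, there are at least two limbs, and `carry` is at least as long as each limb (the real driver lives in poulpy-hal's reference normalization and may differ):

fn normalize_limbs_sketch(base2k: usize, limbs: &mut [Vec<i64>], carry: &mut [i64]) {
    let m = limbs.len();
    // Visit limbs from least to most significant, threading the carry slice.
    for j in (0..m).rev() {
        let x: &mut [i64] = &mut limbs[j];
        // Safety: caller has verified AVX2 support.
        unsafe {
            if j == m - 1 {
                znx_normalize_first_step_inplace_avx(base2k, 0, x, carry);
            } else if j > 0 {
                znx_normalize_middle_step_inplace_avx(base2k, 0, x, carry);
            } else {
                znx_normalize_final_step_inplace_avx(base2k, 0, x, carry);
            }
        }
    }
}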
@@ -658,9 +766,9 @@ pub fn znx_normalize_final_step_avx(basek: usize, lsh: usize, x: &mut [i64], a:
 
 mod tests {
     use poulpy_hal::reference::znx::{
-        get_carry, get_digit, znx_normalize_final_step_inplace_ref, znx_normalize_final_step_ref,
-        znx_normalize_first_step_inplace_ref, znx_normalize_first_step_ref, znx_normalize_middle_step_inplace_ref,
-        znx_normalize_middle_step_ref,
+        get_carry_i64, get_digit_i64, znx_extract_digit_addmul_ref, znx_normalize_digit_ref,
+        znx_normalize_final_step_inplace_ref, znx_normalize_final_step_ref, znx_normalize_first_step_inplace_ref,
+        znx_normalize_first_step_ref, znx_normalize_middle_step_inplace_ref, znx_normalize_middle_step_ref,
     };
 
     use super::*;
@@ -670,7 +778,7 @@ mod tests {
     #[allow(dead_code)]
     #[target_feature(enable = "avx2")]
     fn test_get_digit_avx_internal() {
-        let basek: usize = 12;
+        let base2k: usize = 12;
         let x: [i64; 4] = [
             7638646372408325293,
             -61440197422348985,
@@ -678,15 +786,15 @@ mod tests {
             -4835376105455195188,
         ];
         let y0: Vec<i64> = vec![
-            get_digit(basek, x[0]),
-            get_digit(basek, x[1]),
-            get_digit(basek, x[2]),
-            get_digit(basek, x[3]),
+            get_digit_i64(base2k, x[0]),
+            get_digit_i64(base2k, x[1]),
+            get_digit_i64(base2k, x[2]),
+            get_digit_i64(base2k, x[3]),
         ];
         let mut y1: Vec<i64> = vec![0i64; 4];
         unsafe {
             let x_256: __m256i = _mm256_loadu_si256(x.as_ptr() as *const __m256i);
-            let (mask, sign, _, _) = normalize_consts_avx(basek);
+            let (mask, sign, _, _) = normalize_consts_avx(base2k);
             let digit: __m256i = get_digit_avx(x_256, mask, sign);
             _mm256_storeu_si256(y1.as_mut_ptr() as *mut __m256i, digit);
         }
@@ -707,7 +815,7 @@ mod tests {
     #[allow(dead_code)]
     #[target_feature(enable = "avx2")]
     fn test_get_carry_avx_internal() {
-        let basek: usize = 12;
+        let base2k: usize = 12;
         let x: [i64; 4] = [
             7638646372408325293,
             -61440197422348985,
@@ -716,16 +824,16 @@ mod tests {
         ];
         let carry: [i64; 4] = [1174467039, -144794816, -1466676977, 513122840];
         let y0: Vec<i64> = vec![
-            get_carry(basek, x[0], carry[0]),
-            get_carry(basek, x[1], carry[1]),
-            get_carry(basek, x[2], carry[2]),
-            get_carry(basek, x[3], carry[3]),
+            get_carry_i64(base2k, x[0], carry[0]),
+            get_carry_i64(base2k, x[1], carry[1]),
+            get_carry_i64(base2k, x[2], carry[2]),
+            get_carry_i64(base2k, x[3], carry[3]),
         ];
         let mut y1: Vec<i64> = vec![0i64; 4];
         unsafe {
             let x_256: __m256i = _mm256_loadu_si256(x.as_ptr() as *const __m256i);
             let d_256: __m256i = _mm256_loadu_si256(carry.as_ptr() as *const __m256i);
-            let (_, _, basek_vec, top_mask) = normalize_consts_avx(basek);
+            let (_, _, basek_vec, top_mask) = normalize_consts_avx(base2k);
             let digit: __m256i = get_carry_avx(x_256, d_256, basek_vec, top_mask);
             _mm256_storeu_si256(y1.as_mut_ptr() as *mut __m256i, digit);
         }
@@ -762,16 +870,16 @@ mod tests {
|
|||||||
];
|
];
|
||||||
let mut c1: [i64; 4] = c0;
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
let basek = 12;
|
let base2k = 12;
|
||||||
|
|
||||||
znx_normalize_first_step_inplace_ref(basek, 0, &mut y0, &mut c0);
|
znx_normalize_first_step_inplace_ref(base2k, 0, &mut y0, &mut c0);
|
||||||
znx_normalize_first_step_inplace_avx(basek, 0, &mut y1, &mut c1);
|
znx_normalize_first_step_inplace_avx(base2k, 0, &mut y1, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
znx_normalize_first_step_inplace_ref(basek, basek - 1, &mut y0, &mut c0);
|
znx_normalize_first_step_inplace_ref(base2k, base2k - 1, &mut y0, &mut c0);
|
||||||
znx_normalize_first_step_inplace_avx(basek, basek - 1, &mut y1, &mut c1);
|
znx_normalize_first_step_inplace_avx(base2k, base2k - 1, &mut y1, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
@@ -807,16 +915,16 @@ mod tests {
|
|||||||
];
|
];
|
||||||
let mut c1: [i64; 4] = c0;
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
let basek = 12;
|
let base2k = 12;
|
||||||
|
|
||||||
znx_normalize_middle_step_inplace_ref(basek, 0, &mut y0, &mut c0);
|
znx_normalize_middle_step_inplace_ref(base2k, 0, &mut y0, &mut c0);
|
||||||
znx_normalize_middle_step_inplace_avx(basek, 0, &mut y1, &mut c1);
|
znx_normalize_middle_step_inplace_avx(base2k, 0, &mut y1, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
znx_normalize_middle_step_inplace_ref(basek, basek - 1, &mut y0, &mut c0);
|
znx_normalize_middle_step_inplace_ref(base2k, base2k - 1, &mut y0, &mut c0);
|
||||||
znx_normalize_middle_step_inplace_avx(basek, basek - 1, &mut y1, &mut c1);
|
znx_normalize_middle_step_inplace_avx(base2k, base2k - 1, &mut y1, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
@@ -852,16 +960,16 @@ mod tests {
|
|||||||
];
|
];
|
||||||
let mut c1: [i64; 4] = c0;
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
let basek = 12;
|
let base2k = 12;
|
||||||
|
|
||||||
znx_normalize_final_step_inplace_ref(basek, 0, &mut y0, &mut c0);
|
znx_normalize_final_step_inplace_ref(base2k, 0, &mut y0, &mut c0);
|
||||||
znx_normalize_final_step_inplace_avx(basek, 0, &mut y1, &mut c1);
|
znx_normalize_final_step_inplace_avx(base2k, 0, &mut y1, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
znx_normalize_final_step_inplace_ref(basek, basek - 1, &mut y0, &mut c0);
|
znx_normalize_final_step_inplace_ref(base2k, base2k - 1, &mut y0, &mut c0);
|
||||||
znx_normalize_final_step_inplace_avx(basek, basek - 1, &mut y1, &mut c1);
|
znx_normalize_final_step_inplace_avx(base2k, base2k - 1, &mut y1, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
@@ -898,16 +1006,16 @@ mod tests {
|
|||||||
];
|
];
|
||||||
let mut c1: [i64; 4] = c0;
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
let basek = 12;
|
let base2k = 12;
|
||||||
|
|
||||||
znx_normalize_first_step_ref(basek, 0, &mut y0, &a, &mut c0);
|
znx_normalize_first_step_ref(base2k, 0, &mut y0, &a, &mut c0);
|
||||||
znx_normalize_first_step_avx(basek, 0, &mut y1, &a, &mut c1);
|
znx_normalize_first_step_avx(base2k, 0, &mut y1, &a, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
znx_normalize_first_step_ref(basek, basek - 1, &mut y0, &a, &mut c0);
|
znx_normalize_first_step_ref(base2k, base2k - 1, &mut y0, &a, &mut c0);
|
||||||
znx_normalize_first_step_avx(basek, basek - 1, &mut y1, &a, &mut c1);
|
znx_normalize_first_step_avx(base2k, base2k - 1, &mut y1, &a, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
@@ -944,16 +1052,16 @@ mod tests {
|
|||||||
];
|
];
|
||||||
let mut c1: [i64; 4] = c0;
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
let basek = 12;
|
let base2k = 12;
|
||||||
|
|
||||||
znx_normalize_middle_step_ref(basek, 0, &mut y0, &a, &mut c0);
|
znx_normalize_middle_step_ref(base2k, 0, &mut y0, &a, &mut c0);
|
||||||
znx_normalize_middle_step_avx(basek, 0, &mut y1, &a, &mut c1);
|
znx_normalize_middle_step_avx(base2k, 0, &mut y1, &a, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
znx_normalize_middle_step_ref(basek, basek - 1, &mut y0, &a, &mut c0);
|
znx_normalize_middle_step_ref(base2k, base2k - 1, &mut y0, &a, &mut c0);
|
||||||
znx_normalize_middle_step_avx(basek, basek - 1, &mut y1, &a, &mut c1);
|
znx_normalize_middle_step_avx(base2k, base2k - 1, &mut y1, &a, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
@@ -990,16 +1098,16 @@ mod tests {
|
|||||||
];
|
];
|
||||||
let mut c1: [i64; 4] = c0;
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
let basek = 12;
|
let base2k = 12;
|
||||||
|
|
||||||
znx_normalize_final_step_ref(basek, 0, &mut y0, &a, &mut c0);
|
znx_normalize_final_step_ref(base2k, 0, &mut y0, &a, &mut c0);
|
||||||
znx_normalize_final_step_avx(basek, 0, &mut y1, &a, &mut c1);
|
znx_normalize_final_step_avx(base2k, 0, &mut y1, &a, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
znx_normalize_final_step_ref(basek, basek - 1, &mut y0, &a, &mut c0);
|
znx_normalize_final_step_ref(base2k, base2k - 1, &mut y0, &a, &mut c0);
|
||||||
znx_normalize_final_step_avx(basek, basek - 1, &mut y1, &a, &mut c1);
|
znx_normalize_final_step_avx(base2k, base2k - 1, &mut y1, &a, &mut c1);
|
||||||
|
|
||||||
assert_eq!(y0, y1);
|
assert_eq!(y0, y1);
|
||||||
assert_eq!(c0, c1);
|
assert_eq!(c0, c1);
|
||||||
@@ -1015,4 +1123,86 @@ mod tests {
|
|||||||
test_znx_normalize_final_step_avx_internal();
|
test_znx_normalize_final_step_avx_internal();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[target_feature(enable = "avx2")]
|
||||||
|
fn znx_extract_digit_addmul_internal() {
|
||||||
|
let mut y0: [i64; 4] = [
|
||||||
|
7638646372408325293,
|
||||||
|
-61440197422348985,
|
||||||
|
6835891051541717957,
|
||||||
|
-4835376105455195188,
|
||||||
|
];
|
||||||
|
let mut y1: [i64; 4] = y0;
|
||||||
|
|
||||||
|
let mut c0: [i64; 4] = [
|
||||||
|
621182201135793202,
|
||||||
|
9000856573317006236,
|
||||||
|
5542252755421113668,
|
||||||
|
-6036847263131690631,
|
||||||
|
];
|
||||||
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
|
let base2k: usize = 12;
|
||||||
|
|
||||||
|
znx_extract_digit_addmul_ref(base2k, 0, &mut y0, &mut c0);
|
||||||
|
znx_extract_digit_addmul_avx(base2k, 0, &mut y1, &mut c1);
|
||||||
|
|
||||||
|
assert_eq!(y0, y1);
|
||||||
|
assert_eq!(c0, c1);
|
||||||
|
|
||||||
|
znx_extract_digit_addmul_ref(base2k, base2k - 1, &mut y0, &mut c0);
|
||||||
|
znx_extract_digit_addmul_avx(base2k, base2k - 1, &mut y1, &mut c1);
|
||||||
|
|
||||||
|
assert_eq!(y0, y1);
|
||||||
|
assert_eq!(c0, c1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_znx_extract_digit_addmul_avx() {
|
||||||
|
if !std::is_x86_feature_detected!("avx2") {
|
||||||
|
eprintln!("skipping: CPU lacks avx2");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
unsafe {
|
||||||
|
znx_extract_digit_addmul_internal();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[target_feature(enable = "avx2")]
|
||||||
|
fn znx_normalize_digit_internal() {
|
||||||
|
let mut y0: [i64; 4] = [
|
||||||
|
7638646372408325293,
|
||||||
|
-61440197422348985,
|
||||||
|
6835891051541717957,
|
||||||
|
-4835376105455195188,
|
||||||
|
];
|
||||||
|
let mut y1: [i64; 4] = y0;
|
||||||
|
|
||||||
|
let mut c0: [i64; 4] = [
|
||||||
|
621182201135793202,
|
||||||
|
9000856573317006236,
|
||||||
|
5542252755421113668,
|
||||||
|
-6036847263131690631,
|
||||||
|
];
|
||||||
|
let mut c1: [i64; 4] = c0;
|
||||||
|
|
||||||
|
let base2k: usize = 12;
|
||||||
|
|
||||||
|
znx_normalize_digit_ref(base2k, &mut y0, &mut c0);
|
||||||
|
znx_normalize_digit_avx(base2k, &mut y1, &mut c1);
|
||||||
|
|
||||||
|
assert_eq!(y0, y1);
|
||||||
|
assert_eq!(c0, c1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_znx_normalize_digit_internal_avx() {
|
||||||
|
if !std::is_x86_feature_detected!("avx2") {
|
||||||
|
eprintln!("skipping: CPU lacks avx2");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
unsafe {
|
||||||
|
znx_normalize_digit_internal();
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
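For orientation, `get_digit_i64`/`get_carry_i64` split a 64-bit word into a balanced base-2^base2k digit and a carry, and the AVX2 kernels above must agree with them lane-for-lane. A standalone Rust sketch of the assumed scalar semantics follows; the exact mask/shift details and carry folding in poulpy may differ, so treat the helper names with the `_sketch` suffix as hypothetical.

// Assumed semantics mirrored by the AVX2 kernels: x = carry * 2^base2k + digit,
// with the digit centered in [-2^(base2k-1), 2^(base2k-1)).
fn get_digit_i64_sketch(base2k: usize, x: i64) -> i64 {
    let base = 1i64 << base2k;
    let half = base >> 1;
    let d = x & (base - 1); // low base2k bits, in [0, 2^base2k)
    if d >= half { d - base } else { d } // re-center around 0
}

fn get_carry_i64_sketch(base2k: usize, x: i64, carry_in: i64) -> i64 {
    // x - digit is an exact multiple of 2^base2k, so an arithmetic shift
    // recovers the carry; the incoming carry is folded in additively here.
    (x.wrapping_sub(get_digit_i64_sketch(base2k, x)) >> base2k).wrapping_add(carry_in)
}

fn main() {
    let (k, x) = (12usize, 7_638_646_372_408_325_293i64);
    let d = get_digit_i64_sketch(k, x);
    let c = get_carry_i64_sketch(k, x, 0);
    assert_eq!(c.wrapping_mul(1 << k).wrapping_add(d), x);
    println!("digit = {d}, carry = {c}");
}
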
@@ -41,7 +41,7 @@ pub fn znx_sub_avx(res: &mut [i64], a: &[i64], b: &[i64]) {
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_sub_ab_inplace_avx(res: &mut [i64], a: &[i64]) {
+pub fn znx_sub_inplace_avx(res: &mut [i64], a: &[i64]) {
     #[cfg(debug_assertions)]
     {
         assert_eq!(res.len(), a.len());
@@ -67,9 +67,9 @@ pub fn znx_sub_ab_inplace_avx(res: &mut [i64], a: &[i64]) {

     // tail
     if !res.len().is_multiple_of(4) {
-        use poulpy_hal::reference::znx::znx_sub_ab_inplace_ref;
+        use poulpy_hal::reference::znx::znx_sub_inplace_ref;

-        znx_sub_ab_inplace_ref(&mut res[span << 2..], &a[span << 2..]);
+        znx_sub_inplace_ref(&mut res[span << 2..], &a[span << 2..]);
     }
 }

@@ -77,7 +77,7 @@ pub fn znx_sub_ab_inplace_avx(res: &mut [i64], a: &[i64]) {
 /// Caller must ensure the CPU supports AVX2 (e.g., via `is_x86_feature_detected!("avx2")`);
 /// all inputs must have the same length and must not alias.
 #[target_feature(enable = "avx2")]
-pub fn znx_sub_ba_inplace_avx(res: &mut [i64], a: &[i64]) {
+pub fn znx_sub_negate_inplace_avx(res: &mut [i64], a: &[i64]) {
     #[cfg(debug_assertions)]
     {
         assert_eq!(res.len(), a.len());
@@ -103,8 +103,8 @@ pub fn znx_sub_ba_inplace_avx(res: &mut [i64], a: &[i64]) {

     // tail
     if !res.len().is_multiple_of(4) {
-        use poulpy_hal::reference::znx::znx_sub_ba_inplace_ref;
+        use poulpy_hal::reference::znx::znx_sub_negate_inplace_ref;

-        znx_sub_ba_inplace_ref(&mut res[span << 2..], &a[span << 2..]);
+        znx_sub_negate_inplace_ref(&mut res[span << 2..], &a[span << 2..]);
     }
 }
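The rename drops the `ab`/`ba` suffixes for names that say what lands in `res`: `sub_inplace` computes `res -= a`, while `sub_negate_inplace` computes `res = a - res`. A minimal scalar sketch of the chunk-plus-tail shape these AVX kernels use (the SIMD body is stood in by a plain loop; the shape is an assumption read off the diff):

// Process 4 i64 lanes per AVX2 iteration, then let the scalar reference
// routine finish the tail when the length is not a multiple of 4.
fn znx_sub_inplace_sketch(res: &mut [i64], a: &[i64]) {
    assert_eq!(res.len(), a.len());
    let span = res.len() >> 2; // number of full 4-lane blocks
    for i in 0..(span << 2) {
        res[i] = res[i].wrapping_sub(a[i]); // stands in for the SIMD body
    }
    // tail, mirroring the `_ref` fallback in the diff
    for i in (span << 2)..res.len() {
        res[i] = res[i].wrapping_sub(a[i]);
    }
}

fn main() {
    let mut res = vec![10, 20, 30, 40, 50];
    znx_sub_inplace_sketch(&mut res, &[1, 2, 3, 4, 5]);
    assert_eq!(res, [9, 18, 27, 36, 45]);
}
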
@@ -1,10 +1,11 @@
 use poulpy_hal::reference::fft64::{
     reim::{
         ReimAdd, ReimAddInplace, ReimAddMul, ReimCopy, ReimDFTExecute, ReimFFTTable, ReimFromZnx, ReimIFFTTable, ReimMul,
-        ReimMulInplace, ReimNegate, ReimNegateInplace, ReimSub, ReimSubABInplace, ReimSubBAInplace, ReimToZnx, ReimToZnxInplace,
-        ReimZero, fft_ref, ifft_ref, reim_add_inplace_ref, reim_add_ref, reim_addmul_ref, reim_copy_ref, reim_from_znx_i64_ref,
-        reim_mul_inplace_ref, reim_mul_ref, reim_negate_inplace_ref, reim_negate_ref, reim_sub_ab_inplace_ref,
-        reim_sub_ba_inplace_ref, reim_sub_ref, reim_to_znx_i64_inplace_ref, reim_to_znx_i64_ref, reim_zero_ref,
+        ReimMulInplace, ReimNegate, ReimNegateInplace, ReimSub, ReimSubInplace, ReimSubNegateInplace, ReimToZnx,
+        ReimToZnxInplace, ReimZero, fft_ref, ifft_ref, reim_add_inplace_ref, reim_add_ref, reim_addmul_ref, reim_copy_ref,
+        reim_from_znx_i64_ref, reim_mul_inplace_ref, reim_mul_ref, reim_negate_inplace_ref, reim_negate_ref,
+        reim_sub_inplace_ref, reim_sub_negate_inplace_ref, reim_sub_ref, reim_to_znx_i64_inplace_ref, reim_to_znx_i64_ref,
+        reim_zero_ref,
     },
     reim4::{
         Reim4Extract1Blk, Reim4Mat1ColProd, Reim4Mat2Cols2ndColProd, Reim4Mat2ColsProd, Reim4Save1Blk, Reim4Save2Blks,
@@ -69,17 +70,17 @@ impl ReimSub for FFT64Ref {
     }
 }

-impl ReimSubABInplace for FFT64Ref {
+impl ReimSubInplace for FFT64Ref {
     #[inline(always)]
-    fn reim_sub_ab_inplace(res: &mut [f64], a: &[f64]) {
-        reim_sub_ab_inplace_ref(res, a);
+    fn reim_sub_inplace(res: &mut [f64], a: &[f64]) {
+        reim_sub_inplace_ref(res, a);
     }
 }

-impl ReimSubBAInplace for FFT64Ref {
+impl ReimSubNegateInplace for FFT64Ref {
     #[inline(always)]
-    fn reim_sub_ba_inplace(res: &mut [f64], a: &[f64]) {
-        reim_sub_ba_inplace_ref(res, a);
+    fn reim_sub_negate_inplace(res: &mut [f64], a: &[f64]) {
+        reim_sub_negate_inplace_ref(res, a);
     }
 }
@@ -253,9 +253,6 @@ fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]
             (take_slice, rem_slice)
         }
     } else {
-        panic!(
-            "Attempted to take {} from scratch with {} aligned bytes left",
-            take_len, aligned_len,
-        );
+        panic!("Attempted to take {take_len} from scratch with {aligned_len} aligned bytes left");
     }
 }
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
TakeSlice, VecZnxAutomorphismInplaceTmpBytes, VecZnxMergeRingsTmpBytes, VecZnxMulXpMinusOneInplaceTmpBytes,
|
TakeSlice, VecZnxAutomorphismInplaceTmpBytes, VecZnxLshTmpBytes, VecZnxMergeRingsTmpBytes,
|
||||||
VecZnxNormalizeTmpBytes, VecZnxRotateInplaceTmpBytes, VecZnxSplitRingTmpBytes,
|
VecZnxMulXpMinusOneInplaceTmpBytes, VecZnxNormalizeTmpBytes, VecZnxRotateInplaceTmpBytes, VecZnxRshTmpBytes,
|
||||||
|
VecZnxSplitRingTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
|
layouts::{Module, ScalarZnxToRef, Scratch, VecZnxToMut, VecZnxToRef},
|
||||||
oep::{
|
oep::{
|
||||||
@@ -12,7 +13,7 @@ use poulpy_hal::{
|
|||||||
VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
|
VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
|
||||||
VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
|
VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
|
||||||
VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
|
VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
|
||||||
VecZnxSplitRingTmpBytesImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarImpl,
|
VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
|
||||||
VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
|
VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
|
||||||
},
|
},
|
||||||
reference::vec_znx::{
|
reference::vec_znx::{
|
||||||
@@ -23,7 +24,7 @@ use poulpy_hal::{
|
|||||||
vec_znx_mul_xp_minus_one_inplace_tmp_bytes, vec_znx_negate, vec_znx_negate_inplace, vec_znx_normalize,
|
vec_znx_mul_xp_minus_one_inplace_tmp_bytes, vec_znx_negate, vec_znx_negate_inplace, vec_znx_normalize,
|
||||||
vec_znx_normalize_inplace, vec_znx_normalize_tmp_bytes, vec_znx_rotate, vec_znx_rotate_inplace,
|
vec_znx_normalize_inplace, vec_znx_normalize_tmp_bytes, vec_znx_rotate, vec_znx_rotate_inplace,
|
||||||
vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
|
vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
|
||||||
vec_znx_split_ring_tmp_bytes, vec_znx_sub, vec_znx_sub_ab_inplace, vec_znx_sub_ba_inplace, vec_znx_sub_scalar,
|
vec_znx_split_ring_tmp_bytes, vec_znx_sub, vec_znx_sub_inplace, vec_znx_sub_negate_inplace, vec_znx_sub_scalar,
|
||||||
vec_znx_sub_scalar_inplace, vec_znx_switch_ring,
|
vec_znx_sub_scalar_inplace, vec_znx_switch_ring,
|
||||||
},
|
},
|
||||||
source::Source,
|
source::Source,
|
||||||
@@ -43,9 +44,10 @@ where
|
|||||||
{
|
{
|
||||||
fn vec_znx_normalize_impl<R, A>(
|
fn vec_znx_normalize_impl<R, A>(
|
||||||
module: &Module<Self>,
|
module: &Module<Self>,
|
||||||
basek: usize,
|
res_basek: usize,
|
||||||
res: &mut R,
|
res: &mut R,
|
||||||
res_col: usize,
|
res_col: usize,
|
||||||
|
a_basek: usize,
|
||||||
a: &A,
|
a: &A,
|
||||||
a_col: usize,
|
a_col: usize,
|
||||||
scratch: &mut Scratch<Self>,
|
scratch: &mut Scratch<Self>,
|
||||||
@@ -54,7 +56,7 @@ where
|
|||||||
A: VecZnxToRef,
|
A: VecZnxToRef,
|
||||||
{
|
{
|
||||||
let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
|
let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
|
||||||
vec_znx_normalize::<R, A, Self>(basek, res, res_col, a, a_col, carry);
|
vec_znx_normalize::<R, A, Self>(res_basek, res, res_col, a_basek, a, a_col, carry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
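This hunk is the heart of #90: `vec_znx_normalize` now carries both the output base (`res_basek`) and the input base (`a_basek`), so a vector whose limbs are balanced base-2^a_basek digits can be re-normalized into base-2^res_basek limbs. A self-contained conceptual sketch of that re-digiting on a single coefficient (not poulpy's limb-wise code; the digit convention is assumed to be balanced):

// Re-express a value held as balanced base-2^a_k limbs as balanced
// base-2^res_k limbs; cross-base2k normalization does this limb-wise
// across whole vectors.
fn recompose(limbs: &[i64], k: usize) -> i128 {
    limbs.iter().fold(0i128, |acc, &d| (acc << k) + d as i128)
}

fn decompose(mut v: i128, k: usize, len: usize) -> Vec<i64> {
    let base = 1i128 << k;
    let half = base >> 1;
    let mut out = vec![0i64; len];
    for slot in out.iter_mut().rev() {
        let mut d = v.rem_euclid(base); // raw digit in [0, 2^k)
        v = v.div_euclid(base);
        if d >= half {
            d -= base; // center into [-2^(k-1), 2^(k-1))
            v += 1;    // propagate the carry
        }
        *slot = d as i64;
    }
    out
}

fn main() {
    let (a_k, res_k) = (14usize, 18usize);
    let limbs_a = [3i64, -5, 7, 1]; // balanced base-2^14 limbs, MSB first
    let value = recompose(&limbs_a, a_k);
    let limbs_res = decompose(value, res_k, 3);
    assert_eq!(recompose(&limbs_res, res_k), value);
    println!("{value} -> {limbs_res:?}");
}
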
@@ -64,7 +66,7 @@ where
 {
     fn vec_znx_normalize_inplace_impl<R>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         scratch: &mut Scratch<Self>,
@@ -72,7 +74,7 @@ where
         R: VecZnxToMut,
     {
         let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_normalize_inplace::<R, Self>(basek, res, res_col, carry);
+        vec_znx_normalize_inplace::<R, Self>(base2k, res, res_col, carry);
     }
 }

@@ -143,23 +145,23 @@ unsafe impl VecZnxSubImpl<Self> for FFT64Ref {
     }
 }

-unsafe impl VecZnxSubABInplaceImpl<Self> for FFT64Ref {
-    fn vec_znx_sub_ab_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxSubInplaceImpl<Self> for FFT64Ref {
+    fn vec_znx_sub_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        vec_znx_sub_ab_inplace::<R, A, Self>(res, res_col, a, a_col);
+        vec_znx_sub_inplace::<R, A, Self>(res, res_col, a, a_col);
     }
 }

-unsafe impl VecZnxSubBAInplaceImpl<Self> for FFT64Ref {
-    fn vec_znx_sub_ba_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxSubNegateInplaceImpl<Self> for FFT64Ref {
+    fn vec_znx_sub_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        vec_znx_sub_ba_inplace::<R, A, Self>(res, res_col, a, a_col);
+        vec_znx_sub_negate_inplace::<R, A, Self>(res, res_col, a, a_col);
     }
 }

@@ -234,9 +236,9 @@ where
     Module<Self>: VecZnxNormalizeTmpBytes,
     Scratch<Self>: TakeSlice,
 {
-    fn vec_znx_lsh_inplace_impl<R, A>(
+    fn vec_znx_lsh_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         res: &mut R,
         res_col: usize,
@@ -247,8 +249,8 @@ where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_lsh::<_, _, Self>(basek, k, res, res_col, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_lsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_lsh::<_, _, Self>(base2k, k, res, res_col, a, a_col, carry);
     }
 }

@@ -259,7 +261,7 @@ where
 {
     fn vec_znx_lsh_inplace_impl<A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         a: &mut A,
         a_col: usize,
@@ -267,8 +269,8 @@ where
     ) where
         A: VecZnxToMut,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_lsh_inplace::<_, Self>(basek, k, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_lsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_lsh_inplace::<_, Self>(base2k, k, a, a_col, carry);
     }
 }

@@ -277,9 +279,9 @@ where
     Module<Self>: VecZnxNormalizeTmpBytes,
     Scratch<Self>: TakeSlice,
 {
-    fn vec_znx_rsh_inplace_impl<R, A>(
+    fn vec_znx_rsh_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         res: &mut R,
         res_col: usize,
@@ -290,8 +292,8 @@ where
         R: VecZnxToMut,
         A: VecZnxToRef,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_rsh::<_, _, Self>(basek, k, res, res_col, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_rsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_rsh::<_, _, Self>(base2k, k, res, res_col, a, a_col, carry);
     }
 }

@@ -302,7 +304,7 @@ where
 {
     fn vec_znx_rsh_inplace_impl<A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         k: usize,
         a: &mut A,
         a_col: usize,
@@ -310,8 +312,8 @@ where
     ) where
         A: VecZnxToMut,
     {
-        let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_rsh_inplace::<_, Self>(basek, k, a, a_col, carry);
+        let (carry, _) = scratch.take_slice(module.vec_znx_rsh_tmp_bytes() / size_of::<i64>());
+        vec_znx_rsh_inplace::<_, Self>(base2k, k, a, a_col, carry);
     }
 }
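The shift paths now size their scratch with dedicated `vec_znx_lsh_tmp_bytes`/`vec_znx_rsh_tmp_bytes` helpers instead of borrowing the normalize budget. A carry buffer is still required because shifting balanced limbs can push a digit out of range; a one-coefficient sketch of why (conceptual, not poulpy code):

fn main() {
    let k = 12;
    let half = 1i64 << (k - 1);
    let digit = 1500i64;      // in range for k = 12
    let shifted = digit << 3; // 12000: outside [-2048, 2048)
    // Re-normalize: split into (carry, centered digit).
    let base = 1i64 << k;
    let mut d = shifted.rem_euclid(base);
    let mut carry = shifted.div_euclid(base);
    if d >= half {
        d -= base;
        carry += 1;
    }
    assert_eq!(carry * base + d, shifted);
    assert!((-half..half).contains(&d));
    println!("{shifted} = {carry} * 2^{k} + {d}");
}
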
@@ -495,18 +497,18 @@ unsafe impl VecZnxCopyImpl<Self> for FFT64Ref {
 }

 unsafe impl VecZnxFillUniformImpl<Self> for FFT64Ref {
-    fn vec_znx_fill_uniform_impl<R>(_module: &Module<Self>, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn vec_znx_fill_uniform_impl<R>(_module: &Module<Self>, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: VecZnxToMut,
     {
-        vec_znx_fill_uniform_ref(basek, res, res_col, source)
+        vec_znx_fill_uniform_ref(base2k, res, res_col, source)
     }
 }

 unsafe impl VecZnxFillNormalImpl<Self> for FFT64Ref {
     fn vec_znx_fill_normal_impl<R>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -516,14 +518,14 @@ unsafe impl VecZnxFillNormalImpl<Self> for FFT64Ref {
     ) where
         R: VecZnxToMut,
     {
-        vec_znx_fill_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_fill_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }

 unsafe impl VecZnxAddNormalImpl<Self> for FFT64Ref {
     fn vec_znx_add_normal_impl<R>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -533,6 +535,6 @@ unsafe impl VecZnxAddNormalImpl<Self> for FFT64Ref {
     ) where
         R: VecZnxToMut,
     {
-        vec_znx_add_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_add_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }
@@ -10,15 +10,15 @@ use poulpy_hal::{
         VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
         VecZnxBigAutomorphismInplaceImpl, VecZnxBigAutomorphismInplaceTmpBytesImpl, VecZnxBigFromBytesImpl,
         VecZnxBigFromSmallImpl, VecZnxBigNegateImpl, VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl,
-        VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl, VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl,
-        VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
+        VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubImpl, VecZnxBigSubInplaceImpl, VecZnxBigSubNegateInplaceImpl,
+        VecZnxBigSubSmallAImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallInplaceImpl, VecZnxBigSubSmallNegateInplaceImpl,
     },
     reference::{
         fft64::vec_znx_big::{
             vec_znx_big_add, vec_znx_big_add_inplace, vec_znx_big_add_normal_ref, vec_znx_big_add_small,
             vec_znx_big_add_small_inplace, vec_znx_big_automorphism, vec_znx_big_automorphism_inplace,
             vec_znx_big_automorphism_inplace_tmp_bytes, vec_znx_big_negate, vec_znx_big_negate_inplace, vec_znx_big_normalize,
-            vec_znx_big_normalize_tmp_bytes, vec_znx_big_sub, vec_znx_big_sub_ab_inplace, vec_znx_big_sub_ba_inplace,
+            vec_znx_big_normalize_tmp_bytes, vec_znx_big_sub, vec_znx_big_sub_inplace, vec_znx_big_sub_negate_inplace,
             vec_znx_big_sub_small_a, vec_znx_big_sub_small_a_inplace, vec_znx_big_sub_small_b, vec_znx_big_sub_small_b_inplace,
         },
         znx::{znx_copy_ref, znx_zero_ref},
@@ -76,7 +76,7 @@ unsafe impl VecZnxBigFromSmallImpl<Self> for FFT64Ref {
 unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Ref {
     fn add_normal_impl<R: VecZnxBigToMut<Self>>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -84,7 +84,7 @@ unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Ref {
         sigma: f64,
         bound: f64,
     ) {
-        vec_znx_big_add_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_big_add_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }

@@ -167,25 +167,25 @@ unsafe impl VecZnxBigSubImpl<Self> for FFT64Ref {
     }
 }

-unsafe impl VecZnxBigSubABInplaceImpl<Self> for FFT64Ref {
+unsafe impl VecZnxBigSubInplaceImpl<Self> for FFT64Ref {
     /// Subtracts `a` from `b` and stores the result on `b`.
-    fn vec_znx_big_sub_ab_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxBigToRef<Self>,
     {
-        vec_znx_big_sub_ab_inplace(res, res_col, a, a_col);
+        vec_znx_big_sub_inplace(res, res_col, a, a_col);
     }
 }

-unsafe impl VecZnxBigSubBAInplaceImpl<Self> for FFT64Ref {
+unsafe impl VecZnxBigSubNegateInplaceImpl<Self> for FFT64Ref {
     /// Subtracts `b` from `a` and stores the result on `b`.
-    fn vec_znx_big_sub_ba_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxBigToRef<Self>,
     {
-        vec_znx_big_sub_ba_inplace(res, res_col, a, a_col);
+        vec_znx_big_sub_negate_inplace(res, res_col, a, a_col);
     }
 }

@@ -208,9 +208,9 @@ unsafe impl VecZnxBigSubSmallAImpl<Self> for FFT64Ref {
     }
 }

-unsafe impl VecZnxBigSubSmallAInplaceImpl<Self> for FFT64Ref {
+unsafe impl VecZnxBigSubSmallInplaceImpl<Self> for FFT64Ref {
     /// Subtracts `a` from `res` and stores the result on `res`.
-    fn vec_znx_big_sub_small_a_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxToRef,
@@ -238,9 +238,9 @@ unsafe impl VecZnxBigSubSmallBImpl<Self> for FFT64Ref {
     }
 }

-unsafe impl VecZnxBigSubSmallBInplaceImpl<Self> for FFT64Ref {
+unsafe impl VecZnxBigSubSmallNegateInplaceImpl<Self> for FFT64Ref {
     /// Subtracts `res` from `a` and stores the result on `res`.
-    fn vec_znx_big_sub_small_b_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxToRef,
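The same renaming convention runs through the small/big/DFT layers: read `negate` as "the result is negated relative to plain in-place subtraction". A scalar sketch of the assumed semantics, with the mixed big/small variants noted in comments:

// Renamed subtraction family (assumed semantics, read off the doc comments):
//   sub_inplace(res, a):               res <- res - a
//   sub_negate_inplace(res, a):        res <- a - res
//   sub_small_inplace(res, a):         big res minus small a, into res
//   sub_small_negate_inplace(res, a):  small a minus big res, into res
fn sub_inplace(res: &mut i64, a: i64) {
    *res = res.wrapping_sub(a);
}

fn sub_negate_inplace(res: &mut i64, a: i64) {
    *res = a.wrapping_sub(*res);
}

fn main() {
    let (mut r1, mut r2) = (10i64, 10i64);
    sub_inplace(&mut r1, 3);
    sub_negate_inplace(&mut r2, 3);
    assert_eq!((r1, r2), (7, -7));
}
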
@@ -280,9 +280,10 @@ where
 {
     fn vec_znx_big_normalize_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        res_basek: usize,
         res: &mut R,
         res_col: usize,
+        a_basek: usize,
         a: &A,
         a_col: usize,
         scratch: &mut Scratch<Self>,
@@ -291,7 +292,7 @@ where
         A: VecZnxBigToRef<Self>,
     {
         let (carry, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes() / size_of::<i64>());
-        vec_znx_big_normalize(basek, res, res_col, a, a_col, carry);
+        vec_znx_big_normalize(res_basek, res, res_col, a_basek, a, a_col, carry);
     }
 }

@@ -326,7 +327,7 @@ where
     ) where
         R: VecZnxBigToMut<Self>,
     {
-        let (tmp, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes() / size_of::<i64>());
+        let (tmp, _) = scratch.take_slice(module.vec_znx_big_automorphism_inplace_tmp_bytes() / size_of::<i64>());
         vec_znx_big_automorphism_inplace(p, res, res_col, tmp);
     }
 }
@@ -5,12 +5,12 @@ use poulpy_hal::{
     },
     oep::{
         VecZnxDftAddImpl, VecZnxDftAddInplaceImpl, VecZnxDftAllocBytesImpl, VecZnxDftAllocImpl, VecZnxDftApplyImpl,
-        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubABInplaceImpl, VecZnxDftSubBAInplaceImpl, VecZnxDftSubImpl,
+        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubImpl, VecZnxDftSubInplaceImpl, VecZnxDftSubNegateInplaceImpl,
         VecZnxDftZeroImpl, VecZnxIdftApplyConsumeImpl, VecZnxIdftApplyImpl, VecZnxIdftApplyTmpAImpl, VecZnxIdftApplyTmpBytesImpl,
     },
     reference::fft64::vec_znx_dft::{
-        vec_znx_dft_add, vec_znx_dft_add_inplace, vec_znx_dft_apply, vec_znx_dft_copy, vec_znx_dft_sub,
-        vec_znx_dft_sub_ab_inplace, vec_znx_dft_sub_ba_inplace, vec_znx_dft_zero, vec_znx_idft_apply, vec_znx_idft_apply_consume,
+        vec_znx_dft_add, vec_znx_dft_add_inplace, vec_znx_dft_apply, vec_znx_dft_copy, vec_znx_dft_sub, vec_znx_dft_sub_inplace,
+        vec_znx_dft_sub_negate_inplace, vec_znx_dft_zero, vec_znx_idft_apply, vec_znx_idft_apply_consume,
         vec_znx_idft_apply_tmpa,
     },
 };
@@ -139,23 +139,23 @@ unsafe impl VecZnxDftSubImpl<Self> for FFT64Ref {
     }
 }

-unsafe impl VecZnxDftSubABInplaceImpl<Self> for FFT64Ref {
-    fn vec_znx_dft_sub_ab_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxDftSubInplaceImpl<Self> for FFT64Ref {
+    fn vec_znx_dft_sub_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<Self>,
         A: VecZnxDftToRef<Self>,
     {
-        vec_znx_dft_sub_ab_inplace(res, res_col, a, a_col);
+        vec_znx_dft_sub_inplace(res, res_col, a, a_col);
     }
 }

-unsafe impl VecZnxDftSubBAInplaceImpl<Self> for FFT64Ref {
-    fn vec_znx_dft_sub_ba_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxDftSubNegateInplaceImpl<Self> for FFT64Ref {
+    fn vec_znx_dft_sub_negate_inplace_impl<R, A>(_module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<Self>,
         A: VecZnxDftToRef<Self>,
     {
-        vec_znx_dft_sub_ba_inplace(res, res_col, a, a_col);
+        vec_znx_dft_sub_negate_inplace(res, res_col, a, a_col);
     }
 }
@@ -18,21 +18,21 @@ unsafe impl ZnNormalizeInplaceImpl<Self> for FFT64Ref
 where
     Self: TakeSliceImpl<Self>,
 {
-    fn zn_normalize_inplace_impl<R>(n: usize, basek: usize, res: &mut R, res_col: usize, scratch: &mut Scratch<Self>)
+    fn zn_normalize_inplace_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, scratch: &mut Scratch<Self>)
     where
         R: ZnToMut,
     {
         let (carry, _) = scratch.take_slice(n);
-        zn_normalize_inplace::<R, FFT64Ref>(n, basek, res, res_col, carry);
+        zn_normalize_inplace::<R, FFT64Ref>(n, base2k, res, res_col, carry);
     }
 }

 unsafe impl ZnFillUniformImpl<Self> for FFT64Ref {
-    fn zn_fill_uniform_impl<R>(n: usize, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn zn_fill_uniform_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: ZnToMut,
     {
-        zn_fill_uniform(n, basek, res, res_col, source);
+        zn_fill_uniform(n, base2k, res, res_col, source);
     }
 }

@@ -40,7 +40,7 @@ unsafe impl ZnFillNormalImpl<Self> for FFT64Ref {
     #[allow(clippy::too_many_arguments)]
     fn zn_fill_normal_impl<R>(
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -50,7 +50,7 @@ unsafe impl ZnFillNormalImpl<Self> for FFT64Ref {
     ) where
         R: ZnToMut,
     {
-        zn_fill_normal(n, basek, res, res_col, k, source, sigma, bound);
+        zn_fill_normal(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }

@@ -58,7 +58,7 @@ unsafe impl ZnAddNormalImpl<Self> for FFT64Ref {
     #[allow(clippy::too_many_arguments)]
     fn zn_add_normal_impl<R>(
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -68,6 +68,6 @@ unsafe impl ZnAddNormalImpl<Self> for FFT64Ref {
     ) where
         R: ZnToMut,
     {
-        zn_add_normal(n, basek, res, res_col, k, source, sigma, bound);
+        zn_add_normal(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }
@@ -1,12 +1,14 @@
 use poulpy_hal::reference::znx::{
-    ZnxAdd, ZnxAddInplace, ZnxAutomorphism, ZnxCopy, ZnxNegate, ZnxNegateInplace, ZnxNormalizeFinalStep,
-    ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly, ZnxNormalizeFirstStepInplace,
-    ZnxNormalizeMiddleStep, ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace, ZnxRotate, ZnxSub, ZnxSubABInplace,
-    ZnxSubBAInplace, ZnxSwitchRing, ZnxZero, znx_add_inplace_ref, znx_add_ref, znx_automorphism_ref, znx_copy_ref,
-    znx_negate_inplace_ref, znx_negate_ref, znx_normalize_final_step_inplace_ref, znx_normalize_final_step_ref,
+    ZnxAdd, ZnxAddInplace, ZnxAutomorphism, ZnxCopy, ZnxExtractDigitAddMul, ZnxMulAddPowerOfTwo, ZnxMulPowerOfTwo,
+    ZnxMulPowerOfTwoInplace, ZnxNegate, ZnxNegateInplace, ZnxNormalizeDigit, ZnxNormalizeFinalStep, ZnxNormalizeFinalStepInplace,
+    ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly, ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStep,
+    ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace, ZnxRotate, ZnxSub, ZnxSubInplace, ZnxSubNegateInplace,
+    ZnxSwitchRing, ZnxZero, znx_add_inplace_ref, znx_add_ref, znx_automorphism_ref, znx_copy_ref, znx_extract_digit_addmul_ref,
+    znx_mul_add_power_of_two_ref, znx_mul_power_of_two_inplace_ref, znx_mul_power_of_two_ref, znx_negate_inplace_ref,
+    znx_negate_ref, znx_normalize_digit_ref, znx_normalize_final_step_inplace_ref, znx_normalize_final_step_ref,
     znx_normalize_first_step_carry_only_ref, znx_normalize_first_step_inplace_ref, znx_normalize_first_step_ref,
     znx_normalize_middle_step_carry_only_ref, znx_normalize_middle_step_inplace_ref, znx_normalize_middle_step_ref, znx_rotate,
-    znx_sub_ab_inplace_ref, znx_sub_ba_inplace_ref, znx_sub_ref, znx_switch_ring_ref, znx_zero_ref,
+    znx_sub_inplace_ref, znx_sub_negate_inplace_ref, znx_sub_ref, znx_switch_ring_ref, znx_zero_ref,
 };

 use crate::cpu_fft64_ref::FFT64Ref;
@@ -32,17 +34,38 @@ impl ZnxSub for FFT64Ref {
     }
 }

-impl ZnxSubABInplace for FFT64Ref {
+impl ZnxSubInplace for FFT64Ref {
     #[inline(always)]
-    fn znx_sub_ab_inplace(res: &mut [i64], a: &[i64]) {
-        znx_sub_ab_inplace_ref(res, a);
+    fn znx_sub_inplace(res: &mut [i64], a: &[i64]) {
+        znx_sub_inplace_ref(res, a);
     }
 }

-impl ZnxSubBAInplace for FFT64Ref {
+impl ZnxSubNegateInplace for FFT64Ref {
     #[inline(always)]
-    fn znx_sub_ba_inplace(res: &mut [i64], a: &[i64]) {
-        znx_sub_ba_inplace_ref(res, a);
+    fn znx_sub_negate_inplace(res: &mut [i64], a: &[i64]) {
+        znx_sub_negate_inplace_ref(res, a);
+    }
+}
+
+impl ZnxMulAddPowerOfTwo for FFT64Ref {
+    #[inline(always)]
+    fn znx_muladd_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
+        znx_mul_add_power_of_two_ref(k, res, a);
+    }
+}
+
+impl ZnxMulPowerOfTwo for FFT64Ref {
+    #[inline(always)]
+    fn znx_mul_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
+        znx_mul_power_of_two_ref(k, res, a);
+    }
+}
+
+impl ZnxMulPowerOfTwoInplace for FFT64Ref {
+    #[inline(always)]
+    fn znx_mul_power_of_two_inplace(k: i64, res: &mut [i64]) {
+        znx_mul_power_of_two_inplace_ref(k, res);
     }
 }

@@ -97,56 +120,70 @@ impl ZnxSwitchRing for FFT64Ref {

 impl ZnxNormalizeFinalStep for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_final_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
-        znx_normalize_final_step_ref(basek, lsh, x, a, carry);
+    fn znx_normalize_final_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+        znx_normalize_final_step_ref(base2k, lsh, x, a, carry);
     }
 }

 impl ZnxNormalizeFinalStepInplace for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_final_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
-        znx_normalize_final_step_inplace_ref(basek, lsh, x, carry);
+    fn znx_normalize_final_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+        znx_normalize_final_step_inplace_ref(base2k, lsh, x, carry);
     }
 }

 impl ZnxNormalizeFirstStep for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_first_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
-        znx_normalize_first_step_ref(basek, lsh, x, a, carry);
+    fn znx_normalize_first_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+        znx_normalize_first_step_ref(base2k, lsh, x, a, carry);
     }
 }

 impl ZnxNormalizeFirstStepCarryOnly for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_first_step_carry_only(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
-        znx_normalize_first_step_carry_only_ref(basek, lsh, x, carry);
+    fn znx_normalize_first_step_carry_only(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+        znx_normalize_first_step_carry_only_ref(base2k, lsh, x, carry);
     }
 }

 impl ZnxNormalizeFirstStepInplace for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_first_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
-        znx_normalize_first_step_inplace_ref(basek, lsh, x, carry);
+    fn znx_normalize_first_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+        znx_normalize_first_step_inplace_ref(base2k, lsh, x, carry);
     }
 }

 impl ZnxNormalizeMiddleStep for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_middle_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
-        znx_normalize_middle_step_ref(basek, lsh, x, a, carry);
+    fn znx_normalize_middle_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+        znx_normalize_middle_step_ref(base2k, lsh, x, a, carry);
     }
 }

 impl ZnxNormalizeMiddleStepCarryOnly for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_middle_step_carry_only(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
-        znx_normalize_middle_step_carry_only_ref(basek, lsh, x, carry);
+    fn znx_normalize_middle_step_carry_only(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+        znx_normalize_middle_step_carry_only_ref(base2k, lsh, x, carry);
     }
 }

 impl ZnxNormalizeMiddleStepInplace for FFT64Ref {
     #[inline(always)]
-    fn znx_normalize_middle_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
-        znx_normalize_middle_step_inplace_ref(basek, lsh, x, carry);
+    fn znx_normalize_middle_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+        znx_normalize_middle_step_inplace_ref(base2k, lsh, x, carry);
+    }
+}
+
+impl ZnxExtractDigitAddMul for FFT64Ref {
+    #[inline(always)]
+    fn znx_extract_digit_addmul(base2k: usize, lsh: usize, res: &mut [i64], src: &mut [i64]) {
+        znx_extract_digit_addmul_ref(base2k, lsh, res, src);
+    }
+}
+
+impl ZnxNormalizeDigit for FFT64Ref {
+    #[inline(always)]
+    fn znx_normalize_digit(base2k: usize, res: &mut [i64], src: &mut [i64]) {
+        znx_normalize_digit_ref(base2k, res, src);
     }
 }
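The first/middle/final step split above is a limb-wise carry pipeline: the first step seeds the carry from the least-significant limb, middle steps fold each next limb with the incoming carry, and the final step writes the most-significant limb. A self-contained scalar sketch of that pipeline (poulpy's real versions operate on whole `[i64]` slices per limb and also handle `lsh`; this shows only the per-coefficient idea):

// Limbs are stored most-significant first; the carry flows upward.
fn step(base2k: usize, x: i64, carry_in: i64) -> (i64, i64) {
    let base = 1i64 << base2k;
    let half = base >> 1;
    let v = x + carry_in;
    let mut digit = v.rem_euclid(base);
    let mut carry = v.div_euclid(base);
    if digit >= half {
        digit -= base; // balanced digit in [-2^(base2k-1), 2^(base2k-1))
        carry += 1;
    }
    (digit, carry)
}

fn main() {
    let base2k = 12;
    let mut limbs = vec![5i64, 4000, -9000]; // most-significant first
    let mut carry = 0i64;
    for limb in limbs.iter_mut().rev() {
        let (d, c) = step(base2k, *limb, carry);
        *limb = d;
        carry = c;
    }
    // Any leftover carry would be absorbed by the final step / dropped limbs.
    println!("normalized limbs: {limbs:?}, top carry: {carry}");
}
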
@@ -6,5 +6,6 @@ mod vec_znx_big;
 mod vec_znx_dft;
 mod vmp_pmat;
 mod zn;
+mod znx;

 pub struct FFT64Spqlios;
@@ -3,20 +3,11 @@ use std::ptr::NonNull;
 use poulpy_hal::{
     layouts::{Backend, Module},
     oep::ModuleNewImpl,
-    reference::znx::{
-        ZnxCopy, ZnxNormalizeFinalStep, ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly,
-        ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStep, ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace,
-        ZnxRotate, ZnxSwitchRing, ZnxZero, znx_copy_ref, znx_normalize_final_step_inplace_ref, znx_normalize_final_step_ref,
-        znx_normalize_first_step_carry_only_ref, znx_normalize_first_step_inplace_ref, znx_normalize_first_step_ref,
-        znx_normalize_middle_step_carry_only_ref, znx_normalize_middle_step_inplace_ref, znx_normalize_middle_step_ref,
-        znx_switch_ring_ref, znx_zero_ref,
-    },
 };

 use crate::cpu_spqlios::{
     FFT64Spqlios,
     ffi::module::{MODULE, delete_module_info, new_module_info},
-    znx::znx_rotate_i64,
 };

 impl Backend for FFT64Spqlios {
@@ -41,85 +32,3 @@ unsafe impl ModuleNewImpl<Self> for FFT64Spqlios {
         unsafe { Module::from_raw_parts(new_module_info(n, 0), n) }
     }
 }
-
-impl ZnxCopy for FFT64Spqlios {
-    fn znx_copy(res: &mut [i64], a: &[i64]) {
-        znx_copy_ref(res, a);
-    }
-}
-
-impl ZnxZero for FFT64Spqlios {
-    fn znx_zero(res: &mut [i64]) {
-        znx_zero_ref(res);
-    }
-}
-
-impl ZnxSwitchRing for FFT64Spqlios {
-    fn znx_switch_ring(res: &mut [i64], a: &[i64]) {
-        znx_switch_ring_ref(res, a);
-    }
-}
-
-impl ZnxRotate for FFT64Spqlios {
-    fn znx_rotate(p: i64, res: &mut [i64], src: &[i64]) {
-        unsafe {
-            znx_rotate_i64(res.len() as u64, p, res.as_mut_ptr(), src.as_ptr());
-        }
-    }
-}
-
-impl ZnxNormalizeFinalStep for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_final_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
-        znx_normalize_final_step_ref(basek, lsh, x, a, carry);
-    }
-}
-
-impl ZnxNormalizeFinalStepInplace for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_final_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
-        znx_normalize_final_step_inplace_ref(basek, lsh, x, carry);
-    }
-}
-
-impl ZnxNormalizeFirstStep for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_first_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
-        znx_normalize_first_step_ref(basek, lsh, x, a, carry);
-    }
-}
-
-impl ZnxNormalizeFirstStepCarryOnly for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_first_step_carry_only(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
-        znx_normalize_first_step_carry_only_ref(basek, lsh, x, carry);
-    }
-}
-
-impl ZnxNormalizeFirstStepInplace for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_first_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
-        znx_normalize_first_step_inplace_ref(basek, lsh, x, carry);
-    }
-}
-
-impl ZnxNormalizeMiddleStep for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_middle_step(basek: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
-        znx_normalize_middle_step_ref(basek, lsh, x, a, carry);
-    }
-}
-
-impl ZnxNormalizeMiddleStepCarryOnly for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_middle_step_carry_only(basek: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
-        znx_normalize_middle_step_carry_only_ref(basek, lsh, x, carry);
-    }
-}
-
-impl ZnxNormalizeMiddleStepInplace for FFT64Spqlios {
-    #[inline(always)]
-    fn znx_normalize_middle_step_inplace(basek: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
-        znx_normalize_middle_step_inplace_ref(basek, lsh, x, carry);
-    }
-}
@@ -253,9 +253,6 @@ fn take_slice_aligned(data: &mut [u8], take_len: usize) -> (&mut [u8], &mut [u8]
             (take_slice, rem_slice)
         }
    } else {
-        panic!(
-            "Attempted to take {} from scratch with {} aligned bytes left",
-            take_len, aligned_len,
-        );
+        panic!("Attempted to take {take_len} from scratch with {aligned_len} aligned bytes left");
     }
 }
@@ -1,5 +1,8 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{TakeSlice, VecZnxMergeRingsTmpBytes, VecZnxNormalizeTmpBytes, VecZnxSplitRingTmpBytes},
|
api::{
|
||||||
|
TakeSlice, VecZnxLshTmpBytes, VecZnxMergeRingsTmpBytes, VecZnxNormalizeTmpBytes, VecZnxRshTmpBytes,
|
||||||
|
VecZnxSplitRingTmpBytes,
|
||||||
|
},
|
||||||
layouts::{
|
layouts::{
|
||||||
Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut,
|
Module, ScalarZnx, ScalarZnxToRef, Scratch, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut,
|
||||||
},
|
},
|
||||||
@@ -11,16 +14,16 @@ use poulpy_hal::{
|
|||||||
VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
|
VecZnxMulXpMinusOneInplaceTmpBytesImpl, VecZnxNegateImpl, VecZnxNegateInplaceImpl, VecZnxNormalizeImpl,
|
||||||
VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
|
VecZnxNormalizeInplaceImpl, VecZnxNormalizeTmpBytesImpl, VecZnxRotateImpl, VecZnxRotateInplaceImpl,
|
||||||
VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
|
VecZnxRotateInplaceTmpBytesImpl, VecZnxRshImpl, VecZnxRshInplaceImpl, VecZnxRshTmpBytesImpl, VecZnxSplitRingImpl,
|
||||||
VecZnxSplitRingTmpBytesImpl, VecZnxSubABInplaceImpl, VecZnxSubBAInplaceImpl, VecZnxSubImpl, VecZnxSubScalarImpl,
|
VecZnxSplitRingTmpBytesImpl, VecZnxSubImpl, VecZnxSubInplaceImpl, VecZnxSubNegateInplaceImpl, VecZnxSubScalarImpl,
|
||||||
VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
|
VecZnxSubScalarInplaceImpl, VecZnxSwitchRingImpl,
|
||||||
},
|
},
|
||||||
reference::{
|
reference::{
|
||||||
vec_znx::{
|
vec_znx::{
|
||||||
vec_znx_add_normal_ref, vec_znx_automorphism_inplace_tmp_bytes, vec_znx_copy, vec_znx_fill_normal_ref,
|
vec_znx_add_normal_ref, vec_znx_automorphism_inplace_tmp_bytes, vec_znx_copy, vec_znx_fill_normal_ref,
|
||||||
vec_znx_fill_uniform_ref, vec_znx_lsh, vec_znx_lsh_inplace, vec_znx_lsh_tmp_bytes, vec_znx_merge_rings,
|
vec_znx_fill_uniform_ref, vec_znx_lsh, vec_znx_lsh_inplace, vec_znx_lsh_tmp_bytes, vec_znx_merge_rings,
|
||||||
vec_znx_merge_rings_tmp_bytes, vec_znx_mul_xp_minus_one_inplace_tmp_bytes, vec_znx_rotate_inplace_tmp_bytes,
|
vec_znx_merge_rings_tmp_bytes, vec_znx_mul_xp_minus_one_inplace_tmp_bytes, vec_znx_normalize_tmp_bytes,
|
||||||
vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring, vec_znx_split_ring_tmp_bytes,
|
vec_znx_rotate_inplace_tmp_bytes, vec_znx_rsh, vec_znx_rsh_inplace, vec_znx_rsh_tmp_bytes, vec_znx_split_ring,
|
||||||
vec_znx_switch_ring,
|
vec_znx_split_ring_tmp_bytes, vec_znx_switch_ring,
|
||||||
},
|
},
|
||||||
znx::{znx_copy_ref, znx_zero_ref},
|
znx::{znx_copy_ref, znx_zero_ref},
|
||||||
},
|
},
|
||||||
@@ -34,7 +37,7 @@ use crate::cpu_spqlios::{
|
|||||||
|
|
||||||
unsafe impl VecZnxNormalizeTmpBytesImpl<Self> for FFT64Spqlios {
|
unsafe impl VecZnxNormalizeTmpBytesImpl<Self> for FFT64Spqlios {
|
||||||
fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
|
fn vec_znx_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
|
||||||
unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(module.ptr() as *const module_info_t) as usize }
|
vec_znx_normalize_tmp_bytes(module.n())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -44,9 +47,10 @@ where
 {
     fn vec_znx_normalize_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        res_basek: usize,
         res: &mut R,
         res_col: usize,
+        a_basek: usize,
         a: &A,
         a_col: usize,
         scratch: &mut Scratch<Self>,
@@ -60,6 +64,10 @@ where
         #[cfg(debug_assertions)]
         {
             assert_eq!(res.n(), a.n());
+            assert_eq!(
+                res_basek, a_basek,
+                "res_basek != a_basek -> base2k conversion is not supported"
+            )
         }
 
         let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes());
@@ -67,7 +75,7 @@ where
         unsafe {
             vec_znx::vec_znx_normalize_base2k(
                 module.ptr() as *const module_info_t,
-                basek as u64,
+                res_basek as u64,
                 res.at_mut_ptr(res_col, 0),
                 res.size() as u64,
                 res.sl() as u64,
@@ -86,7 +94,7 @@ where
 {
     fn vec_znx_normalize_inplace_impl<A>(
         module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         a: &mut A,
         a_col: usize,
         scratch: &mut Scratch<Self>,
@@ -100,7 +108,7 @@ where
         unsafe {
             vec_znx::vec_znx_normalize_base2k(
                 module.ptr() as *const module_info_t,
-                basek as u64,
+                base2k as u64,
                 a.at_mut_ptr(a_col, 0),
                 a.size() as u64,
                 a.sl() as u64,
@@ -301,8 +309,8 @@ unsafe impl VecZnxSubImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxSubABInplaceImpl<Self> for FFT64Spqlios {
-    fn vec_znx_sub_ab_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxSubInplaceImpl<Self> for FFT64Spqlios {
+    fn vec_znx_sub_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
         A: VecZnxToRef,
@@ -330,8 +338,8 @@ unsafe impl VecZnxSubABInplaceImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxSubBAInplaceImpl<Self> for FFT64Spqlios {
-    fn vec_znx_sub_ba_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxSubNegateInplaceImpl<Self> for FFT64Spqlios {
+    fn vec_znx_sub_negate_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxToMut,
        A: VecZnxToRef,
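A note on the renaming: the old ab/ba suffixes encoded the operand order, while the new names state the result. As the doc comments on the corresponding big-coefficient impls further down spell out, sub_inplace keeps res - a, and sub_negate_inplace stores a - res, the negated difference. A plain-slice sketch of these semantics, illustrative only and not the poulpy API:

    // res <- res - a (formerly vec_znx_sub_ab_inplace)
    fn sub_inplace(res: &mut [i64], a: &[i64]) {
        for (r, &x) in res.iter_mut().zip(a.iter()) {
            *r -= x;
        }
    }

    // res <- a - res, i.e. -(res - a) (formerly vec_znx_sub_ba_inplace)
    fn sub_negate_inplace(res: &mut [i64], a: &[i64]) {
        for (r, &x) in res.iter_mut().zip(a.iter()) {
            *r = x - *r;
        }
    }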
@@ -512,9 +520,9 @@ where
|
|||||||
Module<Self>: VecZnxNormalizeTmpBytes,
|
Module<Self>: VecZnxNormalizeTmpBytes,
|
||||||
Scratch<Self>: TakeSlice,
|
Scratch<Self>: TakeSlice,
|
||||||
{
|
{
|
||||||
fn vec_znx_lsh_inplace_impl<R, A>(
|
fn vec_znx_lsh_impl<R, A>(
|
||||||
module: &Module<Self>,
|
module: &Module<Self>,
|
||||||
basek: usize,
|
base2k: usize,
|
||||||
k: usize,
|
k: usize,
|
||||||
res: &mut R,
|
res: &mut R,
|
||||||
res_col: usize,
|
res_col: usize,
|
||||||
@@ -525,8 +533,8 @@ where
|
|||||||
R: VecZnxToMut,
|
R: VecZnxToMut,
|
||||||
A: VecZnxToRef,
|
A: VecZnxToRef,
|
||||||
{
|
{
|
||||||
let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
|
let (carry, _) = scratch.take_slice(module.vec_znx_lsh_tmp_bytes() / size_of::<i64>());
|
||||||
vec_znx_lsh::<_, _, FFT64Spqlios>(basek, k, res, res_col, a, a_col, carry)
|
vec_znx_lsh::<_, _, FFT64Spqlios>(base2k, k, res, res_col, a, a_col, carry)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -537,7 +545,7 @@ where
|
|||||||
{
|
{
|
||||||
fn vec_znx_lsh_inplace_impl<A>(
|
fn vec_znx_lsh_inplace_impl<A>(
|
||||||
module: &Module<Self>,
|
module: &Module<Self>,
|
||||||
basek: usize,
|
base2k: usize,
|
||||||
k: usize,
|
k: usize,
|
||||||
a: &mut A,
|
a: &mut A,
|
||||||
a_col: usize,
|
a_col: usize,
|
||||||
@@ -545,8 +553,8 @@ where
|
|||||||
) where
|
) where
|
||||||
A: VecZnxToMut,
|
A: VecZnxToMut,
|
||||||
{
|
{
|
||||||
let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
|
let (carry, _) = scratch.take_slice(module.vec_znx_lsh_tmp_bytes() / size_of::<i64>());
|
||||||
vec_znx_lsh_inplace::<_, FFT64Spqlios>(basek, k, a, a_col, carry)
|
vec_znx_lsh_inplace::<_, FFT64Spqlios>(base2k, k, a, a_col, carry)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -555,9 +563,9 @@ where
|
|||||||
Module<Self>: VecZnxNormalizeTmpBytes,
|
Module<Self>: VecZnxNormalizeTmpBytes,
|
||||||
Scratch<Self>: TakeSlice,
|
Scratch<Self>: TakeSlice,
|
||||||
{
|
{
|
||||||
fn vec_znx_rsh_inplace_impl<R, A>(
|
fn vec_znx_rsh_impl<R, A>(
|
||||||
module: &Module<Self>,
|
module: &Module<Self>,
|
||||||
basek: usize,
|
base2k: usize,
|
||||||
k: usize,
|
k: usize,
|
||||||
res: &mut R,
|
res: &mut R,
|
||||||
res_col: usize,
|
res_col: usize,
|
||||||
@@ -568,8 +576,8 @@ where
|
|||||||
R: VecZnxToMut,
|
R: VecZnxToMut,
|
||||||
A: VecZnxToRef,
|
A: VecZnxToRef,
|
||||||
{
|
{
|
||||||
let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
|
let (carry, _) = scratch.take_slice(module.vec_znx_rsh_tmp_bytes() / size_of::<i64>());
|
||||||
vec_znx_rsh::<_, _, FFT64Spqlios>(basek, k, res, res_col, a, a_col, carry)
|
vec_znx_rsh::<_, _, FFT64Spqlios>(base2k, k, res, res_col, a, a_col, carry)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -580,7 +588,7 @@ where
|
|||||||
{
|
{
|
||||||
fn vec_znx_rsh_inplace_impl<A>(
|
fn vec_znx_rsh_inplace_impl<A>(
|
||||||
module: &Module<Self>,
|
module: &Module<Self>,
|
||||||
basek: usize,
|
base2k: usize,
|
||||||
k: usize,
|
k: usize,
|
||||||
a: &mut A,
|
a: &mut A,
|
||||||
a_col: usize,
|
a_col: usize,
|
||||||
@@ -588,8 +596,8 @@ where
|
|||||||
) where
|
) where
|
||||||
A: VecZnxToMut,
|
A: VecZnxToMut,
|
||||||
{
|
{
|
||||||
let (carry, _) = scratch.take_slice(module.vec_znx_normalize_tmp_bytes() / size_of::<i64>());
|
let (carry, _) = scratch.take_slice(module.vec_znx_rsh_tmp_bytes() / size_of::<i64>());
|
||||||
vec_znx_rsh_inplace::<_, FFT64Spqlios>(basek, k, a, a_col, carry)
|
vec_znx_rsh_inplace::<_, FFT64Spqlios>(base2k, k, a, a_col, carry)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
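The shift implementations now size their carry buffer from the dedicated vec_znx_lsh_tmp_bytes / vec_znx_rsh_tmp_bytes queries instead of piggybacking on the normalization scratch. Assuming the query returns one i64 per coefficient of the degree-n ring (a plausible but unverified assumption), the take_slice arithmetic yields a carry slice of exactly ring-degree length:

    // Hypothetical sizing walk-through; `n` stands in for module.n() and the
    // computed constant for module.vec_znx_lsh_tmp_bytes().
    let n: usize = 1 << 5;
    let tmp_bytes: usize = n * std::mem::size_of::<i64>(); // assumed query result
    let carry_len: usize = tmp_bytes / std::mem::size_of::<i64>();
    assert_eq!(carry_len, n); // one carry word per coefficient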
@@ -690,11 +698,7 @@ unsafe impl VecZnxAutomorphismInplaceImpl<Self> for FFT64Spqlios {
         let mut a: VecZnx<&mut [u8]> = a.to_mut();
         #[cfg(debug_assertions)]
         {
-            assert!(
-                k & 1 != 0,
-                "invalid galois element: must be odd but is {}",
-                k
-            );
+            assert!(k & 1 != 0, "invalid galois element: must be odd but is {k}");
         }
         unsafe {
             vec_znx::vec_znx_automorphism(
@@ -852,18 +856,18 @@ unsafe impl VecZnxCopyImpl<Self> for FFT64Spqlios {
 }
 
 unsafe impl VecZnxFillUniformImpl<Self> for FFT64Spqlios {
-    fn vec_znx_fill_uniform_impl<R>(_module: &Module<Self>, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn vec_znx_fill_uniform_impl<R>(_module: &Module<Self>, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: VecZnxToMut,
     {
-        vec_znx_fill_uniform_ref(basek, res, res_col, source)
+        vec_znx_fill_uniform_ref(base2k, res, res_col, source)
     }
 }
 
 unsafe impl VecZnxFillNormalImpl<Self> for FFT64Spqlios {
     fn vec_znx_fill_normal_impl<R>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -873,14 +877,14 @@ unsafe impl VecZnxFillNormalImpl<Self> for FFT64Spqlios {
     ) where
         R: VecZnxToMut,
     {
-        vec_znx_fill_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_fill_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }
 
 unsafe impl VecZnxAddNormalImpl<Self> for FFT64Spqlios {
     fn vec_znx_add_normal_impl<R>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -890,6 +894,6 @@ unsafe impl VecZnxAddNormalImpl<Self> for FFT64Spqlios {
     ) where
         R: VecZnxToMut,
     {
-        vec_znx_add_normal_ref(basek, res, res_col, k, sigma, bound, source);
+        vec_znx_add_normal_ref(base2k, res, res_col, k, sigma, bound, source);
     }
 }
@@ -10,11 +10,12 @@ use poulpy_hal::{
         VecZnxBigAddSmallInplaceImpl, VecZnxBigAllocBytesImpl, VecZnxBigAllocImpl, VecZnxBigAutomorphismImpl,
         VecZnxBigAutomorphismInplaceImpl, VecZnxBigAutomorphismInplaceTmpBytesImpl, VecZnxBigFromBytesImpl,
         VecZnxBigFromSmallImpl, VecZnxBigNegateImpl, VecZnxBigNegateInplaceImpl, VecZnxBigNormalizeImpl,
-        VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubABInplaceImpl, VecZnxBigSubBAInplaceImpl, VecZnxBigSubImpl,
-        VecZnxBigSubSmallAImpl, VecZnxBigSubSmallAInplaceImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallBInplaceImpl,
+        VecZnxBigNormalizeTmpBytesImpl, VecZnxBigSubImpl, VecZnxBigSubInplaceImpl, VecZnxBigSubNegateInplaceImpl,
+        VecZnxBigSubSmallAImpl, VecZnxBigSubSmallBImpl, VecZnxBigSubSmallInplaceImpl, VecZnxBigSubSmallNegateInplaceImpl,
     },
     reference::{
-        vec_znx::vec_znx_add_normal_ref,
+        fft64::vec_znx_big::vec_znx_big_normalize,
+        vec_znx::{vec_znx_add_normal_ref, vec_znx_normalize_tmp_bytes},
         znx::{znx_copy_ref, znx_zero_ref},
     },
     source::Source,
@@ -70,7 +71,7 @@ unsafe impl VecZnxBigFromSmallImpl<Self> for FFT64Spqlios {
 unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Spqlios {
     fn add_normal_impl<R: VecZnxBigToMut<Self>>(
         _module: &Module<Self>,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -88,7 +89,7 @@ unsafe impl VecZnxBigAddNormalImpl<Self> for FFT64Spqlios {
             max_size: res.max_size,
         };
 
-        vec_znx_add_normal_ref(basek, &mut res_znx, res_col, k, sigma, bound, source);
+        vec_znx_add_normal_ref(base2k, &mut res_znx, res_col, k, sigma, bound, source);
     }
 }
 
@@ -266,9 +267,9 @@ unsafe impl VecZnxBigSubImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxBigSubABInplaceImpl<Self> for FFT64Spqlios {
+unsafe impl VecZnxBigSubInplaceImpl<Self> for FFT64Spqlios {
     /// Subtracts `a` from `b` and stores the result on `b`.
-    fn vec_znx_big_sub_ab_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxBigToRef<Self>,
@@ -297,9 +298,9 @@ unsafe impl VecZnxBigSubABInplaceImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxBigSubBAInplaceImpl<Self> for FFT64Spqlios {
+unsafe impl VecZnxBigSubNegateInplaceImpl<Self> for FFT64Spqlios {
     /// Subtracts `b` from `a` and stores the result on `b`.
-    fn vec_znx_big_sub_ba_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_negate_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxBigToRef<Self>,
@@ -370,9 +371,9 @@ unsafe impl VecZnxBigSubSmallAImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxBigSubSmallAInplaceImpl<Self> for FFT64Spqlios {
+unsafe impl VecZnxBigSubSmallInplaceImpl<Self> for FFT64Spqlios {
     /// Subtracts `a` from `res` and stores the result on `res`.
-    fn vec_znx_big_sub_small_a_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxToRef,
@@ -443,9 +444,9 @@ unsafe impl VecZnxBigSubSmallBImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxBigSubSmallBInplaceImpl<Self> for FFT64Spqlios {
+unsafe impl VecZnxBigSubSmallNegateInplaceImpl<Self> for FFT64Spqlios {
     /// Subtracts `res` from `a` and stores the result on `res`.
-    fn vec_znx_big_sub_small_b_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+    fn vec_znx_big_sub_small_negate_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxBigToMut<Self>,
         A: VecZnxToRef,
@@ -518,7 +519,7 @@ unsafe impl VecZnxBigNegateInplaceImpl<Self> for FFT64Spqlios {
 
 unsafe impl VecZnxBigNormalizeTmpBytesImpl<Self> for FFT64Spqlios {
     fn vec_znx_big_normalize_tmp_bytes_impl(module: &Module<Self>) -> usize {
-        unsafe { vec_znx::vec_znx_normalize_base2k_tmp_bytes(module.ptr()) as usize }
+        vec_znx_normalize_tmp_bytes(module.n())
     }
 }
 
@@ -528,9 +529,10 @@ where
 {
     fn vec_znx_big_normalize_impl<R, A>(
         module: &Module<Self>,
-        basek: usize,
+        res_basek: usize,
         res: &mut R,
         res_col: usize,
+        a_basek: usize,
         a: &A,
         a_col: usize,
         scratch: &mut Scratch<Self>,
@@ -538,28 +540,21 @@ where
         R: VecZnxToMut,
         A: VecZnxBigToRef<Self>,
     {
-        let a: VecZnxBig<&[u8], Self> = a.to_ref();
-        let mut res: VecZnx<&mut [u8]> = res.to_mut();
-
-        #[cfg(debug_assertions)]
-        {
-            assert_eq!(res.n(), a.n());
-        }
-
-        let (tmp_bytes, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes());
-        unsafe {
-            vec_znx::vec_znx_normalize_base2k(
-                module.ptr(),
-                basek as u64,
-                res.at_mut_ptr(res_col, 0),
-                res.size() as u64,
-                res.sl() as u64,
-                a.at_ptr(a_col, 0),
-                a.size() as u64,
-                a.sl() as u64,
-                tmp_bytes.as_mut_ptr(),
-            );
-        }
+        let (carry, _) = scratch.take_slice(module.vec_znx_big_normalize_tmp_bytes() / size_of::<i64>());
+        // unsafe {
+        //     vec_znx::vec_znx_normalize_base2k(
+        //         module.ptr(),
+        //         base2k as u64,
+        //         res.at_mut_ptr(res_col, 0),
+        //         res.size() as u64,
+        //         res.sl() as u64,
+        //         a.at_ptr(a_col, 0),
+        //         a.size() as u64,
+        //         a.sl() as u64,
+        //         tmp_bytes.as_mut_ptr(),
+        //     );
+        // }
+        vec_znx_big_normalize(res_basek, res, res_col, a_basek, a, a_col, carry);
     }
 }
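This hunk is the heart of the cross-base2k fix: res and a may now carry different base-2^k decompositions, so the spqlios FFI call (kept above as a comment) is replaced by the portable vec_znx_big_normalize, which re-normalizes across bases. A self-contained sketch of the per-coefficient arithmetic, illustrative only (the real routine works limb-wise with the carry slice and never materializes a wide integer); it assumes limbs.len() * a_base2k == out.len() * res_base2k so both fixed-point scales match:

    fn cross_base2k_normalize(a_base2k: u32, limbs: &[i64], res_base2k: u32, out: &mut [i64]) {
        // Recompose the coefficient: limb 0 is the most significant digit.
        let mut value: i128 = 0;
        for &l in limbs {
            value = (value << a_base2k) + l as i128;
        }
        // Redecompose into balanced base-2^res_base2k digits, least significant first.
        for o in out.iter_mut().rev() {
            let full = 1i128 << res_base2k;
            let mut digit = value & (full - 1);
            value >>= res_base2k;
            if digit >= full >> 1 {
                digit -= full; // balance into [-2^(res_base2k-1), 2^(res_base2k-1))
                value += 1;    // propagate the carry into the next digit
            }
            *o = digit as i64;
        }
    }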
@@ -6,7 +6,7 @@ use poulpy_hal::{
     },
     oep::{
         VecZnxDftAddImpl, VecZnxDftAddInplaceImpl, VecZnxDftAllocBytesImpl, VecZnxDftAllocImpl, VecZnxDftApplyImpl,
-        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubABInplaceImpl, VecZnxDftSubBAInplaceImpl, VecZnxDftSubImpl,
+        VecZnxDftCopyImpl, VecZnxDftFromBytesImpl, VecZnxDftSubImpl, VecZnxDftSubInplaceImpl, VecZnxDftSubNegateInplaceImpl,
         VecZnxDftZeroImpl, VecZnxIdftApplyConsumeImpl, VecZnxIdftApplyImpl, VecZnxIdftApplyTmpAImpl, VecZnxIdftApplyTmpBytesImpl,
     },
     reference::{
@@ -336,8 +336,8 @@ unsafe impl VecZnxDftSubImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxDftSubABInplaceImpl<Self> for FFT64Spqlios {
-    fn vec_znx_dft_sub_ab_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxDftSubInplaceImpl<Self> for FFT64Spqlios {
+    fn vec_znx_dft_sub_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<Self>,
         A: VecZnxDftToRef<Self>,
@@ -363,8 +363,8 @@ unsafe impl VecZnxDftSubABInplaceImpl<Self> for FFT64Spqlios {
     }
 }
 
-unsafe impl VecZnxDftSubBAInplaceImpl<Self> for FFT64Spqlios {
-    fn vec_znx_dft_sub_ba_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
+unsafe impl VecZnxDftSubNegateInplaceImpl<Self> for FFT64Spqlios {
+    fn vec_znx_dft_sub_negate_inplace_impl<R, A>(module: &Module<Self>, res: &mut R, res_col: usize, a: &A, a_col: usize)
     where
         R: VecZnxDftToMut<Self>,
         A: VecZnxDftToRef<Self>,
@@ -12,7 +12,7 @@ unsafe impl ZnNormalizeInplaceImpl<Self> for FFT64Spqlios
 where
     Self: TakeSliceImpl<Self>,
 {
-    fn zn_normalize_inplace_impl<A>(n: usize, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<Self>)
+    fn zn_normalize_inplace_impl<A>(n: usize, base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch<Self>)
     where
         A: ZnToMut,
     {
@@ -23,7 +23,7 @@ where
         unsafe {
             zn64::zn64_normalize_base2k_ref(
                 n as u64,
-                basek as u64,
+                base2k as u64,
                 a.at_mut_ptr(a_col, 0),
                 a.size() as u64,
                 a.sl() as u64,
@@ -37,11 +37,11 @@ where
 }
 
 unsafe impl ZnFillUniformImpl<Self> for FFT64Spqlios {
-    fn zn_fill_uniform_impl<R>(n: usize, basek: usize, res: &mut R, res_col: usize, source: &mut Source)
+    fn zn_fill_uniform_impl<R>(n: usize, base2k: usize, res: &mut R, res_col: usize, source: &mut Source)
     where
         R: ZnToMut,
     {
-        zn_fill_uniform(n, basek, res, res_col, source);
+        zn_fill_uniform(n, base2k, res, res_col, source);
     }
 }
 
@@ -49,7 +49,7 @@ unsafe impl ZnFillNormalImpl<Self> for FFT64Spqlios {
     #[allow(clippy::too_many_arguments)]
     fn zn_fill_normal_impl<R>(
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -59,7 +59,7 @@ unsafe impl ZnFillNormalImpl<Self> for FFT64Spqlios {
     ) where
         R: ZnToMut,
     {
-        zn_fill_normal(n, basek, res, res_col, k, source, sigma, bound);
+        zn_fill_normal(n, base2k, res, res_col, k, source, sigma, bound);
     }
 }
 
@@ -67,7 +67,7 @@ unsafe impl ZnAddNormalImpl<Self> for FFT64Spqlios {
     #[allow(clippy::too_many_arguments)]
     fn zn_add_normal_impl<R>(
         n: usize,
-        basek: usize,
+        base2k: usize,
         res: &mut R,
         res_col: usize,
         k: usize,
@@ -77,6 +77,6 @@ unsafe impl ZnAddNormalImpl<Self> for FFT64Spqlios {
     ) where
         R: ZnToMut,
     {
-        zn_add_normal(n, basek, res, res_col, k, source, sigma, bound);
+        zn_add_normal(n, base2k, res, res_col, k, source, sigma, bound);
    }
 }
189
poulpy-backend/src/cpu_spqlios/fft64/znx.rs
Normal file
@@ -0,0 +1,189 @@
+use poulpy_hal::reference::znx::{
+    ZnxAdd, ZnxAddInplace, ZnxAutomorphism, ZnxCopy, ZnxExtractDigitAddMul, ZnxMulAddPowerOfTwo, ZnxMulPowerOfTwo,
+    ZnxMulPowerOfTwoInplace, ZnxNegate, ZnxNegateInplace, ZnxNormalizeDigit, ZnxNormalizeFinalStep, ZnxNormalizeFinalStepInplace,
+    ZnxNormalizeFirstStep, ZnxNormalizeFirstStepCarryOnly, ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStep,
+    ZnxNormalizeMiddleStepCarryOnly, ZnxNormalizeMiddleStepInplace, ZnxRotate, ZnxSub, ZnxSubInplace, ZnxSubNegateInplace,
+    ZnxSwitchRing, ZnxZero, znx_add_inplace_ref, znx_add_ref, znx_automorphism_ref, znx_copy_ref, znx_extract_digit_addmul_ref,
+    znx_mul_add_power_of_two_ref, znx_mul_power_of_two_inplace_ref, znx_mul_power_of_two_ref, znx_negate_inplace_ref,
+    znx_negate_ref, znx_normalize_digit_ref, znx_normalize_final_step_inplace_ref, znx_normalize_final_step_ref,
+    znx_normalize_first_step_carry_only_ref, znx_normalize_first_step_inplace_ref, znx_normalize_first_step_ref,
+    znx_normalize_middle_step_carry_only_ref, znx_normalize_middle_step_inplace_ref, znx_normalize_middle_step_ref, znx_rotate,
+    znx_sub_inplace_ref, znx_sub_negate_inplace_ref, znx_sub_ref, znx_switch_ring_ref, znx_zero_ref,
+};
+
+use crate::FFT64Spqlios;
+
+impl ZnxAdd for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_add(res: &mut [i64], a: &[i64], b: &[i64]) {
+        znx_add_ref(res, a, b);
+    }
+}
+
+impl ZnxAddInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_add_inplace(res: &mut [i64], a: &[i64]) {
+        znx_add_inplace_ref(res, a);
+    }
+}
+
+impl ZnxSub for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_sub(res: &mut [i64], a: &[i64], b: &[i64]) {
+        znx_sub_ref(res, a, b);
+    }
+}
+
+impl ZnxSubInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_sub_inplace(res: &mut [i64], a: &[i64]) {
+        znx_sub_inplace_ref(res, a);
+    }
+}
+
+impl ZnxSubNegateInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_sub_negate_inplace(res: &mut [i64], a: &[i64]) {
+        znx_sub_negate_inplace_ref(res, a);
+    }
+}
+
+impl ZnxMulAddPowerOfTwo for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_muladd_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
+        znx_mul_add_power_of_two_ref(k, res, a);
+    }
+}
+
+impl ZnxMulPowerOfTwo for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_mul_power_of_two(k: i64, res: &mut [i64], a: &[i64]) {
+        znx_mul_power_of_two_ref(k, res, a);
+    }
+}
+
+impl ZnxMulPowerOfTwoInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_mul_power_of_two_inplace(k: i64, res: &mut [i64]) {
+        znx_mul_power_of_two_inplace_ref(k, res);
+    }
+}
+
+impl ZnxAutomorphism for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_automorphism(p: i64, res: &mut [i64], a: &[i64]) {
+        znx_automorphism_ref(p, res, a);
+    }
+}
+
+impl ZnxCopy for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_copy(res: &mut [i64], a: &[i64]) {
+        znx_copy_ref(res, a);
+    }
+}
+
+impl ZnxNegate for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_negate(res: &mut [i64], src: &[i64]) {
+        znx_negate_ref(res, src);
+    }
+}
+
+impl ZnxNegateInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_negate_inplace(res: &mut [i64]) {
+        znx_negate_inplace_ref(res);
+    }
+}
+
+impl ZnxRotate for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_rotate(p: i64, res: &mut [i64], src: &[i64]) {
+        znx_rotate::<Self>(p, res, src);
+    }
+}
+
+impl ZnxZero for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_zero(res: &mut [i64]) {
+        znx_zero_ref(res);
+    }
+}
+
+impl ZnxSwitchRing for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_switch_ring(res: &mut [i64], a: &[i64]) {
+        znx_switch_ring_ref(res, a);
+    }
+}
+
+impl ZnxNormalizeFinalStep for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_final_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+        znx_normalize_final_step_ref(base2k, lsh, x, a, carry);
+    }
+}
+
+impl ZnxNormalizeFinalStepInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_final_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+        znx_normalize_final_step_inplace_ref(base2k, lsh, x, carry);
+    }
+}
+
+impl ZnxNormalizeFirstStep for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_first_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+        znx_normalize_first_step_ref(base2k, lsh, x, a, carry);
+    }
+}
+
+impl ZnxNormalizeFirstStepCarryOnly for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_first_step_carry_only(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+        znx_normalize_first_step_carry_only_ref(base2k, lsh, x, carry);
+    }
+}
+
+impl ZnxNormalizeFirstStepInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_first_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+        znx_normalize_first_step_inplace_ref(base2k, lsh, x, carry);
+    }
+}
+
+impl ZnxNormalizeMiddleStep for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_middle_step(base2k: usize, lsh: usize, x: &mut [i64], a: &[i64], carry: &mut [i64]) {
+        znx_normalize_middle_step_ref(base2k, lsh, x, a, carry);
+    }
+}
+
+impl ZnxNormalizeMiddleStepCarryOnly for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_middle_step_carry_only(base2k: usize, lsh: usize, x: &[i64], carry: &mut [i64]) {
+        znx_normalize_middle_step_carry_only_ref(base2k, lsh, x, carry);
+    }
+}
+
+impl ZnxNormalizeMiddleStepInplace for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_middle_step_inplace(base2k: usize, lsh: usize, x: &mut [i64], carry: &mut [i64]) {
+        znx_normalize_middle_step_inplace_ref(base2k, lsh, x, carry);
+    }
+}
+
+impl ZnxExtractDigitAddMul for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_extract_digit_addmul(base2k: usize, lsh: usize, res: &mut [i64], src: &mut [i64]) {
+        znx_extract_digit_addmul_ref(base2k, lsh, res, src);
+    }
+}
+
+impl ZnxNormalizeDigit for FFT64Spqlios {
+    #[inline(always)]
+    fn znx_normalize_digit(base2k: usize, res: &mut [i64], src: &mut [i64]) {
+        znx_normalize_digit_ref(base2k, res, src);
+    }
+}
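Every impl in this new file is a thin #[inline(always)] forward to the shared reference kernels, which keeps the spqlios backend and the portable backend bit-identical on scalar ops. The normalization family works digit by digit with a carry; a self-contained sketch of the balancing step such kernels perform on a single word (illustrative only; the real znx_normalize_*_ref functions operate on whole slices):

    fn balance_digit(base2k: usize, x: i64) -> (i64, i64) {
        let full = 1i64 << base2k;
        let mut digit = x & (full - 1); // low base2k bits, in [0, 2^base2k)
        let mut carry = x >> base2k;    // arithmetic shift keeps the sign
        if digit >= full >> 1 {
            digit -= full; // re-center into [-2^(base2k-1), 2^(base2k-1))
            carry += 1;
        }
        (digit, carry) // invariant: x == digit + (carry << base2k)
    }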
@@ -5,15 +5,15 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_spqlios::FFT64Spqlios,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vec_znx_add => poulpy_hal::test_suite::vec_znx::test_vec_znx_add,
         test_vec_znx_add_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_add_inplace,
         test_vec_znx_add_scalar => poulpy_hal::test_suite::vec_znx::test_vec_znx_add_scalar,
         test_vec_znx_add_scalar_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_add_scalar_inplace,
         test_vec_znx_sub => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub,
-        test_vec_znx_sub_ab_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_ab_inplace,
-        test_vec_znx_sub_ba_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_ba_inplace,
+        test_vec_znx_sub_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_inplace,
+        test_vec_znx_sub_negate_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_negate_inplace,
         test_vec_znx_sub_scalar => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_scalar,
         test_vec_znx_sub_scalar_inplace => poulpy_hal::test_suite::vec_znx::test_vec_znx_sub_scalar_inplace,
         test_vec_znx_rsh => poulpy_hal::test_suite::vec_znx::test_vec_znx_rsh,
@@ -41,7 +41,7 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_spqlios::FFT64Spqlios,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_svp_apply_dft_to_dft => poulpy_hal::test_suite::svp::test_svp_apply_dft_to_dft,
         test_svp_apply_dft_to_dft_inplace => poulpy_hal::test_suite::svp::test_svp_apply_dft_to_dft_inplace,
@@ -53,20 +53,20 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_spqlios::FFT64Spqlios,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vec_znx_big_add => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add,
         test_vec_znx_big_add_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add_inplace,
         test_vec_znx_big_add_small => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add_small,
         test_vec_znx_big_add_small_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_add_small_inplace,
         test_vec_znx_big_sub => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub,
-        test_vec_znx_big_sub_ab_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_ab_inplace,
+        test_vec_znx_big_sub_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_inplace,
         test_vec_znx_big_automorphism => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_automorphism,
         test_vec_znx_big_automorphism_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_automorphism_inplace,
         test_vec_znx_big_negate => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_negate,
         test_vec_znx_big_negate_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_negate_inplace,
         test_vec_znx_big_normalize => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_normalize,
-        test_vec_znx_big_sub_ba_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_ba_inplace,
+        test_vec_znx_big_sub_negate_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_negate_inplace,
         test_vec_znx_big_sub_small_a => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_small_a,
         test_vec_znx_big_sub_small_a_inplace => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_small_a_inplace,
         test_vec_znx_big_sub_small_b => poulpy_hal::test_suite::vec_znx_big::test_vec_znx_big_sub_small_b,
@@ -79,13 +79,13 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_spqlios::FFT64Spqlios,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vec_znx_dft_add => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_add,
         test_vec_znx_dft_add_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_add_inplace,
         test_vec_znx_dft_sub => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub,
-        test_vec_znx_dft_sub_ab_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_ab_inplace,
-        test_vec_znx_dft_sub_ba_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_ba_inplace,
+        test_vec_znx_dft_sub_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_inplace,
+        test_vec_znx_dft_sub_negate_inplace => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_dft_sub_negate_inplace,
         test_vec_znx_idft_apply => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_idft_apply,
         test_vec_znx_idft_apply_consume => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_idft_apply_consume,
         test_vec_znx_idft_apply_tmpa => poulpy_hal::test_suite::vec_znx_dft::test_vec_znx_idft_apply_tmpa,
@@ -97,7 +97,7 @@ cross_backend_test_suite! {
     backend_ref = crate::cpu_fft64_ref::FFT64Ref,
     backend_test = crate::cpu_spqlios::FFT64Spqlios,
     size = 1 << 5,
-    basek = 12,
+    base2k = 12,
     tests = {
         test_vmp_apply_dft_to_dft => poulpy_hal::test_suite::vmp::test_vmp_apply_dft_to_dft,
         test_vmp_apply_dft_to_dft_add => poulpy_hal::test_suite::vmp::test_vmp_apply_dft_to_dft_add,
@@ -26,13 +26,13 @@ fn main() {
     let n: usize = 1<<log_n;
 
     // Base-2-k (implicit digit decomposition)
-    let basek: usize = 14;
+    let base2k: usize = 14;
 
     // Ciphertext Torus precision (equivalent to ciphertext modulus)
     let k_ct: usize = 27;
 
     // Plaintext Torus precision (equivalent to plaintext modulus)
-    let k_pt: usize = basek;
+    let k_pt: usize = base2k;
 
     // GLWE rank
     let rank: usize = 1;
@@ -41,9 +41,9 @@ fn main() {
     let module: Module<FFT64> = Module::<FFT64>::new(n as u64);
 
     // Allocates ciphertext & plaintexts
-    let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
-    let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
-    let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
+    let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, base2k, k_ct, rank);
+    let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, base2k, k_pt);
+    let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, base2k, k_pt);
 
     // CPRNG
     let mut source_xs: Source = Source::new([0u8; 32]);
@@ -52,8 +52,8 @@ fn main() {
 
     // Scratch space
     let mut scratch: ScratchOwned<FFT64> = ScratchOwned::alloc(
-        GLWECiphertext::encrypt_sk_scratch_space(&module, n, basek, ct.k())
-            | GLWECiphertext::decrypt_scratch_space(&module, n, basek, ct.k()),
+        GLWECiphertext::encrypt_sk_scratch_space(&module, n, base2k, ct.k())
+            | GLWECiphertext::decrypt_scratch_space(&module, n, base2k, ct.k()),
     );
 
     // Generate secret-key
@@ -64,7 +64,7 @@ fn main() {
     let sk_prepared: GLWESecretPrepared<Vec<u8>, FFT64> = sk.prepare_alloc(&module, scratch.borrow());
 
     // Uniform plaintext
-    module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, k_pt, &mut source_xa);
+    module.vec_znx_fill_uniform(base2k, &mut pt_want.data, 0, k_pt, &mut source_xa);
 
     // Encryption
     ct.encrypt_sk(
@@ -83,7 +83,7 @@ fn main() {
     pt_want.sub_inplace_ab(&module, &pt_have);
 
     // Ideal vs. actual noise
-    let noise_have: f64 = pt_want.data.std(basek, 0) * (ct.k() as f64).exp2();
+    let noise_have: f64 = pt_want.data.std(base2k, 0) * (ct.k() as f64).exp2();
     let noise_want: f64 = SIGMA;
 
     // Check
@@ -1,5 +1,6 @@
 use poulpy_core::layouts::{
-    GGSWCiphertext, GLWECiphertext, GLWESecret, Infos,
+    Base2K, Degree, Digits, GGSWCiphertext, GGSWCiphertextLayout, GLWECiphertext, GLWECiphertextLayout, GLWESecret, Rank, Rows,
+    TorusPrecision,
     prepared::{GGSWCiphertextPrepared, GLWESecretPrepared, PrepareAlloc},
 };
 use std::hint::black_box;
@@ -18,50 +19,65 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
 
     struct Params {
         log_n: usize,
-        basek: usize,
-        k_ct_in: usize,
-        k_ct_out: usize,
-        k_ggsw: usize,
-        rank: usize,
+        base2k: Base2K,
+        k_ct_in: TorusPrecision,
+        k_ct_out: TorusPrecision,
+        k_ggsw: TorusPrecision,
+        rank: Rank,
     }
 
     fn runner(p: Params) -> impl FnMut() {
         let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
 
-        let n: usize = module.n();
-        let basek: usize = p.basek;
-        let k_ct_in: usize = p.k_ct_in;
-        let k_ct_out: usize = p.k_ct_out;
-        let k_ggsw: usize = p.k_ggsw;
-        let rank: usize = p.rank;
-        let digits: usize = 1;
+        let n: Degree = Degree(module.n() as u32);
+        let base2k: Base2K = p.base2k;
+        let k_ct_in: TorusPrecision = p.k_ct_in;
+        let k_ct_out: TorusPrecision = p.k_ct_out;
+        let k_ggsw: TorusPrecision = p.k_ggsw;
+        let rank: Rank = p.rank;
+        let digits: Digits = Digits(1);
 
-        let rows: usize = 1; //(p.k_ct_in.div_ceil(p.basek);
+        let rows: Rows = Rows(1); //(p.k_ct_in.div_ceil(p.base2k);
 
-        let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
-        let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct_in, rank);
-        let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct_out, rank);
-        let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
+        let ggsw_layout: GGSWCiphertextLayout = GGSWCiphertextLayout {
+            n,
+            base2k,
+            k: k_ggsw,
+            rows,
+            digits,
+            rank,
+        };
+
+        let glwe_out_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+            n,
+            base2k,
+            k: k_ct_out,
+            rank,
+        };
+
+        let glwe_in_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+            n,
+            base2k,
+            k: k_ct_in,
+            rank,
+        };
+
+        let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(&ggsw_layout);
+        let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_in_layout);
+        let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_out_layout);
+        let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n.into(), 1);
 
         let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
-            GGSWCiphertext::encrypt_sk_scratch_space(&module, basek, ct_ggsw.k(), rank)
-                | GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_glwe_in.k())
-                | GLWECiphertext::external_product_scratch_space(
-                    &module,
-                    basek,
-                    ct_glwe_out.k(),
-                    ct_glwe_in.k(),
-                    ct_ggsw.k(),
-                    digits,
-                    rank,
-                ),
+            GGSWCiphertext::encrypt_sk_scratch_space(&module, &ggsw_layout)
+                | GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_in_layout)
+                | GLWECiphertext::external_product_scratch_space(&module, &glwe_out_layout, &glwe_in_layout, &ggsw_layout),
        );
 
         let mut source_xs = Source::new([0u8; 32]);
         let mut source_xe = Source::new([0u8; 32]);
         let mut source_xa = Source::new([0u8; 32]);
 
-        let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
+        let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_in_layout);
         sk.fill_ternary_prob(0.5, &mut source_xs);
         let sk_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow());
 
@@ -92,11 +108,11 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
 
     let params_set: Vec<Params> = vec![Params {
         log_n: 11,
-        basek: 22,
-        k_ct_in: 44,
-        k_ct_out: 44,
-        k_ggsw: 54,
-        rank: 1,
+        base2k: 22_u32.into(),
+        k_ct_in: 44_u32.into(),
+        k_ct_out: 44_u32.into(),
+        k_ggsw: 54_u32.into(),
+        rank: 1_u32.into(),
     }];
 
     for params in params_set {
@@ -113,39 +129,55 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
 
     struct Params {
         log_n: usize,
-        basek: usize,
-        k_ct: usize,
-        k_ggsw: usize,
-        rank: usize,
+        base2k: Base2K,
+        k_ct: TorusPrecision,
+        k_ggsw: TorusPrecision,
+        rank: Rank,
    }
 
     fn runner(p: Params) -> impl FnMut() {
         let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
 
-        let n = module.n();
-        let basek: usize = p.basek;
-        let k_glwe: usize = p.k_ct;
-        let k_ggsw: usize = p.k_ggsw;
-        let rank: usize = p.rank;
-        let digits: usize = 1;
+        let n: Degree = Degree(module.n() as u32);
+        let base2k: Base2K = p.base2k;
+        let k_glwe: TorusPrecision = p.k_ct;
+        let k_ggsw: TorusPrecision = p.k_ggsw;
+        let rank: Rank = p.rank;
+        let digits: Digits = Digits(1);
 
-        let rows: usize = p.k_ct.div_ceil(p.basek);
+        let rows: Rows = p.k_ct.div_ceil(p.base2k).into();
 
-        let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(n, basek, k_ggsw, rows, digits, rank);
-        let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_glwe, rank);
-        let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, 1);
+        let ggsw_layout: GGSWCiphertextLayout = GGSWCiphertextLayout {
+            n,
+            base2k,
+            k: k_ggsw,
+            rows,
+            digits,
+            rank,
+        };
+
+        let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+            n,
+            base2k,
+            k: k_glwe,
+            rank,
+        };
+
+        let mut ct_ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::alloc(&ggsw_layout);
+        let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_layout);
+        let pt_rgsw: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n.into(), 1);
 
         let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
-            GGSWCiphertext::encrypt_sk_scratch_space(&module, basek, ct_ggsw.k(), rank)
-                | GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_glwe.k())
-                | GLWECiphertext::external_product_inplace_scratch_space(&module, basek, ct_glwe.k(), ct_ggsw.k(), digits, rank),
+            GGSWCiphertext::encrypt_sk_scratch_space(&module, &ggsw_layout)
+                | GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_layout)
+                | GLWECiphertext::external_product_inplace_scratch_space(&module, &glwe_layout, &ggsw_layout),
        );
 
-        let mut source_xs = Source::new([0u8; 32]);
-        let mut source_xe = Source::new([0u8; 32]);
-        let mut source_xa = Source::new([0u8; 32]);
+        let mut source_xs: Source = Source::new([0u8; 32]);
+        let mut source_xe: Source = Source::new([0u8; 32]);
+        let mut source_xa: Source = Source::new([0u8; 32]);
 
-        let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
+        let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_layout);
         sk.fill_ternary_prob(0.5, &mut source_xs);
         let sk_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow());
 
@@ -177,10 +209,10 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
 
     let params_set: Vec<Params> = vec![Params {
         log_n: 12,
-        basek: 18,
-        k_ct: 54,
-        k_ggsw: 54,
-        rank: 1,
+        base2k: 18_u32.into(),
+        k_ct: 54_u32.into(),
+        k_ggsw: 54_u32.into(),
+        rank: 1_u32.into(),
     }];
 
     for params in params_set {
@@ -1,5 +1,6 @@
|
|||||||
use poulpy_core::layouts::{
|
use poulpy_core::layouts::{
|
||||||
GGLWEAutomorphismKey, GGLWESwitchingKey, GLWECiphertext, GLWESecret, Infos,
|
Base2K, Degree, Digits, GGLWEAutomorphismKey, GGLWEAutomorphismKeyLayout, GGLWESwitchingKey, GGLWESwitchingKeyLayout,
|
||||||
|
GLWECiphertext, GLWECiphertextLayout, GLWESecret, Rank, Rows, TorusPrecision,
|
||||||
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared, GLWESecretPrepared, PrepareAlloc},
|
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared, GLWESecretPrepared, PrepareAlloc},
|
||||||
};
|
};
|
||||||
use std::{hint::black_box, time::Duration};
|
use std::{hint::black_box, time::Duration};
|
||||||
@@ -17,59 +18,73 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
 
 struct Params {
     log_n: usize,
-    basek: usize,
-    k_ct_in: usize,
-    k_ct_out: usize,
-    k_ksk: usize,
-    digits: usize,
-    rank_in: usize,
-    rank_out: usize,
+    base2k: Base2K,
+    k_ct_in: TorusPrecision,
+    k_ct_out: TorusPrecision,
+    k_ksk: TorusPrecision,
+    digits: Digits,
+    rank: Rank,
 }
 
 fn runner(p: Params) -> impl FnMut() {
     let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
 
-    let n = module.n();
-    let basek: usize = p.basek;
-    let k_rlwe_in: usize = p.k_ct_in;
-    let k_rlwe_out: usize = p.k_ct_out;
-    let k_grlwe: usize = p.k_ksk;
-    let rank_in: usize = p.rank_in;
-    let rank_out: usize = p.rank_out;
-    let digits: usize = p.digits;
+    let n: Degree = Degree(module.n() as u32);
+    let base2k: Base2K = p.base2k;
+    let k_glwe_in: TorusPrecision = p.k_ct_in;
+    let k_glwe_out: TorusPrecision = p.k_ct_out;
+    let k_gglwe: TorusPrecision = p.k_ksk;
+    let rank: Rank = p.rank;
+    let digits: Digits = p.digits;
 
-    let rows: usize = p.k_ct_in.div_ceil(p.basek * digits);
+    let rows: Rows = p.k_ct_in.div_ceil(p.base2k.0 * digits.0).into();
 
-    let mut ksk: GGLWEAutomorphismKey<Vec<u8>> = GGLWEAutomorphismKey::alloc(n, basek, k_grlwe, rows, digits, rank_out);
-    let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_rlwe_in, rank_in);
-    let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_rlwe_out, rank_out);
+    let gglwe_atk_layout: GGLWEAutomorphismKeyLayout = GGLWEAutomorphismKeyLayout {
+        n,
+        base2k,
+        k: k_gglwe,
+        rows,
+        rank,
+        digits,
+    };
+
+    let glwe_in_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+        n,
+        base2k,
+        k: k_glwe_in,
+        rank,
+    };
+
+    let glwe_out_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+        n,
+        base2k,
+        k: k_glwe_out,
+        rank,
+    };
+
+    let mut ksk: GGLWEAutomorphismKey<Vec<u8>> = GGLWEAutomorphismKey::alloc(&gglwe_atk_layout);
+    let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_in_layout);
+    let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_out_layout);
 
     let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
-        GGLWESwitchingKey::encrypt_sk_scratch_space(&module, basek, ksk.k(), rank_in, rank_out)
-            | GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct_in.k())
+        GGLWESwitchingKey::encrypt_sk_scratch_space(&module, &gglwe_atk_layout)
+            | GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_in_layout)
             | GLWECiphertext::keyswitch_scratch_space(
                 &module,
-                basek,
-                ct_out.k(),
-                ct_in.k(),
-                ksk.k(),
-                digits,
-                rank_in,
-                rank_out,
+                &glwe_out_layout,
+                &glwe_in_layout,
+                &gglwe_atk_layout,
             ),
     );
 
-    let mut source_xs = Source::new([0u8; 32]);
-    let mut source_xe = Source::new([0u8; 32]);
-    let mut source_xa = Source::new([0u8; 32]);
+    let mut source_xs: Source = Source::new([0u8; 32]);
+    let mut source_xe: Source = Source::new([0u8; 32]);
+    let mut source_xa: Source = Source::new([0u8; 32]);
 
-    let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_in);
+    let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_in_layout);
     sk_in.fill_ternary_prob(0.5, &mut source_xs);
     let sk_in_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_in.prepare_alloc(&module, scratch.borrow());
 
-    let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank_out);
-    sk_out.fill_ternary_prob(0.5, &mut source_xs);
 
     ksk.encrypt_sk(
         &module,
         -1,
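The pattern above repeats throughout this commit: describe an object once as a layout value, then derive allocations and scratch sizes from it instead of threading seven positional integers around. A self-contained schematic mirror of the design (not the crate's actual definitions):

// Schematic mirror of the refactor: one struct owns every shape parameter,
// and each helper derives what it needs from it.
#[derive(Clone, Copy)]
struct GlweLayout {
    n: u32,      // ring degree
    base2k: u32, // limb size in bits
    k: u32,      // torus precision in bits
    rank: u32,   // number of mask columns
}

impl GlweLayout {
    // Number of base-2^k limbs needed to reach precision k.
    fn size(&self) -> usize {
        (self.k as usize).div_ceil(self.base2k as usize)
    }
    // Raw byte count: (rank + 1) columns of `size` limbs of n i64 coefficients.
    fn byte_len(&self) -> usize {
        (self.rank as usize + 1) * self.size() * self.n as usize * 8
    }
}

fn main() {
    let layout = GlweLayout { n: 1 << 10, base2k: 14, k: 27, rank: 1 };
    assert_eq!(layout.size(), 2); // ceil(27 / 14)
    println!("{} bytes", layout.byte_len());
}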
@@ -95,18 +110,17 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
     }
 }
 
-let digits: usize = 1;
-let basek: usize = 19;
+let base2k: usize = 19;
+let digits = 1;
 
 let params_set: Vec<Params> = vec![Params {
     log_n: 15,
-    basek,
-    k_ct_in: 874 - digits * basek,
-    k_ct_out: 874 - digits * basek,
-    k_ksk: 874,
-    digits,
-    rank_in: 1,
-    rank_out: 1,
+    base2k: base2k.into(),
+    k_ct_in: (874 - digits * base2k).into(),
+    k_ct_out: (874 - digits * base2k).into(),
+    k_ksk: 874_u32.into(),
+    digits: 1_u32.into(),
+    rank: 1_u32.into(),
 }];
 
 for params in params_set {
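The arithmetic behind this parameter set, mirroring the `div_ceil` in the runner above: with base2k = 19 and digits = 1, k_ct_in = 874 − 19 = 855 and rows = ceil(855 / 19) = 45. As a quick check:

fn main() {
    let (base2k, digits) = (19usize, 1usize);
    let k_ct_in = 874 - digits * base2k; // 855
    let rows = k_ct_in.div_ceil(base2k * digits);
    assert_eq!(rows, 45); // ceil(855 / 19)
}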
@@ -125,42 +139,59 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
 
 struct Params {
     log_n: usize,
-    basek: usize,
-    k_ct: usize,
-    k_ksk: usize,
-    rank: usize,
+    base2k: Base2K,
+    k_ct: TorusPrecision,
+    k_ksk: TorusPrecision,
+    rank: Rank,
 }
 
 fn runner(p: Params) -> impl FnMut() {
     let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(1 << p.log_n);
 
-    let n = module.n();
-    let basek: usize = p.basek;
-    let k_ct: usize = p.k_ct;
-    let k_ksk: usize = p.k_ksk;
-    let rank: usize = p.rank;
-    let digits: usize = 1;
+    let n: Degree = Degree(module.n() as u32);
+    let base2k: Base2K = p.base2k;
+    let k_ct: TorusPrecision = p.k_ct;
+    let k_ksk: TorusPrecision = p.k_ksk;
+    let rank: Rank = p.rank;
+    let digits: Digits = Digits(1);
 
-    let rows: usize = p.k_ct.div_ceil(p.basek);
+    let rows: Rows = p.k_ct.div_ceil(p.base2k).into();
 
-    let mut ksk: GGLWESwitchingKey<Vec<u8>> = GGLWESwitchingKey::alloc(n, basek, k_ksk, rows, digits, rank, rank);
-    let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
+    let gglwe_layout: GGLWESwitchingKeyLayout = GGLWESwitchingKeyLayout {
+        n,
+        base2k,
+        k: k_ksk,
+        rows,
+        digits,
+        rank_in: rank,
+        rank_out: rank,
+    };
+
+    let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+        n,
+        base2k,
+        k: k_ct,
+        rank,
+    };
+
+    let mut ksk: GGLWESwitchingKey<Vec<u8>> = GGLWESwitchingKey::alloc(&gglwe_layout);
+    let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_layout);
 
     let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
-        GGLWESwitchingKey::encrypt_sk_scratch_space(&module, basek, ksk.k(), rank, rank)
-            | GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct.k())
-            | GLWECiphertext::keyswitch_inplace_scratch_space(&module, basek, ct.k(), ksk.k(), digits, rank),
+        GGLWESwitchingKey::encrypt_sk_scratch_space(&module, &gglwe_layout)
+            | GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_layout)
+            | GLWECiphertext::keyswitch_inplace_scratch_space(&module, &glwe_layout, &gglwe_layout),
     );
 
     let mut source_xs: Source = Source::new([0u8; 32]);
     let mut source_xe: Source = Source::new([0u8; 32]);
     let mut source_xa: Source = Source::new([0u8; 32]);
 
-    let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
+    let mut sk_in: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_layout);
     sk_in.fill_ternary_prob(0.5, &mut source_xs);
     let sk_in_dft: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk_in.prepare_alloc(&module, scratch.borrow());
 
-    let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
+    let mut sk_out: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_layout);
     sk_out.fill_ternary_prob(0.5, &mut source_xs);
 
     ksk.encrypt_sk(
@@ -190,10 +221,10 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
 
 let params_set: Vec<Params> = vec![Params {
     log_n: 9,
-    basek: 18,
-    k_ct: 27,
-    k_ksk: 27,
-    rank: 1,
+    base2k: 18_u32.into(),
+    k_ct: 27_u32.into(),
+    k_ksk: 27_u32.into(),
+    rank: 1_u32.into(),
 }];
 
 for params in params_set {
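Worth noting in the hunk above: `GGLWEAutomorphismKeyLayout` carries a single `rank` (an automorphism key switches a ciphertext back under the same secret, so input and output ranks coincide), while `GGLWESwitchingKeyLayout` keeps separate `rank_in`/`rank_out` fields and can therefore describe a key that changes the GLWE rank. A hypothetical rank-2-to-rank-1 layout, reusing the field names from this diff (illustrative only):

let gglwe_layout = GGLWESwitchingKeyLayout {
    n,
    base2k,
    k: k_ksk,
    rows,
    digits,
    rank_in: Rank(2),  // rank of the ciphertext being switched
    rank_out: Rank(1), // rank of the resulting ciphertext
};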
@@ -2,7 +2,8 @@ use poulpy_backend::cpu_spqlios::FFT64Spqlios;
 use poulpy_core::{
     GLWEOperations, SIGMA,
     layouts::{
-        GLWECiphertext, GLWEPlaintext, GLWESecret, Infos,
+        Base2K, Degree, GLWECiphertext, GLWECiphertextLayout, GLWEPlaintext, GLWEPlaintextLayout, GLWESecret, LWEInfos, Rank,
+        TorusPrecision,
         prepared::{GLWESecretPrepared, PrepareAlloc},
     },
 };
@@ -16,27 +17,36 @@ fn main() {
 // Ring degree
 let log_n: usize = 10;
 
-let n: usize = 1 << log_n;
+let n: Degree = Degree(1 << log_n);
 
 // Base-2-k (implicit digit decomposition)
-let basek: usize = 14;
+let base2k: Base2K = Base2K(14);
 
 // Ciphertext Torus precision (equivalent to ciphertext modulus)
-let k_ct: usize = 27;
+let k_ct: TorusPrecision = TorusPrecision(27);
 
 // Plaintext Torus precision (equivalent to plaintext modulus)
-let k_pt: usize = basek;
+let k_pt: TorusPrecision = TorusPrecision(base2k.into());
 
 // GLWE rank
-let rank: usize = 1;
+let rank: Rank = Rank(1);
 
 // Instantiate Module (DFT Tables)
-let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(n as u64);
+let module: Module<FFT64Spqlios> = Module::<FFT64Spqlios>::new(n.0 as u64);
+
+let glwe_ct_infos: GLWECiphertextLayout = GLWECiphertextLayout {
+    n,
+    base2k,
+    k: k_ct,
+    rank,
+};
+
+let glwe_pt_infos: GLWEPlaintextLayout = GLWEPlaintextLayout { n, base2k, k: k_pt };
 
 // Allocates ciphertext & plaintexts
-let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(n, basek, k_ct, rank);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(n, basek, k_pt);
+let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&glwe_ct_infos);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&glwe_pt_infos);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&glwe_pt_infos);
 
 // CPRNG
 let mut source_xs: Source = Source::new([0u8; 32]);
@@ -45,19 +55,19 @@ fn main() {
 
 // Scratch space
 let mut scratch: ScratchOwned<FFT64Spqlios> = ScratchOwned::alloc(
-    GLWECiphertext::encrypt_sk_scratch_space(&module, basek, ct.k())
-        | GLWECiphertext::decrypt_scratch_space(&module, basek, ct.k()),
+    GLWECiphertext::encrypt_sk_scratch_space(&module, &glwe_ct_infos)
+        | GLWECiphertext::decrypt_scratch_space(&module, &glwe_ct_infos),
 );
 
 // Generate secret-key
-let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(n, rank);
+let mut sk: GLWESecret<Vec<u8>> = GLWESecret::alloc(&glwe_ct_infos);
 sk.fill_ternary_prob(0.5, &mut source_xs);
 
 // Backend-prepared secret
 let sk_prepared: GLWESecretPrepared<Vec<u8>, FFT64Spqlios> = sk.prepare_alloc(&module, scratch.borrow());
 
 // Uniform plaintext
-module.vec_znx_fill_uniform(basek, &mut pt_want.data, 0, &mut source_xa);
+module.vec_znx_fill_uniform(base2k.into(), &mut pt_want.data, 0, &mut source_xa);
 
 // Encryption
 ct.encrypt_sk(
@@ -76,7 +86,7 @@ fn main() {
 pt_want.sub_inplace_ab(&module, &pt_have);
 
 // Ideal vs. actual noise
-let noise_have: f64 = pt_want.data.std(basek, 0) * (ct.k() as f64).exp2();
+let noise_have: f64 = pt_want.data.std(base2k.into(), 0) * (ct.k().as_u32() as f64).exp2();
 let noise_want: f64 = SIGMA;
 
 // Check
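The check at the end of this example measures noise in integer units: the decryption error `pt_want - pt_have` lives on the torus, so its coefficient standard deviation is rescaled by 2^k_ct. A quick sanity computation, assuming the conventional SIGMA = 3.2 (an assumption here; the constant is defined in poulpy-core): an error of standard deviation 3.2 at encryption time sits at roughly 3.2 / 2^27 ≈ 2.4e-8 in torus units, and multiplying the measured std back by 2^27 should land near 3.2 again:

fn main() {
    let sigma: f64 = 3.2;
    let k_ct: u32 = 27;
    let std_torus = sigma / (k_ct as f64).exp2(); // ≈ 2.4e-8
    let noise_have = std_torus * (k_ct as f64).exp2();
    assert!((noise_have - sigma).abs() < 1e-9);
}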
@@ -1,43 +1,42 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnxDft, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphism, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
         VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
-        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+        VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
 };
 
-use crate::layouts::{GGLWEAutomorphismKey, GLWECiphertext, Infos, prepared::GGLWEAutomorphismKeyPrepared};
+use crate::layouts::{GGLWEAutomorphismKey, GGLWELayoutInfos, GLWECiphertext, prepared::GGLWEAutomorphismKeyPrepared};
 
 impl GGLWEAutomorphismKey<Vec<u8>> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn automorphism_scratch_space<B: Backend>(
+    pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
-        basek: usize,
-        k_out: usize,
-        k_in: usize,
-        k_ksk: usize,
-        digits: usize,
-        rank: usize,
+        out_infos: &OUT,
+        in_infos: &IN,
+        key_infos: &KEY,
     ) -> usize
     where
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
+        OUT: GGLWELayoutInfos,
+        IN: GGLWELayoutInfos,
+        KEY: GGLWELayoutInfos,
+        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
+        GLWECiphertext::keyswitch_scratch_space(
+            module,
+            &out_infos.glwe_layout(),
+            &in_infos.glwe_layout(),
+            key_infos,
+        )
     }
 
-    pub fn automorphism_inplace_scratch_space<B: Backend>(
-        module: &Module<B>,
-        basek: usize,
-        k_out: usize,
-        k_ksk: usize,
-        digits: usize,
-        rank: usize,
-    ) -> usize
+    pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
     where
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
+        OUT: GGLWELayoutInfos,
+        KEY: GGLWELayoutInfos,
+        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
     {
-        GGLWEAutomorphismKey::automorphism_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank)
+        GGLWEAutomorphismKey::automorphism_scratch_space(module, out_infos, out_infos, key_infos)
     }
 }
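The scratch-space helpers above are now generic over small "infos" traits rather than positional integers, which lets callers pass either a layout descriptor or the allocated object itself (as the debug assertions later in this diff do). A schematic, self-contained mirror of the dispatch (trait and method names simplified; not the crate's exact surface):

trait GlweInfos {
    fn base2k(&self) -> u32;
    fn k(&self) -> u32;
    fn rank(&self) -> u32;
}

#[derive(Clone, Copy)]
struct GlweLayout { base2k: u32, k: u32, rank: u32 }
struct GlweCiphertext { layout: GlweLayout, data: Vec<i64> }

impl GlweInfos for GlweLayout {
    fn base2k(&self) -> u32 { self.base2k }
    fn k(&self) -> u32 { self.k }
    fn rank(&self) -> u32 { self.rank }
}
impl GlweInfos for GlweCiphertext {
    fn base2k(&self) -> u32 { self.layout.base2k }
    fn k(&self) -> u32 { self.layout.k }
    fn rank(&self) -> u32 { self.layout.rank }
}

// One helper serves both: a plan-ahead caller passes a layout,
// a debug assertion passes the live ciphertext.
fn scratch_bytes<A: GlweInfos>(infos: &A) -> usize {
    (infos.k() as usize).div_ceil(infos.base2k() as usize) * (infos.rank() as usize + 1) * 8
}

fn main() {
    let layout = GlweLayout { base2k: 18, k: 54, rank: 1 };
    let ct = GlweCiphertext { layout, data: Vec::new() };
    assert_eq!(scratch_bytes(&layout), scratch_bytes(&ct));
}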
@@ -59,11 +58,15 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxAutomorphism
-        + VecZnxAutomorphismInplace<B>,
-    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
+        + VecZnxAutomorphismInplace<B>
+        + VecZnxNormalize<B>
+        + VecZnxNormalizeTmpBytes,
+    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
+        use crate::layouts::LWEInfos;
+
         assert_eq!(
             self.rank_in(),
             lhs.rank_in(),
@@ -93,13 +96,13 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
         )
     }
 
-    let cols_out: usize = rhs.rank_out() + 1;
+    let cols_out: usize = (rhs.rank_out() + 1).into();
 
     let p: i64 = lhs.p();
-    let p_inv = module.galois_element_inv(p);
+    let p_inv: i64 = module.galois_element_inv(p);
 
-    (0..self.rank_in()).for_each(|col_i| {
-        (0..self.rows()).for_each(|row_j| {
+    (0..self.rank_in().into()).for_each(|col_i| {
+        (0..self.rows().into()).for_each(|row_j| {
             let mut res_ct: GLWECiphertext<&mut [u8]> = self.at_mut(row_j, col_i);
             let lhs_ct: GLWECiphertext<&[u8]> = lhs.at(row_j, col_i);
 
@@ -118,8 +121,8 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
         });
     });
 
-    (self.rows().min(lhs.rows())..self.rows()).for_each(|row_i| {
-        (0..self.rank_in()).for_each(|col_j| {
+    (self.rows().min(lhs.rows()).into()..self.rows().into()).for_each(|row_i| {
+        (0..self.rank_in().into()).for_each(|col_j| {
             self.at_mut(row_i, col_j).data.zero();
         });
     });
@@ -143,8 +146,10 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxAutomorphism
-        + VecZnxAutomorphismInplace<B>,
-    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
+        + VecZnxAutomorphismInplace<B>
+        + VecZnxNormalize<B>
+        + VecZnxNormalizeTmpBytes,
+    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
@@ -164,13 +169,13 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
         );
     }
 
-    let cols_out: usize = rhs.rank_out() + 1;
+    let cols_out: usize = (rhs.rank_out() + 1).into();
 
     let p: i64 = self.p();
     let p_inv = module.galois_element_inv(p);
 
-    (0..self.rank_in()).for_each(|col_i| {
-        (0..self.rows()).for_each(|row_j| {
+    (0..self.rank_in().into()).for_each(|col_i| {
+        (0..self.rows().into()).for_each(|row_j| {
             let mut res_ct: GLWECiphertext<&mut [u8]> = self.at_mut(row_j, col_i);
 
             // Reverts the automorphism X^{-k}: (-pi^{-1}_{k}(s)a + s, a) to (-sa + pi_{k}(s), a)
@@ -1,67 +1,66 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnxBig, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy,
-        VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
+        VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAddInplace, VecZnxDftAllocBytes,
+        VecZnxDftApply, VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes,
+        VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };
 
 use crate::layouts::{
-    GGSWCiphertext, GLWECiphertext, Infos,
+    GGLWELayoutInfos, GGSWCiphertext, GGSWInfos, GLWECiphertext,
     prepared::{GGLWEAutomorphismKeyPrepared, GGLWETensorKeyPrepared},
 };
 
 impl GGSWCiphertext<Vec<u8>> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn automorphism_scratch_space<B: Backend>(
+    pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
         module: &Module<B>,
-        basek: usize,
-        k_out: usize,
-        k_in: usize,
-        k_ksk: usize,
-        digits_ksk: usize,
-        k_tsk: usize,
-        digits_tsk: usize,
-        rank: usize,
+        out_infos: &OUT,
+        in_infos: &IN,
+        key_infos: &KEY,
+        tsk_infos: &TSK,
     ) -> usize
     where
+        OUT: GGSWInfos,
+        IN: GGSWInfos,
+        KEY: GGLWELayoutInfos,
+        TSK: GGLWELayoutInfos,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxBigNormalizeTmpBytes,
    {
-        let out_size: usize = k_out.div_ceil(basek);
-        let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
-        let ks_internal: usize =
-            GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
-        let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
+        let out_size: usize = out_infos.size();
+        let ci_dft: usize = module.vec_znx_dft_alloc_bytes((key_infos.rank_out() + 1).into(), out_size);
+        let ks_internal: usize = GLWECiphertext::keyswitch_scratch_space(
+            module,
+            &out_infos.glwe_layout(),
+            &in_infos.glwe_layout(),
+            key_infos,
+        );
+        let expand: usize = GGSWCiphertext::expand_row_scratch_space(module, out_infos, tsk_infos);
        ci_dft + (ks_internal | expand)
    }
 
-    #[allow(clippy::too_many_arguments)]
-    pub fn automorphism_inplace_scratch_space<B: Backend>(
+    pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
         module: &Module<B>,
-        basek: usize,
-        k_out: usize,
-        k_ksk: usize,
-        digits_ksk: usize,
-        k_tsk: usize,
-        digits_tsk: usize,
-        rank: usize,
+        out_infos: &OUT,
+        key_infos: &KEY,
+        tsk_infos: &TSK,
    ) -> usize
    where
+        OUT: GGSWInfos,
+        KEY: GGLWELayoutInfos,
+        TSK: GGLWELayoutInfos,
        Module<B>: VecZnxDftAllocBytes
            + VmpApplyDftToDftTmpBytes
            + VecZnxBigAllocBytes
            + VecZnxNormalizeTmpBytes
            + VecZnxBigNormalizeTmpBytes,
    {
-        GGSWCiphertext::automorphism_scratch_space(
-            module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
-        )
+        GGSWCiphertext::automorphism_scratch_space(module, out_infos, out_infos, key_infos, tsk_infos)
    }
 }
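A note on how the returned size composes in `ci_dft + (ks_internal | expand)`: `+` accounts for buffers live at the same time (`ci_dft` stays taken while either phase runs), while `|` combines the two phases that never overlap. This reading is an inference from the expression rather than documented semantics, but the bound it relies on is unconditional: the bitwise OR of two byte counts is always at least their maximum and never more than their sum, so it safely covers whichever branch is exercised. Easy to check:

fn main() {
    let (a, b): (usize, usize) = (12_288, 20_480);
    assert!(a | b >= a.max(b)); // OR dominates max...
    assert!(a | b <= a + b);    // ...and never exceeds the sum
}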
@@ -88,13 +87,18 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
         + VecZnxNormalizeTmpBytes
         + VecZnxDftCopy<B>
         + VecZnxDftAddInplace<B>
-        + VecZnxIdftApplyTmpA<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B>,
+        + VecZnxIdftApplyTmpA<B>
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B> + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(self.n(), auto_key.n());
-        assert_eq!(lhs.n(), auto_key.n());
+        use crate::layouts::{GLWEInfos, LWEInfos};
+
+        assert_eq!(self.n(), module.n() as u32);
+        assert_eq!(lhs.n(), module.n() as u32);
+        assert_eq!(auto_key.n(), module.n() as u32);
+        assert_eq!(tensor_key.n(), module.n() as u32);
 
         assert_eq!(
             self.rank(),
@@ -105,36 +109,23 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
         );
         assert_eq!(
             self.rank(),
-            auto_key.rank(),
+            auto_key.rank_out(),
             "ggsw_in rank: {} != auto_key rank: {}",
             self.rank(),
-            auto_key.rank()
+            auto_key.rank_out()
         );
         assert_eq!(
             self.rank(),
-            tensor_key.rank(),
+            tensor_key.rank_out(),
             "ggsw_in rank: {} != tensor_key rank: {}",
             self.rank(),
-            tensor_key.rank()
+            tensor_key.rank_out()
         );
-        assert!(
-            scratch.available()
-                >= GGSWCiphertext::automorphism_scratch_space(
-                    module,
-                    self.basek(),
-                    self.k(),
-                    lhs.k(),
-                    auto_key.k(),
-                    auto_key.digits(),
-                    tensor_key.k(),
-                    tensor_key.digits(),
-                    self.rank(),
-                )
-        )
+        assert!(scratch.available() >= GGSWCiphertext::automorphism_scratch_space(module, self, lhs, auto_key, tensor_key))
     };
 
     // Keyswitch the j-th row of the col 0
-    (0..lhs.rows()).for_each(|row_i| {
+    (0..lhs.rows().into()).for_each(|row_i| {
         // Key-switch column 0, i.e.
         // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0pi^-1(s0) + a1pi^-1(s1) + a2pi^-1(s2)) + M[i], a0, a1, a2)
         self.at_mut(row_i, 0)
@@ -164,11 +155,12 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
         + VecZnxNormalizeTmpBytes
         + VecZnxDftCopy<B>
         + VecZnxDftAddInplace<B>
-        + VecZnxIdftApplyTmpA<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B>,
+        + VecZnxIdftApplyTmpA<B>
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnxBig<B> + TakeVecZnx,
 {
     // Keyswitch the j-th row of the col 0
-    (0..self.rows()).for_each(|row_i| {
+    (0..self.rows().into()).for_each(|row_i| {
         // Key-switch column 0, i.e.
         // col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0pi^-1(s0) + a1pi^-1(s1) + a2pi^-1(s2)) + M[i], a0, a1, a2)
         self.at_mut(row_i, 0)
@@ -1,44 +1,38 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace,
-        VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallAInplace, VecZnxBigSubSmallBInplace,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
+        VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallInplace,
+        VecZnxBigSubSmallNegateInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize,
+        VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnxBig},
 };
 
-use crate::layouts::{GLWECiphertext, Infos, prepared::GGLWEAutomorphismKeyPrepared};
+use crate::layouts::{GGLWELayoutInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared};
 
 impl GLWECiphertext<Vec<u8>> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn automorphism_scratch_space<B: Backend>(
+    pub fn automorphism_scratch_space<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
-        basek: usize,
-        k_out: usize,
-        k_in: usize,
-        k_ksk: usize,
-        digits: usize,
-        rank: usize,
+        out_infos: &OUT,
+        in_infos: &IN,
+        key_infos: &KEY,
     ) -> usize
     where
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
+        OUT: GLWEInfos,
+        IN: GLWEInfos,
+        KEY: GGLWELayoutInfos,
+        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
-        Self::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
+        Self::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
    }
 
-    pub fn automorphism_inplace_scratch_space<B: Backend>(
-        module: &Module<B>,
-        basek: usize,
-        k_out: usize,
-        k_ksk: usize,
-        digits: usize,
-        rank: usize,
-    ) -> usize
+    pub fn automorphism_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
    where
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
+        OUT: GLWEInfos,
+        KEY: GGLWELayoutInfos,
+        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
-        Self::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
+        Self::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
    }
 }
@@ -59,11 +53,13 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxAutomorphismInplace<B>
+        + VecZnxNormalize<B>
+        + VecZnxNormalizeTmpBytes,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     self.keyswitch(module, lhs, &rhs.key, scratch);
-    (0..self.rank() + 1).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_automorphism_inplace(rhs.p(), &mut self.data, i, scratch);
     })
 }
@@ -83,11 +79,13 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxAutomorphismInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxAutomorphismInplace<B>
+        + VecZnxNormalize<B>
+        + VecZnxNormalizeTmpBytes,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     self.keyswitch_inplace(module, &rhs.key, scratch);
-    (0..self.rank() + 1).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_automorphism_inplace(rhs.p(), &mut self.data, i, scratch);
     })
 }
@@ -108,19 +106,29 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxBigAutomorphismInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxBigAutomorphismInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
         self.assert_keyswitch(module, lhs, &rhs.key, scratch);
     }
-    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
+    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
     let mut res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
-    (0..self.cols()).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
         module.vec_znx_big_add_small_inplace(&mut res_big, i, &lhs.data, i);
-        module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
+        module.vec_znx_big_normalize(
+            self.base2k().into(),
+            &mut self.data,
+            i,
+            rhs.base2k().into(),
+            &res_big,
+            i,
+            scratch_1,
+        );
    })
 }
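This hunk shows the central API change in this diff: `vec_znx_big_normalize` now receives the destination's base-2^k in addition to the source's, instead of assuming one shared base for both operands. Schematically (argument roles inferred from the call sites in this file):

// before: a single base serves both the accumulator and the output
module.vec_znx_big_normalize(basek, &mut out, out_col, &big, big_col, scratch);

// after: output base first, then the base the accumulator was produced in
module.vec_znx_big_normalize(
    out_base2k, // base-2^k of the normalized destination
    &mut out,
    out_col,
    in_base2k,  // base-2^k of the big-coefficient source
    &big,
    big_col,
    scratch,
);

This is what allows the output ciphertext (`self.base2k()`) and the switching key (`rhs.base2k()`) to live in different bases, and the plain `vec_znx_normalize` used by the LWE/GLWE conversions further down follows the same convention.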
@@ -139,19 +147,29 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
-        + VecZnxBigAutomorphismInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxBigAutomorphismInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
         self.assert_keyswitch_inplace(module, &rhs.key, scratch);
     }
-    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
+    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
     let mut res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
-    (0..self.cols()).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
         module.vec_znx_big_add_small_inplace(&mut res_big, i, &self.data, i);
-        module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
+        module.vec_znx_big_normalize(
+            self.base2k().into(),
+            &mut self.data,
+            i,
+            rhs.base2k().into(),
+            &res_big,
+            i,
+            scratch_1,
+        );
     })
 }
@@ -172,23 +190,33 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxBigSubSmallAInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxBigSubSmallInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
         self.assert_keyswitch(module, lhs, &rhs.key, scratch);
     }
-    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
+    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
     let mut res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
-    (0..self.cols()).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
-        module.vec_znx_big_sub_small_a_inplace(&mut res_big, i, &lhs.data, i);
-        module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
+        module.vec_znx_big_sub_small_inplace(&mut res_big, i, &lhs.data, i);
+        module.vec_znx_big_normalize(
+            self.base2k().into(),
+            &mut self.data,
+            i,
+            rhs.base2k().into(),
+            &res_big,
+            i,
+            scratch_1,
+        );
     })
 }
 
-pub fn automorphism_sub_ab_inplace<DataRhs: DataRef, B: Backend>(
+pub fn automorphism_sub_inplace<DataRhs: DataRef, B: Backend>(
     &mut self,
     module: &Module<B>,
     rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>,
@@ -204,23 +232,33 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxBigSubSmallAInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxBigSubSmallInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
         self.assert_keyswitch_inplace(module, &rhs.key, scratch);
     }
-    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
+    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
     let mut res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
-    (0..self.cols()).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
-        module.vec_znx_big_sub_small_a_inplace(&mut res_big, i, &self.data, i);
-        module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
+        module.vec_znx_big_sub_small_inplace(&mut res_big, i, &self.data, i);
+        module.vec_znx_big_normalize(
+            self.base2k().into(),
+            &mut self.data,
+            i,
+            rhs.base2k().into(),
+            &res_big,
+            i,
+            scratch_1,
+        );
     })
 }
 
-pub fn automorphism_sub_ba<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
+pub fn automorphism_sub_negate<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
     &mut self,
     module: &Module<B>,
     lhs: &GLWECiphertext<DataLhs>,
@@ -237,23 +275,33 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxBigSubSmallBInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxBigSubSmallNegateInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
         self.assert_keyswitch(module, lhs, &rhs.key, scratch);
     }
-    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
+    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
     let mut res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
-    (0..self.cols()).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
-        module.vec_znx_big_sub_small_b_inplace(&mut res_big, i, &lhs.data, i);
-        module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
+        module.vec_znx_big_sub_small_negate_inplace(&mut res_big, i, &lhs.data, i);
+        module.vec_znx_big_normalize(
+            self.base2k().into(),
+            &mut self.data,
+            i,
+            rhs.base2k().into(),
+            &res_big,
+            i,
+            scratch_1,
+        );
     })
 }
 
-pub fn automorphism_sub_ba_inplace<DataRhs: DataRef, B: Backend>(
+pub fn automorphism_sub_negate_inplace<DataRhs: DataRef, B: Backend>(
     &mut self,
     module: &Module<B>,
     rhs: &GGLWEAutomorphismKeyPrepared<DataRhs, B>,
@@ -269,19 +317,29 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
         + VecZnxBigAddSmallInplace<B>
         + VecZnxBigNormalize<B>
         + VecZnxBigAutomorphismInplace<B>
-        + VecZnxBigSubSmallBInplace<B>,
-    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+        + VecZnxBigSubSmallNegateInplace<B>
+        + VecZnxNormalizeTmpBytes
+        + VecZnxNormalize<B>,
+    Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
         self.assert_keyswitch_inplace(module, &rhs.key, scratch);
     }
-    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // TODO: optimise size
+    let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimise size
     let mut res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, &rhs.key, scratch_1);
-    (0..self.cols()).for_each(|i| {
+    (0..(self.rank() + 1).into()).for_each(|i| {
         module.vec_znx_big_automorphism_inplace(rhs.p(), &mut res_big, i, scratch_1);
-        module.vec_znx_big_sub_small_b_inplace(&mut res_big, i, &self.data, i);
-        module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
+        module.vec_znx_big_sub_small_negate_inplace(&mut res_big, i, &self.data, i);
+        module.vec_znx_big_normalize(
+            self.base2k().into(),
+            &mut self.data,
+            i,
+            rhs.base2k().into(),
+            &res_big,
+            i,
+            scratch_1,
+        );
     })
 }
 }
@@ -1,31 +1,46 @@
 use poulpy_hal::{
     api::{
-        ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
-        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
-        VmpApplyDftToDftTmpBytes,
+        ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
+        VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
+        VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 };
 
 use crate::{
     TakeGLWECt,
-    layouts::{GLWECiphertext, Infos, LWECiphertext, prepared::GLWEToLWESwitchingKeyPrepared},
+    layouts::{
+        GGLWELayoutInfos, GLWECiphertext, GLWECiphertextLayout, GLWEInfos, LWECiphertext, LWEInfos, Rank,
+        prepared::GLWEToLWESwitchingKeyPrepared,
+    },
 };
 
 impl LWECiphertext<Vec<u8>> {
-    pub fn from_glwe_scratch_space<B: Backend>(
+    pub fn from_glwe_scratch_space<B: Backend, OUT, IN, KEY>(
         module: &Module<B>,
-        basek: usize,
-        k_lwe: usize,
-        k_glwe: usize,
-        k_ksk: usize,
-        rank: usize,
+        lwe_infos: &OUT,
+        glwe_infos: &IN,
+        key_infos: &KEY,
     ) -> usize
     where
-        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
+        OUT: LWEInfos,
+        IN: GLWEInfos,
+        KEY: GGLWELayoutInfos,
+        Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
    {
-        GLWECiphertext::bytes_of(module.n(), basek, k_lwe, 1)
-            + GLWECiphertext::keyswitch_scratch_space(module, basek, k_lwe, k_glwe, k_ksk, 1, rank, 1)
+        let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+            n: module.n().into(),
+            base2k: lwe_infos.base2k(),
+            k: lwe_infos.k(),
+            rank: Rank(1),
+        };
+
+        GLWECiphertext::alloc_bytes_with(
+            module.n().into(),
+            lwe_infos.base2k(),
+            lwe_infos.k(),
+            1u32.into(),
+        ) + GLWECiphertext::keyswitch_scratch_space(module, &glwe_layout, glwe_infos, key_infos)
    }
 }
@@ -34,10 +49,11 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
     #[cfg(debug_assertions)]
     {
         assert!(self.n() <= a.n());
+        assert!(self.base2k() == a.base2k());
     }
 
     let min_size: usize = self.size().min(a.size());
-    let n: usize = self.n();
+    let n: usize = self.n().into();
 
     self.data.zero();
     (0..min_size).for_each(|i| {
@@ -64,15 +80,26 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
         + VecZnxDftApply<B>
         + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigNormalize<B>,
-    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt,
+        + VecZnxBigNormalize<B>
+        + VecZnxNormalize<B>
+        + VecZnxNormalizeTmpBytes,
+    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(self.basek(), a.basek());
-        assert_eq!(a.n(), ks.n());
+        assert_eq!(a.n(), module.n() as u32);
+        assert_eq!(ks.n(), module.n() as u32);
+        assert!(self.n() <= module.n() as u32);
    }
 
-    let (mut tmp_glwe, scratch_1) = scratch.take_glwe_ct(a.n(), a.basek(), self.k(), 1);
+    let glwe_layout: GLWECiphertextLayout = GLWECiphertextLayout {
+        n: module.n().into(),
+        base2k: self.base2k(),
+        k: self.k(),
+        rank: Rank(1),
+    };
+
+    let (mut tmp_glwe, scratch_1) = scratch.take_glwe_ct(&glwe_layout);
    tmp_glwe.keyswitch(module, a, &ks.0, scratch_1);
    self.sample_extract(&tmp_glwe);
 }
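The conversion route itself is unchanged in outline: key-switch the input GLWE down to a rank-1 ciphertext described by an explicit layout, then sample-extract the constant coefficient into the LWE output. What is new is that the temporary rank-1 ciphertext is built from the LWE output's own `base2k()`/`k()`, so the key switch normalizes into the output's base even when it differs from the input's. In toy form, the extraction step reads off column 0 (sign and coefficient-ordering details of a real sample extraction are elided here):

fn main() {
    let glwe_body = [7i64, -3, 5, 1]; // constant-term column of a toy rank-1 GLWE
    let lwe_b = glwe_body[0]; // the LWE body is the constant coefficient
    assert_eq!(lwe_b, 7);
}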
@@ -1,31 +1,46 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||||
VmpApplyDftToDftTmpBytes,
|
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
|
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, ZnxView, ZnxViewMut, ZnxZero},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
TakeGLWECt,
|
TakeGLWECt,
|
||||||
layouts::{GLWECiphertext, Infos, LWECiphertext, prepared::LWEToGLWESwitchingKeyPrepared},
|
layouts::{
|
||||||
|
GGLWELayoutInfos, GLWECiphertext, GLWECiphertextLayout, GLWEInfos, LWECiphertext, LWEInfos,
|
||||||
|
prepared::LWEToGLWESwitchingKeyPrepared,
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
impl GLWECiphertext<Vec<u8>> {
|
impl GLWECiphertext<Vec<u8>> {
|
||||||
pub fn from_lwe_scratch_space<B: Backend>(
|
pub fn from_lwe_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
basek: usize,
|
glwe_infos: &OUT,
|
||||||
k_lwe: usize,
|
lwe_infos: &IN,
|
||||||
k_glwe: usize,
|
key_infos: &KEY,
|
||||||
k_ksk: usize,
|
|
||||||
rank: usize,
|
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
OUT: GLWEInfos,
|
||||||
|
IN: LWEInfos,
|
||||||
|
KEY: GGLWELayoutInfos,
|
||||||
|
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
GLWECiphertext::keyswitch_scratch_space(module, basek, k_glwe, k_lwe, k_ksk, 1, 1, rank)
|
let ct: usize = GLWECiphertext::alloc_bytes_with(
|
||||||
+ GLWECiphertext::bytes_of(module.n(), basek, k_lwe, 1)
|
module.n().into(),
|
||||||
|
key_infos.base2k(),
|
||||||
|
lwe_infos.k().max(glwe_infos.k()),
|
||||||
|
1u32.into(),
|
||||||
|
);
|
||||||
|
let ks: usize = GLWECiphertext::keyswitch_inplace_scratch_space(module, glwe_infos, key_infos);
|
||||||
|
if lwe_infos.base2k() == key_infos.base2k() {
|
||||||
|
ct + ks
|
||||||
|
} else {
|
||||||
|
let a_conv = VecZnx::alloc_bytes(module.n(), 1, lwe_infos.size()) + module.vec_znx_normalize_tmp_bytes();
|
||||||
|
ct + a_conv + ks
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,25 +62,68 @@ impl<D: DataMut> GLWECiphertext<D> {
         + VecZnxDftApply<B>
         + VecZnxIdftApplyConsume<B>
         + VecZnxBigAddSmallInplace<B>
-        + VecZnxBigNormalize<B>,
-    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt,
+        + VecZnxBigNormalize<B>
+        + VecZnxNormalize<B>
+        + VecZnxNormalizeTmpBytes,
+    Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeGLWECt + TakeVecZnx,
 {
     #[cfg(debug_assertions)]
     {
-        assert!(lwe.n() <= self.n());
-        assert_eq!(self.basek(), self.basek());
+        assert_eq!(self.n(), module.n() as u32);
+        assert_eq!(ksk.n(), module.n() as u32);
+        assert!(lwe.n() <= module.n() as u32);
     }

-    let (mut glwe, scratch_1) = scratch.take_glwe_ct(ksk.n(), lwe.basek(), lwe.k(), 1);
+    let (mut glwe, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout {
+        n: ksk.n(),
+        base2k: ksk.base2k(),
+        k: lwe.k(),
+        rank: 1u32.into(),
+    });
     glwe.data.zero();

-    let n_lwe: usize = lwe.n();
+    let n_lwe: usize = lwe.n().into();

-    (0..lwe.size()).for_each(|i| {
-        let data_lwe: &[i64] = lwe.data.at(0, i);
-        glwe.data.at_mut(0, i)[0] = data_lwe[0];
-        glwe.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
-    });
+    if lwe.base2k() == ksk.base2k() {
+        for i in 0..lwe.size() {
+            let data_lwe: &[i64] = lwe.data.at(0, i);
+            glwe.data.at_mut(0, i)[0] = data_lwe[0];
+            glwe.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
+        }
+    } else {
+        let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(module.n(), 1, lwe.size());
+        a_conv.zero();
+        for j in 0..lwe.size() {
+            let data_lwe: &[i64] = lwe.data.at(0, j);
+            a_conv.at_mut(0, j)[0] = data_lwe[0]
+        }
+
+        module.vec_znx_normalize(
+            ksk.base2k().into(),
+            &mut glwe.data,
+            0,
+            lwe.base2k().into(),
+            &a_conv,
+            0,
+            scratch_2,
+        );
+
+        a_conv.zero();
+        for j in 0..lwe.size() {
+            let data_lwe: &[i64] = lwe.data.at(0, j);
+            a_conv.at_mut(0, j)[..n_lwe].copy_from_slice(&data_lwe[1..]);
+        }
+
+        module.vec_znx_normalize(
+            ksk.base2k().into(),
+            &mut glwe.data,
+            1,
+            lwe.base2k().into(),
+            &a_conv,
+            0,
+            scratch_2,
+        );
+    }

     self.keyswitch(module, &glwe, &ksk.0, scratch_1);
 }

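Note: the `vec_znx_normalize` calls above take separate output and input `base2k` arguments, which is the point of cross-base2k normalization: the same torus value is re-encoded from limbs of one width into limbs of another. A plain-integer sketch of that re-encoding for a single coefficient, using balanced signed digits (names and sizes are illustrative, not the poulpy kernels):

// Decompose a fixed-point value into signed base-2^k limbs and back.
fn decompose(mut x: i128, base2k: u32, limbs: usize) -> Vec<i64> {
    let base = 1i128 << base2k;
    let half = base >> 1;
    let mut out = vec![0i64; limbs];
    // Least-significant limb first; digits kept in [-2^(k-1), 2^(k-1)).
    for d in out.iter_mut().rev() {
        let mut r = x % base;
        x /= base;
        if r >= half {
            r -= base;
            x += 1; // carry up
        } else if r < -half {
            r += base;
            x -= 1; // borrow up
        }
        *d = r as i64;
    }
    out
}

fn recompose(digits: &[i64], base2k: u32) -> i128 {
    digits.iter().fold(0i128, |acc, &d| (acc << base2k) + d as i128)
}

fn main() {
    let x: i128 = 123_456_789_012_345;
    // Same value, first as base-2^17 limbs, then re-encoded as base-2^18 limbs.
    let a = decompose(x, 17, 4);
    let b = decompose(recompose(&a, 17), 18, 4);
    assert_eq!(recompose(&a, 17), x);
    assert_eq!(recompose(&b, 18), x);
}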
@@ -6,14 +6,15 @@ use poulpy_hal::{
     layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch},
 };

-use crate::layouts::{GLWECiphertext, GLWEPlaintext, Infos, prepared::GLWESecretPrepared};
+use crate::layouts::{GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared};

 impl GLWECiphertext<Vec<u8>> {
-    pub fn decrypt_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn decrypt_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GLWEInfos,
         Module<B>: VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        let size: usize = k.div_ceil(basek);
+        let size: usize = infos.size();
         (module.vec_znx_normalize_tmp_bytes() | module.vec_znx_dft_alloc_bytes(1, size)) + module.vec_znx_dft_alloc_bytes(1, size)
     }
 }

@@ -41,15 +42,15 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
         assert_eq!(pt.n(), sk.n());
     }

-    let cols: usize = self.rank() + 1;
+    let cols: usize = (self.rank() + 1).into();

-    let (mut c0_big, scratch_1) = scratch.take_vec_znx_big(self.n(), 1, self.size()); // TODO optimize size when pt << ct
+    let (mut c0_big, scratch_1) = scratch.take_vec_znx_big(self.n().into(), 1, self.size()); // TODO optimize size when pt << ct
     c0_big.data_mut().fill(0);

     {
         (1..cols).for_each(|i| {
             // ci_dft = DFT(a[i]) * DFT(s[i])
-            let (mut ci_dft, _) = scratch_1.take_vec_znx_dft(self.n(), 1, self.size()); // TODO optimize size when pt << ct
+            let (mut ci_dft, _) = scratch_1.take_vec_znx_dft(self.n().into(), 1, self.size()); // TODO optimize size when pt << ct
             module.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, &self.data, i);
             module.svp_apply_dft_to_dft_inplace(&mut ci_dft, 0, &sk.data, i - 1);
             let ci_big = module.vec_znx_idft_apply_consume(ci_dft);

@@ -63,9 +64,17 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
     module.vec_znx_big_add_small_inplace(&mut c0_big, 0, &self.data, 0);

     // pt = norm(BIG(m + e))
-    module.vec_znx_big_normalize(self.basek(), &mut pt.data, 0, &c0_big, 0, scratch_1);
+    module.vec_znx_big_normalize(
+        self.base2k().into(),
+        &mut pt.data,
+        0,
+        self.base2k().into(),
+        &c0_big,
+        0,
+        scratch_1,
+    );

-    pt.basek = self.basek();
+    pt.base2k = self.base2k();
     pt.k = pt.k().min(self.k());
 }
 }

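Note: the decryption loop above computes pt = norm(b + sum_i a[i] * s[i]) over Z[X]/(X^N + 1), with the products done in the DFT domain and accumulated in the big domain before the final normalize. A toy version with plain i64 coefficients and schoolbook negacyclic multiplication (illustrative only, no noise):

// Multiply two polynomials modulo X^N + 1 (negacyclic convolution).
fn negacyclic_mul(a: &[i64], s: &[i64]) -> Vec<i64> {
    let n = a.len();
    let mut out = vec![0i64; n];
    for i in 0..n {
        for j in 0..n {
            let k = i + j;
            if k < n {
                out[k] += a[i] * s[j];
            } else {
                out[k - n] -= a[i] * s[j]; // X^N = -1
            }
        }
    }
    out
}

// pt = b + sum_i a[i] * s[i], one term per rank component.
fn decrypt(b: &[i64], a_parts: &[Vec<i64>], s_parts: &[Vec<i64>]) -> Vec<i64> {
    let mut pt = b.to_vec();
    for (a, s) in a_parts.iter().zip(s_parts) {
        for (p, v) in pt.iter_mut().zip(negacyclic_mul(a, s)) {
            *p += v;
        }
    }
    pt
}

fn main() {
    let s = vec![vec![1, 0, -1, 0]]; // rank-1 secret
    let m = vec![7, 0, 0, 0];        // "message", noiseless for the example
    let a = vec![vec![3, 1, 4, 1]];
    // Choose b = m - a*s, so decrypt returns m exactly.
    let mut b = m.clone();
    for (x, y) in b.iter_mut().zip(negacyclic_mul(&a[0], &s[0])) {
        *x -= y;
    }
    assert_eq!(decrypt(&b, &a, &s), m);
}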
@@ -4,7 +4,7 @@ use poulpy_hal::{
     oep::{ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl},
 };

-use crate::layouts::{Infos, LWECiphertext, LWEPlaintext, LWESecret, SetMetaData};
+use crate::layouts::{LWECiphertext, LWEInfos, LWEPlaintext, LWESecret};

 impl<DataSelf> LWECiphertext<DataSelf>
 where

@@ -31,13 +31,13 @@ where
             .sum::<i64>();
     });
     module.zn_normalize_inplace(
-        pt.n(),
-        self.basek(),
+        1,
+        self.base2k().into(),
         &mut pt.data,
         0,
         ScratchOwned::alloc(size_of::<i64>()).borrow(),
     );
-    pt.set_basek(self.basek());
-    pt.set_k(self.k().min(pt.size() * self.basek()));
+    pt.base2k = self.base2k();
+    pt.k = crate::layouts::TorusPrecision(self.k().0.min(pt.size() as u32 * self.base2k().0));
 }
 }

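Note: LWE decryption is the scalar analogue of the GLWE loop: b + <a, s>, followed by the base-2^k renormalization performed by `zn_normalize_inplace`. A toy round trip (illustrative values, single limb, no noise):

// Raw LWE decrypt: body plus the inner product of mask and secret.
fn lwe_decrypt_raw(b: i64, a: &[i64], s: &[i64]) -> i64 {
    b + a.iter().zip(s).map(|(x, y)| x * y).sum::<i64>()
}

fn main() {
    let s = [1, -1, 0, 1];
    let a = [5, 3, 2, 7];
    let m = 42;
    let b = m - lwe_decrypt_raw(0, &a, &s); // b = m - <a, s>
    assert_eq!(lwe_decrypt_raw(b, &a, &s), m);
}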
@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes,
         VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,

@@ -12,18 +12,20 @@ use poulpy_hal::{
 use crate::{
     TakeGLWESecret, TakeGLWESecretPrepared,
     layouts::{
-        GLWESecret,
+        GGLWELayoutInfos, GLWEInfos, GLWESecret, LWEInfos,
         compressed::{GGLWEAutomorphismKeyCompressed, GGLWESwitchingKeyCompressed},
     },
 };

 impl GGLWEAutomorphismKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
     {
-        GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, basek, k, rank, rank)
-            + GLWESecret::bytes_of(module.n(), rank)
+        assert_eq!(module.n() as u32, infos.n());
+        GGLWESwitchingKeyCompressed::encrypt_sk_scratch_space(module, infos)
+            + GLWESecret::alloc_bytes_with(infos.n(), infos.rank_out())
     }
 }

@@ -49,7 +51,7 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -60,26 +62,21 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKeyCompressed<DataSelf> {
 {
     #[cfg(debug_assertions)]
     {
-        use crate::layouts::Infos;

         assert_eq!(self.n(), sk.n());
         assert_eq!(self.rank_out(), self.rank_in());
-        assert_eq!(sk.rank(), self.rank());
+        assert_eq!(sk.rank(), self.rank_out());
         assert!(
-            scratch.available()
-                >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
-            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
+            scratch.available() >= GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self),
+            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {}",
             scratch.available(),
-            self.rank(),
-            self.size(),
-            GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
+            GGLWEAutomorphismKeyCompressed::encrypt_sk_scratch_space(module, self)
         )
     }

     let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());

     {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank_out().into()).for_each(|i| {
             module.vec_znx_automorphism(
                 module.galois_element_inv(p),
                 &mut sk_out.data.as_vec_znx_mut(),

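Note: `vec_znx_automorphism` above applies the Galois map X -> X^g to the secret before encrypting it. Over Z[X]/(X^N + 1), coefficient i lands at position i*g mod 2N, negated when it wraps past N. A plain-vector sketch of that map (illustrative, not the poulpy kernel):

// Apply X -> X^g to a polynomial modulo X^N + 1.
fn automorphism(poly: &[i64], g: usize) -> Vec<i64> {
    let n = poly.len();
    let mut out = vec![0i64; n];
    for (i, &c) in poly.iter().enumerate() {
        let t = (i * g) % (2 * n);
        if t < n {
            out[t] += c;
        } else {
            out[t - n] -= c; // X^N = -1
        }
    }
    out
}

fn main() {
    // X -> X^3 maps 1 + X to 1 + X^3 over Z[X]/(X^4 + 1).
    assert_eq!(automorphism(&[1, 1, 0, 0], 3), vec![1, 0, 0, 1]);
    // 3 is self-inverse mod 2N = 8, so applying it twice is the identity.
    assert_eq!(automorphism(&automorphism(&[1, 1, 0, 0], 3), 3), vec![1, 1, 0, 0]);
}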
@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
     source::Source,

@@ -11,15 +11,16 @@ use poulpy_hal::{
 use crate::{
     TakeGLWEPt,
     encryption::{SIGMA, glwe_encrypt_sk_internal},
-    layouts::{GGLWECiphertext, Infos, compressed::GGLWECiphertextCompressed, prepared::GLWESecretPrepared},
+    layouts::{GGLWECiphertext, GGLWELayoutInfos, LWEInfos, compressed::GGLWECiphertextCompressed, prepared::GLWESecretPrepared},
 };

 impl GGLWECiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+        GGLWECiphertext::encrypt_sk_scratch_space(module, infos)
     }
 }

@@ -42,7 +43,7 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -56,7 +57,7 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {

     assert_eq!(
         self.rank_in(),
-        pt.cols(),
+        pt.cols() as u32,
         "self.rank_in(): {} != pt.cols(): {}",
         self.rank_in(),
         pt.cols()

@@ -69,36 +70,33 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
         sk.rank()
     );
     assert_eq!(self.n(), sk.n());
-    assert_eq!(pt.n(), sk.n());
+    assert_eq!(pt.n() as u32, sk.n());
     assert!(
-        scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k()),
-        "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
+        scratch.available() >= GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self),
+        "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space: {}",
         scratch.available(),
-        self.rank(),
-        self.size(),
-        GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self.basek(), self.k())
+        GGLWECiphertextCompressed::encrypt_sk_scratch_space(module, self)
    );
    assert!(
-        self.rows() * self.digits() * self.basek() <= self.k(),
-        "self.rows() : {} * self.digits() : {} * self.basek() : {} = {} >= self.k() = {}",
+        self.rows().0 * self.digits().0 * self.base2k().0 <= self.k().0,
+        "self.rows() : {} * self.digits() : {} * self.base2k() : {} = {} >= self.k() = {}",
         self.rows(),
         self.digits(),
-        self.basek(),
-        self.rows() * self.digits() * self.basek(),
+        self.base2k(),
+        self.rows().0 * self.digits().0 * self.base2k().0,
         self.k()
     );
 }

-    let rows: usize = self.rows();
-    let digits: usize = self.digits();
-    let basek: usize = self.basek();
-    let k: usize = self.k();
-    let rank_in: usize = self.rank_in();
-    let cols: usize = self.rank_out() + 1;
+    let rows: usize = self.rows().into();
+    let digits: usize = self.digits().into();
+    let base2k: usize = self.base2k().into();
+    let rank_in: usize = self.rank_in().into();
+    let cols: usize = (self.rank_out() + 1).into();

     let mut source_xa = Source::new(seed);

-    let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(sk.n(), basek, k);
+    let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self);
     (0..rank_in).for_each(|col_i| {
         (0..rows).for_each(|row_i| {
             // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt

@@ -110,15 +108,15 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {
                 pt,
                 col_i,
             );
-            module.vec_znx_normalize_inplace(basek, &mut tmp_pt.data, 0, scrach_1);
+            module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1);

             let (seed, mut source_xa_tmp) = source_xa.branch();
             self.seed[col_i * rows + row_i] = seed;

             glwe_encrypt_sk_internal(
                 module,
-                self.basek(),
-                self.k(),
+                self.base2k().into(),
+                self.k().into(),
                 &mut self.at_mut(row_i, col_i).data,
                 cols,
                 true,

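Note: the `rows * digits * base2k <= k` assert above (repeated in the non-compressed GGLWE path) enforces the gadget geometry: row r carries the message scaled down by 2^((r+1)*digits*base2k), so every row must fit inside the ciphertext precision k. A sketch of the implied bound (illustrative arithmetic, not the poulpy API):

// Maximum number of gadget rows that fit in k bits of torus precision.
fn max_rows(k: u32, digits: u32, base2k: u32) -> u32 {
    k / (digits * base2k)
}

fn main() {
    // k = 54 bits, base2k = 18, digits = 1 -> at most 3 gadget rows.
    assert_eq!(max_rows(54, 1, 18), 3);
    // With digits = 2, each row consumes two limbs -> at most 1 row.
    assert_eq!(max_rows(54, 2, 18), 1);
}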
@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,

@@ -11,23 +11,21 @@ use poulpy_hal::{

 use crate::{
     TakeGLWESecretPrepared,
-    layouts::{GGLWECiphertext, GLWESecret, compressed::GGLWESwitchingKeyCompressed, prepared::GLWESecretPrepared},
+    layouts::{
+        Degree, GGLWECiphertext, GGLWELayoutInfos, GLWEInfos, GLWESecret, LWEInfos, compressed::GGLWESwitchingKeyCompressed,
+        prepared::GLWESecretPrepared,
+    },
 };

 impl GGLWESwitchingKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(
-        module: &Module<B>,
-        basek: usize,
-        k: usize,
-        rank_in: usize,
-        rank_out: usize,
-    ) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + SvpPPolAllocBytes,
     {
-        (GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | ScalarZnx::alloc_bytes(module.n(), 1))
-            + ScalarZnx::alloc_bytes(module.n(), rank_in)
-            + GLWESecretPrepared::bytes_of(module, rank_out)
+        (GGLWECiphertext::encrypt_sk_scratch_space(module, infos) | ScalarZnx::alloc_bytes(module.n(), 1))
+            + ScalarZnx::alloc_bytes(module.n(), infos.rank_in().into())
+            + GLWESecretPrepared::alloc_bytes_with(module, infos.rank_out())
     }
 }

@@ -52,7 +50,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -63,35 +61,22 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
 {
     #[cfg(debug_assertions)]
     {
-        use crate::layouts::{GGLWESwitchingKey, Infos};
+        use crate::layouts::GGLWESwitchingKey;

-        assert!(sk_in.n() <= module.n());
-        assert!(sk_out.n() <= module.n());
+        assert!(sk_in.n().0 <= module.n() as u32);
+        assert!(sk_out.n().0 <= module.n() as u32);
         assert!(
-            scratch.available()
-                >= GGLWESwitchingKey::encrypt_sk_scratch_space(
-                    module,
-                    self.basek(),
-                    self.k(),
-                    self.rank_in(),
-                    self.rank_out()
-                ),
+            scratch.available() >= GGLWESwitchingKey::encrypt_sk_scratch_space(module, self),
             "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
             scratch.available(),
-            GGLWESwitchingKey::encrypt_sk_scratch_space(
-                module,
-                self.basek(),
-                self.k(),
-                self.rank_in(),
-                self.rank_out()
-            )
+            GGLWESwitchingKey::encrypt_sk_scratch_space(module, self)
         )
     }

-    let n: usize = sk_in.n().max(sk_out.n());
+    let n: usize = sk_in.n().max(sk_out.n()).into();

-    let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank());
-    (0..sk_in.rank()).for_each(|i| {
+    let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into());
+    (0..sk_in.rank().into()).for_each(|i| {
         module.vec_znx_switch_ring(
             &mut sk_in_tmp.as_vec_znx_mut(),
             i,

@@ -100,10 +85,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
         );
     });

-    let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(n, sk_out.rank());
+    let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(Degree(n as u32), sk_out.rank());
     {
         let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
-        (0..sk_out.rank()).for_each(|i| {
+        (0..sk_out.rank().into()).for_each(|i| {
             module.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
             module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
         });

@@ -117,7 +102,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKeyCompressed<DataSelf> {
         source_xe,
         scratch_2,
     );
-    self.sk_in_n = sk_in.n();
-    self.sk_out_n = sk_out.n();
+    self.sk_in_n = sk_in.n().into();
+    self.sk_out_n = sk_out.n().into();
 }
 }

@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx,
         TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes,
         VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,

@@ -11,16 +11,20 @@ use poulpy_hal::{

 use crate::{
     TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{GGLWETensorKey, GLWESecret, Infos, compressed::GGLWETensorKeyCompressed, prepared::Prepare},
+    layouts::{
+        GGLWELayoutInfos, GGLWETensorKey, GLWEInfos, GLWESecret, LWEInfos, Rank, compressed::GGLWETensorKeyCompressed,
+        prepared::Prepare,
+    },
 };

 impl GGLWETensorKeyCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>:
             SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
     {
-        GGLWETensorKey::encrypt_sk_scratch_space(module, basek, k, rank)
+        GGLWETensorKey::encrypt_sk_scratch_space(module, infos)
     }
 }

@@ -42,7 +46,7 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -63,37 +67,38 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {
 {
     #[cfg(debug_assertions)]
     {
-        assert_eq!(self.rank(), sk.rank());
+        assert_eq!(self.rank_out(), sk.rank());
         assert_eq!(self.n(), sk.n());
     }

-    let n: usize = sk.n();
-    let rank: usize = self.rank();
+    let n: usize = sk.n().into();
+    let rank: usize = self.rank_out().into();

-    let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(n, rank);
+    let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(sk.n(), self.rank_out());
     sk_dft_prep.prepare(module, sk, scratch_1);

     let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n, rank, 1);

-    (0..rank).for_each(|i| {
+    for i in 0..rank {
         module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
-    });
+    }

     let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n, 1, 1);
-    let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(n, 1);
+    let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(sk.n(), Rank(1));
     let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(n, 1, 1);

     let mut source_xa: Source = Source::new(seed_xa);

-    (0..rank).for_each(|i| {
-        (i..rank).for_each(|j| {
+    for i in 0..rank {
+        for j in i..rank {
             module.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_dft_prep.data, j, &sk_dft, i);

             module.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
             module.vec_znx_big_normalize(
-                self.basek(),
+                self.base2k().into(),
                 &mut sk_ij.data.as_vec_znx_mut(),
                 0,
+                self.base2k().into(),
                 &sk_ij_big,
                 0,
                 scratch_5,

@@ -103,7 +108,7 @@ impl<DataSelf: DataMut> GGLWETensorKeyCompressed<DataSelf> {

             self.at_mut(i, j)
                 .encrypt_sk(module, &sk_ij, sk, seed_xa_tmp, source_xe, scratch_5);
-        });
-    })
+        }
+    }
 }
 }

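Note: the `for j in i..rank` loop above only encrypts the products s_i * s_j with j >= i, since the tensor key is symmetric; a rank-r key needs r*(r+1)/2 entries. A sketch of the (i, j) -> flat index map such a triangular layout implies (hypothetical; the actual `at_mut` layout may differ):

// Flat index of the (i, j) entry, i <= j, in an upper-triangular layout.
fn tri_index(i: usize, j: usize, rank: usize) -> usize {
    debug_assert!(i <= j && j < rank);
    i * rank - i * (i + 1) / 2 + j
}

fn main() {
    let rank = 3;
    let mut seen = vec![];
    for i in 0..rank {
        for j in i..rank {
            seen.push(tri_index(i, j, rank));
        }
    }
    // The r*(r+1)/2 pairs enumerate 0..6 without gaps.
    assert_eq!(seen, (0..rank * (rank + 1) / 2).collect::<Vec<_>>());
}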
@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
     source::Source,

@@ -11,15 +11,18 @@ use poulpy_hal::{
 use crate::{
     TakeGLWEPt,
     encryption::{SIGMA, glwe_encrypt_sk_internal},
-    layouts::{GGSWCiphertext, Infos, compressed::GGSWCiphertextCompressed, prepared::GLWESecretPrepared},
+    layouts::{
+        GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, compressed::GGSWCiphertextCompressed, prepared::GLWESecretPrepared,
+    },
 };

 impl GGSWCiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGSWInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        GGSWCiphertext::encrypt_sk_scratch_space(module, basek, k, rank)
+        GGSWCiphertext::encrypt_sk_scratch_space(module, infos)
     }
 }

@@ -42,7 +45,7 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -56,27 +59,26 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {

     assert_eq!(self.rank(), sk.rank());
     assert_eq!(self.n(), sk.n());
-    assert_eq!(pt.n(), sk.n());
+    assert_eq!(pt.n() as u32, sk.n());
 }

-    let basek: usize = self.basek();
-    let k: usize = self.k();
-    let rank: usize = self.rank();
+    let base2k: usize = self.base2k().into();
+    let rank: usize = self.rank().into();
     let cols: usize = rank + 1;
-    let digits: usize = self.digits();
+    let digits: usize = self.digits().into();

-    let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(self.n(), basek, k);
+    let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&self.glwe_layout());

     let mut source = Source::new(seed_xa);

-    self.seed = vec![[0u8; 32]; self.rows() * cols];
+    self.seed = vec![[0u8; 32]; self.rows().0 as usize * cols];

-    (0..self.rows()).for_each(|row_i| {
+    (0..self.rows().into()).for_each(|row_i| {
         tmp_pt.data.zero();

         // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
         module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (digits - 1) + row_i * digits, pt, 0);
-        module.vec_znx_normalize_inplace(basek, &mut tmp_pt.data, 0, scratch_1);
+        module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);

         (0..rank + 1).for_each(|col_j| {
             // rlwe encrypt of vec_znx_pt into vec_znx_ct

@@ -87,8 +89,8 @@ impl<DataSelf: DataMut> GGSWCiphertextCompressed<DataSelf> {

             glwe_encrypt_sk_internal(
                 module,
-                self.basek(),
-                self.k(),
+                self.base2k().into(),
+                self.k().into(),
                 &mut self.at_mut(row_i, col_j).data,
                 cols,
                 true,

@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
-        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,

@@ -10,15 +10,18 @@ use poulpy_hal::{
 use crate::{
     encryption::{SIGMA, glwe_ct::glwe_encrypt_sk_internal},
-    layouts::{GLWECiphertext, GLWEPlaintext, Infos, compressed::GLWECiphertextCompressed, prepared::GLWESecretPrepared},
+    layouts::{
+        GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos, compressed::GLWECiphertextCompressed, prepared::GLWESecretPrepared,
+    },
 };

 impl GLWECiphertextCompressed<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GLWEInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
     {
-        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
+        GLWECiphertext::encrypt_sk_scratch_space(module, infos)
     }
 }

@@ -40,7 +43,7 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -68,7 +71,7 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -77,11 +80,11 @@ impl<D: DataMut> GLWECiphertextCompressed<D> {
     Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
     let mut source_xa = Source::new(seed_xa);
-    let cols: usize = self.rank() + 1;
+    let cols: usize = (self.rank() + 1).into();
     glwe_encrypt_sk_internal(
         module,
-        self.basek(),
-        self.k(),
+        self.base2k().into(),
+        self.k().into(),
         &mut self.data,
         cols,
         true,

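Note: the compressed ciphertexts in this commit store only the body plus a 32-byte seed; the uniform mask is re-expanded deterministically from the seed (`Source::new` / `branch` in the real code). A toy sketch of the round trip with a xorshift PRNG (the PRNG, seed width, and sizes are illustrative, not poulpy's sampler):

// Tiny deterministic generator standing in for the seeded sampler.
struct Prng(u64);

impl Prng {
    fn next_u64(&mut self) -> u64 {
        // xorshift64: the whole stream is a function of the seed.
        self.0 ^= self.0 << 13;
        self.0 ^= self.0 >> 7;
        self.0 ^= self.0 << 17;
        self.0
    }
}

fn expand_mask(seed: u64, n: usize) -> Vec<u64> {
    let mut p = Prng(seed);
    (0..n).map(|_| p.next_u64()).collect()
}

fn main() {
    // Encryptor and decompressor derive the identical mask from the seed,
    // so only (body, seed) needs to be stored or transmitted.
    assert_eq!(expand_mask(0xdead_beef, 8), expand_mask(0xdead_beef, 8));
}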
@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphism, VecZnxBigNormalize, VecZnxDftAllocBytes,
         VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, Scratch},
     source::Source,

@@ -11,19 +11,33 @@ use poulpy_hal::{

 use crate::{
     TakeGLWESecret, TakeGLWESecretPrepared,
-    layouts::{GGLWEAutomorphismKey, GGLWESwitchingKey, GLWESecret},
+    layouts::{GGLWEAutomorphismKey, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, GLWESecret, LWEInfos},
 };

 impl GGLWEAutomorphismKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank, rank) + GLWESecret::bytes_of(module.n(), rank)
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
+        );
+        GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos) + GLWESecret::alloc_bytes(&infos.glwe_layout())
     }

-    pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, _basek: usize, _k: usize, _rank: usize) -> usize {
-        GGLWESwitchingKey::encrypt_pk_scratch_space(module, _basek, _k, _rank, _rank)
+    pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        assert_eq!(
+            _infos.rank_in(),
+            _infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
+        );
+        GGLWESwitchingKey::encrypt_pk_scratch_space(module, _infos)
     }
 }

@@ -46,7 +60,7 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -60,26 +74,23 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
 {
     #[cfg(debug_assertions)]
     {
-        use crate::layouts::Infos;
+        use crate::layouts::{GLWEInfos, LWEInfos};

         assert_eq!(self.n(), sk.n());
         assert_eq!(self.rank_out(), self.rank_in());
-        assert_eq!(sk.rank(), self.rank());
+        assert_eq!(sk.rank(), self.rank_out());
         assert!(
-            scratch.available()
-                >= GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank()),
-            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
+            scratch.available() >= GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self),
+            "scratch.available(): {} < AutomorphismKey::encrypt_sk_scratch_space: {:?}",
             scratch.available(),
-            self.rank(),
-            self.size(),
-            GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self.basek(), self.k(), self.rank())
+            GGLWEAutomorphismKey::encrypt_sk_scratch_space(module, self)
         )
     }

     let (mut sk_out, scratch_1) = scratch.take_glwe_secret(sk.n(), sk.rank());

     {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank_out().into()).for_each(|i| {
             module.vec_znx_automorphism(
                 module.galois_element_inv(p),
                 &mut sk_out.data.as_vec_znx_mut(),

@@ -2,7 +2,7 @@ use poulpy_hal::{
     api::{
         ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
         VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+        VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, ZnxZero},
     source::Source,

@@ -10,19 +10,23 @@ use poulpy_hal::{

 use crate::{
     TakeGLWEPt,
-    layouts::{GGLWECiphertext, GLWECiphertext, GLWEPlaintext, Infos, prepared::GLWESecretPrepared},
+    layouts::{GGLWECiphertext, GGLWELayoutInfos, GLWECiphertext, GLWEPlaintext, LWEInfos, prepared::GLWESecretPrepared},
 };

 impl GGLWECiphertext<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
-            + (GLWEPlaintext::byte_of(module.n(), basek, k) | module.vec_znx_normalize_tmp_bytes())
+        GLWECiphertext::encrypt_sk_scratch_space(module, &infos.glwe_layout())
+            + (GLWEPlaintext::alloc_bytes(&infos.glwe_layout()) | module.vec_znx_normalize_tmp_bytes())
     }

-    pub fn encrypt_pk_scratch_space<B: Backend>(_module: &Module<B>, _basek: usize, _k: usize, _rank: usize) -> usize {
+    pub fn encrypt_pk_scratch_space<B: Backend, A>(_module: &Module<B>, _infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
         unimplemented!()
     }
 }

@@ -46,7 +50,7 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -60,7 +64,7 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {

     assert_eq!(
         self.rank_in(),
-        pt.cols(),
+        pt.cols() as u32,
         "self.rank_in(): {} != pt.cols(): {}",
         self.rank_in(),
         pt.cols()

@@ -73,33 +77,32 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
         sk.rank()
     );
     assert_eq!(self.n(), sk.n());
-    assert_eq!(pt.n(), sk.n());
+    assert_eq!(pt.n() as u32, sk.n());
     assert!(
-        scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
+        scratch.available() >= GGLWECiphertext::encrypt_sk_scratch_space(module, self),
         "scratch.available: {} < GGLWECiphertext::encrypt_sk_scratch_space(module, self.rank()={}, self.size()={}): {}",
         scratch.available(),
-        self.rank(),
+        self.rank_out(),
         self.size(),
-        GGLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
+        GGLWECiphertext::encrypt_sk_scratch_space(module, self)
     );
     assert!(
-        self.rows() * self.digits() * self.basek() <= self.k(),
-        "self.rows() : {} * self.digits() : {} * self.basek() : {} = {} >= self.k() = {}",
+        self.rows().0 * self.digits().0 * self.base2k().0 <= self.k().0,
+        "self.rows() : {} * self.digits() : {} * self.base2k() : {} = {} >= self.k() = {}",
         self.rows(),
         self.digits(),
-        self.basek(),
-        self.rows() * self.digits() * self.basek(),
+        self.base2k(),
+        self.rows().0 * self.digits().0 * self.base2k().0,
         self.k()
     );
 }

-    let rows: usize = self.rows();
-    let digits: usize = self.digits();
-    let basek: usize = self.basek();
-    let k: usize = self.k();
-    let rank_in: usize = self.rank_in();
+    let rows: usize = self.rows().into();
+    let digits: usize = self.digits().into();
+    let base2k: usize = self.base2k().into();
+    let rank_in: usize = self.rank_in().into();

-    let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(sk.n(), basek, k);
+    let (mut tmp_pt, scrach_1) = scratch.take_glwe_pt(self);
     // For each input column (i.e. rank) produces a GGLWE ciphertext of rank_out+1 columns
     //
     // Example for ksk rank 2 to rank 3:

@@ -122,7 +125,7 @@ impl<DataSelf: DataMut> GGLWECiphertext<DataSelf> {
                 pt,
                 col_i,
             );
-            module.vec_znx_normalize_inplace(basek, &mut tmp_pt.data, 0, scrach_1);
+            module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scrach_1);

             // rlwe encrypt of vec_znx_pt into vec_znx_ct
             self.at_mut(row_i, col_i)

@@ -3,7 +3,7 @@ use poulpy_hal::{
         ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
         VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
         VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub,
-        VecZnxSubABInplace, VecZnxSwitchRing,
+        VecZnxSubInplace, VecZnxSwitchRing,
     },
     layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch},
     source::Source,

@@ -11,33 +11,28 @@ use poulpy_hal::{

 use crate::{
     TakeGLWESecretPrepared,
-    layouts::{GGLWECiphertext, GGLWESwitchingKey, GLWESecret, prepared::GLWESecretPrepared},
+    layouts::{
+        Degree, GGLWECiphertext, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, GLWESecret, LWEInfos,
+        prepared::GLWESecretPrepared,
+    },
 };

 impl GGLWESwitchingKey<Vec<u8>> {
-    pub fn encrypt_sk_scratch_space<B: Backend>(
-        module: &Module<B>,
-        basek: usize,
-        k: usize,
-        rank_in: usize,
-        rank_out: usize,
-    ) -> usize
+    pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
     where
+        A: GGLWELayoutInfos,
         Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
     {
-        (GGLWECiphertext::encrypt_sk_scratch_space(module, basek, k) | ScalarZnx::alloc_bytes(module.n(), 1))
-            + ScalarZnx::alloc_bytes(module.n(), rank_in)
-            + GLWESecretPrepared::bytes_of(module, rank_out)
+        (GGLWECiphertext::encrypt_sk_scratch_space(module, infos) | ScalarZnx::alloc_bytes(module.n(), 1))
+            + ScalarZnx::alloc_bytes(module.n(), infos.rank_in().into())
+            + GLWESecretPrepared::alloc_bytes(module, &infos.glwe_layout())
     }

-    pub fn encrypt_pk_scratch_space<B: Backend>(
-        module: &Module<B>,
-        _basek: usize,
-        _k: usize,
-        _rank_in: usize,
-        _rank_out: usize,
-    ) -> usize {
-        GGLWECiphertext::encrypt_pk_scratch_space(module, _basek, _k, _rank_out)
+    pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, _infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        GGLWECiphertext::encrypt_pk_scratch_space(module, _infos)
     }
 }

@@ -60,7 +55,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
         + VecZnxIdftApplyConsume<B>
         + VecZnxNormalizeTmpBytes
         + VecZnxFillUniform
-        + VecZnxSubABInplace
+        + VecZnxSubInplace
         + VecZnxAddInplace
         + VecZnxNormalizeInplace<B>
         + VecZnxAddNormal

@@ -73,35 +68,20 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
 {
     #[cfg(debug_assertions)]
     {
-        use crate::layouts::Infos;
-
-        assert!(sk_in.n() <= module.n());
-        assert!(sk_out.n() <= module.n());
+        assert!(sk_in.n().0 <= module.n() as u32);
+        assert!(sk_out.n().0 <= module.n() as u32);
         assert!(
-            scratch.available()
-                >= GGLWESwitchingKey::encrypt_sk_scratch_space(
-                    module,
-                    self.basek(),
-                    self.k(),
-                    self.rank_in(),
-                    self.rank_out()
-                ),
+            scratch.available() >= GGLWESwitchingKey::encrypt_sk_scratch_space(module, self),
             "scratch.available()={} < GLWESwitchingKey::encrypt_sk_scratch_space={}",
             scratch.available(),
-            GGLWESwitchingKey::encrypt_sk_scratch_space(
-                module,
-                self.basek(),
-                self.k(),
-                self.rank_in(),
-                self.rank_out()
-            )
+            GGLWESwitchingKey::encrypt_sk_scratch_space(module, self)
        )
     }

-    let n: usize = sk_in.n().max(sk_out.n());
+    let n: usize = sk_in.n().max(sk_out.n()).into();

-    let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank());
-    (0..sk_in.rank()).for_each(|i| {
+    let (mut sk_in_tmp, scratch_1) = scratch.take_scalar_znx(n, sk_in.rank().into());
+    (0..sk_in.rank().into()).for_each(|i| {
         module.vec_znx_switch_ring(
             &mut sk_in_tmp.as_vec_znx_mut(),
             i,

@@ -110,10 +90,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
         );
     });

-    let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(n, sk_out.rank());
+    let (mut sk_out_tmp, scratch_2) = scratch_1.take_glwe_secret_prepared(Degree(n as u32), sk_out.rank());
     {
         let (mut tmp, _) = scratch_2.take_scalar_znx(n, 1);
-        (0..sk_out.rank()).for_each(|i| {
+        (0..sk_out.rank().into()).for_each(|i| {
             module.vec_znx_switch_ring(&mut tmp.as_vec_znx_mut(), 0, &sk_out.data.as_vec_znx(), i);
             module.svp_prepare(&mut sk_out_tmp.data, i, &tmp, 0);
         });

@@ -127,7 +107,7 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
         source_xe,
         scratch_2,
     );
-    self.sk_in_n = sk_in.n();
-    self.sk_out_n = sk_out.n();
+    self.sk_in_n = sk_in.n().into();
+    self.sk_out_n = sk_out.n().into();
 }
 }

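Note: `vec_znx_switch_ring` above moves secrets between rings of different degree before encrypting the switching key. Going from degree n' to a larger degree n, the usual embedding sends X to X^(n/n'), i.e. spaces the coefficients out. A plain-vector sketch of that map (illustrative, not the poulpy kernel):

// Embed a degree-n' polynomial into degree n (n' divides n) via X -> X^(n/n').
fn switch_ring_up(src: &[i64], n: usize) -> Vec<i64> {
    let gap = n / src.len();
    let mut out = vec![0i64; n];
    for (i, &c) in src.iter().enumerate() {
        out[i * gap] = c;
    }
    out
}

fn main() {
    // Degree 4 -> degree 8: 1 + 2X + 3X^2 + 4X^3 becomes 1 + 2X^2 + 3X^4 + 4X^6.
    assert_eq!(switch_ring_up(&[1, 2, 3, 4], 8), vec![1, 0, 2, 0, 3, 0, 4, 0]);
}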
@@ -3,7 +3,7 @@ use poulpy_hal::{
 ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx,
 TakeVecZnxBig, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxBigAllocBytes,
 VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA,
-VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch},
 source::Source,
@@ -12,23 +12,24 @@ use poulpy_hal::{
 use crate::{
 TakeGLWESecret, TakeGLWESecretPrepared,
 layouts::{
-GGLWESwitchingKey, GGLWETensorKey, GLWESecret, Infos,
+Degree, GGLWELayoutInfos, GGLWESwitchingKey, GGLWETensorKey, GLWEInfos, GLWESecret, LWEInfos, Rank,
 prepared::{GLWESecretPrepared, Prepare},
 },
 };

 impl GGLWETensorKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GGLWELayoutInfos,
 Module<B>:
 SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes + VecZnxBigAllocBytes,
 {
-GLWESecretPrepared::bytes_of(module, rank)
-+ module.vec_znx_dft_alloc_bytes(rank, 1)
+GLWESecretPrepared::alloc_bytes_with(module, infos.rank_out())
++ module.vec_znx_dft_alloc_bytes(infos.rank_out().into(), 1)
 + module.vec_znx_big_alloc_bytes(1, 1)
 + module.vec_znx_dft_alloc_bytes(1, 1)
-+ GLWESecret::bytes_of(module.n(), 1)
-+ GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank, rank)
++ GLWESecret::alloc_bytes_with(Degree(module.n() as u32), Rank(1))
++ GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
 }
 }

@@ -51,7 +52,7 @@ impl<DataSelf: DataMut> GGLWETensorKey<DataSelf> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -65,36 +66,36 @@ impl<DataSelf: DataMut> GGLWETensorKey<DataSelf> {
 {
 #[cfg(debug_assertions)]
 {
-assert_eq!(self.rank(), sk.rank());
+assert_eq!(self.rank_out(), sk.rank());
 assert_eq!(self.n(), sk.n());
 }

-let n: usize = sk.n();
-let rank: usize = self.rank();
+let n: Degree = sk.n();
+let rank: Rank = self.rank_out();

 let (mut sk_dft_prep, scratch_1) = scratch.take_glwe_secret_prepared(n, rank);
 sk_dft_prep.prepare(module, sk, scratch_1);

-let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n, rank, 1);
+let (mut sk_dft, scratch_2) = scratch_1.take_vec_znx_dft(n.into(), rank.into(), 1);

-(0..rank).for_each(|i| {
+(0..rank.into()).for_each(|i| {
 module.vec_znx_dft_apply(1, 0, &mut sk_dft, i, &sk.data.as_vec_znx(), i);
 });

-let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n, 1, 1);
-let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(n, 1);
-let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(n, 1, 1);
+let (mut sk_ij_big, scratch_3) = scratch_2.take_vec_znx_big(n.into(), 1, 1);
+let (mut sk_ij, scratch_4) = scratch_3.take_glwe_secret(n, Rank(1));
+let (mut sk_ij_dft, scratch_5) = scratch_4.take_vec_znx_dft(n.into(), 1, 1);

-(0..rank).for_each(|i| {
-(i..rank).for_each(|j| {
+(0..rank.into()).for_each(|i| {
+(i..rank.into()).for_each(|j| {
 module.svp_apply_dft_to_dft(&mut sk_ij_dft, 0, &sk_dft_prep.data, j, &sk_dft, i);

 module.vec_znx_idft_apply_tmpa(&mut sk_ij_big, 0, &mut sk_ij_dft, 0);
 module.vec_znx_big_normalize(
-self.basek(),
+self.base2k().into(),
 &mut sk_ij.data.as_vec_znx_mut(),
 0,
+self.base2k().into(),
 &sk_ij_big,
 0,
 scratch_5,

@@ -2,7 +2,7 @@ use poulpy_hal::{
 api::{
 ScratchAvailable, SvpApplyDftToDftInplace, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal,
 VecZnxAddScalarInplace, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform,
-VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
 },
 layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, VecZnx, ZnxZero},
 source::Source,
@@ -10,19 +10,20 @@ use poulpy_hal::{

 use crate::{
 TakeGLWEPt,
-layouts::{GGSWCiphertext, GLWECiphertext, Infos, prepared::GLWESecretPrepared},
+layouts::{GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GLWESecretPrepared},
 };

 impl GGSWCiphertext<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GGSWInfos,
 Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
 {
-let size = k.div_ceil(basek);
-GLWECiphertext::encrypt_sk_scratch_space(module, basek, k)
-+ VecZnx::alloc_bytes(module.n(), rank + 1, size)
+let size = infos.size();
+GLWECiphertext::encrypt_sk_scratch_space(module, &infos.glwe_layout())
++ VecZnx::alloc_bytes(module.n(), (infos.rank() + 1).into(), size)
 + VecZnx::alloc_bytes(module.n(), 1, size)
-+ module.vec_znx_dft_alloc_bytes(rank + 1, size)
++ module.vec_znx_dft_alloc_bytes((infos.rank() + 1).into(), size)
 }
 }

@@ -45,7 +46,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -59,22 +60,21 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {

 assert_eq!(self.rank(), sk.rank());
 assert_eq!(self.n(), sk.n());
-assert_eq!(pt.n(), sk.n());
+assert_eq!(pt.n() as u32, sk.n());
 }

-let basek: usize = self.basek();
-let k: usize = self.k();
-let rank: usize = self.rank();
-let digits: usize = self.digits();
+let base2k: usize = self.base2k().into();
+let rank: usize = self.rank().into();
+let digits: usize = self.digits().into();

-let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(self.n(), basek, k);
+let (mut tmp_pt, scratch_1) = scratch.take_glwe_pt(&self.glwe_layout());

-(0..self.rows()).for_each(|row_i| {
+(0..self.rows().into()).for_each(|row_i| {
 tmp_pt.data.zero();

 // Adds the scalar_znx_pt to the i-th limb of the vec_znx_pt
 module.vec_znx_add_scalar_inplace(&mut tmp_pt.data, 0, (digits - 1) + row_i * digits, pt, 0);
-module.vec_znx_normalize_inplace(basek, &mut tmp_pt.data, 0, scratch_1);
+module.vec_znx_normalize_inplace(base2k, &mut tmp_pt.data, 0, scratch_1);

 (0..rank + 1).for_each(|col_j| {
 // rlwe encrypt of vec_znx_pt into vec_znx_ct

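Many hunks in this commit also swap raw usize parameters for wrapper types such as Degree and Rank, with explicit .0 field access and .into() conversions at the few places where a plain integer is still needed (slice bounds, loop ranges). A self-contained sketch of the pattern follows; these definitions are illustrative, not the crate's actual ones.

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Degree(pub u32);

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Rank(pub u32);

    impl From<Rank> for usize {
        fn from(r: Rank) -> usize {
            r.0 as usize
        }
    }

    impl std::ops::Add<u32> for Rank {
        type Output = Rank;
        fn add(self, rhs: u32) -> Rank {
            Rank(self.0 + rhs)
        }
    }

    fn main() {
        let n = Degree(1024);
        let rank = Rank(2);
        // As in the diff, loop bounds convert explicitly: (0..rank.into()).
        let cols: usize = (rank + 1).into();
        (0..cols).for_each(|col| println!("n = {}, column {col}", n.0));
    }

The payoff is visible in asserts such as sk_lwe.n().0 <= module.n() as u32 later in the diff: each side's unit is explicit instead of being one of several anonymous usizes.
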
@@ -3,7 +3,7 @@ use poulpy_hal::{
 ScratchAvailable, SvpApplyDftToDft, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeSvpPPol,
 TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigAddNormal, VecZnxBigAddSmallInplace,
 VecZnxBigAllocBytes, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
-VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
 },
 layouts::{Backend, DataMut, DataRef, Module, ScalarZnx, Scratch, VecZnx, VecZnxBig, ZnxInfos, ZnxZero},
 source::Source,
@@ -13,26 +13,30 @@ use crate::{
 dist::Distribution,
 encryption::{SIGMA, SIGMA_BOUND},
 layouts::{
-GLWECiphertext, GLWEPlaintext, Infos,
+GLWECiphertext, GLWEInfos, GLWEPlaintext, LWEInfos,
 prepared::{GLWEPublicKeyPrepared, GLWESecretPrepared},
 },
 };

 impl GLWECiphertext<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GLWEInfos,
 Module<B>: VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes,
 {
-let size: usize = k.div_ceil(basek);
+let size: usize = infos.size();
+assert_eq!(module.n() as u32, infos.n());
 module.vec_znx_normalize_tmp_bytes()
 + 2 * VecZnx::alloc_bytes(module.n(), 1, size)
 + module.vec_znx_dft_alloc_bytes(1, size)
 }
-pub fn encrypt_pk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+pub fn encrypt_pk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GLWEInfos,
 Module<B>: VecZnxDftAllocBytes + SvpPPolAllocBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
 {
-let size: usize = k.div_ceil(basek);
+let size: usize = infos.size();
+assert_eq!(module.n() as u32, infos.n());
 ((module.vec_znx_dft_alloc_bytes(1, size) + module.vec_znx_big_alloc_bytes(1, size))
 | ScalarZnx::alloc_bytes(module.n(), 1))
 + module.svp_ppol_alloc_bytes(1)
@@ -58,7 +62,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -72,10 +76,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 assert_eq!(sk.n(), self.n());
 assert_eq!(pt.n(), self.n());
 assert!(
-scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
+scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self),
 "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
 scratch.available(),
-GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
+GLWECiphertext::encrypt_sk_scratch_space(module, self)
 )
 }

@@ -97,7 +101,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -110,10 +114,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 assert_eq!(self.rank(), sk.rank());
 assert_eq!(sk.n(), self.n());
 assert!(
-scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k()),
+scratch.available() >= GLWECiphertext::encrypt_sk_scratch_space(module, self),
 "scratch.available(): {} < GLWECiphertext::encrypt_sk_scratch_space: {}",
 scratch.available(),
-GLWECiphertext::encrypt_sk_scratch_space(module, self.basek(), self.k())
+GLWECiphertext::encrypt_sk_scratch_space(module, self)
 )
 }
 self.encrypt_sk_internal(
@@ -143,7 +147,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -151,11 +155,11 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 + VecZnxSub,
 Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
-let cols: usize = self.rank() + 1;
+let cols: usize = (self.rank() + 1).into();
 glwe_encrypt_sk_internal(
 module,
-self.basek(),
-self.k(),
+self.base2k().into(),
+self.k().into(),
 &mut self.data,
 cols,
 false,
@@ -235,24 +239,24 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 {
 #[cfg(debug_assertions)]
 {
-assert_eq!(self.basek(), pk.basek());
+assert_eq!(self.base2k(), pk.base2k());
 assert_eq!(self.n(), pk.n());
 assert_eq!(self.rank(), pk.rank());
 if let Some((pt, _)) = pt {
-assert_eq!(pt.basek(), pk.basek());
+assert_eq!(pt.base2k(), pk.base2k());
 assert_eq!(pt.n(), pk.n());
 }
 }

-let basek: usize = pk.basek();
+let base2k: usize = pk.base2k().into();
 let size_pk: usize = pk.size();
-let cols: usize = self.rank() + 1;
+let cols: usize = (self.rank() + 1).into();

 // Generates u according to the underlying secret distribution.
-let (mut u_dft, scratch_1) = scratch.take_svp_ppol(self.n(), 1);
+let (mut u_dft, scratch_1) = scratch.take_svp_ppol(self.n().into(), 1);

 {
-let (mut u, _) = scratch_1.take_scalar_znx(self.n(), 1);
+let (mut u, _) = scratch_1.take_scalar_znx(self.n().into(), 1);
 match pk.dist {
 Distribution::NONE => panic!(
 "invalid public key: SecretDistribution::NONE, ensure it has been correctly intialized through \
@@ -271,7 +275,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {

 // ct[i] = pk[i] * u + ei (+ m if col = i)
 (0..cols).for_each(|i| {
-let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), 1, size_pk);
+let (mut ci_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), 1, size_pk);
 // ci_dft = DFT(u) * DFT(pk[i])
 module.svp_apply_dft_to_dft(&mut ci_dft, 0, &u_dft, 0, &pk.data, i);

@@ -279,7 +283,15 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 let mut ci_big = module.vec_znx_idft_apply_consume(ci_dft);

 // ci_big = u * pk[i] + e
-module.vec_znx_big_add_normal(basek, &mut ci_big, 0, pk.k(), source_xe, SIGMA, SIGMA_BOUND);
+module.vec_znx_big_add_normal(
+base2k,
+&mut ci_big,
+0,
+pk.k().into(),
+source_xe,
+SIGMA,
+SIGMA_BOUND,
+);

 // ci_big = u * pk[i] + e + m (if col = i)
 if let Some((pt, col)) = pt
@@ -289,7 +301,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 }

 // ct[i] = norm(ci_big)
-module.vec_znx_big_normalize(basek, &mut self.data, i, &ci_big, 0, scratch_2);
+module.vec_znx_big_normalize(base2k, &mut self.data, i, base2k, &ci_big, 0, scratch_2);
 });
 }
 }
@@ -297,7 +309,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
 #[allow(clippy::too_many_arguments)]
 pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk: DataRef, B: Backend>(
 module: &Module<B>,
-basek: usize,
+base2k: usize,
 k: usize,
 ct: &mut VecZnx<DataCt>,
 cols: usize,
@@ -316,7 +328,7 @@ pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk:
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -350,7 +362,7 @@ pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk:
 let col_ct: usize = if compressed { 0 } else { i };

 // ct[i] = uniform (+ pt)
-module.vec_znx_fill_uniform(basek, ct, col_ct, source_xa);
+module.vec_znx_fill_uniform(base2k, ct, col_ct, source_xa);

 let (mut ci_dft, scratch_3) = scratch_2.take_vec_znx_dft(ct.n(), 1, size);

@@ -360,7 +372,7 @@ pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk:
 if let Some((pt, col)) = pt {
 if i == col {
 module.vec_znx_sub(&mut ci, 0, ct, col_ct, &pt.data, 0);
-module.vec_znx_normalize_inplace(basek, &mut ci, 0, scratch_3);
+module.vec_znx_normalize_inplace(base2k, &mut ci, 0, scratch_3);
 module.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, &ci, 0);
 } else {
 module.vec_znx_dft_apply(1, 0, &mut ci_dft, 0, ct, col_ct);
@@ -373,15 +385,15 @@ pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk:
 let ci_big: VecZnxBig<&mut [u8], B> = module.vec_znx_idft_apply_consume(ci_dft);

 // use c[0] as buffer, which is overwritten later by the normalization step
-module.vec_znx_big_normalize(basek, &mut ci, 0, &ci_big, 0, scratch_3);
+module.vec_znx_big_normalize(base2k, &mut ci, 0, base2k, &ci_big, 0, scratch_3);

 // c0_tmp = -c[i] * s[i] (use c[0] as buffer)
-module.vec_znx_sub_ab_inplace(&mut c0, 0, &ci, 0);
+module.vec_znx_sub_inplace(&mut c0, 0, &ci, 0);
 });
 }

 // c[0] += e
-module.vec_znx_add_normal(basek, &mut c0, 0, k, source_xe, sigma, SIGMA_BOUND);
+module.vec_znx_add_normal(base2k, &mut c0, 0, k, source_xe, sigma, SIGMA_BOUND);

 // c[0] += m if col = 0
 if let Some((pt, col)) = pt
@@ -391,5 +403,5 @@ pub(crate) fn glwe_encrypt_sk_internal<DataCt: DataMut, DataPt: DataRef, DataSk:
 }

 // c[0] = norm(c[0])
-module.vec_znx_normalize(basek, ct, 0, &c0, 0, scratch_1);
+module.vec_znx_normalize(base2k, ct, 0, base2k, &c0, 0, scratch_1);
 }

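The normalize calls above carry the cross-base change the commit title refers to: vec_znx_big_normalize and vec_znx_normalize now take the output base and the input base as separate arguments (base2k appears twice at these call sites because both bases happen to be equal there), so a value carried in limbs of one base can be re-emitted in limbs of another. Below is a self-contained toy of what re-slicing digits between two bases means on a single non-negative coefficient; the limb convention (value = sum_i a_i * 2^(-base2k*(i+1))), the function names, and the parameters are all assumptions for illustration, and the crate's real implementation additionally produces balanced digits and handles carries and negative coefficients.

    // Pack limbs (base 2^base2k) into one fixed-point integer: value * 2^total_bits.
    fn to_fixed(limbs: &[i64], base2k: u32, total_bits: u32) -> i128 {
        limbs.iter().enumerate().fold(0i128, |acc, (i, &a)| {
            acc + ((a as i128) << (total_bits - base2k * (i as u32 + 1)))
        })
    }

    // Re-slice the fixed-point value into limbs of a different base.
    fn from_fixed(v: i128, base2k: u32, nlimbs: usize, total_bits: u32) -> Vec<i64> {
        (0..nlimbs)
            .map(|i| {
                let shift = total_bits - base2k * (i as u32 + 1);
                ((v >> shift) & ((1i128 << base2k) - 1)) as i64
            })
            .collect()
    }

    fn main() {
        let (base_in, base_out) = (18u32, 12u32);
        let limbs_in = vec![3, 141, 59]; // digits in base 2^18
        let total = 72u32; // shared precision budget: 72 = 4 * 18 = 6 * 12
        let v = to_fixed(&limbs_in, base_in, total);
        let limbs_out = from_fixed(v, base_out, 6, total);
        // Same value, different digit base:
        assert_eq!(v, to_fixed(&limbs_out, base_out, total));
        println!("base 2^18 {limbs_in:?} -> base 2^12 {limbs_out:?}");
    }
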
@@ -2,14 +2,14 @@ use poulpy_hal::{
 api::{
 ScratchOwnedAlloc, ScratchOwnedBorrow, SvpApplyDftToDftInplace, VecZnxAddInplace, VecZnxAddNormal, VecZnxBigNormalize,
 VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
+VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace,
 },
 layouts::{Backend, DataMut, DataRef, Module, ScratchOwned},
 oep::{ScratchAvailableImpl, ScratchOwnedAllocImpl, ScratchOwnedBorrowImpl, TakeVecZnxDftImpl, TakeVecZnxImpl},
 source::Source,
 };

-use crate::layouts::{GLWECiphertext, GLWEPublicKey, Infos, prepared::GLWESecretPrepared};
+use crate::layouts::{GLWECiphertext, GLWEPublicKey, prepared::GLWESecretPrepared};

 impl<D: DataMut> GLWEPublicKey<D> {
 pub fn generate_from_sk<S: DataRef, B>(
@@ -27,7 +27,7 @@ impl<D: DataMut> GLWEPublicKey<D> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -42,7 +42,7 @@ impl<D: DataMut> GLWEPublicKey<D> {
 {
 #[cfg(debug_assertions)]
 {
-use crate::Distribution;
+use crate::{Distribution, layouts::LWEInfos};

 assert_eq!(self.n(), sk.n());

@@ -52,13 +52,9 @@ impl<D: DataMut> GLWEPublicKey<D> {
 }

 // Its ok to allocate scratch space here since pk is usually generated only once.
-let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::encrypt_sk_scratch_space(
-module,
-self.basek(),
-self.k(),
-));
+let mut scratch: ScratchOwned<B> = ScratchOwned::alloc(GLWECiphertext::encrypt_sk_scratch_space(module, self));

-let mut tmp: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(self.n(), self.basek(), self.k(), self.rank());
+let mut tmp: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(self);
 tmp.encrypt_zero_sk(module, sk, source_xa, source_xe, scratch.borrow());
 self.dist = sk.dist;
 }

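The same layout-driven style reaches constructors: the temporary ciphertext above is now built with GLWECiphertext::alloc(self), the public key acting as its own layout descriptor, instead of alloc(self.n(), self.basek(), self.k(), self.rank()). A reduced, compilable sketch of the pattern follows; the trait, struct names, and size arithmetic are placeholders, not the crate's real definitions.

    trait GlweInfos {
        fn n(&self) -> usize;
        fn base2k(&self) -> usize;
        fn k(&self) -> usize;
        fn rank(&self) -> usize;
    }

    struct GlweCiphertext {
        data: Vec<i64>,
    }

    impl GlweCiphertext {
        // One allocation path driven by the layout, so four raw usizes can no
        // longer be passed in the wrong order at each call site.
        fn alloc(infos: &impl GlweInfos) -> Self {
            let limbs = infos.k().div_ceil(infos.base2k());
            GlweCiphertext {
                // Placeholder layout: (rank + 1) columns of `limbs` limbs of n coefficients.
                data: vec![0; infos.n() * (infos.rank() + 1) * limbs],
            }
        }
    }

    struct PkLayout;
    impl GlweInfos for PkLayout {
        fn n(&self) -> usize { 1024 }
        fn base2k(&self) -> usize { 18 }
        fn k(&self) -> usize { 54 }
        fn rank(&self) -> usize { 1 }
    }

    fn main() {
        let ct = GlweCiphertext::alloc(&PkLayout);
        println!("allocated {} words", ct.data.len());
    }
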
@@ -3,7 +3,7 @@ use poulpy_hal::{
 ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
 VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
 VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
 source::Source,
@@ -11,17 +11,21 @@ use poulpy_hal::{

 use crate::{
 TakeGLWESecret, TakeGLWESecretPrepared,
-layouts::{GGLWESwitchingKey, GLWESecret, GLWEToLWESwitchingKey, LWESecret, prepared::GLWESecretPrepared},
+layouts::{
+GGLWELayoutInfos, GGLWESwitchingKey, GLWESecret, GLWEToLWESwitchingKey, LWEInfos, LWESecret, Rank,
+prepared::GLWESecretPrepared,
+},
 };

 impl GLWEToLWESwitchingKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_in: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GGLWELayoutInfos,
 Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
 {
-GLWESecretPrepared::bytes_of(module, rank_in)
-+ (GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank_in, 1)
-| GLWESecret::bytes_of(module.n(), rank_in))
+GLWESecretPrepared::alloc_bytes_with(module, infos.rank_in())
++ (GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
+| GLWESecret::alloc_bytes_with(infos.n(), infos.rank_in()))
 }
 }

@@ -47,7 +51,7 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -60,12 +64,12 @@ impl<D: DataMut> GLWEToLWESwitchingKey<D> {
 {
 #[cfg(debug_assertions)]
 {
-assert!(sk_lwe.n() <= module.n());
+assert!(sk_lwe.n().0 <= module.n() as u32);
 }

-let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(sk_glwe.n(), 1);
+let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(sk_glwe.n(), Rank(1));
 sk_lwe_as_glwe.data.zero();
-sk_lwe_as_glwe.data.at_mut(0, 0)[..sk_lwe.n()].copy_from_slice(sk_lwe.data.at(0, 0));
+sk_lwe_as_glwe.data.at_mut(0, 0)[..sk_lwe.n().into()].copy_from_slice(sk_lwe.data.at(0, 0));
 module.vec_znx_automorphism_inplace(-1, &mut sk_lwe_as_glwe.data.as_vec_znx_mut(), 0, scratch_1);

 self.0.encrypt_sk(

@@ -7,7 +7,7 @@ use poulpy_hal::{

 use crate::{
 encryption::{SIGMA, SIGMA_BOUND},
-layouts::{Infos, LWECiphertext, LWEPlaintext, LWESecret},
+layouts::{LWECiphertext, LWEInfos, LWEPlaintext, LWESecret},
 };

 impl<DataSelf: DataMut> LWECiphertext<DataSelf> {
@@ -29,10 +29,10 @@ impl<DataSelf: DataMut> LWECiphertext<DataSelf> {
 assert_eq!(self.n(), sk.n())
 }

-let basek: usize = self.basek();
-let k: usize = self.k();
+let base2k: usize = self.base2k().into();
+let k: usize = self.k().into();

-module.zn_fill_uniform(self.n() + 1, basek, &mut self.data, 0, source_xa);
+module.zn_fill_uniform((self.n() + 1).into(), base2k, &mut self.data, 0, source_xa);

 let mut tmp_znx: Zn<Vec<u8>> = Zn::alloc(1, 1, self.size());

@@ -57,7 +57,7 @@ impl<DataSelf: DataMut> LWECiphertext<DataSelf> {

 module.zn_add_normal(
 1,
-basek,
+base2k,
 &mut self.data,
 0,
 k,
@@ -68,7 +68,7 @@ impl<DataSelf: DataMut> LWECiphertext<DataSelf> {

 module.zn_normalize_inplace(
 1,
-basek,
+base2k,
 &mut tmp_znx,
 0,
 ScratchOwned::alloc(size_of::<i64>()).borrow(),

@@ -3,7 +3,7 @@ use poulpy_hal::{
 ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
 VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
 VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
 source::Source,
@@ -11,17 +11,36 @@ use poulpy_hal::{

 use crate::{
 TakeGLWESecret, TakeGLWESecretPrepared,
-layouts::{GGLWESwitchingKey, GLWESecret, Infos, LWESecret, LWESwitchingKey, prepared::GLWESecretPrepared},
+layouts::{
+Degree, GGLWELayoutInfos, GGLWESwitchingKey, GLWESecret, LWEInfos, LWESecret, LWESwitchingKey, Rank,
+prepared::GLWESecretPrepared,
+},
 };

 impl LWESwitchingKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GGLWELayoutInfos,
 Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
 {
-GLWESecret::bytes_of(module.n(), 1)
-+ GLWESecretPrepared::bytes_of(module, 1)
-+ GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, 1, 1)
+debug_assert_eq!(
+infos.digits().0,
+1,
+"digits > 1 is not supported for LWESwitchingKey"
+);
+debug_assert_eq!(
+infos.rank_in().0,
+1,
+"rank_in > 1 is not supported for LWESwitchingKey"
+);
+debug_assert_eq!(
+infos.rank_out().0,
+1,
+"rank_out > 1 is not supported for LWESwitchingKey"
+);
+GLWESecret::alloc_bytes_with(Degree(module.n() as u32), Rank(1))
++ GLWESecretPrepared::alloc_bytes_with(module, Rank(1))
++ GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
 }
 }

@@ -47,7 +66,7 @@ impl<D: DataMut> LWESwitchingKey<D> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -60,20 +79,20 @@ impl<D: DataMut> LWESwitchingKey<D> {
 {
 #[cfg(debug_assertions)]
 {
-assert!(sk_lwe_in.n() <= self.n());
-assert!(sk_lwe_out.n() <= self.n());
-assert!(self.n() <= module.n());
+assert!(sk_lwe_in.n().0 <= self.n().0);
+assert!(sk_lwe_out.n().0 <= self.n().0);
+assert!(self.n().0 <= module.n() as u32);
 }

-let (mut sk_in_glwe, scratch_1) = scratch.take_glwe_secret(self.n(), 1);
-let (mut sk_out_glwe, scratch_2) = scratch_1.take_glwe_secret(self.n(), 1);
+let (mut sk_in_glwe, scratch_1) = scratch.take_glwe_secret(self.n(), Rank(1));
+let (mut sk_out_glwe, scratch_2) = scratch_1.take_glwe_secret(self.n(), Rank(1));

-sk_out_glwe.data.at_mut(0, 0)[..sk_lwe_out.n()].copy_from_slice(sk_lwe_out.data.at(0, 0));
-sk_out_glwe.data.at_mut(0, 0)[sk_lwe_out.n()..].fill(0);
+sk_out_glwe.data.at_mut(0, 0)[..sk_lwe_out.n().into()].copy_from_slice(sk_lwe_out.data.at(0, 0));
+sk_out_glwe.data.at_mut(0, 0)[sk_lwe_out.n().into()..].fill(0);
 module.vec_znx_automorphism_inplace(-1, &mut sk_out_glwe.data.as_vec_znx_mut(), 0, scratch_2);

-sk_in_glwe.data.at_mut(0, 0)[..sk_lwe_in.n()].copy_from_slice(sk_lwe_in.data.at(0, 0));
-sk_in_glwe.data.at_mut(0, 0)[sk_lwe_in.n()..].fill(0);
+sk_in_glwe.data.at_mut(0, 0)[..sk_lwe_in.n().into()].copy_from_slice(sk_lwe_in.data.at(0, 0));
+sk_in_glwe.data.at_mut(0, 0)[sk_lwe_in.n().into()..].fill(0);
 module.vec_znx_automorphism_inplace(-1, &mut sk_in_glwe.data.as_vec_znx_mut(), 0, scratch_2);

 self.0.encrypt_sk(

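Note the guards that replace silent assumptions in the LWE switching-key sizing above: digits, rank_in, and rank_out must all be 1, and the helper now fails fast in debug builds rather than returning a size for an unsupported layout. Since debug_assert_eq! compiles to nothing in release builds, the helper stays free to call in hot paths. A minimal standalone illustration of the idiom, with a stand-in layout struct and the byte accounting elided:

    struct LweKskLayout {
        digits: u32,
        rank_in: u32,
        rank_out: u32,
    }

    fn lwe_ksk_scratch_space(infos: &LweKskLayout) -> usize {
        // Checked only in debug builds; release builds skip these entirely.
        debug_assert_eq!(infos.digits, 1, "digits > 1 is not supported for LWESwitchingKey");
        debug_assert_eq!(infos.rank_in, 1, "rank_in > 1 is not supported for LWESwitchingKey");
        debug_assert_eq!(infos.rank_out, 1, "rank_out > 1 is not supported for LWESwitchingKey");
        // Byte accounting elided; see the three summed terms in the diff above.
        0
    }

    fn main() {
        let infos = LweKskLayout { digits: 1, rank_in: 1, rank_out: 1 };
        assert_eq!(lwe_ksk_scratch_space(&infos), 0);
    }
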
@@ -3,7 +3,7 @@ use poulpy_hal::{
 ScratchAvailable, SvpApplyDftToDftInplace, SvpPPolAllocBytes, SvpPrepare, TakeScalarZnx, TakeVecZnx, TakeVecZnxDft,
 VecZnxAddInplace, VecZnxAddNormal, VecZnxAddScalarInplace, VecZnxAutomorphismInplace, VecZnxBigNormalize,
 VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeInplace,
-VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace, VecZnxSwitchRing,
+VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubInplace, VecZnxSwitchRing,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut},
 source::Source,
@@ -11,15 +11,22 @@ use poulpy_hal::{

 use crate::{
 TakeGLWESecret, TakeGLWESecretPrepared,
-layouts::{GGLWESwitchingKey, GLWESecret, LWESecret, LWEToGLWESwitchingKey},
+layouts::{Degree, GGLWELayoutInfos, GGLWESwitchingKey, GLWESecret, LWEInfos, LWESecret, LWEToGLWESwitchingKey, Rank},
 };

 impl LWEToGLWESwitchingKey<Vec<u8>> {
-pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_out: usize) -> usize
+pub fn encrypt_sk_scratch_space<B: Backend, A>(module: &Module<B>, infos: &A) -> usize
 where
+A: GGLWELayoutInfos,
 Module<B>: SvpPPolAllocBytes + VecZnxNormalizeTmpBytes + VecZnxDftAllocBytes + VecZnxNormalizeTmpBytes,
 {
-GGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, 1, rank_out) + GLWESecret::bytes_of(module.n(), 1)
+debug_assert_eq!(
+infos.rank_in(),
+Rank(1),
+"rank_in != 1 is not supported for LWEToGLWESwitchingKey"
+);
+GGLWESwitchingKey::encrypt_sk_scratch_space(module, infos)
++ GLWESecret::alloc_bytes_with(Degree(module.n() as u32), infos.rank_in())
 }
 }

@@ -45,7 +52,7 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
 + VecZnxIdftApplyConsume<B>
 + VecZnxNormalizeTmpBytes
 + VecZnxFillUniform
-+ VecZnxSubABInplace
++ VecZnxSubInplace
 + VecZnxAddInplace
 + VecZnxNormalizeInplace<B>
 + VecZnxAddNormal
@@ -58,12 +65,14 @@ impl<D: DataMut> LWEToGLWESwitchingKey<D> {
 {
 #[cfg(debug_assertions)]
 {
-assert!(sk_lwe.n() <= module.n());
+use crate::layouts::LWEInfos;
+
+assert!(sk_lwe.n().0 <= module.n() as u32);
 }

-let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(sk_glwe.n(), 1);
-sk_lwe_as_glwe.data.at_mut(0, 0)[..sk_lwe.n()].copy_from_slice(sk_lwe.data.at(0, 0));
-sk_lwe_as_glwe.data.at_mut(0, 0)[sk_lwe.n()..].fill(0);
+let (mut sk_lwe_as_glwe, scratch_1) = scratch.take_glwe_secret(sk_glwe.n(), Rank(1));
+sk_lwe_as_glwe.data.at_mut(0, 0)[..sk_lwe.n().into()].copy_from_slice(sk_lwe.data.at(0, 0));
+sk_lwe_as_glwe.data.at_mut(0, 0)[sk_lwe.n().into()..].fill(0);
 module.vec_znx_automorphism_inplace(-1, &mut sk_lwe_as_glwe.data.as_vec_znx_mut(), 0, scratch_1);

 self.0.encrypt_sk(

@@ -1,42 +1,41 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
-VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch},
 };

-use crate::layouts::{GGLWEAutomorphismKey, GGLWESwitchingKey, prepared::GGSWCiphertextPrepared};
+use crate::layouts::{GGLWEAutomorphismKey, GGLWELayoutInfos, GGLWESwitchingKey, GGSWInfos, prepared::GGSWCiphertextPrepared};

 impl GGLWEAutomorphismKey<Vec<u8>> {
-#[allow(clippy::too_many_arguments)]
-pub fn external_product_scratch_space<B: Backend>(
+pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
 module: &Module<B>,
-basek: usize,
-k_out: usize,
-k_in: usize,
-ggsw_k: usize,
-digits: usize,
-rank: usize,
+out_infos: &OUT,
+in_infos: &IN,
+ggsw_infos: &GGSW,
 ) -> usize
 where
+OUT: GGLWELayoutInfos,
+IN: GGLWELayoutInfos,
+GGSW: GGSWInfos,
 Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GGLWESwitchingKey::external_product_scratch_space(module, basek, k_out, k_in, ggsw_k, digits, rank)
+GGLWESwitchingKey::external_product_scratch_space(module, out_infos, in_infos, ggsw_infos)
 }

-pub fn external_product_inplace_scratch_space<B: Backend>(
+pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
 module: &Module<B>,
-basek: usize,
-k_out: usize,
-ggsw_k: usize,
-digits: usize,
-rank: usize,
+out_infos: &OUT,
+ggsw_infos: &GGSW,
 ) -> usize
 where
+OUT: GGLWELayoutInfos,
+GGSW: GGSWInfos,
 Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GGLWESwitchingKey::external_product_inplace_scratch_space(module, basek, k_out, ggsw_k, digits, rank)
+GGLWESwitchingKey::external_product_inplace_scratch_space(module, out_infos, ggsw_infos)
 }
 }

@@ -55,8 +54,9 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
 + VecZnxIdftApplyConsume<B>
-+ VecZnxBigNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
++ VecZnxBigNormalize<B>
++ VecZnxNormalize<B>,
+Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
 self.key.external_product(module, &lhs.key, rhs, scratch);
 }
@@ -74,8 +74,9 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
 + VecZnxIdftApplyConsume<B>
-+ VecZnxBigNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
++ VecZnxBigNormalize<B>
++ VecZnxNormalize<B>,
+Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
 self.key.external_product_inplace(module, rhs, scratch);
 }

@@ -1,42 +1,46 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
-VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
 };

-use crate::layouts::{GGLWESwitchingKey, GLWECiphertext, Infos, prepared::GGSWCiphertextPrepared};
+use crate::layouts::{GGLWELayoutInfos, GGLWESwitchingKey, GGSWInfos, GLWECiphertext, prepared::GGSWCiphertextPrepared};

 impl GGLWESwitchingKey<Vec<u8>> {
-#[allow(clippy::too_many_arguments)]
-pub fn external_product_scratch_space<B: Backend>(
+pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
 module: &Module<B>,
-basek: usize,
-k_out: usize,
-k_in: usize,
-k_ggsw: usize,
-digits: usize,
-rank: usize,
+out_infos: &OUT,
+in_infos: &IN,
+ggsw_infos: &GGSW,
 ) -> usize
 where
+OUT: GGLWELayoutInfos,
+IN: GGLWELayoutInfos,
+GGSW: GGSWInfos,
 Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank)
+GLWECiphertext::external_product_scratch_space(
+module,
+&out_infos.glwe_layout(),
+&in_infos.glwe_layout(),
+ggsw_infos,
+)
 }

-pub fn external_product_inplace_scratch_space<B: Backend>(
+pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
 module: &Module<B>,
-basek: usize,
-k_out: usize,
-k_ggsw: usize,
-digits: usize,
-rank: usize,
+out_infos: &OUT,
+ggsw_infos: &GGSW,
 ) -> usize
 where
+OUT: GGLWELayoutInfos,
+GGSW: GGSWInfos,
 Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_out, k_ggsw, digits, rank)
+GLWECiphertext::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), ggsw_infos)
 }
 }

@@ -55,11 +59,14 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
 + VecZnxIdftApplyConsume<B>
-+ VecZnxBigNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
++ VecZnxBigNormalize<B>
++ VecZnxNormalize<B>,
+Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
 #[cfg(debug_assertions)]
 {
+use crate::layouts::GLWEInfos;
+
 assert_eq!(
 self.rank_in(),
 lhs.rank_in(),
@@ -83,15 +90,15 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
 );
 }

-(0..self.rank_in()).for_each(|col_i| {
-(0..self.rows()).for_each(|row_j| {
+(0..self.rank_in().into()).for_each(|col_i| {
+(0..self.rows().into()).for_each(|row_j| {
 self.at_mut(row_j, col_i)
 .external_product(module, &lhs.at(row_j, col_i), rhs, scratch);
 });
 });

-(self.rows().min(lhs.rows())..self.rows()).for_each(|row_i| {
-(0..self.rank_in()).for_each(|col_j| {
+(self.rows().min(lhs.rows()).into()..self.rows().into()).for_each(|row_i| {
+(0..self.rank_in().into()).for_each(|col_j| {
 self.at_mut(row_i, col_j).data.zero();
 });
 });
@@ -110,11 +117,14 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
 + VecZnxIdftApplyConsume<B>
-+ VecZnxBigNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
++ VecZnxBigNormalize<B>
++ VecZnxNormalize<B>,
+Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
 #[cfg(debug_assertions)]
 {
+use crate::layouts::GLWEInfos;
+
 assert_eq!(
 self.rank_out(),
 rhs.rank(),
@@ -124,8 +134,8 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
 );
 }

-(0..self.rank_in()).for_each(|col_i| {
-(0..self.rows()).for_each(|row_j| {
+(0..self.rank_in().into()).for_each(|col_i| {
+(0..self.rows().into()).for_each(|row_j| {
 self.at_mut(row_j, col_i)
 .external_product_inplace(module, rhs, scratch);
 });

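The external-product sizing above shows the second half of the layout refactor: a GGLWE (or GGSW) layout projects itself onto the GLWE layout of its rows via glwe_layout(), and the GLWE-level helper becomes the single place where bytes are counted. A reduced, compilable sketch of that delegation follows; the struct names and the size formula are placeholders, not the crate's real accounting.

    #[derive(Clone, Copy)]
    struct GlweLayout {
        n: usize,
        base2k: usize,
        k: usize,
        rank: usize,
    }

    #[derive(Clone, Copy)]
    struct GglweLayout {
        glwe: GlweLayout,
        rows: usize,
    }

    impl GglweLayout {
        // Project the gadget layout onto the GLWE layout of one row.
        fn glwe_layout(&self) -> GlweLayout {
            self.glwe
        }
    }

    fn glwe_external_product_scratch_space(out: &GlweLayout, input: &GlweLayout) -> usize {
        // Placeholder: size of the larger operand's DFT buffer.
        let bytes = |l: &GlweLayout| l.n * (l.rank + 1) * l.k.div_ceil(l.base2k) * size_of::<i64>();
        bytes(out).max(bytes(input))
    }

    fn gglwe_external_product_scratch_space(out: &GglweLayout, input: &GglweLayout) -> usize {
        // The GGLWE helper only forwards per-row GLWE layouts, as in the diff.
        glwe_external_product_scratch_space(&out.glwe_layout(), &input.glwe_layout())
    }

    fn main() {
        let glwe = GlweLayout { n: 1024, base2k: 18, k: 54, rank: 2 };
        let ksk = GglweLayout { glwe, rows: 3 };
        println!("{} rows, {} bytes", ksk.rows, gglwe_external_product_scratch_space(&ksk, &ksk));
    }
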
@@ -1,42 +1,47 @@
 use poulpy_hal::{
 api::{
-ScratchAvailable, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
-VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
+ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
+VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
+VmpApplyDftToDftTmpBytes,
 },
 layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
 };

-use crate::layouts::{GGSWCiphertext, GLWECiphertext, Infos, prepared::GGSWCiphertextPrepared};
+use crate::layouts::{GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, prepared::GGSWCiphertextPrepared};

 impl GGSWCiphertext<Vec<u8>> {
 #[allow(clippy::too_many_arguments)]
-pub fn external_product_scratch_space<B: Backend>(
+pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
 module: &Module<B>,
-basek: usize,
-k_out: usize,
-k_in: usize,
-k_ggsw: usize,
-digits: usize,
-rank: usize,
+out_infos: &OUT,
+in_infos: &IN,
+apply_infos: &GGSW,
 ) -> usize
 where
+OUT: GGSWInfos,
+IN: GGSWInfos,
+GGSW: GGSWInfos,
 Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_scratch_space(module, basek, k_out, k_in, k_ggsw, digits, rank)
+GLWECiphertext::external_product_scratch_space(
+module,
+&out_infos.glwe_layout(),
+&in_infos.glwe_layout(),
+apply_infos,
+)
 }

-pub fn external_product_inplace_scratch_space<B: Backend>(
+pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
 module: &Module<B>,
-basek: usize,
-k_out: usize,
-k_ggsw: usize,
-digits: usize,
-rank: usize,
+out_infos: &OUT,
+apply_infos: &GGSW,
 ) -> usize
 where
+OUT: GGSWInfos,
+GGSW: GGSWInfos,
 Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
 {
-GLWECiphertext::external_product_inplace_scratch_space(module, basek, k_out, k_ggsw, digits, rank)
+GLWECiphertext::external_product_inplace_scratch_space(module, &out_infos.glwe_layout(), apply_infos)
 }
 }

@@ -55,12 +60,13 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
 + VecZnxIdftApplyConsume<B>
-+ VecZnxBigNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
++ VecZnxBigNormalize<B>
++ VecZnxNormalize<B>,
+Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
 #[cfg(debug_assertions)]
 {
-use crate::layouts::Infos;
+use crate::layouts::LWEInfos;

 assert_eq!(lhs.n(), self.n());
 assert_eq!(rhs.n(), self.n());
@@ -80,28 +86,17 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 rhs.rank()
 );

-assert!(
-scratch.available()
->= GGSWCiphertext::external_product_scratch_space(
-module,
-self.basek(),
-self.k(),
-lhs.k(),
-rhs.k(),
-rhs.digits(),
-rhs.rank()
-)
-)
+assert!(scratch.available() >= GGSWCiphertext::external_product_scratch_space(module, self, lhs, rhs))
 }

-let min_rows: usize = self.rows().min(lhs.rows());
+let min_rows: usize = self.rows().min(lhs.rows()).into();

-(0..self.rank() + 1).for_each(|col_i| {
+(0..(self.rank() + 1).into()).for_each(|col_i| {
 (0..min_rows).for_each(|row_j| {
 self.at_mut(row_j, col_i)
 .external_product(module, &lhs.at(row_j, col_i), rhs, scratch);
 });
-(min_rows..self.rows()).for_each(|row_i| {
+(min_rows..self.rows().into()).for_each(|row_i| {
 self.at_mut(row_i, col_i).data.zero();
 });
 });
@@ -120,11 +115,14 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 + VmpApplyDftToDft<B>
 + VmpApplyDftToDftAdd<B>
 + VecZnxIdftApplyConsume<B>
-+ VecZnxBigNormalize<B>,
-Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
++ VecZnxBigNormalize<B>
++ VecZnxNormalize<B>,
+Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
 {
 #[cfg(debug_assertions)]
 {
+use crate::layouts::LWEInfos;
+
 assert_eq!(rhs.n(), self.n());
 assert_eq!(
 self.rank(),
@@ -135,8 +133,8 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
 );
 }

-(0..self.rank() + 1).for_each(|col_i| {
+(0..(self.rank() + 1).into()).for_each(|col_i| {
-(0..self.rows()).for_each(|row_j| {
+(0..self.rows().into()).for_each(|row_j| {
 self.at_mut(row_j, col_i)
 .external_product_inplace(module, rhs, scratch);
 });

@@ -1,56 +1,65 @@
use poulpy_hal::{
api::{
ScratchAvailable, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume,
VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply,
VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnxBig},
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig},
};

use crate::layouts::{GLWECiphertext, Infos, prepared::GGSWCiphertextPrepared};
use crate::layouts::{GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGSWCiphertextPrepared};

impl GLWECiphertext<Vec<u8>> {
#[allow(clippy::too_many_arguments)]
pub fn external_product_scratch_space<B: Backend>(
pub fn external_product_scratch_space<B: Backend, OUT, IN, GGSW>(
module: &Module<B>,
basek: usize,
k_out: usize,
k_in: usize,
k_ggsw: usize,
digits: usize,
rank: usize,
out_infos: &OUT,
in_infos: &IN,
apply_infos: &GGSW,
) -> usize
where
OUT: GLWEInfos,
IN: GLWEInfos,
GGSW: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
{
let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
let out_size: usize = k_out.div_ceil(basek);
let ggsw_size: usize = k_ggsw.div_ceil(basek);
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, ggsw_size);
let a_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, in_size);
let in_size: usize = in_infos
.k()
.div_ceil(apply_infos.base2k())
.div_ceil(apply_infos.digits().into()) as usize;
let out_size: usize = out_infos.size();
let ggsw_size: usize = apply_infos.size();
let res_dft: usize = module.vec_znx_dft_alloc_bytes((apply_infos.rank() + 1).into(), ggsw_size);
let a_dft: usize = module.vec_znx_dft_alloc_bytes((apply_infos.rank() + 1).into(), in_size);
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
out_size,
in_size,
in_size, // rows
rank + 1, // cols in
rank + 1, // cols out
(apply_infos.rank() + 1).into(), // cols in
(apply_infos.rank() + 1).into(), // cols out
ggsw_size,
);
let normalize: usize = module.vec_znx_normalize_tmp_bytes();
res_dft + a_dft + (vmp | normalize)
let normalize_big: usize = module.vec_znx_normalize_tmp_bytes();

if in_infos.base2k() == apply_infos.base2k() {
res_dft + a_dft + (vmp | normalize_big)
} else {
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), (apply_infos.rank() + 1).into(), in_size);
res_dft + ((a_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
}
}

pub fn external_product_inplace_scratch_space<B: Backend>(
pub fn external_product_inplace_scratch_space<B: Backend, OUT, GGSW>(
module: &Module<B>,
basek: usize,
k_out: usize,
k_ggsw: usize,
digits: usize,
rank: usize,
out_infos: &OUT,
apply_infos: &GGSW,
) -> usize
where
OUT: GLWEInfos,
GGSW: GGSWInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxNormalizeTmpBytes,
{
Self::external_product_scratch_space(module, basek, k_out, k_out, k_ggsw, digits, rank)
Self::external_product_scratch_space(module, out_infos, out_infos, apply_infos)
}
}

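Note: the cross-base2k branch above sizes its buffers by re-encoding limb counts between bases. A column with `size` limbs in base 2^{k_in} carries size*k_in bits of precision, so it needs ceil(size*k_in / k_ggsw) limbs once re-encoded in the GGSW base. A standalone check of that formula (parameter values are illustrative only):

    fn converted_limb_count(size: usize, basek_in: usize, basek_ggsw: usize) -> usize {
        // `size` limbs of `basek_in` bits, re-encoded into `basek_ggsw`-bit limbs
        (size * basek_in).div_ceil(basek_ggsw)
    }

    fn main() {
        assert_eq!(converted_limb_count(4, 18, 12), 6); // 72 bits -> six 12-bit limbs
        assert_eq!(converted_limb_count(4, 18, 18), 4); // same base: limb count unchanged
    }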
@@ -69,10 +78,13 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let basek: usize = self.basek();
let basek_in: usize = lhs.base2k().into();
let basek_ggsw: usize = rhs.base2k().into();
let basek_out: usize = self.base2k().into();

#[cfg(debug_assertions)]
{
@@ -80,34 +92,22 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {

assert_eq!(rhs.rank(), lhs.rank());
assert_eq!(rhs.rank(), self.rank());
assert_eq!(self.basek(), basek);
assert_eq!(lhs.basek(), basek);
assert_eq!(rhs.n(), self.n());
assert_eq!(lhs.n(), self.n());
assert!(
scratch.available()
>= GLWECiphertext::external_product_scratch_space(
module,
self.basek(),
self.k(),
lhs.k(),
rhs.k(),
rhs.digits(),
rhs.rank(),
)
);
assert!(scratch.available() >= GLWECiphertext::external_product_scratch_space(module, self, lhs, rhs));
}

let cols: usize = rhs.rank() + 1;
let digits: usize = rhs.digits();
let cols: usize = (rhs.rank() + 1).into();
let digits: usize = rhs.digits().into();

let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), cols, lhs.size().div_ceil(digits));
let a_size: usize = (lhs.size() * basek_in).div_ceil(basek_ggsw);

let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), cols, a_size.div_ceil(digits));
a_dft.data_mut().fill(0);

{
(0..digits).for_each(|di| {
if basek_in == basek_ggsw {
for di in 0..digits {
// (lhs.size() + di) / digits = (a - (digit - di - 1)).div_ceil(digits)
a_dft.set_size((lhs.size() + di) / digits);

@@ -120,22 +120,68 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
// noise is kept with respect to the ideal functionality.
res_dft.set_size(rhs.size() - ((digits - di) as isize - 2).max(0) as usize);

(0..cols).for_each(|col_i| {
module.vec_znx_dft_apply(digits, digits - 1 - di, &mut a_dft, col_i, &lhs.data, col_i);
});
for j in 0..cols {
module.vec_znx_dft_apply(digits, digits - 1 - di, &mut a_dft, j, &lhs.data, j);
}

if di == 0 {
module.vmp_apply_dft_to_dft(&mut res_dft, &a_dft, &rhs.data, scratch_2);
} else {
module.vmp_apply_dft_to_dft_add(&mut res_dft, &a_dft, &rhs.data, di, scratch_2);
}
});
}
} else {
let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(module.n(), cols, a_size);

for j in 0..cols {
module.vec_znx_normalize(
basek_ggsw,
&mut a_conv,
j,
basek_in,
&lhs.data,
j,
scratch_3,
);
}

for di in 0..digits {
// (lhs.size() + di) / digits = (a - (digit - di - 1)).div_ceil(digits)
a_dft.set_size((a_size + di) / digits);

// Small optimization for digits > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
// As such we can ignore the last digits-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last digits-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
res_dft.set_size(rhs.size() - ((digits - di) as isize - 2).max(0) as usize);

for j in 0..cols {
module.vec_znx_dft_apply(digits, digits - 1 - di, &mut a_dft, j, &a_conv, j);
}

if di == 0 {
module.vmp_apply_dft_to_dft(&mut res_dft, &a_dft, &rhs.data, scratch_3);
} else {
module.vmp_apply_dft_to_dft_add(&mut res_dft, &a_dft, &rhs.data, di, scratch_3);
}
}
}

let res_big: VecZnxBig<&mut [u8], B> = module.vec_znx_idft_apply_consume(res_dft);

(0..cols).for_each(|i| {
module.vec_znx_big_normalize(basek, &mut self.data, i, &res_big, i, scratch_1);
module.vec_znx_big_normalize(
basek_out,
&mut self.data,
i,
basek_ggsw,
&res_big,
i,
scratch_1,
);
});
}

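Note: the in-line comment `(lhs.size() + di) / digits = (a - (digit - di - 1)).div_ceil(digits)` gives the number of limbs contributed by the di-th digit slice. A standalone check of that identity (restricted to a >= digits so the usize subtraction cannot underflow):

    fn main() {
        for a in 4..64usize {
            for digits in 1..=4usize {
                for di in 0..digits {
                    let floor_form = (a + di) / digits;
                    let ceil_form = (a - (digits - di - 1)).div_ceil(digits);
                    assert_eq!(floor_form, ceil_form);
                }
            }
        }
    }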
@@ -152,42 +198,32 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VmpApplyDftToDft<B>
+ VmpApplyDftToDftAdd<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let basek: usize = self.basek();
let basek_in: usize = self.base2k().into();
let basek_ggsw: usize = rhs.base2k().into();

#[cfg(debug_assertions)]
{
use poulpy_hal::api::ScratchAvailable;

assert_eq!(rhs.rank(), self.rank());
assert_eq!(self.basek(), basek);
assert_eq!(rhs.n(), self.n());
assert!(
scratch.available()
>= GLWECiphertext::external_product_scratch_space(
module,
self.basek(),
self.k(),
self.k(),
rhs.k(),
rhs.digits(),
rhs.rank(),
)
);
assert!(scratch.available() >= GLWECiphertext::external_product_inplace_scratch_space(module, self, rhs,));
}

let cols: usize = rhs.rank() + 1;
let digits: usize = rhs.digits();
let cols: usize = (rhs.rank() + 1).into();
let digits: usize = rhs.digits().into();
let a_size: usize = (self.size() * basek_in).div_ceil(basek_ggsw);

let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n(), cols, self.size().div_ceil(digits));

let (mut res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), cols, rhs.size()); // Todo optimise
let (mut a_dft, scratch_2) = scratch_1.take_vec_znx_dft(self.n().into(), cols, a_size.div_ceil(digits));
a_dft.data_mut().fill(0);

{
(0..digits).for_each(|di| {
if basek_in == basek_ggsw {
for di in 0..digits {
// (lhs.size() + di) / digits = (a - (digit - di - 1)).div_ceil(digits)
a_dft.set_size((self.size() + di) / digits);

@@ -200,29 +236,68 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
// noise is kept with respect to the ideal functionality.
res_dft.set_size(rhs.size() - ((digits - di) as isize - 2).max(0) as usize);

(0..cols).for_each(|col_i| {
module.vec_znx_dft_apply(
digits,
digits - 1 - di,
&mut a_dft,
col_i,
&self.data,
col_i,
);
});
for j in 0..cols {
module.vec_znx_dft_apply(digits, digits - 1 - di, &mut a_dft, j, &self.data, j);
}

if di == 0 {
module.vmp_apply_dft_to_dft(&mut res_dft, &a_dft, &rhs.data, scratch_2);
} else {
module.vmp_apply_dft_to_dft_add(&mut res_dft, &a_dft, &rhs.data, di, scratch_2);
}
});
}
} else {
let (mut a_conv, scratch_3) = scratch_2.take_vec_znx(module.n(), cols, a_size);

for j in 0..cols {
module.vec_znx_normalize(
basek_ggsw,
&mut a_conv,
j,
basek_in,
&self.data,
j,
scratch_3,
);
}

for di in 0..digits {
// (lhs.size() + di) / digits = (a - (digit - di - 1)).div_ceil(digits)
a_dft.set_size((self.size() + di) / digits);

// Small optimization for digits > 2
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
// As such we can ignore the last digits-2 limbs safely of the sum of vmp products.
// It is possible to further ignore the last digits-1 limbs, but this introduce
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
// noise is kept with respect to the ideal functionality.
res_dft.set_size(rhs.size() - ((digits - di) as isize - 2).max(0) as usize);

for j in 0..cols {
module.vec_znx_dft_apply(digits, digits - 1 - di, &mut a_dft, j, &self.data, j);
}

if di == 0 {
module.vmp_apply_dft_to_dft(&mut res_dft, &a_dft, &rhs.data, scratch_2);
} else {
module.vmp_apply_dft_to_dft_add(&mut res_dft, &a_dft, &rhs.data, di, scratch_2);
}
}
}

let res_big: VecZnxBig<&mut [u8], B> = module.vec_znx_idft_apply_consume(res_dft);

(0..cols).for_each(|i| {
module.vec_znx_big_normalize(basek, &mut self.data, i, &res_big, i, scratch_1);
});
for j in 0..cols {
module.vec_znx_big_normalize(
basek_in,
&mut self.data,
j,
basek_ggsw,
&res_big,
j,
scratch_1,
);
}
}
}
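Note: in the in-place variant above, `a_size` collapses to the old `self.size()` whenever the two bases agree, so the `basek_in == basek_ggsw` fast path and the conversion path agree on sizing. A one-line sanity check:

    fn main() {
        let (size, k) = (7usize, 18usize);
        assert_eq!((size * k).div_ceil(k), size); // same-base conversion is the identity on limb counts
    }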
@@ -3,17 +3,17 @@ use std::collections::HashMap;
use poulpy_hal::{
api::{
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxAddInplace, VecZnxAutomorphismInplace, VecZnxBigAddSmallInplace,
VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallBInplace, VecZnxCopy,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNegateInplace, VecZnxNormalizeInplace, VecZnxRotate,
VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub, VecZnxSubABInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes,
VecZnxBigAutomorphismInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxBigSubSmallNegateInplace, VecZnxCopy,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNegateInplace, VecZnxNormalize,
VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate, VecZnxRotateInplace, VecZnxRshInplace, VecZnxSub,
VecZnxSubInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
};

use crate::{
GLWEOperations, TakeGLWECt,
layouts::{GLWECiphertext, Infos, prepared::GGLWEAutomorphismKeyPrepared},
layouts::{GGLWELayoutInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWEAutomorphismKeyPrepared},
};

/// [GLWEPacker] enables only the fly GLWE packing
@@ -40,12 +40,15 @@ impl Accumulator {
/// #Arguments
///
/// * `module`: static backend FFT tables.
/// * `basek`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
/// * `base2k`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
/// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus.
/// * `rank`: rank of the GLWE ciphertext.
pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
pub fn alloc<A>(infos: &A) -> Self
where
A: GLWEInfos,
{
Self {
data: GLWECiphertext::alloc(n, basek, k, rank),
data: GLWECiphertext::alloc(infos),
value: false,
control: false,
}
@@ -63,13 +66,13 @@ impl GLWEPacker {
/// and N GLWE ciphertext can be packed. With `log_batch=2` all coefficients
/// which are multiples of X^{N/4} are packed. Meaning that N/4 ciphertexts
/// can be packed.
/// * `basek`: base 2 logarithm of the GLWE ciphertext in memory digit representation.
/// * `k`: base 2 precision of the GLWE ciphertext precision over the Torus.
/// * `rank`: rank of the GLWE ciphertext.
pub fn new(n: usize, log_batch: usize, basek: usize, k: usize, rank: usize) -> Self {
pub fn new<A>(infos: &A, log_batch: usize) -> Self
where
A: GLWEInfos,
{
let mut accumulators: Vec<Accumulator> = Vec::<Accumulator>::new();
let log_n: usize = (usize::BITS - (n - 1).leading_zeros()) as _;
let log_n: usize = infos.n().log2();
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(n, basek, k, rank)));
(0..log_n - log_batch).for_each(|_| accumulators.push(Accumulator::alloc(infos)));
Self {
accumulators,
log_batch,
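Note: with both constructors layout-driven, a packer and its accumulators can now be allocated from the same `GLWEInfos` value. A hedged usage sketch (the `infos` value and the `log_batch` choice are assumptions, not taken from this commit):

    // `infos` is any value implementing GLWEInfos, e.g. the layout of the
    // ciphertexts to be packed; log_batch = 0 packs all N coefficients.
    let packer = GLWEPacker::new(&infos, 0);
    let acc = Accumulator::alloc(&infos);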
@@ -87,18 +90,13 @@ impl GLWEPacker {
}

/// Number of scratch space bytes required to call [Self::add].
pub fn scratch_space<B: Backend>(
module: &Module<B>,
basek: usize,
ct_k: usize,
k_ksk: usize,
digits: usize,
rank: usize,
) -> usize
pub fn scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GLWEInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
pack_core_scratch_space(module, basek, ct_k, k_ksk, digits, rank)
pack_core_scratch_space(module, out_infos, key_infos)
}

pub fn galois_elements<B: Backend>(module: &Module<B>) -> Vec<i64> {
@@ -137,17 +135,19 @@ impl GLWEPacker {
+ VecZnxRshInplace<B>
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxSubABInplace
+ VecZnxSubInplace
+ VecZnxRotate
+ VecZnxAutomorphismInplace<B>
+ VecZnxBigSubSmallBInplace<B>
+ VecZnxBigAutomorphismInplace<B>,
+ VecZnxBigSubSmallNegateInplace<B>
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
assert!(
self.counter < self.accumulators[0].data.n(),
(self.counter as u32) < self.accumulators[0].data.n(),
"Packing limit of {} reached",
self.accumulators[0].data.n() >> self.log_batch
self.accumulators[0].data.n().0 as usize >> self.log_batch
);

pack_core(
@@ -166,7 +166,7 @@ impl GLWEPacker {
where
Module<B>: VecZnxCopy,
{
assert!(self.counter == self.accumulators[0].data.n());
assert!(self.counter as u32 == self.accumulators[0].data.n());
// Copy result GLWE into res GLWE
res.copy(
module,
@@ -177,18 +177,13 @@ impl GLWEPacker {
}
}

fn pack_core_scratch_space<B: Backend>(
module: &Module<B>,
basek: usize,
ct_k: usize,
k_ksk: usize,
digits: usize,
rank: usize,
) -> usize
fn pack_core_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GLWEInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
combine_scratch_space(module, basek, ct_k, k_ksk, digits, rank)
combine_scratch_space(module, out_infos, key_infos)
}

fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
@@ -215,11 +210,13 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
+ VecZnxRshInplace<B>
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxSubABInplace
+ VecZnxSubInplace
+ VecZnxRotate
+ VecZnxAutomorphismInplace<B>
+ VecZnxBigSubSmallBInplace<B>
+ VecZnxBigAutomorphismInplace<B>,
+ VecZnxBigSubSmallNegateInplace<B>
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
let log_n: usize = module.log_n();
@@ -271,20 +268,15 @@ fn pack_core<D: DataRef, DataAK: DataRef, B: Backend>(
}
}

fn combine_scratch_space<B: Backend>(
module: &Module<B>,
basek: usize,
ct_k: usize,
k_ksk: usize,
digits: usize,
rank: usize,
) -> usize
fn combine_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GLWEInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GLWECiphertext::bytes_of(module.n(), basek, ct_k, rank)
GLWECiphertext::alloc_bytes(out_infos)
+ (GLWECiphertext::rsh_scratch_space(module.n())
| GLWECiphertext::automorphism_scratch_space(module, basek, ct_k, ct_k, k_ksk, digits, rank))
| GLWECiphertext::automorphism_inplace_scratch_space(module, out_infos, key_infos))
}

/// [combine] merges two ciphertexts together.
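Note: throughout these scratch-space functions, `x + y` composes regions that are live at the same time, while `x | y` covers two alternatives that are never live at once; for unsigned integers the bitwise OR is a cheap upper bound on the max (this reading of the convention is ours, not stated in the commit):

    fn main() {
        let (x, y) = (1usize << 14, 3usize << 12);
        assert!((x | y) >= x.max(y)); // covers whichever alternative is used
        assert!((x | y) <= x + y);    // and never exceeds the sequential sum
    }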
@@ -312,19 +304,17 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
+ VecZnxRshInplace<B>
+ VecZnxAddInplace
+ VecZnxNormalizeInplace<B>
+ VecZnxSubABInplace
+ VecZnxSubInplace
+ VecZnxRotate
+ VecZnxAutomorphismInplace<B>
+ VecZnxBigSubSmallBInplace<B>
+ VecZnxBigAutomorphismInplace<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
+ VecZnxBigSubSmallNegateInplace<B>
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx + TakeGLWECt,
{
let n: usize = acc.data.n();
let log_n: usize = (u64::BITS - (n - 1).leading_zeros()) as _;
let basek: usize = a.basek();
let k: usize = a.k();
let rank: usize = a.rank();
let log_n: usize = acc.data.n().log2();
let a: &mut GLWECiphertext<Vec<u8>> = &mut acc.data;

let gal_el: i64 = if i == 0 {
-1
@@ -346,7 +336,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
// since 2*(I(X) * Q/2) = I(X) * Q = 0 mod Q.
if acc.value {
if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(a);

// a = a * X^-t
a.rotate_inplace(module, -t, scratch_1);
@@ -365,7 +355,7 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
if let Some(key) = auto_keys.get(&gal_el) {
tmp_b.automorphism_inplace(module, key, scratch_1);
} else {
panic!("auto_key[{}] not found", gal_el);
panic!("auto_key[{gal_el}] not found");
}

// a = a * X^-t + b - phi(a * X^-t - b)
@@ -382,19 +372,19 @@ fn combine<D: DataRef, DataAK: DataRef, B: Backend>(
if let Some(key) = auto_keys.get(&gal_el) {
a.automorphism_add_inplace(module, key, scratch);
} else {
panic!("auto_key[{}] not found", gal_el);
panic!("auto_key[{gal_el}] not found");
}
}
} else if let Some(b) = b {
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(n, basek, k, rank);
let (mut tmp_b, scratch_1) = scratch.take_glwe_ct(a);
tmp_b.rotate(module, 1 << (log_n - i - 1), b);
tmp_b.rsh(module, 1, scratch_1);

// a = (b* X^t - phi(b* X^t))
if let Some(key) = auto_keys.get(&gal_el) {
a.automorphism_sub_ba(module, &tmp_b, key, scratch_1);
a.automorphism_sub_negate(module, &tmp_b, key, scratch_1);
} else {
panic!("auto_key[{}] not found", gal_el);
panic!("auto_key[{gal_el}] not found");
}

acc.value = true;
@@ -2,15 +2,19 @@ use std::collections::HashMap;

use poulpy_hal::{
api::{
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxRshInplace,
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAutomorphismInplace, VecZnxBigNormalize,
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize,
VecZnxNormalizeTmpBytes, VecZnxRshInplace, VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch},
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx},
};

use crate::{
layouts::{GLWECiphertext, prepared::GGLWEAutomorphismKeyPrepared},
TakeGLWECt,
layouts::{
Base2K, GGLWELayoutInfos, GLWECiphertext, GLWECiphertextLayout, GLWEInfos, LWEInfos,
prepared::GGLWEAutomorphismKeyPrepared,
},
operations::GLWEOperations,
};

@@ -27,34 +31,38 @@ impl GLWECiphertext<Vec<u8>> {
gal_els
}

#[allow(clippy::too_many_arguments)]
pub fn trace_scratch_space<B: Backend>(
pub fn trace_scratch_space<B: Backend, OUT, IN, KEY>(
module: &Module<B>,
basek: usize,
out_k: usize,
in_k: usize,
ksk_k: usize,
digits: usize,
rank: usize,
out_infos: &OUT,
in_infos: &IN,
key_infos: &KEY,
) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GLWEInfos,
IN: GLWEInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
Self::automorphism_inplace_scratch_space(module, basek, out_k.min(in_k), ksk_k, digits, rank)
let trace: usize = Self::automorphism_inplace_scratch_space(module, out_infos, key_infos);
if in_infos.base2k() != key_infos.base2k() {
let glwe_conv: usize = VecZnx::alloc_bytes(
module.n(),
(key_infos.rank_out() + 1).into(),
out_infos.k().min(in_infos.k()).div_ceil(key_infos.base2k()) as usize,
) + module.vec_znx_normalize_tmp_bytes();
return glwe_conv + trace;
}

trace
}

pub fn trace_inplace_scratch_space<B: Backend>(
module: &Module<B>,
basek: usize,
out_k: usize,
ksk_k: usize,
digits: usize,
rank: usize,
) -> usize
pub fn trace_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GLWEInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
Self::automorphism_inplace_scratch_space(module, basek, out_k, ksk_k, digits, rank)
Self::trace_scratch_space(module, out_infos, out_infos, key_infos)
}
}

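Note: in the cross-base branch above, the conversion buffer is sized for min(k_out, k_in) bits re-encoded in the key's base, over rank_out + 1 columns. A standalone check of the limb count it requests (parameter values are illustrative only):

    fn main() {
        let (k_out, k_in, base2k_key) = (54u32, 72u32, 18u32);
        let limbs = k_out.min(k_in).div_ceil(base2k_key) as usize;
        assert_eq!(limbs, 3); // 54 bits in 18-bit limbs
    }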
@@ -79,8 +87,10 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigNormalize<B>
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxRshInplace<B>
+ VecZnxCopy,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+ VecZnxCopy
+ VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
self.copy(module, lhs);
self.trace_inplace(module, start, end, auto_keys, scratch);
@@ -104,23 +114,92 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>
+ VecZnxBigAutomorphismInplace<B>
+ VecZnxRshInplace<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+ VecZnxRshInplace<B>
+ VecZnxNormalizeTmpBytes
+ VecZnxNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
(start..end).for_each(|i| {
self.rsh(module, 1, scratch);

let p: i64 = if i == 0 {
-1
} else {
module.galois_element(1 << (i - 1))
};

if let Some(key) = auto_keys.get(&p) {
self.automorphism_add_inplace(module, key, scratch);
} else {
panic!("auto_keys[{}] is empty", p)
}
});
let basek_ksk: Base2K = auto_keys
.get(auto_keys.keys().next().unwrap())
.unwrap()
.base2k();

#[cfg(debug_assertions)]
{
assert_eq!(self.n(), module.n() as u32);
assert!(start < end);
assert!(end <= module.log_n());
for key in auto_keys.values() {
assert_eq!(key.n(), module.n() as u32);
assert_eq!(key.base2k(), basek_ksk);
assert_eq!(key.rank_in(), self.rank());
assert_eq!(key.rank_out(), self.rank());
}
}

if self.base2k() != basek_ksk {
let (mut self_conv, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout {
n: module.n().into(),
base2k: basek_ksk,
k: self.k(),
rank: self.rank(),
});

for j in 0..(self.rank() + 1).into() {
module.vec_znx_normalize(
basek_ksk.into(),
&mut self_conv.data,
j,
basek_ksk.into(),
&self.data,
j,
scratch_1,
);
}

for i in start..end {
self_conv.rsh(module, 1, scratch_1);

let p: i64 = if i == 0 {
-1
} else {
module.galois_element(1 << (i - 1))
};

if let Some(key) = auto_keys.get(&p) {
self_conv.automorphism_add_inplace(module, key, scratch_1);
} else {
panic!("auto_keys[{p}] is empty")
}
}

for j in 0..(self.rank() + 1).into() {
module.vec_znx_normalize(
self.base2k().into(),
&mut self.data,
j,
basek_ksk.into(),
&self_conv.data,
j,
scratch_1,
);
}
} else {
for i in start..end {
self.rsh(module, 1, scratch);

let p: i64 = if i == 0 {
-1
} else {
module.galois_element(1 << (i - 1))
};

if let Some(key) = auto_keys.get(&p) {
self.automorphism_add_inplace(module, key, scratch);
} else {
panic!("auto_keys[{p}] is empty")
}
}
}
}
}
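Note: the normalize-in / operate / normalize-out pattern above relies on base-2^k re-encoding being lossless whenever the limb counts cover the same precision. A toy, unsigned illustration of that invariant (the library works on signed, balanced digits with carry propagation, which this sketch deliberately omits):

    fn to_limbs(mut v: u128, base2k: u32, limbs: usize) -> Vec<u64> {
        let mask = (1u128 << base2k) - 1;
        let mut out = vec![0u64; limbs];
        for l in (0..limbs).rev() {
            out[l] = (v & mask) as u64; // least-significant limb last
            v >>= base2k;
        }
        out
    }

    fn from_limbs(limbs_v: &[u64], base2k: u32) -> u128 {
        limbs_v.iter().fold(0u128, |acc, &l| (acc << base2k) | l as u128)
    }

    fn main() {
        let x: u128 = 0x0123_4567_89ab_cdef; // 64-bit payload
        let a = to_limbs(x, 18, 4); // 4 x 18 = 72 bits of precision
        let b = to_limbs(from_limbs(&a, 18), 12, 6); // re-encode into 6 x 12 = 72 bits
        assert_eq!(from_limbs(&b, 12), x); // round-trip is exact
    }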
@@ -1,46 +1,40 @@
use poulpy_hal::{
api::{
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
VmpApplyDftToDftTmpBytes,
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
},
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxZero},
};

use crate::layouts::{
GGLWEAutomorphismKey, GGLWESwitchingKey, GLWECiphertext, Infos,
GGLWEAutomorphismKey, GGLWELayoutInfos, GGLWESwitchingKey, GLWECiphertext, GLWEInfos,
prepared::{GGLWEAutomorphismKeyPrepared, GGLWESwitchingKeyPrepared},
};

impl GGLWEAutomorphismKey<Vec<u8>> {
#[allow(clippy::too_many_arguments)]
pub fn keyswitch_scratch_space<B: Backend>(
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
module: &Module<B>,
basek: usize,
k_out: usize,
k_in: usize,
k_ksk: usize,
digits: usize,
rank: usize,
out_infos: &OUT,
in_infos: &IN,
key_infos: &KEY,
) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GGLWELayoutInfos,
IN: GGLWELayoutInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GGLWESwitchingKey::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank, rank)
GGLWESwitchingKey::keyswitch_scratch_space(module, out_infos, in_infos, key_infos)
}

pub fn keyswitch_inplace_scratch_space<B: Backend>(
module: &Module<B>,
basek: usize,
k_out: usize,
k_ksk: usize,
digits: usize,
rank: usize,
) -> usize
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_infos: &KEY) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GGLWELayoutInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
GGLWESwitchingKey::keyswitch_inplace_scratch_space(module, out_infos, key_infos)
}
}

@@ -60,8 +54,10 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
self.key.keyswitch(module, &lhs.key, rhs, scratch);
}
@@ -80,43 +76,38 @@ impl<DataSelf: DataMut> GGLWEAutomorphismKey<DataSelf> {
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
{
self.key.keyswitch_inplace(module, &rhs.key, scratch);
}
}

impl GGLWESwitchingKey<Vec<u8>> {
#[allow(clippy::too_many_arguments)]
pub fn keyswitch_scratch_space<B: Backend>(
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
module: &Module<B>,
basek: usize,
k_out: usize,
k_in: usize,
k_ksk: usize,
digits: usize,
rank_in: usize,
rank_out: usize,
out_infos: &OUT,
in_infos: &IN,
key_apply: &KEY,
) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GGLWELayoutInfos,
IN: GGLWELayoutInfos,
KEY: GGLWELayoutInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits, rank_in, rank_out)
GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, key_apply)
}

pub fn keyswitch_inplace_scratch_space<B: Backend>(
module: &Module<B>,
basek: usize,
k_out: usize,
k_ksk: usize,
digits: usize,
rank: usize,
) -> usize
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
where
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
OUT: GGLWELayoutInfos + GLWEInfos,
KEY: GGLWELayoutInfos + GLWEInfos,
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
{
GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_out, k_ksk, digits, rank)
GLWECiphertext::keyswitch_inplace_scratch_space(module, out_infos, key_apply)
}
}

@@ -136,8 +127,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -168,17 +161,24 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
self.rows(),
lhs.rows()
);
assert_eq!(
self.digits(),
lhs.digits(),
"ksk_out digits: {} != ksk_in digits: {}",
self.digits(),
lhs.digits()
)
}

(0..self.rank_in()).for_each(|col_i| {
(0..self.rank_in().into()).for_each(|col_i| {
(0..self.rows()).for_each(|row_j| {
(0..self.rows().into()).for_each(|row_j| {
self.at_mut(row_j, col_i)
.keyswitch(module, &lhs.at(row_j, col_i), rhs, scratch);
});
});

(self.rows().min(lhs.rows())..self.rows()).for_each(|row_i| {
(self.rows().min(lhs.rows()).into()..self.rows().into()).for_each(|row_i| {
(0..self.rank_in()).for_each(|col_j| {
(0..self.rank_in().into()).for_each(|col_j| {
self.at_mut(row_i, col_j).data.zero();
});
});
@@ -198,8 +198,10 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
+ VecZnxDftApply<B>
+ VecZnxIdftApplyConsume<B>
+ VecZnxBigAddSmallInplace<B>
+ VecZnxBigNormalize<B>,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
+ VecZnxBigNormalize<B>
+ VecZnxNormalize<B>
+ VecZnxNormalizeTmpBytes,
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
{
#[cfg(debug_assertions)]
{
@@ -212,8 +214,8 @@ impl<DataSelf: DataMut> GGLWESwitchingKey<DataSelf> {
);
}

(0..self.rank_in()).for_each(|col_i| {
(0..self.rank_in().into()).for_each(|col_i| {
(0..self.rows()).for_each(|row_j| {
(0..self.rows().into()).for_each(|row_j| {
self.at_mut(row_j, col_i)
.keyswitch_inplace(module, rhs, scratch)
});
@@ -1,101 +1,115 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ScratchAvailable, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes, VecZnxBigNormalize,
|
ScratchAvailable, TakeVecZnx, TakeVecZnxBig, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigAllocBytes,
|
||||||
VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxDftCopy,
|
VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxCopy, VecZnxDftAddInplace, VecZnxDftAllocBytes, VecZnxDftApply,
|
||||||
VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalizeTmpBytes, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
VecZnxDftCopy, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||||
VmpApplyDftToDftTmpBytes,
|
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
|
layouts::{Backend, DataMut, DataRef, Module, Scratch, VecZnx, VmpPMat, ZnxInfos},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
layouts::{
|
layouts::{
|
||||||
GGLWECiphertext, GGSWCiphertext, GLWECiphertext, Infos,
|
GGLWECiphertext, GGLWELayoutInfos, GGSWCiphertext, GGSWInfos, GLWECiphertext, GLWEInfos, LWEInfos,
|
||||||
prepared::{GGLWESwitchingKeyPrepared, GGLWETensorKeyPrepared},
|
prepared::{GGLWESwitchingKeyPrepared, GGLWETensorKeyPrepared},
|
||||||
},
|
},
|
||||||
operations::GLWEOperations,
|
operations::GLWEOperations,
|
||||||
};
|
};
|
||||||
|
|
||||||
impl GGSWCiphertext<Vec<u8>> {
|
impl GGSWCiphertext<Vec<u8>> {
|
||||||
pub(crate) fn expand_row_scratch_space<B: Backend>(
|
pub(crate) fn expand_row_scratch_space<B: Backend, OUT, TSK>(module: &Module<B>, out_infos: &OUT, tsk_infos: &TSK) -> usize
|
||||||
module: &Module<B>,
|
|
||||||
basek: usize,
|
|
||||||
self_k: usize,
|
|
||||||
k_tsk: usize,
|
|
||||||
digits: usize,
|
|
||||||
rank: usize,
|
|
||||||
) -> usize
|
|
||||||
where
|
where
|
||||||
|
OUT: GGSWInfos,
|
||||||
|
TSK: GGLWELayoutInfos,
|
||||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
|
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigAllocBytes + VecZnxNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
let tsk_size: usize = k_tsk.div_ceil(basek);
|
let tsk_size: usize = tsk_infos.k().div_ceil(tsk_infos.base2k()) as usize;
|
||||||
let self_size_out: usize = self_k.div_ceil(basek);
|
let size_in: usize = out_infos
|
||||||
let self_size_in: usize = self_size_out.div_ceil(digits);
|
.k()
|
||||||
|
.div_ceil(tsk_infos.base2k())
|
||||||
|
.div_ceil(tsk_infos.digits().into()) as usize;
|
||||||
|
|
||||||
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes(rank + 1, tsk_size);
|
let tmp_dft_i: usize = module.vec_znx_dft_alloc_bytes((tsk_infos.rank_out() + 1).into(), tsk_size);
|
||||||
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, self_size_in);
|
let tmp_a: usize = module.vec_znx_dft_alloc_bytes(1, size_in);
|
||||||
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
|
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
|
||||||
self_size_out,
|
tsk_size,
|
||||||
self_size_in,
|
size_in,
|
||||||
self_size_in,
|
size_in,
|
||||||
rank,
|
(tsk_infos.rank_in()).into(), // Verify if rank+1
|
||||||
rank,
|
(tsk_infos.rank_out()).into(), // Verify if rank+1
|
||||||
tsk_size,
|
tsk_size,
|
||||||
);
|
);
|
||||||
let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
|
let tmp_idft: usize = module.vec_znx_big_alloc_bytes(1, tsk_size);
|
||||||
let norm: usize = module.vec_znx_normalize_tmp_bytes();
|
let norm: usize = module.vec_znx_normalize_tmp_bytes();
|
||||||
|
|
||||||
tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
|
tmp_dft_i + ((tmp_a + vmp) | (tmp_idft + norm))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn keyswitch_scratch_space<B: Backend>(
|
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY, TSK>(
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
basek: usize,
|
out_infos: &OUT,
|
||||||
k_out: usize,
|
in_infos: &IN,
|
||||||
k_in: usize,
|
apply_infos: &KEY,
|
||||||
k_ksk: usize,
|
tsk_infos: &TSK,
|
||||||
digits_ksk: usize,
|
|
||||||
k_tsk: usize,
|
|
||||||
digits_tsk: usize,
|
|
||||||
rank: usize,
|
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
|
OUT: GGSWInfos,
|
||||||
|
IN: GGSWInfos,
|
||||||
|
KEY: GGLWELayoutInfos,
|
||||||
|
TSK: GGLWELayoutInfos,
|
||||||
Module<B>: VecZnxDftAllocBytes
|
Module<B>: VecZnxDftAllocBytes
|
||||||
+ VmpApplyDftToDftTmpBytes
|
+ VmpApplyDftToDftTmpBytes
|
||||||
+ VecZnxBigAllocBytes
|
+ VecZnxBigAllocBytes
|
||||||
+ VecZnxNormalizeTmpBytes
|
+ VecZnxNormalizeTmpBytes
|
||||||
+ VecZnxBigNormalizeTmpBytes,
|
+ VecZnxBigNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
let out_size: usize = k_out.div_ceil(basek);
|
#[cfg(debug_assertions)]
|
||||||
let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, out_size);
|
{
|
||||||
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
|
assert_eq!(apply_infos.rank_in(), apply_infos.rank_out());
|
||||||
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, basek, k_out, k_in, k_ksk, digits_ksk, rank, rank);
|
assert_eq!(tsk_infos.rank_in(), tsk_infos.rank_out());
|
||||||
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, basek, k_out, k_tsk, digits_tsk, rank);
|
assert_eq!(apply_infos.rank_in(), tsk_infos.rank_in());
|
||||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, out_size);
|
}
|
||||||
res_znx + ci_dft + (ks | expand_rows | res_dft)
|
|
||||||
|
let rank: usize = apply_infos.rank_out().into();
|
||||||
|
|
||||||
|
let size_out: usize = out_infos.k().div_ceil(out_infos.base2k()) as usize;
|
||||||
|
let res_znx: usize = VecZnx::alloc_bytes(module.n(), rank + 1, size_out);
|
||||||
|
let ci_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out);
|
||||||
|
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, out_infos, in_infos, apply_infos);
|
||||||
|
let expand_rows: usize = GGSWCiphertext::expand_row_scratch_space(module, out_infos, tsk_infos);
|
||||||
|
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank + 1, size_out);
|
||||||
|
|
||||||
|
if in_infos.base2k() == tsk_infos.base2k() {
|
||||||
|
res_znx + ci_dft + (ks | expand_rows | res_dft)
|
||||||
|
} else {
|
||||||
|
let a_conv: usize = VecZnx::alloc_bytes(
|
||||||
|
module.n(),
|
||||||
|
1,
|
||||||
|
out_infos.k().div_ceil(tsk_infos.base2k()) as usize,
|
||||||
|
) + module.vec_znx_normalize_tmp_bytes();
|
||||||
|
res_znx + ci_dft + (a_conv | ks | expand_rows | res_dft)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
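The cross-base branch above sizes a_conv by re-counting the output precision in the tensor key's base. A standalone sketch of that limb arithmetic (the concrete numbers are illustrative, not taken from the library):

// Number of base-2^base2k limbs needed to hold k bits of torus precision.
fn limbs(k: u32, base2k: u32) -> u32 {
    k.div_ceil(base2k)
}

fn main() {
    // The same 54-bit precision needs more limbs in a smaller base:
    assert_eq!(limbs(54, 18), 3);
    assert_eq!(limbs(54, 12), 5);
    // With equal bases the extra a_conv buffer is never needed.
}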
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn keyswitch_inplace_scratch_space<B: Backend>(
|
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY, TSK>(
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
basek: usize,
|
out_infos: &OUT,
|
||||||
k_out: usize,
|
apply_infos: &KEY,
|
||||||
k_ksk: usize,
|
tsk_infos: &TSK,
|
||||||
digits_ksk: usize,
|
|
||||||
k_tsk: usize,
|
|
||||||
digits_tsk: usize,
|
|
||||||
rank: usize,
|
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
|
OUT: GGSWInfos,
|
||||||
|
KEY: GGLWELayoutInfos,
|
||||||
|
TSK: GGLWELayoutInfos,
|
||||||
Module<B>: VecZnxDftAllocBytes
|
Module<B>: VecZnxDftAllocBytes
|
||||||
+ VmpApplyDftToDftTmpBytes
|
+ VmpApplyDftToDftTmpBytes
|
||||||
+ VecZnxBigAllocBytes
|
+ VecZnxBigAllocBytes
|
||||||
+ VecZnxNormalizeTmpBytes
|
+ VecZnxNormalizeTmpBytes
|
||||||
+ VecZnxBigNormalizeTmpBytes,
|
+ VecZnxBigNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
GGSWCiphertext::keyswitch_scratch_space(
|
GGSWCiphertext::keyswitch_scratch_space(module, out_infos, out_infos, apply_infos, tsk_infos)
|
||||||
module, basek, k_out, k_out, k_ksk, digits_ksk, k_tsk, digits_tsk, rank,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -120,18 +134,21 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
+ VmpApplyDftToDftAdd<B>
|
+ VmpApplyDftToDftAdd<B>
|
||||||
+ VecZnxDftAddInplace<B>
|
+ VecZnxDftAddInplace<B>
|
||||||
+ VecZnxBigNormalize<B>
|
+ VecZnxBigNormalize<B>
|
||||||
+ VecZnxIdftApplyTmpA<B>,
|
+ VecZnxIdftApplyTmpA<B>
|
||||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
{
|
{
|
||||||
assert_eq!(self.rank(), a.rank());
|
use crate::layouts::{GLWEInfos, LWEInfos};
|
||||||
|
|
||||||
|
assert_eq!(self.rank(), a.rank_out());
|
||||||
assert_eq!(self.rows(), a.rows());
|
assert_eq!(self.rows(), a.rows());
|
||||||
assert_eq!(self.n(), module.n());
|
assert_eq!(self.n(), module.n() as u32);
|
||||||
assert_eq!(a.n(), module.n());
|
assert_eq!(a.n(), module.n() as u32);
|
||||||
assert_eq!(tsk.n(), module.n());
|
assert_eq!(tsk.n(), module.n() as u32);
|
||||||
}
|
}
|
||||||
(0..self.rows()).for_each(|row_i| {
|
(0..self.rows().into()).for_each(|row_i| {
|
||||||
self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
|
self.at_mut(row_i, 0).copy(module, &a.at(row_i, 0));
|
||||||
});
|
});
|
||||||
self.expand_row(module, tsk, scratch);
|
self.expand_row(module, tsk, scratch);
|
||||||
@@ -159,10 +176,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
+ VecZnxNormalizeTmpBytes
|
+ VecZnxNormalizeTmpBytes
|
||||||
+ VecZnxDftCopy<B>
|
+ VecZnxDftCopy<B>
|
||||||
+ VecZnxDftAddInplace<B>
|
+ VecZnxDftAddInplace<B>
|
||||||
+ VecZnxIdftApplyTmpA<B>,
|
+ VecZnxIdftApplyTmpA<B>
|
||||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
(0..lhs.rows()).for_each(|row_i| {
|
(0..lhs.rows().into()).for_each(|row_i| {
|
||||||
// Key-switch column 0, i.e.
|
// Key-switch column 0, i.e.
|
||||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||||
self.at_mut(row_i, 0)
|
self.at_mut(row_i, 0)
|
||||||
@@ -192,10 +210,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
+ VecZnxNormalizeTmpBytes
|
+ VecZnxNormalizeTmpBytes
|
||||||
+ VecZnxDftCopy<B>
|
+ VecZnxDftCopy<B>
|
||||||
+ VecZnxDftAddInplace<B>
|
+ VecZnxDftAddInplace<B>
|
||||||
+ VecZnxIdftApplyTmpA<B>,
|
+ VecZnxIdftApplyTmpA<B>
|
||||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
(0..self.rows()).for_each(|row_i| {
|
(0..self.rows().into()).for_each(|row_i| {
|
||||||
// Key-switch column 0, i.e.
|
// Key-switch column 0, i.e.
|
||||||
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
// col 0: (-(a0s0 + a1s1 + a2s2) + M[i], a0, a1, a2) -> (-(a0s0' + a1s1' + a2s2') + M[i], a0, a1, a2)
|
||||||
self.at_mut(row_i, 0)
|
self.at_mut(row_i, 0)
|
||||||
@@ -220,34 +239,41 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
+ VmpApplyDftToDftAdd<B>
|
+ VmpApplyDftToDftAdd<B>
|
||||||
+ VecZnxDftAddInplace<B>
|
+ VecZnxDftAddInplace<B>
|
||||||
+ VecZnxBigNormalize<B>
|
+ VecZnxBigNormalize<B>
|
||||||
+ VecZnxIdftApplyTmpA<B>,
|
+ VecZnxIdftApplyTmpA<B>
|
||||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B>,
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnxBig<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
assert!(
|
let basek_in: usize = self.base2k().into();
|
||||||
scratch.available()
|
let basek_tsk: usize = tsk.base2k().into();
|
||||||
>= GGSWCiphertext::expand_row_scratch_space(
|
|
||||||
module,
|
|
||||||
self.basek(),
|
|
||||||
self.k(),
|
|
||||||
tsk.k(),
|
|
||||||
tsk.digits(),
|
|
||||||
tsk.rank()
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
let n: usize = self.n();
|
assert!(scratch.available() >= GGSWCiphertext::expand_row_scratch_space(module, self, tsk));
|
||||||
let rank: usize = self.rank();
|
|
||||||
|
let n: usize = self.n().into();
|
||||||
|
let rank: usize = self.rank().into();
|
||||||
let cols: usize = rank + 1;
|
let cols: usize = rank + 1;
|
||||||
|
|
||||||
// Keyswitch the j-th row of the col 0
|
let a_size: usize = (self.size() * basek_in).div_ceil(basek_tsk);
|
||||||
(0..self.rows()).for_each(|row_i| {
|
|
||||||
// Pre-compute DFT of (a0, a1, a2)
|
|
||||||
let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, self.size());
|
|
||||||
(0..cols).for_each(|i| {
|
|
||||||
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &self.at(row_i, 0).data, i);
|
|
||||||
});
|
|
||||||
|
|
||||||
(1..cols).for_each(|col_j| {
|
// Keyswitch the j-th row of col 0
|
||||||
|
for row_i in 0..self.rows().into() {
|
||||||
|
let a = &self.at(row_i, 0).data;
|
||||||
|
|
||||||
|
// Pre-compute DFT of (a0, a1, a2)
|
||||||
|
let (mut ci_dft, scratch_1) = scratch.take_vec_znx_dft(n, cols, a_size);
|
||||||
|
|
||||||
|
if basek_in == basek_tsk {
|
||||||
|
for i in 0..cols {
|
||||||
|
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, a, i);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(n, 1, a_size);
|
||||||
|
for i in 0..cols {
|
||||||
|
module.vec_znx_normalize(basek_tsk, &mut a_conv, 0, basek_in, a, i, scratch_2);
|
||||||
|
module.vec_znx_dft_apply(1, 0, &mut ci_dft, i, &a_conv, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
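The else branch above renormalizes each column of a into the tensor key's base before the forward DFT; a_size re-expresses the size * basek_in bits carried by the input limbs in limbs of basek_tsk bits. A standalone sketch of that conversion (illustrative values):

fn cross_base_limbs(size: usize, basek_in: usize, basek_out: usize) -> usize {
    // `size` limbs of `basek_in` bits carry size * basek_in bits in total;
    // ceil-divide by the new limb width to hold them without truncation.
    (size * basek_in).div_ceil(basek_out)
}

fn main() {
    assert_eq!(cross_base_limbs(3, 18, 18), 3); // equal bases: size unchanged
    assert_eq!(cross_base_limbs(3, 18, 12), 5); // 54 bits -> five 12-bit limbs
}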
|
|
||||||
|
for col_j in 1..cols {
|
||||||
// Example for rank 3:
|
// Example for rank 3:
|
||||||
//
|
//
|
||||||
// Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
|
// Note: M is a vector (m, Bm, B^2m, B^3m, ...), so each column is
|
||||||
@@ -268,7 +294,7 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
|
// col 2: (-(c0s0 + c1s1 + c2s2) , c0 , c1 + M[i], c2 )
|
||||||
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
|
// col 3: (-(d0s0 + d1s1 + d2s2) , d0 , d1 , d2 + M[i])
|
||||||
|
|
||||||
let digits: usize = tsk.digits();
|
let digits: usize = tsk.digits().into();
|
||||||
|
|
||||||
let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(n, cols, tsk.size());
|
let (mut tmp_dft_i, scratch_2) = scratch_1.take_vec_znx_dft(n, cols, tsk.size());
|
||||||
let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(digits));
|
let (mut tmp_a, scratch_3) = scratch_2.take_vec_znx_dft(n, 1, ci_dft.size().div_ceil(digits));
|
||||||
@@ -285,11 +311,11 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
// a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
|
// a2 * (-(h0s0 + h1s1 + h1s2) + s0s2, h0, h1, h2) = (-(a2h0s0 + a2h1s1 + a2h1s2) + a2s0s2, a2h0, a2h1, a2h2)
|
||||||
// =
|
// =
|
||||||
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
|
// (-(x0s0 + x1s1 + x2s2) + s0(a0s0 + a1s1 + a2s2), x0, x1, x2)
|
||||||
(1..cols).for_each(|col_i| {
|
for col_i in 1..cols {
|
||||||
let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
|
let pmat: &VmpPMat<DataTsk, B> = &tsk.at(col_i - 1, col_j - 1).key.data; // Selects Enc(s[i]s[j])
|
||||||
|
|
||||||
// Extracts a[i] and multiplies it with Enc(s[i]s[j])
|
// Extracts a[i] and multiplies it with Enc(s[i]s[j])
|
||||||
(0..digits).for_each(|di| {
|
for di in 0..digits {
|
||||||
tmp_a.set_size((ci_dft.size() + di) / digits);
|
tmp_a.set_size((ci_dft.size() + di) / digits);
|
||||||
|
|
||||||
// Small optimization for digits > 2
|
// Small optimization for digits > 2
|
||||||
@@ -307,8 +333,8 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
} else {
|
} else {
|
||||||
module.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
|
module.vmp_apply_dft_to_dft_add(&mut tmp_dft_i, &tmp_a, pmat, di, scratch_3);
|
||||||
}
|
}
|
||||||
});
|
}
|
||||||
});
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds (-(sum a[i] * s[i]) + m) to the i-th column of tmp_idft_i
|
// Adds (-(sum a[i] * s[i]) + m) to the i-th column of tmp_idft_i
|
||||||
@@ -322,18 +348,19 @@ impl<DataSelf: DataMut> GGSWCiphertext<DataSelf> {
|
|||||||
// (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
|
// (-(x0s0 + x1s1 + x2s2), x0 + M[i], x1, x2)
|
||||||
module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
|
module.vec_znx_dft_add_inplace(&mut tmp_dft_i, col_j, &ci_dft, 0);
|
||||||
let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(n, 1, tsk.size());
|
let (mut tmp_idft, scratch_3) = scratch_2.take_vec_znx_big(n, 1, tsk.size());
|
||||||
(0..cols).for_each(|i| {
|
for i in 0..cols {
|
||||||
module.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
|
module.vec_znx_idft_apply_tmpa(&mut tmp_idft, 0, &mut tmp_dft_i, i);
|
||||||
module.vec_znx_big_normalize(
|
module.vec_znx_big_normalize(
|
||||||
self.basek(),
|
basek_in,
|
||||||
&mut self.at_mut(row_i, col_j).data,
|
&mut self.at_mut(row_i, col_j).data,
|
||||||
i,
|
i,
|
||||||
|
basek_tsk,
|
||||||
&tmp_idft,
|
&tmp_idft,
|
||||||
0,
|
0,
|
||||||
scratch_3,
|
scratch_3,
|
||||||
);
|
);
|
||||||
});
|
}
|
||||||
})
|
}
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,52 +1,64 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ScratchAvailable, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes, VmpApplyDftToDft,
|
||||||
VmpApplyDftToDftTmpBytes,
|
VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
|
layouts::{Backend, DataMut, DataRef, DataViewMut, Module, Scratch, VecZnx, VecZnxBig, VecZnxDft, VmpPMat, ZnxInfos},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{GLWECiphertext, Infos, prepared::GGLWESwitchingKeyPrepared};
|
use crate::layouts::{GGLWELayoutInfos, GLWECiphertext, GLWEInfos, LWEInfos, prepared::GGLWESwitchingKeyPrepared};
|
||||||
|
|
||||||
impl GLWECiphertext<Vec<u8>> {
|
impl GLWECiphertext<Vec<u8>> {
|
||||||
#[allow(clippy::too_many_arguments)]
|
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||||
pub fn keyswitch_scratch_space<B: Backend>(
|
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
basek: usize,
|
out_infos: &OUT,
|
||||||
k_out: usize,
|
in_infos: &IN,
|
||||||
k_in: usize,
|
key_apply: &KEY,
|
||||||
k_ksk: usize,
|
|
||||||
digits: usize,
|
|
||||||
rank_in: usize,
|
|
||||||
rank_out: usize,
|
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
OUT: GLWEInfos,
|
||||||
|
IN: GLWEInfos,
|
||||||
|
KEY: GGLWELayoutInfos,
|
||||||
|
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
let in_size: usize = k_in.div_ceil(basek).div_ceil(digits);
|
let in_size: usize = in_infos
|
||||||
let out_size: usize = k_out.div_ceil(basek);
|
.k()
|
||||||
let ksk_size: usize = k_ksk.div_ceil(basek);
|
.div_ceil(key_apply.base2k())
|
||||||
let res_dft: usize = module.vec_znx_dft_alloc_bytes(rank_out + 1, ksk_size); // TODO OPTIMIZE
|
.div_ceil(key_apply.digits().into()) as usize;
|
||||||
let ai_dft: usize = module.vec_znx_dft_alloc_bytes(rank_in, in_size);
|
let out_size: usize = out_infos.size();
|
||||||
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(out_size, in_size, in_size, rank_in, rank_out + 1, ksk_size)
|
let ksk_size: usize = key_apply.size();
|
||||||
+ module.vec_znx_dft_alloc_bytes(rank_in, in_size);
|
let res_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_out() + 1).into(), ksk_size); // TODO OPTIMIZE
|
||||||
let normalize: usize = module.vec_znx_big_normalize_tmp_bytes();
|
let ai_dft: usize = module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size);
|
||||||
res_dft + ((ai_dft + vmp) | normalize)
|
let vmp: usize = module.vmp_apply_dft_to_dft_tmp_bytes(
|
||||||
|
out_size,
|
||||||
|
in_size,
|
||||||
|
in_size,
|
||||||
|
(key_apply.rank_in()).into(),
|
||||||
|
(key_apply.rank_out() + 1).into(),
|
||||||
|
ksk_size,
|
||||||
|
) + module.vec_znx_dft_alloc_bytes((key_apply.rank_in()).into(), in_size);
|
||||||
|
let normalize_big: usize = module.vec_znx_big_normalize_tmp_bytes();
|
||||||
|
if in_infos.base2k() == key_apply.base2k() {
|
||||||
|
res_dft + ((ai_dft + vmp) | normalize_big)
|
||||||
|
} else if key_apply.digits() == 1 {
|
||||||
|
// In this case we only need one temporary column, which we can drop once a_dft is computed.
|
||||||
|
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), 1, in_size) + module.vec_znx_normalize_tmp_bytes();
|
||||||
|
res_dft + (((ai_dft + normalize_conv) | vmp) | normalize_big)
|
||||||
|
} else {
|
||||||
|
// Since we stride over a to get a_dft when digits > 1, we need to store the full columns of a within the base conversion.
|
||||||
|
let normalize_conv: usize = VecZnx::alloc_bytes(module.n(), (key_apply.rank_in()).into(), in_size);
|
||||||
|
res_dft + ((ai_dft + normalize_conv + (module.vec_znx_normalize_tmp_bytes() | vmp)) | normalize_big)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
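The digits == 1 case above can convert one column at a time, while digits > 1 must keep every converted column alive: the DFT passes stride over the limbs with step digits and offset digits - di - 1, so each digit revisits a_conv. A sketch of that access pattern, under the assumption that vec_znx_dft_apply(step, offset, ..) reads limbs offset, offset + step, offset + 2*step, ...:

fn limbs_read(digits: usize, di: usize, size: usize) -> Vec<usize> {
    (digits - di - 1..size).step_by(digits).collect()
}

fn main() {
    // digits = 3 over 6 limbs: the three passes jointly touch every limb once,
    // which is why the converted columns cannot be dropped between passes.
    assert_eq!(limbs_read(3, 0, 6), vec![2, 5]);
    assert_eq!(limbs_read(3, 1, 6), vec![1, 4]);
    assert_eq!(limbs_read(3, 2, 6), vec![0, 3]);
}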
|
|
||||||
pub fn keyswitch_inplace_scratch_space<B: Backend>(
|
pub fn keyswitch_inplace_scratch_space<B: Backend, OUT, KEY>(module: &Module<B>, out_infos: &OUT, key_apply: &KEY) -> usize
|
||||||
module: &Module<B>,
|
|
||||||
basek: usize,
|
|
||||||
k_out: usize,
|
|
||||||
k_ksk: usize,
|
|
||||||
digits: usize,
|
|
||||||
rank: usize,
|
|
||||||
) -> usize
|
|
||||||
where
|
where
|
||||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
OUT: GLWEInfos,
|
||||||
|
KEY: GGLWELayoutInfos,
|
||||||
|
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
Self::keyswitch_scratch_space(module, basek, k_out, k_out, k_ksk, digits, rank, rank)
|
Self::keyswitch_scratch_space(module, out_infos, out_infos, key_apply)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -61,10 +73,9 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
|||||||
) where
|
) where
|
||||||
DataLhs: DataRef,
|
DataLhs: DataRef,
|
||||||
DataRhs: DataRef,
|
DataRhs: DataRef,
|
||||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||||
Scratch<B>: ScratchAvailable,
|
Scratch<B>: ScratchAvailable,
|
||||||
{
|
{
|
||||||
let basek: usize = self.basek();
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
lhs.rank(),
|
lhs.rank(),
|
||||||
rhs.rank_in(),
|
rhs.rank_in(),
|
||||||
@@ -79,43 +90,26 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
|||||||
self.rank(),
|
self.rank(),
|
||||||
rhs.rank_out()
|
rhs.rank_out()
|
||||||
);
|
);
|
||||||
assert_eq!(self.basek(), basek);
|
|
||||||
assert_eq!(lhs.basek(), basek);
|
|
||||||
assert_eq!(rhs.n(), self.n());
|
assert_eq!(rhs.n(), self.n());
|
||||||
assert_eq!(lhs.n(), self.n());
|
assert_eq!(lhs.n(), self.n());
|
||||||
|
|
||||||
|
let scratch_needed: usize = GLWECiphertext::keyswitch_scratch_space(module, self, lhs, rhs);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
scratch.available()
|
scratch.available() >= scratch_needed,
|
||||||
>= GLWECiphertext::keyswitch_scratch_space(
|
|
||||||
module,
|
|
||||||
self.basek(),
|
|
||||||
self.k(),
|
|
||||||
lhs.k(),
|
|
||||||
rhs.k(),
|
|
||||||
rhs.digits(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
),
|
|
||||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
|
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
|
||||||
module,
|
module,
|
||||||
self.basek(),
|
self.base2k(),
|
||||||
self.k(),
|
self.k(),
|
||||||
|
lhs.base2k(),
|
||||||
lhs.k(),
|
lhs.k(),
|
||||||
|
rhs.base2k(),
|
||||||
rhs.k(),
|
rhs.k(),
|
||||||
rhs.digits(),
|
rhs.digits(),
|
||||||
rhs.rank_in(),
|
rhs.rank_in(),
|
||||||
rhs.rank_out(),
|
rhs.rank_out(),
|
||||||
)={}",
|
)={scratch_needed}",
|
||||||
scratch.available(),
|
scratch.available(),
|
||||||
GLWECiphertext::keyswitch_scratch_space(
|
|
||||||
module,
|
|
||||||
self.basek(),
|
|
||||||
self.k(),
|
|
||||||
lhs.k(),
|
|
||||||
rhs.k(),
|
|
||||||
rhs.digits(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
)
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -127,10 +121,9 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
|||||||
scratch: &Scratch<B>,
|
scratch: &Scratch<B>,
|
||||||
) where
|
) where
|
||||||
DataRhs: DataRef,
|
DataRhs: DataRef,
|
||||||
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes,
|
Module<B>: VecZnxDftAllocBytes + VmpApplyDftToDftTmpBytes + VecZnxBigNormalizeTmpBytes + VecZnxNormalizeTmpBytes,
|
||||||
Scratch<B>: ScratchAvailable,
|
Scratch<B>: ScratchAvailable,
|
||||||
{
|
{
|
||||||
let basek: usize = self.basek();
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
self.rank(),
|
self.rank(),
|
||||||
rhs.rank_out(),
|
rhs.rank_out(),
|
||||||
@@ -138,41 +131,15 @@ impl<DataSelf: DataRef> GLWECiphertext<DataSelf> {
|
|||||||
self.rank(),
|
self.rank(),
|
||||||
rhs.rank_out()
|
rhs.rank_out()
|
||||||
);
|
);
|
||||||
assert_eq!(self.basek(), basek);
|
|
||||||
assert_eq!(rhs.n(), self.n());
|
assert_eq!(rhs.n(), self.n());
|
||||||
|
|
||||||
|
let scratch_needed: usize = GLWECiphertext::keyswitch_inplace_scratch_space(module, self, rhs);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
scratch.available()
|
scratch.available() >= scratch_needed,
|
||||||
>= GLWECiphertext::keyswitch_scratch_space(
|
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space()={scrach_needed}",
|
||||||
module,
|
|
||||||
self.basek(),
|
|
||||||
self.k(),
|
|
||||||
self.k(),
|
|
||||||
rhs.k(),
|
|
||||||
rhs.digits(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
),
|
|
||||||
"scratch.available()={} < GLWECiphertext::keyswitch_scratch_space(
|
|
||||||
module,
|
|
||||||
self.basek(),
|
|
||||||
self.k(),
|
|
||||||
self.k(),
|
|
||||||
rhs.k(),
|
|
||||||
rhs.digits(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
)={}",
|
|
||||||
scratch.available(),
|
scratch.available(),
|
||||||
GLWECiphertext::keyswitch_scratch_space(
|
|
||||||
module,
|
|
||||||
self.basek(),
|
|
||||||
self.k(),
|
|
||||||
self.k(),
|
|
||||||
rhs.k(),
|
|
||||||
rhs.digits(),
|
|
||||||
rhs.rank_in(),
|
|
||||||
rhs.rank_out(),
|
|
||||||
)
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -181,7 +148,7 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
|||||||
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
pub fn keyswitch<DataLhs: DataRef, DataRhs: DataRef, B: Backend>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
lhs: &GLWECiphertext<DataLhs>,
|
glwe_in: &GLWECiphertext<DataLhs>,
|
||||||
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
rhs: &GGLWESwitchingKeyPrepared<DataRhs, B>,
|
||||||
scratch: &mut Scratch<B>,
|
scratch: &mut Scratch<B>,
|
||||||
) where
|
) where
|
||||||
@@ -193,17 +160,31 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
|||||||
+ VecZnxDftApply<B>
|
+ VecZnxDftApply<B>
|
||||||
+ VecZnxIdftApplyConsume<B>
|
+ VecZnxIdftApplyConsume<B>
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
+ VecZnxBigNormalize<B>,
|
+ VecZnxBigNormalize<B>
|
||||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
|
+ VecZnxNormalize<B>
|
||||||
|
+ VecZnxNormalizeTmpBytes,
|
||||||
|
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
{
|
{
|
||||||
self.assert_keyswitch(module, lhs, rhs, scratch);
|
self.assert_keyswitch(module, glwe_in, rhs, scratch);
|
||||||
}
|
}
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // Todo optimise
|
|
||||||
let res_big: VecZnxBig<_, B> = lhs.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
let basek_out: usize = self.base2k().into();
|
||||||
(0..self.cols()).for_each(|i| {
|
let basek_ksk: usize = rhs.base2k().into();
|
||||||
module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
|
|
||||||
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimize
|
||||||
|
let res_big: VecZnxBig<_, B> = glwe_in.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||||
|
(0..(self.rank() + 1).into()).for_each(|i| {
|
||||||
|
module.vec_znx_big_normalize(
|
||||||
|
basek_out,
|
||||||
|
&mut self.data,
|
||||||
|
i,
|
||||||
|
basek_ksk,
|
||||||
|
&res_big,
|
||||||
|
i,
|
||||||
|
scratch_1,
|
||||||
|
);
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -222,17 +203,31 @@ impl<DataSelf: DataMut> GLWECiphertext<DataSelf> {
|
|||||||
+ VecZnxDftApply<B>
|
+ VecZnxDftApply<B>
|
||||||
+ VecZnxIdftApplyConsume<B>
|
+ VecZnxIdftApplyConsume<B>
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
+ VecZnxBigNormalize<B>,
|
+ VecZnxBigNormalize<B>
|
||||||
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B>,
|
+ VecZnxNormalize<B>
|
||||||
|
+ VecZnxNormalizeTmpBytes,
|
||||||
|
Scratch<B>: ScratchAvailable + TakeVecZnxDft<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
{
|
{
|
||||||
self.assert_keyswitch_inplace(module, rhs, scratch);
|
self.assert_keyswitch_inplace(module, rhs, scratch);
|
||||||
}
|
}
|
||||||
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n(), self.cols(), rhs.size()); // Todo optimise
|
|
||||||
|
let basek_in: usize = self.base2k().into();
|
||||||
|
let basek_ksk: usize = rhs.base2k().into();
|
||||||
|
|
||||||
|
let (res_dft, scratch_1) = scratch.take_vec_znx_dft(self.n().into(), (self.rank() + 1).into(), rhs.size()); // TODO: optimize
|
||||||
let res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
let res_big: VecZnxBig<_, B> = self.keyswitch_internal(module, res_dft, rhs, scratch_1);
|
||||||
(0..self.cols()).for_each(|i| {
|
(0..(self.rank() + 1).into()).for_each(|i| {
|
||||||
module.vec_znx_big_normalize(self.basek(), &mut self.data, i, &res_big, i, scratch_1);
|
module.vec_znx_big_normalize(
|
||||||
|
basek_in,
|
||||||
|
&mut self.data,
|
||||||
|
i,
|
||||||
|
basek_ksk,
|
||||||
|
&res_big,
|
||||||
|
i,
|
||||||
|
scratch_1,
|
||||||
|
);
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
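Both keyswitch paths above now normalize from the key's base back into the ciphertext's own base. As a scalar analogue, a balanced base-2^k decomposition keeps each limb in [-2^(k-1), 2^(k-1)); a hypothetical standalone sketch (decompose is not the library routine, which works per polynomial coefficient):

// Decompose x into `limbs` balanced base-2^k digits, most significant first:
// x = sum_i out[i] * 2^(k * (limbs - 1 - i)), each out[i] in [-2^(k-1), 2^(k-1)).
fn decompose(mut x: i128, k: u32, limbs: usize) -> Vec<i64> {
    let base: i128 = 1 << k;
    let half: i128 = base >> 1;
    let mut out = vec![0i64; limbs];
    for limb in out.iter_mut().rev() {
        let mut d = x % base; // truncated remainder in (-base, base)
        x /= base;
        if d >= half {
            d -= base;
            x += 1; // carry into the next (more significant) limb
        } else if d < -half {
            d += base;
            x -= 1;
        }
        *limb = d as i64;
    }
    out
}

fn main() {
    let digits = decompose(100_000, 12, 3);
    let recomposed: i128 = digits.iter().fold(0i128, |acc, &d| (acc << 12) + d as i128);
    assert_eq!(recomposed, 100_000);
    assert!(digits.iter().all(|&d| (-(1i64 << 11)..(1i64 << 11)).contains(&d)));
}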
@@ -257,19 +252,30 @@ impl<D: DataRef> GLWECiphertext<D> {
|
|||||||
+ VecZnxDftApply<B>
|
+ VecZnxDftApply<B>
|
||||||
+ VecZnxIdftApplyConsume<B>
|
+ VecZnxIdftApplyConsume<B>
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
+ VecZnxBigNormalize<B>,
|
+ VecZnxBigNormalize<B>
|
||||||
Scratch<B>: TakeVecZnxDft<B>,
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
if rhs.digits() == 1 {
|
if rhs.digits() == 1 {
|
||||||
return keyswitch_vmp_one_digit(module, res_dft, &self.data, &rhs.key.data, scratch);
|
return keyswitch_vmp_one_digit(
|
||||||
|
module,
|
||||||
|
self.base2k().into(),
|
||||||
|
rhs.base2k().into(),
|
||||||
|
res_dft,
|
||||||
|
&self.data,
|
||||||
|
&rhs.key.data,
|
||||||
|
scratch,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
keyswitch_vmp_multiple_digits(
|
keyswitch_vmp_multiple_digits(
|
||||||
module,
|
module,
|
||||||
|
self.base2k().into(),
|
||||||
|
rhs.base2k().into(),
|
||||||
res_dft,
|
res_dft,
|
||||||
&self.data,
|
&self.data,
|
||||||
&rhs.key.data,
|
&rhs.key.data,
|
||||||
rhs.digits(),
|
rhs.digits().into(),
|
||||||
scratch,
|
scratch,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -277,6 +283,8 @@ impl<D: DataRef> GLWECiphertext<D> {
|
|||||||
|
|
||||||
fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>(
|
fn keyswitch_vmp_one_digit<B: Backend, DataRes, DataIn, DataVmp>(
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
|
basek_in: usize,
|
||||||
|
basek_ksk: usize,
|
||||||
mut res_dft: VecZnxDft<DataRes, B>,
|
mut res_dft: VecZnxDft<DataRes, B>,
|
||||||
a: &VecZnx<DataIn>,
|
a: &VecZnx<DataIn>,
|
||||||
mat: &VmpPMat<DataVmp, B>,
|
mat: &VmpPMat<DataVmp, B>,
|
||||||
@@ -286,23 +294,42 @@ where
|
|||||||
DataRes: DataMut,
|
DataRes: DataMut,
|
||||||
DataIn: DataRef,
|
DataIn: DataRef,
|
||||||
DataVmp: DataRef,
|
DataVmp: DataRef,
|
||||||
Module<B>:
|
Module<B>: VecZnxDftAllocBytes
|
||||||
VecZnxDftAllocBytes + VecZnxDftApply<B> + VmpApplyDftToDft<B> + VecZnxIdftApplyConsume<B> + VecZnxBigAddSmallInplace<B>,
|
+ VecZnxDftApply<B>
|
||||||
Scratch<B>: TakeVecZnxDft<B>,
|
+ VmpApplyDftToDft<B>
|
||||||
|
+ VecZnxIdftApplyConsume<B>
|
||||||
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
let cols: usize = a.cols();
|
let cols: usize = a.cols();
|
||||||
|
|
||||||
|
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
|
||||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a.size());
|
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a_size);
|
||||||
(0..cols - 1).for_each(|col_i| {
|
|
||||||
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1);
|
if basek_in == basek_ksk {
|
||||||
});
|
(0..cols - 1).for_each(|col_i| {
|
||||||
|
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, a, col_i + 1);
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), 1, a_size);
|
||||||
|
(0..cols - 1).for_each(|col_i| {
|
||||||
|
module.vec_znx_normalize(basek_ksk, &mut a_conv, 0, basek_in, a, col_i + 1, scratch_2);
|
||||||
|
module.vec_znx_dft_apply(1, 0, &mut ai_dft, col_i, &a_conv, 0);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
||||||
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
||||||
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
|
module.vec_znx_big_add_small_inplace(&mut res_big, 0, a, 0);
|
||||||
res_big
|
res_big
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
|
fn keyswitch_vmp_multiple_digits<B: Backend, DataRes, DataIn, DataVmp>(
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
|
basek_in: usize,
|
||||||
|
basek_ksk: usize,
|
||||||
mut res_dft: VecZnxDft<DataRes, B>,
|
mut res_dft: VecZnxDft<DataRes, B>,
|
||||||
a: &VecZnx<DataIn>,
|
a: &VecZnx<DataIn>,
|
||||||
mat: &VmpPMat<DataVmp, B>,
|
mat: &VmpPMat<DataVmp, B>,
|
||||||
@@ -318,37 +345,67 @@ where
|
|||||||
+ VmpApplyDftToDft<B>
|
+ VmpApplyDftToDft<B>
|
||||||
+ VmpApplyDftToDftAdd<B>
|
+ VmpApplyDftToDftAdd<B>
|
||||||
+ VecZnxIdftApplyConsume<B>
|
+ VecZnxIdftApplyConsume<B>
|
||||||
+ VecZnxBigAddSmallInplace<B>,
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
Scratch<B>: TakeVecZnxDft<B>,
|
+ VecZnxNormalize<B>,
|
||||||
|
Scratch<B>: TakeVecZnxDft<B> + TakeVecZnx,
|
||||||
{
|
{
|
||||||
let cols: usize = a.cols();
|
let cols: usize = a.cols();
|
||||||
let size: usize = a.size();
|
let a_size: usize = (a.size() * basek_in).div_ceil(basek_ksk);
|
||||||
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, size.div_ceil(digits));
|
let (mut ai_dft, scratch_1) = scratch.take_vec_znx_dft(a.n(), cols - 1, a_size.div_ceil(digits));
|
||||||
|
|
||||||
ai_dft.data_mut().fill(0);
|
ai_dft.data_mut().fill(0);
|
||||||
|
|
||||||
(0..digits).for_each(|di| {
|
if basek_in == basek_ksk {
|
||||||
ai_dft.set_size((size + di) / digits);
|
for di in 0..digits {
|
||||||
|
ai_dft.set_size((a_size + di) / digits);
|
||||||
|
|
||||||
// Small optimization for digits > 2
|
// Small optimization for digits > 2
|
||||||
// VMP produces some error e, and since we aggregate vmp * 2^{di * B}, we
|
// VMP produces some error e, and since we aggregate vmp * 2^{di * B}, we
|
||||||
// also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
|
// also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
|
||||||
// As such we can safely ignore the last digits-2 limbs of the sum of vmp products.
|
// As such we can safely ignore the last digits-2 limbs of the sum of vmp products.
|
||||||
// It is possible to further ignore the last digits-1 limbs, but this introduces
|
// It is possible to further ignore the last digits-1 limbs, but this introduces
|
||||||
// ~0.5 to 1 bit of additional noise, and is thus not done here, to ensure that the same
|
// ~0.5 to 1 bit of additional noise, and is thus not done here, to ensure that the same
|
||||||
// noise is kept with respect to the ideal functionality.
|
// noise is kept with respect to the ideal functionality.
|
||||||
res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
|
res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
|
||||||
|
|
||||||
(0..cols - 1).for_each(|col_i| {
|
for j in 0..cols - 1 {
|
||||||
module.vec_znx_dft_apply(digits, digits - di - 1, &mut ai_dft, col_i, a, col_i + 1);
|
module.vec_znx_dft_apply(digits, digits - di - 1, &mut ai_dft, j, a, j + 1);
|
||||||
});
|
}
|
||||||
|
|
||||||
if di == 0 {
|
if di == 0 {
|
||||||
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_1);
|
||||||
} else {
|
} else {
|
||||||
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
|
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
});
|
} else {
|
||||||
|
let (mut a_conv, scratch_2) = scratch_1.take_vec_znx(a.n(), cols - 1, a_size);
|
||||||
|
for j in 0..cols - 1 {
|
||||||
|
module.vec_znx_normalize(basek_ksk, &mut a_conv, j, basek_in, a, j + 1, scratch_2);
|
||||||
|
}
|
||||||
|
|
||||||
|
for di in 0..digits {
|
||||||
|
ai_dft.set_size((a_size + di) / digits);
|
||||||
|
|
||||||
|
// Small optimization for digits > 2
|
||||||
|
// VMP produce some error e, and since we aggregate vmp * 2^{di * B}, then
|
||||||
|
// we also aggregate ei * 2^{di * B}, with the largest error being ei * 2^{(digits-1) * B}.
|
||||||
|
// As such we can ignore the last digits-2 limbs safely of the sum of vmp products.
|
||||||
|
// It is possible to further ignore the last digits-1 limbs, but this introduce
|
||||||
|
// ~0.5 to 1 bit of additional noise, and thus not chosen here to ensure that the same
|
||||||
|
// noise is kept with respect to the ideal functionality.
|
||||||
|
res_dft.set_size(mat.size() - ((digits - di) as isize - 2).max(0) as usize);
|
||||||
|
|
||||||
|
for j in 0..cols - 1 {
|
||||||
|
module.vec_znx_dft_apply(digits, digits - di - 1, &mut ai_dft, j, &a_conv, j);
|
||||||
|
}
|
||||||
|
|
||||||
|
if di == 0 {
|
||||||
|
module.vmp_apply_dft_to_dft(&mut res_dft, &ai_dft, mat, scratch_2);
|
||||||
|
} else {
|
||||||
|
module.vmp_apply_dft_to_dft_add(&mut res_dft, &ai_dft, mat, di, scratch_2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
res_dft.set_size(res_dft.max_size());
|
res_dft.set_size(res_dft.max_size());
|
||||||
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
let mut res_big: VecZnxBig<DataRes, B> = module.vec_znx_idft_apply_consume(res_dft);
|
||||||
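The res_dft.set_size call in both digit loops above implements the limb-truncation rule described in the comment: limbs dominated by already-aggregated error are skipped for early digits. An illustrative evaluation with hypothetical sizes:

fn truncated_size(mat_size: usize, digits: usize, di: usize) -> usize {
    // Drop the last (digits - di - 2) limbs, clamped at zero.
    mat_size - ((digits - di) as isize - 2).max(0) as usize
}

fn main() {
    // mat_size = 8, digits = 3: only the first digit skips a limb.
    assert_eq!(truncated_size(8, 3, 0), 7);
    assert_eq!(truncated_size(8, 3, 1), 8);
    assert_eq!(truncated_size(8, 3, 2), 8);
}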
|
|||||||
@@ -1,26 +1,31 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{
|
api::{
|
||||||
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
ScratchAvailable, TakeVecZnx, TakeVecZnxDft, VecZnxBigAddSmallInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
|
||||||
VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VmpApplyDftToDft, VmpApplyDftToDftAdd,
|
VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxIdftApplyConsume, VecZnxNormalize, VecZnxNormalizeTmpBytes,
|
||||||
VmpApplyDftToDftTmpBytes,
|
VmpApplyDftToDft, VmpApplyDftToDftAdd, VmpApplyDftToDftTmpBytes,
|
||||||
},
|
},
|
||||||
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
|
layouts::{Backend, DataMut, DataRef, Module, Scratch, ZnxView, ZnxViewMut, ZnxZero},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
TakeGLWECt,
|
TakeGLWECt,
|
||||||
layouts::{GLWECiphertext, Infos, LWECiphertext, prepared::LWESwitchingKeyPrepared},
|
layouts::{
|
||||||
|
GGLWELayoutInfos, GLWECiphertext, GLWECiphertextLayout, LWECiphertext, LWEInfos, Rank, TorusPrecision,
|
||||||
|
prepared::LWESwitchingKeyPrepared,
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
impl LWECiphertext<Vec<u8>> {
|
impl LWECiphertext<Vec<u8>> {
|
||||||
pub fn keyswitch_scratch_space<B: Backend>(
|
pub fn keyswitch_scratch_space<B: Backend, OUT, IN, KEY>(
|
||||||
module: &Module<B>,
|
module: &Module<B>,
|
||||||
basek: usize,
|
out_infos: &OUT,
|
||||||
k_lwe_out: usize,
|
in_infos: &IN,
|
||||||
k_lwe_in: usize,
|
key_infos: &KEY,
|
||||||
k_ksk: usize,
|
|
||||||
) -> usize
|
) -> usize
|
||||||
where
|
where
|
||||||
|
OUT: LWEInfos,
|
||||||
|
IN: LWEInfos,
|
||||||
|
KEY: GGLWELayoutInfos,
|
||||||
Module<B>: VecZnxDftAllocBytes
|
Module<B>: VecZnxDftAllocBytes
|
||||||
+ VmpApplyDftToDftTmpBytes
|
+ VmpApplyDftToDftTmpBytes
|
||||||
+ VecZnxBigNormalizeTmpBytes
|
+ VecZnxBigNormalizeTmpBytes
|
||||||
@@ -30,10 +35,30 @@ impl LWECiphertext<Vec<u8>> {
|
|||||||
+ VecZnxDftApply<B>
|
+ VecZnxDftApply<B>
|
||||||
+ VecZnxIdftApplyConsume<B>
|
+ VecZnxIdftApplyConsume<B>
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
+ VecZnxBigNormalize<B>,
|
+ VecZnxBigNormalize<B>
|
||||||
|
+ VecZnxNormalizeTmpBytes,
|
||||||
{
|
{
|
||||||
GLWECiphertext::bytes_of(module.n(), basek, k_lwe_out.max(k_lwe_in), 1)
|
let max_k: TorusPrecision = in_infos.k().max(out_infos.k());
|
||||||
+ GLWECiphertext::keyswitch_inplace_scratch_space(module, basek, k_lwe_out, k_ksk, 1, 1)
|
|
||||||
|
let glwe_in_infos: GLWECiphertextLayout = GLWECiphertextLayout {
|
||||||
|
n: module.n().into(),
|
||||||
|
base2k: in_infos.base2k(),
|
||||||
|
k: max_k,
|
||||||
|
rank: Rank(1),
|
||||||
|
};
|
||||||
|
|
||||||
|
let glwe_out_infos: GLWECiphertextLayout = GLWECiphertextLayout {
|
||||||
|
n: module.n().into(),
|
||||||
|
base2k: out_infos.base2k(),
|
||||||
|
k: max_k,
|
||||||
|
rank: Rank(1),
|
||||||
|
};
|
||||||
|
|
||||||
|
let glwe_in: usize = GLWECiphertext::alloc_bytes(&glwe_in_infos);
|
||||||
|
let glwe_out: usize = GLWECiphertext::alloc_bytes(&glwe_out_infos);
|
||||||
|
let ks: usize = GLWECiphertext::keyswitch_scratch_space(module, &glwe_out_infos, &glwe_in_infos, key_infos);
|
||||||
|
|
||||||
|
glwe_in + glwe_out + ks
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -55,32 +80,47 @@ impl<DLwe: DataMut> LWECiphertext<DLwe> {
|
|||||||
+ VecZnxDftApply<B>
|
+ VecZnxDftApply<B>
|
||||||
+ VecZnxIdftApplyConsume<B>
|
+ VecZnxIdftApplyConsume<B>
|
||||||
+ VecZnxBigAddSmallInplace<B>
|
+ VecZnxBigAddSmallInplace<B>
|
||||||
+ VecZnxBigNormalize<B>,
|
+ VecZnxBigNormalize<B>
|
||||||
|
+ VecZnxNormalize<B>
|
||||||
|
+ VecZnxNormalizeTmpBytes
|
||||||
|
+ VecZnxCopy,
|
||||||
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
|
Scratch<B>: TakeVecZnxDft<B> + ScratchAvailable + TakeVecZnx,
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
{
|
{
|
||||||
assert!(self.n() <= module.n());
|
assert!(self.n() <= module.n() as u32);
|
||||||
assert!(a.n() <= module.n());
|
assert!(a.n() <= module.n() as u32);
|
||||||
assert_eq!(self.basek(), a.basek());
|
assert!(scratch.available() >= LWECiphertext::keyswitch_scratch_space(module, self, a, ksk));
|
||||||
}
|
}
|
||||||
|
|
||||||
let max_k: usize = self.k().max(a.k());
|
let max_k: TorusPrecision = self.k().max(a.k());
|
||||||
let basek: usize = self.basek();
|
|
||||||
|
|
||||||
let (mut glwe, scratch_1) = scratch.take_glwe_ct(ksk.n(), basek, max_k, 1);
|
let a_size: usize = a.k().div_ceil(ksk.base2k()) as usize;
|
||||||
glwe.data.zero();
|
|
||||||
|
|
||||||
let n_lwe: usize = a.n();
|
let (mut glwe_in, scratch_1) = scratch.take_glwe_ct(&GLWECiphertextLayout {
|
||||||
|
n: ksk.n(),
|
||||||
|
base2k: a.base2k(),
|
||||||
|
k: max_k,
|
||||||
|
rank: Rank(1),
|
||||||
|
});
|
||||||
|
glwe_in.data.zero();
|
||||||
|
|
||||||
(0..a.size()).for_each(|i| {
|
let (mut glwe_out, scratch_1) = scratch_1.take_glwe_ct(&GLWECiphertextLayout {
|
||||||
let data_lwe: &[i64] = a.data.at(0, i);
|
n: ksk.n(),
|
||||||
glwe.data.at_mut(0, i)[0] = data_lwe[0];
|
base2k: self.base2k(),
|
||||||
glwe.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
|
k: max_k,
|
||||||
|
rank: Rank(1),
|
||||||
});
|
});
|
||||||
|
|
||||||
glwe.keyswitch_inplace(module, &ksk.0, scratch_1);
|
let n_lwe: usize = a.n().into();
|
||||||
|
|
||||||
self.sample_extract(&glwe);
|
for i in 0..a_size {
|
||||||
|
let data_lwe: &[i64] = a.data.at(0, i);
|
||||||
|
glwe_in.data.at_mut(0, i)[0] = data_lwe[0];
|
||||||
|
glwe_in.data.at_mut(1, i)[..n_lwe].copy_from_slice(&data_lwe[1..]);
|
||||||
|
}
|
||||||
|
|
||||||
|
glwe_out.keyswitch(module, &glwe_in, &ksk.0, scratch_1);
|
||||||
|
self.sample_extract(&glwe_out);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
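The loop above embeds the LWE ciphertext limb by limb into a rank-1 GLWE: the body coefficient lands in the constant term of column 0, and the n_lwe mask coefficients fill the front of column 1, zero-padded up to the GLWE degree. A plain-slice sketch of one limb (buffer sizes are hypothetical):

fn embed_lwe_limb(lwe_limb: &[i64], col0: &mut [i64], col1: &mut [i64]) {
    let n_lwe = lwe_limb.len() - 1;
    col0[0] = lwe_limb[0]; // body coefficient b
    col1[..n_lwe].copy_from_slice(&lwe_limb[1..]); // mask vector a
}

fn main() {
    let lwe = [7i64, 1, 2, 3]; // (b, a0, a1, a2) with n_lwe = 3
    let mut col0 = vec![0i64; 8]; // GLWE ring degree 8 (illustrative)
    let mut col1 = vec![0i64; 8];
    embed_lwe_limb(&lwe, &mut col0, &mut col1);
    assert_eq!(col0[0], 7);
    assert_eq!(&col1[..3], &[1, 2, 3]);
}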
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{VecZnxCopy, VecZnxFillUniform},
|
api::{VecZnxCopy, VecZnxFillUniform},
|
||||||
layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
|
layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
|
||||||
source::Source,
|
source::Source,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{
|
use crate::layouts::{
|
||||||
GGLWEAutomorphismKey, Infos,
|
Base2K, Degree, Digits, GGLWEAutomorphismKey, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
|
||||||
compressed::{Decompress, GGLWESwitchingKeyCompressed},
|
compressed::{Decompress, GGLWESwitchingKeyCompressed},
|
||||||
};
|
};
|
||||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||||
@@ -17,9 +17,50 @@ pub struct GGLWEAutomorphismKeyCompressed<D: Data> {
|
|||||||
pub(crate) p: i64,
|
pub(crate) p: i64,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<D: Data> LWEInfos for GGLWEAutomorphismKeyCompressed<D> {
|
||||||
|
fn n(&self) -> Degree {
|
||||||
|
self.key.n()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn base2k(&self) -> Base2K {
|
||||||
|
self.key.base2k()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn k(&self) -> TorusPrecision {
|
||||||
|
self.key.k()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn size(&self) -> usize {
|
||||||
|
self.key.size()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<D: Data> GLWEInfos for GGLWEAutomorphismKeyCompressed<D> {
|
||||||
|
fn rank(&self) -> Rank {
|
||||||
|
self.rank_out()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Data> GGLWELayoutInfos for GGLWEAutomorphismKeyCompressed<D> {
|
||||||
|
fn rank_in(&self) -> Rank {
|
||||||
|
self.key.rank_in()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rank_out(&self) -> Rank {
|
||||||
|
self.key.rank_out()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn digits(&self) -> Digits {
|
||||||
|
self.key.digits()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rows(&self) -> Rows {
|
||||||
|
self.key.rows()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<D: DataRef> fmt::Debug for GGLWEAutomorphismKeyCompressed<D> {
|
impl<D: DataRef> fmt::Debug for GGLWEAutomorphismKeyCompressed<D> {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
write!(f, "{}", self)
|
write!(f, "{self}")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -29,16 +70,6 @@ impl<D: DataMut> FillUniform for GGLWEAutomorphismKeyCompressed<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataMut> Reset for GGLWEAutomorphismKeyCompressed<D>
|
|
||||||
where
|
|
||||||
MatZnx<D>: Reset,
|
|
||||||
{
|
|
||||||
fn reset(&mut self) {
|
|
||||||
self.key.reset();
|
|
||||||
self.p = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<D: DataRef> fmt::Display for GGLWEAutomorphismKeyCompressed<D> {
|
impl<D: DataRef> fmt::Display for GGLWEAutomorphismKeyCompressed<D> {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
write!(f, "(AutomorphismKeyCompressed: p={}) {}", self.p, self.key)
|
write!(f, "(AutomorphismKeyCompressed: p={}) {}", self.p, self.key)
|
||||||
@@ -46,49 +77,34 @@ impl<D: DataRef> fmt::Display for GGLWEAutomorphismKeyCompressed<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GGLWEAutomorphismKeyCompressed<Vec<u8>> {
|
impl GGLWEAutomorphismKeyCompressed<Vec<u8>> {
|
||||||
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
|
pub fn alloc<A>(infos: &A) -> Self
|
||||||
GGLWEAutomorphismKeyCompressed {
|
where
|
||||||
key: GGLWESwitchingKeyCompressed::alloc(n, basek, k, rows, digits, rank, rank),
|
A: GGLWELayoutInfos,
|
||||||
|
{
|
||||||
|
debug_assert_eq!(infos.rank_in(), infos.rank_out());
|
||||||
|
Self {
|
||||||
|
key: GGLWESwitchingKeyCompressed::alloc(infos),
|
||||||
p: 0,
|
p: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
|
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self {
|
||||||
GGLWESwitchingKeyCompressed::<Vec<u8>>::bytes_of(n, basek, k, rows, digits, rank)
|
Self {
|
||||||
}
|
key: GGLWESwitchingKeyCompressed::alloc_with(n, base2k, k, rows, digits, rank, rank),
|
||||||
}
|
p: 0,
|
||||||
|
}
|
||||||
impl<D: Data> Infos for GGLWEAutomorphismKeyCompressed<D> {
|
|
||||||
type Inner = MatZnx<D>;
|
|
||||||
|
|
||||||
fn inner(&self) -> &Self::Inner {
|
|
||||||
self.key.inner()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn basek(&self) -> usize {
|
pub fn alloc_bytes<A>(infos: &A) -> usize
|
||||||
self.key.basek()
|
where
|
||||||
|
A: GGLWELayoutInfos,
|
||||||
|
{
|
||||||
|
debug_assert_eq!(infos.rank_in(), infos.rank_out());
|
||||||
|
GGLWESwitchingKeyCompressed::alloc_bytes(infos)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn k(&self) -> usize {
|
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> usize {
|
||||||
self.key.k()
|
GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rows, digits, rank, rank)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<D: Data> GGLWEAutomorphismKeyCompressed<D> {
|
|
||||||
pub fn rank(&self) -> usize {
|
|
||||||
self.key.rank()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn digits(&self) -> usize {
|
|
||||||
self.key.digits()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn rank_in(&self) -> usize {
|
|
||||||
self.key.rank_in()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn rank_out(&self) -> usize {
|
|
||||||
self.key.rank_out()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{VecZnxCopy, VecZnxFillUniform},
|
api::{VecZnxCopy, VecZnxFillUniform},
|
||||||
layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
|
layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, WriterTo, ZnxInfos},
|
||||||
source::Source,
|
source::Source,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{
|
use crate::layouts::{
|
||||||
GGLWECiphertext, Infos,
|
Base2K, Degree, Digits, GGLWECiphertext, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
|
||||||
compressed::{Decompress, GLWECiphertextCompressed},
|
compressed::{Decompress, GLWECiphertextCompressed},
|
||||||
};
|
};
|
||||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||||
@@ -14,16 +14,57 @@ use std::fmt;
|
|||||||
#[derive(PartialEq, Eq, Clone)]
|
#[derive(PartialEq, Eq, Clone)]
|
||||||
pub struct GGLWECiphertextCompressed<D: Data> {
|
pub struct GGLWECiphertextCompressed<D: Data> {
|
||||||
pub(crate) data: MatZnx<D>,
|
pub(crate) data: MatZnx<D>,
|
||||||
pub(crate) basek: usize,
|
pub(crate) base2k: Base2K,
|
||||||
pub(crate) k: usize,
|
pub(crate) k: TorusPrecision,
|
||||||
pub(crate) rank_out: usize,
|
pub(crate) rank_out: Rank,
|
||||||
pub(crate) digits: usize,
|
pub(crate) digits: Digits,
|
||||||
pub(crate) seed: Vec<[u8; 32]>,
|
pub(crate) seed: Vec<[u8; 32]>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<D: Data> LWEInfos for GGLWECiphertextCompressed<D> {
|
||||||
|
fn n(&self) -> Degree {
|
||||||
|
Degree(self.data.n() as u32)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn base2k(&self) -> Base2K {
|
||||||
|
self.base2k
|
||||||
|
}
|
||||||
|
|
||||||
|
fn k(&self) -> TorusPrecision {
|
||||||
|
self.k
|
||||||
|
}
|
||||||
|
|
||||||
|
fn size(&self) -> usize {
|
||||||
|
self.data.size()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<D: Data> GLWEInfos for GGLWECiphertextCompressed<D> {
|
||||||
|
fn rank(&self) -> Rank {
|
||||||
|
self.rank_out()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Data> GGLWELayoutInfos for GGLWECiphertextCompressed<D> {
|
||||||
|
fn rank_in(&self) -> Rank {
|
||||||
|
Rank(self.data.cols_in() as u32)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rank_out(&self) -> Rank {
|
||||||
|
self.rank_out
|
||||||
|
}
|
||||||
|
|
||||||
|
fn digits(&self) -> Digits {
|
||||||
|
self.digits
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rows(&self) -> Rows {
|
||||||
|
Rows(self.data.rows() as u32)
|
||||||
|
}
|
||||||
|
}
|
||||||
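These accessor impls route every dimension through a strongly typed wrapper instead of a bare usize. A hypothetical minimal version of that newtype pattern (the real Degree, Base2K, TorusPrecision, Rank, Rows, Digits live in poulpy's layouts module and may differ in detail):

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Rank(pub u32);

impl From<Rank> for usize {
    fn from(r: Rank) -> usize {
        r.0 as usize
    }
}

fn main() {
    let rank = Rank(2);
    // `.into()` at call sites converts back to the plain index type, as in
    // the `(self.rank() + 1).into()` calls above (arithmetic on the wrapper
    // itself is assumed to be provided by the library).
    let cols: usize = usize::from(rank) + 1;
    assert_eq!(cols, 3);
}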
|
|
||||||
impl<D: DataRef> fmt::Debug for GGLWECiphertextCompressed<D> {
|
impl<D: DataRef> fmt::Debug for GGLWECiphertextCompressed<D> {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
write!(f, "{}", self)
|
write!(f, "{self}")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -33,133 +74,140 @@ impl<D: DataMut> FillUniform for GGLWECiphertextCompressed<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataMut> Reset for GGLWECiphertextCompressed<D>
|
|
||||||
where
|
|
||||||
MatZnx<D>: Reset,
|
|
||||||
{
|
|
||||||
fn reset(&mut self) {
|
|
||||||
self.data.reset();
|
|
||||||
self.basek = 0;
|
|
||||||
self.k = 0;
|
|
||||||
self.digits = 0;
|
|
||||||
self.rank_out = 0;
|
|
||||||
self.seed = Vec::new();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<D: DataRef> fmt::Display for GGLWECiphertextCompressed<D> {
|
impl<D: DataRef> fmt::Display for GGLWECiphertextCompressed<D> {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
write!(
|
write!(
|
||||||
f,
|
f,
|
||||||
"(GGLWECiphertextCompressed: basek={} k={} digits={}) {}",
|
"(GGLWECiphertextCompressed: base2k={} k={} digits={}) {}",
|
||||||
self.basek, self.k, self.digits, self.data
|
self.base2k.0, self.k.0, self.digits.0, self.data
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GGLWECiphertextCompressed<Vec<u8>> {
|
impl GGLWECiphertextCompressed<Vec<u8>> {
|
||||||
pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
|
pub fn alloc<A>(infos: &A) -> Self
|
||||||
let size: usize = k.div_ceil(basek);
|
where
|
||||||
|
A: GGLWELayoutInfos,
|
||||||
|
{
|
||||||
|
Self::alloc_with(
|
||||||
|
infos.n(),
|
||||||
|
infos.base2k(),
|
||||||
|
infos.k(),
|
||||||
|
infos.rows(),
|
||||||
|
infos.digits(),
|
||||||
|
infos.rank_in(),
|
||||||
|
infos.rank_out(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_with(
|
||||||
|
n: Degree,
|
||||||
|
base2k: Base2K,
|
||||||
|
k: TorusPrecision,
|
||||||
|
rows: Rows,
|
||||||
|
digits: Digits,
|
||||||
|
rank_in: Rank,
|
||||||
|
rank_out: Rank,
|
||||||
|
) -> Self {
|
||||||
|
let size: usize = k.0.div_ceil(base2k.0) as usize;
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
size > digits,
|
size as u32 > digits.0,
|
||||||
"invalid gglwe: ceil(k/basek): {} <= digits: {}",
|
"invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
|
||||||
size,
|
digits.0
|
||||||
digits
|
|
||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
rows * digits <= size,
|
rows.0 * digits.0 <= size as u32,
|
||||||
"invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
|
"invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
|
||||||
rows,
|
rows.0,
|
||||||
digits,
|
digits.0,
|
||||||
size
|
|
||||||
);
|
);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
data: MatZnx::alloc(n, rows, rank_in, 1, size),
|
data: MatZnx::alloc(
|
||||||
basek,
|
n.into(),
|
||||||
|
rows.into(),
|
||||||
|
rank_in.into(),
|
||||||
|
1,
|
||||||
|
k.0.div_ceil(base2k.0) as usize,
|
||||||
|
),
|
||||||
k,
|
k,
|
||||||
rank_out,
|
base2k,
|
||||||
digits,
|
digits,
|
||||||
seed: vec![[0u8; 32]; rows * rank_in],
|
rank_out,
|
||||||
|
seed: vec![[0u8; 32]; (rows.0 * rank_in.0) as usize],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize {
|
pub fn alloc_bytes<A>(infos: &A) -> usize
|
||||||
let size: usize = k.div_ceil(basek);
|
where
|
||||||
|
A: GGLWELayoutInfos,
|
||||||
|
{
|
||||||
|
Self::alloc_bytes_with(
|
||||||
|
infos.n(),
|
||||||
|
infos.base2k(),
|
||||||
|
infos.k(),
|
||||||
|
infos.rows(),
|
||||||
|
infos.digits(),
|
||||||
|
infos.rank_in(),
|
||||||
|
infos.rank_out(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_bytes_with(
|
||||||
|
n: Degree,
|
||||||
|
base2k: Base2K,
|
||||||
|
k: TorusPrecision,
|
||||||
|
rows: Rows,
|
||||||
|
digits: Digits,
|
||||||
|
rank_in: Rank,
|
||||||
|
_rank_out: Rank,
|
||||||
|
) -> usize {
|
||||||
|
let size: usize = k.0.div_ceil(base2k.0) as usize;
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
size > digits,
|
size as u32 > digits.0,
|
||||||
"invalid gglwe: ceil(k/basek): {} <= digits: {}",
|
"invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
|
||||||
size,
|
digits.0
|
||||||
digits
|
|
||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
rows * digits <= size,
|
rows.0 * digits.0 <= size as u32,
|
||||||
"invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
|
"invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
|
||||||
rows,
|
rows.0,
|
||||||
digits,
|
digits.0,
|
||||||
size
|
|
||||||
);
|
);
|
||||||
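The two assertions above pin down the GGLWE layout invariants: the limb count must exceed the digit count, and rows * digits decomposition slots must fit inside the available limbs. A standalone check of both conditions (hypothetical parameters):

fn gglwe_layout_ok(k: u32, base2k: u32, rows: u32, digits: u32) -> bool {
    let size = k.div_ceil(base2k); // limbs available
    size > digits && rows * digits <= size
}

fn main() {
    assert!(gglwe_layout_ok(54, 18, 3, 1)); // 3 limbs, 3 rows of 1 digit
    assert!(!gglwe_layout_ok(54, 18, 2, 2)); // 2 * 2 = 4 slots > 3 limbs
}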
|
|
||||||
MatZnx::alloc_bytes(n, rows, rank_in, 1, rows)
|
MatZnx::alloc_bytes(
|
||||||
}
|
n.into(),
|
||||||
}
|
rows.into(),
|
||||||
|
rank_in.into(),
|
||||||
impl<D: Data> Infos for GGLWECiphertextCompressed<D> {
|
1,
|
||||||
type Inner = MatZnx<D>;
|
k.0.div_ceil(base2k.0) as usize,
|
||||||
|
)
|
||||||
fn inner(&self) -> &Self::Inner {
|
|
||||||
&self.data
|
|
||||||
}
|
|
||||||
|
|
||||||
fn basek(&self) -> usize {
|
|
||||||
self.basek
|
|
||||||
}
|
|
||||||
|
|
||||||
fn k(&self) -> usize {
|
|
||||||
self.k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<D: Data> GGLWECiphertextCompressed<D> {
|
|
||||||
pub fn rank(&self) -> usize {
|
|
||||||
self.rank_out
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn digits(&self) -> usize {
|
|
||||||
self.digits
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn rank_in(&self) -> usize {
|
|
||||||
self.data.cols_in()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn rank_out(&self) -> usize {
|
|
||||||
self.rank_out
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataRef> GGLWECiphertextCompressed<D> {
|
impl<D: DataRef> GGLWECiphertextCompressed<D> {
|
||||||
pub(crate) fn at(&self, row: usize, col: usize) -> GLWECiphertextCompressed<&[u8]> {
|
pub(crate) fn at(&self, row: usize, col: usize) -> GLWECiphertextCompressed<&[u8]> {
|
||||||
|
let rank_in: usize = self.rank_in().into();
|
||||||
GLWECiphertextCompressed {
|
GLWECiphertextCompressed {
|
||||||
data: self.data.at(row, col),
|
data: self.data.at(row, col),
|
||||||
basek: self.basek,
|
|
||||||
k: self.k,
|
k: self.k,
|
||||||
|
base2k: self.base2k,
|
||||||
rank: self.rank_out,
|
rank: self.rank_out,
|
||||||
seed: self.seed[self.rank_in() * row + col],
|
seed: self.seed[rank_in * row + col],
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataMut> GGLWECiphertextCompressed<D> {
|
impl<D: DataMut> GGLWECiphertextCompressed<D> {
|
||||||
pub(crate) fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertextCompressed<&mut [u8]> {
|
pub(crate) fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertextCompressed<&mut [u8]> {
|
||||||
let rank_in: usize = self.rank_in();
|
let rank_in: usize = self.rank_in().into();
|
||||||
GLWECiphertextCompressed {
|
GLWECiphertextCompressed {
|
||||||
data: self.data.at_mut(row, col),
|
|
||||||
basek: self.basek,
|
|
||||||
k: self.k,
|
k: self.k,
|
||||||
|
base2k: self.base2k,
|
||||||
rank: self.rank_out,
|
rank: self.rank_out,
|
||||||
|
data: self.data.at_mut(row, col),
|
||||||
seed: self.seed[rank_in * row + col], // Warning: the value is copied, not mutably borrowed
|
seed: self.seed[rank_in * row + col], // Warning: the value is copied, not mutably borrowed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
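A note on the pattern above: the migration replaces bare `usize` layout parameters with per-field `u32` newtypes (`Degree`, `Base2K`, `TorusPrecision`, `Rows`, `Digits`, `Rank`), and the limb count is always `ceil(k / base2k)`. A minimal standalone sketch of that pattern, with our own stand-in wrappers rather than the crate's actual definitions:

```rust
// Illustrative newtype wrappers mirroring (not quoting) the crate's layout types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Base2K(u32);
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct TorusPrecision(u32);
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Digits(u32);

// Number of base-2^k limbs needed to hold `k` bits of torus precision.
fn limb_count(k: TorusPrecision, base2k: Base2K) -> usize {
    k.0.div_ceil(base2k.0) as usize
}

fn main() {
    let size = limb_count(TorusPrecision(54), Base2K(18));
    assert_eq!(size, 3); // ceil(54 / 18) = 3 limbs
    let digits = Digits(1);
    assert!(size as u32 > digits.0); // the same invariant the diff asserts
}
```

Distinct wrapper types make it impossible to transpose two layout arguments at a call site, which is the failure mode the old seven-`usize` signatures invited.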
@@ -167,12 +215,12 @@ impl<D: DataMut> GGLWECiphertextCompressed<D> {

impl<D: DataMut> ReaderFrom for GGLWECiphertextCompressed<D> {
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        self.k = reader.read_u64::<LittleEndian>()? as usize;
        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
        self.basek = reader.read_u64::<LittleEndian>()? as usize;
        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
        self.digits = reader.read_u64::<LittleEndian>()? as usize;
        self.digits = Digits(reader.read_u32::<LittleEndian>()?);
        self.rank_out = reader.read_u64::<LittleEndian>()? as usize;
        self.rank_out = Rank(reader.read_u32::<LittleEndian>()?);
        let seed_len = reader.read_u64::<LittleEndian>()? as usize;
        let seed_len: u32 = reader.read_u32::<LittleEndian>()?;
        self.seed = vec![[0u8; 32]; seed_len];
        self.seed = vec![[0u8; 32]; seed_len as usize];
        for s in &mut self.seed {
            reader.read_exact(s)?;
        }
@@ -182,11 +230,11 @@ impl<D: DataMut> ReaderFrom for GGLWECiphertextCompressed<D> {

impl<D: DataRef> WriterTo for GGLWECiphertextCompressed<D> {
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_u64::<LittleEndian>(self.k as u64)?;
        writer.write_u32::<LittleEndian>(self.k.into())?;
        writer.write_u64::<LittleEndian>(self.basek as u64)?;
        writer.write_u32::<LittleEndian>(self.base2k.into())?;
        writer.write_u64::<LittleEndian>(self.digits as u64)?;
        writer.write_u32::<LittleEndian>(self.digits.into())?;
        writer.write_u64::<LittleEndian>(self.rank_out as u64)?;
        writer.write_u32::<LittleEndian>(self.rank_out.into())?;
        writer.write_u64::<LittleEndian>(self.seed.len() as u64)?;
        writer.write_u32::<LittleEndian>(self.seed.len() as u32)?;
        for s in &self.seed {
            writer.write_all(s)?;
        }
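The two hunks above change every serialized header field from little-endian `u64` to little-endian `u32`, followed by the 32-byte seeds. A hedged sketch of that wire shape, using only the `byteorder` calls the diff itself uses (field names here are illustrative, not the crate's):

```rust
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{Cursor, Read, Write};

// Write five little-endian u32 header fields, then the raw 32-byte seeds.
fn write_header<W: Write>(
    w: &mut W,
    k: u32,
    base2k: u32,
    digits: u32,
    rank_out: u32,
    seeds: &[[u8; 32]],
) -> std::io::Result<()> {
    w.write_u32::<LittleEndian>(k)?;
    w.write_u32::<LittleEndian>(base2k)?;
    w.write_u32::<LittleEndian>(digits)?;
    w.write_u32::<LittleEndian>(rank_out)?;
    w.write_u32::<LittleEndian>(seeds.len() as u32)?;
    for s in seeds {
        w.write_all(s)?;
    }
    Ok(())
}

// Read the same layout back; the seed count prefixes the seed array.
fn read_header<R: Read>(r: &mut R) -> std::io::Result<(u32, u32, u32, u32, Vec<[u8; 32]>)> {
    let k = r.read_u32::<LittleEndian>()?;
    let base2k = r.read_u32::<LittleEndian>()?;
    let digits = r.read_u32::<LittleEndian>()?;
    let rank_out = r.read_u32::<LittleEndian>()?;
    let len = r.read_u32::<LittleEndian>()? as usize;
    let mut seeds = vec![[0u8; 32]; len];
    for s in &mut seeds {
        r.read_exact(s)?;
    }
    Ok((k, base2k, digits, rank_out, seeds))
}

fn main() -> std::io::Result<()> {
    let mut buf = Vec::new();
    write_header(&mut buf, 54, 18, 1, 2, &[[7u8; 32]])?;
    let (k, _, _, _, seeds) = read_header(&mut Cursor::new(buf))?;
    assert_eq!((k, seeds.len()), (54, 1));
    Ok(())
}
```

Note that this narrows the on-disk format: data written by the old `u64` encoder will not round-trip through the new `u32` decoder.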
@@ -201,14 +249,12 @@ where
    fn decompress(&mut self, module: &Module<B>, other: &GGLWECiphertextCompressed<DR>) {
        #[cfg(debug_assertions)]
        {
            use poulpy_hal::layouts::ZnxInfos;

            assert_eq!(
                self.n(),
                other.data.n(),
                other.n(),
                "invalid receiver: self.n()={} != other.n()={}",
                self.n(),
                other.data.n()
                other.n()
            );
            assert_eq!(
                self.size(),
@@ -241,8 +287,8 @@ where
            );
        }

        let rank_in: usize = self.rank_in();
        let rank_in: usize = self.rank_in().into();
        let rows: usize = self.rows();
        let rows: usize = self.rows().into();

        (0..rank_in).for_each(|col_i| {
            (0..rows).for_each(|row_i| {
@@ -1,11 +1,11 @@
use poulpy_hal::{
    api::{VecZnxCopy, VecZnxFillUniform},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
    source::Source,
};

use crate::layouts::{
    GGLWESwitchingKey, Infos,
    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
    compressed::{Decompress, GGLWECiphertextCompressed},
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
@@ -18,9 +18,50 @@ pub struct GGLWESwitchingKeyCompressed<D: Data> {
    pub(crate) sk_out_n: usize, // Degree of sk_out
}

impl<D: Data> LWEInfos for GGLWESwitchingKeyCompressed<D> {
    fn n(&self) -> Degree {
        self.key.n()
    }

    fn base2k(&self) -> Base2K {
        self.key.base2k()
    }

    fn k(&self) -> TorusPrecision {
        self.key.k()
    }

    fn size(&self) -> usize {
        self.key.size()
    }
}
impl<D: Data> GLWEInfos for GGLWESwitchingKeyCompressed<D> {
    fn rank(&self) -> Rank {
        self.rank_out()
    }
}

impl<D: Data> GGLWELayoutInfos for GGLWESwitchingKeyCompressed<D> {
    fn rank_in(&self) -> Rank {
        self.key.rank_in()
    }

    fn rank_out(&self) -> Rank {
        self.key.rank_out()
    }

    fn digits(&self) -> Digits {
        self.key.digits()
    }

    fn rows(&self) -> Rows {
        self.key.rows()
    }
}

impl<D: DataRef> fmt::Debug for GGLWESwitchingKeyCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self)
        write!(f, "{self}")
    }
}

@@ -30,17 +71,6 @@ impl<D: DataMut> FillUniform for GGLWESwitchingKeyCompressed<D> {
    }
}

impl<D: DataMut> Reset for GGLWESwitchingKeyCompressed<D>
where
    MatZnx<D>: Reset,
{
    fn reset(&mut self) {
        self.key.reset();
        self.sk_in_n = 0;
        self.sk_out_n = 0;
    }
}

impl<D: DataRef> fmt::Display for GGLWESwitchingKeyCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
@@ -51,51 +81,51 @@ impl<D: DataRef> fmt::Display for GGLWESwitchingKeyCompressed<D> {
    }
}

impl<D: Data> Infos for GGLWESwitchingKeyCompressed<D> {
    type Inner = MatZnx<D>;

    fn inner(&self) -> &Self::Inner {
        self.key.inner()
    }

    fn basek(&self) -> usize {
        self.key.basek()
    }

    fn k(&self) -> usize {
        self.key.k()
    }
}

impl<D: Data> GGLWESwitchingKeyCompressed<D> {
    pub fn rank(&self) -> usize {
        self.key.rank()
    }

    pub fn digits(&self) -> usize {
        self.key.digits()
    }

    pub fn rank_in(&self) -> usize {
        self.key.rank_in()
    }

    pub fn rank_out(&self) -> usize {
        self.key.rank_out()
    }
}

impl GGLWESwitchingKeyCompressed<Vec<u8>> {
    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
    pub fn alloc<A>(infos: &A) -> Self
    where
        A: GGLWELayoutInfos,
    {
        GGLWESwitchingKeyCompressed {
            key: GGLWECiphertextCompressed::alloc(n, basek, k, rows, digits, rank_in, rank_out),
            key: GGLWECiphertextCompressed::alloc(infos),
            sk_in_n: 0,
            sk_out_n: 0,
        }
    }

    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize) -> usize {
        GGLWECiphertextCompressed::bytes_of(n, basek, k, rows, digits, rank_in)
    pub fn alloc_with(
        n: Degree,
        base2k: Base2K,
        k: TorusPrecision,
        rows: Rows,
        digits: Digits,
        rank_in: Rank,
        rank_out: Rank,
    ) -> Self {
        GGLWESwitchingKeyCompressed {
            key: GGLWECiphertextCompressed::alloc_with(n, base2k, k, rows, digits, rank_in, rank_out),
            sk_in_n: 0,
            sk_out_n: 0,
        }
    }

    pub fn alloc_bytes<A>(infos: &A) -> usize
    where
        A: GGLWELayoutInfos,
    {
        GGLWECiphertextCompressed::alloc_bytes(infos)
    }

    pub fn alloc_bytes_with(
        n: Degree,
        base2k: Base2K,
        k: TorusPrecision,
        rows: Rows,
        digits: Digits,
        rank_in: Rank,
        rank_out: Rank,
    ) -> usize {
        GGLWECiphertextCompressed::alloc_bytes_with(n, base2k, k, rows, digits, rank_in, rank_out)
    }
}
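Across all of these files the same API reshaping recurs: `alloc(n, basek, k, rows, digits, rank_in, rank_out)` becomes `alloc<A: GGLWELayoutInfos>(infos: &A)`, so any object that can describe its own layout can drive an allocation, with `alloc_with` kept for explicit parameters. An illustrative sketch of that shape, with our own trait and type names rather than the crate's:

```rust
// A layout-describing trait standing in for GGLWELayoutInfos.
trait LayoutInfos {
    fn n(&self) -> u32;
    fn rows(&self) -> u32;
    fn rank_in(&self) -> u32;
}

struct Layout {
    n: u32,
    rows: u32,
    rank_in: u32,
}

impl LayoutInfos for Layout {
    fn n(&self) -> u32 { self.n }
    fn rows(&self) -> u32 { self.rows }
    fn rank_in(&self) -> u32 { self.rank_in }
}

struct Key {
    data: Vec<i64>,
}

impl Key {
    // Mirrors `alloc<A: GGLWELayoutInfos>(infos: &A)`: all dimensions come
    // from the trait, so one ciphertext can be allocated "with the layout
    // of" another without re-listing seven positional arguments.
    fn alloc<A: LayoutInfos>(infos: &A) -> Self {
        let len = (infos.n() * infos.rows() * infos.rank_in()) as usize;
        Key { data: vec![0i64; len] }
    }
}

fn main() {
    let infos = Layout { n: 16, rows: 3, rank_in: 2 };
    let key = Key::alloc(&infos);
    assert_eq!(key.data.len(), 16 * 3 * 2);
}
```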
@@ -1,11 +1,11 @@
use poulpy_hal::{
    api::{VecZnxCopy, VecZnxFillUniform},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
    source::Source,
};

use crate::layouts::{
    GGLWETensorKey, Infos,
    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWETensorKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
    compressed::{Decompress, GGLWESwitchingKeyCompressed},
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
@@ -16,9 +16,49 @@ pub struct GGLWETensorKeyCompressed<D: Data> {
    pub(crate) keys: Vec<GGLWESwitchingKeyCompressed<D>>,
}

impl<D: Data> LWEInfos for GGLWETensorKeyCompressed<D> {
    fn n(&self) -> Degree {
        self.keys[0].n()
    }

    fn base2k(&self) -> Base2K {
        self.keys[0].base2k()
    }

    fn k(&self) -> TorusPrecision {
        self.keys[0].k()
    }
    fn size(&self) -> usize {
        self.keys[0].size()
    }
}
impl<D: Data> GLWEInfos for GGLWETensorKeyCompressed<D> {
    fn rank(&self) -> Rank {
        self.rank_out()
    }
}

impl<D: Data> GGLWELayoutInfos for GGLWETensorKeyCompressed<D> {
    fn rank_in(&self) -> Rank {
        self.rank_out()
    }

    fn rank_out(&self) -> Rank {
        self.keys[0].rank_out()
    }

    fn digits(&self) -> Digits {
        self.keys[0].digits()
    }

    fn rows(&self) -> Rows {
        self.keys[0].rows()
    }
}

impl<D: DataRef> fmt::Debug for GGLWETensorKeyCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self)
        write!(f, "{self}")
    }
}

@@ -30,76 +70,79 @@ impl<D: DataMut> FillUniform for GGLWETensorKeyCompressed<D> {
    }
}

impl<D: DataMut> Reset for GGLWETensorKeyCompressed<D>
where
    MatZnx<D>: Reset,
{
    fn reset(&mut self) {
        self.keys
            .iter_mut()
            .for_each(|key: &mut GGLWESwitchingKeyCompressed<D>| key.reset())
    }
}

impl<D: DataRef> fmt::Display for GGLWETensorKeyCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "(GLWETensorKeyCompressed)",)?;
        for (i, key) in self.keys.iter().enumerate() {
            write!(f, "{}: {}", i, key)?;
            write!(f, "{i}: {key}")?;
        }
        Ok(())
    }
}

impl GGLWETensorKeyCompressed<Vec<u8>> {
    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
    pub fn alloc<A>(infos: &A) -> Self
    where
        A: GGLWELayoutInfos,
    {
        assert_eq!(
            infos.rank_in(),
            infos.rank_out(),
            "rank_in != rank_out is not supported for GGLWETensorKeyCompressed"
        );
        Self::alloc_with(
            infos.n(),
            infos.base2k(),
            infos.k(),
            infos.rows(),
            infos.digits(),
            infos.rank_out(),
        )
    }

    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self {
        let mut keys: Vec<GGLWESwitchingKeyCompressed<Vec<u8>>> = Vec::new();
        let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
        let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
        (0..pairs).for_each(|_| {
            keys.push(GGLWESwitchingKeyCompressed::alloc(
            keys.push(GGLWESwitchingKeyCompressed::alloc_with(
                n, basek, k, rows, digits, 1, rank,
                n,
                base2k,
                k,
                rows,
                digits,
                Rank(1),
                rank,
            ));
        });
        Self { keys }
    }

    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
        let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
        pairs * GGLWESwitchingKeyCompressed::bytes_of(n, basek, k, rows, digits, 1)
    }
}

impl<D: Data> Infos for GGLWETensorKeyCompressed<D> {
    type Inner = MatZnx<D>;

    fn inner(&self) -> &Self::Inner {
        self.keys[0].inner()
    }

    fn basek(&self) -> usize {
        self.keys[0].basek()
    }

    fn k(&self) -> usize {
        self.keys[0].k()
    }
}

impl<D: Data> GGLWETensorKeyCompressed<D> {
    pub fn rank(&self) -> usize {
        self.keys[0].rank()
    }

    pub fn digits(&self) -> usize {
        self.keys[0].digits()
    }

    pub fn rank_in(&self) -> usize {
        self.keys[0].rank_in()
    }

    pub fn rank_out(&self) -> usize {
        self.keys[0].rank_out()
    pub fn alloc_bytes<A>(infos: &A) -> usize
    where
        A: GGLWELayoutInfos,
    {
        assert_eq!(
            infos.rank_in(),
            infos.rank_out(),
            "rank_in != rank_out is not supported for GGLWETensorKeyCompressed"
        );
        let rank_out: usize = infos.rank_out().into();
        let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1);
        pairs
            * GGLWESwitchingKeyCompressed::alloc_bytes_with(
                infos.n(),
                infos.base2k(),
                infos.k(),
                infos.rows(),
                infos.digits(),
                Rank(1),
                infos.rank_out(),
            )
    }

    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> usize {
        let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
        pairs * GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rows, digits, Rank(1), rank)
    }
}

@@ -134,7 +177,7 @@ impl<D: DataMut> GGLWETensorKeyCompressed<D> {
        if i > j {
            std::mem::swap(&mut i, &mut j);
        };
        let rank: usize = self.rank();
        let rank: usize = self.rank_out().into();
        &mut self.keys[i * rank + j - (i * (i + 1) / 2)]
    }
}
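The tensor key keeps the triangular indexing the diff leaves unchanged: a rank-`r` key stores one switching key per unordered pair `(i, j)` with `i <= j`, i.e. `r*(r+1)/2` entries, and `at(i, j)` resolves to slot `i*r + j - i*(i+1)/2` after swapping so `i <= j`. A standalone check of that arithmetic (ours, not from the repo):

```rust
// Map an unordered pair (i, j), 0 <= i, j < rank, to its triangular slot.
fn pair_index(mut i: usize, mut j: usize, rank: usize) -> usize {
    if i > j {
        std::mem::swap(&mut i, &mut j);
    }
    i * rank + j - (i * (i + 1) / 2)
}

fn main() {
    let rank = 3;
    let pairs = ((rank + 1) * rank) / 2; // 6 unordered pairs for rank 3
    // Every slot is hit exactly once and stays in bounds.
    let mut seen = vec![false; pairs];
    for i in 0..rank {
        for j in i..rank {
            let idx = pair_index(i, j, rank);
            assert!(!seen[idx]);
            seen[idx] = true;
        }
    }
    assert!(seen.iter().all(|&b| b));
    // Symmetric access resolves to the same slot.
    assert_eq!(pair_index(2, 0, rank), pair_index(0, 2, rank));
}
```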
@@ -1,11 +1,11 @@
use poulpy_hal::{
    api::{VecZnxCopy, VecZnxFillUniform},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, WriterTo, ZnxInfos},
    source::Source,
};

use crate::layouts::{
    GGSWCiphertext, Infos,
    Base2K, Degree, Digits, GGSWCiphertext, GGSWInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
    compressed::{Decompress, GLWECiphertextCompressed},
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
@@ -14,13 +14,45 @@ use std::fmt;
#[derive(PartialEq, Eq, Clone)]
pub struct GGSWCiphertextCompressed<D: Data> {
    pub(crate) data: MatZnx<D>,
    pub(crate) basek: usize,
    pub(crate) k: TorusPrecision,
    pub(crate) k: usize,
    pub(crate) base2k: Base2K,
    pub(crate) digits: usize,
    pub(crate) digits: Digits,
    pub(crate) rank: usize,
    pub(crate) rank: Rank,
    pub(crate) seed: Vec<[u8; 32]>,
}

impl<D: Data> LWEInfos for GGSWCiphertextCompressed<D> {
    fn n(&self) -> Degree {
        Degree(self.data.n() as u32)
    }

    fn base2k(&self) -> Base2K {
        self.base2k
    }

    fn k(&self) -> TorusPrecision {
        self.k
    }
    fn size(&self) -> usize {
        self.data.size()
    }
}
impl<D: Data> GLWEInfos for GGSWCiphertextCompressed<D> {
    fn rank(&self) -> Rank {
        self.rank
    }
}

impl<D: Data> GGSWInfos for GGSWCiphertextCompressed<D> {
    fn digits(&self) -> Digits {
        self.digits
    }

    fn rows(&self) -> Rows {
        Rows(self.data.rows() as u32)
    }
}

impl<D: DataRef> fmt::Debug for GGSWCiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.data)
@@ -31,23 +63,12 @@ impl<D: DataRef> fmt::Display for GGSWCiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "(GGSWCiphertextCompressed: basek={} k={} digits={}) {}",
            "(GGSWCiphertextCompressed: base2k={} k={} digits={}) {}",
            self.basek, self.k, self.digits, self.data
            self.base2k, self.k, self.digits, self.data
        )
    }
}

impl<D: DataMut> Reset for GGSWCiphertextCompressed<D> {
    fn reset(&mut self) {
        self.data.reset();
        self.basek = 0;
        self.k = 0;
        self.digits = 0;
        self.rank = 0;
        self.seed = Vec::new();
    }
}

impl<D: DataMut> FillUniform for GGSWCiphertextCompressed<D> {
    fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
        self.data.fill_uniform(log_bound, source);
@@ -55,114 +76,123 @@ impl<D: DataMut> FillUniform for GGSWCiphertextCompressed<D> {
}

impl GGSWCiphertextCompressed<Vec<u8>> {
    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
        let size: usize = k.div_ceil(basek);
        debug_assert!(digits > 0, "invalid ggsw: `digits` == 0");
    pub fn alloc<A>(infos: &A) -> Self
    where
        A: GGSWInfos,
    {
        Self::alloc_with(
            infos.n(),
            infos.base2k(),
            infos.k(),
            infos.rows(),
            infos.digits(),
            infos.rank(),
        )
    }

    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self {
        let size: usize = k.0.div_ceil(base2k.0) as usize;
        debug_assert!(
            size > digits,
            "invalid ggsw: ceil(k/basek): {} <= digits: {}",
            size,
            digits
            size as u32 > digits.0,
            "invalid ggsw: ceil(k/base2k): {size} <= digits: {}",
            digits.0
        );

        assert!(
            rows * digits <= size,
            "invalid ggsw: rows: {} * digits:{} > ceil(k/basek): {}",
            rows,
            digits,
            size
            rows.0 * digits.0 <= size as u32,
            "invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {size}",
            rows.0,
            digits.0,
        );

        Self {
            data: MatZnx::alloc(n, rows, rank + 1, 1, k.div_ceil(basek)),
            basek,
            data: MatZnx::alloc(
                n.into(),
                rows.into(),
                (rank + 1).into(),
                1,
                k.0.div_ceil(base2k.0) as usize,
            ),
            k,
            base2k,
            digits,
            rank,
            seed: Vec::new(),
        }
    }

    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
        let size: usize = k.div_ceil(basek);
    pub fn alloc_bytes<A>(infos: &A) -> usize
    where
        A: GGSWInfos,
    {
        Self::alloc_bytes_with(
            infos.n(),
            infos.base2k(),
            infos.k(),
            infos.rows(),
            infos.digits(),
            infos.rank(),
        )
    }

    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> usize {
        let size: usize = k.0.div_ceil(base2k.0) as usize;
        debug_assert!(
            size > digits,
            "invalid ggsw: ceil(k/basek): {} <= digits: {}",
            size,
            digits
            size as u32 > digits.0,
            "invalid ggsw: ceil(k/base2k): {size} <= digits: {}",
            digits.0
        );

        assert!(
            rows * digits <= size,
            "invalid ggsw: rows: {} * digits:{} > ceil(k/basek): {}",
            rows,
            digits,
            size
            rows.0 * digits.0 <= size as u32,
            "invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {size}",
            rows.0,
            digits.0,
        );

        MatZnx::alloc_bytes(n, rows, rank + 1, 1, size)
        MatZnx::alloc_bytes(
            n.into(),
            rows.into(),
            (rank + 1).into(),
            1,
            k.0.div_ceil(base2k.0) as usize,
        )
    }
}

impl<D: DataRef> GGSWCiphertextCompressed<D> {
    pub fn at(&self, row: usize, col: usize) -> GLWECiphertextCompressed<&[u8]> {
        let rank: usize = self.rank().into();
        GLWECiphertextCompressed {
            data: self.data.at(row, col),
            basek: self.basek,
            k: self.k,
            rank: self.rank(),
            base2k: self.base2k,
            seed: self.seed[row * (self.rank() + 1) + col],
            rank: self.rank,
            seed: self.seed[row * (rank + 1) + col],
        }
    }
}

impl<D: DataMut> GGSWCiphertextCompressed<D> {
    pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertextCompressed<&mut [u8]> {
        let rank: usize = self.rank();
        let rank: usize = self.rank().into();
        GLWECiphertextCompressed {
            data: self.data.at_mut(row, col),
            basek: self.basek,
            k: self.k,
            rank,
            base2k: self.base2k,
            rank: self.rank,
            seed: self.seed[row * (rank + 1) + col],
        }
    }
}

impl<D: Data> Infos for GGSWCiphertextCompressed<D> {
    type Inner = MatZnx<D>;

    fn inner(&self) -> &Self::Inner {
        &self.data
    }

    fn basek(&self) -> usize {
        self.basek
    }

    fn k(&self) -> usize {
        self.k
    }
}

impl<D: Data> GGSWCiphertextCompressed<D> {
    pub fn rank(&self) -> usize {
        self.rank
    }

    pub fn digits(&self) -> usize {
        self.digits
    }
}

impl<D: DataMut> ReaderFrom for GGSWCiphertextCompressed<D> {
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        self.k = reader.read_u64::<LittleEndian>()? as usize;
        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
        self.basek = reader.read_u64::<LittleEndian>()? as usize;
        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
        self.digits = reader.read_u64::<LittleEndian>()? as usize;
        self.digits = Digits(reader.read_u32::<LittleEndian>()?);
        self.rank = reader.read_u64::<LittleEndian>()? as usize;
        self.rank = Rank(reader.read_u32::<LittleEndian>()?);
        let seed_len = reader.read_u64::<LittleEndian>()? as usize;
        let seed_len: usize = reader.read_u32::<LittleEndian>()? as usize;
        self.seed = vec![[0u8; 32]; seed_len];
        for s in &mut self.seed {
            reader.read_exact(s)?;
@@ -173,11 +203,11 @@ impl<D: DataMut> ReaderFrom for GGSWCiphertextCompressed<D> {

impl<D: DataRef> WriterTo for GGSWCiphertextCompressed<D> {
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_u64::<LittleEndian>(self.k as u64)?;
        writer.write_u32::<LittleEndian>(self.k.into())?;
        writer.write_u64::<LittleEndian>(self.basek as u64)?;
        writer.write_u32::<LittleEndian>(self.base2k.into())?;
        writer.write_u64::<LittleEndian>(self.digits as u64)?;
        writer.write_u32::<LittleEndian>(self.digits.into())?;
        writer.write_u64::<LittleEndian>(self.rank as u64)?;
        writer.write_u32::<LittleEndian>(self.rank.into())?;
        writer.write_u64::<LittleEndian>(self.seed.len() as u64)?;
        writer.write_u32::<LittleEndian>(self.seed.len() as u32)?;
        for s in &self.seed {
            writer.write_all(s)?;
        }
@@ -195,8 +225,8 @@ where
        assert_eq!(self.rank(), other.rank())
    }

        let rows: usize = self.rows();
        let rows: usize = self.rows().into();
        let rank: usize = self.rank();
        let rank: usize = self.rank().into();
        (0..rows).for_each(|row_i| {
            (0..rank + 1).for_each(|col_j| {
                self.at_mut(row_i, col_j)
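The GGSW accessors above address one 32-byte seed per matrix cell, with `rank + 1` columns per row, so cell `(row, col)` lives at `row * (rank + 1) + col`. A small sanity sketch of that addressing (ours, not from the repo):

```rust
// One seed per (row, col) cell; rank + 1 columns per row.
fn seed_index(row: usize, col: usize, rank: usize) -> usize {
    row * (rank + 1) + col
}

fn main() {
    let (rows, rank) = (4, 2);
    let seeds = vec![[0u8; 32]; rows * (rank + 1)];
    // Walk every cell the way the decompression loop does and check that
    // the index stays in bounds and hits each slot exactly once.
    let mut hits = vec![0usize; seeds.len()];
    for row in 0..rows {
        for col in 0..rank + 1 {
            hits[seed_index(row, col, rank)] += 1;
        }
    }
    assert!(hits.iter().all(|&h| h == 1));
}
```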
@@ -1,25 +1,48 @@
use poulpy_hal::{
    api::{VecZnxCopy, VecZnxFillUniform},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, Reset, VecZnx, WriterTo},
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, VecZnx, WriterTo, ZnxInfos},
    source::Source,
};

use crate::layouts::{GLWECiphertext, Infos, compressed::Decompress};
use crate::layouts::{Base2K, Degree, GLWECiphertext, GLWEInfos, LWEInfos, Rank, TorusPrecision, compressed::Decompress};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;

#[derive(PartialEq, Eq, Clone)]
pub struct GLWECiphertextCompressed<D: Data> {
    pub(crate) data: VecZnx<D>,
    pub(crate) basek: usize,
    pub(crate) base2k: Base2K,
    pub(crate) k: usize,
    pub(crate) k: TorusPrecision,
    pub(crate) rank: usize,
    pub(crate) rank: Rank,
    pub(crate) seed: [u8; 32],
}

impl<D: Data> LWEInfos for GLWECiphertextCompressed<D> {
    fn base2k(&self) -> Base2K {
        self.base2k
    }

    fn k(&self) -> TorusPrecision {
        self.k
    }

    fn size(&self) -> usize {
        self.data.size()
    }

    fn n(&self) -> Degree {
        Degree(self.data.n() as u32)
    }
}
impl<D: Data> GLWEInfos for GLWECiphertextCompressed<D> {
    fn rank(&self) -> Rank {
        self.rank
    }
}

impl<D: DataRef> fmt::Debug for GLWECiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self)
        write!(f, "{self}")
    }
}

@@ -27,75 +50,57 @@ impl<D: DataRef> fmt::Display for GLWECiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "GLWECiphertextCompressed: basek={} k={} rank={} seed={:?}: {}",
            "GLWECiphertextCompressed: base2k={} k={} rank={} seed={:?}: {}",
            self.basek(),
            self.base2k(),
            self.k(),
            self.rank,
            self.rank(),
            self.seed,
            self.data
        )
    }
}

impl<D: DataMut> Reset for GLWECiphertextCompressed<D> {
    fn reset(&mut self) {
        self.data.reset();
        self.basek = 0;
        self.k = 0;
        self.rank = 0;
        self.seed = [0u8; 32];
    }
}

impl<D: DataMut> FillUniform for GLWECiphertextCompressed<D> {
    fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
        self.data.fill_uniform(log_bound, source);
    }
}

impl<D: Data> Infos for GLWECiphertextCompressed<D> {
    type Inner = VecZnx<D>;

    fn inner(&self) -> &Self::Inner {
        &self.data
    }

    fn basek(&self) -> usize {
        self.basek
    }

    fn k(&self) -> usize {
        self.k
    }
}

impl<D: Data> GLWECiphertextCompressed<D> {
    pub fn rank(&self) -> usize {
        self.rank
    }
}

impl GLWECiphertextCompressed<Vec<u8>> {
    pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
    pub fn alloc<A>(infos: &A) -> Self
    where
        A: GLWEInfos,
    {
        Self::alloc_with(infos.n(), infos.base2k(), infos.k(), infos.rank())
    }

    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self {
        Self {
            data: VecZnx::alloc(n, 1, k.div_ceil(basek)),
            data: VecZnx::alloc(n.into(), 1, k.0.div_ceil(base2k.0) as usize),
            basek,
            base2k,
            k,
            rank,
            seed: [0u8; 32],
        }
    }

    pub fn bytes_of(n: usize, basek: usize, k: usize) -> usize {
        GLWECiphertext::bytes_of(n, basek, k, 1)
    pub fn alloc_bytes<A>(infos: &A) -> usize
    where
        A: GLWEInfos,
    {
        Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k())
    }

    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision) -> usize {
        VecZnx::alloc_bytes(n.into(), 1, k.0.div_ceil(base2k.0) as usize)
    }
}

impl<D: DataMut> ReaderFrom for GLWECiphertextCompressed<D> {
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        self.k = reader.read_u64::<LittleEndian>()? as usize;
        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
        self.basek = reader.read_u64::<LittleEndian>()? as usize;
        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
        self.rank = reader.read_u64::<LittleEndian>()? as usize;
        self.rank = Rank(reader.read_u32::<LittleEndian>()?);
        reader.read_exact(&mut self.seed)?;
        self.data.read_from(reader)
    }
@@ -103,9 +108,9 @@ impl<D: DataMut> ReaderFrom for GLWECiphertextCompressed<D> {

impl<D: DataRef> WriterTo for GLWECiphertextCompressed<D> {
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_u64::<LittleEndian>(self.k as u64)?;
        writer.write_u32::<LittleEndian>(self.k.into())?;
        writer.write_u64::<LittleEndian>(self.basek as u64)?;
        writer.write_u32::<LittleEndian>(self.base2k.into())?;
        writer.write_u64::<LittleEndian>(self.rank as u64)?;
        writer.write_u32::<LittleEndian>(self.rank.into())?;
        writer.write_all(&self.seed)?;
        self.data.write_to(writer)
    }
@@ -118,14 +123,12 @@ where
    fn decompress(&mut self, module: &Module<B>, other: &GLWECiphertextCompressed<DR>) {
        #[cfg(debug_assertions)]
        {
            use poulpy_hal::layouts::ZnxInfos;

            assert_eq!(
                self.n(),
                other.data.n(),
                other.n(),
                "invalid receiver: self.n()={} != other.n()={}",
                self.n(),
                other.data.n()
                other.n()
            );
            assert_eq!(
                self.size(),
@@ -164,15 +167,12 @@ impl<D: DataMut> GLWECiphertext<D> {
            debug_assert_eq!(self.size(), other.size());
        }

        let k: usize = other.k;
        let basek: usize = other.basek;
        let cols: usize = other.rank() + 1;
        module.vec_znx_copy(&mut self.data, 0, &other.data, 0);
        (1..cols).for_each(|i| {
        (1..(other.rank() + 1).into()).for_each(|i| {
            module.vec_znx_fill_uniform(basek, &mut self.data, i, source);
            module.vec_znx_fill_uniform(other.base2k.into(), &mut self.data, i, source);
        });

        self.basek = basek;
        self.base2k = other.base2k;
        self.k = k;
        self.k = other.k;
    }
}
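The decompression hunk above captures the idea behind the compressed GLWE format: only column 0 (the body) and a 32-byte seed are stored, and decompression copies the body and re-derives the remaining `rank` uniform mask columns from the seed. A toy illustration of why this round-trips (ours; the crate uses its own `Source` PRNG and module API, and `expand` here is an arbitrary stand-in PRF):

```rust
// Deterministic stand-in for seeded uniform sampling: any PRF keyed by
// (seed, column index) works, as long as both sides use the same one.
fn expand(seed: [u8; 32], col: usize, n: usize) -> Vec<i64> {
    (0..n)
        .map(|i| {
            let x = seed[i % 32] as u64
                ^ ((col as u64) << 8)
                ^ (i as u64).wrapping_mul(0x9E3779B97F4A7C15);
            x.wrapping_mul(0xD1342543DE82EF95) as i64
        })
        .collect()
}

// Column 0 is stored; columns 1..=rank are regenerated from the seed.
fn decompress(body: &[i64], seed: [u8; 32], rank: usize) -> Vec<Vec<i64>> {
    let mut cols = vec![body.to_vec()];
    for c in 1..=rank {
        cols.push(expand(seed, c, body.len()));
    }
    cols
}

fn main() {
    let body = vec![1i64, 2, 3, 4];
    let seed = [42u8; 32];
    // Two independent decompressions agree, which is the whole point:
    // the mask never needs to be transmitted.
    assert_eq!(decompress(&body, seed, 2), decompress(&body, seed, 2));
}
```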
@@ -1,23 +1,62 @@
use std::fmt;

use poulpy_hal::{
    api::{
        SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
        VecZnxBigNormalize, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume, VecZnxNormalize,
        VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
    },
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
    source::Source,
};

use crate::layouts::{GLWEToLWESwitchingKey, Infos, compressed::GGLWESwitchingKeyCompressed};
use crate::layouts::{
    Base2K, Degree, Digits, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
    compressed::GGLWESwitchingKeyCompressed,
};

#[derive(PartialEq, Eq, Clone)]
pub struct GLWEToLWESwitchingKeyCompressed<D: Data>(pub(crate) GGLWESwitchingKeyCompressed<D>);

impl<D: Data> LWEInfos for GLWEToLWESwitchingKeyCompressed<D> {
    fn base2k(&self) -> Base2K {
        self.0.base2k()
    }

    fn k(&self) -> TorusPrecision {
        self.0.k()
    }

    fn n(&self) -> Degree {
        self.0.n()
    }
    fn size(&self) -> usize {
        self.0.size()
    }
}

impl<D: Data> GLWEInfos for GLWEToLWESwitchingKeyCompressed<D> {
    fn rank(&self) -> Rank {
        self.rank_out()
    }
}

impl<D: Data> GGLWELayoutInfos for GLWEToLWESwitchingKeyCompressed<D> {
    fn rank_in(&self) -> Rank {
        self.0.rank_in()
    }

    fn digits(&self) -> Digits {
        self.0.digits()
    }

    fn rank_out(&self) -> Rank {
        self.0.rank_out()
    }

    fn rows(&self) -> Rows {
        self.0.rows()
    }
}

impl<D: DataRef> fmt::Debug for GLWEToLWESwitchingKeyCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self)
        write!(f, "{self}")
    }
}

@@ -27,52 +66,12 @@ impl<D: DataMut> FillUniform for GLWEToLWESwitchingKeyCompressed<D> {
    }
}

impl<D: DataMut> Reset for GLWEToLWESwitchingKeyCompressed<D> {
    fn reset(&mut self) {
        self.0.reset();
    }
}

impl<D: DataRef> fmt::Display for GLWEToLWESwitchingKeyCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(GLWEToLWESwitchingKeyCompressed) {}", self.0)
    }
}

impl<D: Data> Infos for GLWEToLWESwitchingKeyCompressed<D> {
    type Inner = MatZnx<D>;

    fn inner(&self) -> &Self::Inner {
        self.0.inner()
    }

    fn basek(&self) -> usize {
        self.0.basek()
    }

    fn k(&self) -> usize {
        self.0.k()
    }
}

impl<D: Data> GLWEToLWESwitchingKeyCompressed<D> {
    pub fn digits(&self) -> usize {
        self.0.digits()
    }

    pub fn rank(&self) -> usize {
        self.0.rank()
    }

    pub fn rank_in(&self) -> usize {
        self.0.rank_in()
    }

    pub fn rank_out(&self) -> usize {
        self.0.rank_out()
    }
}

impl<D: DataMut> ReaderFrom for GLWEToLWESwitchingKeyCompressed<D> {
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        self.0.read_from(reader)
@@ -86,31 +85,53 @@ impl<D: DataRef> WriterTo for GLWEToLWESwitchingKeyCompressed<D> {
}

impl GLWEToLWESwitchingKeyCompressed<Vec<u8>> {
    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, rank_in: usize) -> Self {
        Self(GGLWESwitchingKeyCompressed::alloc(
            n, basek, k, rows, 1, rank_in, 1,
    pub fn alloc<A>(infos: &A) -> Self
    where
        A: GGLWELayoutInfos,
    {
        debug_assert_eq!(
            infos.rank_out().0,
            1,
            "rank_out > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
        );
        debug_assert_eq!(
            infos.digits().0,
            1,
            "digits > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
        );
        Self(GGLWESwitchingKeyCompressed::alloc(infos))
    }

    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_in: Rank) -> Self {
        Self(GGLWESwitchingKeyCompressed::alloc_with(
            n,
            base2k,
            k,
            rows,
            Digits(1),
            rank_in,
            Rank(1),
        ))
    }

    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_in: usize) -> usize
    pub fn alloc_bytes<A>(infos: &A) -> usize
    where
        Module<B>: VecZnxDftAllocBytes
            + VecZnxBigNormalize<B>
            + VecZnxDftApply<B>
            + SvpApplyDftToDftInplace<B>
            + VecZnxIdftApplyConsume<B>
            + VecZnxNormalizeTmpBytes
            + VecZnxFillUniform
            + VecZnxSubABInplace
            + VecZnxAddInplace
            + VecZnxNormalizeInplace<B>
            + VecZnxAddNormal
            + VecZnxNormalize<B>
            + VecZnxSub
            + SvpPrepare<B>
            + SvpPPolAllocBytes
            + SvpPPolAlloc<B>,
        A: GGLWELayoutInfos,
    {
        GLWEToLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank_in)
        debug_assert_eq!(
            infos.rank_out().0,
            1,
            "rank_out > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
        );
        debug_assert_eq!(
            infos.digits().0,
            1,
            "digits > 1 is unsupported for GLWEToLWESwitchingKeyCompressed"
        );
        GGLWESwitchingKeyCompressed::alloc_bytes(infos)
    }

    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_in: Rank) -> usize {
        GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rows, Digits(1), rank_in, Rank(1))
    }
}
@@ -2,25 +2,41 @@ use std::fmt;

use poulpy_hal::{
    api::ZnFillUniform,
    layouts::{
        Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, Reset, VecZnx, WriterTo, ZnxInfos, ZnxView, ZnxViewMut,
    },
    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo, Zn, ZnxInfos, ZnxView, ZnxViewMut},
    source::Source,
};

use crate::layouts::{Infos, LWECiphertext, SetMetaData, compressed::Decompress};
use crate::layouts::{Base2K, Degree, LWECiphertext, LWEInfos, TorusPrecision, compressed::Decompress};

#[derive(PartialEq, Eq, Clone)]
pub struct LWECiphertextCompressed<D: Data> {
    pub(crate) data: VecZnx<D>,
    pub(crate) data: Zn<D>,
    pub(crate) k: usize,
    pub(crate) k: TorusPrecision,
    pub(crate) basek: usize,
    pub(crate) base2k: Base2K,
    pub(crate) seed: [u8; 32],
}

impl<D: Data> LWEInfos for LWECiphertextCompressed<D> {
    fn base2k(&self) -> Base2K {
        self.base2k
    }

    fn k(&self) -> TorusPrecision {
        self.k
    }

    fn n(&self) -> Degree {
        Degree(self.data.n() as u32)
    }

    fn size(&self) -> usize {
        self.data.size()
    }
}

impl<D: DataRef> fmt::Debug for LWECiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self)
        write!(f, "{self}")
    }
}

@@ -28,8 +44,8 @@ impl<D: DataRef> fmt::Display for LWECiphertextCompressed<D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "LWECiphertextCompressed: basek={} k={} seed={:?}: {}",
            "LWECiphertextCompressed: base2k={} k={} seed={:?}: {}",
            self.basek(),
            self.base2k(),
            self.k(),
            self.seed,
            self.data
@@ -37,18 +53,6 @@ impl<D: DataRef> fmt::Display for LWECiphertextCompressed<D> {
    }
}

impl<D: DataMut> Reset for LWECiphertextCompressed<D>
where
    VecZnx<D>: Reset,
{
    fn reset(&mut self) {
        self.data.reset();
        self.basek = 0;
        self.k = 0;
        self.seed = [0u8; 32];
    }
}

impl<D: DataMut> FillUniform for LWECiphertextCompressed<D> {
    fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
        self.data.fill_uniform(log_bound, source);
@@ -56,46 +60,31 @@ impl<D: DataMut> FillUniform for LWECiphertextCompressed<D> {
}

impl LWECiphertextCompressed<Vec<u8>> {
    pub fn alloc(basek: usize, k: usize) -> Self {
    pub fn alloc<A>(infos: &A) -> Self
    where
        A: LWEInfos,
    {
        Self::alloc_with(infos.base2k(), infos.k())
    }

    pub fn alloc_with(base2k: Base2K, k: TorusPrecision) -> Self {
        Self {
            data: VecZnx::alloc(1, 1, k.div_ceil(basek)),
            data: Zn::alloc(1, 1, k.0.div_ceil(base2k.0) as usize),
            k,
            basek,
            base2k,
            seed: [0u8; 32],
        }
    }
}

impl<D: Data> Infos for LWECiphertextCompressed<D>
where
    VecZnx<D>: ZnxInfos,
{
    type Inner = VecZnx<D>;

    fn n(&self) -> usize {
        &self.inner().n() - 1
    }

    fn inner(&self) -> &Self::Inner {
        &self.data
    }

    fn basek(&self) -> usize {
        self.basek
    }

    fn k(&self) -> usize {
        self.k
    }
}

impl<DataSelf: DataMut> SetMetaData for LWECiphertextCompressed<DataSelf> {
    fn set_k(&mut self, k: usize) {
        self.k = k
    }

    fn set_basek(&mut self, basek: usize) {
        self.basek = basek
    pub fn alloc_bytes<A>(infos: &A) -> usize
    where
        A: LWEInfos,
    {
        Self::alloc_bytes_with(infos.base2k(), infos.k())
    }

    pub fn alloc_bytes_with(base2k: Base2K, k: TorusPrecision) -> usize {
        Zn::alloc_bytes(1, 1, k.0.div_ceil(base2k.0) as usize)
    }
}

@@ -103,8 +92,8 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

impl<D: DataMut> ReaderFrom for LWECiphertextCompressed<D> {
    fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
        self.k = reader.read_u64::<LittleEndian>()? as usize;
        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
        self.basek = reader.read_u64::<LittleEndian>()? as usize;
        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
        reader.read_exact(&mut self.seed)?;
        self.data.read_from(reader)
    }
@@ -112,8 +101,8 @@ impl<D: DataMut> ReaderFrom for LWECiphertextCompressed<D> {

impl<D: DataRef> WriterTo for LWECiphertextCompressed<D> {
    fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_u64::<LittleEndian>(self.k as u64)?;
        writer.write_u32::<LittleEndian>(self.k.into())?;
        writer.write_u64::<LittleEndian>(self.basek as u64)?;
        writer.write_u32::<LittleEndian>(self.base2k.into())?;
        writer.write_all(&self.seed)?;
        self.data.write_to(writer)
    }
@@ -126,7 +115,13 @@ where
    fn decompress(&mut self, module: &Module<B>, other: &LWECiphertextCompressed<DR>) {
        debug_assert_eq!(self.size(), other.size());
        let mut source: Source = Source::new(other.seed);
        module.zn_fill_uniform(self.n(), other.basek(), &mut self.data, 0, &mut source);
        module.zn_fill_uniform(
            self.n().into(),
            other.base2k().into(),
            &mut self.data,
            0,
            &mut source,
        );
        (0..self.size()).for_each(|i| {
            self.data.at_mut(0, i)[0] = other.data.at(0, i)[0];
        });
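The LWE decompression above works the other way around from the GLWE case: the whole row is regenerated uniformly from the seed, and then only coefficient 0 of each limb is overwritten with the stored body value, since that is the only coefficient the compressed form keeps explicitly. A toy sketch of that step (ours; the stand-in PRF replaces `zn_fill_uniform`):

```rust
// Regenerate each limb from the seed, then restore the stored body value
// in slot 0; every other coefficient is seed-derived on both sides.
fn decompress_limbs(stored: &[i64], seed: u64, width: usize) -> Vec<Vec<i64>> {
    stored
        .iter()
        .enumerate()
        .map(|(limb, &b)| {
            // Deterministic per (seed, limb, slot); any agreed-upon PRF works.
            let mut row: Vec<i64> = (0..width)
                .map(|slot| {
                    (seed ^ ((limb as u64) << 32) ^ (slot as u64))
                        .wrapping_mul(0x9E3779B97F4A7C15) as i64
                })
                .collect();
            row[0] = b; // only the body coefficient is stored explicitly
            row
        })
        .collect()
}

fn main() {
    let limbs = decompress_limbs(&[10, 20, 30], 7, 5);
    assert_eq!(limbs.len(), 3);
    assert_eq!(limbs[0][0], 10);
    assert_eq!(limbs[2][0], 30);
}
```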
@@ -1,15 +1,11 @@
 use poulpy_hal::{
-    api::{
-        SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
-        VecZnxBigNormalize, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
-    },
-    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
+    api::{VecZnxCopy, VecZnxFillUniform},
+    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
     source::Source,
 };

 use crate::layouts::{
-    Infos, LWESwitchingKey,
+    Base2K, Degree, Digits, GGLWELayoutInfos, GLWEInfos, LWEInfos, LWESwitchingKey, Rank, Rows, TorusPrecision,
     compressed::{Decompress, GGLWESwitchingKeyCompressed},
 };
 use std::fmt;
@@ -17,9 +13,49 @@ use std::fmt;
 #[derive(PartialEq, Eq, Clone)]
 pub struct LWESwitchingKeyCompressed<D: Data>(pub(crate) GGLWESwitchingKeyCompressed<D>);

+impl<D: Data> LWEInfos for LWESwitchingKeyCompressed<D> {
+    fn base2k(&self) -> Base2K {
+        self.0.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.0.k()
+    }
+
+    fn n(&self) -> Degree {
+        self.0.n()
+    }
+    fn size(&self) -> usize {
+        self.0.size()
+    }
+}
+impl<D: Data> GLWEInfos for LWESwitchingKeyCompressed<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for LWESwitchingKeyCompressed<D> {
+    fn digits(&self) -> Digits {
+        self.0.digits()
+    }
+
+    fn rank_in(&self) -> Rank {
+        self.0.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.0.rank_out()
+    }
+
+    fn rows(&self) -> Rows {
+        self.0.rows()
+    }
+}
+
 impl<D: DataRef> fmt::Debug for LWESwitchingKeyCompressed<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -29,52 +65,12 @@ impl<D: DataMut> FillUniform for LWESwitchingKeyCompressed<D> {
     }
 }

-impl<D: DataMut> Reset for LWESwitchingKeyCompressed<D> {
-    fn reset(&mut self) {
-        self.0.reset();
-    }
-}
-
 impl<D: DataRef> fmt::Display for LWESwitchingKeyCompressed<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(LWESwitchingKeyCompressed) {}", self.0)
     }
 }

-impl<D: Data> Infos for LWESwitchingKeyCompressed<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.0.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.0.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.0.k()
-    }
-}
-
-impl<D: Data> LWESwitchingKeyCompressed<D> {
-    pub fn digits(&self) -> usize {
-        self.0.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.0.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.0.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.0.rank_out()
-    }
-}
-
 impl<D: DataMut> ReaderFrom for LWESwitchingKeyCompressed<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
         self.0.read_from(reader)
@@ -88,32 +84,64 @@ impl<D: DataRef> WriterTo for LWESwitchingKeyCompressed<D> {
     }
 }

 impl LWESwitchingKeyCompressed<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize) -> Self {
-        Self(GGLWESwitchingKeyCompressed::alloc(
-            n, basek, k, rows, 1, 1, 1,
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWESwitchingKeyCompressed"
+        );
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWESwitchingKeyCompressed"
+        );
+        debug_assert_eq!(
+            infos.rank_out().0,
+            1,
+            "rank_out > 1 is not supported for LWESwitchingKeyCompressed"
+        );
+        Self(GGLWESwitchingKeyCompressed::alloc(infos))
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows) -> Self {
+        Self(GGLWESwitchingKeyCompressed::alloc_with(
+            n,
+            base2k,
+            k,
+            rows,
+            Digits(1),
+            Rank(1),
+            Rank(1),
         ))
     }

-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> usize
+    pub fn alloc_bytes<A>(infos: &A) -> usize
     where
-        Module<B>: VecZnxDftAllocBytes
-            + VecZnxBigNormalize<B>
-            + VecZnxDftApply<B>
-            + SvpApplyDftToDftInplace<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxNormalizeTmpBytes
-            + VecZnxFillUniform
-            + VecZnxSubABInplace
-            + VecZnxAddInplace
-            + VecZnxNormalizeInplace<B>
-            + VecZnxAddNormal
-            + VecZnxNormalize<B>
-            + VecZnxSub
-            + SvpPrepare<B>
-            + SvpPPolAllocBytes
-            + SvpPPolAlloc<B>,
+        A: GGLWELayoutInfos,
     {
-        LWESwitchingKey::encrypt_sk_scratch_space(module, basek, k)
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.rank_out().0,
+            1,
+            "rank_out > 1 is not supported for LWESwitchingKey"
+        );
+        GGLWESwitchingKeyCompressed::alloc_bytes(infos)
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows) -> usize {
+        GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rows, Digits(1), Rank(1), Rank(1))
     }
 }
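Both constructor paths above end in the same `GGLWESwitchingKeyCompressed` allocation: `alloc<A: GGLWELayoutInfos>` accepts any layout-carrying type and checks the LWE restrictions in debug builds, while `alloc_with` pins `digits`/`rank_in`/`rank_out` to 1 itself. A hedged usage sketch, using the `GGLWESwitchingKeyLayout` struct introduced later in this commit (the numeric values are illustrative only, not taken from the commit):

    let layout = GGLWESwitchingKeyLayout {
        n: Degree(1024),
        base2k: Base2K(18),
        k: TorusPrecision(54),
        rows: Rows(3),
        digits: Digits(1), // the debug_assert requires digits == 1
        rank_in: Rank(1),  // and rank_in == rank_out == 1
        rank_out: Rank(1),
    };
    let ksk = LWESwitchingKeyCompressed::alloc(&layout);

    // Equivalent, without building a layout value first:
    let ksk2 = LWESwitchingKeyCompressed::alloc_with(Degree(1024), Base2K(18), TorusPrecision(54), Rows(3));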
@@ -1,15 +1,11 @@
 use poulpy_hal::{
-    api::{
-        SvpApplyDftToDftInplace, SvpPPolAlloc, SvpPPolAllocBytes, SvpPrepare, VecZnxAddInplace, VecZnxAddNormal,
-        VecZnxBigNormalize, VecZnxCopy, VecZnxDftAllocBytes, VecZnxDftApply, VecZnxFillUniform, VecZnxIdftApplyConsume,
-        VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxSub, VecZnxSubABInplace,
-    },
-    layouts::{Backend, Data, DataMut, DataRef, FillUniform, MatZnx, Module, ReaderFrom, Reset, WriterTo},
+    api::{VecZnxCopy, VecZnxFillUniform},
+    layouts::{Backend, Data, DataMut, DataRef, FillUniform, Module, ReaderFrom, WriterTo},
     source::Source,
 };

 use crate::layouts::{
-    Infos, LWEToGLWESwitchingKey,
+    Base2K, Degree, Digits, GGLWELayoutInfos, GLWEInfos, LWEInfos, LWEToGLWESwitchingKey, Rank, Rows, TorusPrecision,
     compressed::{Decompress, GGLWESwitchingKeyCompressed},
 };
 use std::fmt;
@@ -17,9 +13,49 @@ use std::fmt;
 #[derive(PartialEq, Eq, Clone)]
 pub struct LWEToGLWESwitchingKeyCompressed<D: Data>(pub(crate) GGLWESwitchingKeyCompressed<D>);

+impl<D: Data> LWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
+    fn n(&self) -> Degree {
+        self.0.n()
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.0.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.0.k()
+    }
+    fn size(&self) -> usize {
+        self.0.size()
+    }
+}
+impl<D: Data> GLWEInfos for LWEToGLWESwitchingKeyCompressed<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for LWEToGLWESwitchingKeyCompressed<D> {
+    fn digits(&self) -> Digits {
+        self.0.digits()
+    }
+
+    fn rank_in(&self) -> Rank {
+        self.0.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.0.rank_out()
+    }
+
+    fn rows(&self) -> Rows {
+        self.0.rows()
+    }
+}
+
 impl<D: DataRef> fmt::Debug for LWEToGLWESwitchingKeyCompressed<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -29,52 +65,12 @@ impl<D: DataMut> FillUniform for LWEToGLWESwitchingKeyCompressed<D> {
     }
 }

-impl<D: DataMut> Reset for LWEToGLWESwitchingKeyCompressed<D> {
-    fn reset(&mut self) {
-        self.0.reset();
-    }
-}
-
 impl<D: DataRef> fmt::Display for LWEToGLWESwitchingKeyCompressed<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(LWEToGLWESwitchingKeyCompressed) {}", self.0)
     }
 }

-impl<D: Data> Infos for LWEToGLWESwitchingKeyCompressed<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.0.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.0.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.0.k()
-    }
-}
-
-impl<D: Data> LWEToGLWESwitchingKeyCompressed<D> {
-    pub fn digits(&self) -> usize {
-        self.0.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.0.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.0.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.0.rank_out()
-    }
-}
-
 impl<D: DataMut> ReaderFrom for LWEToGLWESwitchingKeyCompressed<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
         self.0.read_from(reader)
@@ -88,32 +84,54 @@ impl<D: DataRef> WriterTo for LWEToGLWESwitchingKeyCompressed<D> {
     }
 }

 impl LWEToGLWESwitchingKeyCompressed<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, rank_out: usize) -> Self {
-        Self(GGLWESwitchingKeyCompressed::alloc(
-            n, basek, k, rows, 1, 1, rank_out,
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWEToGLWESwitchingKeyCompressed"
+        );
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWEToGLWESwitchingKeyCompressed"
+        );
+        Self(GGLWESwitchingKeyCompressed::alloc(infos))
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_out: Rank) -> Self {
+        Self(GGLWESwitchingKeyCompressed::alloc_with(
+            n,
+            base2k,
+            k,
+            rows,
+            Digits(1),
+            Rank(1),
+            rank_out,
         ))
     }

-    pub fn encrypt_sk_scratch_space<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank_out: usize) -> usize
+    pub fn alloc_bytes<A>(infos: &A) -> usize
     where
-        Module<B>: VecZnxDftAllocBytes
-            + VecZnxBigNormalize<B>
-            + VecZnxDftApply<B>
-            + SvpApplyDftToDftInplace<B>
-            + VecZnxIdftApplyConsume<B>
-            + VecZnxNormalizeTmpBytes
-            + VecZnxFillUniform
-            + VecZnxSubABInplace
-            + VecZnxAddInplace
-            + VecZnxNormalizeInplace<B>
-            + VecZnxAddNormal
-            + VecZnxNormalize<B>
-            + VecZnxSub
-            + SvpPrepare<B>
-            + SvpPPolAllocBytes
-            + SvpPPolAlloc<B>,
+        A: GGLWELayoutInfos,
     {
-        LWEToGLWESwitchingKey::encrypt_sk_scratch_space(module, basek, k, rank_out)
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWEToGLWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWEToGLWESwitchingKey"
+        );
+        GGLWESwitchingKeyCompressed::alloc_bytes(infos)
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_out: Rank) -> usize {
+        GGLWESwitchingKeyCompressed::alloc_bytes_with(n, base2k, k, rows, Digits(1), Rank(1), rank_out)
     }
 }
@@ -1,22 +1,120 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
     source::Source,
 };

-use crate::layouts::{GGLWESwitchingKey, GLWECiphertext, Infos};
+use crate::layouts::{
+    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWECiphertext, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
+};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

 use std::fmt;

+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GGLWEAutomorphismKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub digits: Digits,
+    pub rank: Rank,
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct GGLWEAutomorphismKey<D: Data> {
     pub(crate) key: GGLWESwitchingKey<D>,
     pub(crate) p: i64,
 }

+impl<D: Data> GGLWEAutomorphismKey<D> {
+    pub fn p(&self) -> i64 {
+        self.p
+    }
+}
+
+impl<D: Data> LWEInfos for GGLWEAutomorphismKey<D> {
+    fn n(&self) -> Degree {
+        self.key.n()
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.key.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.key.k()
+    }
+
+    fn size(&self) -> usize {
+        self.key.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GGLWEAutomorphismKey<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for GGLWEAutomorphismKey<D> {
+    fn rank_in(&self) -> Rank {
+        self.key.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.key.rank_out()
+    }
+
+    fn digits(&self) -> Digits {
+        self.key.digits()
+    }
+
+    fn rows(&self) -> Rows {
+        self.key.rows()
+    }
+}
+
+impl LWEInfos for GGLWEAutomorphismKeyLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+}
+
+impl GLWEInfos for GGLWEAutomorphismKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank
+    }
+}
+
+impl GGLWELayoutInfos for GGLWEAutomorphismKeyLayout {
+    fn rank_in(&self) -> Rank {
+        self.rank
+    }
+
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.rank
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}
+
 impl<D: DataRef> fmt::Debug for GGLWEAutomorphismKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -26,16 +124,6 @@ impl<D: DataMut> FillUniform for GGLWEAutomorphismKey<D> {
     }
 }

-impl<D: DataMut> Reset for GGLWEAutomorphismKey<D>
-where
-    MatZnx<D>: Reset,
-{
-    fn reset(&mut self) {
-        self.key.reset();
-        self.p = 0;
-    }
-}
-
 impl<D: DataRef> fmt::Display for GGLWEAutomorphismKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(AutomorphismKey: p={}) {}", self.p, self.key)
@@ -43,53 +131,42 @@ impl<D: DataRef> fmt::Display for GGLWEAutomorphismKey<D> {
     }
 }

 impl GGLWEAutomorphismKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
+        );
         GGLWEAutomorphismKey {
-            key: GGLWESwitchingKey::alloc(n, basek, k, rows, digits, rank, rank),
+            key: GGLWESwitchingKey::alloc(infos),
             p: 0,
         }
     }

-    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
-        GGLWESwitchingKey::bytes_of(n, basek, k, rows, digits, rank, rank)
-    }
-}
-
-impl<D: Data> Infos for GGLWEAutomorphismKey<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.key.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.key.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.key.k()
-    }
-}
-
-impl<D: Data> GGLWEAutomorphismKey<D> {
-    pub fn p(&self) -> i64 {
-        self.p
-    }
-
-    pub fn digits(&self) -> usize {
-        self.key.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.key.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.key.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.key.rank_out()
-    }
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self {
+        GGLWEAutomorphismKey {
+            key: GGLWESwitchingKey::alloc_with(n, base2k, k, rows, digits, rank, rank),
+            p: 0,
+        }
+    }
+
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWEAutomorphismKey"
+        );
+        GGLWESwitchingKey::alloc_bytes(infos)
+    }
+
+    pub fn bytes_of(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> usize {
+        GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rows, digits, rank, rank)
+    }
 }
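Because `GGLWEAutomorphismKeyLayout` carries a single `rank` field that serves as both `rank_in` and `rank_out`, the `rank_in == rank_out` assertion in `alloc` holds by construction whenever a layout value is used. A hedged sketch (parameter values illustrative only):

    let layout = GGLWEAutomorphismKeyLayout {
        n: Degree(1024),
        base2k: Base2K(18),
        k: TorusPrecision(54),
        rows: Rows(3),
        digits: Digits(1),
        rank: Rank(2), // used for both rank_in and rank_out
    };
    let atk = GGLWEAutomorphismKey::alloc(&layout);
    assert_eq!(atk.p(), 0); // freshly allocated keys start with automorphism exponent p == 0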
@@ -1,24 +1,249 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, WriterTo, ZnxInfos},
     source::Source,
 };

-use crate::layouts::{GLWECiphertext, Infos};
+use crate::layouts::{Base2K, BuildError, Degree, Digits, GLWECiphertext, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

 use std::fmt;

+pub trait GGLWELayoutInfos
+where
+    Self: GLWEInfos,
+{
+    fn rows(&self) -> Rows;
+    fn digits(&self) -> Digits;
+    fn rank_in(&self) -> Rank;
+    fn rank_out(&self) -> Rank;
+    fn layout(&self) -> GGLWECiphertextLayout {
+        GGLWECiphertextLayout {
+            n: self.n(),
+            base2k: self.base2k(),
+            k: self.k(),
+            rank_in: self.rank_in(),
+            rank_out: self.rank_out(),
+            digits: self.digits(),
+            rows: self.rows(),
+        }
+    }
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GGLWECiphertextLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub digits: Digits,
+    pub rank_in: Rank,
+    pub rank_out: Rank,
+}
+
+impl LWEInfos for GGLWECiphertextLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+}
+
+impl GLWEInfos for GGLWECiphertextLayout {
+    fn rank(&self) -> Rank {
+        self.rank_out
+    }
+}
+
+impl GGLWELayoutInfos for GGLWECiphertextLayout {
+    fn rank_in(&self) -> Rank {
+        self.rank_in
+    }
+
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.rank_out
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct GGLWECiphertext<D: Data> {
     pub(crate) data: MatZnx<D>,
-    pub(crate) basek: usize,
-    pub(crate) k: usize,
-    pub(crate) digits: usize,
+    pub(crate) k: TorusPrecision,
+    pub(crate) base2k: Base2K,
+    pub(crate) digits: Digits,
 }

+impl<D: Data> LWEInfos for GGLWECiphertext<D> {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GGLWECiphertext<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for GGLWECiphertext<D> {
+    fn rank_in(&self) -> Rank {
+        Rank(self.data.cols_in() as u32)
+    }
+
+    fn rank_out(&self) -> Rank {
+        Rank(self.data.cols_out() as u32 - 1)
+    }
+
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rows(&self) -> Rows {
+        Rows(self.data.rows() as u32)
+    }
+}
+
+pub struct GGLWECiphertextBuilder<D: Data> {
+    data: Option<MatZnx<D>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+    digits: Option<Digits>,
+}
+
+impl<D: Data> GGLWECiphertext<D> {
+    #[inline]
+    pub fn builder() -> GGLWECiphertextBuilder<D> {
+        GGLWECiphertextBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+            digits: None,
+        }
+    }
+}
+
+impl GGLWECiphertextBuilder<Vec<u8>> {
+    #[inline]
+    pub fn layout<A>(mut self, infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        self.data = Some(MatZnx::alloc(
+            infos.n().into(),
+            infos.rows().into(),
+            infos.rank_in().into(),
+            (infos.rank_out() + 1).into(),
+            infos.size(),
+        ));
+        self.base2k = Some(infos.base2k());
+        self.k = Some(infos.k());
+        self.digits = Some(infos.digits());
+        self
+    }
+}
+
+impl<D: Data> GGLWECiphertextBuilder<D> {
+    #[inline]
+    pub fn data(mut self, data: MatZnx<D>) -> Self {
+        self.data = Some(data);
+        self
+    }
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    #[inline]
+    pub fn digits(mut self, digits: Digits) -> Self {
+        self.digits = Some(digits);
+        self
+    }
+
+    pub fn build(self) -> Result<GGLWECiphertext<D>, BuildError> {
+        let data: MatZnx<D> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+        let digits: Digits = self.digits.ok_or(BuildError::MissingDigits)?;
+
+        if base2k == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if digits == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if k == 0_u32 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() == 0 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(GGLWECiphertext {
+            data,
+            base2k,
+            k,
+            digits,
+        })
+    }
+}
+
+impl<D: DataRef> GGLWECiphertext<D> {
+    pub fn data(&self) -> &MatZnx<D> {
+        &self.data
+    }
+}
+
+impl<D: DataMut> GGLWECiphertext<D> {
+    pub fn data_mut(&mut self) -> &mut MatZnx<D> {
+        &mut self.data
+    }
+}
+
 impl<D: DataRef> fmt::Debug for GGLWECiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -28,140 +253,156 @@ impl<D: DataMut> FillUniform for GGLWECiphertext<D> {
     }
 }

-impl<D: DataMut> Reset for GGLWECiphertext<D> {
-    fn reset(&mut self) {
-        self.data.reset();
-        self.basek = 0;
-        self.k = 0;
-        self.digits = 0;
-    }
-}
-
 impl<D: DataRef> fmt::Display for GGLWECiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
-            "(GGLWECiphertext: basek={} k={} digits={}) {}",
-            self.basek, self.k, self.digits, self.data
+            "(GGLWECiphertext: k={} base2k={} digits={}) {}",
+            self.k().0,
+            self.base2k().0,
+            self.digits().0,
+            self.data
         )
     }
 }

 impl<D: DataRef> GGLWECiphertext<D> {
     pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> {
-        GLWECiphertext {
-            data: self.data.at(row, col),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .data(self.data.at(row, col))
+            .base2k(self.base2k())
+            .k(self.k())
+            .build()
+            .unwrap()
     }
 }

 impl<D: DataMut> GGLWECiphertext<D> {
     pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertext<&mut [u8]> {
-        GLWECiphertext {
-            data: self.data.at_mut(row, col),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .base2k(self.base2k())
+            .k(self.k())
+            .data(self.data.at_mut(row, col))
+            .build()
+            .unwrap()
     }
 }

 impl GGLWECiphertext<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
-        let size: usize = k.div_ceil(basek);
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        Self::alloc_with(
+            infos.n(),
+            infos.base2k(),
+            infos.k(),
+            infos.rows(),
+            infos.digits(),
+            infos.rank_in(),
+            infos.rank_out(),
+        )
+    }
+
+    pub fn alloc_with(
+        n: Degree,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
+    ) -> Self {
+        let size: usize = k.0.div_ceil(base2k.0) as usize;
         debug_assert!(
-            size > digits,
-            "invalid gglwe: ceil(k/basek): {} <= digits: {}",
-            size,
-            digits
+            size as u32 > digits.0,
+            "invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
+            digits.0
         );

         assert!(
-            rows * digits <= size,
-            "invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
-            rows,
-            digits,
-            size
+            rows.0 * digits.0 <= size as u32,
+            "invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
+            rows.0,
+            digits.0,
         );

         Self {
-            data: MatZnx::alloc(n, rows, rank_in, rank_out + 1, size),
-            basek,
+            data: MatZnx::alloc(
+                n.into(),
+                rows.into(),
+                rank_in.into(),
+                (rank_out + 1).into(),
+                k.0.div_ceil(base2k.0) as usize,
+            ),
             k,
+            base2k,
             digits,
         }
     }

-    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> usize {
-        let size: usize = k.div_ceil(basek);
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        Self::alloc_bytes_with(
+            infos.n(),
+            infos.base2k(),
+            infos.k(),
+            infos.rows(),
+            infos.digits(),
+            infos.rank_in(),
+            infos.rank_out(),
+        )
+    }
+
+    pub fn alloc_bytes_with(
+        n: Degree,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
+    ) -> usize {
+        let size: usize = k.0.div_ceil(base2k.0) as usize;
         debug_assert!(
-            size > digits,
-            "invalid gglwe: ceil(k/basek): {} <= digits: {}",
-            size,
-            digits
+            size as u32 > digits.0,
+            "invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
+            digits.0
         );

         assert!(
-            rows * digits <= size,
-            "invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
-            rows,
-            digits,
-            size
+            rows.0 * digits.0 <= size as u32,
+            "invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
+            rows.0,
+            digits.0,
         );

-        MatZnx::alloc_bytes(n, rows, rank_in, rank_out + 1, rows)
-    }
-}
-
-impl<D: Data> Infos for GGLWECiphertext<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<D: Data> GGLWECiphertext<D> {
-    pub fn rank(&self) -> usize {
-        self.data.cols_out() - 1
-    }
-
-    pub fn digits(&self) -> usize {
-        self.digits
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.data.cols_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.data.cols_out() - 1
-    }
+        MatZnx::alloc_bytes(
+            n.into(),
+            rows.into(),
+            rank_in.into(),
+            (rank_out + 1).into(),
+            k.0.div_ceil(base2k.0) as usize,
+        )
+    }
 }

 impl<D: DataMut> ReaderFrom for GGLWECiphertext<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
-        self.k = reader.read_u64::<LittleEndian>()? as usize;
-        self.basek = reader.read_u64::<LittleEndian>()? as usize;
-        self.digits = reader.read_u64::<LittleEndian>()? as usize;
+        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
+        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
+        self.digits = Digits(reader.read_u32::<LittleEndian>()?);
         self.data.read_from(reader)
     }
 }

 impl<D: DataRef> WriterTo for GGLWECiphertext<D> {
     fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
-        writer.write_u64::<LittleEndian>(self.k as u64)?;
-        writer.write_u64::<LittleEndian>(self.basek as u64)?;
-        writer.write_u64::<LittleEndian>(self.digits as u64)?;
+        writer.write_u32::<LittleEndian>(self.k.0)?;
+        writer.write_u32::<LittleEndian>(self.base2k.0)?;
+        writer.write_u32::<LittleEndian>(self.digits.0)?;
         self.data.write_to(writer)
     }
 }
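The builder introduced above replaces direct struct construction: `layout()` allocates the backing `MatZnx` and copies `base2k`/`k`/`digits` from any `GGLWELayoutInfos` implementor, while `build()` reports missing or zero fields through the new `BuildError` variants instead of panicking. A hedged sketch of both paths (field values are illustrative only):

    let ct_layout = GGLWECiphertextLayout {
        n: Degree(1024),
        base2k: Base2K(18),
        k: TorusPrecision(54),
        rows: Rows(3),
        digits: Digits(1),
        rank_in: Rank(1),
        rank_out: Rank(1),
    };

    // Path 1: geometry taken from a layout value.
    let ct = GGLWECiphertext::<Vec<u8>>::builder()
        .layout(&ct_layout)
        .build()
        .expect("layout() fills every field with a non-zero value");

    // Path 2: field-by-field; a forgotten field surfaces as an Err, not a panic.
    let missing = GGLWECiphertext::<Vec<u8>>::builder()
        .base2k(Base2K(18))
        .k(TorusPrecision(54))
        .digits(Digits(1))
        .build();
    assert!(missing.is_err()); // BuildError::MissingData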
@@ -1,13 +1,64 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
     source::Source,
 };

-use crate::layouts::{GGLWECiphertext, GLWECiphertext, Infos};
+use crate::layouts::{
+    Base2K, Degree, Digits, GGLWECiphertext, GGLWELayoutInfos, GLWECiphertext, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
+};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

 use std::fmt;

+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GGLWESwitchingKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub digits: Digits,
+    pub rank_in: Rank,
+    pub rank_out: Rank,
+}
+
+impl LWEInfos for GGLWESwitchingKeyLayout {
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+}
+
+impl GLWEInfos for GGLWESwitchingKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl GGLWELayoutInfos for GGLWESwitchingKeyLayout {
+    fn rank_in(&self) -> Rank {
+        self.rank_in
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.rank_out
+    }
+
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct GGLWESwitchingKey<D: Data> {
     pub(crate) key: GGLWECiphertext<D>,
@@ -15,9 +66,51 @@ pub struct GGLWESwitchingKey<D: Data> {
     pub(crate) sk_out_n: usize, // Degree of sk_out
 }

+impl<D: Data> LWEInfos for GGLWESwitchingKey<D> {
+    fn n(&self) -> Degree {
+        self.key.n()
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.key.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.key.k()
+    }
+
+    fn size(&self) -> usize {
+        self.key.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GGLWESwitchingKey<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for GGLWESwitchingKey<D> {
+    fn rank_in(&self) -> Rank {
+        self.key.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.key.rank_out()
+    }
+
+    fn digits(&self) -> Digits {
+        self.key.digits()
+    }
+
+    fn rows(&self) -> Rows {
+        self.key.rows()
+    }
+}
+
 impl<D: DataRef> fmt::Debug for GGLWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -26,7 +119,9 @@ impl<D: DataRef> fmt::Display for GGLWESwitchingKey<D> {
         write!(
             f,
             "(GLWESwitchingKey: sk_in_n={} sk_out_n={}) {}",
-            self.sk_in_n, self.sk_out_n, self.key.data
+            self.sk_in_n,
+            self.sk_out_n,
+            self.key.data()
         )
     }
 }
@@ -37,70 +132,51 @@ impl<D: DataMut> FillUniform for GGLWESwitchingKey<D> {
     }
 }

-impl<D: DataMut> Reset for GGLWESwitchingKey<D>
-where
-    MatZnx<D>: Reset,
-{
-    fn reset(&mut self) {
-        self.key.reset();
-        self.sk_in_n = 0;
-        self.sk_out_n = 0;
-    }
-}
-
 impl GGLWESwitchingKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
         GGLWESwitchingKey {
-            key: GGLWECiphertext::alloc(n, basek, k, rows, digits, rank_in, rank_out),
+            key: GGLWECiphertext::alloc(infos),
             sk_in_n: 0,
             sk_out_n: 0,
         }
     }

-    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> usize {
-        GGLWECiphertext::<Vec<u8>>::bytes_of(n, basek, k, rows, digits, rank_in, rank_out)
-    }
-}
-
-impl<D: Data> Infos for GGLWESwitchingKey<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.key.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.key.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.key.k()
-    }
-}
-
-impl<D: Data> GGLWESwitchingKey<D> {
-    pub fn rank(&self) -> usize {
-        self.key.data.cols_out() - 1
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.key.data.cols_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.key.data.cols_out() - 1
-    }
-
-    pub fn digits(&self) -> usize {
-        self.key.digits()
-    }
-
-    pub fn sk_degree_in(&self) -> usize {
-        self.sk_in_n
-    }
-
-    pub fn sk_degree_out(&self) -> usize {
-        self.sk_out_n
-    }
+    pub fn alloc_with(
+        n: Degree,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
+    ) -> Self {
+        GGLWESwitchingKey {
+            key: GGLWECiphertext::alloc_with(n, base2k, k, rows, digits, rank_in, rank_out),
+            sk_in_n: 0,
+            sk_out_n: 0,
+        }
+    }
+
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        GGLWECiphertext::alloc_bytes(infos)
+    }
+
+    pub fn alloc_bytes_with(
+        n: Degree,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
+    ) -> usize {
+        GGLWECiphertext::alloc_bytes_with(n, base2k, k, rows, digits, rank_in, rank_out)
+    }
 }
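Every key wrapper in this commit forwards these trait methods to its inner `GGLWECiphertext`, so the `layout()` default method of `GGLWELayoutInfos` (defined in the ciphertext hunk above) can snapshot any key's geometry and feed it back into an allocator. A hedged sketch, assuming `ksk` is an existing `GGLWESwitchingKey`:

    let snapshot: GGLWECiphertextLayout = ksk.layout(); // n, base2k, k, rows, digits, ranks
    let clone = GGLWESwitchingKey::alloc(&snapshot);    // same geometry, freshly allocated
    assert_eq!(clone.rank_in(), ksk.rank_in());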
@@ -1,21 +1,113 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
     source::Source,
 };

-use crate::layouts::{GGLWESwitchingKey, Infos};
+use crate::layouts::{
+    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
+};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

 use std::fmt;

+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GGLWETensorKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub digits: Digits,
+    pub rank: Rank,
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct GGLWETensorKey<D: Data> {
     pub(crate) keys: Vec<GGLWESwitchingKey<D>>,
 }

+impl<D: Data> LWEInfos for GGLWETensorKey<D> {
+    fn n(&self) -> Degree {
+        self.keys[0].n()
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.keys[0].base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.keys[0].k()
+    }
+
+    fn size(&self) -> usize {
+        self.keys[0].size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GGLWETensorKey<D> {
+    fn rank(&self) -> Rank {
+        self.keys[0].rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for GGLWETensorKey<D> {
+    fn rank_in(&self) -> Rank {
+        self.rank_out()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.keys[0].rank_out()
+    }
+
+    fn digits(&self) -> Digits {
+        self.keys[0].digits()
+    }
+
+    fn rows(&self) -> Rows {
+        self.keys[0].rows()
+    }
+}
+
+impl LWEInfos for GGLWETensorKeyLayout {
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+}
+
+impl GLWEInfos for GGLWETensorKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl GGLWELayoutInfos for GGLWETensorKeyLayout {
+    fn rank_in(&self) -> Rank {
+        self.rank
+    }
+
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.rank
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}
+
 impl<D: DataRef> fmt::Debug for GGLWETensorKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -27,74 +119,79 @@ impl<D: DataMut> FillUniform for GGLWETensorKey<D> {
     }
 }

-impl<D: DataMut> Reset for GGLWETensorKey<D>
-where
-    MatZnx<D>: Reset,
-{
-    fn reset(&mut self) {
-        self.keys
-            .iter_mut()
-            .for_each(|key: &mut GGLWESwitchingKey<D>| key.reset())
-    }
-}
-
 impl<D: DataRef> fmt::Display for GGLWETensorKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         writeln!(f, "(GLWETensorKey)",)?;
         for (i, key) in self.keys.iter().enumerate() {
-            write!(f, "{}: {}", i, key)?;
+            write!(f, "{i}: {key}")?;
         }
         Ok(())
     }
 }

 impl GGLWETensorKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWETensorKey"
+        );
+        Self::alloc_with(
+            infos.n(),
+            infos.base2k(),
+            infos.k(),
+            infos.rows(),
+            infos.digits(),
+            infos.rank_out(),
+        )
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self {
         let mut keys: Vec<GGLWESwitchingKey<Vec<u8>>> = Vec::new();
-        let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
+        let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
         (0..pairs).for_each(|_| {
-            keys.push(GGLWESwitchingKey::alloc(n, basek, k, rows, digits, 1, rank));
+            keys.push(GGLWESwitchingKey::alloc_with(
+                n,
+                base2k,
+                k,
+                rows,
+                digits,
+                Rank(1),
+                rank,
+            ));
         });
         Self { keys }
     }

-    pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
-        let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
-        pairs * GGLWESwitchingKey::<Vec<u8>>::bytes_of(n, basek, k, rows, digits, 1, rank)
-    }
-}
-
-impl<D: Data> Infos for GGLWETensorKey<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.keys[0].inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.keys[0].basek()
-    }
-
-    fn k(&self) -> usize {
-        self.keys[0].k()
-    }
-}
-
-impl<D: Data> GGLWETensorKey<D> {
-    pub fn rank(&self) -> usize {
-        self.keys[0].rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.keys[0].rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.keys[0].rank_out()
-    }
-
-    pub fn digits(&self) -> usize {
-        self.keys[0].digits()
-    }
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWETensorKey"
+        );
+        let rank_out: usize = infos.rank_out().into();
+        let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1);
+        pairs
+            * GGLWESwitchingKey::alloc_bytes_with(
+                infos.n(),
+                infos.base2k(),
+                infos.k(),
+                infos.rows(),
+                infos.digits(),
+                Rank(1),
+                infos.rank_out(),
+            )
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> usize {
+        let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
+        pairs * GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rows, digits, Rank(1), rank)
+    }
 }

@@ -104,7 +201,7 @@ impl<D: DataMut> GGLWETensorKey<D> {
         if i > j {
             std::mem::swap(&mut i, &mut j);
         };
-        let rank: usize = self.rank();
+        let rank: usize = self.rank_out().into();
         &mut self.keys[i * rank + j - (i * (i + 1) / 2)]
     }
 }
@@ -115,7 +212,7 @@ impl<D: DataRef> GGLWETensorKey<D> {
         if i > j {
             std::mem::swap(&mut i, &mut j);
        };
-        let rank: usize = self.rank();
+        let rank: usize = self.rank_out().into();
         &self.keys[i * rank + j - (i * (i + 1) / 2)]
     }
 }
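The accessors above index the tensor key's switching keys as the upper triangle of a symmetric rank x rank matrix, which is why `alloc_with` creates `((rank + 1) * rank) / 2` keys and the lookups use `i * rank + j - i * (i + 1) / 2` after ordering `i <= j`. A standalone check of that indexing, in plain Rust independent of the crate:

    // Mirrors the index computation used by at()/at_mut() above.
    fn pair_index(mut i: usize, mut j: usize, rank: usize) -> usize {
        if i > j {
            std::mem::swap(&mut i, &mut j);
        }
        i * rank + j - (i * (i + 1) / 2)
    }

    fn main() {
        let rank = 3;
        let pairs = ((rank + 1) * rank) / 2; // 6 keys for rank 3
        let mut hits = vec![0usize; pairs];
        for i in 0..rank {
            for j in i..rank {
                hits[pair_index(i, j, rank)] += 1;
            }
        }
        // Every unordered pair (i, j) maps to a unique slot in 0..pairs.
        assert!(hits.iter().all(|&h| h == 1));
    }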
@@ -1,17 +1,224 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, WriterTo, ZnxInfos},
     source::Source,
 };
 use std::fmt;

-use crate::layouts::{GLWECiphertext, Infos};
+use crate::layouts::{Base2K, BuildError, Degree, Digits, GLWECiphertext, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision};

+pub trait GGSWInfos
+where
+    Self: GLWEInfos,
+{
+    fn rows(&self) -> Rows;
+    fn digits(&self) -> Digits;
+    fn layout(&self) -> GGSWCiphertextLayout {
+        GGSWCiphertextLayout {
+            n: self.n(),
+            base2k: self.base2k(),
+            k: self.k(),
+            rank: self.rank(),
+            rows: self.rows(),
+            digits: self.digits(),
+        }
+    }
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GGSWCiphertextLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub digits: Digits,
+    pub rank: Rank,
+}
+
+impl LWEInfos for GGSWCiphertextLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+}
+impl GLWEInfos for GGSWCiphertextLayout {
+    fn rank(&self) -> Rank {
+        self.rank
+    }
+}
+
+impl GGSWInfos for GGSWCiphertextLayout {
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct GGSWCiphertext<D: Data> {
     pub(crate) data: MatZnx<D>,
-    pub(crate) basek: usize,
-    pub(crate) k: usize,
-    pub(crate) digits: usize,
+    pub(crate) k: TorusPrecision,
+    pub(crate) base2k: Base2K,
+    pub(crate) digits: Digits,
 }

+impl<D: Data> LWEInfos for GGSWCiphertext<D> {
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GGSWCiphertext<D> {
+    fn rank(&self) -> Rank {
+        Rank(self.data.cols_out() as u32 - 1)
+    }
+}
+
+impl<D: Data> GGSWInfos for GGSWCiphertext<D> {
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rows(&self) -> Rows {
+        Rows(self.data.rows() as u32)
+    }
+}
+
+pub struct GGSWCiphertextBuilder<D: Data> {
+    data: Option<MatZnx<D>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+    digits: Option<Digits>,
+}
+
+impl<D: Data> GGSWCiphertext<D> {
+    #[inline]
+    pub fn builder() -> GGSWCiphertextBuilder<D> {
+        GGSWCiphertextBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+            digits: None,
+        }
+    }
+}
+
+impl GGSWCiphertextBuilder<Vec<u8>> {
+    #[inline]
+    pub fn layout<A>(mut self, infos: &A) -> Self
+    where
+        A: GGSWInfos,
+    {
+        debug_assert!(
+            infos.size() as u32 > infos.digits().0,
+            "invalid ggsw: ceil(k/base2k): {} <= digits: {}",
+            infos.size(),
+            infos.digits()
+        );

+        assert!(
+            infos.rows().0 * infos.digits().0 <= infos.size() as u32,
+            "invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {}",
+            infos.rows(),
+            infos.digits(),
+            infos.size(),
+        );

+        self.data = Some(MatZnx::alloc(
+            infos.n().into(),
+            infos.rows().into(),
+            (infos.rank() + 1).into(),
+            (infos.rank() + 1).into(),
+            infos.size(),
+        ));
+        self.base2k = Some(infos.base2k());
+        self.k = Some(infos.k());
+        self.digits = Some(infos.digits());
+        self
+    }
+}
+
+impl<D: Data> GGSWCiphertextBuilder<D> {
+    #[inline]
+    pub fn data(mut self, data: MatZnx<D>) -> Self {
+        self.data = Some(data);
+        self
+    }
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    #[inline]
+    pub fn digits(mut self, digits: Digits) -> Self {
+        self.digits = Some(digits);
+        self
+    }
+
+    pub fn build(self) -> Result<GGSWCiphertext<D>, BuildError> {
+        let data: MatZnx<D> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+        let digits: Digits = self.digits.ok_or(BuildError::MissingDigits)?;
+
+        if base2k == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if digits == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if k == 0_u32 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() == 0 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(GGSWCiphertext {
+            data,
+            base2k,
+            k,
+            digits,
+        })
+    }
+}
+
 impl<D: DataRef> fmt::Debug for GGSWCiphertext<D> {
@@ -24,21 +231,15 @@ impl<D: DataRef> fmt::Display for GGSWCiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
-            "(GGSWCiphertext: basek={} k={} digits={}) {}",
-            self.basek, self.k, self.digits, self.data
+            "(GGSWCiphertext: k: {} base2k: {} digits: {}) {}",
+            self.k().0,
+            self.base2k().0,
+            self.digits().0,
+            self.data
         )
     }
 }

-impl<D: DataMut> Reset for GGSWCiphertext<D> {
-    fn reset(&mut self) {
-        self.data.reset();
-        self.basek = 0;
-        self.k = 0;
-        self.digits = 0;
-    }
-}
-
 impl<D: DataMut> FillUniform for GGSWCiphertext<D> {
     fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
         self.data.fill_uniform(log_bound, source);
@@ -47,96 +248,106 @@ impl<D: DataMut> FillUniform for GGSWCiphertext<D> {

 impl<D: DataRef> GGSWCiphertext<D> {
     pub fn at(&self, row: usize, col: usize) -> GLWECiphertext<&[u8]> {
-        GLWECiphertext {
-            data: self.data.at(row, col),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .data(self.data.at(row, col))
+            .base2k(self.base2k())
+            .k(self.k())
+            .build()
+            .unwrap()
     }
 }

 impl<D: DataMut> GGSWCiphertext<D> {
     pub fn at_mut(&mut self, row: usize, col: usize) -> GLWECiphertext<&mut [u8]> {
-        GLWECiphertext {
-            data: self.data.at_mut(row, col),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .base2k(self.base2k())
+            .k(self.k())
+            .data(self.data.at_mut(row, col))
+            .build()
+            .unwrap()
     }
 }

 impl GGSWCiphertext<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self {
|
pub fn alloc<A>(infos: &A) -> Self
|
||||||
let size: usize = k.div_ceil(basek);
|
where
|
||||||
debug_assert!(digits > 0, "invalid ggsw: `digits` == 0");
|
A: GGSWInfos,
|
||||||
|
{
|
||||||
|
Self::alloc_with(
|
||||||
|
infos.n(),
|
||||||
|
infos.base2k(),
|
||||||
|
infos.k(),
|
||||||
|
infos.rows(),
|
||||||
|
infos.digits(),
|
||||||
|
infos.rank(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self {
|
||||||
|
let size: usize = k.0.div_ceil(base2k.0) as usize;
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
size > digits,
|
size as u32 > digits.0,
|
||||||
"invalid ggsw: ceil(k/basek): {} <= digits: {}",
|
"invalid ggsw: ceil(k/base2k): {size} <= digits: {}",
|
||||||
size,
|
digits.0
|
||||||
digits
|
|
||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
rows * digits <= size,
|
rows.0 * digits.0 <= size as u32,
|
||||||
"invalid ggsw: rows: {} * digits:{} > ceil(k/basek): {}",
|
"invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {size}",
|
||||||
rows,
|
rows.0,
|
||||||
digits,
|
digits.0,
|
||||||
size
|
|
||||||
);
|
);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
data: MatZnx::alloc(n, rows, rank + 1, rank + 1, k.div_ceil(basek)),
|
data: MatZnx::alloc(
|
||||||
basek,
|
n.into(),
|
||||||
|
rows.into(),
|
||||||
|
(rank + 1).into(),
|
||||||
|
(rank + 1).into(),
|
||||||
|
k.0.div_ceil(base2k.0) as usize,
|
||||||
|
),
|
||||||
k,
|
k,
|
||||||
|
base2k,
|
||||||
digits,
|
digits,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes_of(n: usize, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize {
|
pub fn alloc_bytes<A>(infos: &A) -> usize
|
||||||
let size: usize = k.div_ceil(basek);
|
where
|
||||||
|
A: GGSWInfos,
|
||||||
|
{
|
||||||
|
Self::alloc_bytes_with(
|
||||||
|
infos.n(),
|
||||||
|
infos.base2k(),
|
||||||
|
infos.k(),
|
||||||
|
infos.rows(),
|
||||||
|
infos.digits(),
|
||||||
|
infos.rank(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> usize {
|
||||||
|
let size: usize = k.0.div_ceil(base2k.0) as usize;
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
size > digits,
|
size as u32 > digits.0,
|
||||||
"invalid ggsw: ceil(k/basek): {} <= digits: {}",
|
"invalid ggsw: ceil(k/base2k): {size} <= digits: {}",
|
||||||
size,
|
digits.0
|
||||||
digits
|
|
||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
rows * digits <= size,
|
rows.0 * digits.0 <= size as u32,
|
||||||
"invalid ggsw: rows: {} * digits:{} > ceil(k/basek): {}",
|
"invalid ggsw: rows: {} * digits:{} > ceil(k/base2k): {size}",
|
||||||
rows,
|
rows.0,
|
||||||
digits,
|
digits.0,
|
||||||
size
|
|
||||||
);
|
);
|
||||||
|
|
||||||
MatZnx::alloc_bytes(n, rows, rank + 1, rank + 1, size)
|
MatZnx::alloc_bytes(
|
||||||
}
|
n.into(),
|
||||||
}
|
rows.into(),
|
||||||
|
(rank + 1).into(),
|
||||||
impl<D: Data> Infos for GGSWCiphertext<D> {
|
(rank + 1).into(),
|
||||||
type Inner = MatZnx<D>;
|
k.0.div_ceil(base2k.0) as usize,
|
||||||
|
)
|
||||||
fn inner(&self) -> &Self::Inner {
|
|
||||||
&self.data
|
|
||||||
}
|
|
||||||
|
|
||||||
fn basek(&self) -> usize {
|
|
||||||
self.basek
|
|
||||||
}
|
|
||||||
|
|
||||||
fn k(&self) -> usize {
|
|
||||||
self.k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<D: Data> GGSWCiphertext<D> {
|
|
||||||
pub fn rank(&self) -> usize {
|
|
||||||
self.data.cols_out() - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn digits(&self) -> usize {
|
|
||||||
self.digits
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -144,18 +355,18 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
|||||||
|
|
||||||
impl<D: DataMut> ReaderFrom for GGSWCiphertext<D> {
|
impl<D: DataMut> ReaderFrom for GGSWCiphertext<D> {
|
||||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||||
self.k = reader.read_u64::<LittleEndian>()? as usize;
|
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
|
||||||
self.basek = reader.read_u64::<LittleEndian>()? as usize;
|
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
|
||||||
self.digits = reader.read_u64::<LittleEndian>()? as usize;
|
self.digits = Digits(reader.read_u32::<LittleEndian>()?);
|
||||||
self.data.read_from(reader)
|
self.data.read_from(reader)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataRef> WriterTo for GGSWCiphertext<D> {
|
impl<D: DataRef> WriterTo for GGSWCiphertext<D> {
|
||||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||||
writer.write_u64::<LittleEndian>(self.k as u64)?;
|
writer.write_u32::<LittleEndian>(self.k.into())?;
|
||||||
writer.write_u64::<LittleEndian>(self.basek as u64)?;
|
writer.write_u32::<LittleEndian>(self.base2k.into())?;
|
||||||
writer.write_u64::<LittleEndian>(self.digits as u64)?;
|
writer.write_u32::<LittleEndian>(self.digits.into())?;
|
||||||
self.data.write_to(writer)
|
self.data.write_to(writer)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
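Aside (editor's note, not part of the diff): the new builder above replaces direct struct literals. A minimal usage sketch from crate-internal code, with illustrative parameters (size 3 = ceil(k/base2k) = ceil(54/18)); the MatZnx dimensions follow the `layout()` method: n, rows, rank+1, rank+1, size.

    // Hypothetical values; any (n, rows, rank, size) consistent with the
    // layout() assertions (size > digits, rows * digits <= size) works.
    let ggsw: GGSWCiphertext<Vec<u8>> = GGSWCiphertext::builder()
        .data(MatZnx::alloc(16, 2, 2, 2, 3)) // n=16, rows=2, rank=1, size=3
        .base2k(Base2K(18))
        .k(TorusPrecision(54))
        .digits(Digits(1))
        .build()
        .expect("all fields set and non-zero");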
@@ -1,17 +1,193 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, Reset, ToOwnedDeep, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo},
+    layouts::{
+        Data, DataMut, DataRef, FillUniform, ReaderFrom, ToOwnedDeep, VecZnx, VecZnxToMut, VecZnxToRef, WriterTo, ZnxInfos,
+    },
     source::Source,
 };

-use crate::layouts::{Infos, SetMetaData};
+use crate::layouts::{Base2K, BuildError, Degree, LWEInfos, Rank, TorusPrecision};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 use std::fmt;

+pub trait GLWEInfos
+where
+    Self: LWEInfos,
+{
+    fn rank(&self) -> Rank;
+    fn glwe_layout(&self) -> GLWECiphertextLayout {
+        GLWECiphertextLayout {
+            n: self.n(),
+            base2k: self.base2k(),
+            k: self.k(),
+            rank: self.rank(),
+        }
+    }
+}
+
+pub trait GLWELayoutSet {
+    fn set_k(&mut self, k: TorusPrecision);
+    fn set_basek(&mut self, base2k: Base2K);
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GLWECiphertextLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rank: Rank,
+}
+
+impl LWEInfos for GLWECiphertextLayout {
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+}
+
+impl GLWEInfos for GLWECiphertextLayout {
+    fn rank(&self) -> Rank {
+        self.rank
+    }
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct GLWECiphertext<D: Data> {
-    pub data: VecZnx<D>,
-    pub basek: usize,
-    pub k: usize,
+    pub(crate) data: VecZnx<D>,
+    pub(crate) base2k: Base2K,
+    pub(crate) k: TorusPrecision,
+}
+
+impl<D: DataMut> GLWELayoutSet for GLWECiphertext<D> {
+    fn set_basek(&mut self, base2k: Base2K) {
+        self.base2k = base2k
+    }
+
+    fn set_k(&mut self, k: TorusPrecision) {
+        self.k = k
+    }
+}
+
+impl<D: DataRef> GLWECiphertext<D> {
+    pub fn data(&self) -> &VecZnx<D> {
+        &self.data
+    }
+}
+
+impl<D: DataMut> GLWECiphertext<D> {
+    pub fn data_mut(&mut self) -> &mut VecZnx<D> {
+        &mut self.data
+    }
+}
+
+pub struct GLWECiphertextBuilder<D: Data> {
+    data: Option<VecZnx<D>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+}
+
+impl<D: Data> GLWECiphertext<D> {
+    #[inline]
+    pub fn builder() -> GLWECiphertextBuilder<D> {
+        GLWECiphertextBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+        }
+    }
+}
+
+impl GLWECiphertextBuilder<Vec<u8>> {
+    #[inline]
+    pub fn layout<A>(mut self, layout: &A) -> Self
+    where
+        A: GLWEInfos,
+    {
+        self.data = Some(VecZnx::alloc(
+            layout.n().into(),
+            (layout.rank() + 1).into(),
+            layout.size(),
+        ));
+        self.base2k = Some(layout.base2k());
+        self.k = Some(layout.k());
+        self
+    }
+}
+
+impl<D: Data> GLWECiphertextBuilder<D> {
+    #[inline]
+    pub fn data(mut self, data: VecZnx<D>) -> Self {
+        self.data = Some(data);
+        self
+    }
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    pub fn build(self) -> Result<GLWECiphertext<D>, BuildError> {
+        let data: VecZnx<D> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+
+        if base2k == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if k == 0_u32 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() == 0 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(GLWECiphertext { data, base2k, k })
+    }
+}
+
+impl<D: Data> LWEInfos for GLWECiphertext<D> {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GLWECiphertext<D> {
+    fn rank(&self) -> Rank {
+        Rank(self.data.cols() as u32 - 1)
+    }
 }

 impl<D: DataRef> ToOwnedDeep for GLWECiphertext<D> {
@@ -19,15 +195,15 @@ impl<D: DataRef> ToOwnedDeep for GLWECiphertext<D> {
     fn to_owned_deep(&self) -> Self::Owned {
         GLWECiphertext {
             data: self.data.to_owned_deep(),
-            basek: self.basek,
             k: self.k,
+            base2k: self.base2k,
         }
     }
 }

 impl<D: DataRef> fmt::Debug for GLWECiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -35,25 +211,14 @@ impl<D: DataRef> fmt::Display for GLWECiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
-            "GLWECiphertext: basek={} k={}: {}",
-            self.basek(),
-            self.k(),
+            "GLWECiphertext: base2k={} k={}: {}",
+            self.base2k().0,
+            self.k().0,
             self.data
         )
     }
 }

-impl<D: DataMut> Reset for GLWECiphertext<D>
-where
-    VecZnx<D>: Reset,
-{
-    fn reset(&mut self) {
-        self.data.reset();
-        self.basek = 0;
-        self.k = 0;
-    }
-}
-
 impl<D: DataMut> FillUniform for GLWECiphertext<D> {
     fn fill_uniform(&mut self, log_bound: usize, source: &mut Source) {
         self.data.fill_uniform(log_bound, source);
@@ -61,91 +226,75 @@ impl<D: DataMut> FillUniform for GLWECiphertext<D> {
 }

 impl GLWECiphertext<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_with(infos.n(), infos.base2k(), infos.k(), infos.rank())
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self {
         Self {
-            data: VecZnx::alloc(n, rank + 1, k.div_ceil(basek)),
-            basek,
+            data: VecZnx::alloc(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
+            base2k,
             k,
         }
     }

-    pub fn bytes_of(n: usize, basek: usize, k: usize, rank: usize) -> usize {
-        VecZnx::alloc_bytes(n, rank + 1, k.div_ceil(basek))
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k(), infos.rank())
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
+        VecZnx::alloc_bytes(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
     }
 }

-impl<D: Data> Infos for GLWECiphertext<D> {
-    type Inner = VecZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<D: Data> GLWECiphertext<D> {
-    pub fn rank(&self) -> usize {
-        self.cols() - 1
-    }
-}
-
-impl<D: DataMut> SetMetaData for GLWECiphertext<D> {
-    fn set_k(&mut self, k: usize) {
-        self.k = k
-    }
-
-    fn set_basek(&mut self, basek: usize) {
-        self.basek = basek
-    }
-}
-
-pub trait GLWECiphertextToRef: Infos {
+pub trait GLWECiphertextToRef {
     fn to_ref(&self) -> GLWECiphertext<&[u8]>;
 }

 impl<D: DataRef> GLWECiphertextToRef for GLWECiphertext<D> {
     fn to_ref(&self) -> GLWECiphertext<&[u8]> {
-        GLWECiphertext {
-            data: self.data.to_ref(),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .k(self.k())
+            .base2k(self.base2k())
+            .data(self.data.to_ref())
+            .build()
+            .unwrap()
     }
 }

-pub trait GLWECiphertextToMut: Infos {
+pub trait GLWECiphertextToMut {
     fn to_mut(&mut self) -> GLWECiphertext<&mut [u8]>;
 }

 impl<D: DataMut> GLWECiphertextToMut for GLWECiphertext<D> {
     fn to_mut(&mut self) -> GLWECiphertext<&mut [u8]> {
-        GLWECiphertext {
-            data: self.data.to_mut(),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .k(self.k())
+            .base2k(self.base2k())
+            .data(self.data.to_mut())
+            .build()
+            .unwrap()
     }
 }

 impl<D: DataMut> ReaderFrom for GLWECiphertext<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
-        self.k = reader.read_u64::<LittleEndian>()? as usize;
-        self.basek = reader.read_u64::<LittleEndian>()? as usize;
+        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
+        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
         self.data.read_from(reader)
     }
 }

 impl<D: DataRef> WriterTo for GLWECiphertext<D> {
     fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
-        writer.write_u64::<LittleEndian>(self.k as u64)?;
-        writer.write_u64::<LittleEndian>(self.basek as u64)?;
+        writer.write_u32::<LittleEndian>(self.k.0)?;
+        writer.write_u32::<LittleEndian>(self.base2k.0)?;
         self.data.write_to(writer)
     }
 }
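Aside (editor's note, not part of the diff): `GLWECiphertextLayout` is now the single source of truth for allocation. A sketch with illustrative parameters:

    let layout = GLWECiphertextLayout {
        n: Degree(1024),
        base2k: Base2K(18),
        k: TorusPrecision(54),
        rank: Rank(1),
    };
    let ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&layout);
    // size() = ceil(k / base2k) = ceil(54 / 18) = 3 limbs per column
    assert_eq!(ct.size(), 3);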
@@ -1,57 +1,193 @@
-use poulpy_hal::layouts::{Data, DataMut, DataRef, ReaderFrom, VecZnx, WriterTo};
+use poulpy_hal::layouts::{Data, DataMut, DataRef, ReaderFrom, VecZnx, WriterTo, ZnxInfos};

-use crate::{dist::Distribution, layouts::Infos};
+use crate::{
+    dist::Distribution,
+    layouts::{Base2K, BuildError, Degree, GLWEInfos, LWEInfos, Rank, TorusPrecision},
+};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

 #[derive(PartialEq, Eq)]
 pub struct GLWEPublicKey<D: Data> {
     pub(crate) data: VecZnx<D>,
-    pub(crate) basek: usize,
-    pub(crate) k: usize,
+    pub(crate) base2k: Base2K,
+    pub(crate) k: TorusPrecision,
     pub(crate) dist: Distribution,
 }

+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GLWEPublicKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rank: Rank,
+}
+
+impl<D: Data> LWEInfos for GLWEPublicKey<D> {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GLWEPublicKey<D> {
+    fn rank(&self) -> Rank {
+        Rank(self.data.cols() as u32 - 1)
+    }
+}
+
+impl LWEInfos for GLWEPublicKeyLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn size(&self) -> usize {
+        self.k.0.div_ceil(self.base2k.0) as usize
+    }
+}
+
+impl GLWEInfos for GLWEPublicKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank
+    }
+}
+
+pub struct GLWEPublicKeyBuilder<D: Data> {
+    data: Option<VecZnx<D>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+}
+
+impl<D: Data> GLWEPublicKey<D> {
+    #[inline]
+    pub fn builder() -> GLWEPublicKeyBuilder<D> {
+        GLWEPublicKeyBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+        }
+    }
+}
+
+impl GLWEPublicKeyBuilder<Vec<u8>> {
+    #[inline]
+    pub fn layout<A>(mut self, layout: &A) -> Self
+    where
+        A: GLWEInfos,
+    {
+        self.data = Some(VecZnx::alloc(
+            layout.n().into(),
+            (layout.rank() + 1).into(),
+            layout.size(),
+        ));
+        self.base2k = Some(layout.base2k());
+        self.k = Some(layout.k());
+        self
+    }
+}
+
+impl<D: Data> GLWEPublicKeyBuilder<D> {
+    #[inline]
+    pub fn data(mut self, data: VecZnx<D>) -> Self {
+        self.data = Some(data);
+        self
+    }
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    pub fn build(self) -> Result<GLWEPublicKey<D>, BuildError> {
+        let data: VecZnx<D> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+
+        if base2k == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if k == 0_u32 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() == 0 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(GLWEPublicKey {
+            data,
+            base2k,
+            k,
+            dist: Distribution::NONE,
+        })
+    }
+}
+
 impl GLWEPublicKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rank: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_with(infos.n(), infos.base2k(), infos.k(), infos.rank())
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self {
         Self {
-            data: VecZnx::alloc(n, rank + 1, k.div_ceil(basek)),
-            basek,
+            data: VecZnx::alloc(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
+            base2k,
             k,
             dist: Distribution::NONE,
         }
     }

-    pub fn bytes_of(n: usize, basek: usize, k: usize, rank: usize) -> usize {
-        VecZnx::alloc_bytes(n, rank + 1, k.div_ceil(basek))
-    }
-}
-
-impl<D: Data> Infos for GLWEPublicKey<D> {
-    type Inner = VecZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<D: Data> GLWEPublicKey<D> {
-    pub fn rank(&self) -> usize {
-        self.cols() - 1
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k(), infos.rank())
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
+        VecZnx::alloc_bytes(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
     }
 }

 impl<D: DataMut> ReaderFrom for GLWEPublicKey<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
-        self.k = reader.read_u64::<LittleEndian>()? as usize;
-        self.basek = reader.read_u64::<LittleEndian>()? as usize;
+        self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
+        self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
         match Distribution::read_from(reader) {
             Ok(dist) => self.dist = dist,
             Err(e) => return Err(e),
@@ -62,8 +198,8 @@ impl<D: DataMut> ReaderFrom for GLWEPublicKey<D> {

 impl<D: DataRef> WriterTo for GLWEPublicKey<D> {
     fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
-        writer.write_u64::<LittleEndian>(self.k as u64)?;
-        writer.write_u64::<LittleEndian>(self.basek as u64)?;
+        writer.write_u32::<LittleEndian>(self.k.0)?;
+        writer.write_u32::<LittleEndian>(self.base2k.0)?;
         match self.dist.write_to(writer) {
             Ok(()) => {}
             Err(e) => return Err(e),
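Aside (editor's note, not part of the diff): `build()` reports missing or zero parameters as `BuildError` values instead of panicking. Sketch with illustrative dimensions:

    let r = GLWEPublicKey::<Vec<u8>>::builder()
        .data(VecZnx::alloc(1024, 2, 3)) // n, rank+1 columns, 3 limbs
        .base2k(Base2K(18))
        .build(); // k was never set
    assert!(matches!(r, Err(BuildError::MissingK)));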
@@ -1,83 +1,202 @@
 use std::fmt;

-use poulpy_hal::layouts::{Data, DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef};
+use poulpy_hal::layouts::{Data, DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef, ZnxInfos};

-use crate::layouts::{GLWECiphertext, GLWECiphertextToMut, GLWECiphertextToRef, Infos, SetMetaData};
+use crate::layouts::{
+    Base2K, BuildError, Degree, GLWECiphertext, GLWECiphertextToMut, GLWECiphertextToRef, GLWEInfos, GLWELayoutSet, LWEInfos,
+    Rank, TorusPrecision,
+};
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GLWEPlaintextLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+}
+
+impl LWEInfos for GLWEPlaintextLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+}
+
+impl GLWEInfos for GLWEPlaintextLayout {
+    fn rank(&self) -> Rank {
+        Rank(0)
+    }
+}

 pub struct GLWEPlaintext<D: Data> {
     pub data: VecZnx<D>,
-    pub basek: usize,
-    pub k: usize,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+}
+
+impl<D: DataMut> GLWELayoutSet for GLWEPlaintext<D> {
+    fn set_basek(&mut self, base2k: Base2K) {
+        self.base2k = base2k
+    }
+
+    fn set_k(&mut self, k: TorusPrecision) {
+        self.k = k
+    }
+}
+
+impl<D: Data> LWEInfos for GLWEPlaintext<D> {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+}
+
+impl<D: Data> GLWEInfos for GLWEPlaintext<D> {
+    fn rank(&self) -> Rank {
+        Rank(self.data.cols() as u32 - 1)
+    }
+}
+
+pub struct GLWEPlaintextBuilder<D: Data> {
+    data: Option<VecZnx<D>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+}
+
+impl<D: Data> GLWEPlaintext<D> {
+    #[inline]
+    pub fn builder() -> GLWEPlaintextBuilder<D> {
+        GLWEPlaintextBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+        }
+    }
+}
+
+impl<D: Data> GLWEPlaintextBuilder<D> {
+    #[inline]
+    pub fn data(mut self, data: VecZnx<D>) -> Self {
+        self.data = Some(data);
+        self
+    }
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    pub fn build(self) -> Result<GLWEPlaintext<D>, BuildError> {
+        let data: VecZnx<D> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+
+        if base2k.0 == 0 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if k.0 == 0 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() != 1 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(GLWEPlaintext { data, base2k, k })
+    }
 }

 impl<D: DataRef> fmt::Display for GLWEPlaintext<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
-            "GLWEPlaintext: basek={} k={}: {}",
-            self.basek(),
-            self.k(),
+            "GLWEPlaintext: base2k={} k={}: {}",
+            self.base2k().0,
+            self.k().0,
             self.data
         )
     }
 }

-impl<D: Data> Infos for GLWEPlaintext<D> {
-    type Inner = VecZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<D: DataMut> SetMetaData for GLWEPlaintext<D> {
-    fn set_k(&mut self, k: usize) {
-        self.k = k
-    }
-
-    fn set_basek(&mut self, basek: usize) {
-        self.basek = basek
-    }
-}
-
 impl GLWEPlaintext<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_with(infos.n(), infos.base2k(), infos.k(), Rank(0))
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> Self {
+        debug_assert!(rank.0 == 0);
         Self {
-            data: VecZnx::alloc(n, 1, k.div_ceil(basek)),
-            basek,
+            data: VecZnx::alloc(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize),
+            base2k,
             k,
         }
     }

-    pub fn byte_of(n: usize, basek: usize, k: usize) -> usize {
-        VecZnx::alloc_bytes(n, 1, k.div_ceil(basek))
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k(), Rank(0))
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rank: Rank) -> usize {
+        debug_assert!(rank.0 == 0);
+        VecZnx::alloc_bytes(n.into(), (rank + 1).into(), k.0.div_ceil(base2k.0) as usize)
     }
 }

 impl<D: DataRef> GLWECiphertextToRef for GLWEPlaintext<D> {
     fn to_ref(&self) -> GLWECiphertext<&[u8]> {
-        GLWECiphertext {
-            data: self.data.to_ref(),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .data(self.data.to_ref())
+            .k(self.k())
+            .base2k(self.base2k())
+            .build()
+            .unwrap()
     }
 }

 impl<D: DataMut> GLWECiphertextToMut for GLWEPlaintext<D> {
     fn to_mut(&mut self) -> GLWECiphertext<&mut [u8]> {
-        GLWECiphertext {
-            data: self.data.to_mut(),
-            basek: self.basek,
-            k: self.k,
-        }
+        GLWECiphertext::builder()
+            .k(self.k())
+            .base2k(self.base2k())
+            .data(self.data.to_mut())
+            .build()
+            .unwrap()
     }
 }
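Aside (editor's note, not part of the diff): a plaintext now advertises itself as a rank-0 GLWE object, so `to_ref()` can route through the ciphertext builder shown above. Sketch with illustrative parameters:

    let pt = GLWEPlaintext::<Vec<u8>>::alloc(&GLWEPlaintextLayout {
        n: Degree(1024),
        base2k: Base2K(18),
        k: TorusPrecision(36),
    });
    let as_glwe = pt.to_ref();
    assert_eq!(as_glwe.rank().0, 0); // single column => rank 0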
@@ -3,7 +3,39 @@ use poulpy_hal::{
     source::Source,
 };

-use crate::dist::Distribution;
+use crate::{
+    dist::Distribution,
+    layouts::{Base2K, Degree, GLWEInfos, LWEInfos, Rank, TorusPrecision},
+};
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GLWESecretLayout {
+    pub n: Degree,
+    pub rank: Rank,
+}
+
+impl LWEInfos for GLWESecretLayout {
+    fn base2k(&self) -> Base2K {
+        Base2K(0)
+    }
+
+    fn k(&self) -> TorusPrecision {
+        TorusPrecision(0)
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn size(&self) -> usize {
+        1
+    }
+}
+impl GLWEInfos for GLWESecretLayout {
+    fn rank(&self) -> Rank {
+        self.rank
+    }
+}

 #[derive(PartialEq, Eq, Clone)]
 pub struct GLWESecret<D: Data> {
@@ -11,64 +43,88 @@ pub struct GLWESecret<D: Data> {
     pub(crate) dist: Distribution,
 }

+impl<D: Data> LWEInfos for GLWESecret<D> {
+    fn base2k(&self) -> Base2K {
+        Base2K(0)
+    }
+
+    fn k(&self) -> TorusPrecision {
+        TorusPrecision(0)
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn size(&self) -> usize {
+        1
+    }
+}
+
+impl<D: Data> GLWEInfos for GLWESecret<D> {
+    fn rank(&self) -> Rank {
+        Rank(self.data.cols() as u32)
+    }
+}
+
 impl GLWESecret<Vec<u8>> {
-    pub fn alloc(n: usize, rank: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_with(infos.n(), infos.rank())
+    }
+
+    pub fn alloc_with(n: Degree, rank: Rank) -> Self {
         Self {
-            data: ScalarZnx::alloc(n, rank),
+            data: ScalarZnx::alloc(n.into(), rank.into()),
             dist: Distribution::NONE,
         }
     }

-    pub fn bytes_of(n: usize, rank: usize) -> usize {
-        ScalarZnx::alloc_bytes(n, rank)
-    }
-}
-
-impl<D: Data> GLWESecret<D> {
-    pub fn n(&self) -> usize {
-        self.data.n()
-    }
-
-    pub fn log_n(&self) -> usize {
-        self.data.log_n()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.data.cols()
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GLWEInfos,
+    {
+        Self::alloc_bytes_with(infos.n(), infos.rank())
+    }
+
+    pub fn alloc_bytes_with(n: Degree, rank: Rank) -> usize {
+        ScalarZnx::alloc_bytes(n.into(), rank.into())
     }
 }

 impl<D: DataMut> GLWESecret<D> {
     pub fn fill_ternary_prob(&mut self, prob: f64, source: &mut Source) {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank().into()).for_each(|i| {
             self.data.fill_ternary_prob(i, prob, source);
         });
         self.dist = Distribution::TernaryProb(prob);
     }

     pub fn fill_ternary_hw(&mut self, hw: usize, source: &mut Source) {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank().into()).for_each(|i| {
             self.data.fill_ternary_hw(i, hw, source);
         });
         self.dist = Distribution::TernaryFixed(hw);
     }

     pub fn fill_binary_prob(&mut self, prob: f64, source: &mut Source) {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank().into()).for_each(|i| {
             self.data.fill_binary_prob(i, prob, source);
         });
         self.dist = Distribution::BinaryProb(prob);
     }

     pub fn fill_binary_hw(&mut self, hw: usize, source: &mut Source) {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank().into()).for_each(|i| {
             self.data.fill_binary_hw(i, hw, source);
         });
         self.dist = Distribution::BinaryFixed(hw);
     }

     pub fn fill_binary_block(&mut self, block_size: usize, source: &mut Source) {
-        (0..self.rank()).for_each(|i| {
+        (0..self.rank().into()).for_each(|i| {
             self.data.fill_binary_block(i, block_size, source);
         });
         self.dist = Distribution::BinaryBlock(block_size);
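Aside (editor's note, not part of the diff): secrets carry no torus precision, so `base2k()`/`k()` report 0 and only (n, rank) matter for allocation. A sketch of the sampling path; the `Source` RNG is taken as a parameter rather than constructed, since its constructor is not shown in this diff:

    fn sample_secret(source: &mut Source) -> GLWESecret<Vec<u8>> {
        let mut sk = GLWESecret::alloc_with(Degree(1024), Rank(2));
        sk.fill_ternary_prob(0.5, source); // fills each of the 2 columns
        sk
    }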
@@ -1,19 +1,109 @@
 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
     source::Source,
 };

-use crate::layouts::{GGLWESwitchingKey, Infos};
+use crate::layouts::{
+    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
+};
+
 use std::fmt;

+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct GLWEToLWESwitchingKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub rank_in: Rank,
+}
+
+impl LWEInfos for GLWEToLWESwitchingKeyLayout {
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+}
+
+impl GLWEInfos for GLWEToLWESwitchingKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl GGLWELayoutInfos for GLWEToLWESwitchingKeyLayout {
+    fn rank_in(&self) -> Rank {
+        self.rank_in
+    }
+
+    fn digits(&self) -> Digits {
+        Digits(1)
+    }
+
+    fn rank_out(&self) -> Rank {
+        Rank(1)
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}
+
 /// A special [GLWESwitchingKey] required to for the conversion from [GLWECiphertext] to [LWECiphertext].
 #[derive(PartialEq, Eq, Clone)]
 pub struct GLWEToLWESwitchingKey<D: Data>(pub(crate) GGLWESwitchingKey<D>);

+impl<D: Data> LWEInfos for GLWEToLWESwitchingKey<D> {
+    fn base2k(&self) -> Base2K {
+        self.0.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.0.k()
+    }
+
+    fn n(&self) -> Degree {
+        self.0.n()
+    }
+
+    fn size(&self) -> usize {
+        self.0.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for GLWEToLWESwitchingKey<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+impl<D: Data> GGLWELayoutInfos for GLWEToLWESwitchingKey<D> {
+    fn rank_in(&self) -> Rank {
+        self.0.rank_in()
+    }
+
+    fn digits(&self) -> Digits {
+        self.0.digits()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.0.rank_out()
+    }
+
+    fn rows(&self) -> Rows {
+        self.0.rows()
+    }
+}
+
 impl<D: DataRef> fmt::Debug for GLWEToLWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -23,52 +113,12 @@ impl<D: DataMut> FillUniform for GLWEToLWESwitchingKey<D> {
     }
 }

-impl<D: DataMut> Reset for GLWEToLWESwitchingKey<D> {
-    fn reset(&mut self) {
-        self.0.reset();
-    }
-}
-
 impl<D: DataRef> fmt::Display for GLWEToLWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(GLWEToLWESwitchingKey) {}", self.0)
     }
 }

-impl<D: Data> Infos for GLWEToLWESwitchingKey<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.0.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.0.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.0.k()
-    }
-}
-
-impl<D: Data> GLWEToLWESwitchingKey<D> {
-    pub fn digits(&self) -> usize {
-        self.0.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.0.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.0.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.0.rank_out()
-    }
-}
-
 impl<D: DataMut> ReaderFrom for GLWEToLWESwitchingKey<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
         self.0.read_from(reader)
@@ -82,7 +132,53 @@ impl<D: DataRef> WriterTo for GLWEToLWESwitchingKey<D> {
 }

 impl GLWEToLWESwitchingKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, rank_in: usize) -> Self {
-        Self(GGLWESwitchingKey::alloc(n, basek, k, rows, 1, rank_in, 1))
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.rank_out().0,
+            1,
+            "rank_out > 1 is not supported for GLWEToLWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for GLWEToLWESwitchingKey"
+        );
+        Self(GGLWESwitchingKey::alloc(infos))
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_in: Rank) -> Self {
+        Self(GGLWESwitchingKey::alloc_with(
+            n,
+            base2k,
+            k,
+            rows,
+            Digits(1),
+            rank_in,
+            Rank(1),
+        ))
+    }
+
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.rank_out().0,
+            1,
+            "rank_out > 1 is not supported for GLWEToLWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for GLWEToLWESwitchingKey"
+        );
+        GGLWESwitchingKey::alloc_bytes(infos)
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_in: Rank) -> usize {
+        GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rows, Digits(1), rank_in, Rank(1))
     }
 }
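Aside (editor's note, not part of the diff): GLWE-to-LWE keys are pinned to rank_out = 1 and digits = 1; the layout-driven `alloc` asserts this, while `alloc_with` hard-codes it. Sketch with illustrative parameters:

    let ksk = GLWEToLWESwitchingKey::alloc_with(
        Degree(1024),
        Base2K(18),
        TorusPrecision(54),
        Rows(3),
        Rank(2), // rank_in: rank of the input GLWE secret
    );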
|
|||||||
@@ -1,54 +0,0 @@
|
|||||||
use poulpy_hal::layouts::ZnxInfos;
|
|
||||||
|
|
||||||
pub trait Infos {
|
|
||||||
type Inner: ZnxInfos;
|
|
||||||
|
|
||||||
fn inner(&self) -> &Self::Inner;
|
|
||||||
|
|
||||||
/// Returns the ring degree of the polynomials.
|
|
||||||
fn n(&self) -> usize {
|
|
||||||
self.inner().n()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the base two logarithm of the ring dimension of the polynomials.
|
|
||||||
fn log_n(&self) -> usize {
|
|
||||||
self.inner().log_n()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of rows.
|
|
||||||
fn rows(&self) -> usize {
|
|
||||||
self.inner().rows()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of polynomials in each row.
|
|
||||||
fn cols(&self) -> usize {
|
|
||||||
self.inner().cols()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn rank(&self) -> usize {
|
|
||||||
self.cols() - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of size per polynomial.
|
|
||||||
fn size(&self) -> usize {
|
|
||||||
let size: usize = self.inner().size();
|
|
||||||
debug_assert!(size >= self.k().div_ceil(self.basek()));
|
|
||||||
size
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the total number of small polynomials.
|
|
||||||
fn poly_count(&self) -> usize {
|
|
||||||
self.rows() * self.cols() * self.size()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the base 2 logarithm of the ciphertext base.
|
|
||||||
fn basek(&self) -> usize;
|
|
||||||
|
|
||||||
/// Returns the bit precision of the ciphertext.
|
|
||||||
fn k(&self) -> usize;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait SetMetaData {
|
|
||||||
fn set_basek(&mut self, basek: usize);
|
|
||||||
fn set_k(&mut self, k: usize);
|
|
||||||
}
|
|
||||||
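Aside (editor's note, not part of the diff): the deleted catch-all `Infos`/`SetMetaData` traits are replaced by the typed per-layout traits (`LWEInfos`, `GLWEInfos`, `GGSWInfos`, ...) introduced throughout this commit. The `size()` contract is unchanged, now derived from the wrapper types:

    fn limbs<A: LWEInfos>(x: &A) -> usize {
        // ceil(k / base2k), same meaning as the removed Infos::size()
        x.k().0.div_ceil(x.base2k().0) as usize
    }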
@@ -1,15 +1,75 @@
 use std::fmt;

 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, Reset, WriterTo, Zn, ZnToMut, ZnToRef, ZnxInfos},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo, Zn, ZnToMut, ZnToRef, ZnxInfos},
     source::Source,
 };

+use crate::layouts::{Base2K, BuildError, Degree, TorusPrecision};
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+
+pub trait LWEInfos {
+    fn n(&self) -> Degree;
+    fn k(&self) -> TorusPrecision;
+    fn max_k(&self) -> TorusPrecision {
+        TorusPrecision(self.k().0 * self.size() as u32)
+    }
+    fn base2k(&self) -> Base2K;
+    fn size(&self) -> usize {
+        self.k().0.div_ceil(self.base2k().0) as usize
+    }
+    fn lwe_layout(&self) -> LWECiphertextLayout {
+        LWECiphertextLayout {
+            n: self.n(),
+            k: self.k(),
+            base2k: self.base2k(),
+        }
+    }
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct LWECiphertextLayout {
+    pub n: Degree,
+    pub k: TorusPrecision,
+    pub base2k: Base2K,
+}
+
+impl LWEInfos for LWECiphertextLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+}
+
 #[derive(PartialEq, Eq, Clone)]
 pub struct LWECiphertext<D: Data> {
     pub(crate) data: Zn<D>,
-    pub(crate) k: usize,
-    pub(crate) basek: usize,
+    pub(crate) k: TorusPrecision,
+    pub(crate) base2k: Base2K,
+}
+
+impl<D: Data> LWEInfos for LWECiphertext<D> {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32 - 1)
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
 }

 impl<D: DataRef> LWECiphertext<D> {
@@ -26,7 +86,7 @@ impl<D: DataMut> LWECiphertext<D> {

 impl<D: DataRef> fmt::Debug for LWECiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -34,22 +94,14 @@ impl<D: DataRef> fmt::Display for LWECiphertext<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
-            "LWECiphertext: basek={} k={}: {}",
-            self.basek(),
-            self.k(),
+            "LWECiphertext: base2k={} k={}: {}",
+            self.base2k().0,
+            self.k().0,
             self.data
         )
     }
 }

-impl<D: DataMut> Reset for LWECiphertext<D> {
-    fn reset(&mut self) {
-        self.data.reset();
-        self.basek = 0;
-        self.k = 0;
-    }
-}
-
 impl<D: DataMut> FillUniform for LWECiphertext<D>
 where
     Zn<D>: FillUniform,
@@ -60,45 +112,106 @@ where
 }

 impl LWECiphertext<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: LWEInfos,
+    {
+        Self::alloc_with(infos.n(), infos.base2k(), infos.k())
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision) -> Self {
         Self {
-            data: Zn::alloc(n + 1, 1, k.div_ceil(basek)),
+            data: Zn::alloc((n + 1).into(), 1, k.0.div_ceil(base2k.0) as usize),
             k,
-            basek,
+            base2k,
         }
     }
+
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: LWEInfos,
+    {
+        Self::alloc_bytes_with(infos.n(), infos.base2k(), infos.k())
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision) -> usize {
+        Zn::alloc_bytes((n + 1).into(), 1, k.0.div_ceil(base2k.0) as usize)
+    }
+}
+
+impl LWECiphertextBuilder<Vec<u8>> {
+    #[inline]
+    pub fn layout<A>(mut self, layout: A) -> Self
+    where
+        A: LWEInfos,
+    {
+        self.data = Some(Zn::alloc((layout.n() + 1).into(), 1, layout.size()));
+        self.base2k = Some(layout.base2k());
+        self.k = Some(layout.k());
+        self
+    }
+}
+
+pub struct LWECiphertextBuilder<D: Data> {
+    data: Option<Zn<D>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+}
+
+impl<D: Data> LWECiphertext<D> {
+    #[inline]
+    pub fn builder() -> LWECiphertextBuilder<D> {
+        LWECiphertextBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+        }
+    }
 }

-impl<D: Data> Infos for LWECiphertext<D>
-where
-    Zn<D>: ZnxInfos,
-{
-    type Inner = Zn<D>;
-
-    fn n(&self) -> usize {
-        &self.inner().n() - 1
-    }
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<DataSelf: DataMut> SetMetaData for LWECiphertext<DataSelf> {
-    fn set_k(&mut self, k: usize) {
-        self.k = k
-    }
-
-    fn set_basek(&mut self, basek: usize) {
-        self.basek = basek
+impl<D: Data> LWECiphertextBuilder<D> {
+    #[inline]
+    pub fn data(mut self, data: Zn<D>) -> Self {
+        self.data = Some(data);
+        self
+    }
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    pub fn build(self) -> Result<LWECiphertext<D>, BuildError> {
+        let data: Zn<D> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+
+        if base2k.0 == 0 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if k.0 == 0 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() == 0 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(LWECiphertext { data, base2k, k })
+    }
 }

@@ -108,11 +221,12 @@ pub trait LWECiphertextToRef {

 impl<D: DataRef> LWECiphertextToRef for LWECiphertext<D> {
||||||
fn to_ref(&self) -> LWECiphertext<&[u8]> {
|
fn to_ref(&self) -> LWECiphertext<&[u8]> {
|
||||||
LWECiphertext {
|
LWECiphertext::builder()
|
||||||
data: self.data.to_ref(),
|
.base2k(self.base2k())
|
||||||
basek: self.basek,
|
.k(self.k())
|
||||||
k: self.k,
|
.data(self.data.to_ref())
|
||||||
}
|
.build()
|
||||||
|
.unwrap()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -123,30 +237,27 @@ pub trait LWECiphertextToMut {
|
|||||||
|
|
||||||
impl<D: DataMut> LWECiphertextToMut for LWECiphertext<D> {
|
impl<D: DataMut> LWECiphertextToMut for LWECiphertext<D> {
|
||||||
fn to_mut(&mut self) -> LWECiphertext<&mut [u8]> {
|
fn to_mut(&mut self) -> LWECiphertext<&mut [u8]> {
|
||||||
LWECiphertext {
|
LWECiphertext::builder()
|
||||||
data: self.data.to_mut(),
|
.base2k(self.base2k())
|
||||||
basek: self.basek,
|
.k(self.k())
|
||||||
k: self.k,
|
.data(self.data.to_mut())
|
||||||
}
|
.build()
|
||||||
|
.unwrap()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
|
||||||
|
|
||||||
use crate::layouts::{Infos, SetMetaData};
|
|
||||||
|
|
||||||
impl<D: DataMut> ReaderFrom for LWECiphertext<D> {
|
impl<D: DataMut> ReaderFrom for LWECiphertext<D> {
|
||||||
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
|
||||||
self.k = reader.read_u64::<LittleEndian>()? as usize;
|
self.k = TorusPrecision(reader.read_u32::<LittleEndian>()?);
|
||||||
self.basek = reader.read_u64::<LittleEndian>()? as usize;
|
self.base2k = Base2K(reader.read_u32::<LittleEndian>()?);
|
||||||
self.data.read_from(reader)
|
self.data.read_from(reader)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: DataRef> WriterTo for LWECiphertext<D> {
|
impl<D: DataRef> WriterTo for LWECiphertext<D> {
|
||||||
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
fn write_to<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||||
writer.write_u64::<LittleEndian>(self.k as u64)?;
|
writer.write_u32::<LittleEndian>(self.k.into())?;
|
||||||
writer.write_u64::<LittleEndian>(self.basek as u64)?;
|
writer.write_u32::<LittleEndian>(self.base2k.into())?;
|
||||||
self.data.write_to(writer)
|
self.data.write_to(writer)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
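For orientation, a minimal sketch of how the reworked LWECiphertext API above might be used; the crate path and parameter values are assumptions, not part of the commit:

// Hypothetical usage sketch; `poulpy_core::layouts` is an assumed path.
use poulpy_core::layouts::{Base2K, Degree, LWECiphertext, LWECiphertextLayout, LWEInfos, TorusPrecision};

fn sketch() {
    // Allocate from explicit newtypes: 54 bits of torus precision in
    // limbs of 18 bits => ceil(54 / 18) = 3 limbs.
    let ct = LWECiphertext::alloc_with(Degree(512), Base2K(18), TorusPrecision(54));
    assert_eq!(ct.size(), 3);

    // Or allocate from anything implementing LWEInfos, e.g. a layout value.
    let layout = LWECiphertextLayout {
        n: Degree(512),
        k: TorusPrecision(54),
        base2k: Base2K(18),
    };
    let ct2 = LWECiphertext::alloc(&layout);
    assert_eq!(ct2.k(), TorusPrecision(54));
}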
@@ -1,24 +1,170 @@
 use std::fmt;

 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
     source::Source,
 };

-use crate::layouts::{GGLWESwitchingKey, Infos};
+use crate::layouts::{
+    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
+};
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct LWESwitchingKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+}
+
+impl LWEInfos for LWESwitchingKeyLayout {
+    fn n(&self) -> Degree {
+        self.n
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+}
+
+impl GLWEInfos for LWESwitchingKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl GGLWELayoutInfos for LWESwitchingKeyLayout {
+    fn rank_in(&self) -> Rank {
+        Rank(1)
+    }
+
+    fn digits(&self) -> Digits {
+        Digits(1)
+    }
+
+    fn rank_out(&self) -> Rank {
+        Rank(1)
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}

 #[derive(PartialEq, Eq, Clone)]
 pub struct LWESwitchingKey<D: Data>(pub(crate) GGLWESwitchingKey<D>);
+
+impl<D: Data> LWEInfos for LWESwitchingKey<D> {
+    fn base2k(&self) -> Base2K {
+        self.0.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.0.k()
+    }
+
+    fn n(&self) -> Degree {
+        self.0.n()
+    }
+
+    fn size(&self) -> usize {
+        self.0.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for LWESwitchingKey<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for LWESwitchingKey<D> {
+    fn digits(&self) -> Digits {
+        self.0.digits()
+    }
+
+    fn rank_in(&self) -> Rank {
+        self.0.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.0.rank_out()
+    }
+
+    fn rows(&self) -> Rows {
+        self.0.rows()
+    }
+}

 impl LWESwitchingKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize) -> Self {
-        Self(GGLWESwitchingKey::alloc(n, basek, k, rows, 1, 1, 1))
-    }
-}
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.rank_out().0,
+            1,
+            "rank_out > 1 is not supported for LWESwitchingKey"
+        );
+        Self(GGLWESwitchingKey::alloc(infos))
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows) -> Self {
+        Self(GGLWESwitchingKey::alloc_with(
+            n,
+            base2k,
+            k,
+            rows,
+            Digits(1),
+            Rank(1),
+            Rank(1),
+        ))
+    }
+
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.rank_out().0,
+            1,
+            "rank_out > 1 is not supported for LWESwitchingKey"
+        );
+        GGLWESwitchingKey::alloc_bytes(infos)
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows) -> usize {
+        GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rows, Digits(1), Rank(1), Rank(1))
+    }
+}

 impl<D: DataRef> fmt::Debug for LWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -28,52 +174,12 @@ impl<D: DataMut> FillUniform for LWESwitchingKey<D> {
     }
 }

-impl<D: DataMut> Reset for LWESwitchingKey<D> {
-    fn reset(&mut self) {
-        self.0.reset();
-    }
-}
-
 impl<D: DataRef> fmt::Display for LWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(LWESwitchingKey) {}", self.0)
     }
 }

-impl<D: Data> Infos for LWESwitchingKey<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.0.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.0.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.0.k()
-    }
-}
-
-impl<D: Data> LWESwitchingKey<D> {
-    pub fn digits(&self) -> usize {
-        self.0.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.0.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.0.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.0.rank_out()
-    }
-}
-
 impl<D: DataMut> ReaderFrom for LWESwitchingKey<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
         self.0.read_from(reader)
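The LWESwitchingKey layout above hard-wires digits = rank_in = rank_out = 1, so only the degree, base, precision, and row count remain free. A short sketch (assumed crate path, hypothetical parameters):

use poulpy_core::layouts::{Base2K, Degree, LWESwitchingKey, LWESwitchingKeyLayout, Rows, TorusPrecision};

fn sketch() {
    let layout = LWESwitchingKeyLayout {
        n: Degree(1024),
        base2k: Base2K(17),
        k: TorusPrecision(51),
        rows: Rows(3),
    };
    // alloc() debug-asserts digits == rank_in == rank_out == 1, then
    // delegates to GGLWESwitchingKey::alloc(&layout).
    let _ksk = LWESwitchingKey::alloc(&layout);
}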
@@ -1,21 +1,70 @@
 use std::fmt;

-use poulpy_hal::layouts::{Data, DataMut, DataRef, Zn, ZnToMut, ZnToRef};
+use poulpy_hal::layouts::{Data, DataMut, DataRef, Zn, ZnToMut, ZnToRef, ZnxInfos};

-use crate::layouts::{Infos, SetMetaData};
+use crate::layouts::{Base2K, Degree, LWEInfos, TorusPrecision};
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct LWEPlaintextLayout {
+    k: TorusPrecision,
+    base2k: Base2K,
+}
+
+impl LWEInfos for LWEPlaintextLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        Degree(0)
+    }
+
+    fn size(&self) -> usize {
+        self.k.0.div_ceil(self.base2k.0) as usize
+    }
+}

 pub struct LWEPlaintext<D: Data> {
     pub(crate) data: Zn<D>,
-    pub(crate) k: usize,
-    pub(crate) basek: usize,
+    pub(crate) k: TorusPrecision,
+    pub(crate) base2k: Base2K,
+}
+
+impl<D: Data> LWEInfos for LWEPlaintext<D> {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32 - 1)
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
 }

 impl LWEPlaintext<Vec<u8>> {
-    pub fn alloc(basek: usize, k: usize) -> Self {
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: LWEInfos,
+    {
+        Self::alloc_with(infos.base2k(), infos.k())
+    }
+
+    pub fn alloc_with(base2k: Base2K, k: TorusPrecision) -> Self {
         Self {
-            data: Zn::alloc(1, 1, k.div_ceil(basek)),
+            data: Zn::alloc(1, 1, k.0.div_ceil(base2k.0) as usize),
             k,
-            basek,
+            base2k,
         }
     }
 }
@@ -24,40 +73,14 @@ impl<D: DataRef> fmt::Display for LWEPlaintext<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(
             f,
-            "LWEPlaintext: basek={} k={}: {}",
-            self.basek(),
-            self.k(),
+            "LWEPlaintext: base2k={} k={}: {}",
+            self.base2k().0,
+            self.k().0,
             self.data
         )
     }
 }

-impl<D: Data> Infos for LWEPlaintext<D> {
-    type Inner = Zn<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<D: DataMut> SetMetaData for LWEPlaintext<D> {
-    fn set_k(&mut self, k: usize) {
-        self.k = k
-    }
-
-    fn set_basek(&mut self, basek: usize) {
-        self.basek = basek
-    }
-}
-
 pub trait LWEPlaintextToRef {
     #[allow(dead_code)]
     fn to_ref(&self) -> LWEPlaintext<&[u8]>;
@@ -67,7 +90,7 @@ impl<D: DataRef> LWEPlaintextToRef for LWEPlaintext<D> {
     fn to_ref(&self) -> LWEPlaintext<&[u8]> {
         LWEPlaintext {
             data: self.data.to_ref(),
-            basek: self.basek,
+            base2k: self.base2k,
             k: self.k,
         }
     }
@@ -82,7 +105,7 @@ impl<D: DataMut> LWEPlaintextToMut for LWEPlaintext<D> {
     fn to_mut(&mut self) -> LWEPlaintext<&mut [u8]> {
         LWEPlaintext {
             data: self.data.to_mut(),
-            basek: self.basek,
+            base2k: self.base2k,
             k: self.k,
         }
     }
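The plaintext, like the ciphertext, derives its limb count from k and base2k. A one-line check of the arithmetic (assumed crate path, hypothetical values):

use poulpy_core::layouts::{Base2K, LWEInfos, LWEPlaintext, TorusPrecision};

fn sketch() {
    // ceil(40 / 12) = 4 limbs of 12 bits cover 40 bits of precision.
    let pt = LWEPlaintext::alloc_with(Base2K(12), TorusPrecision(40));
    assert_eq!(pt.size(), 4);
}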
@@ -3,7 +3,10 @@ use poulpy_hal::{
     source::Source,
 };

-use crate::dist::Distribution;
+use crate::{
+    dist::Distribution,
+    layouts::{Base2K, Degree, LWEInfos, TorusPrecision},
+};

 pub struct LWESecret<D: Data> {
     pub(crate) data: ScalarZnx<D>,
@@ -11,9 +14,9 @@ pub struct LWESecret<D: Data> {
 }

 impl LWESecret<Vec<u8>> {
-    pub fn alloc(n: usize) -> Self {
+    pub fn alloc(n: Degree) -> Self {
         Self {
-            data: ScalarZnx::alloc(n, 1),
+            data: ScalarZnx::alloc(n.into(), 1),
             dist: Distribution::NONE,
         }
     }
@@ -33,17 +36,20 @@ impl<D: DataRef> LWESecret<D> {
     }
 }

-impl<D: Data> LWESecret<D> {
-    pub fn n(&self) -> usize {
-        self.data.n()
-    }
-
-    pub fn log_n(&self) -> usize {
-        self.data.log_n()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.data.cols()
-    }
-}
+impl<D: Data> LWEInfos for LWESecret<D> {
+    fn base2k(&self) -> Base2K {
+        Base2K(0)
+    }
+
+    fn k(&self) -> TorusPrecision {
+        TorusPrecision(0)
+    }
+
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn size(&self) -> usize {
+        1
+    }
+}
@@ -1,18 +1,108 @@
 use std::fmt;

 use poulpy_hal::{
-    layouts::{Data, DataMut, DataRef, FillUniform, MatZnx, ReaderFrom, Reset, WriterTo},
+    layouts::{Data, DataMut, DataRef, FillUniform, ReaderFrom, WriterTo},
     source::Source,
 };

-use crate::layouts::{GGLWESwitchingKey, Infos};
+use crate::layouts::{
+    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
+};
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub struct LWEToGLWESwitchingKeyLayout {
+    pub n: Degree,
+    pub base2k: Base2K,
+    pub k: TorusPrecision,
+    pub rows: Rows,
+    pub rank_out: Rank,
+}
+
+impl LWEInfos for LWEToGLWESwitchingKeyLayout {
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn n(&self) -> Degree {
+        self.n
+    }
+}
+
+impl GLWEInfos for LWEToGLWESwitchingKeyLayout {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl GGLWELayoutInfos for LWEToGLWESwitchingKeyLayout {
+    fn rank_in(&self) -> Rank {
+        Rank(1)
+    }
+
+    fn digits(&self) -> Digits {
+        Digits(1)
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.rank_out
+    }
+
+    fn rows(&self) -> Rows {
+        self.rows
+    }
+}

 #[derive(PartialEq, Eq, Clone)]
 pub struct LWEToGLWESwitchingKey<D: Data>(pub(crate) GGLWESwitchingKey<D>);
+
+impl<D: Data> LWEInfos for LWEToGLWESwitchingKey<D> {
+    fn base2k(&self) -> Base2K {
+        self.0.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.0.k()
+    }
+
+    fn n(&self) -> Degree {
+        self.0.n()
+    }
+
+    fn size(&self) -> usize {
+        self.0.size()
+    }
+}
+
+impl<D: Data> GLWEInfos for LWEToGLWESwitchingKey<D> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data> GGLWELayoutInfos for LWEToGLWESwitchingKey<D> {
+    fn digits(&self) -> Digits {
+        self.0.digits()
+    }
+
+    fn rank_in(&self) -> Rank {
+        self.0.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.0.rank_out()
+    }
+
+    fn rows(&self) -> Rows {
+        self.0.rows()
+    }
+}

 impl<D: DataRef> fmt::Debug for LWEToGLWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self)
+        write!(f, "{self}")
     }
 }

@@ -22,52 +112,12 @@ impl<D: DataMut> FillUniform for LWEToGLWESwitchingKey<D> {
     }
 }

-impl<D: DataMut> Reset for LWEToGLWESwitchingKey<D> {
-    fn reset(&mut self) {
-        self.0.reset();
-    }
-}
-
 impl<D: DataRef> fmt::Display for LWEToGLWESwitchingKey<D> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "(LWEToGLWESwitchingKey) {}", self.0)
     }
 }

-impl<D: Data> Infos for LWEToGLWESwitchingKey<D> {
-    type Inner = MatZnx<D>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.0.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.0.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.0.k()
-    }
-}
-
-impl<D: Data> LWEToGLWESwitchingKey<D> {
-    pub fn digits(&self) -> usize {
-        self.0.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.0.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.0.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.0.rank_out()
-    }
-}
-
 impl<D: DataMut> ReaderFrom for LWEToGLWESwitchingKey<D> {
     fn read_from<R: std::io::Read>(&mut self, reader: &mut R) -> std::io::Result<()> {
         self.0.read_from(reader)
@@ -81,7 +131,53 @@ impl<D: DataRef> WriterTo for LWEToGLWESwitchingKey<D> {
     }
 }

 impl LWEToGLWESwitchingKey<Vec<u8>> {
-    pub fn alloc(n: usize, basek: usize, k: usize, rows: usize, rank_out: usize) -> Self {
-        Self(GGLWESwitchingKey::alloc(n, basek, k, rows, 1, 1, rank_out))
+    pub fn alloc<A>(infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWEToGLWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWEToGLWESwitchingKey"
+        );
+        Self(GGLWESwitchingKey::alloc(infos))
+    }
+
+    pub fn alloc_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_out: Rank) -> Self {
+        Self(GGLWESwitchingKey::alloc_with(
+            n,
+            base2k,
+            k,
+            rows,
+            Digits(1),
+            Rank(1),
+            rank_out,
+        ))
+    }
+
+    pub fn alloc_bytes<A>(infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+    {
+        debug_assert_eq!(
+            infos.rank_in().0,
+            1,
+            "rank_in > 1 is not supported for LWEToGLWESwitchingKey"
+        );
+        debug_assert_eq!(
+            infos.digits().0,
+            1,
+            "digits > 1 is not supported for LWEToGLWESwitchingKey"
+        );
+        GGLWESwitchingKey::alloc_bytes(infos)
+    }
+
+    pub fn alloc_bytes_with(n: Degree, base2k: Base2K, k: TorusPrecision, rows: Rows, rank_out: Rank) -> usize {
+        GGLWESwitchingKey::alloc_bytes_with(n, base2k, k, rows, Digits(1), Rank(1), rank_out)
     }
 }
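For the LWE-to-GLWE key the layout leaves only the output rank free (rank_in and digits are pinned to 1). A sketch with assumed crate path and hypothetical values:

use poulpy_core::layouts::{
    Base2K, Degree, LWEToGLWESwitchingKey, LWEToGLWESwitchingKeyLayout, Rank, Rows, TorusPrecision,
};

fn sketch() {
    let layout = LWEToGLWESwitchingKeyLayout {
        n: Degree(2048),
        base2k: Base2K(19),
        k: TorusPrecision(57),
        rows: Rows(3),
        rank_out: Rank(2),
    };
    let _ksk = LWEToGLWESwitchingKey::alloc(&layout);
}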
@@ -8,7 +8,6 @@ mod glwe_pk;
 mod glwe_pt;
 mod glwe_sk;
 mod glwe_to_lwe_ksk;
-mod infos;
 mod lwe_ct;
 mod lwe_ksk;
 mod lwe_pt;
@@ -28,9 +27,195 @@ pub use glwe_pk::*;
 pub use glwe_pt::*;
 pub use glwe_sk::*;
 pub use glwe_to_lwe_ksk::*;
-pub use infos::*;
 pub use lwe_ct::*;
 pub use lwe_ksk::*;
 pub use lwe_pt::*;
 pub use lwe_sk::*;
 pub use lwe_to_glwe_ksk::*;
+
+#[derive(Debug)]
+pub enum BuildError {
+    MissingData,
+    MissingBase2K,
+    MissingK,
+    MissingDigits,
+    ZeroDegree,
+    NonPowerOfTwoDegree,
+    ZeroBase2K,
+    ZeroTorusPrecision,
+    ZeroCols,
+    ZeroLimbs,
+    ZeroRank,
+    ZeroDigits,
+}
+
+/// Newtype over `u32` with arithmetic and comparisons against same type and `u32`.
+/// Arithmetic is **saturating** (add/sub/mul) to avoid debug-overflow panics.
+macro_rules! newtype_u32 {
+    ($name:ident) => {
+        #[repr(transparent)]
+        #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+        pub struct $name(pub u32);
+
+        // ----- Conversions -----
+        impl From<$name> for u32 {
+            #[inline]
+            fn from(v: $name) -> u32 {
+                v.0
+            }
+        }
+        impl From<$name> for usize {
+            #[inline]
+            fn from(v: $name) -> usize {
+                v.0 as usize
+            }
+        }
+
+        impl From<u32> for $name {
+            #[inline]
+            fn from(v: u32) -> $name {
+                $name(v)
+            }
+        }
+        impl From<usize> for $name {
+            #[inline]
+            fn from(v: usize) -> $name {
+                $name(v as u32)
+            }
+        }
+
+        // ----- Display -----
+        impl ::core::fmt::Display for $name {
+            #[inline]
+            fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+                write!(f, "{}", self.0)
+            }
+        }
+
+        // ===== Arithmetic (same type) =====
+        impl ::core::ops::Add for $name {
+            type Output = $name;
+            #[inline]
+            fn add(self, rhs: $name) -> $name {
+                $name(self.0.saturating_add(rhs.0))
+            }
+        }
+        impl ::core::ops::Sub for $name {
+            type Output = $name;
+            #[inline]
+            fn sub(self, rhs: $name) -> $name {
+                $name(self.0.saturating_sub(rhs.0))
+            }
+        }
+        impl ::core::ops::Mul for $name {
+            type Output = $name;
+            #[inline]
+            fn mul(self, rhs: $name) -> $name {
+                $name(self.0.saturating_mul(rhs.0))
+            }
+        }
+
+        // ===== Arithmetic (with u32) =====
+        impl ::core::ops::Add<u32> for $name {
+            type Output = $name;
+            #[inline]
+            fn add(self, rhs: u32) -> $name {
+                $name(self.0.saturating_add(rhs))
+            }
+        }
+        impl ::core::ops::Sub<u32> for $name {
+            type Output = $name;
+            #[inline]
+            fn sub(self, rhs: u32) -> $name {
+                $name(self.0.saturating_sub(rhs))
+            }
+        }
+        impl ::core::ops::Mul<u32> for $name {
+            type Output = $name;
+            #[inline]
+            fn mul(self, rhs: u32) -> $name {
+                $name(self.0.saturating_mul(rhs))
+            }
+        }
+
+        impl $name {
+            #[inline]
+            pub const fn as_u32(self) -> u32 {
+                self.0
+            }
+            #[inline]
+            pub const fn as_usize(self) -> usize {
+                self.0 as usize
+            }
+
+            #[inline]
+            pub fn div_ceil<T: Into<u32>>(self, rhs: T) -> u32 {
+                self.0.div_ceil(rhs.into())
+            }
+        }
+
+        // Optional symmetric forms: u32 (+|-|*) $name -> $name
+        impl ::core::ops::Add<$name> for u32 {
+            type Output = $name;
+            #[inline]
+            fn add(self, rhs: $name) -> $name {
+                $name(self.saturating_add(rhs.0))
+            }
+        }
+        impl ::core::ops::Sub<$name> for u32 {
+            type Output = $name;
+            #[inline]
+            fn sub(self, rhs: $name) -> $name {
+                $name(self.saturating_sub(rhs.0))
+            }
+        }
+        impl ::core::ops::Mul<$name> for u32 {
+            type Output = $name;
+            #[inline]
+            fn mul(self, rhs: $name) -> $name {
+                $name(self.saturating_mul(rhs.0))
+            }
+        }
+
+        // ===== Cross-type comparisons with u32 (both directions) =====
+        impl ::core::cmp::PartialEq<u32> for $name {
+            #[inline]
+            fn eq(&self, other: &u32) -> bool {
+                self.0 == *other
+            }
+        }
+        impl ::core::cmp::PartialEq<$name> for u32 {
+            #[inline]
+            fn eq(&self, other: &$name) -> bool {
+                *self == other.0
+            }
+        }
+
+        impl ::core::cmp::PartialOrd<u32> for $name {
+            #[inline]
+            fn partial_cmp(&self, other: &u32) -> Option<::core::cmp::Ordering> {
+                self.0.partial_cmp(other)
+            }
+        }
+        impl ::core::cmp::PartialOrd<$name> for u32 {
+            #[inline]
+            fn partial_cmp(&self, other: &$name) -> Option<::core::cmp::Ordering> {
+                self.partial_cmp(&other.0)
+            }
+        }
+    };
+}
+
+newtype_u32!(Degree);
+newtype_u32!(TorusPrecision);
+newtype_u32!(Base2K);
+newtype_u32!(Rows);
+newtype_u32!(Rank);
+newtype_u32!(Digits);
+
+impl Degree {
+    pub fn log2(&self) -> usize {
+        let n: usize = self.0 as usize;
+        (usize::BITS - (n - 1).leading_zeros()) as _
+    }
+}
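To make the macro's semantics concrete, a few checks that follow directly from the definitions above (assumed re-export path):

use poulpy_core::layouts::{Base2K, Degree, TorusPrecision};

fn sketch() {
    // Saturating arithmetic: no debug-mode overflow panic.
    assert_eq!(Base2K(u32::MAX) + 1, Base2K(u32::MAX));
    assert_eq!(TorusPrecision(3) - 5, TorusPrecision(0));

    // div_ceil accepts anything convertible to u32, including another newtype.
    assert_eq!(TorusPrecision(54).div_ceil(Base2K(18)), 3u32);

    // log2 rounds up to the exponent of the next power of two.
    assert_eq!(Degree(1024).log2(), 10);
    assert_eq!(Degree(1025).log2(), 11);
}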
@@ -1,10 +1,10 @@
 use poulpy_hal::{
     api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
-    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
+    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
 };

 use crate::layouts::{
-    GGLWEAutomorphismKey, Infos,
+    Base2K, Degree, Digits, GGLWEAutomorphismKey, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
     prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
 };

@@ -14,61 +14,107 @@ pub struct GGLWEAutomorphismKeyPrepared<D: Data, B: Backend> {
     pub(crate) p: i64,
 }

-impl<B: Backend> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
-    where
-        Module<B>: VmpPMatAlloc<B>,
-    {
-        GGLWEAutomorphismKeyPrepared::<Vec<u8>, B> {
-            key: GGLWESwitchingKeyPrepared::alloc(module, basek, k, rows, digits, rank, rank),
-            p: 0,
-        }
-    }
-
-    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
-    where
-        Module<B>: VmpPMatAllocBytes,
-    {
-        GGLWESwitchingKeyPrepared::bytes_of(module, basek, k, rows, digits, rank, rank)
-    }
-}
-
-impl<D: Data, B: Backend> Infos for GGLWEAutomorphismKeyPrepared<D, B> {
-    type Inner = VmpPMat<D, B>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.key.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.key.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.key.k()
-    }
-}
-
 impl<D: Data, B: Backend> GGLWEAutomorphismKeyPrepared<D, B> {
     pub fn p(&self) -> i64 {
         self.p
     }
-
-    pub fn digits(&self) -> usize {
-        self.key.digits()
-    }
-
-    pub fn rank(&self) -> usize {
-        self.key.rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.key.rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.key.rank_out()
-    }
 }
+
+impl<D: Data, B: Backend> LWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
+    fn n(&self) -> Degree {
+        self.key.n()
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.key.base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.key.k()
+    }
+
+    fn size(&self) -> usize {
+        self.key.size()
+    }
+}
+
+impl<D: Data, B: Backend> GLWEInfos for GGLWEAutomorphismKeyPrepared<D, B> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWEAutomorphismKeyPrepared<D, B> {
+    fn rank_in(&self) -> Rank {
+        self.key.rank_in()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.key.rank_out()
+    }
+
+    fn digits(&self) -> Digits {
+        self.key.digits()
+    }
+
+    fn rows(&self) -> Rows {
+        self.key.rows()
+    }
+}
+
+impl<B: Backend> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
+    pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAlloc<B>,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared"
+        );
+        GGLWEAutomorphismKeyPrepared::<Vec<u8>, B> {
+            key: GGLWESwitchingKeyPrepared::alloc(module, infos),
+            p: 0,
+        }
+    }
+
+    pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self
+    where
+        Module<B>: VmpPMatAlloc<B>,
+    {
+        GGLWEAutomorphismKeyPrepared {
+            key: GGLWESwitchingKeyPrepared::alloc_with(module, base2k, k, rows, digits, rank, rank),
+            p: 0,
+        }
+    }
+
+    pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAllocBytes,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWEAutomorphismKeyPrepared"
+        );
+        GGLWESwitchingKeyPrepared::alloc_bytes(module, infos)
+    }
+
+    pub fn alloc_bytes_with(
+        module: &Module<B>,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank: Rank,
+    ) -> usize
+    where
+        Module<B>: VmpPMatAllocBytes,
+    {
+        GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, digits, rank, rank)
+    }
+}

 impl<D: DataMut, DR: DataRef, B: Backend> Prepare<B, GGLWEAutomorphismKey<DR>> for GGLWEAutomorphismKeyPrepared<D, B>
@@ -86,14 +132,7 @@ where
     Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWEAutomorphismKeyPrepared<Vec<u8>, B> {
-        let mut atk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> = GGLWEAutomorphismKeyPrepared::alloc(
-            module,
-            self.basek(),
-            self.k(),
-            self.rows(),
-            self.digits(),
-            self.rank(),
-        );
+        let mut atk_prepared: GGLWEAutomorphismKeyPrepared<Vec<u8>, B> = GGLWEAutomorphismKeyPrepared::alloc(module, self);
         atk_prepared.prepare(module, self, scratch);
         atk_prepared
     }
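Since the source key now implements GGLWELayoutInfos itself, prepare_alloc above can size the prepared key straight from `self`. A sketch of the explicit alloc_with path, reusing the FFT64Spqlios backend from the example at the top of this commit; paths, trait-bound availability, and parameters are assumptions:

use poulpy_backend::cpu_spqlios::FFT64Spqlios;
use poulpy_core::layouts::{
    Base2K, Digits, Rank, Rows, TorusPrecision,
    prepared::GGLWEAutomorphismKeyPrepared,
};
use poulpy_hal::layouts::Module;

fn sketch(module: &Module<FFT64Spqlios>) {
    // alloc_with passes `rank` for both rank_in and rank_out, so the
    // rank_in == rank_out restriction holds by construction.
    let _atk = GGLWEAutomorphismKeyPrepared::alloc_with(
        module,
        Base2K(18),
        TorusPrecision(54),
        Rows(3),
        Digits(1),
        Rank(2),
    );
}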
@@ -1,115 +1,262 @@
 use poulpy_hal::{
     api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
-    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
+    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat, ZnxInfos},
+    oep::VmpPMatAllocBytesImpl,
 };

 use crate::layouts::{
-    GGLWECiphertext, Infos,
+    Base2K, BuildError, Degree, Digits, GGLWECiphertext, GGLWELayoutInfos, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
     prepared::{Prepare, PrepareAlloc},
 };

 #[derive(PartialEq, Eq)]
 pub struct GGLWECiphertextPrepared<D: Data, B: Backend> {
     pub(crate) data: VmpPMat<D, B>,
-    pub(crate) basek: usize,
-    pub(crate) k: usize,
-    pub(crate) digits: usize,
+    pub(crate) k: TorusPrecision,
+    pub(crate) base2k: Base2K,
+    pub(crate) digits: Digits,
+}
+
+impl<D: Data, B: Backend> LWEInfos for GGLWECiphertextPrepared<D, B> {
+    fn n(&self) -> Degree {
+        Degree(self.data.n() as u32)
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.base2k
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.k
+    }
+
+    fn size(&self) -> usize {
+        self.data.size()
+    }
+}
+
+impl<D: Data, B: Backend> GLWEInfos for GGLWECiphertextPrepared<D, B> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWECiphertextPrepared<D, B> {
+    fn rank_in(&self) -> Rank {
+        Rank(self.data.cols_in() as u32)
+    }
+
+    fn rank_out(&self) -> Rank {
+        Rank(self.data.cols_out() as u32 - 1)
+    }
+
+    fn digits(&self) -> Digits {
+        self.digits
+    }
+
+    fn rows(&self) -> Rows {
+        Rows(self.data.rows() as u32)
+    }
+}
+
+pub struct GGLWECiphertextPreparedBuilder<D: Data, B: Backend> {
+    data: Option<VmpPMat<D, B>>,
+    base2k: Option<Base2K>,
+    k: Option<TorusPrecision>,
+    digits: Option<Digits>,
+}
+
+impl<D: Data, B: Backend> GGLWECiphertextPrepared<D, B> {
+    #[inline]
+    pub fn builder() -> GGLWECiphertextPreparedBuilder<D, B> {
+        GGLWECiphertextPreparedBuilder {
+            data: None,
+            base2k: None,
+            k: None,
+            digits: None,
+        }
+    }
+}
+
+impl<B: Backend> GGLWECiphertextPreparedBuilder<Vec<u8>, B> {
+    #[inline]
+    pub fn layout<A>(mut self, infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+        B: VmpPMatAllocBytesImpl<B>,
+    {
+        self.data = Some(VmpPMat::alloc(
+            infos.n().into(),
+            infos.rows().into(),
+            infos.rank_in().into(),
+            (infos.rank_out() + 1).into(),
+            infos.size(),
+        ));
+        self.base2k = Some(infos.base2k());
+        self.k = Some(infos.k());
+        self.digits = Some(infos.digits());
+        self
+    }
+}
+
+impl<D: Data, B: Backend> GGLWECiphertextPreparedBuilder<D, B> {
+    #[inline]
+    pub fn data(mut self, data: VmpPMat<D, B>) -> Self {
+        self.data = Some(data);
+        self
+    }
+
+    #[inline]
+    pub fn base2k(mut self, base2k: Base2K) -> Self {
+        self.base2k = Some(base2k);
+        self
+    }
+
+    #[inline]
+    pub fn k(mut self, k: TorusPrecision) -> Self {
+        self.k = Some(k);
+        self
+    }
+
+    #[inline]
+    pub fn digits(mut self, digits: Digits) -> Self {
+        self.digits = Some(digits);
+        self
+    }
+
+    pub fn build(self) -> Result<GGLWECiphertextPrepared<D, B>, BuildError> {
+        let data: VmpPMat<D, B> = self.data.ok_or(BuildError::MissingData)?;
+        let base2k: Base2K = self.base2k.ok_or(BuildError::MissingBase2K)?;
+        let k: TorusPrecision = self.k.ok_or(BuildError::MissingK)?;
+        let digits: Digits = self.digits.ok_or(BuildError::MissingDigits)?;
+
+        if base2k == 0_u32 {
+            return Err(BuildError::ZeroBase2K);
+        }
+
+        if digits == 0_u32 {
+            return Err(BuildError::ZeroDigits);
+        }
+
+        if k == 0_u32 {
+            return Err(BuildError::ZeroTorusPrecision);
+        }
+
+        if data.n() == 0 {
+            return Err(BuildError::ZeroDegree);
+        }
+
+        if data.cols() == 0 {
+            return Err(BuildError::ZeroCols);
+        }
+
+        if data.size() == 0 {
+            return Err(BuildError::ZeroLimbs);
+        }
+
+        Ok(GGLWECiphertextPrepared {
+            data,
+            base2k,
+            k,
+            digits,
+        })
+    }
 }

 impl<B: Backend> GGLWECiphertextPrepared<Vec<u8>, B> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
+    pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAlloc<B>,
+    {
+        debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()");
+        Self::alloc_with(
+            module,
+            infos.base2k(),
+            infos.k(),
+            infos.rows(),
+            infos.digits(),
+            infos.rank_in(),
+            infos.rank_out(),
+        )
+    }
+
+    pub fn alloc_with(
+        module: &Module<B>,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
+    ) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
-        let size: usize = k.div_ceil(basek);
+        let size: usize = k.0.div_ceil(base2k.0) as usize;
         debug_assert!(
-            size > digits,
-            "invalid gglwe: ceil(k/basek): {} <= digits: {}",
-            size,
-            digits
+            size as u32 > digits.0,
+            "invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
+            digits.0
         );

         assert!(
-            rows * digits <= size,
-            "invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
-            rows,
-            digits,
-            size
+            rows.0 * digits.0 <= size as u32,
+            "invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
+            rows.0,
+            digits.0,
         );

         Self {
-            data: module.vmp_pmat_alloc(rows, rank_in, rank_out + 1, size),
-            basek,
+            data: module.vmp_pmat_alloc(rows.into(), rank_in.into(), (rank_out + 1).into(), size),
             k,
+            base2k,
             digits,
         }
     }

-    #[allow(clippy::too_many_arguments)]
-    pub fn bytes_of(
+    pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAllocBytes,
+    {
+        debug_assert_eq!(module.n(), infos.n().0 as usize, "module.n() != infos.n()");
+        Self::alloc_bytes_with(
+            module,
+            infos.base2k(),
+            infos.k(),
+            infos.rows(),
+            infos.digits(),
+            infos.rank_in(),
+            infos.rank_out(),
+        )
+    }
+
+    pub fn alloc_bytes_with(
         module: &Module<B>,
-        basek: usize,
-        k: usize,
-        rows: usize,
-        digits: usize,
-        rank_in: usize,
-        rank_out: usize,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
     ) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        let size: usize = k.div_ceil(basek);
+        let size: usize = k.0.div_ceil(base2k.0) as usize;
         debug_assert!(
-            size > digits,
-            "invalid gglwe: ceil(k/basek): {} <= digits: {}",
-            size,
-            digits
+            size as u32 > digits.0,
+            "invalid gglwe: ceil(k/base2k): {size} <= digits: {}",
+            digits.0
        );

         assert!(
-            rows * digits <= size,
-            "invalid gglwe: rows: {} * digits:{} > ceil(k/basek): {}",
-            rows,
-            digits,
-            size
+            rows.0 * digits.0 <= size as u32,
+            "invalid gglwe: rows: {} * digits:{} > ceil(k/base2k): {size}",
+            rows.0,
+            digits.0,
         );

-        module.vmp_pmat_alloc_bytes(rows, rank_in, rank_out + 1, rows)
+        module.vmp_pmat_alloc_bytes(rows.into(), rank_in.into(), (rank_out + 1).into(), size)
     }
 }

-impl<D: Data, B: Backend> Infos for GGLWECiphertextPrepared<D, B> {
-    type Inner = VmpPMat<D, B>;
-
-    fn inner(&self) -> &Self::Inner {
-        &self.data
-    }
-
-    fn basek(&self) -> usize {
-        self.basek
-    }
-
-    fn k(&self) -> usize {
-        self.k
-    }
-}
-
-impl<D: Data, B: Backend> GGLWECiphertextPrepared<D, B> {
-    pub fn rank(&self) -> usize {
-        self.data.cols_out() - 1
-    }
-
-    pub fn digits(&self) -> usize {
-        self.digits
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.data.cols_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.data.cols_out() - 1
-    }
-}
-
@@ -119,8 +266,8 @@ where
 {
     fn prepare(&mut self, module: &Module<B>, other: &GGLWECiphertext<DR>, scratch: &mut Scratch<B>) {
         module.vmp_prepare(&mut self.data, &other.data, scratch);
-        self.basek = other.basek;
         self.k = other.k;
+        self.base2k = other.base2k;
         self.digits = other.digits;
     }
 }
@@ -130,15 +277,7 @@ where
     Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWECiphertextPrepared<Vec<u8>, B> {
-        let mut atk_prepared: GGLWECiphertextPrepared<Vec<u8>, B> = GGLWECiphertextPrepared::alloc(
-            module,
-            self.basek(),
-            self.k(),
-            self.rows(),
-            self.digits(),
-            self.rank_in(),
-            self.rank_out(),
-        );
+        let mut atk_prepared: GGLWECiphertextPrepared<Vec<u8>, B> = GGLWECiphertextPrepared::alloc(module, self);
         atk_prepared.prepare(module, self, scratch);
         atk_prepared
     }
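A worked instance of the two invariants asserted in alloc_with above (hypothetical parameters, plain std Rust):

fn sketch() {
    // base2k = 18, k = 72  =>  size = ceil(72 / 18) = 4 limbs.
    let size: u32 = 72u32.div_ceil(18);
    assert_eq!(size, 4);

    // digits = 2: the decomposition digit count must stay below the
    // limb count, size > digits  =>  4 > 2 holds.
    let digits: u32 = 2;
    assert!(size > digits);

    // rows = 2: every row consumes `digits` limbs, so rows * digits
    // must fit, 2 * 2 = 4 <= 4 holds.
    let rows: u32 = 2;
    assert!(rows * digits <= size);
}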
@@ -1,10 +1,10 @@
|
|||||||
use poulpy_hal::{
|
use poulpy_hal::{
|
||||||
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
|
api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
|
||||||
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
|
layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::layouts::{
|
use crate::layouts::{
|
||||||
GGLWESwitchingKey, Infos,
|
Base2K, Degree, Digits, GGLWELayoutInfos, GGLWESwitchingKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
|
||||||
prepared::{GGLWECiphertextPrepared, Prepare, PrepareAlloc},
|
prepared::{GGLWECiphertextPrepared, Prepare, PrepareAlloc},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -15,75 +15,103 @@ pub struct GGLWESwitchingKeyPrepared<D: Data, B: Backend> {
|
|||||||
pub(crate) sk_out_n: usize, // Degree of sk_out
|
pub(crate) sk_out_n: usize, // Degree of sk_out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<D: Data, B: Backend> LWEInfos for GGLWESwitchingKeyPrepared<D, B> {
|
||||||
|
fn n(&self) -> Degree {
|
||||||
|
self.key.n()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn base2k(&self) -> Base2K {
|
||||||
|
self.key.base2k()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn k(&self) -> TorusPrecision {
|
||||||
|
self.key.k()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn size(&self) -> usize {
|
||||||
|
self.key.size()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Data, B: Backend> GLWEInfos for GGLWESwitchingKeyPrepared<D, B> {
|
||||||
|
fn rank(&self) -> Rank {
|
||||||
|
self.rank_out()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWESwitchingKeyPrepared<D, B> {
|
||||||
|
fn rank_in(&self) -> Rank {
|
||||||
|
self.key.rank_in()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rank_out(&self) -> Rank {
|
||||||
|
self.key.rank_out()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn digits(&self) -> Digits {
|
||||||
|
self.key.digits()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rows(&self) -> Rows {
|
||||||
|
self.key.rows()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
 impl<B: Backend> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
-    #[allow(clippy::too_many_arguments)]
-    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank_in: usize, rank_out: usize) -> Self
+    pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
     where
+        A: GGLWELayoutInfos,
         Module<B>: VmpPMatAlloc<B>,
     {
+        debug_assert_eq!(module.n() as u32, infos.n(), "module.n() != infos.n()");
         GGLWESwitchingKeyPrepared::<Vec<u8>, B> {
-            key: GGLWECiphertextPrepared::alloc(module, basek, k, rows, digits, rank_in, rank_out),
+            key: GGLWECiphertextPrepared::alloc(module, infos),
             sk_in_n: 0,
             sk_out_n: 0,
         }
     }
 
-    #[allow(clippy::too_many_arguments)]
-    pub fn bytes_of(
+    pub fn alloc_with(
         module: &Module<B>,
-        basek: usize,
-        k: usize,
-        rows: usize,
-        digits: usize,
-        rank_in: usize,
-        rank_out: usize,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
+    ) -> Self
+    where
+        Module<B>: VmpPMatAlloc<B>,
+    {
+        GGLWESwitchingKeyPrepared::<Vec<u8>, B> {
+            key: GGLWECiphertextPrepared::alloc_with(module, base2k, k, rows, digits, rank_in, rank_out),
+            sk_in_n: 0,
+            sk_out_n: 0,
+        }
+    }
+
+    pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAllocBytes,
+    {
+        debug_assert_eq!(module.n() as u32, infos.n(), "module.n() != infos.n()");
+        GGLWECiphertextPrepared::alloc_bytes(module, infos)
+    }
+
+    pub fn alloc_bytes_with(
+        module: &Module<B>,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank_in: Rank,
+        rank_out: Rank,
     ) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        GGLWECiphertextPrepared::bytes_of(module, basek, k, rows, digits, rank_in, rank_out)
+        GGLWECiphertextPrepared::alloc_bytes_with(module, base2k, k, rows, digits, rank_in, rank_out)
     }
 }
 
-impl<D: Data, B: Backend> Infos for GGLWESwitchingKeyPrepared<D, B> {
-    type Inner = VmpPMat<D, B>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.key.inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.key.basek()
-    }
-
-    fn k(&self) -> usize {
-        self.key.k()
-    }
-}
-
-impl<D: Data, B: Backend> GGLWESwitchingKeyPrepared<D, B> {
-    pub fn rank(&self) -> usize {
-        self.key.data.cols_out() - 1
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.key.data.cols_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.key.data.cols_out() - 1
-    }
-
-    pub fn digits(&self) -> usize {
-        self.key.digits()
-    }
-
-    pub fn sk_degree_in(&self) -> usize {
-        self.sk_in_n
-    }
-
-    pub fn sk_degree_out(&self) -> usize {
-        self.sk_out_n
-    }
-}
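Note: after this refactor the switching key has two allocation paths: a layout-driven alloc that reads every parameter from any GGLWELayoutInfos implementor, and an explicit alloc_with that takes the typed wrappers directly. A minimal caller-side sketch follows; the wrapper constructors (Base2K(..), TorusPrecision(..), Rows(..), Digits(..)) are assumed to be tuple structs like the Rank(1) seen in this diff, and existing_key stands for any value implementing GGLWELayoutInfos.

    // Sketch only, not crate-verified API.
    // Layout-driven: copy the geometry of an existing GGLWELayoutInfos implementor.
    let ksk_a = GGLWESwitchingKeyPrepared::<Vec<u8>, _>::alloc(&module, &existing_key);

    // Explicit: spell out each parameter with the typed wrappers (constructors assumed).
    let ksk_b = GGLWESwitchingKeyPrepared::<Vec<u8>, _>::alloc_with(
        &module,
        Base2K(18),         // limb size in bits
        TorusPrecision(54), // total torus precision k
        Rows(3),
        Digits(1),
        Rank(1), // rank_in
        Rank(2), // rank_out
    );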
@@ -103,15 +131,7 @@ where
     Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWESwitchingKeyPrepared<Vec<u8>, B> {
-        let mut atk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, B> = GGLWESwitchingKeyPrepared::alloc(
-            module,
-            self.basek(),
-            self.k(),
-            self.rows(),
-            self.digits(),
-            self.rank_in(),
-            self.rank_out(),
-        );
+        let mut atk_prepared: GGLWESwitchingKeyPrepared<Vec<u8>, B> = GGLWESwitchingKeyPrepared::alloc(module, self);
         atk_prepared.prepare(module, self, scratch);
         atk_prepared
     }
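Note: prepare_alloc shrinks from a nine-line argument list to a single call because the unprepared key now implements GGLWELayoutInfos itself, so it can serve as its own layout descriptor. A hypothetical helper illustrating the pattern (not part of the crate):

    // Hypothetical sketch: any layout source can drive allocation,
    // including the very key about to be prepared.
    fn alloc_like<B: Backend, A: GGLWELayoutInfos>(
        module: &Module<B>,
        layout_src: &A,
    ) -> GGLWESwitchingKeyPrepared<Vec<u8>, B>
    where
        Module<B>: VmpPMatAlloc<B>,
    {
        GGLWESwitchingKeyPrepared::alloc(module, layout_src)
    }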
@@ -1,10 +1,10 @@
 use poulpy_hal::{
     api::{VmpPMatAlloc, VmpPMatAllocBytes, VmpPrepare},
-    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch, VmpPMat},
+    layouts::{Backend, Data, DataMut, DataRef, Module, Scratch},
 };
 
 use crate::layouts::{
-    GGLWETensorKey, Infos,
+    Base2K, Degree, Digits, GGLWELayoutInfos, GGLWETensorKey, GLWEInfos, LWEInfos, Rank, Rows, TorusPrecision,
     prepared::{GGLWESwitchingKeyPrepared, Prepare, PrepareAlloc},
 };
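Note: the widened import list is the visible cost of replacing bare usize parameters with newtypes. Judging from Rank(1), rank.0, and .into() elsewhere in this diff, the wrappers behave like transparent tuple structs; a sketch of the assumed shape (the real definitions live in poulpy-core and may carry more derives):

    // Assumed shape only, inferred from usage in this diff (Rank(1), rank.0, .into()).
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    pub struct Rank(pub u32);

    impl From<Rank> for usize {
        fn from(r: Rank) -> usize {
            r.0 as usize // matches `let rank: usize = self.rank_out().into();` below
        }
    }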
@@ -13,61 +13,126 @@ pub struct GGLWETensorKeyPrepared<D: Data, B: Backend> {
     pub(crate) keys: Vec<GGLWESwitchingKeyPrepared<D, B>>,
 }
 
+impl<D: Data, B: Backend> LWEInfos for GGLWETensorKeyPrepared<D, B> {
+    fn n(&self) -> Degree {
+        self.keys[0].n()
+    }
+
+    fn base2k(&self) -> Base2K {
+        self.keys[0].base2k()
+    }
+
+    fn k(&self) -> TorusPrecision {
+        self.keys[0].k()
+    }
+
+    fn size(&self) -> usize {
+        self.keys[0].size()
+    }
+}
+
+impl<D: Data, B: Backend> GLWEInfos for GGLWETensorKeyPrepared<D, B> {
+    fn rank(&self) -> Rank {
+        self.rank_out()
+    }
+}
+
+impl<D: Data, B: Backend> GGLWELayoutInfos for GGLWETensorKeyPrepared<D, B> {
+    fn rank_in(&self) -> Rank {
+        self.rank_out()
+    }
+
+    fn rank_out(&self) -> Rank {
+        self.keys[0].rank_out()
+    }
+
+    fn digits(&self) -> Digits {
+        self.keys[0].digits()
+    }
+
+    fn rows(&self) -> Rows {
+        self.keys[0].rows()
+    }
+}
+
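Note: all three impls delegate to keys[0], which is sound because every switching key inside a tensor key is allocated with identical geometry (see alloc_with below). The trait split, as used here, looks roughly like the following sketch; the supertrait relationships are an assumption, while the method sets are taken from the impls above (the actual definitions live in poulpy-core's layouts module):

    // Assumed trait shape, reconstructed from the impls above.
    trait LWEInfosSketch {
        fn n(&self) -> Degree;
        fn base2k(&self) -> Base2K;
        fn k(&self) -> TorusPrecision;
        fn size(&self) -> usize;
    }
    trait GLWEInfosSketch: LWEInfosSketch {
        fn rank(&self) -> Rank;
    }
    trait GGLWELayoutInfosSketch: GLWEInfosSketch {
        fn rank_in(&self) -> Rank;
        fn rank_out(&self) -> Rank;
        fn digits(&self) -> Digits;
        fn rows(&self) -> Rows;
    }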
 impl<B: Backend> GGLWETensorKeyPrepared<Vec<u8>, B> {
-    pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> Self
+    pub fn alloc<A>(module: &Module<B>, infos: &A) -> Self
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAlloc<B>,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWETensorKeyPrepared"
+        );
+        Self::alloc_with(
+            module,
+            infos.base2k(),
+            infos.k(),
+            infos.rows(),
+            infos.digits(),
+            infos.rank_out(),
+        )
+    }
+
+    pub fn alloc_with(module: &Module<B>, base2k: Base2K, k: TorusPrecision, rows: Rows, digits: Digits, rank: Rank) -> Self
     where
         Module<B>: VmpPMatAlloc<B>,
     {
         let mut keys: Vec<GGLWESwitchingKeyPrepared<Vec<u8>, B>> = Vec::new();
-        let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
+        let pairs: u32 = (((rank.0 + 1) * rank.0) >> 1).max(1);
         (0..pairs).for_each(|_| {
-            keys.push(GGLWESwitchingKeyPrepared::alloc(
-                module, basek, k, rows, digits, 1, rank,
+            keys.push(GGLWESwitchingKeyPrepared::alloc_with(
+                module,
+                base2k,
+                k,
+                rows,
+                digits,
+                Rank(1),
+                rank,
             ));
         });
         Self { keys }
     }
 
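Note: a tensor key stores one switching key per unordered pair of GLWE components, so pairs = rank(rank + 1)/2, clamped to at least 1 so the degenerate rank-0 case still allocates a key. The count used above, spelled out as a standalone check:

    // Same formula as in alloc_with above.
    fn pairs(rank: u32) -> u32 {
        (((rank + 1) * rank) >> 1).max(1)
    }
    // pairs(1) = 1, pairs(2) = 3, pairs(3) = 6; pairs(0) = 1 via .max(1).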
-    pub fn bytes_of(module: &Module<B>, basek: usize, k: usize, rows: usize, digits: usize, rank: usize) -> usize
+    pub fn alloc_bytes<A>(module: &Module<B>, infos: &A) -> usize
+    where
+        A: GGLWELayoutInfos,
+        Module<B>: VmpPMatAllocBytes,
+    {
+        assert_eq!(
+            infos.rank_in(),
+            infos.rank_out(),
+            "rank_in != rank_out is not supported for GGLWETensorKey"
+        );
+        let rank_out: usize = infos.rank_out().into();
+        let pairs: usize = (((rank_out + 1) * rank_out) >> 1).max(1);
+        pairs
+            * GGLWESwitchingKeyPrepared::alloc_bytes_with(
+                module,
+                infos.base2k(),
+                infos.k(),
+                infos.rows(),
+                infos.digits(),
+                Rank(1),
+                infos.rank_out(),
+            )
+    }
+
+    pub fn alloc_bytes_with(
+        module: &Module<B>,
+        base2k: Base2K,
+        k: TorusPrecision,
+        rows: Rows,
+        digits: Digits,
+        rank: Rank,
+    ) -> usize
     where
         Module<B>: VmpPMatAllocBytes,
     {
-        let pairs: usize = (((rank + 1) * rank) >> 1).max(1);
-        pairs * GGLWESwitchingKeyPrepared::bytes_of(module, basek, k, rows, digits, 1, rank)
+        let pairs: usize = (((rank.0 + 1) * rank.0) >> 1).max(1) as usize;
+        pairs * GGLWESwitchingKeyPrepared::alloc_bytes_with(module, base2k, k, rows, digits, Rank(1), rank)
     }
 }
 
-impl<D: Data, B: Backend> Infos for GGLWETensorKeyPrepared<D, B> {
-    type Inner = VmpPMat<D, B>;
-
-    fn inner(&self) -> &Self::Inner {
-        self.keys[0].inner()
-    }
-
-    fn basek(&self) -> usize {
-        self.keys[0].basek()
-    }
-
-    fn k(&self) -> usize {
-        self.keys[0].k()
-    }
-}
-
-impl<D: Data, B: Backend> GGLWETensorKeyPrepared<D, B> {
-    pub fn rank(&self) -> usize {
-        self.keys[0].rank()
-    }
-
-    pub fn rank_in(&self) -> usize {
-        self.keys[0].rank_in()
-    }
-
-    pub fn rank_out(&self) -> usize {
-        self.keys[0].rank_out()
-    }
-
-    pub fn digits(&self) -> usize {
-        self.keys[0].digits()
-    }
-}
@@ -77,7 +142,7 @@ impl<D: DataMut, B: Backend> GGLWETensorKeyPrepared<D, B> {
         if i > j {
             std::mem::swap(&mut i, &mut j);
         };
-        let rank: usize = self.rank();
+        let rank: usize = self.rank_out().into();
         &mut self.keys[i * rank + j - (i * (i + 1) / 2)]
     }
 }
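Note: this accessor maps the unordered pair (i, j), i <= j, into the flat keys vector with the row-major upper-triangle formula i * rank + j - i(i + 1)/2. For rank = 3 this enumerates (0,0) -> 0, (0,1) -> 1, (0,2) -> 2, (1,1) -> 3, (1,2) -> 4, (2,2) -> 5, exactly the six slots allocated above. A standalone sketch of the same computation:

    // Standalone check of the triangular index used above (rank assumed >= 1).
    fn flat_index(mut i: usize, mut j: usize, rank: usize) -> usize {
        if i > j {
            std::mem::swap(&mut i, &mut j);
        }
        i * rank + j - i * (i + 1) / 2
    }
    // flat_index(2, 1, 3) == flat_index(1, 2, 3) == 4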
@@ -88,7 +153,7 @@ impl<D: DataRef, B: Backend> GGLWETensorKeyPrepared<D, B> {
         if i > j {
             std::mem::swap(&mut i, &mut j);
         };
-        let rank: usize = self.rank();
+        let rank: usize = self.rank_out().into();
         &self.keys[i * rank + j - (i * (i + 1) / 2)]
     }
 }
@@ -116,14 +181,7 @@ where
     Module<B>: VmpPMatAlloc<B> + VmpPrepare<B>,
 {
     fn prepare_alloc(&self, module: &Module<B>, scratch: &mut Scratch<B>) -> GGLWETensorKeyPrepared<Vec<u8>, B> {
-        let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> = GGLWETensorKeyPrepared::alloc(
-            module,
-            self.basek(),
-            self.k(),
-            self.rows(),
-            self.digits(),
-            self.rank(),
-        );
+        let mut tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, B> = GGLWETensorKeyPrepared::alloc(module, self);
         tsk_prepared.prepare(module, self, scratch);
         tsk_prepared
     }
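Note: the tensor-key prepare_alloc collapses the same way as the switching-key one above. End-to-end, the intended call flow after this commit might look like the sketch below; tsk (a generated GGLWETensorKey) and the scratch borrow are assumed to be set up as in the crate's examples.

    // Sketch under assumptions: prepare_alloc reads every layout parameter
    // from the key itself, so no geometry arguments are repeated.
    let tsk_prepared: GGLWETensorKeyPrepared<Vec<u8>, FFT64Spqlios> =
        tsk.prepare_alloc(&module, scratch.borrow());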
Some files were not shown because too many files have changed in this diff.