Add cross-basek normalization (#90)

* added cross_basek_normalization

* updated method signatures to take layouts

* fixed cross-base normalization

fix #91
fix #93
Author: Jean-Philippe Bossuat
Date: 2025-09-30 14:40:10 +02:00 (committed by GitHub)
Parent: 4da790ea6a
Commit: 37e13b965c
216 changed files with 12481 additions and 7745 deletions
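
At a glance, the headline change: normalization entry points now take the output base-2^k and the input base-2^k as separate arguments, so a vector can be re-normalized from one base-2^k limb decomposition into another. A minimal sketch of a genuinely cross-base call; the argument roles are my reading of the hunks below, and the concrete bases (17 and 12) are illustrative:

// Hedged sketch: re-normalize `a_big`, whose limbs are in base 2^17, into
// `res`, whose limbs are in base 2^12. The tests in this commit pass the
// same value on both sides; passing different values is what #90 enables.
for j in 0..cols {
    module.vec_znx_big_normalize(
        12,       // base2k of the output `res`
        &mut res,
        j,
        17,       // base2k of the input `a_big`
        &a_big,
        j,
        scratch.borrow(),
    );
}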


@@ -41,7 +41,7 @@ macro_rules! cross_backend_test_suite {
backend_ref = $backend_ref:ty,
backend_test = $backend_test:ty,
size = $size:expr,
basek = $basek:expr,
base2k = $base2k:expr,
tests = {
$( $(#[$attr:meta])* $test_name:ident => $impl:path ),+ $(,)?
}
@@ -60,7 +60,7 @@ macro_rules! cross_backend_test_suite {
$(#[$attr])*
#[test]
fn $test_name() {
($impl)($basek, &*MODULE_REF, &*MODULE_TEST);
($impl)($base2k, &*MODULE_REF, &*MODULE_TEST);
}
)+
}
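
For context, a hypothetical invocation of the renamed macro; every name here except the `base2k` key is a placeholder, since this hunk only renames the parameter:

// Placeholder invocation: backend types, size, and test paths are invented.
cross_backend_test_suite! {
    backend_ref = FFT64Ref,
    backend_test = FFT64Avx,
    size = 1 << 10,
    base2k = 17,
    tests = {
        test_vec_znx_add => crate::tests::vec_znx::test_vec_znx_add,
        #[ignore] test_vec_znx_sub => crate::tests::vec_znx::test_vec_znx_sub,
    }
}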


@@ -1,7 +1,7 @@
use std::fmt::Debug;
use crate::{
layouts::{FillUniform, ReaderFrom, Reset, WriterTo},
layouts::{FillUniform, ReaderFrom, WriterTo},
source::Source,
};
@@ -10,7 +10,7 @@ use crate::{
/// - `T` must implement I/O traits, zeroing, cloning, and random filling.
pub fn test_reader_writer_interface<T>(mut original: T)
where
T: WriterTo + ReaderFrom + PartialEq + Eq + Debug + Clone + Reset + FillUniform,
T: WriterTo + ReaderFrom + PartialEq + Eq + Debug + Clone + FillUniform,
{
// Fill original with uniform random data
let mut source = Source::new([0u8; 32]);
@@ -20,9 +20,9 @@ where
let mut buffer = Vec::new();
original.write_to(&mut buffer).expect("write_to failed");
// Prepare receiver: same shape, but zeroed
// Prepare receiver: same shape, but randomized
let mut receiver = original.clone();
receiver.reset();
receiver.fill_uniform(50, &mut source);
// Deserialize from buffer
let mut reader: &[u8] = &buffer;
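
Randomizing the receiver instead of zeroing it makes the round trip stricter: a zeroed receiver could pass even if deserialization silently wrote nothing, whereas a uniformly randomized receiver only equals the original once the bytes are actually read back. Condensed, the pattern is (the `read_from` method name is my assumption from the `ReaderFrom` trait name):

// Condensed restatement of the round-trip test above.
let mut source = Source::new([0u8; 32]);
original.fill_uniform(50, &mut source); // random payload
let mut buffer = Vec::new();
original.write_to(&mut buffer).expect("write_to failed");
let mut receiver = original.clone();
receiver.fill_uniform(50, &mut source); // garbage, not zeros
let mut reader: &[u8] = &buffer;
receiver.read_from(&mut reader).expect("read_from failed"); // assumed method name
assert_eq!(original, receiver);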


@@ -10,7 +10,7 @@ use crate::{
source::Source,
};
pub fn test_svp_apply_dft<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_svp_apply_dft<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: SvpPrepare<BR>
+ SvpApplyDft<BR>
@@ -40,7 +40,7 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
let mut scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
scalar.fill_uniform(basek, &mut source);
scalar.fill_uniform(base2k, &mut source);
let scalar_digest: u64 = scalar.digest_u64();
@@ -60,7 +60,7 @@ where
for a_size in [1, 2, 3, 4] {
// Create a random input VecZnx
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
@@ -91,17 +91,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -113,7 +115,7 @@ where
}
}
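
The edit repeated throughout these hunks, spelled out once; the argument roles are my reading of the new signature:

// Old: vec_znx_big_normalize(base2k, &mut res, j, &res_big, j, scratch)
// New: the output base and the input base are passed independently.
module_ref.vec_znx_big_normalize(
    base2k,        // base2k of the output `res_ref`
    &mut res_ref,
    j,
    base2k,        // base2k of the input `res_big_ref`
    &res_big_ref,
    j,
    scratch_ref.borrow(),
);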
pub fn test_svp_apply_dft_to_dft<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_svp_apply_dft_to_dft<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: SvpPrepare<BR>
+ SvpApplyDftToDft<BR>
@@ -145,7 +147,7 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
let mut scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
scalar.fill_uniform(basek, &mut source);
scalar.fill_uniform(base2k, &mut source);
let scalar_digest: u64 = scalar.digest_u64();
@@ -165,7 +167,7 @@ where
for a_size in [3] {
// Create a random input VecZnx
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
@@ -211,17 +213,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -233,7 +237,7 @@ where
}
}
pub fn test_svp_apply_dft_to_dft_add<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_svp_apply_dft_to_dft_add<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: SvpPrepare<BR>
+ SvpApplyDftToDftAdd<BR>
@@ -265,7 +269,7 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
let mut scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
scalar.fill_uniform(basek, &mut source);
scalar.fill_uniform(base2k, &mut source);
let scalar_digest: u64 = scalar.digest_u64();
@@ -285,7 +289,7 @@ where
for a_size in [1, 2, 3, 4] {
// Create a random input VecZnx
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
@@ -302,7 +306,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, res_size);
let mut res_dft_test: VecZnxDft<Vec<u8>, BT> = module_test.vec_znx_dft_alloc(cols, res_size);
@@ -336,17 +340,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -359,7 +365,7 @@ where
}
pub fn test_svp_apply_dft_to_dft_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
@@ -393,7 +399,7 @@ pub fn test_svp_apply_dft_to_dft_inplace<BR: Backend, BT: Backend>(
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
let mut scalar: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
scalar.fill_uniform(basek, &mut source);
scalar.fill_uniform(base2k, &mut source);
let scalar_digest: u64 = scalar.digest_u64();
@@ -412,7 +418,7 @@ pub fn test_svp_apply_dft_to_dft_inplace<BR: Backend, BT: Backend>(
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let res_digest: u64 = res.digest_u64();
let mut res_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, res_size);
@@ -442,17 +448,19 @@ pub fn test_svp_apply_dft_to_dft_inplace<BR: Backend, BT: Backend>(
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),


@@ -8,38 +8,18 @@ use crate::{
VecZnxMergeRingsTmpBytes, VecZnxMulXpMinusOne, VecZnxMulXpMinusOneInplace, VecZnxMulXpMinusOneInplaceTmpBytes,
VecZnxNegate, VecZnxNegateInplace, VecZnxNormalize, VecZnxNormalizeInplace, VecZnxNormalizeTmpBytes, VecZnxRotate,
VecZnxRotateInplace, VecZnxRotateInplaceTmpBytes, VecZnxRsh, VecZnxRshInplace, VecZnxSplitRing, VecZnxSplitRingTmpBytes,
VecZnxSub, VecZnxSubABInplace, VecZnxSubBAInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
VecZnxSub, VecZnxSubInplace, VecZnxSubNegateInplace, VecZnxSubScalar, VecZnxSubScalarInplace, VecZnxSwitchRing,
},
layouts::{Backend, DigestU64, FillUniform, Module, ScalarZnx, ScratchOwned, VecZnx, ZnxInfos, ZnxView, ZnxViewMut},
reference::znx::znx_copy_ref,
source::Source,
};
pub fn test_vec_znx_encode_vec_i64_lo_norm() {
pub fn test_vec_znx_encode_vec_i64() {
let n: usize = 32;
let basek: usize = 17;
let base2k: usize = 17;
let size: usize = 5;
let k: usize = size * basek - 5;
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, 2, size);
let mut source: Source = Source::new([0u8; 32]);
let raw: &mut [i64] = a.raw_mut();
raw.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
(0..a.cols()).for_each(|col_i| {
let mut have: Vec<i64> = vec![i64::default(); n];
have.iter_mut()
.for_each(|x| *x = (source.next_i64() << 56) >> 56);
a.encode_vec_i64(basek, col_i, k, &have, 10);
let mut want: Vec<i64> = vec![i64::default(); n];
a.decode_vec_i64(basek, col_i, k, &mut want);
assert_eq!(have, want, "{:?} != {:?}", &have, &want);
});
}
pub fn test_vec_znx_encode_vec_i64_hi_norm() {
let n: usize = 32;
let basek: usize = 17;
let size: usize = 5;
for k in [1, basek / 2, size * basek - 5] {
for k in [1, base2k / 2, size * base2k - 5] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, 2, size);
let mut source = Source::new([0u8; 32]);
let raw: &mut [i64] = a.raw_mut();
@@ -53,15 +33,15 @@ pub fn test_vec_znx_encode_vec_i64_hi_norm() {
*x = source.next_i64();
}
});
a.encode_vec_i64(basek, col_i, k, &have, 63);
a.encode_vec_i64(base2k, col_i, k, &have);
let mut want: Vec<i64> = vec![i64::default(); n];
a.decode_vec_i64(basek, col_i, k, &mut want);
a.decode_vec_i64(base2k, col_i, k, &mut want);
assert_eq!(have, want, "{:?} != {:?}", &have, &want);
})
}
}
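
The `_lo_norm` and `_hi_norm` encode tests are folded into one, and `encode_vec_i64` drops its trailing bit-width argument. A minimal round trip under the new signature, restating the merged test with small deterministic values:

// Minimal round trip with the new signature (no trailing bit-width arg).
let (n, base2k, size) = (32usize, 17usize, 5usize);
let k = size * base2k - 5; // 80 bits of precision, plenty for values < 32
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, 1, size);
let have: Vec<i64> = (0..n as i64).collect();
a.encode_vec_i64(base2k, 0, k, &have);
let mut want = vec![0i64; n];
a.decode_vec_i64(base2k, 0, k, &mut want);
assert_eq!(have, want);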
pub fn test_vec_znx_add_scalar<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_add_scalar<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxAddScalar,
Module<BT>: VecZnxAddScalar,
@@ -74,12 +54,12 @@ where
let cols: usize = 2;
let mut a: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
for a_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -87,8 +67,8 @@ where
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
rest_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
rest_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
@@ -103,7 +83,7 @@ where
}
}
pub fn test_vec_znx_add_scalar_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_add_scalar_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxAddScalarInplace,
Module<BT>: VecZnxAddScalarInplace,
@@ -116,14 +96,14 @@ where
let cols: usize = 2;
let mut b: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut rest_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
rest_ref.fill_uniform(basek, &mut source);
rest_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(rest_ref.raw());
for i in 0..cols {
@@ -135,7 +115,7 @@ where
assert_eq!(rest_ref, res_test);
}
}
pub fn test_vec_znx_add<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_add<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxAdd,
Module<BT>: VecZnxAdd,
@@ -148,13 +128,13 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
@@ -163,8 +143,8 @@ where
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
@@ -181,7 +161,7 @@ where
}
}
pub fn test_vec_znx_add_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_add_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxAddInplace,
Module<BT>: VecZnxAddInplace,
@@ -194,14 +174,14 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
@@ -215,7 +195,7 @@ where
}
}
pub fn test_vec_znx_automorphism<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_automorphism<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxAutomorphism,
Module<BT>: VecZnxAutomorphism,
@@ -228,7 +208,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -261,7 +241,7 @@ where
}
pub fn test_vec_znx_automorphism_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
@@ -284,7 +264,7 @@ pub fn test_vec_znx_automorphism_inplace<BR: Backend, BT: Backend>(
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
// Fill a with random i64
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
znx_copy_ref(res_test.raw_mut(), res_ref.raw());
let p: i64 = -7;
@@ -309,7 +289,7 @@ pub fn test_vec_znx_automorphism_inplace<BR: Backend, BT: Backend>(
}
}
pub fn test_vec_znx_copy<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_copy<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxCopy,
Module<BT>: VecZnxCopy,
@@ -322,7 +302,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -330,8 +310,8 @@ where
let mut res_1: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_0.fill_uniform(basek, &mut source);
res_1.fill_uniform(basek, &mut source);
res_0.fill_uniform(base2k, &mut source);
res_1.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
@@ -345,7 +325,7 @@ where
}
}
pub fn test_vec_znx_merge_rings<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_merge_rings<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxMergeRings<BR> + ModuleNew<BR> + VecZnxMergeRingsTmpBytes,
Module<BT>: VecZnxMergeRings<BT> + ModuleNew<BT> + VecZnxMergeRingsTmpBytes,
@@ -367,7 +347,7 @@ where
];
a.iter_mut().for_each(|ai| {
ai.fill_uniform(basek, &mut source);
ai.fill_uniform(base2k, &mut source);
});
let a_digests: [u64; 2] = [a[0].digest_u64(), a[1].digest_u64()];
@@ -376,8 +356,8 @@ where
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
for i in 0..cols {
module_ref.vec_znx_merge_rings(&mut res_test, i, &a, i, scratch_ref.borrow());
@@ -390,7 +370,7 @@ where
}
}
pub fn test_vec_znx_mul_xp_minus_one<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_mul_xp_minus_one<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxMulXpMinusOne,
Module<BT>: VecZnxMulXpMinusOne,
@@ -403,7 +383,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
@@ -437,7 +417,7 @@ where
}
pub fn test_vec_znx_mul_xp_minus_one_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
@@ -460,7 +440,7 @@ pub fn test_vec_znx_mul_xp_minus_one_inplace<BR: Backend, BT: Backend>(
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
// Fill a with random i64
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
znx_copy_ref(res_test.raw_mut(), res_ref.raw());
let p: i64 = -7;
@@ -483,7 +463,7 @@ pub fn test_vec_znx_mul_xp_minus_one_inplace<BR: Backend, BT: Backend>(
}
}
pub fn test_vec_znx_negate<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_negate<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxNegate,
Module<BT>: VecZnxNegate,
@@ -496,14 +476,14 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
@@ -517,7 +497,7 @@ where
}
}
pub fn test_vec_znx_negate_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_negate_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxNegateInplace,
Module<BT>: VecZnxNegateInplace,
@@ -532,7 +512,7 @@ where
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
@@ -544,7 +524,7 @@ where
}
}
pub fn test_vec_znx_normalize<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_normalize<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxNormalize<BR> + VecZnxNormalizeTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -562,7 +542,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -570,13 +550,21 @@ where
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
module_ref.vec_znx_normalize(basek, &mut res_ref, i, &a, i, scratch_ref.borrow());
module_test.vec_znx_normalize(basek, &mut res_test, i, &a, i, scratch_test.borrow());
module_ref.vec_znx_normalize(base2k, &mut res_ref, i, base2k, &a, i, scratch_ref.borrow());
module_test.vec_znx_normalize(
base2k,
&mut res_test,
i,
base2k,
&a,
i,
scratch_test.borrow(),
);
}
assert_eq!(a.digest_u64(), a_digest);
@@ -585,7 +573,7 @@ where
}
}
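
The plain (non-big) `vec_znx_normalize` gains the same dual-base shape, output base first, input base second:

// Same dual-base shape as the big variant; both bases coincide in this test.
module_ref.vec_znx_normalize(base2k, &mut res_ref, i, base2k, &a, i, scratch_ref.borrow());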
pub fn test_vec_znx_normalize_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_normalize_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxNormalizeInplace<BR> + VecZnxNormalizeTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -605,20 +593,20 @@ where
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
// Reference
for i in 0..cols {
module_ref.vec_znx_normalize_inplace(basek, &mut res_ref, i, scratch_ref.borrow());
module_test.vec_znx_normalize_inplace(basek, &mut res_test, i, scratch_test.borrow());
module_ref.vec_znx_normalize_inplace(base2k, &mut res_ref, i, scratch_ref.borrow());
module_test.vec_znx_normalize_inplace(base2k, &mut res_test, i, scratch_test.borrow());
}
assert_eq!(res_ref, res_test);
}
}
pub fn test_vec_znx_rotate<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_rotate<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxRotate,
Module<BT>: VecZnxRotate,
@@ -631,7 +619,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -663,7 +651,7 @@ where
}
}
pub fn test_vec_znx_rotate_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_rotate_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxRotateInplace<BR> + VecZnxRotateInplaceTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -684,7 +672,7 @@ where
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, size);
// Fill a with random i64
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
znx_copy_ref(res_test.raw_mut(), res_ref.raw());
let p: i64 = -5;
@@ -714,7 +702,7 @@ where
Module<B>: VecZnxFillUniform,
{
let n: usize = module.n();
let basek: usize = 17;
let base2k: usize = 17;
let size: usize = 5;
let mut source: Source = Source::new([0u8; 32]);
let cols: usize = 2;
@@ -722,19 +710,17 @@ where
let one_12_sqrt: f64 = 0.28867513459481287;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = VecZnx::alloc(n, cols, size);
module.vec_znx_fill_uniform(basek, &mut a, col_i, &mut source);
module.vec_znx_fill_uniform(base2k, &mut a, col_i, &mut source);
(0..cols).for_each(|col_j| {
if col_j != col_i {
(0..size).for_each(|limb_i| {
assert_eq!(a.at(col_j, limb_i), zero);
})
} else {
let std: f64 = a.std(basek, col_i);
let std: f64 = a.std(base2k, col_i);
assert!(
(std - one_12_sqrt).abs() < 0.01,
"std={} ~!= {}",
std,
one_12_sqrt
"std={std} ~!= {one_12_sqrt}",
);
}
})
@@ -746,7 +732,7 @@ where
Module<B>: VecZnxFillNormal,
{
let n: usize = module.n();
let basek: usize = 17;
let base2k: usize = 17;
let k: usize = 2 * 17;
let size: usize = 5;
let sigma: f64 = 3.2;
@@ -757,15 +743,15 @@ where
let k_f64: f64 = (1u64 << k as u64) as f64;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = VecZnx::alloc(n, cols, size);
module.vec_znx_fill_normal(basek, &mut a, col_i, k, &mut source, sigma, bound);
module.vec_znx_fill_normal(base2k, &mut a, col_i, k, &mut source, sigma, bound);
(0..cols).for_each(|col_j| {
if col_j != col_i {
(0..size).for_each(|limb_i| {
assert_eq!(a.at(col_j, limb_i), zero);
})
} else {
let std: f64 = a.std(basek, col_i) * k_f64;
assert!((std - sigma).abs() < 0.1, "std={} ~!= {}", std, sigma);
let std: f64 = a.std(base2k, col_i) * k_f64;
assert!((std - sigma).abs() < 0.1, "std={std} ~!= {sigma}");
}
})
});
@@ -776,7 +762,7 @@ where
Module<B>: VecZnxFillNormal + VecZnxAddNormal,
{
let n: usize = module.n();
let basek: usize = 17;
let base2k: usize = 17;
let k: usize = 2 * 17;
let size: usize = 5;
let sigma: f64 = 3.2;
@@ -788,19 +774,18 @@ where
let sqrt2: f64 = SQRT_2;
(0..cols).for_each(|col_i| {
let mut a: VecZnx<_> = VecZnx::alloc(n, cols, size);
module.vec_znx_fill_normal(basek, &mut a, col_i, k, &mut source, sigma, bound);
module.vec_znx_add_normal(basek, &mut a, col_i, k, &mut source, sigma, bound);
module.vec_znx_fill_normal(base2k, &mut a, col_i, k, &mut source, sigma, bound);
module.vec_znx_add_normal(base2k, &mut a, col_i, k, &mut source, sigma, bound);
(0..cols).for_each(|col_j| {
if col_j != col_i {
(0..size).for_each(|limb_i| {
assert_eq!(a.at(col_j, limb_i), zero);
})
} else {
let std: f64 = a.std(basek, col_i) * k_f64;
let std: f64 = a.std(base2k, col_i) * k_f64;
assert!(
(std - sigma * sqrt2).abs() < 0.1,
"std={} ~!= {}",
std,
"std={std} ~!= {}",
sigma * sqrt2
);
}
@@ -808,7 +793,7 @@ where
});
}
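
A note on the `k_f64` scaling in the two noise tests above, which is my reading of the tests rather than documented behavior: `a.std(base2k, col_i)` measures the standard deviation of the value interpreted on the torus (magnitude below 1), so multiplying by `2^k` converts it back to integer units, where it should match `sigma`, or `sigma * sqrt(2)` after a second independent Gaussian is added:

// Assumed semantics: std is measured on the torus, then rescaled by 2^k.
let k_f64: f64 = (1u64 << k as u64) as f64;
let std: f64 = a.std(base2k, col_i) * k_f64;
assert!((std - sigma).abs() < 0.1);
// After vec_znx_fill_normal + vec_znx_add_normal, variances add:
// expect std ≈ sigma * SQRT_2.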
pub fn test_vec_znx_lsh<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_lsh<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxLsh<BR> + VecZnxLshTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -826,22 +811,22 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
for k in 0..res_size * basek {
for k in 0..res_size * base2k {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
module_ref.vec_znx_lsh(basek, k, &mut res_ref, i, &a, i, scratch_ref.borrow());
module_test.vec_znx_lsh(basek, k, &mut res_test, i, &a, i, scratch_test.borrow());
module_ref.vec_znx_lsh(base2k, k, &mut res_ref, i, &a, i, scratch_ref.borrow());
module_test.vec_znx_lsh(base2k, k, &mut res_test, i, &a, i, scratch_test.borrow());
}
assert_eq!(a.digest_u64(), a_digest);
@@ -851,7 +836,7 @@ where
}
}
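
In the shift tests the amount `k` is in bits and ranges over the full precision of the result, `0..res_size * base2k`; the signature mirrors normalize with `base2k` leading. One call, annotated (my reading of the diff):

// Left-shift column `i` of `a` by `k` bits within its base-2^k decomposition.
module_ref.vec_znx_lsh(base2k, k, &mut res_ref, i, &a, i, scratch_ref.borrow());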
pub fn test_vec_znx_lsh_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_lsh_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxLshInplace<BR> + VecZnxLshTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -868,16 +853,16 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_lsh_tmp_bytes());
for res_size in [1, 2, 3, 4] {
for k in 0..basek * res_size {
for k in 0..base2k * res_size {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
module_ref.vec_znx_lsh_inplace(basek, k, &mut res_ref, i, scratch_ref.borrow());
module_test.vec_znx_lsh_inplace(basek, k, &mut res_test, i, scratch_test.borrow());
module_ref.vec_znx_lsh_inplace(base2k, k, &mut res_ref, i, scratch_ref.borrow());
module_test.vec_znx_lsh_inplace(base2k, k, &mut res_test, i, scratch_test.borrow());
}
assert_eq!(res_ref, res_test);
@@ -885,7 +870,7 @@ where
}
}
pub fn test_vec_znx_rsh<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_rsh<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxRsh<BR> + VecZnxLshTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -902,22 +887,22 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
for k in 0..res_size * basek {
for k in 0..res_size * base2k {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
module_ref.vec_znx_rsh(basek, k, &mut res_ref, i, &a, i, scratch_ref.borrow());
module_test.vec_znx_rsh(basek, k, &mut res_test, i, &a, i, scratch_test.borrow());
module_ref.vec_znx_rsh(base2k, k, &mut res_ref, i, &a, i, scratch_ref.borrow());
module_test.vec_znx_rsh(base2k, k, &mut res_test, i, &a, i, scratch_test.borrow());
}
assert_eq!(a.digest_u64(), a_digest);
@@ -927,7 +912,7 @@ where
}
}
pub fn test_vec_znx_rsh_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_rsh_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxRshInplace<BR> + VecZnxLshTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -943,16 +928,16 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_lsh_tmp_bytes());
for res_size in [1, 2, 3, 4] {
for k in 0..basek * res_size {
for k in 0..base2k * res_size {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
module_ref.vec_znx_rsh_inplace(basek, k, &mut res_ref, i, scratch_ref.borrow());
module_test.vec_znx_rsh_inplace(basek, k, &mut res_test, i, scratch_test.borrow());
module_ref.vec_znx_rsh_inplace(base2k, k, &mut res_ref, i, scratch_ref.borrow());
module_test.vec_znx_rsh_inplace(base2k, k, &mut res_test, i, scratch_test.borrow());
}
assert_eq!(res_ref, res_test);
@@ -960,7 +945,7 @@ where
}
}
pub fn test_vec_znx_split_ring<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_split_ring<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSplitRing<BR> + ModuleNew<BR> + VecZnxSplitRingTmpBytes,
ScratchOwned<BR>: ScratchOwnedAlloc<BR> + ScratchOwnedBorrow<BR>,
@@ -977,7 +962,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -992,11 +977,11 @@ where
];
res_ref.iter_mut().for_each(|ri| {
ri.fill_uniform(basek, &mut source);
ri.fill_uniform(base2k, &mut source);
});
res_test.iter_mut().for_each(|ri| {
ri.fill_uniform(basek, &mut source);
ri.fill_uniform(base2k, &mut source);
});
for i in 0..cols {
@@ -1013,7 +998,7 @@ where
}
}
pub fn test_vec_znx_sub_scalar<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_sub_scalar<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSubScalar,
Module<BT>: VecZnxSubScalar,
@@ -1025,12 +1010,12 @@ where
let cols: usize = 2;
let mut a: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -1038,8 +1023,8 @@ where
let mut res_1: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_0.fill_uniform(basek, &mut source);
res_1.fill_uniform(basek, &mut source);
res_0.fill_uniform(base2k, &mut source);
res_1.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
@@ -1054,7 +1039,7 @@ where
}
}
pub fn test_vec_znx_sub_scalar_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_sub_scalar_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSubScalarInplace,
Module<BT>: VecZnxSubScalarInplace,
@@ -1066,14 +1051,14 @@ where
let cols: usize = 2;
let mut a: ScalarZnx<Vec<u8>> = ScalarZnx::alloc(n, cols);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res_0: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_1: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_0.fill_uniform(basek, &mut source);
res_0.fill_uniform(base2k, &mut source);
res_1.raw_mut().copy_from_slice(res_0.raw());
for i in 0..cols {
@@ -1086,7 +1071,7 @@ where
}
}
pub fn test_vec_znx_sub<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_sub<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSub,
Module<BT>: VecZnxSub,
@@ -1099,12 +1084,12 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -1112,8 +1097,8 @@ where
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
// Set d to garbage
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Reference
for i in 0..cols {
@@ -1130,10 +1115,10 @@ where
}
}
pub fn test_vec_znx_sub_ab_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_sub_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSubABInplace,
Module<BT>: VecZnxSubABInplace,
Module<BR>: VecZnxSubInplace,
Module<BT>: VecZnxSubInplace,
{
assert_eq!(module_ref.n(), module_test.n());
let n: usize = module_ref.n();
@@ -1143,19 +1128,19 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
module_test.vec_znx_sub_ab_inplace(&mut res_ref, i, &a, i);
module_ref.vec_znx_sub_ab_inplace(&mut res_test, i, &a, i);
module_test.vec_znx_sub_inplace(&mut res_ref, i, &a, i);
module_ref.vec_znx_sub_inplace(&mut res_test, i, &a, i);
}
assert_eq!(a.digest_u64(), a_digest);
@@ -1164,10 +1149,10 @@ where
}
}
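
The AB/BA suffixes are retired: `vec_znx_sub_inplace` takes over the old `sub_ab_inplace` role and `vec_znx_sub_negate_inplace` the old `sub_ba_inplace` role. The operand orders below are inferred from the names, not stated in this diff:

// Inferred semantics; hedged, since the diff itself only shows the rename.
module_ref.vec_znx_sub_inplace(&mut res, i, &a, i);        // res <- res - a
module_ref.vec_znx_sub_negate_inplace(&mut res, i, &a, i); // res <- a - res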
pub fn test_vec_znx_sub_ba_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_sub_negate_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSubBAInplace,
Module<BT>: VecZnxSubBAInplace,
Module<BR>: VecZnxSubNegateInplace,
Module<BT>: VecZnxSubNegateInplace,
{
assert_eq!(module_ref.n(), module_test.n());
let n: usize = module_ref.n();
@@ -1177,19 +1162,19 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.raw_mut().copy_from_slice(res_ref.raw());
for i in 0..cols {
module_test.vec_znx_sub_ba_inplace(&mut res_ref, i, &a, i);
module_ref.vec_znx_sub_ba_inplace(&mut res_test, i, &a, i);
module_test.vec_znx_sub_negate_inplace(&mut res_ref, i, &a, i);
module_ref.vec_znx_sub_negate_inplace(&mut res_test, i, &a, i);
}
assert_eq!(a.digest_u64(), a_digest);
@@ -1198,7 +1183,7 @@ where
}
}
pub fn test_vec_znx_switch_ring<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_switch_ring<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxSwitchRing,
Module<BT>: VecZnxSwitchRing,
@@ -1213,7 +1198,7 @@ where
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
// Fill a with random i64
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -1221,8 +1206,8 @@ where
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n << 1, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n << 1, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Normalize on c
for i in 0..cols {
@@ -1238,8 +1223,8 @@ where
let mut res_ref: VecZnx<Vec<u8>> = VecZnx::alloc(n >> 1, cols, res_size);
let mut res_test: VecZnx<Vec<u8>> = VecZnx::alloc(n >> 1, cols, res_size);
res_ref.fill_uniform(basek, &mut source);
res_test.fill_uniform(basek, &mut source);
res_ref.fill_uniform(base2k, &mut source);
res_test.fill_uniform(base2k, &mut source);
// Normalize on c
for i in 0..cols {


@@ -5,14 +5,14 @@ use crate::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxBigAdd, VecZnxBigAddInplace, VecZnxBigAddSmall, VecZnxBigAddSmallInplace,
VecZnxBigAlloc, VecZnxBigAutomorphism, VecZnxBigAutomorphismInplace, VecZnxBigAutomorphismInplaceTmpBytes,
VecZnxBigFromSmall, VecZnxBigNegate, VecZnxBigNegateInplace, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes,
VecZnxBigSub, VecZnxBigSubABInplace, VecZnxBigSubBAInplace, VecZnxBigSubSmallA, VecZnxBigSubSmallAInplace,
VecZnxBigSubSmallB, VecZnxBigSubSmallBInplace,
VecZnxBigSub, VecZnxBigSubInplace, VecZnxBigSubNegateInplace, VecZnxBigSubSmallA, VecZnxBigSubSmallB,
VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace,
},
layouts::{Backend, DataViewMut, DigestU64, FillUniform, Module, ScratchOwned, VecZnx, VecZnxBig},
source::Source,
};
pub fn test_vec_znx_big_add<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_add<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>:
VecZnxBigAdd<BR> + VecZnxBigAlloc<BR> + VecZnxBigFromSmall<BR> + VecZnxBigNormalize<BR> + VecZnxBigNormalizeTmpBytes,
@@ -32,7 +32,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
@@ -50,7 +50,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest = b.digest_u64();
let mut b_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, b_size);
@@ -93,17 +93,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -119,7 +121,7 @@ where
}
}
pub fn test_vec_znx_big_add_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_add_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigAddInplace<BR>
+ VecZnxBigAlloc<BR>
@@ -145,7 +147,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -160,7 +162,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -186,17 +188,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -211,7 +215,7 @@ where
}
}
pub fn test_vec_znx_big_add_small<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_add_small<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>:
VecZnxBigAddSmall<BR> + VecZnxBigAlloc<BR> + VecZnxBigFromSmall<BR> + VecZnxBigNormalize<BR> + VecZnxBigNormalizeTmpBytes,
@@ -231,7 +235,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -246,7 +250,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -275,17 +279,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -302,7 +308,7 @@ where
}
pub fn test_vec_znx_big_add_small_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
@@ -330,13 +336,13 @@ pub fn test_vec_znx_big_add_small_inplace<BR: Backend, BT: Backend>(
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -361,17 +367,19 @@ pub fn test_vec_znx_big_add_small_inplace<BR: Backend, BT: Backend>(
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -386,7 +394,7 @@ pub fn test_vec_znx_big_add_small_inplace<BR: Backend, BT: Backend>(
}
}
pub fn test_vec_znx_big_automorphism<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_automorphism<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigAutomorphism<BR>
+ VecZnxBigAlloc<BR>
@@ -412,7 +420,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -451,17 +459,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -478,7 +488,7 @@ where
}
pub fn test_vec_znx_big_automorphism_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
@@ -512,7 +522,7 @@ pub fn test_vec_znx_big_automorphism_inplace<BR: Backend, BT: Backend>(
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -536,17 +546,19 @@ pub fn test_vec_znx_big_automorphism_inplace<BR: Backend, BT: Backend>(
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -561,7 +573,7 @@ pub fn test_vec_znx_big_automorphism_inplace<BR: Backend, BT: Backend>(
}
}
pub fn test_vec_znx_big_negate<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_negate<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>:
VecZnxBigNegate<BR> + VecZnxBigAlloc<BR> + VecZnxBigFromSmall<BR> + VecZnxBigNormalize<BR> + VecZnxBigNormalizeTmpBytes,
@@ -581,7 +593,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -619,17 +631,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -644,7 +658,7 @@ where
}
}
pub fn test_vec_znx_big_negate_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_negate_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigNegateInplace<BR>
+ VecZnxBigAlloc<BR>
@@ -672,7 +686,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -695,17 +709,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -719,7 +735,7 @@ where
}
}
pub fn test_vec_znx_big_normalize<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_normalize<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigAlloc<BR>
+ VecZnxBigFromSmall<BR>
@@ -772,8 +788,24 @@ where
// Reference
for j in 0..cols {
module_ref.vec_znx_big_normalize(basek, &mut res_ref, j, &a_ref, j, scratch_ref.borrow());
module_test.vec_znx_big_normalize(basek, &mut res_test, j, &a_test, j, scratch_test.borrow());
module_ref.vec_znx_big_normalize(
base2k,
&mut res_ref,
j,
base2k,
&a_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
base2k,
&mut res_test,
j,
base2k,
&a_test,
j,
scratch_test.borrow(),
);
}
assert_eq!(a_ref.digest_u64(), a_ref_digest);
@@ -784,7 +816,7 @@ where
}
}
pub fn test_vec_znx_big_sub<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_sub<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>:
VecZnxBigSub<BR> + VecZnxBigAlloc<BR> + VecZnxBigFromSmall<BR> + VecZnxBigNormalize<BR> + VecZnxBigNormalizeTmpBytes,
@@ -804,7 +836,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -819,7 +851,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let mut b_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, b_size);
let mut b_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, b_size);
@@ -859,17 +891,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -885,14 +919,14 @@ where
}
}
pub fn test_vec_znx_big_sub_ab_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_sub_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigSubABInplace<BR>
Module<BR>: VecZnxBigSubInplace<BR>
+ VecZnxBigAlloc<BR>
+ VecZnxBigFromSmall<BR>
+ VecZnxBigNormalize<BR>
+ VecZnxBigNormalizeTmpBytes,
Module<BT>: VecZnxBigSubABInplace<BT>
Module<BT>: VecZnxBigSubInplace<BT>
+ VecZnxBigAlloc<BT>
+ VecZnxBigFromSmall<BT>
+ VecZnxBigNormalize<BT>
@@ -911,7 +945,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -926,7 +960,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -937,8 +971,8 @@ where
}
for i in 0..cols {
module_ref.vec_znx_big_sub_ab_inplace(&mut res_big_ref, i, &a_ref, i);
module_test.vec_znx_big_sub_ab_inplace(&mut res_big_test, i, &a_test, i);
module_ref.vec_znx_big_sub_inplace(&mut res_big_ref, i, &a_ref, i);
module_test.vec_znx_big_sub_inplace(&mut res_big_test, i, &a_test, i);
}
assert_eq!(a_ref.digest_u64(), a_ref_digest);
@@ -952,17 +986,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -977,14 +1013,17 @@ where
}
}
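
For reference, the rename map this file applies to the big-vector subtraction traits; the trait names are verbatim from the hunks, the operand-order comments are inferred:

// VecZnxBigSubABInplace     -> VecZnxBigSubInplace            (res <- res - a, inferred)
// VecZnxBigSubBAInplace     -> VecZnxBigSubNegateInplace      (res <- a - res, inferred)
// VecZnxBigSubSmallAInplace -> VecZnxBigSubSmallInplace
// VecZnxBigSubSmallBInplace -> VecZnxBigSubSmallNegateInplace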
pub fn test_vec_znx_big_sub_ba_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigSubBAInplace<BR>
pub fn test_vec_znx_big_sub_negate_inplace<BR: Backend, BT: Backend>(
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
Module<BR>: VecZnxBigSubNegateInplace<BR>
+ VecZnxBigAlloc<BR>
+ VecZnxBigFromSmall<BR>
+ VecZnxBigNormalize<BR>
+ VecZnxBigNormalizeTmpBytes,
Module<BT>: VecZnxBigSubBAInplace<BT>
Module<BT>: VecZnxBigSubNegateInplace<BT>
+ VecZnxBigAlloc<BT>
+ VecZnxBigFromSmall<BT>
+ VecZnxBigNormalize<BT>
@@ -1003,7 +1042,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -1018,7 +1057,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -1029,8 +1068,8 @@ where
}
for i in 0..cols {
module_ref.vec_znx_big_sub_ba_inplace(&mut res_big_ref, i, &a_ref, i);
module_test.vec_znx_big_sub_ba_inplace(&mut res_big_test, i, &a_test, i);
module_ref.vec_znx_big_sub_negate_inplace(&mut res_big_ref, i, &a_ref, i);
module_test.vec_znx_big_sub_negate_inplace(&mut res_big_test, i, &a_test, i);
}
assert_eq!(a_ref.digest_u64(), a_ref_digest);
@@ -1044,17 +1083,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -1069,7 +1110,7 @@ where
}
}
pub fn test_vec_znx_big_sub_small_a<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_sub_small_a<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigSubSmallA<BR>
+ VecZnxBigAlloc<BR>
@@ -1095,7 +1136,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -1110,7 +1151,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -1139,17 +1180,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -1165,7 +1208,7 @@ where
}
}
pub fn test_vec_znx_big_sub_small_b<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_big_sub_small_b<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxBigSubSmallB<BR>
+ VecZnxBigAlloc<BR>
@@ -1191,7 +1234,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let mut a_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, a_size);
let mut a_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, a_size);
@@ -1206,7 +1249,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -1235,17 +1278,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -1262,16 +1307,16 @@ where
}
pub fn test_vec_znx_big_sub_small_a_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
Module<BR>: VecZnxBigSubSmallAInplace<BR>
Module<BR>: VecZnxBigSubSmallInplace<BR>
+ VecZnxBigAlloc<BR>
+ VecZnxBigFromSmall<BR>
+ VecZnxBigNormalize<BR>
+ VecZnxBigNormalizeTmpBytes,
Module<BT>: VecZnxBigSubSmallAInplace<BT>
Module<BT>: VecZnxBigSubSmallInplace<BT>
+ VecZnxBigAlloc<BT>
+ VecZnxBigFromSmall<BT>
+ VecZnxBigNormalize<BT>
@@ -1290,13 +1335,13 @@ pub fn test_vec_znx_big_sub_small_a_inplace<BR: Backend, BT: Backend>(
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -1307,8 +1352,8 @@ pub fn test_vec_znx_big_sub_small_a_inplace<BR: Backend, BT: Backend>(
}
for i in 0..cols {
module_ref.vec_znx_big_sub_small_a_inplace(&mut res_big_ref, i, &a, i);
module_test.vec_znx_big_sub_small_a_inplace(&mut res_big_test, i, &a, i);
module_ref.vec_znx_big_sub_small_inplace(&mut res_big_ref, i, &a, i);
module_test.vec_znx_big_sub_small_inplace(&mut res_big_test, i, &a, i);
}
assert_eq!(a.digest_u64(), a_digest);
@@ -1321,17 +1366,19 @@ pub fn test_vec_znx_big_sub_small_a_inplace<BR: Backend, BT: Backend>(
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -1347,16 +1394,16 @@ pub fn test_vec_znx_big_sub_small_a_inplace<BR: Backend, BT: Backend>(
}
pub fn test_vec_znx_big_sub_small_b_inplace<BR: Backend, BT: Backend>(
basek: usize,
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
Module<BR>: VecZnxBigSubSmallBInplace<BR>
Module<BR>: VecZnxBigSubSmallNegateInplace<BR>
+ VecZnxBigAlloc<BR>
+ VecZnxBigFromSmall<BR>
+ VecZnxBigNormalize<BR>
+ VecZnxBigNormalizeTmpBytes,
Module<BT>: VecZnxBigSubSmallBInplace<BT>
Module<BT>: VecZnxBigSubSmallNegateInplace<BT>
+ VecZnxBigAlloc<BT>
+ VecZnxBigFromSmall<BT>
+ VecZnxBigNormalize<BT>
@@ -1375,13 +1422,13 @@ pub fn test_vec_znx_big_sub_small_b_inplace<BR: Backend, BT: Backend>(
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, res_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let mut res_big_ref: VecZnxBig<Vec<u8>, BR> = module_ref.vec_znx_big_alloc(cols, res_size);
let mut res_big_test: VecZnxBig<Vec<u8>, BT> = module_test.vec_znx_big_alloc(cols, res_size);
@@ -1392,8 +1439,8 @@ pub fn test_vec_znx_big_sub_small_b_inplace<BR: Backend, BT: Backend>(
}
for i in 0..cols {
module_ref.vec_znx_big_sub_small_b_inplace(&mut res_big_ref, i, &a, i);
module_test.vec_znx_big_sub_small_b_inplace(&mut res_big_test, i, &a, i);
module_ref.vec_znx_big_sub_small_negate_inplace(&mut res_big_ref, i, &a, i);
module_test.vec_znx_big_sub_small_negate_inplace(&mut res_big_test, i, &a, i);
}
assert_eq!(a.digest_u64(), a_digest);
@@ -1406,17 +1453,19 @@ pub fn test_vec_znx_big_sub_small_b_inplace<BR: Backend, BT: Backend>(
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
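The hunks above rename the operand-order suffixes on the in-place subtractions to intent-revealing names: VecZnxBigSubBAInplace becomes VecZnxBigSubNegateInplace, VecZnxBigSubSmallAInplace becomes VecZnxBigSubSmallInplace, and VecZnxBigSubSmallBInplace becomes VecZnxBigSubSmallNegateInplace. A minimal migration sketch for downstream callers, assuming the argument layout is unchanged (as the call sites above show); the helper name and generic signature are hypothetical, not part of this commit:

use crate::{
    api::{VecZnxBigSubNegateInplace, VecZnxBigSubSmallInplace, VecZnxBigSubSmallNegateInplace},
    layouts::{Backend, Module, VecZnx, VecZnxBig},
};

// Hypothetical helper: each old call maps one-to-one onto its new name.
fn sub_inplace_after_rename<B: Backend>(
    module: &Module<B>,
    res_big: &mut VecZnxBig<Vec<u8>, B>,
    a_big: &VecZnxBig<Vec<u8>, B>,
    a_small: &VecZnx<Vec<u8>>,
    col: usize,
) where
    Module<B>: VecZnxBigSubNegateInplace<B>
        + VecZnxBigSubSmallInplace<B>
        + VecZnxBigSubSmallNegateInplace<B>,
{
    // was: module.vec_znx_big_sub_ba_inplace(res_big, col, a_big, col);
    module.vec_znx_big_sub_negate_inplace(res_big, col, a_big, col);
    // was: module.vec_znx_big_sub_small_a_inplace(res_big, col, a_small, col);
    module.vec_znx_big_sub_small_inplace(res_big, col, a_small, col);
    // was: module.vec_znx_big_sub_small_b_inplace(res_big, col, a_small, col);
    module.vec_znx_big_sub_small_negate_inplace(res_big, col, a_small, col);
}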

View File

@@ -3,14 +3,14 @@ use rand::RngCore;
use crate::{
api::{
ScratchOwnedAlloc, ScratchOwnedBorrow, VecZnxBigAlloc, VecZnxBigNormalize, VecZnxBigNormalizeTmpBytes, VecZnxDftAdd,
VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftCopy, VecZnxDftSub, VecZnxDftSubABInplace,
VecZnxDftSubBAInplace, VecZnxIdftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
VecZnxDftAddInplace, VecZnxDftAlloc, VecZnxDftApply, VecZnxDftCopy, VecZnxDftSub, VecZnxDftSubInplace,
VecZnxDftSubNegateInplace, VecZnxIdftApply, VecZnxIdftApplyConsume, VecZnxIdftApplyTmpA, VecZnxIdftApplyTmpBytes,
},
layouts::{Backend, DataViewMut, DigestU64, FillUniform, Module, ScratchOwned, VecZnx, VecZnxBig, VecZnxDft},
source::Source,
};
pub fn test_vec_znx_dft_add<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_dft_add<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftAdd<BR>
+ VecZnxDftAlloc<BR>
@@ -38,7 +38,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, a_size);
@@ -56,7 +56,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
let mut b_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, b_size);
@@ -102,17 +102,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -128,7 +130,7 @@ where
}
}
pub fn test_vec_znx_dft_add_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_dft_add_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftAddInplace<BR>
+ VecZnxDftAlloc<BR>
@@ -155,7 +157,7 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, a_size);
@@ -173,7 +175,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let res_digest: u64 = res.digest_u64();
let mut res_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, res_size);
@@ -206,17 +208,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -231,7 +235,7 @@ where
}
}
pub fn test_vec_znx_copy<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_copy<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftCopy<BR>
+ VecZnxDftAlloc<BR>
@@ -259,7 +263,7 @@ where
for a_size in [1, 2, 6, 11] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, a_size);
@@ -307,17 +311,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -333,7 +339,7 @@ where
}
}
pub fn test_vec_znx_idft_apply<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_idft_apply<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftApply<BR>
+ VecZnxDftAlloc<BR>
@@ -361,7 +367,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -406,17 +412,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -432,7 +440,7 @@ where
}
}
pub fn test_vec_znx_idft_apply_tmpa<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_idft_apply_tmpa<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftApply<BR>
+ VecZnxDftAlloc<BR>
@@ -460,7 +468,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -494,17 +502,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -520,7 +530,7 @@ where
}
}
pub fn test_vec_znx_idft_apply_consume<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_idft_apply_consume<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftApply<BR>
+ VecZnxIdftApplyTmpBytes
@@ -550,7 +560,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
for res_size in [1, 2, 3, 4] {
@@ -579,17 +589,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -605,7 +617,7 @@ where
}
}
pub fn test_vec_znx_dft_sub<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_dft_sub<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftSub<BR>
+ VecZnxDftAlloc<BR>
@@ -633,7 +645,7 @@ where
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, a_size);
@@ -651,7 +663,7 @@ where
for b_size in [1, 2, 3, 4] {
let mut b: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, b_size);
b.fill_uniform(basek, &mut source);
b.fill_uniform(base2k, &mut source);
let b_digest: u64 = b.digest_u64();
let mut b_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, b_size);
@@ -697,17 +709,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -723,15 +737,15 @@ where
}
}
pub fn test_vec_znx_dft_sub_ab_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vec_znx_dft_sub_inplace<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftSubABInplace<BR>
Module<BR>: VecZnxDftSubInplace<BR>
+ VecZnxDftAlloc<BR>
+ VecZnxDftApply<BR>
+ VecZnxIdftApplyConsume<BR>
+ VecZnxBigNormalize<BR>
+ VecZnxBigNormalizeTmpBytes,
Module<BT>: VecZnxDftSubABInplace<BT>
Module<BT>: VecZnxDftSubInplace<BT>
+ VecZnxDftAlloc<BT>
+ VecZnxDftApply<BT>
+ VecZnxIdftApplyConsume<BT>
@@ -750,7 +764,7 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, a_size);
@@ -768,7 +782,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let res_digest: u64 = res.digest_u64();
let mut res_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, res_size);
@@ -783,8 +797,8 @@ where
// Reference
for i in 0..cols {
module_ref.vec_znx_dft_sub_ab_inplace(&mut res_dft_ref, i, &a_dft_ref, i);
module_test.vec_znx_dft_sub_ab_inplace(&mut res_dft_test, i, &a_dft_test, i);
module_ref.vec_znx_dft_sub_inplace(&mut res_dft_ref, i, &a_dft_ref, i);
module_test.vec_znx_dft_sub_inplace(&mut res_dft_test, i, &a_dft_test, i);
}
assert_eq!(a_dft_ref.digest_u64(), a_dft_ref_digest);
@@ -801,17 +815,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -826,15 +842,18 @@ where
}
}
pub fn test_vec_znx_dft_sub_ba_inplace<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: VecZnxDftSubBAInplace<BR>
pub fn test_vec_znx_dft_sub_negate_inplace<BR: Backend, BT: Backend>(
base2k: usize,
module_ref: &Module<BR>,
module_test: &Module<BT>,
) where
Module<BR>: VecZnxDftSubNegateInplace<BR>
+ VecZnxDftAlloc<BR>
+ VecZnxDftApply<BR>
+ VecZnxIdftApplyConsume<BR>
+ VecZnxBigNormalize<BR>
+ VecZnxBigNormalizeTmpBytes,
Module<BT>: VecZnxDftSubBAInplace<BT>
Module<BT>: VecZnxDftSubNegateInplace<BT>
+ VecZnxDftAlloc<BT>
+ VecZnxDftApply<BT>
+ VecZnxIdftApplyConsume<BT>
@@ -853,7 +872,7 @@ where
let mut scratch_test: ScratchOwned<BT> = ScratchOwned::alloc(module_test.vec_znx_big_normalize_tmp_bytes());
for a_size in [1, 2, 3, 4] {
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, a_size);
@@ -871,7 +890,7 @@ where
for res_size in [1, 2, 3, 4] {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols, a_size);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let res_digest: u64 = res.digest_u64();
let mut res_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols, res_size);
@@ -886,8 +905,8 @@ where
// Reference
for i in 0..cols {
module_ref.vec_znx_dft_sub_ba_inplace(&mut res_dft_ref, i, &a_dft_ref, i);
module_test.vec_znx_dft_sub_ba_inplace(&mut res_dft_test, i, &a_dft_test, i);
module_ref.vec_znx_dft_sub_negate_inplace(&mut res_dft_ref, i, &a_dft_ref, i);
module_test.vec_znx_dft_sub_negate_inplace(&mut res_dft_test, i, &a_dft_test, i);
}
assert_eq!(a_dft_ref.digest_u64(), a_dft_ref_digest);
@@ -904,17 +923,19 @@ where
for j in 0..cols {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
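The DFT-domain subtractions follow the same renaming scheme: VecZnxDftSubABInplace becomes VecZnxDftSubInplace and VecZnxDftSubBAInplace becomes VecZnxDftSubNegateInplace. A migration sketch under the same assumptions (hypothetical helper; argument layout taken from the call sites above):

use crate::{
    api::{VecZnxDftSubInplace, VecZnxDftSubNegateInplace},
    layouts::{Backend, Module, VecZnxDft},
};

// Hypothetical helper mirroring the renamed DFT subtraction calls.
fn dft_sub_inplace_after_rename<B: Backend>(
    module: &Module<B>,
    res_dft: &mut VecZnxDft<Vec<u8>, B>,
    a_dft: &VecZnxDft<Vec<u8>, B>,
    col: usize,
) where
    Module<B>: VecZnxDftSubInplace<B> + VecZnxDftSubNegateInplace<B>,
{
    // was: module.vec_znx_dft_sub_ab_inplace(res_dft, col, a_dft, col);
    module.vec_znx_dft_sub_inplace(res_dft, col, a_dft, col);
    // was: module.vec_znx_dft_sub_ba_inplace(res_dft, col, a_dft, col);
    module.vec_znx_dft_sub_negate_inplace(res_dft, col, a_dft, col);
}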

View File

@@ -11,7 +11,7 @@ use rand::RngCore;
use crate::layouts::{Backend, VecZnxDft, VmpPMat};
pub fn test_vmp_apply_dft<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vmp_apply_dft<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: ModuleNew<BR>
+ VmpApplyDftTmpBytes
@@ -53,11 +53,11 @@ where
let rows: usize = cols_in;
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols_in, size_in);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
let mut mat: MatZnx<Vec<u8>> = MatZnx::alloc(n, rows, cols_in, cols_out, size_out);
mat.fill_uniform(basek, &mut source);
mat.fill_uniform(base2k, &mut source);
let mat_digest: u64 = mat.digest_u64();
let mut pmat_ref: VmpPMat<Vec<u8>, BR> = module_ref.vmp_pmat_alloc(rows, cols_in, cols_out, size_out);
@@ -90,17 +90,19 @@ where
for j in 0..cols_out {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -117,7 +119,7 @@ where
}
}
pub fn test_vmp_apply_dft_to_dft<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vmp_apply_dft_to_dft<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: ModuleNew<BR>
+ VmpApplyDftToDftTmpBytes
@@ -162,7 +164,7 @@ where
let rows: usize = size_in;
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols_in, size_in);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols_in, size_in);
@@ -176,7 +178,7 @@ where
assert_eq!(a.digest_u64(), a_digest);
let mut mat: MatZnx<Vec<u8>> = MatZnx::alloc(n, rows, cols_in, cols_out, size_out);
mat.fill_uniform(basek, &mut source);
mat.fill_uniform(base2k, &mut source);
let mat_digest: u64 = mat.digest_u64();
let mut pmat_ref: VmpPMat<Vec<u8>, BR> = module_ref.vmp_pmat_alloc(rows, cols_in, cols_out, size_out);
@@ -217,17 +219,19 @@ where
for j in 0..cols_out {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
@@ -244,7 +248,7 @@ where
}
}
pub fn test_vmp_apply_dft_to_dft_add<BR: Backend, BT: Backend>(basek: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
pub fn test_vmp_apply_dft_to_dft_add<BR: Backend, BT: Backend>(base2k: usize, module_ref: &Module<BR>, module_test: &Module<BT>)
where
Module<BR>: ModuleNew<BR>
+ VmpApplyDftToDftAddTmpBytes
@@ -289,7 +293,7 @@ where
let rows: usize = size_in;
let mut a: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols_in, size_in);
a.fill_uniform(basek, &mut source);
a.fill_uniform(base2k, &mut source);
let a_digest: u64 = a.digest_u64();
let mut a_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols_in, size_in);
@@ -303,7 +307,7 @@ where
assert_eq!(a.digest_u64(), a_digest);
let mut mat: MatZnx<Vec<u8>> = MatZnx::alloc(n, rows, cols_in, cols_out, size_out);
mat.fill_uniform(basek, &mut source);
mat.fill_uniform(base2k, &mut source);
let mat_digest: u64 = mat.digest_u64();
let mut pmat_ref: VmpPMat<Vec<u8>, BR> = module_ref.vmp_pmat_alloc(rows, cols_in, cols_out, size_out);
@@ -316,7 +320,7 @@ where
for limb_offset in 0..size_out {
let mut res: VecZnx<Vec<u8>> = VecZnx::alloc(n, cols_out, size_out);
res.fill_uniform(basek, &mut source);
res.fill_uniform(base2k, &mut source);
let res_digest: u64 = res.digest_u64();
let mut res_dft_ref: VecZnxDft<Vec<u8>, BR> = module_ref.vec_znx_dft_alloc(cols_out, size_out);
@@ -355,17 +359,19 @@ where
for j in 0..cols_out {
module_ref.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_ref,
j,
base2k,
&res_big_ref,
j,
scratch_ref.borrow(),
);
module_test.vec_znx_big_normalize(
basek,
base2k,
&mut res_small_test,
j,
base2k,
&res_big_test,
j,
scratch_test.borrow(),
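Every normalize call site in these tests also adopts the new vec_znx_big_normalize signature, which takes one base-2k parameter for the output and a second for the input, i.e. the cross-base2k normalization named in the commit title. The tests pass the same base2k on both sides, but the two may now differ. A hypothetical sketch of the new call shape (helper name, cols parameter, and generic bounds are assumptions, not part of this commit):

use crate::{
    api::{ScratchOwnedBorrow, VecZnxBigNormalize},
    layouts::{Backend, Module, ScratchOwned, VecZnx, VecZnxBig},
};

// Normalize each column of the big vector `a_big` (limbs in base 2^a_base2k)
// into the small vector `res` (limbs in base 2^res_base2k).
fn normalize_cross_base2k<B: Backend>(
    module: &Module<B>,
    cols: usize,
    res_base2k: usize,
    res: &mut VecZnx<Vec<u8>>,
    a_base2k: usize,
    a_big: &VecZnxBig<Vec<u8>, B>,
    scratch: &mut ScratchOwned<B>,
) where
    Module<B>: VecZnxBigNormalize<B>,
    ScratchOwned<B>: ScratchOwnedBorrow<B>,
{
    for j in 0..cols {
        module.vec_znx_big_normalize(
            res_base2k, // base-2k decomposition of the output
            res,
            j,
            a_base2k, // base-2k decomposition of the input (new parameter)
            a_big,
            j,
            scratch.borrow(),
        );
    }
}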