mirror of https://github.com/arnaucube/poulpy.git (synced 2026-02-10 13:16:44 +01:00)

Commit: working rlwe encryption example with interleaved polynomial
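The example this commit completes builds an RLWE encryption by hand: sample a uniform polynomial a, form ct = (-a * s + m + e, a), and decrypt via ct[0] + ct[1] * s = m + e. A toy scalar sketch of that identity, using plain integers rather than the library's ring types (all names illustrative):

    fn main() {
        // ct = (-a*s + m + e, a); decrypt: ct.0 + ct.1 * s = m + e.
        let (s, a, m, e): (i64, i64, i64, i64) = (3, 1234, 42, 1);
        let ct = (-a * s + m + e, a); // "encrypt"
        let dec = ct.0 + ct.1 * s; // "decrypt"
        assert_eq!(dec, m + e); // message recovered up to the small noise e
        println!("decrypted: {dec}");
    }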
@@ -13,7 +13,7 @@ fn main() {
     let log_scale: usize = msg_size * log_base2k - 5;
     let module: Module<FFT64> = Module::<FFT64>::new(n);
 
-    let mut carry: Vec<u8> = alloc_aligned(module.vec_znx_big_normalize_tmp_bytes(2));
+    let mut carry: Vec<u8> = alloc_aligned(module.vec_znx_big_normalize_tmp_bytes());
 
     let seed: [u8; 32] = [0; 32];
     let mut source: Source = Source::new(seed);
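The change above reflects that `vec_znx_big_normalize_tmp_bytes` no longer takes a column count (see the trait diff below): normalization now runs one column at a time, so a single column's worth of scratch is reused for every call. A toy stand-in showing that design point, not the library's actual buffer layout:

    // One scratch buffer sized for a single column, reused across columns.
    fn normalize_col(col: &mut [i64], scratch: &mut [i64]) {
        scratch.copy_from_slice(col); // toy "work" in the scratch space
        for (c, s) in col.iter_mut().zip(scratch.iter()) {
            *c = s % (1 << 17); // pretend base-2^17 reduction of one limb
        }
    }

    fn main() {
        let n = 8;
        let mut columns = vec![vec![123_456_789_i64; n]; 2];
        let mut scratch = vec![0_i64; n]; // size independent of column count
        for col in columns.iter_mut() {
            normalize_col(col, &mut scratch);
        }
        println!("{columns:?}");
    }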
@@ -28,69 +28,95 @@ fn main() {
     // s_ppol <- DFT(s)
     module.svp_prepare(&mut s_ppol, &s);
 
-    // ct = (c0, c1)
-    let mut ct: VecZnx = module.new_vec_znx(2, ct_size);
+    // Allocates a VecZnx with two columns: ct=(0, 0)
+    let mut ct: VecZnx = module.new_vec_znx(
+        2,       // Number of columns
+        ct_size, // Number of small poly per column
+    );
 
-    // Fill c1 with random values
+    // Fill the second column with random values: ct = (0, a)
     module.fill_uniform(log_base2k, &mut ct, 1, ct_size, &mut source);
 
     // Scratch space for DFT values
-    let mut buf_dft: VecZnxDft<FFT64> = module.new_vec_znx_dft(1, ct.size());
+    let mut buf_dft: VecZnxDft<FFT64> = module.new_vec_znx_dft(
+        1,         // Number of columns
+        ct.size(), // Number of polynomials per column
+    );
 
-    // Applies buf_dft <- s * c1
+    // Applies DFT(ct[1]) * DFT(s)
     module.svp_apply_dft(
-        &mut buf_dft, // DFT(c1 * s)
-        &s_ppol,
+        &mut buf_dft, // DFT(ct[1] * s)
+        &s_ppol,      // DFT(s)
         &ct,
-        1, // c1
+        1, // Selects the second column of ct
     );
 
-    // Alias scratch space (VecZnxDftis always at least as big as VecZnxBig)
+    // Alias scratch space (VecZnxDft<B> is always at least as big as VecZnxBig<B>)
     let mut buf_big: VecZnxBig<FFT64> = buf_dft.as_vec_znx_big();
 
-    // BIG(c1 * s) <- IDFT(DFT(c1 * s)) (not normalized)
+    // BIG(ct[1] * s) <- IDFT(DFT(ct[1] * s)) (not normalized)
     module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft);
 
-    // m <- (0)
-    let mut m: VecZnx = module.new_vec_znx(1, msg_size);
+    // Creates a plaintext: VecZnx with 1 column
+    let mut m: VecZnx = module.new_vec_znx(
+        1,        // Number of columns
+        msg_size, // Number of small polynomials
+    );
     let mut want: Vec<i64> = vec![0; n];
     want.iter_mut()
         .for_each(|x| *x = source.next_u64n(16, 15) as i64);
     m.encode_vec_i64(0, log_base2k, log_scale, &want, 4);
     m.normalize(log_base2k, &mut carry);
 
-    // m - BIG(c1 * s)
-    module.vec_znx_big_sub_small_ab_inplace(&mut buf_big, &m);
+    // m - BIG(ct[1] * s)
+    module.vec_znx_big_sub_small_a_inplace(
+        &mut buf_big,
+        0, // Selects the first column of the receiver
+        &m,
+        0, // Selects the first column of the message
+    );
 
-    // c0 <- m - BIG(c1 * s)
-    module.vec_znx_big_normalize(log_base2k, &mut ct, &buf_big, &mut carry);
+    // Normalizes back to VecZnx
+    // ct[0] <- m - BIG(c1 * s)
+    module.vec_znx_big_normalize(
+        log_base2k, &mut ct, 0, // Selects the first column of ct (ct[0])
+        &buf_big, 0, // Selects the first column of buf_big
+        &mut carry,
+    );
 
-    ct.print(ct.sl());
-
-    // (c0 + e, c1)
+    // Add noise to ct[0]
+    // ct[0] <- ct[0] + e
     module.add_normal(
         log_base2k,
         &mut ct,
-        0, // c0
-        log_base2k * ct_size,
+        0, // Selects the first column of ct (ct[0])
+        log_base2k * ct_size, // Scaling of the noise: 2^{-log_base2k * limbs}
         &mut source,
-        3.2,
-        19.0,
+        3.2,  // Standard deviation
+        19.0, // Truncation bound
     );
 
-    // Decrypt
+    // Final ciphertext: ct = (-a * s + m + e, a)
+
+    // Decryption
 
-    // DFT(c1 * s)
-    module.svp_apply_dft(&mut buf_dft, &s_ppol, &ct, 1);
+    // DFT(ct[1] * s)
+    module.svp_apply_dft(
+        &mut buf_dft,
+        &s_ppol,
+        &ct,
+        1, // Selects the second column of ct (ct[1])
+    );
 
     // BIG(c1 * s) = IDFT(DFT(c1 * s))
     module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft);
 
-    // BIG(c1 * s) + c0
-    module.vec_znx_big_add_small_inplace(&mut buf_big, &ct);
+    // BIG(c1 * s) + ct[0]
+    module.vec_znx_big_add_small_inplace(&mut buf_big, 0, &ct, 0);
 
-    // m + e <- BIG(c1 * s + c0)
+    // m + e <- BIG(ct[1] * s + ct[0])
     let mut res: VecZnx = module.new_vec_znx(1, ct_size);
-    module.vec_znx_big_normalize(log_base2k, &mut res, &buf_big, &mut carry);
+    module.vec_znx_big_normalize(log_base2k, &mut res, 0, &buf_big, 0, &mut carry);
 
     // have = m * 2^{log_scale} + e
     let mut have: Vec<i64> = vec![i64::default(); n];
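The hunk stops before the example's final check; presumably it then decodes res into have and compares against want, matching the decode_vec_i64 call in the next hunk. A hedged sketch of that ending, written in the example's scope (the tolerance is illustrative, not from the commit):

    res.decode_vec_i64(0, log_base2k, log_scale, &mut have);
    want.iter().zip(have.iter()).for_each(|(w, h)| {
        // res carries m * 2^{log_scale} + e, so after decoding at log_scale
        // the values agree up to the rounding induced by the noise e.
        assert!((w - h).abs() <= 1, "noise exceeded the decoding tolerance");
    });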
@@ -46,7 +46,7 @@ fn main() {
     module.vec_znx_idft_tmp_a(&mut c_big, &mut c_dft);
 
     let mut res: VecZnx = module.new_vec_znx(1, limbs_vec);
-    module.vec_znx_big_normalize(log_base2k, &mut res, &c_big, &mut buf);
+    module.vec_znx_big_normalize(log_base2k, &mut res, 0, &c_big, 0, &mut buf);
 
     let mut values_res: Vec<i64> = vec![i64::default(); n];
     res.decode_vec_i64(0, log_base2k, log_k, &mut values_res);
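For intuition on the decode step above: a coefficient stored across base-2^{log_base2k} limbs is reassembled limb by limb and read off at the requested scale 2^{log_k}. A toy reassembly over plain integers (own names, not the library's layout):

    fn main() {
        let log_base2k: usize = 17; // limb base 2^17
        let limbs: [i64; 3] = [3, 12345, -4242]; // toy digits, most significant first
        let mut x: i64 = 0;
        for l in limbs {
            x = (x << log_base2k) + l; // x <- x * 2^k + next limb
        }
        // x now sits at scale 2^{log_base2k * limbs.len()}; decoding at a
        // smaller log_k would shift/round it down accordingly.
        println!("reassembled: {x}");
    }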
@@ -1,8 +1,5 @@
-use std::cmp::min;
-
 use crate::ffi::vec_znx;
-use crate::internals::{apply_binary_op, apply_unary_op, ffi_binary_op_factory_type_1, ffi_ternary_op_factory};
-use crate::{Backend, FFT64, Module, VecZnx, VecZnxBig, VecZnxOps, ZnxBase, ZnxInfos, ZnxLayout, assert_alignement};
+use crate::{Backend, FFT64, Module, VecZnx, VecZnxBig, VecZnxOps, ZnxBase, ZnxBasics, ZnxInfos, ZnxLayout, assert_alignement};
 
 pub trait VecZnxBigOps<B: Backend> {
     /// Allocates a vector Z[X]/(X^N+1) that stores not normalized values.
@@ -41,40 +38,80 @@ pub trait VecZnxBigOps<B: Backend> {
     fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize;
 
     /// Adds `a` to `b` and stores the result on `c`.
-    fn vec_znx_big_add(&self, c: &mut VecZnxBig<B>, a: &VecZnxBig<B>, b: &VecZnxBig<B>);
+    fn vec_znx_big_add(
+        &self,
+        res: &mut VecZnxBig<B>,
+        col_res: usize,
+        a: &VecZnxBig<B>,
+        col_a: usize,
+        b: &VecZnxBig<B>,
+        col_b: usize,
+    );
 
     /// Adds `a` to `b` and stores the result on `b`.
-    fn vec_znx_big_add_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
+    fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
 
     /// Adds `a` to `b` and stores the result on `c`.
-    fn vec_znx_big_add_small(&self, c: &mut VecZnxBig<B>, a: &VecZnx, b: &VecZnxBig<B>);
+    fn vec_znx_big_add_small(
+        &self,
+        res: &mut VecZnxBig<B>,
+        col_res: usize,
+        a: &VecZnx,
+        col_a: usize,
+        b: &VecZnxBig<B>,
+        col_b: usize,
+    );
 
     /// Adds `a` to `b` and stores the result on `b`.
-    fn vec_znx_big_add_small_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnx);
+    fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
 
     /// Subtracts `a` to `b` and stores the result on `c`.
-    fn vec_znx_big_sub(&self, c: &mut VecZnxBig<B>, a: &VecZnxBig<B>, b: &VecZnxBig<B>);
+    fn vec_znx_big_sub(
+        &self,
+        res: &mut VecZnxBig<B>,
+        col_res: usize,
+        a: &VecZnxBig<B>,
+        col_a: usize,
+        b: &VecZnxBig<B>,
+        col_b: usize,
+    );
 
     /// Subtracts `a` to `b` and stores the result on `b`.
-    fn vec_znx_big_sub_ab_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
+    fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
 
     /// Subtracts `b` to `a` and stores the result on `b`.
-    fn vec_znx_big_sub_ba_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
+    fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
 
     /// Subtracts `b` to `a` and stores the result on `c`.
-    fn vec_znx_big_sub_small_ab(&self, c: &mut VecZnxBig<B>, a: &VecZnx, b: &VecZnxBig<B>);
+    fn vec_znx_big_sub_small_a(
+        &self,
+        res: &mut VecZnxBig<B>,
+        col_res: usize,
+        a: &VecZnx,
+        col_a: usize,
+        b: &VecZnxBig<B>,
+        col_b: usize,
+    );
 
     /// Subtracts `a` to `b` and stores the result on `b`.
-    fn vec_znx_big_sub_small_ab_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnx);
+    fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
 
     /// Subtracts `b` to `a` and stores the result on `c`.
-    fn vec_znx_big_sub_small_ba(&self, c: &mut VecZnxBig<B>, a: &VecZnxBig<B>, b: &VecZnx);
+    fn vec_znx_big_sub_small_b(
+        &self,
+        res: &mut VecZnxBig<B>,
+        col_res: usize,
+        a: &VecZnxBig<B>,
+        col_a: usize,
+        b: &VecZnx,
+        col_b: usize,
+    );
 
     /// Subtracts `b` to `a` and stores the result on `b`.
-    fn vec_znx_big_sub_small_ba_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnx);
+    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
 
     /// Returns the minimum number of bytes to apply [VecZnxBigOps::vec_znx_big_normalize].
-    fn vec_znx_big_normalize_tmp_bytes(&self, cols: usize) -> usize;
+    fn vec_znx_big_normalize_tmp_bytes(&self) -> usize;
 
     /// Normalizes `a` and stores the result on `b`.
     ///
@@ -82,13 +119,21 @@ pub trait VecZnxBigOps<B: Backend> {
     ///
     /// * `log_base2k`: normalization basis.
     /// * `tmp_bytes`: scratch space of size at least [VecZnxBigOps::vec_znx_big_normalize].
-    fn vec_znx_big_normalize(&self, log_base2k: usize, b: &mut VecZnx, a: &VecZnxBig<B>, tmp_bytes: &mut [u8]);
+    fn vec_znx_big_normalize(
+        &self,
+        log_base2k: usize,
+        res: &mut VecZnx,
+        col_res: usize,
+        a: &VecZnxBig<B>,
+        col_a: usize,
+        tmp_bytes: &mut [u8],
+    );
 
     /// Applies the automorphism X^i -> X^ik on `a` and stores the result on `b`.
-    fn vec_znx_big_automorphism(&self, k: i64, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
+    fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
 
     /// Applies the automorphism X^i -> X^ik on `a` and stores the result on `a`.
-    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<B>);
+    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<B>, col_a: usize);
 }
 
 impl VecZnxBigOps<FFT64> for Module<FFT64> {
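Every operation in the new trait addresses one column of each operand instead of acting on all columns at once. A hedged sketch of call sites for the signatures above (acc, lhs, rhs, m, out, tmp are illustrative names, not from the commit):

    module.vec_znx_big_add(&mut acc, 0, &lhs, 0, &rhs, 1); // acc[0] = lhs[0] + rhs[1]
    module.vec_znx_big_sub_small_a_inplace(&mut acc, 0, &m, 0); // acc[0] = m[0] - acc[0]
    module.vec_znx_big_normalize(log_base2k, &mut out, 0, &acc, 0, &mut tmp); // out[0] <- normalize(acc[0])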
@@ -108,170 +153,267 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
         VecZnxBig::bytes_of(self, cols, size)
     }
 
-    fn vec_znx_big_add(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>, b: &VecZnxBig<FFT64>) {
-        let op = ffi_ternary_op_factory(
-            self.ptr,
-            c.size(),
-            c.sl(),
-            a.size(),
-            a.sl(),
-            b.size(),
-            b.sl(),
-            vec_znx::vec_znx_add,
-        );
-        apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnxBig<FFT64>, VecZnxBig<FFT64>, false>(self, c, a, b, op);
-    }
+    fn vec_znx_big_add(
+        &self,
+        res: &mut VecZnxBig<FFT64>,
+        col_res: usize,
+        a: &VecZnxBig<FFT64>,
+        col_a: usize,
+        b: &VecZnxBig<FFT64>,
+        col_b: usize,
+    ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(b.n(), self.n());
+            assert_eq!(res.n(), self.n());
+            assert_ne!(a.as_ptr(), b.as_ptr());
+        }
+        unsafe {
+            vec_znx::vec_znx_add(
+                self.ptr,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                b.at_ptr(col_b, 0),
+                b.size() as u64,
+                b.sl() as u64,
+            )
+        }
+    }
 
-    fn vec_znx_big_add_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
-        unsafe {
-            let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_add(self, &mut *b_ptr, a, &*b_ptr);
-        }
-    }
+    fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+        unsafe {
+            let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_add(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
+        }
+    }
 
-    fn vec_znx_big_sub(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>, b: &VecZnxBig<FFT64>) {
-        let op = ffi_ternary_op_factory(
-            self.ptr,
-            c.size(),
-            c.sl(),
-            a.size(),
-            a.sl(),
-            b.size(),
-            b.sl(),
-            vec_znx::vec_znx_sub,
-        );
-        apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnxBig<FFT64>, VecZnxBig<FFT64>, true>(self, c, a, b, op);
-    }
+    fn vec_znx_big_sub(
+        &self,
+        res: &mut VecZnxBig<FFT64>,
+        col_res: usize,
+        a: &VecZnxBig<FFT64>,
+        col_a: usize,
+        b: &VecZnxBig<FFT64>,
+        col_b: usize,
+    ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(b.n(), self.n());
+            assert_eq!(res.n(), self.n());
+            assert_ne!(a.as_ptr(), b.as_ptr());
+        }
+        unsafe {
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                b.at_ptr(col_b, 0),
+                b.size() as u64,
+                b.sl() as u64,
+            )
+        }
+    }
 
-    fn vec_znx_big_sub_ab_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
-        unsafe {
-            let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub(self, &mut *b_ptr, a, &*b_ptr);
-        }
-    }
+    fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+        unsafe {
+            let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_sub(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
+        }
+    }
 
-    fn vec_znx_big_sub_ba_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
-        unsafe {
-            let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub(self, &mut *b_ptr, &*b_ptr, a);
-        }
-    }
+    fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+        unsafe {
+            let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_sub(self, &mut *res_ptr, col_res, &*res_ptr, col_res, a, col_a);
+        }
+    }
 
-    fn vec_znx_big_sub_small_ba(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>, b: &VecZnx) {
-        let op = ffi_ternary_op_factory(
-            self.ptr,
-            c.size(),
-            c.sl(),
-            a.size(),
-            a.sl(),
-            b.size(),
-            b.sl(),
-            vec_znx::vec_znx_sub,
-        );
-        apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnxBig<FFT64>, VecZnx, true>(self, c, a, b, op);
-    }
+    fn vec_znx_big_sub_small_b(
+        &self,
+        res: &mut VecZnxBig<FFT64>,
+        col_res: usize,
+        a: &VecZnxBig<FFT64>,
+        col_a: usize,
+        b: &VecZnx,
+        col_b: usize,
+    ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(b.n(), self.n());
+            assert_eq!(res.n(), self.n());
+            assert_ne!(a.as_ptr(), b.as_ptr());
+        }
+        unsafe {
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                b.at_ptr(col_b, 0),
+                b.size() as u64,
+                b.sl() as u64,
+            )
+        }
+    }
 
-    fn vec_znx_big_sub_small_ba_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnx) {
-        unsafe {
-            let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub_small_ba(self, &mut *b_ptr, &*b_ptr, a);
-        }
-    }
+    fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
+        unsafe {
+            let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_sub_small_b(self, &mut *res_ptr, col_res, &*res_ptr, col_res, a, col_a);
+        }
+    }
 
-    fn vec_znx_big_sub_small_ab(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnx, b: &VecZnxBig<FFT64>) {
-        let op = ffi_ternary_op_factory(
-            self.ptr,
-            c.size(),
-            c.sl(),
-            a.size(),
-            a.sl(),
-            b.size(),
-            b.sl(),
-            vec_znx::vec_znx_sub,
-        );
-        apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnx, VecZnxBig<FFT64>, true>(self, c, a, b, op);
-    }
+    fn vec_znx_big_sub_small_a(
+        &self,
+        res: &mut VecZnxBig<FFT64>,
+        col_res: usize,
+        a: &VecZnx,
+        col_a: usize,
+        b: &VecZnxBig<FFT64>,
+        col_b: usize,
+    ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(b.n(), self.n());
+            assert_eq!(res.n(), self.n());
+            assert_ne!(a.as_ptr(), b.as_ptr());
+        }
+        unsafe {
+            vec_znx::vec_znx_sub(
+                self.ptr,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                b.at_ptr(col_b, 0),
+                b.size() as u64,
+                b.sl() as u64,
+            )
+        }
+    }
 
-    fn vec_znx_big_sub_small_ab_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnx) {
-        unsafe {
-            let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_sub_small_ab(self, &mut *b_ptr, a, &*b_ptr);
-        }
-    }
+    fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
+        unsafe {
+            let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_sub_small_a(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
+        }
+    }
 
-    fn vec_znx_big_add_small(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnx, b: &VecZnxBig<FFT64>) {
-        let op = ffi_ternary_op_factory(
-            self.ptr,
-            c.size(),
-            c.sl(),
-            a.size(),
-            a.sl(),
-            b.size(),
-            b.sl(),
-            vec_znx::vec_znx_add,
-        );
-        apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnx, VecZnxBig<FFT64>, false>(self, c, a, b, op);
-    }
+    fn vec_znx_big_add_small(
+        &self,
+        res: &mut VecZnxBig<FFT64>,
+        col_res: usize,
+        a: &VecZnx,
+        col_a: usize,
+        b: &VecZnxBig<FFT64>,
+        col_b: usize,
+    ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(b.n(), self.n());
+            assert_eq!(res.n(), self.n());
+            assert_ne!(a.as_ptr(), b.as_ptr());
+        }
+        unsafe {
+            vec_znx::vec_znx_add(
+                self.ptr,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                b.at_ptr(col_b, 0),
+                b.size() as u64,
+                b.sl() as u64,
+            )
+        }
+    }
 
-    fn vec_znx_big_add_small_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnx) {
-        unsafe {
-            let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_add_small(self, &mut *b_ptr, a, &*b_ptr);
-        }
-    }
+    fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, a_col: usize) {
+        unsafe {
+            let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_add_small(self, &mut *res_ptr, col_res, a, a_col, &*res_ptr, col_res);
+        }
+    }
 
-    fn vec_znx_big_normalize_tmp_bytes(&self, cols: usize) -> usize {
-        Self::vec_znx_normalize_tmp_bytes(self, cols)
-    }
+    fn vec_znx_big_normalize_tmp_bytes(&self) -> usize {
+        Self::vec_znx_normalize_tmp_bytes(self)
+    }
 
-    fn vec_znx_big_normalize(&self, log_base2k: usize, b: &mut VecZnx, a: &VecZnxBig<FFT64>, tmp_bytes: &mut [u8]) {
-        #[cfg(debug_assertions)]
-        {
-            assert!(tmp_bytes.len() >= Self::vec_znx_big_normalize_tmp_bytes(&self, a.cols()));
-            assert_alignement(tmp_bytes.as_ptr());
-        }
-        let a_size: usize = a.size();
-        let b_size: usize = b.size();
-        let a_sl: usize = a.sl();
-        let b_sl: usize = b.sl();
-        let a_cols: usize = a.cols();
-        let b_cols: usize = b.cols();
-        let min_cols: usize = min(a_cols, b_cols);
-        (0..min_cols).for_each(|i| unsafe {
-            vec_znx::vec_znx_normalize_base2k(
-                self.ptr,
-                log_base2k as u64,
-                b.at_mut_ptr(i, 0),
-                b_size as u64,
-                b_sl as u64,
-                a.at_ptr(i, 0),
-                a_size as u64,
-                a_sl as u64,
-                tmp_bytes.as_mut_ptr(),
-            );
-        });
-        (min_cols..b_cols).for_each(|i| (0..b_size).for_each(|j| b.zero_at(i, j)));
-    }
+    fn vec_znx_big_normalize(
+        &self,
+        log_base2k: usize,
+        res: &mut VecZnx,
+        col_res: usize,
+        a: &VecZnxBig<FFT64>,
+        col_a: usize,
+        tmp_bytes: &mut [u8],
+    ) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+            assert!(tmp_bytes.len() >= Self::vec_znx_normalize_tmp_bytes(&self));
+            assert_alignement(tmp_bytes.as_ptr());
+        }
+        unsafe {
+            vec_znx::vec_znx_normalize_base2k(
+                self.ptr,
+                log_base2k as u64,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+                tmp_bytes.as_mut_ptr(),
+            );
+        }
+    }
 
-    fn vec_znx_big_automorphism(&self, k: i64, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
-        let op = ffi_binary_op_factory_type_1(
-            self.ptr,
-            k,
-            b.size(),
-            b.sl(),
-            a.size(),
-            a.sl(),
-            vec_znx::vec_znx_automorphism,
-        );
-        apply_unary_op::<FFT64, VecZnxBig<FFT64>>(self, b, a, op);
-    }
+    fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
+        #[cfg(debug_assertions)]
+        {
+            assert_eq!(a.n(), self.n());
+            assert_eq!(res.n(), self.n());
+        }
+        unsafe {
+            vec_znx::vec_znx_automorphism(
+                self.ptr,
+                k,
+                res.at_mut_ptr(col_res, 0),
+                res.size() as u64,
+                res.sl() as u64,
+                a.at_ptr(col_a, 0),
+                a.size() as u64,
+                a.sl() as u64,
+            )
+        }
+    }
 
-    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<FFT64>) {
-        unsafe {
-            let a_ptr: *mut VecZnxBig<FFT64> = a as *mut VecZnxBig<FFT64>;
-            Self::vec_znx_big_automorphism(self, k, &mut *a_ptr, &*a_ptr);
-        }
-    }
+    fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<FFT64>, col_a: usize) {
+        unsafe {
+            let a_ptr: *mut VecZnxBig<FFT64> = a as *mut VecZnxBig<FFT64>;
+            Self::vec_znx_big_automorphism(self, k, &mut *a_ptr, col_a, &*a_ptr, col_a);
+        }
+    }
 }
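A note on the *_inplace implementations above: they reborrow res through a raw pointer so the same buffer can be passed as both destination and operand of the non-inplace routine. A standalone sketch of that pattern with plain slices (it mirrors the commit's trick; Miri would flag the aliasing reborrows, here as in the original):

    fn add_cols(res: &mut [i64], a: &[i64], b: &[i64]) {
        res.iter_mut()
            .zip(a.iter().zip(b))
            .for_each(|(r, (x, y))| *r = x + y);
    }

    fn add_inplace(res: &mut [i64], a: &[i64]) {
        unsafe {
            // Each element of `b` is read before the matching element of
            // `res` is written, so the aliasing is benign in practice.
            let res_ptr: *mut [i64] = res as *mut [i64];
            add_cols(&mut *res_ptr, a, &*res_ptr);
        }
    }

    fn main() {
        let mut res = vec![1, 2, 3];
        add_inplace(&mut res, &[10, 20, 30]);
        assert_eq!(res, vec![11, 22, 33]);
    }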