working RLWE encryption example with interleaved polynomials

Jean-Philippe Bossuat
2025-04-29 21:53:27 +02:00
parent 06d0c5e832
commit 2cc51eee18
3 changed files with 371 additions and 203 deletions


@@ -1,8 +1,5 @@
use std::cmp::min;
use crate::ffi::vec_znx;
use crate::internals::{apply_binary_op, apply_unary_op, ffi_binary_op_factory_type_1, ffi_ternary_op_factory};
use crate::{Backend, FFT64, Module, VecZnx, VecZnxBig, VecZnxOps, ZnxBase, ZnxBasics, ZnxInfos, ZnxLayout, assert_alignement};
use crate::{Backend, FFT64, Module, VecZnx, VecZnxBig, VecZnxOps, ZnxBase, ZnxInfos, ZnxLayout, assert_alignement};
pub trait VecZnxBigOps<B: Backend> {
/// Allocates a vector of Z[X]/(X^N+1) polynomials that stores non-normalized values.
@@ -41,40 +38,80 @@ pub trait VecZnxBigOps<B: Backend> {
fn bytes_of_vec_znx_big(&self, cols: usize, size: usize) -> usize;
/// Adds `a` to `b` and stores the result in `res`.
fn vec_znx_big_add(&self, c: &mut VecZnxBig<B>, a: &VecZnxBig<B>, b: &VecZnxBig<B>);
fn vec_znx_big_add(
&self,
res: &mut VecZnxBig<B>,
col_res: usize,
a: &VecZnxBig<B>,
col_a: usize,
b: &VecZnxBig<B>,
col_b: usize,
);
/// Adds `a` to `res` and stores the result in `res`.
fn vec_znx_big_add_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
/// Adds `a` to `b` and stores the result in `res`.
fn vec_znx_big_add_small(&self, c: &mut VecZnxBig<B>, a: &VecZnx, b: &VecZnxBig<B>);
fn vec_znx_big_add_small(
&self,
res: &mut VecZnxBig<B>,
col_res: usize,
a: &VecZnx,
col_a: usize,
b: &VecZnxBig<B>,
col_b: usize,
);
/// Adds `a` to `res` and stores the result in `res`.
fn vec_znx_big_add_small_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnx);
fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
/// Subtracts `b` from `a` and stores the result in `res`.
fn vec_znx_big_sub(&self, c: &mut VecZnxBig<B>, a: &VecZnxBig<B>, b: &VecZnxBig<B>);
fn vec_znx_big_sub(
&self,
res: &mut VecZnxBig<B>,
col_res: usize,
a: &VecZnxBig<B>,
col_a: usize,
b: &VecZnxBig<B>,
col_b: usize,
);
/// Subtracts `res` from `a` and stores the result in `res`.
fn vec_znx_big_sub_ab_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
/// Subtracts `a` from `res` and stores the result in `res`.
fn vec_znx_big_sub_ba_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
/// Subtracts `b` from `a` and stores the result in `res`.
fn vec_znx_big_sub_small_ab(&self, c: &mut VecZnxBig<B>, a: &VecZnx, b: &VecZnxBig<B>);
fn vec_znx_big_sub_small_a(
&self,
res: &mut VecZnxBig<B>,
col_res: usize,
a: &VecZnx,
col_a: usize,
b: &VecZnxBig<B>,
col_b: usize,
);
/// Subtracts `res` from `a` and stores the result in `res`.
fn vec_znx_big_sub_small_ab_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnx);
fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
/// Subtracts `b` from `a` and stores the result in `res`.
fn vec_znx_big_sub_small_ba(&self, c: &mut VecZnxBig<B>, a: &VecZnxBig<B>, b: &VecZnx);
fn vec_znx_big_sub_small_b(
&self,
res: &mut VecZnxBig<B>,
col_res: usize,
a: &VecZnxBig<B>,
col_a: usize,
b: &VecZnx,
col_b: usize,
);
/// Subtracts `a` from `res` and stores the result in `res`.
fn vec_znx_big_sub_small_ba_inplace(&self, b: &mut VecZnxBig<B>, a: &VecZnx);
fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnx, col_a: usize);
/// Returns the minimum number of scratch bytes required to apply [VecZnxBigOps::vec_znx_big_normalize].
fn vec_znx_big_normalize_tmp_bytes(&self, cols: usize) -> usize;
fn vec_znx_big_normalize_tmp_bytes(&self) -> usize;
/// Normalizes `a` and stores the result in `res`.
///
@@ -82,13 +119,21 @@ pub trait VecZnxBigOps<B: Backend> {
///
/// * `log_base2k`: normalization basis.
/// * `tmp_bytes`: scratch space of size at least [VecZnxBigOps::vec_znx_big_normalize_tmp_bytes].
fn vec_znx_big_normalize(&self, log_base2k: usize, b: &mut VecZnx, a: &VecZnxBig<B>, tmp_bytes: &mut [u8]);
fn vec_znx_big_normalize(
&self,
log_base2k: usize,
res: &mut VecZnx,
col_res: usize,
a: &VecZnxBig<B>,
col_a: usize,
tmp_bytes: &mut [u8],
);
/// Applies the automorphism X^i -> X^ik on `a` and stores the result in `res`.
fn vec_znx_big_automorphism(&self, k: i64, b: &mut VecZnxBig<B>, a: &VecZnxBig<B>);
fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<B>, col_res: usize, a: &VecZnxBig<B>, col_a: usize);
/// Applies the automorphism X^i -> X^ik on `a` and stores the result back in `a`.
fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<B>);
fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<B>, col_a: usize);
}
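
For reference, a minimal sketch of the new column-indexed API. The `Module` handle and the allocators `new_vec_znx_big` / `new_vec_znx` are illustrative assumptions, not part of this diff, and in debug builds the scratch buffer must also satisfy `assert_alignement`, so a crate-provided aligned allocation would be used in practice:

// Sketch only: allocator names and the Module handle are assumed for illustration.
fn add_columns_and_normalize(module: &Module<FFT64>) {
    let (log_base2k, cols, size) = (17usize, 2usize, 4usize);
    let a: VecZnxBig<FFT64> = module.new_vec_znx_big(cols, size); // hypothetical allocator
    let b: VecZnxBig<FFT64> = module.new_vec_znx_big(cols, size);
    let mut acc: VecZnxBig<FFT64> = module.new_vec_znx_big(cols, size);
    // Column-wise addition: column i of acc = column i of a + column i of b.
    (0..cols).for_each(|i| module.vec_znx_big_add(&mut acc, i, &a, i, &b, i));
    // Carry-propagate each column back into the normalized base-2^k representation.
    let mut res: VecZnx = module.new_vec_znx(cols, size); // hypothetical allocator
    let mut tmp: Vec<u8> = vec![0u8; module.vec_znx_big_normalize_tmp_bytes()];
    (0..cols).for_each(|i| module.vec_znx_big_normalize(log_base2k, &mut res, i, &acc, i, &mut tmp));
}
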
impl VecZnxBigOps<FFT64> for Module<FFT64> {
@@ -108,170 +153,267 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
VecZnxBig::bytes_of(self, cols, size)
}
fn vec_znx_big_add(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>, b: &VecZnxBig<FFT64>) {
let op = ffi_ternary_op_factory(
self.ptr,
c.size(),
c.sl(),
a.size(),
a.sl(),
b.size(),
b.sl(),
vec_znx::vec_znx_add,
);
apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnxBig<FFT64>, VecZnxBig<FFT64>, false>(self, c, a, b, op);
}
fn vec_znx_big_add_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
unsafe {
let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_add(self, &mut *b_ptr, a, &*b_ptr);
}
}
fn vec_znx_big_sub(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>, b: &VecZnxBig<FFT64>) {
let op = ffi_ternary_op_factory(
self.ptr,
c.size(),
c.sl(),
a.size(),
a.sl(),
b.size(),
b.sl(),
vec_znx::vec_znx_sub,
);
apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnxBig<FFT64>, VecZnxBig<FFT64>, true>(self, c, a, b, op);
}
fn vec_znx_big_sub_ab_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
unsafe {
let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub(self, &mut *b_ptr, a, &*b_ptr);
}
}
fn vec_znx_big_sub_ba_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
unsafe {
let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub(self, &mut *b_ptr, &*b_ptr, a);
}
}
fn vec_znx_big_sub_small_ba(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>, b: &VecZnx) {
let op = ffi_ternary_op_factory(
self.ptr,
c.size(),
c.sl(),
a.size(),
a.sl(),
b.size(),
b.sl(),
vec_znx::vec_znx_sub,
);
apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnxBig<FFT64>, VecZnx, true>(self, c, a, b, op);
}
fn vec_znx_big_sub_small_ba_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnx) {
unsafe {
let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub_small_ba(self, &mut *b_ptr, &*b_ptr, a);
}
}
fn vec_znx_big_sub_small_ab(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnx, b: &VecZnxBig<FFT64>) {
let op = ffi_ternary_op_factory(
self.ptr,
c.size(),
c.sl(),
a.size(),
a.sl(),
b.size(),
b.sl(),
vec_znx::vec_znx_sub,
);
apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnx, VecZnxBig<FFT64>, true>(self, c, a, b, op);
}
fn vec_znx_big_sub_small_ab_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnx) {
unsafe {
let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub_small_ab(self, &mut *b_ptr, a, &*b_ptr);
}
}
fn vec_znx_big_add_small(&self, c: &mut VecZnxBig<FFT64>, a: &VecZnx, b: &VecZnxBig<FFT64>) {
let op = ffi_ternary_op_factory(
self.ptr,
c.size(),
c.sl(),
a.size(),
a.sl(),
b.size(),
b.sl(),
vec_znx::vec_znx_add,
);
apply_binary_op::<FFT64, VecZnxBig<FFT64>, VecZnx, VecZnxBig<FFT64>, false>(self, c, a, b, op);
}
fn vec_znx_big_add_small_inplace(&self, b: &mut VecZnxBig<FFT64>, a: &VecZnx) {
unsafe {
let b_ptr: *mut VecZnxBig<FFT64> = b as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_add_small(self, &mut *b_ptr, a, &*b_ptr);
}
}
fn vec_znx_big_normalize_tmp_bytes(&self, cols: usize) -> usize {
Self::vec_znx_normalize_tmp_bytes(self, cols)
}
fn vec_znx_big_normalize(&self, log_base2k: usize, b: &mut VecZnx, a: &VecZnxBig<FFT64>, tmp_bytes: &mut [u8]) {
#[cfg(debug_assertions)]
{
assert!(tmp_bytes.len() >= Self::vec_znx_big_normalize_tmp_bytes(&self, a.cols()));
}
let a_size: usize = a.size();
let b_size: usize = b.size();
let a_sl: usize = a.sl();
let b_sl: usize = b.sl();
let a_cols: usize = a.cols();
let b_cols: usize = b.cols();
let min_cols: usize = min(a_cols, b_cols);
(0..min_cols).for_each(|i| unsafe {
vec_znx::vec_znx_normalize_base2k(
self.ptr,
log_base2k as u64,
b.at_mut_ptr(i, 0),
b_size as u64,
b_sl as u64,
a.at_ptr(i, 0),
a_size as u64,
a_sl as u64,
tmp_bytes.as_mut_ptr(),
);
});
(min_cols..b_cols).for_each(|i| (0..b_size).for_each(|j| b.zero_at(i, j)));
}
fn vec_znx_big_add(
&self,
res: &mut VecZnxBig<FFT64>,
col_res: usize,
a: &VecZnxBig<FFT64>,
col_a: usize,
b: &VecZnxBig<FFT64>,
col_b: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(res.n(), self.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
vec_znx::vec_znx_add(
self.ptr,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
b.at_ptr(col_b, 0),
b.size() as u64,
b.sl() as u64,
)
}
}
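// Note on addressing: `at_ptr(col, 0)` / `at_mut_ptr(col, 0)` point at limb 0 of the
// selected column, while `size()` (the number of base-2^k limbs) and `sl()` (presumably
// the stride between consecutive limbs) tell the C routine how to walk that column.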
fn vec_znx_big_add_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
unsafe {
let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_add(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
}
}
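// The raw-pointer reborrow above passes `res` as both the output and the second
// operand; this assumes the underlying vec_znx::vec_znx_add routine is well-defined
// when the result aliases an input.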
fn vec_znx_big_sub(
&self,
res: &mut VecZnxBig<FFT64>,
col_res: usize,
a: &VecZnxBig<FFT64>,
col_a: usize,
b: &VecZnxBig<FFT64>,
col_b: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(res.n(), self.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
b.at_ptr(col_b, 0),
b.size() as u64,
b.sl() as u64,
)
}
}
fn vec_znx_big_sub_ab_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
unsafe {
let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
}
}
fn vec_znx_big_sub_ba_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
unsafe {
let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub(self, &mut *res_ptr, col_res, &*res_ptr, col_res, a, col_a);
}
}
fn vec_znx_big_sub_small_b(
&self,
res: &mut VecZnxBig<FFT64>,
col_res: usize,
a: &VecZnxBig<FFT64>,
col_a: usize,
b: &VecZnx,
col_b: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(res.n(), self.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
b.at_ptr(col_b, 0),
b.size() as u64,
b.sl() as u64,
)
}
}
fn vec_znx_big_sub_small_b_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
unsafe {
let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub_small_b(self, &mut *res_ptr, col_res, &*res_ptr, col_res, a, col_a);
}
}
fn vec_znx_big_sub_small_a(
&self,
res: &mut VecZnxBig<FFT64>,
col_res: usize,
a: &VecZnx,
col_a: usize,
b: &VecZnxBig<FFT64>,
col_b: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(res.n(), self.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
vec_znx::vec_znx_sub(
self.ptr,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
b.at_ptr(col_b, 0),
b.size() as u64,
b.sl() as u64,
)
}
}
fn vec_znx_big_sub_small_a_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
unsafe {
let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_sub_small_a(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
}
}
fn vec_znx_big_add_small(
&self,
res: &mut VecZnxBig<FFT64>,
col_res: usize,
a: &VecZnx,
col_a: usize,
b: &VecZnxBig<FFT64>,
col_b: usize,
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(b.n(), self.n());
assert_eq!(res.n(), self.n());
assert_ne!(a.as_ptr(), b.as_ptr());
}
unsafe {
vec_znx::vec_znx_add(
self.ptr,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
b.at_ptr(col_b, 0),
b.size() as u64,
b.sl() as u64,
)
}
}
fn vec_znx_big_add_small_inplace(&self, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnx, col_a: usize) {
unsafe {
let res_ptr: *mut VecZnxBig<FFT64> = res as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_add_small(self, &mut *res_ptr, col_res, a, col_a, &*res_ptr, col_res);
}
}
fn vec_znx_big_normalize_tmp_bytes(&self) -> usize {
Self::vec_znx_normalize_tmp_bytes(self)
}
fn vec_znx_big_normalize(
&self,
log_base2k: usize,
res: &mut VecZnx,
col_res: usize,
a: &VecZnxBig<FFT64>,
col_a: usize,
tmp_bytes: &mut [u8],
) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
assert!(tmp_bytes.len() >= Self::vec_znx_normalize_tmp_bytes(&self));
assert_alignement(tmp_bytes.as_ptr());
}
unsafe {
vec_znx::vec_znx_normalize_base2k(
self.ptr,
log_base2k as u64,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
tmp_bytes.as_mut_ptr(),
);
}
}
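// Presumably, normalization carry-propagates the column so that each limb c_j falls
// back into the centered base-2^k digit range: the encoded value sum_j c_j * 2^(-k*(j+1))
// is preserved while every |c_j| is reduced to at most 2^(k-1).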
fn vec_znx_big_automorphism(&self, k: i64, b: &mut VecZnxBig<FFT64>, a: &VecZnxBig<FFT64>) {
let op = ffi_binary_op_factory_type_1(
self.ptr,
k,
b.size(),
b.sl(),
a.size(),
a.sl(),
vec_znx::vec_znx_automorphism,
);
apply_unary_op::<FFT64, VecZnxBig<FFT64>>(self, b, a, op);
fn vec_znx_big_automorphism(&self, k: i64, res: &mut VecZnxBig<FFT64>, col_res: usize, a: &VecZnxBig<FFT64>, col_a: usize) {
#[cfg(debug_assertions)]
{
assert_eq!(a.n(), self.n());
assert_eq!(res.n(), self.n());
}
unsafe {
vec_znx::vec_znx_automorphism(
self.ptr,
k,
res.at_mut_ptr(col_res, 0),
res.size() as u64,
res.sl() as u64,
a.at_ptr(col_a, 0),
a.size() as u64,
a.sl() as u64,
)
}
}
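// Example: in Z[X]/(X^N+1), X^i -> X^(i*k) permutes coefficients with sign flips,
// since exponents reduce mod 2N with X^N = -1. For N = 4, k = 3:
// a0 + a1*X + a2*X^2 + a3*X^3  ->  a0 + a3*X - a2*X^2 + a1*X^3  (X^2 -> X^6 = -X^2).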
fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<FFT64>) {
fn vec_znx_big_automorphism_inplace(&self, k: i64, a: &mut VecZnxBig<FFT64>, col_a: usize) {
unsafe {
let a_ptr: *mut VecZnxBig<FFT64> = a as *mut VecZnxBig<FFT64>;
Self::vec_znx_big_automorphism(self, k, &mut *a_ptr, &*a_ptr);
Self::vec_znx_big_automorphism(self, k, &mut *a_ptr, col_a, &*a_ptr, col_a);
}
}
}