Added basic key-switching + file formatting

This commit is contained in:
Jean-Philippe Bossuat
2025-04-24 10:43:51 +02:00
parent 4196477300
commit ad6e8169e5
33 changed files with 319 additions and 715 deletions

View File

@@ -1,14 +1,9 @@
use base2k::ffi::reim::*;
-use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main};
use std::ffi::c_void;
fn fft(c: &mut Criterion) {
-fn forward<'a>(
-m: u32,
-log_bound: u32,
-reim_fft_precomp: *mut reim_fft_precomp,
-a: &'a [i64],
-) -> Box<dyn FnMut() + 'a> {
+fn forward<'a>(m: u32, log_bound: u32, reim_fft_precomp: *mut reim_fft_precomp, a: &'a [i64]) -> Box<dyn FnMut() + 'a> {
unsafe {
let buf_a: *mut f64 = reim_fft_precomp_get_buffer(reim_fft_precomp, 0);
reim_from_znx64_simple(m as u32, log_bound as u32, buf_a as *mut c_void, a.as_ptr());
@@ -16,12 +11,7 @@ fn fft(c: &mut Criterion) {
}
}
-fn backward<'a>(
-m: u32,
-log_bound: u32,
-reim_ifft_precomp: *mut reim_ifft_precomp,
-a: &'a [i64],
-) -> Box<dyn FnMut() + 'a> {
+fn backward<'a>(m: u32, log_bound: u32, reim_ifft_precomp: *mut reim_ifft_precomp, a: &'a [i64]) -> Box<dyn FnMut() + 'a> {
Box::new(move || unsafe {
let buf_a: *mut f64 = reim_ifft_precomp_get_buffer(reim_ifft_precomp, 0);
reim_from_znx64_simple(m as u32, log_bound as u32, buf_a as *mut c_void, a.as_ptr());
@@ -29,8 +19,7 @@ fn fft(c: &mut Criterion) {
})
}
-let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
-c.benchmark_group("fft");
+let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("fft");
for log_n in 10..17 {
let n: usize = 1 << log_n;
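For context on the benchmark shape above: a minimal Criterion group following the same pattern (one group, one parameterized case per ring degree) could look like the sketch below. This is illustrative only and assumes the criterion crate; the real benchmark drives the reim FFI forward/backward closures rather than the placeholder body.

use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main};

fn fft(c: &mut Criterion) {
    let mut group = c.benchmark_group("fft");
    for log_n in 10..17 {
        let n: usize = 1 << log_n;
        group.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
            // Placeholder body; the real benchmark would call the FFI forward/backward closures.
            b.iter(|| std::hint::black_box(n) + 1);
        });
    }
    group.finish();
}

criterion_group!(benches, fft);
criterion_main!(benches);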

View File

@@ -1,6 +1,6 @@
use base2k::{
-alloc_aligned, Encoding, Infos, Module, Sampling, Scalar, SvpPPol, SvpPPolOps, VecZnx,
-VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, BACKEND,
+BACKEND, Encoding, Infos, Module, Sampling, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
+VecZnxDftOps, VecZnxOps, alloc_aligned,
};
use itertools::izip;
use sampling::source::Source;

View File

@@ -1,6 +1,6 @@
use base2k::{
-alloc_aligned, Encoding, Infos, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
-VecZnxDftOps, VecZnxOps, VecZnxVec, VmpPMat, VmpPMatOps, BACKEND,
+BACKEND, Encoding, Infos, Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VecZnxVec, VmpPMat,
+VmpPMatOps, alloc_aligned,
};
fn main() {
@@ -16,8 +16,7 @@ fn main() {
let cols: usize = cols + 1;
// Maximum size of the byte scratch needed
-let tmp_bytes: usize = module.vmp_prepare_tmp_bytes(rows, cols)
-| module.vmp_apply_dft_tmp_bytes(cols, cols, rows, cols);
+let tmp_bytes: usize = module.vmp_prepare_tmp_bytes(rows, cols) | module.vmp_apply_dft_tmp_bytes(cols, cols, rows, cols);
let mut buf: Vec<u8> = alloc_aligned(tmp_bytes);

View File

@@ -40,14 +40,7 @@ pub trait Encoding {
/// * `i`: index of the coefficient on which to encode the data.
/// * `data`: data to encode on the receiver.
/// * `log_max`: base two logarithm of the infinity norm of the input data.
-fn encode_coeff_i64(
-&mut self,
-log_base2k: usize,
-log_k: usize,
-i: usize,
-data: i64,
-log_max: usize,
-);
+fn encode_coeff_i64(&mut self, log_base2k: usize, log_k: usize, i: usize, data: i64, log_max: usize);
/// decode a single of i64 from the receiver at the given index.
///
@@ -73,14 +66,7 @@ impl Encoding for VecZnx {
decode_vec_float(self, log_base2k, data)
}
-fn encode_coeff_i64(
-&mut self,
-log_base2k: usize,
-log_k: usize,
-i: usize,
-value: i64,
-log_max: usize,
-) {
+fn encode_coeff_i64(&mut self, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
encode_coeff_i64(self, log_base2k, log_k, i, value, log_max)
}
@@ -119,8 +105,7 @@ fn encode_vec_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, data: &[i64],
.enumerate()
.for_each(|(i, i_rev)| {
let shift: usize = i * log_base2k;
-izip!(a.at_mut(i_rev)[..size].iter_mut(), data[..size].iter())
-.for_each(|(y, x)| *y = (x >> shift) & mask);
+izip!(a.at_mut(i_rev)[..size].iter_mut(), data[..size].iter()).for_each(|(y, x)| *y = (x >> shift) & mask);
})
}
@@ -189,14 +174,7 @@ fn decode_vec_float(a: &VecZnx, log_base2k: usize, data: &mut [Float]) {
});
}
-fn encode_coeff_i64(
-a: &mut VecZnx,
-log_base2k: usize,
-log_k: usize,
-i: usize,
-value: i64,
-log_max: usize,
-) {
+fn encode_coeff_i64(a: &mut VecZnx, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
debug_assert!(i < a.n());
let cols: usize = (log_k + log_base2k - 1) / log_base2k;
debug_assert!(
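The encode_vec_i64 hunk above writes each input value across the columns of a VecZnx with one shift-and-mask per limb. A self-contained sketch of that base-2^k split (illustrative, not the crate's code; for simplicity it assumes a non-negative input that fits in limbs * log_base2k bits):

fn decompose(x: i64, log_base2k: usize, limbs: usize) -> Vec<i64> {
    let mask: i64 = (1 << log_base2k) - 1;
    // Limb i holds bits [i*k, (i+1)*k) of x, exactly like the (x >> shift) & mask step above.
    (0..limbs).map(|i| (x >> (i * log_base2k)) & mask).collect()
}

fn recompose(limbs: &[i64], log_base2k: usize) -> i64 {
    limbs.iter().enumerate().fold(0i64, |acc, (i, l)| acc + (*l << (i * log_base2k)))
}

fn main() {
    let x: i64 = 0x1_2345;
    assert_eq!(recompose(&decompose(x, 16, 3), 16), x);
}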

View File

@@ -62,10 +62,7 @@ unsafe extern "C" {
pub unsafe fn new_reim_fft_precomp(m: u32, num_buffers: u32) -> *mut REIM_FFT_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_fft_precomp_get_buffer(
-tables: *const REIM_FFT_PRECOMP,
-buffer_index: u32,
-) -> *mut f64;
+pub unsafe fn reim_fft_precomp_get_buffer(tables: *const REIM_FFT_PRECOMP, buffer_index: u32) -> *mut f64;
}
unsafe extern "C" {
pub unsafe fn new_reim_fft_buffer(m: u32) -> *mut f64;
@@ -80,10 +77,7 @@ unsafe extern "C" {
pub unsafe fn new_reim_ifft_precomp(m: u32, num_buffers: u32) -> *mut REIM_IFFT_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_ifft_precomp_get_buffer(
-tables: *const REIM_IFFT_PRECOMP,
-buffer_index: u32,
-) -> *mut f64;
+pub unsafe fn reim_ifft_precomp_get_buffer(tables: *const REIM_IFFT_PRECOMP, buffer_index: u32) -> *mut f64;
}
unsafe extern "C" {
pub unsafe fn reim_ifft(tables: *const REIM_IFFT_PRECOMP, data: *mut f64);
@@ -92,120 +86,58 @@ unsafe extern "C" {
pub unsafe fn new_reim_fftvec_mul_precomp(m: u32) -> *mut REIM_FFTVEC_MUL_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_fftvec_mul(
-tables: *const REIM_FFTVEC_MUL_PRECOMP,
-r: *mut f64,
-a: *const f64,
-b: *const f64,
-);
+pub unsafe fn reim_fftvec_mul(tables: *const REIM_FFTVEC_MUL_PRECOMP, r: *mut f64, a: *const f64, b: *const f64);
}
unsafe extern "C" {
pub unsafe fn new_reim_fftvec_addmul_precomp(m: u32) -> *mut REIM_FFTVEC_ADDMUL_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_fftvec_addmul(
-tables: *const REIM_FFTVEC_ADDMUL_PRECOMP,
-r: *mut f64,
-a: *const f64,
-b: *const f64,
-);
+pub unsafe fn reim_fftvec_addmul(tables: *const REIM_FFTVEC_ADDMUL_PRECOMP, r: *mut f64, a: *const f64, b: *const f64);
}
unsafe extern "C" {
-pub unsafe fn new_reim_from_znx32_precomp(
-m: u32,
-log2bound: u32,
-) -> *mut REIM_FROM_ZNX32_PRECOMP;
+pub unsafe fn new_reim_from_znx32_precomp(m: u32, log2bound: u32) -> *mut REIM_FROM_ZNX32_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_from_znx32(
-tables: *const REIM_FROM_ZNX32_PRECOMP,
-r: *mut ::std::os::raw::c_void,
-a: *const i32,
-);
+pub unsafe fn reim_from_znx32(tables: *const REIM_FROM_ZNX32_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i32);
}
unsafe extern "C" {
-pub unsafe fn reim_from_znx64(
-tables: *const REIM_FROM_ZNX64_PRECOMP,
-r: *mut ::std::os::raw::c_void,
-a: *const i64,
-);
+pub unsafe fn reim_from_znx64(tables: *const REIM_FROM_ZNX64_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i64);
}
unsafe extern "C" {
pub unsafe fn new_reim_from_znx64_precomp(m: u32, maxbnd: u32) -> *mut REIM_FROM_ZNX64_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_from_znx64_simple(
-m: u32,
-log2bound: u32,
-r: *mut ::std::os::raw::c_void,
-a: *const i64,
-);
+pub unsafe fn reim_from_znx64_simple(m: u32, log2bound: u32, r: *mut ::std::os::raw::c_void, a: *const i64);
}
unsafe extern "C" {
pub unsafe fn new_reim_from_tnx32_precomp(m: u32) -> *mut REIM_FROM_TNX32_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_from_tnx32(
-tables: *const REIM_FROM_TNX32_PRECOMP,
-r: *mut ::std::os::raw::c_void,
-a: *const i32,
-);
+pub unsafe fn reim_from_tnx32(tables: *const REIM_FROM_TNX32_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i32);
}
unsafe extern "C" {
-pub unsafe fn new_reim_to_tnx32_precomp(
-m: u32,
-divisor: f64,
-log2overhead: u32,
-) -> *mut REIM_TO_TNX32_PRECOMP;
+pub unsafe fn new_reim_to_tnx32_precomp(m: u32, divisor: f64, log2overhead: u32) -> *mut REIM_TO_TNX32_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_to_tnx32(
-tables: *const REIM_TO_TNX32_PRECOMP,
-r: *mut i32,
-a: *const ::std::os::raw::c_void,
-);
+pub unsafe fn reim_to_tnx32(tables: *const REIM_TO_TNX32_PRECOMP, r: *mut i32, a: *const ::std::os::raw::c_void);
}
unsafe extern "C" {
-pub unsafe fn new_reim_to_tnx_precomp(
-m: u32,
-divisor: f64,
-log2overhead: u32,
-) -> *mut REIM_TO_TNX_PRECOMP;
+pub unsafe fn new_reim_to_tnx_precomp(m: u32, divisor: f64, log2overhead: u32) -> *mut REIM_TO_TNX_PRECOMP;
}
unsafe extern "C" {
pub unsafe fn reim_to_tnx(tables: *const REIM_TO_TNX_PRECOMP, r: *mut f64, a: *const f64);
}
unsafe extern "C" {
-pub unsafe fn reim_to_tnx_simple(
-m: u32,
-divisor: f64,
-log2overhead: u32,
-r: *mut f64,
-a: *const f64,
-);
+pub unsafe fn reim_to_tnx_simple(m: u32, divisor: f64, log2overhead: u32, r: *mut f64, a: *const f64);
}
unsafe extern "C" {
-pub unsafe fn new_reim_to_znx64_precomp(
-m: u32,
-divisor: f64,
-log2bound: u32,
-) -> *mut REIM_TO_ZNX64_PRECOMP;
+pub unsafe fn new_reim_to_znx64_precomp(m: u32, divisor: f64, log2bound: u32) -> *mut REIM_TO_ZNX64_PRECOMP;
}
unsafe extern "C" {
-pub unsafe fn reim_to_znx64(
-precomp: *const REIM_TO_ZNX64_PRECOMP,
-r: *mut i64,
-a: *const ::std::os::raw::c_void,
-);
+pub unsafe fn reim_to_znx64(precomp: *const REIM_TO_ZNX64_PRECOMP, r: *mut i64, a: *const ::std::os::raw::c_void);
}
unsafe extern "C" {
-pub unsafe fn reim_to_znx64_simple(
-m: u32,
-divisor: f64,
-log2bound: u32,
-r: *mut i64,
-a: *const ::std::os::raw::c_void,
-);
+pub unsafe fn reim_to_znx64_simple(m: u32, divisor: f64, log2bound: u32, r: *mut i64, a: *const ::std::os::raw::c_void);
}
unsafe extern "C" {
pub unsafe fn reim_fft_simple(m: u32, data: *mut ::std::os::raw::c_void);
@@ -230,22 +162,11 @@ unsafe extern "C" {
);
}
unsafe extern "C" {
-pub unsafe fn reim_from_znx32_simple(
-m: u32,
-log2bound: u32,
-r: *mut ::std::os::raw::c_void,
-x: *const i32,
-);
+pub unsafe fn reim_from_znx32_simple(m: u32, log2bound: u32, r: *mut ::std::os::raw::c_void, x: *const i32);
}
unsafe extern "C" {
pub unsafe fn reim_from_tnx32_simple(m: u32, r: *mut ::std::os::raw::c_void, x: *const i32);
}
unsafe extern "C" {
-pub unsafe fn reim_to_tnx32_simple(
-m: u32,
-divisor: f64,
-log2overhead: u32,
-r: *mut i32,
-x: *const ::std::os::raw::c_void,
-);
+pub unsafe fn reim_to_tnx32_simple(m: u32, divisor: f64, log2overhead: u32, r: *mut i32, x: *const ::std::os::raw::c_void);
}

View File

@@ -44,14 +44,7 @@ unsafe extern "C" {
);
}
unsafe extern "C" {
-pub unsafe fn vec_znx_dft(
-module: *const MODULE,
-res: *mut VEC_ZNX_DFT,
-res_size: u64,
-a: *const i64,
-a_size: u64,
-a_sl: u64,
-);
+pub unsafe fn vec_znx_dft(module: *const MODULE, res: *mut VEC_ZNX_DFT, res_size: u64, a: *const i64, a_size: u64, a_sl: u64);
}
unsafe extern "C" {
pub unsafe fn vec_znx_idft(

View File

@@ -52,13 +52,7 @@ unsafe extern "C" {
}
unsafe extern "C" {
-pub unsafe fn vmp_apply_dft_tmp_bytes(
-module: *const MODULE,
-res_size: u64,
-a_size: u64,
-nrows: u64,
-ncols: u64,
-) -> u64;
+pub unsafe fn vmp_apply_dft_tmp_bytes(module: *const MODULE, res_size: u64, a_size: u64, nrows: u64, ncols: u64) -> u64;
}
unsafe extern "C" {

View File

@@ -64,24 +64,11 @@ unsafe extern "C" {
pub unsafe fn rnx_mul_xp_minus_one_inplace(nn: u64, p: i64, res: *mut f64);
}
unsafe extern "C" {
-pub unsafe fn znx_normalize(
-nn: u64,
-base_k: u64,
-out: *mut i64,
-carry_out: *mut i64,
-in_: *const i64,
-carry_in: *const i64,
-);
+pub unsafe fn znx_normalize(nn: u64, base_k: u64, out: *mut i64, carry_out: *mut i64, in_: *const i64, carry_in: *const i64);
}
unsafe extern "C" {
-pub unsafe fn znx_small_single_product(
-module: *const MODULE,
-res: *mut i64,
-a: *const i64,
-b: *const i64,
-tmp: *mut u8,
-);
+pub unsafe fn znx_small_single_product(module: *const MODULE, res: *mut i64, a: *const i64, b: *const i64, tmp: *mut u8);
}
unsafe extern "C" {

View File

@@ -1,11 +1,5 @@
pub mod encoding;
-#[allow(
-non_camel_case_types,
-non_snake_case,
-non_upper_case_globals,
-dead_code,
-improper_ctypes
-)]
+#[allow(non_camel_case_types, non_snake_case, non_upper_case_globals, dead_code, improper_ctypes)]
// Other modules and exports
pub mod ffi;
pub mod infos;
@@ -42,7 +36,10 @@ pub fn is_aligned<T>(ptr: *const T) -> bool {
}
pub fn assert_alignement<T>(ptr: *const T) {
-assert!(is_aligned(ptr), "invalid alignement: ensure passed bytes have been allocated with [alloc_aligned_u8] or [alloc_aligned]")
+assert!(
+is_aligned(ptr),
+"invalid alignement: ensure passed bytes have been allocated with [alloc_aligned_u8] or [alloc_aligned]"
+)
}
pub fn cast<T, V>(data: &[T]) -> &[V] {
@@ -57,7 +54,7 @@ pub fn cast_mut<T, V>(data: &[T]) -> &mut [V] {
unsafe { std::slice::from_raw_parts_mut(ptr, len) }
}
-use std::alloc::{alloc, Layout};
+use std::alloc::{Layout, alloc};
use std::ptr;
/// Allocates a block of bytes with a custom alignement.

View File

@@ -1,5 +1,5 @@
-use crate::ffi::module::{delete_module_info, module_info_t, new_module_info, MODULE};
use crate::GALOISGENERATOR;
+use crate::ffi::module::{MODULE, delete_module_info, module_info_t, new_module_info};
#[derive(Copy, Clone)]
#[repr(u8)]
@@ -56,8 +56,7 @@ impl Module {
if gen == 0 {
return 1;
}
-((mod_exp_u64(GALOISGENERATOR, gen.abs() as usize) & (self.cyclotomic_order() - 1)) as i64)
-* gen.signum()
+((mod_exp_u64(GALOISGENERATOR, gen.abs() as usize) & (self.cyclotomic_order() - 1)) as i64) * gen.signum()
}
// Returns gen^-1
@@ -65,8 +64,7 @@ impl Module {
if gen == 0 {
panic!("cannot invert 0")
}
-((mod_exp_u64(gen.abs() as u64, (self.cyclotomic_order() - 1) as usize)
-& (self.cyclotomic_order() - 1)) as i64)
+((mod_exp_u64(gen.abs() as u64, (self.cyclotomic_order() - 1) as usize) & (self.cyclotomic_order() - 1)) as i64)
* gen.signum()
}
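The galois_element hunks above compute a signed power of the generator and reduce it modulo the cyclotomic order (a power of two) with a bit-mask. A hedged sketch of the same idea, with the generator and the exponentiation written out explicitly (hypothetical stand-ins for the crate's GALOISGENERATOR and mod_exp_u64; g = 5 is the usual choice for negacyclic rings, but the crate's actual constant is not shown in this diff):

fn galois_element(gen: i64, two_n: u64, g: u64) -> i64 {
    if gen == 0 {
        return 1;
    }
    // Square-and-multiply modulo 2N; since 2N is a power of two, "mod" is just a mask.
    let mask = two_n - 1;
    let (mut acc, mut base, mut e) = (1u64, g & mask, gen.unsigned_abs());
    while e > 0 {
        if e & 1 == 1 {
            acc = acc.wrapping_mul(base) & mask;
        }
        base = base.wrapping_mul(base) & mask;
        e >>= 1;
    }
    (acc as i64) * gen.signum()
}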

View File

@@ -18,15 +18,7 @@ pub trait Sampling {
);
}
/// Adds a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
-fn add_normal(
-&self,
-log_base2k: usize,
-a: &mut VecZnx,
-log_k: usize,
-source: &mut Source,
-sigma: f64,
-bound: f64,
-);
+fn add_normal(&self, log_base2k: usize, a: &mut VecZnx, log_k: usize, source: &mut Source, sigma: f64, bound: f64);
}
impl Sampling for Module {
@@ -76,15 +68,7 @@ impl Sampling for Module {
}
}
-fn add_normal(
-&self,
-log_base2k: usize,
-a: &mut VecZnx,
-log_k: usize,
-source: &mut Source,
-sigma: f64,
-bound: f64,
-) {
+fn add_normal(&self, log_base2k: usize, a: &mut VecZnx, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
self.add_dist_f64(
log_base2k,
a,

View File

@@ -1,7 +1,7 @@
use crate::{Encoding, Infos, VecZnx};
+use rug::Float;
use rug::float::Round;
use rug::ops::{AddAssignRound, DivAssignRound, SubAssignRound};
-use rug::Float;
impl VecZnx {
pub fn std(&self, log_base2k: usize) -> f64 {

View File

@@ -1,8 +1,8 @@
use crate::ffi::svp::{self, svp_ppol_t};
use crate::ffi::vec_znx_dft::vec_znx_dft_t;
-use crate::{assert_alignement, Module, VecZnx, VecZnxDft, BACKEND};
-use crate::{alloc_aligned, cast_mut, Infos};
+use crate::{BACKEND, Module, VecZnx, VecZnxDft, assert_alignement};
+use crate::{Infos, alloc_aligned, cast_mut};
use rand::seq::SliceRandom;
use rand_core::RngCore;
use rand_distr::{Distribution, WeightedIndex};

View File

@@ -1,8 +1,8 @@
use crate::cast_mut;
use crate::ffi::vec_znx;
use crate::ffi::znx;
-use crate::{alloc_aligned, assert_alignement};
use crate::{Infos, Module};
+use crate::{alloc_aligned, assert_alignement};
use itertools::izip;
use std::cmp::min;

View File

@@ -1,5 +1,5 @@
use crate::ffi::vec_znx_big::{self, vec_znx_big_t};
-use crate::{alloc_aligned, assert_alignement, Infos, Module, VecZnx, VecZnxDft, BACKEND};
+use crate::{BACKEND, Infos, Module, VecZnx, VecZnxDft, alloc_aligned, assert_alignement};
pub struct VecZnxBig {
pub data: Vec<u8>,
@@ -141,13 +141,7 @@ pub trait VecZnxBigOps {
fn vec_znx_big_normalize_tmp_bytes(&self) -> usize;
/// b <- normalize(a)
-fn vec_znx_big_normalize(
-&self,
-log_base2k: usize,
-b: &mut VecZnx,
-a: &VecZnxBig,
-tmp_bytes: &mut [u8],
-);
+fn vec_znx_big_normalize(&self, log_base2k: usize, b: &mut VecZnx, a: &VecZnxBig, tmp_bytes: &mut [u8]);
fn vec_znx_big_range_normalize_base2k_tmp_bytes(&self) -> usize;
@@ -256,13 +250,7 @@ impl VecZnxBigOps for Module {
unsafe { vec_znx_big::vec_znx_big_normalize_base2k_tmp_bytes(self.ptr) as usize }
}
-fn vec_znx_big_normalize(
-&self,
-log_base2k: usize,
-b: &mut VecZnx,
-a: &VecZnxBig,
-tmp_bytes: &mut [u8],
-) {
+fn vec_znx_big_normalize(&self, log_base2k: usize, b: &mut VecZnx, a: &VecZnxBig, tmp_bytes: &mut [u8]) {
debug_assert!(
tmp_bytes.len() >= <Module as VecZnxBigOps>::vec_znx_big_normalize_tmp_bytes(self),
"invalid tmp_bytes: tmp_bytes.len()={} <= self.vec_znx_big_normalize_tmp_bytes()={}",

View File

@@ -1,8 +1,8 @@
use crate::ffi::vec_znx_big::vec_znx_big_t;
use crate::ffi::vec_znx_dft;
use crate::ffi::vec_znx_dft::{bytes_of_vec_znx_dft, vec_znx_dft_t};
-use crate::{alloc_aligned, VecZnx, DEFAULTALIGN};
-use crate::{assert_alignement, Infos, Module, VecZnxBig, BACKEND};
+use crate::{BACKEND, Infos, Module, VecZnxBig, assert_alignement};
+use crate::{DEFAULTALIGN, VecZnx, alloc_aligned};
pub struct VecZnxDft {
pub data: Vec<u8>,
@@ -307,11 +307,9 @@ impl VecZnxDftOps for Module {
#[cfg(test)]
mod tests {
-use crate::{
-alloc_aligned, Module, Sampling, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps, BACKEND,
-};
+use crate::{BACKEND, Module, Sampling, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps, alloc_aligned};
use itertools::izip;
-use sampling::source::{new_seed, Source};
+use sampling::source::{Source, new_seed};
#[test]
fn test_automorphism_dft() {

View File

@@ -1,9 +1,7 @@
use crate::ffi::vec_znx_big::vec_znx_big_t;
use crate::ffi::vec_znx_dft::vec_znx_dft_t;
use crate::ffi::vmp::{self, vmp_pmat_t};
-use crate::{
-alloc_aligned, assert_alignement, Infos, Module, VecZnx, VecZnxBig, VecZnxDft, BACKEND,
-};
+use crate::{BACKEND, Infos, Module, VecZnx, VecZnxBig, VecZnxDft, alloc_aligned, assert_alignement};
/// Vector Matrix Product Prepared Matrix: a vector of [VecZnx],
/// stored as a 3D matrix in the DFT domain in a single contiguous array.
@@ -100,8 +98,7 @@ impl VmpPMat {
if self.n < 8 {
res.copy_from_slice(
-&self.raw::<T>()[(row + col * self.rows()) * self.n()
-..(row + col * self.rows()) * (self.n() + 1)],
+&self.raw::<T>()[(row + col * self.rows()) * self.n()..(row + col * self.rows()) * (self.n() + 1)],
);
} else {
(0..self.n >> 3).for_each(|blk| {
@@ -120,10 +117,7 @@ impl VmpPMat {
if col == (ncols - 1) && (ncols & 1 == 1) {
&self.raw::<T>()[blk * nrows * ncols * 8 + col * nrows * 8 + row * 8..]
} else {
-&self.raw::<T>()[blk * nrows * ncols * 8
-+ (col / 2) * (2 * nrows) * 8
-+ row * 2 * 8
-+ (col % 2) * 8..]
+&self.raw::<T>()[blk * nrows * ncols * 8 + (col / 2) * (2 * nrows) * 8 + row * 2 * 8 + (col % 2) * 8..]
}
}
}
@@ -220,13 +214,7 @@ pub trait VmpPMatOps {
/// * `a_cols`: number of cols of the input [VecZnx].
/// * `rows`: number of rows of the input [VmpPMat].
/// * `cols`: number of cols of the input [VmpPMat].
-fn vmp_apply_dft_tmp_bytes(
-&self,
-c_cols: usize,
-a_cols: usize,
-rows: usize,
-cols: usize,
-) -> usize;
+fn vmp_apply_dft_tmp_bytes(&self, c_cols: usize, a_cols: usize, rows: usize, cols: usize) -> usize;
/// Applies the vector matrix product [VecZnxDft] x [VmpPMat].
///
@@ -288,13 +276,7 @@ pub trait VmpPMatOps {
/// * `a_cols`: number of cols of the input [VecZnxDft].
/// * `rows`: number of rows of the input [VmpPMat].
/// * `cols`: number of cols of the input [VmpPMat].
-fn vmp_apply_dft_to_dft_tmp_bytes(
-&self,
-c_cols: usize,
-a_cols: usize,
-rows: usize,
-cols: usize,
-) -> usize;
+fn vmp_apply_dft_to_dft_tmp_bytes(&self, c_cols: usize, a_cols: usize, rows: usize, cols: usize) -> usize;
/// Applies the vector matrix product [VecZnxDft] x [VmpPMat].
/// The size of `buf` is given by [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes].
@@ -348,13 +330,7 @@ pub trait VmpPMatOps {
/// * `a`: the left operand [VecZnxDft] of the vector matrix product.
/// * `b`: the right operand [VmpPMat] of the vector matrix product.
/// * `buf`: scratch space, the size can be obtained with [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes].
-fn vmp_apply_dft_to_dft_add(
-&self,
-c: &mut VecZnxDft,
-a: &VecZnxDft,
-b: &VmpPMat,
-buf: &mut [u8],
-);
+fn vmp_apply_dft_to_dft_add(&self, c: &mut VecZnxDft, a: &VecZnxDft, b: &VmpPMat, buf: &mut [u8]);
/// Applies the vector matrix product [VecZnxDft] x [VmpPMat] in place.
/// The size of `buf` is given by [VmpPMatOps::vmp_apply_dft_to_dft_tmp_bytes].
@@ -521,13 +497,7 @@ impl VmpPMatOps for Module {
}
}
-fn vmp_apply_dft_tmp_bytes(
-&self,
-res_cols: usize,
-a_cols: usize,
-gct_rows: usize,
-gct_cols: usize,
-) -> usize {
+fn vmp_apply_dft_tmp_bytes(&self, res_cols: usize, a_cols: usize, gct_rows: usize, gct_cols: usize) -> usize {
unsafe {
vmp::vmp_apply_dft_tmp_bytes(
self.ptr,
@@ -540,9 +510,7 @@ impl VmpPMatOps for Module {
}
fn vmp_apply_dft(&self, c: &mut VecZnxDft, a: &VecZnx, b: &VmpPMat, tmp_bytes: &mut [u8]) {
-debug_assert!(
-tmp_bytes.len() >= self.vmp_apply_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols())
-);
+debug_assert!(tmp_bytes.len() >= self.vmp_apply_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols()));
#[cfg(debug_assertions)]
{
assert_alignement(tmp_bytes.as_ptr());
@@ -564,9 +532,7 @@ impl VmpPMatOps for Module {
}
fn vmp_apply_dft_add(&self, c: &mut VecZnxDft, a: &VecZnx, b: &VmpPMat, tmp_bytes: &mut [u8]) {
-debug_assert!(
-tmp_bytes.len() >= self.vmp_apply_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols())
-);
+debug_assert!(tmp_bytes.len() >= self.vmp_apply_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols()));
#[cfg(debug_assertions)]
{
assert_alignement(tmp_bytes.as_ptr());
@@ -587,13 +553,7 @@ impl VmpPMatOps for Module {
}
}
-fn vmp_apply_dft_to_dft_tmp_bytes(
-&self,
-res_cols: usize,
-a_cols: usize,
-gct_rows: usize,
-gct_cols: usize,
-) -> usize {
+fn vmp_apply_dft_to_dft_tmp_bytes(&self, res_cols: usize, a_cols: usize, gct_rows: usize, gct_cols: usize) -> usize {
unsafe {
vmp::vmp_apply_dft_to_dft_tmp_bytes(
self.ptr,
@@ -605,17 +565,8 @@ impl VmpPMatOps for Module {
}
}
-fn vmp_apply_dft_to_dft(
-&self,
-c: &mut VecZnxDft,
-a: &VecZnxDft,
-b: &VmpPMat,
-tmp_bytes: &mut [u8],
-) {
-debug_assert!(
-tmp_bytes.len()
->= self.vmp_apply_dft_to_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols())
-);
+fn vmp_apply_dft_to_dft(&self, c: &mut VecZnxDft, a: &VecZnxDft, b: &VmpPMat, tmp_bytes: &mut [u8]) {
+debug_assert!(tmp_bytes.len() >= self.vmp_apply_dft_to_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols()));
#[cfg(debug_assertions)]
{
assert_alignement(tmp_bytes.as_ptr());
@@ -635,17 +586,8 @@ impl VmpPMatOps for Module {
}
}
-fn vmp_apply_dft_to_dft_add(
-&self,
-c: &mut VecZnxDft,
-a: &VecZnxDft,
-b: &VmpPMat,
-tmp_bytes: &mut [u8],
-) {
-debug_assert!(
-tmp_bytes.len()
->= self.vmp_apply_dft_to_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols())
-);
+fn vmp_apply_dft_to_dft_add(&self, c: &mut VecZnxDft, a: &VecZnxDft, b: &VmpPMat, tmp_bytes: &mut [u8]) {
+debug_assert!(tmp_bytes.len() >= self.vmp_apply_dft_to_dft_tmp_bytes(c.cols(), a.cols(), b.rows(), b.cols()));
#[cfg(debug_assertions)]
{
assert_alignement(tmp_bytes.as_ptr());
@@ -666,10 +608,7 @@ impl VmpPMatOps for Module {
}
fn vmp_apply_dft_to_dft_inplace(&self, b: &mut VecZnxDft, a: &VmpPMat, tmp_bytes: &mut [u8]) {
-debug_assert!(
-tmp_bytes.len()
->= self.vmp_apply_dft_to_dft_tmp_bytes(b.cols(), b.cols(), a.rows(), a.cols())
-);
+debug_assert!(tmp_bytes.len() >= self.vmp_apply_dft_to_dft_tmp_bytes(b.cols(), b.cols(), a.rows(), a.cols()));
#[cfg(debug_assertions)]
{
assert_alignement(tmp_bytes.as_ptr());
@@ -693,8 +632,7 @@ impl VmpPMatOps for Module {
#[cfg(test)]
mod tests {
use crate::{
-alloc_aligned, Module, Sampling, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps,
-VecZnxOps, VmpPMat, VmpPMatOps,
+Module, Sampling, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps, alloc_aligned,
};
use sampling::source::Source;
@@ -712,8 +650,7 @@ mod tests {
let mut vmpmat_0: VmpPMat = module.new_vmp_pmat(vpmat_rows, vpmat_cols);
let mut vmpmat_1: VmpPMat = module.new_vmp_pmat(vpmat_rows, vpmat_cols);
-let mut tmp_bytes: Vec<u8> =
-alloc_aligned(module.vmp_prepare_tmp_bytes(vpmat_rows, vpmat_cols));
+let mut tmp_bytes: Vec<u8> = alloc_aligned(module.vmp_prepare_tmp_bytes(vpmat_rows, vpmat_cols));
for row_i in 0..vpmat_rows {
let mut source: Source = Source::new([0u8; 32]);

View File

@@ -1,6 +1,5 @@
use base2k::{
-BACKEND, Infos, Module, Sampling, SvpPPolOps, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps,
-VmpPMat, alloc_aligned_u8,
+BACKEND, Infos, Module, Sampling, SvpPPolOps, VecZnx, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat, alloc_aligned_u8,
};
use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main};
use rlwe::{
@@ -28,8 +27,7 @@ fn bench_gadget_product_inplace(c: &mut Criterion) {
})
}
-let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
-c.benchmark_group("gadget_product_inplace");
+let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("gadget_product_inplace");
for log_n in 10..11 {
let params_lit: ParametersLiteral = ParametersLiteral {

View File

@@ -22,10 +22,8 @@ fn main() {
let params: Parameters = Parameters::new(&params_lit);
-let mut tmp_bytes: Vec<u8> = alloc_aligned(
-params.decrypt_rlwe_tmp_byte(params.log_q())
-| params.encrypt_rlwe_sk_tmp_bytes(params.log_q()),
-);
+let mut tmp_bytes: Vec<u8> =
+alloc_aligned(params.decrypt_rlwe_tmp_byte(params.log_q()) | params.encrypt_rlwe_sk_tmp_bytes(params.log_q()));
let mut source: Source = Source::new([0; 32]);
let mut sk: SecretKey = SecretKey::new(params.module());

View File

@@ -2,12 +2,13 @@ use crate::{
ciphertext::{Ciphertext, new_gadget_ciphertext},
elem::ElemCommon,
encryptor::{encrypt_grlwe_sk, encrypt_grlwe_sk_tmp_bytes},
+key_switching::{key_switch_rlwe, key_switch_rlwe_inplace, key_switch_tmp_bytes},
keys::SecretKey,
parameters::Parameters,
};
use base2k::{
-Module, Scalar, ScalarOps, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
-VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps, assert_alignement,
+Module, Scalar, ScalarOps, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat,
+VmpPMatOps, assert_alignement,
};
use sampling::source::Source;
use std::{cmp::min, collections::HashMap};
@@ -18,15 +19,8 @@ pub struct AutomorphismKey {
pub p: i64,
}
-pub fn automorphis_key_new_tmp_bytes(
-module: &Module,
-log_base2k: usize,
-rows: usize,
-log_q: usize,
-) -> usize {
-module.bytes_of_scalar()
-+ module.bytes_of_svp_ppol()
-+ encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q)
+pub fn automorphis_key_new_tmp_bytes(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> usize {
+module.bytes_of_scalar() + module.bytes_of_svp_ppol() + encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q)
}
impl Parameters {
@@ -34,12 +28,7 @@ impl Parameters {
automorphis_key_new_tmp_bytes(self.module(), self.log_base2k(), rows, log_q)
}
-pub fn automorphism_tmp_bytes(
-&self,
-res_logq: usize,
-in_logq: usize,
-gct_logq: usize,
-) -> usize {
+pub fn automorphism_tmp_bytes(&self, res_logq: usize, in_logq: usize, gct_logq: usize) -> usize {
automorphism_tmp_bytes(
self.module(),
self.log_base2k(),
@@ -122,8 +111,7 @@ impl AutomorphismKey {
let mut keys: Vec<AutomorphismKey> = Vec::new();
p.iter().for_each(|pi| {
-let mut value: Ciphertext<VmpPMat> =
-new_gadget_ciphertext(module, log_base2k, rows, log_q);
+let mut value: Ciphertext<VmpPMat> = new_gadget_ciphertext(module, log_base2k, rows, log_q);
let p_inv: i64 = module.galois_element_inv(*pi);
@@ -143,19 +131,8 @@ impl AutomorphismKey {
}
}
-pub fn automorphism_tmp_bytes(
-module: &Module,
-log_base2k: usize,
-res_logq: usize,
-in_logq: usize,
-gct_logq: usize,
-) -> usize {
-let gct_cols: usize = (gct_logq + log_base2k - 1) / log_base2k;
-let in_cols: usize = (in_logq + log_base2k - 1) / log_base2k;
-let res_cols: usize = (res_logq + log_base2k - 1) / log_base2k;
-return module.vmp_apply_dft_to_dft_tmp_bytes(res_cols, in_cols, in_cols, gct_cols)
-+ module.bytes_of_vec_znx_dft(std::cmp::min(res_cols, in_cols))
-+ module.bytes_of_vec_znx_dft(gct_cols);
+pub fn automorphism_tmp_bytes(module: &Module, log_base2k: usize, res_logq: usize, in_logq: usize, gct_logq: usize) -> usize {
+key_switch_tmp_bytes(module, log_base2k, res_logq, in_logq, gct_logq)
}
pub fn automorphism(
@@ -166,67 +143,14 @@ pub fn automorphism(
b_cols: usize,
tmp_bytes: &mut [u8],
) {
-let cols: usize = min(min(c.cols(), a.cols()), b.value.rows());
-#[cfg(debug_assertions)]
-{
-assert!(b_cols <= b.value.cols());
-assert!(
-tmp_bytes.len()
->= automorphism_tmp_bytes(
-module,
-c.cols(),
-a.cols(),
-b.value.rows(),
-b.value.cols()
-)
-);
-assert_alignement(tmp_bytes.as_ptr());
-}
-let (tmp_bytes_a1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
-let (tmp_bytes_res_dft, tmp_bytes) =
-tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
-let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_a1_dft);
-let mut res_dft: VecZnxDft =
-module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_res_dft);
-let mut res_big: VecZnxBig = res_dft.as_vec_znx_big();
-// a1_dft = DFT(a[1])
-module.vec_znx_dft(&mut a1_dft, a.at(1));
-// res_dft = IDFT(<DFT(a), DFT([-A*AUTO(s, -p) + 2^{-K*i}*s + E])>) = [-b*AUTO(s, -p) + a * s + e]
-module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(0), tmp_bytes);
-module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
-// res_dft = [-b*AUTO(s, -p) + a * s + e] + [-a * s + m + e] = [-b*AUTO(s, -p) + m + e]
-module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0));
-// c[0] = NORMALIZE([-b*AUTO(s, -p) + m + e])
-module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(0), &mut res_big, tmp_bytes);
+key_switch_rlwe(module, c, a, &b.value, b_cols, tmp_bytes);
// c[0] = AUTO([-b*AUTO(s, -p) + m + e], p) = [-AUTO(b, p)*s + AUTO(m, p) + AUTO(b, e)]
module.vec_znx_automorphism_inplace(b.p, c.at_mut(0));
-// res_dft = IDFT(<DFT(a), DFT([A])>) = [b]
-module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(1), tmp_bytes);
-module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
-// c[1] = b
-module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(1), &mut res_big, tmp_bytes);
// c[1] = AUTO(b, p)
module.vec_znx_automorphism_inplace(b.p, c.at_mut(1));
}
-pub fn automorphism_inplace_tmp_bytes(
-module: &Module,
-c_cols: usize,
-a_cols: usize,
-b_rows: usize,
-b_cols: usize,
-) -> usize {
+pub fn automorphism_inplace_tmp_bytes(module: &Module, c_cols: usize, a_cols: usize, b_rows: usize, b_cols: usize) -> usize {
return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols)
+ 2 * module.bytes_of_vec_znx_dft(std::cmp::min(c_cols, a_cols));
}
@@ -238,60 +162,9 @@ pub fn automorphism_inplace(
b_cols: usize,
tmp_bytes: &mut [u8],
) {
-let cols: usize = min(a.cols(), b.value.rows());
-#[cfg(debug_assertions)]
-{
-assert!(b_cols <= b.value.cols());
-assert!(
-tmp_bytes.len()
->= automorphism_inplace_tmp_bytes(
-module,
-a.cols(),
-a.cols(),
-b.value.rows(),
-b.value.cols()
-)
-);
-assert_alignement(tmp_bytes.as_ptr());
-}
-let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
-let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
-let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft);
-let mut res_dft: VecZnxDft =
-module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_res_dft);
-let mut res_big: VecZnxBig = res_dft.as_vec_znx_big();
-// a1_dft = DFT(a[1])
-module.vec_znx_dft(&mut a1_dft, a.at(1));
-// res_dft = IDFT(<DFT(a), DFT([-A*AUTO(s, -p) + 2^{-K*i}*s + E])>) = [-b*AUTO(s, -p) + a * s + e]
-module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(0), tmp_bytes);
-module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
-// res_dft = [-b*AUTO(s, -p) + a * s + e] + [-a * s + m + e] = [-b*AUTO(s, -p) + m + e]
-module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0));
-// a[0] = NORMALIZE([-b*AUTO(s, -p) + m + e])
-module.vec_znx_big_normalize(a.log_base2k(), a.at_mut(0), &mut res_big, tmp_bytes);
+key_switch_rlwe_inplace(module, a, &b.value, b_cols, tmp_bytes);
// a[0] = AUTO([-b*AUTO(s, -p) + m + e], p) = [-AUTO(b, p)*s + AUTO(m, p) + AUTO(b, e)]
module.vec_znx_automorphism_inplace(b.p, a.at_mut(0));
-// res_dft = IDFT(<DFT(a), DFT([A])>) = [b]
-module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.value.at(1), tmp_bytes);
-module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
-(0..b_cols).for_each(|col_i| {
-let raw: &[i64] = res_big.raw::<i64>(module);
-println!("{:?}", &raw[col_i * module.n()..(col_i + 1) * module.n()])
-});
-// a[1] = b
-module.vec_znx_big_normalize(a.log_base2k(), a.at_mut(1), &mut res_big, tmp_bytes);
// a[1] = AUTO(b, p)
module.vec_znx_automorphism_inplace(b.p, a.at_mut(1));
}
@@ -307,16 +180,7 @@ pub fn automorphism_big(
#[cfg(debug_assertions)]
{
-assert!(
-tmp_bytes.len()
->= automorphism_tmp_bytes(
-module,
-c.cols(),
-a.cols(),
-b.value.rows(),
-b.value.cols()
-)
-);
+assert!(tmp_bytes.len() >= automorphism_tmp_bytes(module, c.cols(), a.cols(), b.value.rows(), b.value.cols()));
assert_alignement(tmp_bytes.as_ptr());
}
@@ -359,9 +223,7 @@ mod test {
parameters::{Parameters, ParametersLiteral},
plaintext::Plaintext,
};
-use base2k::{
-BACKEND, Encoding, Module, SvpPPol, SvpPPolOps, VecZnx, VecZnxOps, alloc_aligned,
-};
+use base2k::{BACKEND, Encoding, Module, SvpPPol, SvpPPolOps, VecZnx, VecZnxOps, alloc_aligned};
use sampling::source::{Source, new_seed};
#[test]
@@ -477,8 +339,7 @@ mod test {
let var_msg: f64 = (params.xs() as f64) / params.n() as f64;
let var_a_err: f64 = 1f64 / 12f64;
-let noise_pred: f64 =
-params.noise_grlwe_product(var_msg, var_a_err, ct_auto.log_q(), auto_key.value.log_q());
+let noise_pred: f64 = params.noise_grlwe_product(var_msg, var_a_err, ct_auto.log_q(), auto_key.value.log_q());
println!("noise_pred: {}", noise_pred);
println!("noise_have: {}", noise_have);

View File

@@ -74,24 +74,14 @@ pub fn new_rlwe_ciphertext(module: &Module, log_base2k: usize, log_q: usize) ->
Ciphertext::<VecZnx>::new(module, log_base2k, log_q, rows)
}
-pub fn new_gadget_ciphertext(
-module: &Module,
-log_base2k: usize,
-rows: usize,
-log_q: usize,
-) -> Ciphertext<VmpPMat> {
+pub fn new_gadget_ciphertext(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Ciphertext<VmpPMat> {
let cols: usize = (log_q + log_base2k - 1) / log_base2k;
let mut elem: Elem<VmpPMat> = Elem::<VmpPMat>::new(module, log_base2k, 2, rows, cols);
elem.log_q = log_q;
Ciphertext(elem)
}
-pub fn new_rgsw_ciphertext(
-module: &Module,
-log_base2k: usize,
-rows: usize,
-log_q: usize,
-) -> Ciphertext<VmpPMat> {
+pub fn new_rgsw_ciphertext(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> Ciphertext<VmpPMat> {
let cols: usize = (log_q + log_base2k - 1) / log_base2k;
let mut elem: Elem<VmpPMat> = Elem::<VmpPMat>::new(module, log_base2k, 4, rows, cols);
elem.log_q = log_q;

View File

@@ -33,24 +33,12 @@ impl Parameters {
)
}
-pub fn decrypt_rlwe(
-&self,
-res: &mut Plaintext,
-ct: &Ciphertext<VecZnx>,
-sk: &SvpPPol,
-tmp_bytes: &mut [u8],
-) {
+pub fn decrypt_rlwe(&self, res: &mut Plaintext, ct: &Ciphertext<VecZnx>, sk: &SvpPPol, tmp_bytes: &mut [u8]) {
decrypt_rlwe(self.module(), &mut res.0, &ct.0, sk, tmp_bytes)
}
}
-pub fn decrypt_rlwe(
-module: &Module,
-res: &mut Elem<VecZnx>,
-a: &Elem<VecZnx>,
-sk: &SvpPPol,
-tmp_bytes: &mut [u8],
-) {
+pub fn decrypt_rlwe(module: &Module, res: &mut Elem<VecZnx>, a: &Elem<VecZnx>, sk: &SvpPPol, tmp_bytes: &mut [u8]) {
let cols: usize = a.cols();
assert!(
@@ -60,8 +48,7 @@ pub fn decrypt_rlwe(
decrypt_rlwe_tmp_byte(module, cols)
);
-let (tmp_bytes_vec_znx_dft, tmp_bytes_normalize) =
-tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
+let (tmp_bytes_vec_znx_dft, tmp_bytes_normalize) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
let mut res_dft: VecZnxDft = VecZnxDft::from_bytes_borrow(module, cols, tmp_bytes_vec_znx_dft);
let mut res_big: base2k::VecZnxBig = res_dft.as_vec_znx_big();
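One pattern worth calling out in the hunk above, and repeated across this commit: a single caller-provided scratch buffer is carved with split_at_mut, so the DFT temporary and the normalization step share one aligned allocation instead of allocating inside the hot path. A minimal, hypothetical sketch of the pattern (names are not the crate's):

fn with_scratch(tmp_bytes: &mut [u8], dft_bytes: usize) {
    // The first region backs the DFT temporary; the remainder is left for normalization.
    assert!(tmp_bytes.len() >= dft_bytes);
    let (dft_region, normalize_region) = tmp_bytes.split_at_mut(dft_bytes);
    // The two regions would be handed to from_bytes_borrow-style constructors.
    let _ = (dft_region, normalize_region);
}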

View File

@@ -8,20 +8,8 @@ pub struct Elem<T> {
}
pub trait ElemVecZnx {
-fn from_bytes(
-module: &Module,
-log_base2k: usize,
-log_q: usize,
-size: usize,
-bytes: &mut [u8],
-) -> Elem<VecZnx>;
-fn from_bytes_borrow(
-module: &Module,
-log_base2k: usize,
-log_q: usize,
-size: usize,
-bytes: &mut [u8],
-) -> Elem<VecZnx>;
+fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, size: usize, bytes: &mut [u8]) -> Elem<VecZnx>;
+fn from_bytes_borrow(module: &Module, log_base2k: usize, log_q: usize, size: usize, bytes: &mut [u8]) -> Elem<VecZnx>;
fn bytes_of(module: &Module, log_base2k: usize, log_q: usize, size: usize) -> usize;
fn zero(&mut self);
}
@@ -32,13 +20,7 @@ impl ElemVecZnx for Elem<VecZnx> {
module.n() * cols * size * 8
}
-fn from_bytes(
-module: &Module,
-log_base2k: usize,
-log_q: usize,
-size: usize,
-bytes: &mut [u8],
-) -> Elem<VecZnx> {
+fn from_bytes(module: &Module, log_base2k: usize, log_q: usize, size: usize, bytes: &mut [u8]) -> Elem<VecZnx> {
assert!(size > 0);
let n: usize = module.n();
assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, size));
@@ -58,13 +40,7 @@ impl ElemVecZnx for Elem<VecZnx> {
}
}
-fn from_bytes_borrow(
-module: &Module,
-log_base2k: usize,
-log_q: usize,
-size: usize,
-bytes: &mut [u8],
-) -> Elem<VecZnx> {
+fn from_bytes_borrow(module: &Module, log_base2k: usize, log_q: usize, size: usize, bytes: &mut [u8]) -> Elem<VecZnx> {
assert!(size > 0);
let n: usize = module.n();
assert!(bytes.len() >= Self::bytes_of(module, log_base2k, log_q, size));

View File

@@ -5,8 +5,8 @@ use crate::parameters::Parameters;
use crate::plaintext::Plaintext;
use base2k::sampling::Sampling;
use base2k::{
-Infos, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft,
-VecZnxDftOps, VecZnxOps, VmpPMat, VmpPMatOps,
+Infos, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat,
+VmpPMatOps,
};
use sampling::source::{Source, new_seed};
@@ -75,12 +75,7 @@ impl EncryptorSk {
self.source_xe = Source::new(seed)
}
-pub fn encrypt_rlwe_sk(
-&mut self,
-params: &Parameters,
-ct: &mut Ciphertext<VecZnx>,
-pt: Option<&Plaintext>,
-) {
+pub fn encrypt_rlwe_sk(&mut self, params: &Parameters, ct: &mut Ciphertext<VecZnx>, pt: Option<&Plaintext>) {
assert!(
self.initialized == true,
"invalid call to [EncryptorSk.encrypt_rlwe_sk]: [EncryptorSk] has not been initialized with a [SecretKey]"
@@ -113,8 +108,7 @@ impl EncryptorSk {
}
pub fn encrypt_rlwe_sk_tmp_bytes(module: &Module, log_base2k: usize, log_q: usize) -> usize {
-module.bytes_of_vec_znx_dft((log_q + log_base2k - 1) / log_base2k)
-+ module.vec_znx_big_normalize_tmp_bytes()
+module.bytes_of_vec_znx_dft((log_q + log_base2k - 1) / log_base2k) + module.vec_znx_big_normalize_tmp_bytes()
}
pub fn encrypt_rlwe_sk(
module: &Module,
@@ -157,8 +151,7 @@ fn encrypt_rlwe_sk_core<const PT_POS: u8>(
// c1 <- Z_{2^prec}[X]/(X^{N}+1)
module.fill_uniform(log_base2k, c1, cols, source_xa);
-let (tmp_bytes_vec_znx_dft, tmp_bytes_normalize) =
-tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
+let (tmp_bytes_vec_znx_dft, tmp_bytes_normalize) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
// Scratch space for DFT values
let mut buf_dft: VecZnxDft = VecZnxDft::from_bytes_borrow(module, cols, tmp_bytes_vec_znx_dft);
@@ -214,12 +207,7 @@ impl Parameters {
}
}
-pub fn encrypt_grlwe_sk_tmp_bytes(
-module: &Module,
-log_base2k: usize,
-rows: usize,
-log_q: usize,
-) -> usize {
+pub fn encrypt_grlwe_sk_tmp_bytes(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> usize {
let cols = (log_q + log_base2k - 1) / log_base2k;
Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 2)
+ Plaintext::bytes_of(module, log_base2k, log_q)
@@ -260,12 +248,7 @@ impl Parameters {
}
}
-pub fn encrypt_rgsw_sk_tmp_bytes(
-module: &Module,
-log_base2k: usize,
-rows: usize,
-log_q: usize,
-) -> usize {
+pub fn encrypt_rgsw_sk_tmp_bytes(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> usize {
let cols = (log_q + log_base2k - 1) / log_base2k;
Elem::<VecZnx>::bytes_of(module, log_base2k, log_q, 2)
+ Plaintext::bytes_of(module, log_base2k, log_q)
@@ -347,10 +330,8 @@ fn encrypt_grlwe_sk_core<const PT_POS: u8>(
let (tmp_bytes_enc_sk, tmp_bytes) = tmp_bytes.split_at_mut(bytes_of_enc_sk);
let (tmp_bytes_elem, tmp_bytes_vmp_prepare_row) = tmp_bytes.split_at_mut(bytes_of_elem);
-let mut tmp_elem: Elem<VecZnx> =
-Elem::<VecZnx>::from_bytes_borrow(module, log_base2k, log_q, 2, tmp_bytes_elem);
-let mut tmp_pt: Plaintext =
-Plaintext::from_bytes_borrow(module, log_base2k, log_q, tmp_bytes_pt);
+let mut tmp_elem: Elem<VecZnx> = Elem::<VecZnx>::from_bytes_borrow(module, log_base2k, log_q, 2, tmp_bytes_elem);
+let mut tmp_pt: Plaintext = Plaintext::from_bytes_borrow(module, log_base2k, log_q, tmp_bytes_pt);
(0..rows).for_each(|row_i| {
// Sets the i-th row of the RLWE sample to m (i.e. m * 2^{-log_base2k*i})

View File

@@ -1,7 +1,5 @@
use crate::{ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters}; use crate::{ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters};
use base2k::{ use base2k::{Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps};
Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps,
};
use std::cmp::min; use std::cmp::min;
pub fn gadget_product_core_tmp_bytes( pub fn gadget_product_core_tmp_bytes(
@@ -19,13 +17,7 @@ pub fn gadget_product_core_tmp_bytes(
} }
impl Parameters { impl Parameters {
pub fn gadget_product_tmp_bytes( pub fn gadget_product_tmp_bytes(&self, res_log_q: usize, in_log_q: usize, gct_rows: usize, gct_log_q: usize) -> usize {
&self,
res_log_q: usize,
in_log_q: usize,
gct_rows: usize,
gct_log_q: usize,
) -> usize {
gadget_product_core_tmp_bytes( gadget_product_core_tmp_bytes(
self.module(), self.module(),
self.log_base2k(), self.log_base2k(),
@@ -52,13 +44,7 @@ pub fn gadget_product_core(
module.vmp_apply_dft_to_dft_inplace(res_dft_1, b.at(1), tmp_bytes); module.vmp_apply_dft_to_dft_inplace(res_dft_1, b.at(1), tmp_bytes);
} }
pub fn gadget_product_big_tmp_bytes( pub fn gadget_product_big_tmp_bytes(module: &Module, c_cols: usize, a_cols: usize, b_rows: usize, b_cols: usize) -> usize {
module: &Module,
c_cols: usize,
a_cols: usize,
b_rows: usize,
b_cols: usize,
) -> usize {
return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols) return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols)
+ 2 * module.bytes_of_vec_znx_dft(min(c_cols, a_cols)); + 2 * module.bytes_of_vec_znx_dft(min(c_cols, a_cols));
} }
@@ -144,8 +130,8 @@ mod test {
plaintext::Plaintext, plaintext::Plaintext,
}; };
use base2k::{ use base2k::{
BACKEND, Infos, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, BACKEND, Infos, Sampling, SvpPPolOps, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VecZnxOps, VmpPMat,
VecZnxDftOps, VecZnxOps, VmpPMat, alloc_aligned_u8, alloc_aligned_u8,
}; };
use sampling::source::{Source, new_seed}; use sampling::source::{Source, new_seed};
@@ -226,8 +212,7 @@ mod test {
.fill_uniform(log_base2k, &mut a, params.cols_q(), &mut source_xa); .fill_uniform(log_base2k, &mut a, params.cols_q(), &mut source_xa);
// res = g^-1(a) * gct // res = g^-1(a) * gct
let mut elem_res: Elem<VecZnx> = let mut elem_res: Elem<VecZnx> = Elem::<VecZnx>::new(params.module(), log_base2k, params.log_qp(), 2);
Elem::<VecZnx>::new(params.module(), log_base2k, params.log_qp(), 2);
// Ideal output = a * s // Ideal output = a * s
let mut a_dft: VecZnxDft = params.module().new_vec_znx_dft(a.cols()); let mut a_dft: VecZnxDft = params.module().new_vec_znx_dft(a.cols());
@@ -237,16 +222,12 @@ mod test {
// a * sk0 // a * sk0
params.module().svp_apply_dft(&mut a_dft, &sk0_svp_ppol, &a); params.module().svp_apply_dft(&mut a_dft, &sk0_svp_ppol, &a);
params.module().vec_znx_idft_tmp_a(&mut a_big, &mut a_dft); params.module().vec_znx_idft_tmp_a(&mut a_big, &mut a_dft);
params.module().vec_znx_big_normalize( params
params.log_base2k(), .module()
&mut a_times_s, .vec_znx_big_normalize(params.log_base2k(), &mut a_times_s, &a_big, &mut tmp_bytes);
&a_big,
&mut tmp_bytes,
);
// Plaintext for decrypted output of gadget product // Plaintext for decrypted output of gadget product
let mut pt: Plaintext = let mut pt: Plaintext = Plaintext::new(params.module(), params.log_base2k(), params.log_qp());
Plaintext::new(params.module(), params.log_base2k(), params.log_qp());
// Iterates over all possible cols values for input/output polynomials and gadget ciphertext. // Iterates over all possible cols values for input/output polynomials and gadget ciphertext.
@@ -289,20 +270,14 @@ mod test {
.vec_znx_idft_tmp_a(&mut res_big_1, &mut res_dft_1); .vec_znx_idft_tmp_a(&mut res_big_1, &mut res_dft_1);
// res_big_0 = normalize(res_big_0) // res_big_0 = normalize(res_big_0)
params.module().vec_znx_big_normalize( params
log_base2k, .module()
elem_res.at_mut(0), .vec_znx_big_normalize(log_base2k, elem_res.at_mut(0), &res_big_0, &mut tmp_bytes);
&res_big_0,
&mut tmp_bytes,
);
// res_big_1 = normalize(res_big_1) // res_big_1 = normalize(res_big_1)
params.module().vec_znx_big_normalize( params
log_base2k, .module()
elem_res.at_mut(1), .vec_znx_big_normalize(log_base2k, elem_res.at_mut(1), &res_big_1, &mut tmp_bytes);
&res_big_1,
&mut tmp_bytes,
);
// <(-c*sk1 + a*sk0 + e, a), (1, sk1)> = a*sk0 + e // <(-c*sk1 + a*sk0 + e, a), (1, sk1)> = a*sk0 + e
decrypt_rlwe( decrypt_rlwe(
@@ -337,8 +312,7 @@ mod test {
println!("{} {} {} {}", var_msg, var_a_err, a_logq, b_logq); println!("{} {} {} {}", var_msg, var_a_err, a_logq, b_logq);
let noise_pred: f64 = let noise_pred: f64 = params.noise_grlwe_product(var_msg, var_a_err, a_logq, b_logq);
params.noise_grlwe_product(var_msg, var_a_err, a_logq, b_logq);
println!("noise_pred: {}", noise_pred); println!("noise_pred: {}", noise_pred);
println!("noise_have: {}", noise_have); println!("noise_have: {}", noise_have);
@@ -350,13 +324,7 @@ mod test {
} }
impl Parameters { impl Parameters {
pub fn noise_grlwe_product( pub fn noise_grlwe_product(&self, var_msg: f64, var_a_err: f64, a_logq: usize, b_logq: usize) -> f64 {
&self,
var_msg: f64,
var_a_err: f64,
a_logq: usize,
b_logq: usize,
) -> f64 {
let n: f64 = self.n() as f64; let n: f64 = self.n() as f64;
let var_xs: f64 = self.xs() as f64; let var_xs: f64 = self.xs() as f64;
@@ -407,8 +375,7 @@ pub fn noise_grlwe_product(
// lhs = a_cols * n * (var_base * var_gct_err_lhs + var_e_a * var_msg * p^2) // lhs = a_cols * n * (var_base * var_gct_err_lhs + var_e_a * var_msg * p^2)
// rhs = a_cols * n * var_base * var_gct_err_rhs * var_xs // rhs = a_cols * n * var_base * var_gct_err_rhs * var_xs
let mut noise: f64 = let mut noise: f64 = (a_cols as f64) * n * var_base * (var_gct_err_lhs + var_xs * var_gct_err_rhs);
(a_cols as f64) * n * var_base * (var_gct_err_lhs + var_xs * var_gct_err_rhs);
noise += var_msg * var_a_err * a_scale * a_scale * n; noise += var_msg * var_a_err * a_scale * a_scale * n;
noise = noise.sqrt(); noise = noise.sqrt();
noise /= b_scale; noise /= b_scale;
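The hunks above only re-wrap the noise model used by the gadget-product test; the arithmetic is unchanged. As a reading aid, here is a stand-alone restatement of that variance bookkeeping in plain Rust. The parameter names mirror the locals of noise_grlwe_product; var_base, var_gct_err_lhs/rhs, a_scale and b_scale are defined in parts of the function this hunk elides, so the values used below are illustrative only.

/// Sketch of the prediction computed by `noise_grlwe_product` (not the library code itself).
fn predicted_grlwe_noise(
    n: f64,
    a_cols: usize,
    var_base: f64,
    var_xs: f64,
    var_msg: f64,
    var_a_err: f64,
    var_gct_err_lhs: f64,
    var_gct_err_rhs: f64,
    a_scale: f64,
    b_scale: f64,
) -> f64 {
    // Gadget noise contributed by both halves of the gadget ciphertext.
    let mut noise = (a_cols as f64) * n * var_base * (var_gct_err_lhs + var_xs * var_gct_err_rhs);
    // Error of the input polynomial carried through the message, rescaled by a_scale.
    noise += var_msg * var_a_err * a_scale * a_scale * n;
    // Standard deviation, normalized to the output scale.
    noise.sqrt() / b_scale
}

fn main() {
    // Purely illustrative inputs, not parameters taken from the library.
    let sigma = predicted_grlwe_noise(2048.0, 4, 1.0 / 12.0, 0.5, 1.0 / 12.0, 10.24, 10.24, 10.24, 1.0, 1.0);
    println!("predicted gadget-product noise: {sigma:.3}");
}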

View File

@@ -7,11 +7,7 @@ use sampling::source::Source;
pub struct KeyGenerator {} pub struct KeyGenerator {}
impl KeyGenerator { impl KeyGenerator {
pub fn gen_secret_key_thread_safe( pub fn gen_secret_key_thread_safe(&self, params: &Parameters, source: &mut Source) -> SecretKey {
&self,
params: &Parameters,
source: &mut Source,
) -> SecretKey {
let mut sk: SecretKey = SecretKey::new(params.module()); let mut sk: SecretKey = SecretKey::new(params.module());
sk.fill_ternary_hw(params.xs(), source); sk.fill_ternary_hw(params.xs(), source);
sk sk
@@ -26,8 +22,7 @@ impl KeyGenerator {
) -> PublicKey { ) -> PublicKey {
let mut xa_source: Source = source.branch(); let mut xa_source: Source = source.branch();
let mut xe_source: Source = source.branch(); let mut xe_source: Source = source.branch();
let mut pk: PublicKey = let mut pk: PublicKey = PublicKey::new(params.module(), params.log_base2k(), params.log_qp());
PublicKey::new(params.module(), params.log_base2k(), params.log_qp());
pk.gen_thread_safe( pk.gen_thread_safe(
params.module(), params.module(),
sk_ppol, sk_ppol,
@@ -40,12 +35,7 @@ impl KeyGenerator {
} }
} }
pub fn gen_switching_key_tmp_bytes( pub fn gen_switching_key_tmp_bytes(module: &Module, log_base2k: usize, rows: usize, log_q: usize) -> usize {
module: &Module,
log_base2k: usize,
rows: usize,
log_q: usize,
) -> usize {
encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q) encrypt_grlwe_sk_tmp_bytes(module, log_base2k, rows, log_q)
} }
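For readers unfamiliar with the distribution behind fill_ternary_hw above: the secret is a ternary polynomial with exactly params.xs() non-zero coefficients, each +1 or -1. The snippet below is an illustrative sketch of that sampling step only; it is not the library's sampler (which draws from a Source), and the xorshift generator is just a stand-in so the snippet runs without extra crates.

// Toy PRNG, only so the example is self-contained; not cryptographically secure.
struct XorShift64(u64);

impl XorShift64 {
    fn next(&mut self) -> u64 {
        self.0 ^= self.0 << 13;
        self.0 ^= self.0 >> 7;
        self.0 ^= self.0 << 17;
        self.0
    }
}

// Ternary vector of length `n` with exactly `hw` non-zero entries in {-1, +1}.
fn ternary_hw(n: usize, hw: usize, rng: &mut XorShift64) -> Vec<i64> {
    let mut s = vec![0i64; n];
    let mut idx: Vec<usize> = (0..n).collect();
    for i in 0..hw {
        // Partial Fisher-Yates shuffle picks `hw` distinct positions.
        let j = i + (rng.next() as usize) % (n - i);
        idx.swap(i, j);
        s[idx[i]] = if rng.next() & 1 == 0 { 1 } else { -1 };
    }
    s
}

fn main() {
    let sk = ternary_hw(16, 4, &mut XorShift64(0x1234_5678_9abc_def0));
    assert_eq!(sk.iter().filter(|&&x| x != 0).count(), 4);
    println!("{sk:?}");
}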

79
rlwe/src/key_switching.rs Normal file
View File

@@ -0,0 +1,79 @@
use crate::ciphertext::Ciphertext;
use crate::elem::ElemCommon;
use base2k::{Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps, assert_alignement};
use std::cmp::min;
pub fn key_switch_tmp_bytes(module: &Module, log_base2k: usize, res_logq: usize, in_logq: usize, gct_logq: usize) -> usize {
let gct_cols: usize = (gct_logq + log_base2k - 1) / log_base2k;
let in_cols: usize = (in_logq + log_base2k - 1) / log_base2k;
let res_cols: usize = (res_logq + log_base2k - 1) / log_base2k;
return module.vmp_apply_dft_to_dft_tmp_bytes(res_cols, in_cols, in_cols, gct_cols)
+ module.bytes_of_vec_znx_dft(std::cmp::min(res_cols, in_cols))
+ module.bytes_of_vec_znx_dft(gct_cols);
}
pub fn key_switch_rlwe(
module: &Module,
c: &mut Ciphertext<VecZnx>,
a: &Ciphertext<VecZnx>,
b: &Ciphertext<VmpPMat>,
b_cols: usize,
tmp_bytes: &mut [u8],
) {
key_switch_rlwe_core(module, c, a, b, b_cols, tmp_bytes);
}
pub fn key_switch_rlwe_inplace(
module: &Module,
a: &mut Ciphertext<VecZnx>,
b: &Ciphertext<VmpPMat>,
b_cols: usize,
tmp_bytes: &mut [u8],
) {
key_switch_rlwe_core(module, a, a, b, b_cols, tmp_bytes);
}
fn key_switch_rlwe_core(
module: &Module,
c: *mut Ciphertext<VecZnx>,
a: *const Ciphertext<VecZnx>,
b: &Ciphertext<VmpPMat>,
b_cols: usize,
tmp_bytes: &mut [u8],
) {
// `c` and `a` are taken as raw pointers so that `key_switch_rlwe_inplace` can pass the same
// ciphertext as both source and destination.
// SAFETY WARNING: the caller must ensure `c` is valid for writes and `a` is valid for reads.
let c: &mut Ciphertext<VecZnx> = unsafe { &mut *c };
let a: &Ciphertext<VecZnx> = unsafe { &*a };
let cols: usize = min(min(c.cols(), a.cols()), b.rows());
#[cfg(debug_assertions)]
{
assert!(b_cols <= b.cols());
assert!(tmp_bytes.len() >= key_switch_tmp_bytes(module, c.log_base2k(), c.log_q(), a.log_q(), b.log_q()));
assert_alignement(tmp_bytes.as_ptr());
}
let (tmp_bytes_a1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_a1_dft);
let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_res_dft);
let mut res_big: VecZnxBig = res_dft.as_vec_znx_big();
module.vec_znx_dft(&mut a1_dft, a.at(1));
module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.at(0), tmp_bytes);
module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
module.vec_znx_big_add_small_inplace(&mut res_big, a.at(0));
module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(0), &mut res_big, tmp_bytes);
module.vmp_apply_dft_to_dft(&mut res_dft, &a1_dft, b.at(1), tmp_bytes);
module.vec_znx_idft_tmp_a(&mut res_big, &mut res_dft);
module.vec_znx_big_normalize(c.log_base2k(), c.at_mut(1), &mut res_big, tmp_bytes);
}
// Placeholders for GRLWE and RGSW key switching; their bodies are not implemented in this commit.
pub fn key_switch_grlwe(_module: &Module, _c: &mut Ciphertext<VecZnx>, _a: &Ciphertext<VecZnx>, _b: &Ciphertext<VmpPMat>) {}
pub fn key_switch_rgsw(_module: &Module, _c: &mut Ciphertext<VecZnx>, _a: &Ciphertext<VecZnx>, _b: &Ciphertext<VmpPMat>) {}
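key_switch_rlwe_core carves every temporary out of the single caller-provided tmp_bytes buffer, in the same order the sizes are summed in key_switch_tmp_bytes: one VecZnxDft of min(c.cols(), a.cols(), b.rows()) columns, one of b_cols columns, and whatever remains for vmp_apply_dft_to_dft and the final normalization. A minimal stand-alone sketch of that split_at_mut pattern is shown below; the byte counts are made up, whereas the real ones come from bytes_of_vec_znx_dft and vmp_apply_dft_to_dft_tmp_bytes.

fn scratch_layout_demo() {
    // Hypothetical sizes standing in for bytes_of_vec_znx_dft(cols),
    // bytes_of_vec_znx_dft(b_cols) and the vmp_apply tmp region.
    let (a1_dft_bytes, res_dft_bytes, vmp_tmp_bytes) = (1usize << 10, 1usize << 12, 1usize << 14);

    // One allocation sized like key_switch_tmp_bytes would report.
    let mut scratch = vec![0u8; a1_dft_bytes + res_dft_bytes + vmp_tmp_bytes];

    // Carve it up in the same order as key_switch_rlwe_core.
    let (a1_dft, rest) = scratch.split_at_mut(a1_dft_bytes);
    let (res_dft, vmp_tmp) = rest.split_at_mut(res_dft_bytes);

    // Each slice would back a *_from_bytes_borrow wrapper or be handed to
    // vmp_apply_dft_to_dft / vec_znx_big_normalize as their tmp_bytes argument.
    assert_eq!(a1_dft.len(), a1_dft_bytes);
    assert_eq!(res_dft.len(), res_dft_bytes);
    assert_eq!(vmp_tmp.len(), vmp_tmp_bytes);
}

fn main() {
    scratch_layout_demo();
}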

View File

@@ -5,6 +5,7 @@ pub mod elem;
pub mod encryptor; pub mod encryptor;
pub mod gadget_product; pub mod gadget_product;
pub mod key_generator; pub mod key_generator;
pub mod key_switching;
pub mod keys; pub mod keys;
pub mod parameters; pub mod parameters;
pub mod plaintext; pub mod plaintext;

View File

@@ -43,12 +43,7 @@ impl Plaintext {
)) ))
} }
pub fn from_bytes_borrow( pub fn from_bytes_borrow(module: &Module, log_base2k: usize, log_q: usize, bytes: &mut [u8]) -> Self {
module: &Module,
log_base2k: usize,
log_q: usize,
bytes: &mut [u8],
) -> Self {
Self(Elem::<VecZnx>::from_bytes_borrow( Self(Elem::<VecZnx>::from_bytes_borrow(
module, log_base2k, log_q, 1, bytes, module, log_base2k, log_q, 1, bytes,
)) ))

View File

@@ -1,17 +1,9 @@
use crate::{ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters}; use crate::{ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters};
use base2k::{ use base2k::{Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps, assert_alignement};
Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMat, VmpPMatOps,
assert_alignement,
};
use std::cmp::min; use std::cmp::min;
impl Parameters { impl Parameters {
pub fn rgsw_product_tmp_bytes( pub fn rgsw_product_tmp_bytes(&self, res_logq: usize, in_logq: usize, gct_logq: usize) -> usize {
&self,
res_logq: usize,
in_logq: usize,
gct_logq: usize,
) -> usize {
rgsw_product_tmp_bytes( rgsw_product_tmp_bytes(
self.module(), self.module(),
self.log_base2k(), self.log_base2k(),
@@ -21,13 +13,7 @@ impl Parameters {
) )
} }
} }
pub fn rgsw_product_tmp_bytes( pub fn rgsw_product_tmp_bytes(module: &Module, log_base2k: usize, res_logq: usize, in_logq: usize, gct_logq: usize) -> usize {
module: &Module,
log_base2k: usize,
res_logq: usize,
in_logq: usize,
gct_logq: usize,
) -> usize {
let gct_cols: usize = (gct_logq + log_base2k - 1) / log_base2k; let gct_cols: usize = (gct_logq + log_base2k - 1) / log_base2k;
let in_cols: usize = (in_logq + log_base2k - 1) / log_base2k; let in_cols: usize = (in_logq + log_base2k - 1) / log_base2k;
let res_cols: usize = (res_logq + log_base2k - 1) / log_base2k; let res_cols: usize = (res_logq + log_base2k - 1) / log_base2k;
@@ -50,26 +36,15 @@ pub fn rgsw_product(
assert_eq!(c.size(), 2); assert_eq!(c.size(), 2);
assert_eq!(a.size(), 2); assert_eq!(a.size(), 2);
assert_eq!(b.size(), 4); assert_eq!(b.size(), 4);
assert!( assert!(tmp_bytes.len() >= rgsw_product_tmp_bytes(module, c.cols(), a.cols(), min(b.rows(), a.cols()), b_cols));
tmp_bytes.len()
>= rgsw_product_tmp_bytes(
module,
c.cols(),
a.cols(),
min(b.rows(), a.cols()),
b_cols
)
);
assert_alignement(tmp_bytes.as_ptr()); assert_alignement(tmp_bytes.as_ptr());
} }
let (tmp_bytes_ai_dft, tmp_bytes) = let (tmp_bytes_ai_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(a.cols()));
tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(a.cols()));
let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols)); let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols)); let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
let mut ai_dft: VecZnxDft = let mut ai_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(a.cols(), tmp_bytes_ai_dft);
module.new_vec_znx_dft_from_bytes_borrow(a.cols(), tmp_bytes_ai_dft);
let mut c0_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c0_dft); let mut c0_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c0_dft);
let mut c1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c1_dft); let mut c1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c1_dft);
@@ -103,26 +78,15 @@ pub fn rgsw_product_inplace(
assert!(b_cols <= b.cols()); assert!(b_cols <= b.cols());
assert_eq!(a.size(), 2); assert_eq!(a.size(), 2);
assert_eq!(b.size(), 4); assert_eq!(b.size(), 4);
assert!( assert!(tmp_bytes.len() >= rgsw_product_tmp_bytes(module, a.cols(), a.cols(), min(b.rows(), a.cols()), b_cols));
tmp_bytes.len()
>= rgsw_product_tmp_bytes(
module,
a.cols(),
a.cols(),
min(b.rows(), a.cols()),
b_cols
)
);
assert_alignement(tmp_bytes.as_ptr()); assert_alignement(tmp_bytes.as_ptr());
} }
let (tmp_bytes_ai_dft, tmp_bytes) = let (tmp_bytes_ai_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(a.cols()));
tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(a.cols()));
let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols)); let (tmp_bytes_c0_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols)); let (tmp_bytes_c1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
let mut ai_dft: VecZnxDft = let mut ai_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(a.cols(), tmp_bytes_ai_dft);
module.new_vec_znx_dft_from_bytes_borrow(a.cols(), tmp_bytes_ai_dft);
let mut c0_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c0_dft); let mut c0_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c0_dft);
let mut c1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c1_dft); let mut c1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_c1_dft);
@@ -156,10 +120,7 @@ mod test {
plaintext::Plaintext, plaintext::Plaintext,
rgsw_product::rgsw_product_inplace, rgsw_product::rgsw_product_inplace,
}; };
use base2k::{ use base2k::{BACKEND, Encoding, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxOps, VmpPMat, alloc_aligned};
BACKEND, Encoding, Module, Scalar, SvpPPol, SvpPPolOps, VecZnx, VecZnxOps, VmpPMat,
alloc_aligned,
};
use sampling::source::{Source, new_seed}; use sampling::source::{Source, new_seed};
#[test] #[test]
@@ -206,8 +167,7 @@ mod test {
let mut sk_svp_ppol: SvpPPol = module.new_svp_ppol(); let mut sk_svp_ppol: SvpPPol = module.new_svp_ppol();
module.svp_prepare(&mut sk_svp_ppol, &sk.0); module.svp_prepare(&mut sk_svp_ppol, &sk.0);
let mut ct_rgsw: Ciphertext<VmpPMat> = let mut ct_rgsw: Ciphertext<VmpPMat> = new_rgsw_ciphertext(module, log_base2k, gct_rows, log_qp);
new_rgsw_ciphertext(module, log_base2k, gct_rows, log_qp);
let k: i64 = 3; let k: i64 = 3;
@@ -268,8 +228,7 @@ mod test {
let var_a0_err: f64 = params.xe() * params.xe(); let var_a0_err: f64 = params.xe() * params.xe();
let var_a1_err: f64 = 1f64 / 12f64; let var_a1_err: f64 = 1f64 / 12f64;
let noise_pred: f64 = let noise_pred: f64 = params.noise_rgsw_product(var_msg, var_a0_err, var_a1_err, ct.log_q(), ct_rgsw.log_q());
params.noise_rgsw_product(var_msg, var_a0_err, var_a1_err, ct.log_q(), ct_rgsw.log_q());
println!("noise_pred: {}", noise_pred); println!("noise_pred: {}", noise_pred);
println!("noise_have: {}", noise_have); println!("noise_have: {}", noise_have);
@@ -279,14 +238,7 @@ mod test {
} }
impl Parameters { impl Parameters {
pub fn noise_rgsw_product( pub fn noise_rgsw_product(&self, var_msg: f64, var_a0_err: f64, var_a1_err: f64, a_logq: usize, b_logq: usize) -> f64 {
&self,
var_msg: f64,
var_a0_err: f64,
var_a1_err: f64,
a_logq: usize,
b_logq: usize,
) -> f64 {
let n: f64 = self.n() as f64; let n: f64 = self.n() as f64;
let var_xs: f64 = self.xs() as f64; let var_xs: f64 = self.xs() as f64;
@@ -339,8 +291,7 @@ pub fn noise_rgsw_product(
// lhs = a_cols * n * (var_base * var_gct_err_lhs + var_e_a * var_msg * p^2) // lhs = a_cols * n * (var_base * var_gct_err_lhs + var_e_a * var_msg * p^2)
// rhs = a_cols * n * var_base * var_gct_err_rhs * var_xs // rhs = a_cols * n * var_base * var_gct_err_rhs * var_xs
let mut noise: f64 = let mut noise: f64 = 2.0 * (a_cols as f64) * n * var_base * (var_gct_err_lhs + var_xs * var_gct_err_rhs);
2.0 * (a_cols as f64) * n * var_base * (var_gct_err_lhs + var_xs * var_gct_err_rhs);
noise += var_msg * var_a0_err * a_scale * a_scale * n; noise += var_msg * var_a0_err * a_scale * a_scale * n;
noise += var_msg * var_a1_err * a_scale * a_scale * n * var_xs; noise += var_msg * var_a1_err * a_scale * a_scale * n * var_xs;
noise = noise.sqrt(); noise = noise.sqrt();
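Spelled out, the prediction accumulated by noise_rgsw_product just above is (reading each var_* as a variance; a_cols, var_base and a_scale are locals elided from this hunk):

$$
\sigma^{2} \;=\; 2\, a_{\mathrm{cols}}\, n\, \mathrm{var}_{\mathrm{base}} \bigl( \mathrm{var}_{\mathrm{gct,lhs}} + \mathrm{var}_{x_s}\, \mathrm{var}_{\mathrm{gct,rhs}} \bigr) \;+\; \mathrm{var}_{\mathrm{msg}}\, \mathrm{var}_{a_0}\, a_{\mathrm{scale}}^{2}\, n \;+\; \mathrm{var}_{\mathrm{msg}}\, \mathrm{var}_{a_1}\, a_{\mathrm{scale}}^{2}\, n\, \mathrm{var}_{x_s}
$$

The code then takes the square root of this quantity. Compared with noise_grlwe_product, the gadget term is doubled and an extra var_msg * var_a1_err * var_xs term appears, var_a1_err being the variance the test attributes to the ciphertext's second component.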

View File

@@ -1,9 +1,5 @@
use crate::{ use crate::{automorphism::AutomorphismKey, ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters};
automorphism::AutomorphismKey, ciphertext::Ciphertext, elem::ElemCommon, parameters::Parameters, use base2k::{Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMatOps, assert_alignement};
};
use base2k::{
Module, VecZnx, VecZnxBig, VecZnxBigOps, VecZnxDft, VecZnxDftOps, VmpPMatOps, assert_alignement,
};
use std::collections::HashMap; use std::collections::HashMap;
pub fn trace_galois_elements(module: &Module) -> Vec<i64> { pub fn trace_galois_elements(module: &Module) -> Vec<i64> {
@@ -24,13 +20,7 @@ impl Parameters {
} }
} }
pub fn trace_tmp_bytes( pub fn trace_tmp_bytes(module: &Module, c_cols: usize, a_cols: usize, b_rows: usize, b_cols: usize) -> usize {
module: &Module,
c_cols: usize,
a_cols: usize,
b_rows: usize,
b_cols: usize,
) -> usize {
return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols) return module.vmp_apply_dft_to_dft_tmp_bytes(c_cols, a_cols, b_rows, b_cols)
+ 2 * module.bytes_of_vec_znx_dft(std::cmp::min(c_cols, a_cols)); + 2 * module.bytes_of_vec_znx_dft(std::cmp::min(c_cols, a_cols));
} }
@@ -70,12 +60,10 @@ pub fn trace_inplace(
let cols: usize = std::cmp::min(b_cols, a.cols()); let cols: usize = std::cmp::min(b_cols, a.cols());
let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols)); let (tmp_bytes_b1_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(cols));
let (tmp_bytes_res_dft, tmp_bytes) = let (tmp_bytes_res_dft, tmp_bytes) = tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
tmp_bytes.split_at_mut(module.bytes_of_vec_znx_dft(b_cols));
let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft); let mut a1_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(cols, tmp_bytes_b1_dft);
let mut res_dft: VecZnxDft = let mut res_dft: VecZnxDft = module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_res_dft);
module.new_vec_znx_dft_from_bytes_borrow(b_cols, tmp_bytes_res_dft);
let mut res_big: VecZnxBig = res_dft.as_vec_znx_big(); let mut res_big: VecZnxBig = res_dft.as_vec_znx_big();
let log_base2k: usize = a.log_base2k(); let log_base2k: usize = a.log_base2k();

79
rustfmt.toml Normal file
View File

@@ -0,0 +1,79 @@
max_width = 130
hard_tabs = false
tab_spaces = 4
newline_style = "Auto"
indent_style = "Block"
use_small_heuristics = "Default"
fn_call_width = 60
attr_fn_like_width = 100
struct_lit_width = 18
struct_variant_width = 35
array_width = 60
chain_width = 60
single_line_if_else_max_width = 50
single_line_let_else_max_width = 50
wrap_comments = false
format_code_in_doc_comments = true
doc_comment_code_block_width = 100
comment_width = 80
normalize_comments = true
normalize_doc_attributes = true
format_strings = true
format_macro_matchers = false
format_macro_bodies = true
skip_macro_invocations = []
hex_literal_case = "Preserve"
empty_item_single_line = true
struct_lit_single_line = true
fn_single_line = false
where_single_line = false
imports_indent = "Block"
imports_layout = "Mixed"
imports_granularity = "Preserve"
group_imports = "Preserve"
reorder_imports = true
reorder_modules = true
reorder_impl_items = false
type_punctuation_density = "Wide"
space_before_colon = false
space_after_colon = true
spaces_around_ranges = false
binop_separator = "Front"
remove_nested_parens = true
combine_control_expr = true
short_array_element_width_threshold = 10
overflow_delimited_expr = false
struct_field_align_threshold = 0
enum_discrim_align_threshold = 0
match_arm_blocks = true
match_arm_leading_pipes = "Never"
force_multiline_blocks = false
fn_params_layout = "Tall"
brace_style = "SameLineWhere"
control_brace_style = "AlwaysSameLine"
trailing_semicolon = true
trailing_comma = "Vertical"
match_block_trailing_comma = false
blank_lines_upper_bound = 1
blank_lines_lower_bound = 0
edition = "2024"
style_edition = "2024"
inline_attribute_width = 0
format_generated_files = true
generated_marker_line_search_limit = 5
merge_derives = true
use_try_shorthand = false
use_field_init_shorthand = false
force_explicit_abi = true
condense_wildcard_suffixes = false
color = "Auto"
required_version = "1.8.0"
unstable_features = true
disable_all_formatting = false
skip_children = false
show_parse_errors = true
error_on_line_overflow = false
error_on_unformatted = false
ignore = []
emit_mode = "Files"
make_backup = false

View File

@@ -1,5 +1,5 @@
use rand_chacha::rand_core::SeedableRng;
use rand_chacha::ChaCha8Rng; use rand_chacha::ChaCha8Rng;
use rand_chacha::rand_core::SeedableRng;
use rand_core::{OsRng, RngCore}; use rand_core::{OsRng, RngCore};
const MAXF64: f64 = 9007199254740992.0; const MAXF64: f64 = 9007199254740992.0;