various API uniformization

Jean-Philippe Bossuat
2025-01-28 15:00:43 +01:00
parent 1ac719ce7e
commit 6fcd5c743d
19 changed files with 438 additions and 84 deletions

View File

@@ -1,6 +1,4 @@
use base2k::module::{Module, FFT64};
use base2k::scalar::Scalar;
use base2k::vector::Vector;
use base2k::{Module, Scalar, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, FFT64};
use itertools::izip;
use sampling::source::Source;
@@ -16,35 +14,35 @@ fn main() {
let seed: [u8; 32] = [0; 32];
let mut source: Source = Source::new(seed);
let mut res: Vector = Vector::new(n, log_base2k, prec);
let mut res: VecZnx = VecZnx::new(n, log_base2k, prec);
// s <- Z_{-1, 0, 1}[X]/(X^{N}+1)
let mut s: Scalar = Scalar::new(n);
s.fill_ternary_prob(0.5, &mut source);
// Buffer to store s in the DFT domain
let mut s_ppol: base2k::module::SVPPOL = module.svp_new_ppol();
let mut s_ppol: SvpPPol = module.svp_new_ppol();
// s_ppol <- DFT(s)
module.svp_prepare(&mut s_ppol, &s);
// a <- Z_{2^prec}[X]/(X^{N}+1)
let mut a: Vector = Vector::new(n, log_base2k, prec);
let mut a: VecZnx = VecZnx::new(n, log_base2k, prec);
a.fill_uniform(&mut source);
// Scratch space for DFT values
let mut buf_dft: base2k::module::VECZNXDFT = module.new_vec_znx_dft(a.limbs());
let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.limbs());
// Applies buf_dft <- s * a
module.svp_apply_dft(&mut buf_dft, &s_ppol, &a);
// Alias scratch space
let mut buf_big: base2k::module::VECZNXBIG = buf_dft.as_vec_znx_big();
let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big();
// buf_big <- IDFT(buf_dft) (not normalized)
module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.limbs());
let mut m: Vector = Vector::new(n, log_base2k, prec - log_scale);
let mut m: VecZnx = VecZnx::new(n, log_base2k, prec - log_scale);
let mut want: Vec<i64> = vec![0; n];
want.iter_mut()
.for_each(|x| *x = source.next_u64n(16, 15) as i64);
@@ -57,7 +55,7 @@ fn main() {
module.vec_znx_big_sub_small_a_inplace(&mut buf_big, &m);
// b <- normalize(buf_big) + e
let mut b: Vector = Vector::new(n, log_base2k, prec);
let mut b: VecZnx = VecZnx::new(n, log_base2k, prec);
module.vec_znx_big_normalize(&mut b, &buf_big, &mut carry);
b.add_normal(&mut source, 3.2, 19.0);

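For illustration only (not part of this commit): since the example sets b = s*a - m + e, the message can be recovered up to the error e as normalize(s*a - b), using only functions touched by this change. The helper below is hypothetical; the caller is assumed to size tmp_bytes for vec_znx_big_normalize.

use base2k::{Module, SvpPPol, VecZnx, VecZnxBig, VecZnxDft};

// Hypothetical helper: normalize(s*a - b) == m - e.
fn recover_message(
    module: &Module,
    s_ppol: &SvpPPol,
    a: &VecZnx,
    b: &VecZnx,
    log_base2k: usize,
    log_q: usize,
    tmp_bytes: &mut [u8], // scratch for vec_znx_big_normalize, sized by the caller
) -> VecZnx {
    let mut buf_dft: VecZnxDft = module.new_vec_znx_dft(a.limbs());
    module.svp_apply_dft(&mut buf_dft, s_ppol, a); // buf_dft <- DFT(s * a)
    let mut buf_big: VecZnxBig = buf_dft.as_vec_znx_big(); // alias scratch space
    module.vec_znx_idft_tmp_a(&mut buf_big, &mut buf_dft, a.limbs()); // buf_big <- s * a
    module.vec_znx_big_sub_small_a_inplace(&mut buf_big, b); // buf_big <- s*a - b = m - e
    let mut m_rec: VecZnx = module.new_vec_znx(log_base2k, log_q);
    module.vec_znx_big_normalize(&mut m_rec, &buf_big, tmp_bytes); // m_rec <- normalize(buf_big)
    m_rec
}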
View File

@@ -1,7 +1,3 @@
pub mod module;
pub mod scalar;
pub mod vector;
#[allow(
non_camel_case_types,
non_snake_case,
@@ -13,20 +9,36 @@ pub mod bindings {
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
pub mod vec_znx_arithmetic;
pub mod module;
#[allow(unused_imports)]
pub use module::*;
pub mod scalar;
#[allow(unused_imports)]
pub use scalar::*;
pub mod vec_znx;
#[allow(unused_imports)]
pub use vec_znx::*;
pub mod vec_znx_arithmetic;
#[allow(unused_imports)]
pub use vec_znx_arithmetic::*;
pub mod vec_znx_big_arithmetic;
#[allow(unused_imports)]
pub use vec_znx_big_arithmetic::*;
pub mod vec_znx_dft;
#[allow(unused_imports)]
pub use vec_znx_dft::*;
pub mod scalar_vector_product;
#[allow(unused_imports)]
pub use scalar_vector_product::*;
pub const GALOISGENERATOR: u64 = 5;
#[allow(dead_code)]
fn cast_mut_u64_to_mut_u8_slice(data: &mut [u64]) -> &mut [u8] {
let ptr: *mut u8 = data.as_mut_ptr() as *mut u8;

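For downstream code, the flattened re-exports above mean the old per-module paths are no longer needed; a single crate-root import suffices (illustrative):

// Old: use base2k::module::{Module, FFT64}; use base2k::scalar::Scalar; use base2k::vector::Vector;
// New: everything is re-exported at the crate root.
use base2k::{Module, Scalar, SvpPPol, VecZnx, VecZnxBig, VecZnxDft, FFT64, GALOISGENERATOR};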
View File

@@ -2,11 +2,13 @@ use crate::bindings::{
module_info_t, new_module_info, svp_ppol_t, vec_znx_bigcoeff_t, vec_znx_dft_t, MODULE,
};
use crate::GALOISGENERATOR;
pub type MODULETYPE = u8;
pub const FFT64: u8 = 0;
pub const NTT120: u8 = 1;
pub struct Module(pub *mut MODULE);
pub struct Module(pub *mut MODULE, pub usize);
impl Module {
// Instantiates a new module.
@@ -16,30 +18,64 @@ impl Module {
if m.is_null() {
panic!("Failed to create module.");
}
Self(m)
Self(m, n)
}
}
pub fn n(&self) -> usize {
self.1
}
pub fn log_n(&self) -> usize {
(usize::BITS - (self.n() - 1).leading_zeros()) as _
}
pub fn cyclotomic_order(&self) -> u64 {
(self.n() << 1) as _
}
// Returns GALOISGENERATOR^|gen| * sign(gen), reduced modulo the cyclotomic order.
pub fn galois_element(&self, gen: i64) -> i64 {
if gen == 0 {
return 1;
}
let mut gal_el: u64 = 1;
let mut gen_1_pow: u64 = GALOISGENERATOR;
let mut e: usize = gen.abs() as usize;
while e > 0 {
if e & 1 == 1 {
gal_el = gal_el.wrapping_mul(gen_1_pow);
}
gen_1_pow = gen_1_pow.wrapping_mul(gen_1_pow);
e >>= 1;
}
gal_el &= self.cyclotomic_order() - 1;
(gal_el as i64) * gen.signum()
}
}
pub struct SVPPOL(pub *mut svp_ppol_t);
pub struct SvpPPol(pub *mut svp_ppol_t);
pub struct VECZNXBIG(pub *mut vec_znx_bigcoeff_t, pub usize);
pub struct VecZnxBig(pub *mut vec_znx_bigcoeff_t, pub usize);
// Stores a vector of non-normalized big coefficients.
impl VECZNXBIG {
pub fn as_vec_znx_dft(&mut self) -> VECZNXDFT {
VECZNXDFT(self.0 as *mut vec_znx_dft_t, self.1)
impl VecZnxBig {
pub fn as_vec_znx_dft(&mut self) -> VecZnxDft {
VecZnxDft(self.0 as *mut vec_znx_dft_t, self.1)
}
pub fn limbs(&self) -> usize {
self.1
}
}
pub struct VECZNXDFT(pub *mut vec_znx_dft_t, pub usize);
pub struct VecZnxDft(pub *mut vec_znx_dft_t, pub usize);
impl VECZNXDFT {
pub fn as_vec_znx_big(&mut self) -> VECZNXBIG {
VECZNXBIG(self.0 as *mut vec_znx_bigcoeff_t, self.1)
impl VecZnxDft {
pub fn as_vec_znx_big(&mut self) -> VecZnxBig {
VecZnxBig(self.0 as *mut vec_znx_bigcoeff_t, self.1)
}
pub fn limbs(&self) -> usize {
self.1

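To make the new Module accessors concrete, an illustrative sketch (it assumes the module's ring degree n is a power of two >= 4, so reducing 5 modulo the cyclotomic order 2n is a no-op):

use base2k::{Module, GALOISGENERATOR};

fn galois_sketch(module: &Module) {
    assert_eq!(module.cyclotomic_order(), (module.n() << 1) as u64);
    assert_eq!(module.galois_element(0), 1);
    assert_eq!(module.galois_element(1), GALOISGENERATOR as i64); // 5
    assert_eq!(module.galois_element(-1), -(GALOISGENERATOR as i64)); // sign(gen) is preserved
}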
View File

@@ -1,22 +1,21 @@
use crate::bindings::{new_svp_ppol, svp_apply_dft, svp_prepare};
use crate::module::{Module, SVPPOL, VECZNXDFT};
use crate::scalar::Scalar;
use crate::vector::Vector;
use crate::{Module, SvpPPol, VecZnx, VecZnxDft};
impl Module {
// Prepares a scalar polynomial (1 limb) for a scalar x vector product.
// Method will panic if a.limbs() != 1.
pub fn svp_prepare(&self, svp_ppol: &mut SVPPOL, a: &Scalar) {
pub fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar) {
unsafe { svp_prepare(self.0, svp_ppol.0, a.as_ptr()) }
}
// Allocates a scalar-vector-product prepared-poly (SVPPOL).
pub fn svp_new_ppol(&self) -> SVPPOL {
unsafe { SVPPOL(new_svp_ppol(self.0)) }
// Allocates a scalar-vector-product prepared-poly (SvpPPol).
pub fn svp_new_ppol(&self) -> SvpPPol {
unsafe { SvpPPol(new_svp_ppol(self.0)) }
}
// Applies a scalar x vector product: res <- a (ppol) x b
pub fn svp_apply_dft(&self, c: &mut VECZNXDFT, a: &SVPPOL, b: &Vector) {
pub fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx) {
let limbs: u64 = b.limbs() as u64;
assert!(
c.limbs() as u64 >= limbs,

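Put together, preparing the scalar and applying the product with the renamed types reads as follows (a sketch; the seed and parameters are placeholders, mirroring the example at the top of this commit):

use base2k::{Module, Scalar, SvpPPol, VecZnx, VecZnxDft};
use sampling::source::Source;

fn svp_sketch(module: &Module, log_base2k: usize, log_q: usize) {
    let mut source: Source = Source::new([0u8; 32]);

    // s <- ternary secret, prepared (DFT domain) for the scalar x vector product.
    let mut s: Scalar = Scalar::new(module.n());
    s.fill_ternary_prob(0.5, &mut source);
    let mut s_ppol: SvpPPol = module.svp_new_ppol();
    module.svp_prepare(&mut s_ppol, &s);

    // a <- uniform, then res_dft <- DFT(s * a).
    let mut a: VecZnx = module.new_vec_znx(log_base2k, log_q);
    a.fill_uniform(&mut source);
    let mut res_dft: VecZnxDft = module.new_vec_znx_dft(a.limbs());
    module.svp_apply_dft(&mut res_dft, &s_ppol, &a);
}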
View File

@@ -2,46 +2,54 @@ use crate::bindings::{
znx_automorphism_i64, znx_automorphism_inplace_i64, znx_normalize, znx_zero_i64_ref,
};
use crate::cast_mut_u8_to_mut_i64_slice;
use crate::module::Module;
use itertools::izip;
use rand_distr::{Distribution, Normal};
use sampling::source::Source;
use std::cmp::min;
pub struct Vector {
impl Module {
pub fn new_vec_znx(&self, log_base2k: usize, log_q: usize) -> VecZnx {
VecZnx::new(self.n(), log_base2k, log_q)
}
}
#[derive(Clone)]
pub struct VecZnx {
pub n: usize,
pub log_base2k: usize,
pub prec: usize,
pub log_q: usize,
pub data: Vec<i64>,
}
impl Vector {
pub fn new(n: usize, log_base2k: usize, prec: usize) -> Self {
impl VecZnx {
pub fn new(n: usize, log_base2k: usize, log_q: usize) -> Self {
Self {
n: n,
log_base2k: log_base2k,
prec: prec,
data: vec![i64::default(); Self::buffer_size(n, log_base2k, prec)],
log_q: log_q,
data: vec![i64::default(); Self::buffer_size(n, log_base2k, log_q)],
}
}
pub fn buffer_size(n: usize, log_base2k: usize, prec: usize) -> usize {
n * ((prec + log_base2k - 1) / log_base2k)
pub fn buffer_size(n: usize, log_base2k: usize, log_q: usize) -> usize {
n * ((log_q + log_base2k - 1) / log_base2k)
}
pub fn from_buffer(&mut self, n: usize, log_base2k: usize, prec: usize, buf: &[i64]) {
let size = Self::buffer_size(n, log_base2k, prec);
pub fn from_buffer(&mut self, n: usize, log_base2k: usize, log_q: usize, buf: &[i64]) {
let size = Self::buffer_size(n, log_base2k, log_q);
assert!(
buf.len() >= size,
"invalid buffer: buf.len()={} < self.buffer_size(n={}, k={}, prec={})={}",
"invalid buffer: buf.len()={} < self.buffer_size(n={}, k={}, log_q={})={}",
buf.len(),
n,
log_base2k,
prec,
log_q,
size
);
self.n = n;
self.log_base2k = log_base2k;
self.prec = prec;
self.log_q = log_q;
self.data = Vec::from(&buf[..size])
}
@@ -53,14 +61,19 @@ impl Vector {
self.n
}
pub fn prec(&self) -> usize {
self.prec
pub fn log_q(&self) -> usize {
self.log_q
}
pub fn limbs(&self) -> usize {
self.data.len() / self.n
}
pub fn copy_from(&mut self, a: &VecZnx) {
let size = min(self.data.len(), a.data.len());
self.data[..size].copy_from_slice(&a.data[..size])
}
pub fn as_ptr(&self) -> *const i64 {
self.data.as_ptr()
}
@@ -87,7 +100,7 @@ impl Vector {
pub fn set_i64(&mut self, data: &[i64], log_max: usize) {
let size: usize = min(data.len(), self.n());
let k_rem: usize = self.log_base2k - (self.prec % self.log_base2k);
let k_rem: usize = self.log_base2k - (self.log_q % self.log_base2k);
// If 2^{log_base2k} * 2^{k_rem} < 2^{63}-1, then we can simply copy
// values on the last limb.
@@ -151,7 +164,7 @@ impl Vector {
self.n
);
data.copy_from_slice(self.at(0));
let rem: usize = self.log_base2k - (self.prec % self.log_base2k);
let rem: usize = self.log_base2k - (self.log_q % self.log_base2k);
(1..self.limbs()).for_each(|i| {
if i == self.limbs() - 1 && rem != self.log_base2k {
let k_rem: usize = self.log_base2k - rem;
@@ -166,6 +179,22 @@ impl Vector {
})
}
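// Returns the i-th coefficient, reconstructed from all limbs.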
pub fn get_single_i64(&self, i: usize) -> i64 {
assert!(i < self.n());
let mut res: i64 = self.data[i];
let rem: usize = self.log_base2k - (self.log_q % self.log_base2k);
(1..self.limbs()).for_each(|j| {
let x = self.data[i + j * self.n];
if j == self.limbs() - 1 && rem != self.log_base2k {
let k_rem: usize = self.log_base2k - rem;
res = (res << k_rem) + (x >> rem);
} else {
res = (res << self.log_base2k) + x;
}
});
res
}
pub fn automorphism_inplace(&mut self, gal_el: i64) {
unsafe {
(0..self.limbs()).for_each(|i| {
@@ -173,7 +202,7 @@ impl Vector {
})
}
}
pub fn automorphism(&mut self, gal_el: i64, a: &mut Vector) {
pub fn automorphism(&mut self, gal_el: i64, a: &mut VecZnx) {
unsafe {
(0..self.limbs()).for_each(|i| {
znx_automorphism_i64(self.n as u64, gal_el, a.at_mut_ptr(i), self.at_ptr(i))
@@ -192,7 +221,7 @@ impl Vector {
.iter_mut()
.for_each(|x| *x = (source.next_u64n(base2k, mask) as i64) - base2k_half);
let log_base2k_rem: usize = self.prec % self.log_base2k;
let log_base2k_rem: usize = self.log_q % self.log_base2k;
if log_base2k_rem != 0 {
base2k = 1 << log_base2k_rem;
@@ -206,7 +235,7 @@ impl Vector {
}
pub fn add_dist_f64<T: Distribution<f64>>(&mut self, source: &mut Source, dist: T, bound: f64) {
let log_base2k_rem: usize = self.prec % self.log_base2k;
let log_base2k_rem: usize = self.log_q % self.log_base2k;
if log_base2k_rem != 0 {
self.at_mut(self.limbs() - 1).iter_mut().for_each(|a| {
@@ -237,13 +266,13 @@ impl Vector {
}
assert!(
k <= self.prec,
k <= self.log_q,
"invalid argument k: k={} > self.log_q()={}",
k,
self.prec()
self.log_q()
);
self.prec -= k;
self.log_q -= k;
self.data
.truncate((self.limbs() - k / self.log_base2k) * self.n());
@@ -256,11 +285,43 @@ impl Vector {
.for_each(|x: &mut i64| *x &= mask)
}
}
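// Right-shifts the coefficients by k bits (in-place division by 2^k).
// `carry` is scratch space expected to hold at least n i64 values (8*n bytes).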
pub fn rsh(&mut self, k: usize, carry: &mut [u8]) {
let limbs: usize = self.limbs();
let limbs_steps: usize = k / self.log_base2k;
self.data.rotate_right(self.n * limbs_steps);
unsafe {
znx_zero_i64_ref((self.n * limbs_steps) as u64, self.data.as_mut_ptr());
}
let k_rem = k % self.log_base2k;
if k_rem != 0 {
let carry_i64: &mut [i64] = cast_mut_u8_to_mut_i64_slice(carry);
unsafe {
znx_zero_i64_ref(self.n() as u64, carry_i64.as_mut_ptr());
}
let mask: i64 = (1 << k_rem) - 1;
let log_base2k: usize = self.log_base2k;
println!("mask: {} log_base2k: {}", mask, log_base2k);
(limbs_steps..limbs).for_each(|i| {
izip!(carry_i64.iter_mut(), self.at_mut(i).iter_mut()).for_each(|(ci, xi)| {
*xi += *ci << log_base2k;
*ci = *xi & mask;
*xi /= 1 << k_rem;
});
})
}
}
}
#[cfg(test)]
mod tests {
use crate::vector::Vector;
use crate::VecZnx;
use itertools::izip;
use sampling::source::Source;
@@ -269,7 +330,7 @@ mod tests {
let n: usize = 32;
let k: usize = 19;
let prec: usize = 128;
let mut a: Vector = Vector::new(n, k, prec);
let mut a: VecZnx = VecZnx::new(n, k, prec);
let mut have: Vec<i64> = vec![i64::default(); n];
have.iter_mut()
.enumerate()
@@ -285,7 +346,7 @@ mod tests {
let n: usize = 8;
let k: usize = 17;
let prec: usize = 84;
let mut a: Vector = Vector::new(n, k, prec);
let mut a: VecZnx = VecZnx::new(n, k, prec);
let mut have: Vec<i64> = vec![i64::default(); n];
let mut source = Source::new([1; 32]);
have.iter_mut().for_each(|x| {
@@ -305,7 +366,7 @@ mod tests {
let n: usize = 8;
let k: usize = 17;
let prec: usize = 84;
let mut a: Vector = Vector::new(n, k, prec);
let mut a: VecZnx = VecZnx::new(n, k, prec);
let mut have: Vec<i64> = vec![i64::default(); n];
let mut source = Source::new([1; 32]);
have.iter_mut().for_each(|x| {

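A short sketch of the renamed VecZnx surface (placeholder parameters; the scratch size for rsh is an assumption based on the internal cast to an i64 slice):

use base2k::{Module, VecZnx};

fn vec_znx_sketch(module: &Module) {
    let (log_base2k, log_q) = (17, 85); // placeholders
    let mut a: VecZnx = module.new_vec_znx(log_base2k, log_q);

    // `prec` is now `log_q`; the limb count is still ceil(log_q / log_base2k).
    assert_eq!(a.log_q(), log_q);
    assert_eq!(a.limbs(), (log_q + log_base2k - 1) / log_base2k);

    // Encode small coefficients, read one back with the new single-coefficient getter.
    let ones: Vec<i64> = vec![1; module.n()];
    a.set_i64(&ones, 1);
    let _c0: i64 = a.get_single_i64(0);

    // In-place division by 2^k; the scratch is assumed to need n i64 values.
    let mut carry: Vec<u8> = vec![0u8; module.n() * 8];
    a.rsh(3, &mut carry);
}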
View File

@@ -1,9 +1,111 @@
use crate::bindings::vec_znx_automorphism;
use crate::module::Module;
use crate::vector::Vector;
use crate::bindings::{vec_znx_add, vec_znx_automorphism, vec_znx_rotate, vec_znx_sub};
use crate::{Module, VecZnx};
impl Module {
pub fn vec_znx_automorphism(&self, gal_el: i64, b: &mut Vector, a: &Vector) {
// c <- a + b
pub fn vec_znx_add(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx) {
unsafe {
vec_znx_add(
self.0,
c.as_mut_ptr(),
c.limbs() as u64,
c.n() as u64,
a.as_ptr(),
a.limbs() as u64,
a.n() as u64,
b.as_ptr(),
b.limbs() as u64,
b.n() as u64,
)
}
}
// b <- a + b
pub fn vec_znx_add_inplace(&self, b: &mut VecZnx, a: &VecZnx) {
unsafe {
vec_znx_add(
self.0,
b.as_mut_ptr(),
b.limbs() as u64,
b.n() as u64,
a.as_ptr(),
a.limbs() as u64,
a.n() as u64,
b.as_ptr(),
b.limbs() as u64,
b.n() as u64,
)
}
}
// c <- a - b
pub fn vec_znx_sub(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx) {
unsafe {
vec_znx_sub(
self.0,
c.as_mut_ptr(),
c.limbs() as u64,
c.n() as u64,
a.as_ptr(),
a.limbs() as u64,
a.n() as u64,
b.as_ptr(),
b.limbs() as u64,
b.n() as u64,
)
}
}
// b <- a - b
pub fn vec_znx_sub_inplace(&self, b: &mut VecZnx, a: &VecZnx) {
unsafe {
vec_znx_sub(
self.0,
b.as_mut_ptr(),
b.limbs() as u64,
b.n() as u64,
a.as_ptr(),
a.limbs() as u64,
a.n() as u64,
b.as_ptr(),
b.limbs() as u64,
b.n() as u64,
)
}
}
pub fn vec_znx_rotate(&self, k: i64, a: &mut VecZnx, b: &VecZnx) {
unsafe {
vec_znx_rotate(
self.0,
k,
a.as_mut_ptr(),
a.limbs() as u64,
a.n() as u64,
b.as_ptr(),
b.limbs() as u64,
b.n() as u64,
)
}
}
pub fn vec_znx_rotate_inplace(&self, k: i64, a: &mut VecZnx) {
unsafe {
vec_znx_rotate(
self.0,
k,
a.as_mut_ptr(),
a.limbs() as u64,
a.n() as u64,
a.as_ptr(),
a.limbs() as u64,
a.n() as u64,
)
}
}
// b <- a(X^gal_el)
pub fn vec_znx_automorphism(&self, gal_el: i64, b: &mut VecZnx, a: &VecZnx) {
unsafe {
vec_znx_automorphism(
self.0,
@@ -18,7 +120,8 @@ impl Module {
}
}
pub fn vec_znx_automorphism_inplace(&self, gal_el: i64, a: &mut Vector) {
// a <- a(X^gal_el)
pub fn vec_znx_automorphism_inplace(&self, gal_el: i64, a: &mut VecZnx) {
unsafe {
vec_znx_automorphism(
self.0,

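The new element-wise operations in one place (a sketch; all operands share the module's ring degree and the same placeholder parameters):

use base2k::{Module, VecZnx};

fn vec_znx_ops_sketch(module: &Module, log_base2k: usize, log_q: usize) {
    let a: VecZnx = module.new_vec_znx(log_base2k, log_q);
    let b: VecZnx = module.new_vec_znx(log_base2k, log_q);
    let mut c: VecZnx = module.new_vec_znx(log_base2k, log_q);

    module.vec_znx_add(&mut c, &a, &b); // c <- a + b
    module.vec_znx_sub(&mut c, &a, &b); // c <- a - b
    module.vec_znx_add_inplace(&mut c, &a); // c <- c + a
    module.vec_znx_rotate_inplace(1, &mut c); // rotate c by X^1
    let gal_el: i64 = module.galois_element(1);
    module.vec_znx_automorphism_inplace(gal_el, &mut c); // c <- c(X^gal_el)
}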
View File

@@ -2,17 +2,16 @@ use crate::bindings::{
new_vec_znx_big, vec_znx_big_add_small, vec_znx_big_automorphism, vec_znx_big_normalize_base2k,
vec_znx_big_normalize_base2k_tmp_bytes, vec_znx_big_sub_small_a,
};
use crate::module::{Module, VECZNXBIG};
use crate::vector::Vector;
use crate::{Module, VecZnx, VecZnxBig};
impl Module {
// Allocates a vector Z[X]/(X^N+1) that stores non-normalized values.
pub fn new_vec_znx_big(&self, limbs: usize) -> VECZNXBIG {
unsafe { VECZNXBIG(new_vec_znx_big(self.0, limbs as u64), limbs) }
pub fn new_vec_znx_big(&self, limbs: usize) -> VecZnxBig {
unsafe { VecZnxBig(new_vec_znx_big(self.0, limbs as u64), limbs) }
}
// b <- b - a
pub fn vec_znx_big_sub_small_a_inplace(&self, b: &mut VECZNXBIG, a: &Vector) {
pub fn vec_znx_big_sub_small_a_inplace(&self, b: &mut VecZnxBig, a: &VecZnx) {
let limbs: usize = a.limbs();
assert!(
b.limbs() >= limbs,
@@ -35,7 +34,7 @@ impl Module {
}
// c <- b - a
pub fn big_sub_small_a(&self, c: &mut VECZNXBIG, a: &Vector, b: &VECZNXBIG) {
pub fn vec_znx_big_sub_small_a(&self, c: &mut VecZnxBig, a: &VecZnx, b: &VecZnxBig) {
let limbs: usize = a.limbs();
assert!(
b.limbs() >= limbs,
@@ -64,7 +63,7 @@ impl Module {
}
// c <- b + a
pub fn vec_znx_big_add_small(&self, c: &mut VECZNXBIG, a: &Vector, b: &VECZNXBIG) {
pub fn vec_znx_big_add_small(&self, c: &mut VecZnxBig, a: &VecZnx, b: &VecZnxBig) {
let limbs: usize = a.limbs();
assert!(
b.limbs() >= limbs,
@@ -93,7 +92,7 @@ impl Module {
}
// b <- b + a
pub fn vec_znx_big_add_small_inplace(&self, b: &mut VECZNXBIG, a: &Vector) {
pub fn vec_znx_big_add_small_inplace(&self, b: &mut VecZnxBig, a: &VecZnx) {
let limbs: usize = a.limbs();
assert!(
b.limbs() >= limbs,
@@ -120,7 +119,7 @@ impl Module {
}
// b <- normalize(a)
pub fn vec_znx_big_normalize(&self, b: &mut Vector, a: &VECZNXBIG, tmp_bytes: &mut [u8]) {
pub fn vec_znx_big_normalize(&self, b: &mut VecZnx, a: &VecZnxBig, tmp_bytes: &mut [u8]) {
let limbs: usize = b.limbs();
assert!(
b.limbs() >= limbs,
@@ -148,13 +147,13 @@ impl Module {
}
}
pub fn big_automorphism(&self, gal_el: i64, b: &mut VECZNXBIG, a: &VECZNXBIG) {
pub fn vec_znx_big_automorphism(&self, gal_el: i64, b: &mut VecZnxBig, a: &VecZnxBig) {
unsafe {
vec_znx_big_automorphism(self.0, gal_el, b.0, b.limbs() as u64, a.0, a.limbs() as u64);
}
}
pub fn big_automorphism_inplace(&self, gal_el: i64, a: &mut VECZNXBIG) {
pub fn vec_znx_big_automorphism_inplace(&self, gal_el: i64, a: &mut VecZnxBig) {
unsafe {
vec_znx_big_automorphism(self.0, gal_el, a.0, a.limbs() as u64, a.0, a.limbs() as u64);
}

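And the big-coefficient side, mixing a non-normalized accumulator with normalized VecZnx operands (a sketch; tmp_bytes is assumed to be sized with the library's normalize tmp-bytes helper):

use base2k::{Module, VecZnx, VecZnxBig};

fn vec_znx_big_sketch(module: &Module, log_base2k: usize, log_q: usize, tmp_bytes: &mut [u8]) {
    let a: VecZnx = module.new_vec_znx(log_base2k, log_q);
    let mut acc: VecZnxBig = module.new_vec_znx_big(a.limbs());

    module.vec_znx_big_add_small_inplace(&mut acc, &a); // acc <- acc + a
    module.vec_znx_big_sub_small_a_inplace(&mut acc, &a); // acc <- acc - a

    let gal_el: i64 = module.galois_element(-1);
    module.vec_znx_big_automorphism_inplace(gal_el, &mut acc); // acc <- acc(X^gal_el)

    // Back to a normalized VecZnx.
    let mut b: VecZnx = module.new_vec_znx(log_base2k, log_q);
    module.vec_znx_big_normalize(&mut b, &acc, tmp_bytes);
}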
View File

@@ -1,14 +1,14 @@
use crate::bindings::{new_vec_znx_dft, vec_znx_idft, vec_znx_idft_tmp_a, vec_znx_idft_tmp_bytes};
use crate::module::{Module, VECZNXBIG, VECZNXDFT};
use crate::module::{Module, VecZnxBig, VecZnxDft};
impl Module {
// Allocates a vector Z[X]/(X^N+1) that stores normalized values in the DFT space.
pub fn new_vec_znx_dft(&self, limbs: usize) -> VECZNXDFT {
unsafe { VECZNXDFT(new_vec_znx_dft(self.0, limbs as u64), limbs) }
pub fn new_vec_znx_dft(&self, limbs: usize) -> VecZnxDft {
unsafe { VecZnxDft(new_vec_znx_dft(self.0, limbs as u64), limbs) }
}
// b <- IDFT(a), uses a as scratch space.
pub fn vec_znx_idft_tmp_a(&self, b: &mut VECZNXBIG, a: &mut VECZNXDFT, a_limbs: usize) {
pub fn vec_znx_idft_tmp_a(&self, b: &mut VecZnxBig, a: &mut VecZnxDft, a_limbs: usize) {
assert!(
b.limbs() >= a_limbs,
"invalid b_vector: b_vector.limbs()={} < a_limbs={}",
@@ -26,8 +26,8 @@ impl Module {
// b <- IDFT(a), scratch space size obtained with [vec_znx_idft_tmp_bytes].
pub fn vec_znx_idft(
&self,
b_vector: &mut VECZNXBIG,
a_vector: &mut VECZNXDFT,
b_vector: &mut VecZnxBig,
a_vector: &mut VecZnxDft,
a_limbs: usize,
tmp_bytes: &mut [u8],
) {
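Finally, outside the diff, the two IDFT entry points side by side (a sketch; tmp_bytes for vec_znx_idft is assumed to be sized with the vec_znx_idft_tmp_bytes helper):

use base2k::{Module, VecZnxBig, VecZnxDft};

fn idft_sketch(module: &Module, limbs: usize, tmp_bytes: &mut [u8]) {
    let mut a: VecZnxDft = module.new_vec_znx_dft(limbs);
    let mut b: VecZnxBig = module.new_vec_znx_big(limbs);

    module.vec_znx_idft(&mut b, &mut a, limbs, tmp_bytes); // b <- IDFT(a), external scratch
    module.vec_znx_idft_tmp_a(&mut b, &mut a, limbs); // b <- IDFT(a), uses a as scratch
}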