From 0519510667f6211b7b96dece0b68f6812d9d349f Mon Sep 17 00:00:00 2001
From: Jean-Philippe Bossuat
Date: Mon, 10 Feb 2025 16:01:30 +0100
Subject: [PATCH] added from_bytes for SvpPPol

---
 base2k/examples/vector_matrix_product.rs |  5 ++--
 base2k/src/svp.rs                        | 26 +++++++++++++-----
 base2k/src/vec_znx.rs                    | 35 ++++++++++++++++--------
 base2k/src/vec_znx_big.rs                |  6 ++--
 base2k/src/vec_znx_dft.rs                |  6 ++--
 5 files changed, 53 insertions(+), 25 deletions(-)

diff --git a/base2k/examples/vector_matrix_product.rs b/base2k/examples/vector_matrix_product.rs
index 8e4c9da..fb10d3d 100644
--- a/base2k/examples/vector_matrix_product.rs
+++ b/base2k/examples/vector_matrix_product.rs
@@ -1,8 +1,7 @@
 use base2k::{
-    Encoding, Free, Infos, Matrix3D, Module, VecZnx, VecZnxBig, VecZnxDft, VecZnxOps, VmpPMat,
-    VmpPMatOps, FFT64,
+    Encoding, Free, Infos, Module, VecZnx, VecZnxBig, VecZnxDft, VecZnxOps, VmpPMat, VmpPMatOps,
+    FFT64,
 };
-use std::cmp::min;
 
 fn main() {
     let log_n = 5;
diff --git a/base2k/src/svp.rs b/base2k/src/svp.rs
index d7b17a7..cc07247 100644
--- a/base2k/src/svp.rs
+++ b/base2k/src/svp.rs
@@ -68,6 +68,10 @@ impl SvpPPol {
         self.1
     }
 
+    pub fn from_bytes(size: usize, bytes: &mut [u8]) -> SvpPPol {
+        SvpPPol(bytes.as_mut_ptr() as *mut svp::svp_ppol_t, size)
+    }
+
     /// Returns the number of limbs of the [SvpPPol], which is always 1.
     pub fn limbs(&self) -> usize {
         1
@@ -75,26 +79,34 @@
 }
 
 pub trait SvpPPolOps {
-    /// Prepares a [crate::Scalar] for a [SvpPPolOps::svp_apply_dft].
-    fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar);
-
     /// Allocates a new [SvpPPol].
     fn svp_new_ppol(&self) -> SvpPPol;
 
+    /// Returns the minimum number of bytes necessary to allocate
+    /// a new [SvpPPol] through [SvpPPol::from_bytes].
+    fn bytes_of_svp_ppol(&self) -> usize;
+
+    /// Prepares a [crate::Scalar] for a [SvpPPolOps::svp_apply_dft].
+    fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar);
+
     /// Applies the [SvpPPol] x [VecZnxDft] product, where each limb of
     /// the [VecZnxDft] is multiplied with [SvpPPol].
    fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx);
 }
 
 impl SvpPPolOps for Module {
-    fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar) {
-        unsafe { svp::svp_prepare(self.0, svp_ppol.0, a.as_ptr()) }
-    }
-
     fn svp_new_ppol(&self) -> SvpPPol {
         unsafe { SvpPPol(svp::new_svp_ppol(self.0), self.n()) }
     }
 
+    fn bytes_of_svp_ppol(&self) -> usize {
+        unsafe { svp::bytes_of_svp_ppol(self.0) as usize }
+    }
+
+    fn svp_prepare(&self, svp_ppol: &mut SvpPPol, a: &Scalar) {
+        unsafe { svp::svp_prepare(self.0, svp_ppol.0, a.as_ptr()) }
+    }
+
     fn svp_apply_dft(&self, c: &mut VecZnxDft, a: &SvpPPol, b: &VecZnx) {
         let limbs: u64 = b.limbs() as u64;
         assert!(
diff --git a/base2k/src/vec_znx.rs b/base2k/src/vec_znx.rs
index 24e22c1..f0a9421 100644
--- a/base2k/src/vec_znx.rs
+++ b/base2k/src/vec_znx.rs
@@ -21,19 +21,21 @@ impl VecZnx {
     pub fn new(n: usize, limbs: usize) -> Self {
         Self {
             n: n,
-            data: vec![i64::default(); Self::buffer_size(n, limbs)],
+            data: vec![i64::default(); n * limbs],
         }
     }
 
-    /// Returns the minimum size of the [i64] array required to assign a
-    /// new backend array to a [VecZnx] through [VecZnx::from_buffer].
-    pub fn buffer_size(n: usize, limbs: usize) -> usize {
-        n * limbs
+    /// Returns the minimum size of the [u8] array required to assign a
+    /// new backend array to a [VecZnx] through [VecZnx::from_bytes].
+    pub fn bytes(n: usize, limbs: usize) -> usize {
+        n * limbs * 8
     }
 
-    /// Assigns a new backing array to a [VecZnx].
-    pub fn from_buffer(&mut self, n: usize, limbs: usize, buf: &mut [i64]) {
-        let size = Self::buffer_size(n, limbs);
+    /// Returns a new [VecZnx] with the provided data as backing array.
+    /// User must ensure that data is properly aligned and that
+    /// the size of data is at least equal to [Module::bytes_of_vec_znx].
+    pub fn from_bytes(n: usize, limbs: usize, buf: &mut [u8]) -> VecZnx {
+        let size = Self::bytes(n, limbs);
         assert!(
             buf.len() >= size,
             "invalid buffer: buf.len()={} < self.buffer_size(n={}, limbs={})={}",
             buf.len(),
             n,
             limbs,
             size
         );
-        self.n = n;
-        self.data = Vec::from(&buf[..size])
+
+        VecZnx {
+            n: n,
+            data: Vec::from(cast_mut_u8_to_mut_i64_slice(&mut buf[..size])),
+        }
     }
 
     /// Copies the coefficients of `a` on the receiver.
@@ -377,6 +382,10 @@ pub trait VecZnxOps {
     /// * `limbs`: the number of limbs.
     fn new_vec_znx(&self, limbs: usize) -> VecZnx;
 
+    /// Returns the minimum number of bytes necessary to allocate
+    /// a new [VecZnx] through [VecZnx::from_bytes].
+    fn bytes_of_vec_znx(&self, limbs: usize) -> usize;
+
     /// c <- a + b.
     fn vec_znx_add(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx);
 
@@ -429,6 +438,10 @@ impl VecZnxOps for Module {
         VecZnx::new(self.n(), limbs)
     }
 
+    fn bytes_of_vec_znx(&self, limbs: usize) -> usize {
+        self.n() * limbs * 8
+    }
+
     // c <- a + b
     fn vec_znx_add(&self, c: &mut VecZnx, a: &VecZnx, b: &VecZnx) {
         unsafe {
@@ -630,7 +643,7 @@ impl VecZnxOps for Module {
             )
         });
 
-        a.iter().enumerate().for_each(|(i, ai)| {
+        a.iter().enumerate().for_each(|(_, ai)| {
             ai.switch_degree(b);
             self.vec_znx_rotate_inplace(-1, b);
         });
diff --git a/base2k/src/vec_znx_big.rs b/base2k/src/vec_znx_big.rs
index 7113f00..f8a6ac5 100644
--- a/base2k/src/vec_znx_big.rs
+++ b/base2k/src/vec_znx_big.rs
@@ -5,10 +5,10 @@ use crate::{Infos, Module, VecZnx, VecZnxDft};
 pub struct VecZnxBig(pub *mut vec_znx_big::vec_znx_bigcoeff_t, pub usize);
 
 impl VecZnxBig {
-    /// Casts a contiguous array of [u8] into as a [VecZnxDft].
+    /// Returns a new [VecZnxBig] with the provided data as backing array.
     /// User must ensure that data is properly alligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_big].
-    pub fn from_bytes(&self, limbs: usize, data: &mut [u8]) -> VecZnxBig {
+    pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxBig {
         VecZnxBig(
             data.as_mut_ptr() as *mut vec_znx_big::vec_znx_bigcoeff_t,
             limbs,
@@ -29,6 +29,8 @@ impl Module {
         unsafe { VecZnxBig(vec_znx_big::new_vec_znx_big(self.0, limbs as u64), limbs) }
     }
 
+    /// Returns the minimum number of bytes necessary to allocate
+    /// a new [VecZnxBig] through [VecZnxBig::from_bytes].
     pub fn bytes_of_vec_znx_big(&self, limbs: usize) -> usize {
         unsafe { vec_znx_big::bytes_of_vec_znx_big(self.0, limbs as u64) as usize }
     }
diff --git a/base2k/src/vec_znx_dft.rs b/base2k/src/vec_znx_dft.rs
index 29fef62..fd4f4f7 100644
--- a/base2k/src/vec_znx_dft.rs
+++ b/base2k/src/vec_znx_dft.rs
@@ -6,10 +6,10 @@ use crate::{Module, VecZnxBig};
 pub struct VecZnxDft(pub *mut vec_znx_dft::vec_znx_dft_t, pub usize);
 
 impl VecZnxDft {
-    /// Casts a contiguous array of [u8] into as a [VecZnxDft].
+    /// Returns a new [VecZnxDft] with the provided data as backing array.
     /// User must ensure that data is properly alligned and that
     /// the size of data is at least equal to [Module::bytes_of_vec_znx_dft].
- pub fn from_bytes(&self, limbs: usize, data: &mut [u8]) -> VecZnxDft { + pub fn from_bytes(limbs: usize, data: &mut [u8]) -> VecZnxDft { VecZnxDft(data.as_mut_ptr() as *mut vec_znx_dft::vec_znx_dft_t, limbs) } @@ -30,6 +30,8 @@ impl Module { unsafe { VecZnxDft(vec_znx_dft::new_vec_znx_dft(self.0, limbs as u64), limbs) } } + /// Returns the minimum number of bytes necessary to allocate + /// a new [VecZnxDft] through [VecZnxDft::from_bytes]. pub fn bytes_of_vec_znx_dft(&self, limbs: usize) -> usize { unsafe { bytes_of_vec_znx_dft(self.0, limbs as u64) as usize } }
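
A minimal usage sketch for the new byte-backed constructors (illustration only,
not part of the diff; the two helper functions are hypothetical, and it assumes
SvpPPol and SvpPPolOps are re-exported at the crate root the same way Module and
VecZnx are in the existing example):

    use base2k::{Module, SvpPPol, SvpPPolOps, VecZnx};

    // VecZnx: no Module is needed; VecZnx::bytes(n, limbs) gives the minimum
    // buffer size in bytes and from_bytes copies the buffer into the new VecZnx.
    fn vec_znx_from_scratch(n: usize, limbs: usize) -> VecZnx {
        let mut buf = vec![0u8; VecZnx::bytes(n, limbs)];
        VecZnx::from_bytes(n, limbs, &mut buf)
    }

    // SvpPPol: size the scratch buffer with the new bytes_of_svp_ppol query.
    // The first argument of SvpPPol::from_bytes fills the slot that
    // svp_new_ppol fills with self.n(), so the ring degree n is passed here.
    // The view only reinterprets the buffer's pointer, so the buffer is
    // returned alongside it and must outlive every use of the SvpPPol.
    fn svp_ppol_from_scratch(module: &Module, n: usize) -> (Vec<u8>, SvpPPol) {
        let mut bytes = vec![0u8; module.bytes_of_svp_ppol()];
        let ppol = SvpPPol::from_bytes(n, &mut bytes);
        (bytes, ppol)
    }

The asymmetry mirrors the patch itself: VecZnx::from_bytes builds an owned
Vec<i64> from the bytes, while SvpPPol::from_bytes (like VecZnxBig::from_bytes
and VecZnxDft::from_bytes) simply casts the caller's buffer.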