Add cross-basek normalization (#90)

* added cross_basek_normalization

* updated method signatures to take layouts

* fixed cross-base normalization

fix #91
fix #93
Jean-Philippe Bossuat
2025-09-30 14:40:10 +02:00
committed by GitHub
parent 4da790ea6a
commit 37e13b965c
216 changed files with 12481 additions and 7745 deletions
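The heart of the change: the old shift-and-mask encoding (guarded by log_max) is replaced by a carry-propagating normalization over base-2^k limbs. A minimal standalone sketch of the balanced digit/carry decomposition this relies on, with hypothetical get_digit/get_carry helpers modelled on the get_digit_i128/get_carry_i128 calls visible in the diff below (not the crate's API):

/// Balanced base-2^k digit of x, in [-2^(k-1), 2^(k-1)).
fn get_digit(base2k: usize, x: i64) -> i64 {
    let shift = 64 - base2k;
    (x << shift) >> shift // sign-extend the low base2k bits
}

/// Carry left after removing the digit; exact since x == digit (mod 2^base2k).
fn get_carry(base2k: usize, x: i64, digit: i64) -> i64 {
    (x - digit) >> base2k
}

fn main() {
    let (base2k, size) = (4usize, 4usize);
    let x: i64 = 1234;
    // As in encode_vec_i64 below: place the raw value on the last limb...
    let mut limbs = vec![0i64; size];
    limbs[size - 1] = x;
    // ...then normalize from the last limb towards the first.
    let mut carry = 0i64;
    for j in (0..size).rev() {
        let v = limbs[j] + carry;
        let d = get_digit(base2k, v);
        carry = get_carry(base2k, v, d);
        limbs[j] = d;
    }
    // Horner recomposition recovers the value: [0, 5, -3, 2] -> 1234.
    let back = limbs.iter().fold(0i64, |acc, &d| (acc << base2k) + d);
    assert_eq!(back, x);
}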


@@ -3,65 +3,108 @@ use rug::{Assign, Float};
use crate::{
layouts::{DataMut, DataRef, VecZnx, VecZnxToMut, VecZnxToRef, Zn, ZnToMut, ZnToRef, ZnxInfos, ZnxView, ZnxViewMut},
reference::znx::znx_zero_ref,
reference::znx::{
ZnxNormalizeFinalStepInplace, ZnxNormalizeFirstStepInplace, ZnxNormalizeMiddleStepInplace, ZnxRef, ZnxZero,
get_carry_i128, get_digit_i128, znx_zero_ref,
},
};
impl<D: DataMut> VecZnx<D> {
pub fn encode_vec_i64(&mut self, basek: usize, col: usize, k: usize, data: &[i64], log_max: usize) {
let size: usize = k.div_ceil(basek);
pub fn encode_vec_i64(&mut self, base2k: usize, col: usize, k: usize, data: &[i64]) {
let size: usize = k.div_ceil(base2k);
#[cfg(debug_assertions)]
{
let a: VecZnx<&mut [u8]> = self.to_mut();
assert!(
size <= a.size(),
"invalid argument k.div_ceil(basek)={} > a.size()={}",
"invalid argument k.div_ceil(base2k)={} > a.size()={}",
size,
a.size()
);
assert!(col < a.cols());
assert!(data.len() <= a.n())
assert!(data.len() == a.n())
}
let data_len: usize = data.len();
let mut a: VecZnx<&mut [u8]> = self.to_mut();
let k_rem: usize = basek - (k % basek);
let a_size: usize = a.size();
// Zeroes the limbs of column col
(0..a.size()).for_each(|i| {
for i in 0..a_size {
znx_zero_ref(a.at_mut(col, i));
});
// If 2^{log_max + k_rem} < 2^{63}, then we can simply copy
// the values onto the last limb.
// Else we decompose the values in base 2^{basek}.
if log_max + k_rem < 63 || k_rem == basek {
a.at_mut(col, size - 1)[..data_len].copy_from_slice(&data[..data_len]);
} else {
let mask: i64 = (1 << basek) - 1;
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size)
.rev()
.enumerate()
.for_each(|(i, i_rev)| {
let shift: usize = i * basek;
izip!(a.at_mut(col, i_rev).iter_mut(), data.iter()).for_each(|(y, x)| *y = (x >> shift) & mask);
})
}
// Case where k % basek != 0.
if k_rem != basek {
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size).rev().for_each(|i| {
a.at_mut(col, i)[..data_len]
.iter_mut()
.for_each(|x| *x <<= k_rem);
})
// Copies the data onto the last limb (index size - 1)
a.at_mut(col, size - 1).copy_from_slice(data);
let mut carry: Vec<i64> = vec![0i64; a.n()];
let k_rem: usize = (base2k - (k % base2k)) % base2k;
// Normalizes and shifts if necessary.
for j in (0..size).rev() {
if j == size - 1 {
ZnxRef::znx_normalize_first_step_inplace(base2k, k_rem, a.at_mut(col, j), &mut carry);
} else if j == 0 {
ZnxRef::znx_normalize_final_step_inplace(base2k, k_rem, a.at_mut(col, j), &mut carry);
} else {
ZnxRef::znx_normalize_middle_step_inplace(base2k, k_rem, a.at_mut(col, j), &mut carry);
}
}
}
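The padding term (base2k - (k % base2k)) % base2k used above counts the unused low bits of the last limb when the precision k is not a multiple of base2k; the trailing % base2k maps the exact-multiple case to zero. A quick standalone check of the identity k_rem == size * base2k - k:

fn main() {
    let base2k = 17;
    for k in [17, 20, 34, 35, 51] {
        let size = k.div_ceil(base2k); // limbs needed to hold k bits
        let k_rem = (base2k - (k % base2k)) % base2k;
        // size limbs span size * base2k bits, k_rem of which are padding.
        assert_eq!(k_rem, size * base2k - k);
        println!("k={k}: size={size}, k_rem={k_rem}");
    }
}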
pub fn encode_coeff_i64(&mut self, basek: usize, col: usize, k: usize, idx: usize, data: i64, log_max: usize) {
let size: usize = k.div_ceil(basek);
pub fn encode_vec_i128(&mut self, base2k: usize, col: usize, k: usize, data: &[i128]) {
let size: usize = k.div_ceil(base2k);
#[cfg(debug_assertions)]
{
let a: VecZnx<&mut [u8]> = self.to_mut();
assert!(
size <= a.size(),
"invalid argument k.div_ceil(base2k)={} > a.size()={}",
size,
a.size()
);
assert!(col < a.cols());
assert!(data.len() == a.n())
}
let mut a: VecZnx<&mut [u8]> = self.to_mut();
let a_size: usize = a.size();
{
let mut carry_i128: Vec<i128> = vec![0i128; a.n()];
carry_i128.copy_from_slice(data);
for j in (0..size).rev() {
for (x, a) in izip!(a.at_mut(col, j).iter_mut(), carry_i128.iter_mut()) {
let digit: i128 = get_digit_i128(base2k, *a);
let carry: i128 = get_carry_i128(base2k, *a, digit);
*x = digit as i64;
*a = carry;
}
}
}
for j in size..a_size {
ZnxRef::znx_zero(a.at_mut(col, j));
}
let mut carry: Vec<i64> = vec![0i64; a.n()];
let k_rem: usize = (base2k - (k % base2k)) % base2k;
for j in (0..size).rev() {
if j == a_size - 1 {
ZnxRef::znx_normalize_first_step_inplace(base2k, k_rem, a.at_mut(col, j), &mut carry);
} else if j == 0 {
ZnxRef::znx_normalize_final_step_inplace(base2k, k_rem, a.at_mut(col, j), &mut carry);
} else {
ZnxRef::znx_normalize_middle_step_inplace(base2k, k_rem, a.at_mut(col, j), &mut carry);
}
}
}
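encode_vec_i128 works in two passes: the i128 inputs are first split into balanced base-2^k digits that each fit in an i64 limb, then the same i64 normalization as in encode_vec_i64 applies the k_rem shift. A standalone model of the first pass, assuming get_digit_i128/get_carry_i128 have the usual sign-extend and exact-division semantics (an assumption, not taken from this diff):

fn get_digit_i128(base2k: usize, x: i128) -> i128 {
    let shift = 128 - base2k;
    (x << shift) >> shift // balanced digit in [-2^(k-1), 2^(k-1))
}

fn get_carry_i128(base2k: usize, x: i128, digit: i128) -> i128 {
    (x - digit) >> base2k // exact: x == digit (mod 2^base2k)
}

fn main() {
    let (base2k, size) = (20usize, 6usize);
    let x: i128 = -123_456_789_012_345;
    let mut limbs = vec![0i64; size];
    let mut acc = x;
    for j in (0..size).rev() {
        let d = get_digit_i128(base2k, acc);
        acc = get_carry_i128(base2k, acc, d);
        limbs[j] = d as i64; // every balanced digit fits in an i64
    }
    let back = limbs.iter().fold(0i128, |r, &d| (r << base2k) + d as i128);
    assert_eq!(back, x);
}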
pub fn encode_coeff_i64(&mut self, base2k: usize, col: usize, k: usize, idx: usize, data: i64) {
let size: usize = k.div_ceil(base2k);
#[cfg(debug_assertions)]
{
@@ -69,46 +112,42 @@ impl<D: DataMut> VecZnx<D> {
assert!(idx < a.n());
assert!(
size <= a.size(),
"invalid argument k.div_ceil(basek)={} > a.size()={}",
"invalid argument k.div_ceil(base2k)={} > a.size()={}",
size,
a.size()
);
assert!(col < a.cols());
}
let k_rem: usize = basek - (k % basek);
let mut a: VecZnx<&mut [u8]> = self.to_mut();
(0..a.size()).for_each(|j| a.at_mut(col, j)[idx] = 0);
let a_size = a.size();
// If 2^{log_max + k_rem} < 2^{63}, then we can simply copy
// the value onto the last limb.
// Else we decompose the value in base 2^{basek}.
if log_max + k_rem < 63 || k_rem == basek {
a.at_mut(col, size - 1)[idx] = data;
} else {
let mask: i64 = (1 << basek) - 1;
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size)
.rev()
.enumerate()
.for_each(|(j, j_rev)| {
a.at_mut(col, j_rev)[idx] = (data >> (j * basek)) & mask;
})
for j in 0..a_size {
a.at_mut(col, j)[idx] = 0
}
// Case where k % basek != 0.
if k_rem != basek {
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size).rev().for_each(|j| {
a.at_mut(col, j)[idx] <<= k_rem;
})
a.at_mut(col, size - 1)[idx] = data;
let mut carry: Vec<i64> = vec![0i64; 1];
let k_rem: usize = (base2k - (k % base2k)) % base2k;
for j in (0..size).rev() {
let slice = &mut a.at_mut(col, j)[idx..idx + 1];
if j == size - 1 {
ZnxRef::znx_normalize_first_step_inplace(base2k, k_rem, slice, &mut carry);
} else if j == 0 {
ZnxRef::znx_normalize_final_step_inplace(base2k, k_rem, slice, &mut carry);
} else {
ZnxRef::znx_normalize_middle_step_inplace(base2k, k_rem, slice, &mut carry);
}
}
}
}
impl<D: DataRef> VecZnx<D> {
pub fn decode_vec_i64(&self, basek: usize, col: usize, k: usize, data: &mut [i64]) {
let size: usize = k.div_ceil(basek);
pub fn decode_vec_i64(&self, base2k: usize, col: usize, k: usize, data: &mut [i64]) {
let size: usize = k.div_ceil(base2k);
#[cfg(debug_assertions)]
{
let a: VecZnx<&[u8]> = self.to_ref();
@@ -123,26 +162,26 @@ impl<D: DataRef> VecZnx<D> {
let a: VecZnx<&[u8]> = self.to_ref();
data.copy_from_slice(a.at(col, 0));
let rem: usize = basek - (k % basek);
if k < basek {
let rem: usize = base2k - (k % base2k);
if k < base2k {
data.iter_mut().for_each(|x| *x >>= rem);
} else {
(1..size).for_each(|i| {
if i == size - 1 && rem != basek {
let k_rem: usize = basek - rem;
if i == size - 1 && rem != base2k {
let k_rem: usize = (base2k - rem) % base2k;
izip!(a.at(col, i).iter(), data.iter_mut()).for_each(|(x, y)| {
*y = (*y << k_rem) + (x >> rem);
});
} else {
izip!(a.at(col, i).iter(), data.iter_mut()).for_each(|(x, y)| {
*y = (*y << basek) + x;
*y = (*y << base2k) + x;
});
}
})
}
}
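Decoding is plain Horner recomposition, except that when k is not a multiple of base2k the last limb stores its payload shifted left by rem, so only its top base2k - rem bits contribute. A standalone model of the loop above, with rem normalized so the exact-multiple case is zero:

fn decode_i64_model(base2k: usize, k: usize, limbs: &[i64]) -> i64 {
    let size = k.div_ceil(base2k);
    let rem = (base2k - (k % base2k)) % base2k;
    let mut res = 0i64;
    for (j, &x) in limbs.iter().take(size).enumerate() {
        if j == size - 1 && rem != 0 {
            // Last limb is pre-shifted: keep its top base2k - rem bits.
            res = (res << (base2k - rem)) + (x >> rem);
        } else {
            res = (res << base2k) + x;
        }
    }
    res
}

fn main() {
    // 1234 in balanced base-2^4 digits over 4 limbs (k = 16).
    assert_eq!(decode_i64_model(4, 16, &[0, 5, -3, 2]), 1234);
}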
pub fn decode_coeff_i64(&self, basek: usize, col: usize, k: usize, idx: usize) -> i64 {
pub fn decode_coeff_i64(&self, base2k: usize, col: usize, k: usize, idx: usize) -> i64 {
#[cfg(debug_assertions)]
{
let a: VecZnx<&[u8]> = self.to_ref();
@@ -151,22 +190,22 @@ impl<D: DataRef> VecZnx<D> {
}
let a: VecZnx<&[u8]> = self.to_ref();
let size: usize = k.div_ceil(basek);
let size: usize = k.div_ceil(base2k);
let mut res: i64 = 0;
let rem: usize = basek - (k % basek);
let rem: usize = base2k - (k % base2k);
(0..size).for_each(|j| {
let x: i64 = a.at(col, j)[idx];
if j == size - 1 && rem != basek {
let k_rem: usize = basek - rem;
if j == size - 1 && rem != base2k {
let k_rem: usize = (base2k - rem) % base2k;
res = (res << k_rem) + (x >> rem);
} else {
res = (res << basek) + x;
res = (res << base2k) + x;
}
});
res
}
pub fn decode_vec_float(&self, basek: usize, col: usize, data: &mut [Float]) {
pub fn decode_vec_float(&self, base2k: usize, col: usize, data: &mut [Float]) {
#[cfg(debug_assertions)]
{
let a: VecZnx<&[u8]> = self.to_ref();
@@ -181,12 +220,12 @@ impl<D: DataRef> VecZnx<D> {
let a: VecZnx<&[u8]> = self.to_ref();
let size: usize = a.size();
let prec: u32 = (basek * size) as u32;
let prec: u32 = (base2k * size) as u32;
// 2^{basek}
let base = Float::with_val(prec, (1u64 << basek) as f64);
// 2^{base2k}
let base: Float = Float::with_val(prec, (1u64 << base2k) as f64);
// y[i] = sum x[j][i] * 2^{-basek*j}
// y[i] = sum x[j][i] * 2^{-base2k*j}
(0..size).for_each(|i| {
if i == 0 {
izip!(a.at(col, size - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {
@@ -204,78 +243,74 @@ impl<D: DataRef> VecZnx<D> {
}
impl<D: DataMut> Zn<D> {
pub fn encode_i64(&mut self, basek: usize, k: usize, data: i64, log_max: usize) {
let size: usize = k.div_ceil(basek);
pub fn encode_i64(&mut self, base2k: usize, k: usize, data: i64) {
let size: usize = k.div_ceil(base2k);
#[cfg(debug_assertions)]
{
let a: Zn<&mut [u8]> = self.to_mut();
assert!(
size <= a.size(),
"invalid argument k.div_ceil(basek)={} > a.size()={}",
"invalid argument k.div_ceil(base2k)={} > a.size()={}",
size,
a.size()
);
}
let k_rem: usize = basek - (k % basek);
let mut a: Zn<&mut [u8]> = self.to_mut();
(0..a.size()).for_each(|j| a.at_mut(0, j)[0] = 0);
let a_size = a.size();
// If 2^{log_max + k_rem} < 2^{63}, then we can simply copy
// the value onto the last limb.
// Else we decompose the value in base 2^{basek}.
if log_max + k_rem < 63 || k_rem == basek {
a.at_mut(0, size - 1)[0] = data;
} else {
let mask: i64 = (1 << basek) - 1;
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size)
.rev()
.enumerate()
.for_each(|(j, j_rev)| {
a.at_mut(0, j_rev)[0] = (data >> (j * basek)) & mask;
})
for j in 0..a_size {
a.at_mut(0, j)[0] = 0
}
// Case where k % basek != 0.
if k_rem != basek {
let steps: usize = size.min(log_max.div_ceil(basek));
(size - steps..size).rev().for_each(|j| {
a.at_mut(0, j)[0] <<= k_rem;
})
a.at_mut(0, size - 1)[0] = data;
let mut carry: Vec<i64> = vec![0i64; 1];
let k_rem: usize = (base2k - (k % base2k)) % base2k;
for j in (0..size).rev() {
let slice = &mut a.at_mut(0, j)[..1];
if j == size - 1 {
ZnxRef::znx_normalize_first_step_inplace(base2k, k_rem, slice, &mut carry);
} else if j == 0 {
ZnxRef::znx_normalize_final_step_inplace(base2k, k_rem, slice, &mut carry);
} else {
ZnxRef::znx_normalize_middle_step_inplace(base2k, k_rem, slice, &mut carry);
}
}
}
}
impl<D: DataRef> Zn<D> {
pub fn decode_i64(&self, basek: usize, k: usize) -> i64 {
pub fn decode_i64(&self, base2k: usize, k: usize) -> i64 {
let a: Zn<&[u8]> = self.to_ref();
let size: usize = k.div_ceil(basek);
let size: usize = k.div_ceil(base2k);
let mut res: i64 = 0;
let rem: usize = basek - (k % basek);
let rem: usize = base2k - (k % base2k);
(0..size).for_each(|j| {
let x: i64 = a.at(0, j)[0];
if j == size - 1 && rem != basek {
let k_rem: usize = basek - rem;
if j == size - 1 && rem != base2k {
let k_rem: usize = (base2k - rem) % base2k;
res = (res << k_rem) + (x >> rem);
} else {
res = (res << basek) + x;
res = (res << base2k) + x;
}
});
res
}
pub fn decode_float(&self, basek: usize) -> Float {
pub fn decode_float(&self, base2k: usize) -> Float {
let a: Zn<&[u8]> = self.to_ref();
let size: usize = a.size();
let prec: u32 = (basek * size) as u32;
let prec: u32 = (base2k * size) as u32;
// 2^{basek}
let base: Float = Float::with_val(prec, (1 << basek) as f64);
let mut res: Float = Float::with_val(prec, (1 << basek) as f64);
// 2^{base2k}
let base: Float = Float::with_val(prec, (1 << base2k) as f64);
let mut res: Float = Float::with_val(prec, (1 << base2k) as f64);
// y[i] = sum x[j][i] * 2^{-basek*j}
// y[i] = sum x[j][i] * 2^{-base2k*j}
(0..size).for_each(|i| {
if i == 0 {
res.assign(a.at(0, size - i - 1)[0]);
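The float decode (truncated above) evaluates y = sum_j x[j] * 2^{-base2k*j}. One way to realize that fold with rug is to process the least-significant limb first, so each step is res = res / B + x; this is a sketch of the recurrence, not necessarily the crate's exact loop:

use rug::Float;

fn decode_float_model(base2k: usize, limbs: &[i64]) -> Float {
    let prec = (base2k * limbs.len()) as u32;
    // B = 2^{base2k}
    let base = Float::with_val(prec, (1u64 << base2k) as f64);
    let mut res = Float::with_val(prec, 0);
    // Least-significant limb first: res = res / B + x, which leaves
    // res = sum_j limbs[j] * 2^{-base2k * j} after the full pass.
    for &x in limbs.iter().rev() {
        res /= &base;
        res += x;
    }
    res
}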


@@ -1,7 +1,7 @@
use crate::{
alloc_aligned,
layouts::{
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, Reset, ToOwnedDeep, VecZnx, WriterTo,
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, ToOwnedDeep, VecZnx, WriterTo,
ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
},
source::Source,
@@ -54,7 +54,7 @@ impl<D: DataRef> ToOwnedDeep for MatZnx<D> {
impl<D: DataRef> fmt::Debug for MatZnx<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
write!(f, "{self}")
}
}
@@ -211,17 +211,6 @@ impl<D: DataMut> FillUniform for MatZnx<D> {
}
}
impl<D: DataMut> Reset for MatZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.size = 0;
self.rows = 0;
self.cols_in = 0;
self.cols_out = 0;
}
}
pub type MatZnxOwned = MatZnx<Vec<u8>>;
pub type MatZnxMut<'a> = MatZnx<&'a mut [u8]>;
pub type MatZnxRef<'a> = MatZnx<&'a [u8]>;
@@ -316,9 +305,9 @@ impl<D: DataRef> fmt::Display for MatZnx<D> {
)?;
for row_i in 0..self.rows {
writeln!(f, "Row {}:", row_i)?;
writeln!(f, "Row {row_i}:")?;
for col_i in 0..self.cols_in {
writeln!(f, "cols_in {}:", col_i)?;
writeln!(f, "cols_in {col_i}:")?;
writeln!(f, "{}:", self.at(row_i, col_i))?;
}
}


@@ -26,7 +26,7 @@ pub use vmp_pmat::*;
pub use zn::*;
pub use znx_base::*;
pub trait Data = PartialEq + Eq + Sized;
pub trait Data = PartialEq + Eq + Sized + Default;
pub trait DataRef = Data + AsRef<[u8]>;
pub trait DataMut = DataRef + AsMut<[u8]>;
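The Default bound added to the Data alias is what enables the new impl Default for VecZnx<D> further down (empty backing storage, zero dimensions). A minimal model of the pattern on stable Rust, using a regular supertrait instead of the crate's trait alias:

trait Data: PartialEq + Eq + Sized + Default {}
impl<T: PartialEq + Eq + Sized + Default> Data for T {}

// Vec<u8>, &[u8] and &mut [u8] all implement Default (empty buffer
// or empty slice), so every storage type keeps qualifying.
#[derive(Default, PartialEq, Eq)]
struct VecZnxModel<D: Data> {
    data: D,
    n: usize,
    cols: usize,
    size: usize,
}

fn main() {
    let empty: VecZnxModel<Vec<u8>> = VecZnxModel::default();
    assert_eq!(empty.n, 0);
}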


@@ -7,7 +7,7 @@ use rand_distr::{Distribution, weighted::WeightedIndex};
use crate::{
alloc_aligned,
layouts::{
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, Reset, ToOwnedDeep, VecZnx, WriterTo,
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, ToOwnedDeep, VecZnx, WriterTo,
ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
},
source::Source,
@@ -173,14 +173,6 @@ impl<D: DataMut> FillUniform for ScalarZnx<D> {
}
}
impl<D: DataMut> Reset for ScalarZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
}
}
pub type ScalarZnxOwned = ScalarZnx<Vec<u8>>;
impl<D: Data> ScalarZnx<D> {


@@ -7,10 +7,10 @@ use rug::{
use crate::layouts::{Backend, DataRef, VecZnx, VecZnxBig, VecZnxBigToRef, ZnxInfos};
impl<D: DataRef> VecZnx<D> {
pub fn std(&self, basek: usize, col: usize) -> f64 {
let prec: u32 = (self.size() * basek) as u32;
pub fn std(&self, base2k: usize, col: usize) -> f64 {
let prec: u32 = (self.size() * base2k) as u32;
let mut data: Vec<Float> = (0..self.n()).map(|_| Float::with_val(prec, 0)).collect();
self.decode_vec_float(basek, col, &mut data);
self.decode_vec_float(base2k, col, &mut data);
// std = sqrt(sum((xi - avg)^2) / n)
let mut avg: Float = Float::with_val(prec, 0);
data.iter().for_each(|x| {
@@ -29,7 +29,7 @@ impl<D: DataRef> VecZnx<D> {
}
impl<D: DataRef, B: Backend + Backend<ScalarBig = i64>> VecZnxBig<D, B> {
pub fn std(&self, basek: usize, col: usize) -> f64 {
pub fn std(&self, base2k: usize, col: usize) -> f64 {
let self_ref: VecZnxBig<&[u8], B> = self.to_ref();
let znx: VecZnx<&[u8]> = VecZnx {
data: self_ref.data,
@@ -38,6 +38,6 @@ impl<D: DataRef, B: Backend + Backend<ScalarBig = i64>> VecZnxBig<D, B> {
size: self_ref.size,
max_size: self_ref.max_size,
};
znx.std(basek, col)
znx.std(base2k, col)
}
}


@@ -176,7 +176,7 @@ impl<D: DataRef, B: Backend> fmt::Display for SvpPPol<D, B> {
writeln!(f, "SvpPPol(n={}, cols={})", self.n, self.cols)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
writeln!(f, "Column {col}:")?;
let coeffs = self.at(col, 0);
write!(f, "[")?;
@@ -187,7 +187,7 @@ impl<D: DataRef, B: Backend> fmt::Display for SvpPPol<D, B> {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
write!(f, "{coeff}")?;
}
if coeffs.len() > max_show {


@@ -6,8 +6,8 @@ use std::{
use crate::{
alloc_aligned,
layouts::{
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, Reset, ToOwnedDeep, WriterTo,
ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, ToOwnedDeep, WriterTo, ZnxInfos,
ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
},
source::Source,
};
@@ -25,6 +25,18 @@ pub struct VecZnx<D: Data> {
pub max_size: usize,
}
impl<D: Data + Default> Default for VecZnx<D> {
fn default() -> Self {
Self {
data: D::default(),
n: 0,
cols: 0,
size: 0,
max_size: 0,
}
}
}
impl<D: DataRef> DigestU64 for VecZnx<D> {
fn digest_u64(&self) -> u64 {
let mut h: DefaultHasher = DefaultHasher::new();
@@ -52,7 +64,7 @@ impl<D: DataRef> ToOwnedDeep for VecZnx<D> {
impl<D: DataRef> fmt::Debug for VecZnx<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
write!(f, "{self}")
}
}
@@ -162,10 +174,10 @@ impl<D: DataRef> fmt::Display for VecZnx<D> {
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
writeln!(f, "Column {col}:")?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
write!(f, " Size {size}: [")?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
@@ -174,7 +186,7 @@ impl<D: DataRef> fmt::Display for VecZnx<D> {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
write!(f, "{coeff}")?;
}
if coeffs.len() > max_show {
@@ -204,16 +216,6 @@ impl<D: DataMut> FillUniform for VecZnx<D> {
}
}
impl<D: DataMut> Reset for VecZnx<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
self.size = 0;
self.max_size = 0;
}
}
pub type VecZnxOwned = VecZnx<Vec<u8>>;
pub type VecZnxMut<'a> = VecZnx<&'a mut [u8]>;
pub type VecZnxRef<'a> = VecZnx<&'a [u8]>;


@@ -179,10 +179,10 @@ impl<D: DataRef, B: Backend> fmt::Display for VecZnxBig<D, B> {
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
writeln!(f, "Column {col}:")?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
write!(f, " Size {size}: [")?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
@@ -191,7 +191,7 @@ impl<D: DataRef, B: Backend> fmt::Display for VecZnxBig<D, B> {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
write!(f, "{coeff}")?;
}
if coeffs.len() > max_show {


@@ -199,10 +199,10 @@ impl<D: DataRef, B: Backend> fmt::Display for VecZnxDft<D, B> {
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
writeln!(f, "Column {col}:")?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
write!(f, " Size {size}: [")?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
@@ -211,7 +211,7 @@ impl<D: DataRef, B: Backend> fmt::Display for VecZnxDft<D, B> {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
write!(f, "{coeff}")?;
}
if coeffs.len() > max_show {


@@ -6,8 +6,8 @@ use std::{
use crate::{
alloc_aligned,
layouts::{
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, Reset, ToOwnedDeep, WriterTo,
ZnxInfos, ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
Data, DataMut, DataRef, DataView, DataViewMut, DigestU64, FillUniform, ReaderFrom, ToOwnedDeep, WriterTo, ZnxInfos,
ZnxSliceSize, ZnxView, ZnxViewMut, ZnxZero,
},
source::Source,
};
@@ -52,7 +52,7 @@ impl<D: DataRef> ToOwnedDeep for Zn<D> {
impl<D: DataRef> fmt::Debug for Zn<D> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
write!(f, "{self}")
}
}
@@ -162,10 +162,10 @@ impl<D: DataRef> fmt::Display for Zn<D> {
)?;
for col in 0..self.cols {
writeln!(f, "Column {}:", col)?;
writeln!(f, "Column {col}:")?;
for size in 0..self.size {
let coeffs = self.at(col, size);
write!(f, " Size {}: [", size)?;
write!(f, " Size {size}: [")?;
let max_show = 100;
let show_count = coeffs.len().min(max_show);
@@ -174,7 +174,7 @@ impl<D: DataRef> fmt::Display for Zn<D> {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{}", coeff)?;
write!(f, "{coeff}")?;
}
if coeffs.len() > max_show {
@@ -204,16 +204,6 @@ impl<D: DataMut> FillUniform for Zn<D> {
}
}
impl<D: DataMut> Reset for Zn<D> {
fn reset(&mut self) {
self.zero();
self.n = 0;
self.cols = 0;
self.size = 0;
self.max_size = 0;
}
}
pub type ZnOwned = Zn<Vec<u8>>;
pub type ZnMut<'a> = Zn<&'a mut [u8]>;
pub type ZnRef<'a> = Zn<&'a [u8]>;


@@ -119,7 +119,3 @@ where
pub trait FillUniform {
fn fill_uniform(&mut self, log_bound: usize, source: &mut Source);
}
pub trait Reset {
fn reset(&mut self);
}