From 0cf1229be59498d0641ea63d36b17885bc3cdb99 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Bossuat Date: Tue, 21 Jan 2025 13:41:41 +0100 Subject: [PATCH] some API refactoring --- math/benches/ring_rns.rs | 2 +- math/benches/sampling.rs | 2 +- math/src/dft/ntt.rs | 78 ++++++------- math/src/modulus.rs | 88 +++++++------- math/src/modulus/impl_u64/barrett.rs | 6 +- math/src/modulus/impl_u64/operations.rs | 149 ++++++++++++++---------- math/src/ring/impl_u64/ring.rs | 34 +++--- 7 files changed, 197 insertions(+), 162 deletions(-) diff --git a/math/benches/ring_rns.rs b/math/benches/ring_rns.rs index 6beb239..eadd686 100644 --- a/math/benches/ring_rns.rs +++ b/math/benches/ring_rns.rs @@ -1,4 +1,4 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use criterion::{criterion_group, criterion_main, Criterion}; use math::poly::PolyRNS; use math::ring::RingRNS; diff --git a/math/benches/sampling.rs b/math/benches/sampling.rs index 6fd8e6d..ac8b128 100644 --- a/math/benches/sampling.rs +++ b/math/benches/sampling.rs @@ -1,4 +1,4 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use criterion::{criterion_group, criterion_main, Criterion}; use math::poly::PolyRNS; use math::ring::RingRNS; use sampling::source::Source; diff --git a/math/src/dft/ntt.rs b/math/src/dft/ntt.rs index ff54e7d..9be6691 100644 --- a/math/src/dft/ntt.rs +++ b/math/src/dft/ntt.rs @@ -124,7 +124,7 @@ impl Table { izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each( |(a, psi)| { let (a, b) = a.split_at_mut(size); - self.dit_inplace::(&mut a[0], &mut b[0], *psi); + self.dit_inplace::(&mut a[0], &mut b[0], psi); debug_assert!( a[0] < self.two_q, "forward_inplace_core:: output {} > {} (2q-1)", @@ -143,7 +143,7 @@ impl Table { izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each( |(a, psi)| { let (a, b) = a.split_at_mut(size); - self.dit_inplace::(&mut a[0], &mut b[0], *psi); + self.dit_inplace::(&mut a[0], &mut b[0], psi); self.prime.barrett.reduce_assign::(&mut a[0]); self.prime.barrett.reduce_assign::(&mut b[0]); debug_assert!( @@ -165,31 +165,31 @@ impl Table { izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| { let (a, b) = a.split_at_mut(size); izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| { - self.dit_inplace::(&mut a[0], &mut b[0], *psi); - self.dit_inplace::(&mut a[1], &mut b[1], *psi); - self.dit_inplace::(&mut a[2], &mut b[2], *psi); - self.dit_inplace::(&mut a[3], &mut b[3], *psi); - self.dit_inplace::(&mut a[4], &mut b[4], *psi); - self.dit_inplace::(&mut a[5], &mut b[5], *psi); - self.dit_inplace::(&mut a[6], &mut b[6], *psi); - self.dit_inplace::(&mut a[7], &mut b[7], *psi); + self.dit_inplace::(&mut a[0], &mut b[0], psi); + self.dit_inplace::(&mut a[1], &mut b[1], psi); + self.dit_inplace::(&mut a[2], &mut b[2], psi); + self.dit_inplace::(&mut a[3], &mut b[3], psi); + self.dit_inplace::(&mut a[4], &mut b[4], psi); + self.dit_inplace::(&mut a[5], &mut b[5], psi); + self.dit_inplace::(&mut a[6], &mut b[6], psi); + self.dit_inplace::(&mut a[7], &mut b[7], psi); }); }); } else { izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| { let (a, b) = a.split_at_mut(size); - izip!(a, b).for_each(|(a, b)| self.dit_inplace::(a, b, *psi)); + izip!(a, b).for_each(|(a, b)| self.dit_inplace::(a, b, psi)); }); } } } #[inline(always)] - fn dit_inplace(&self, a: &mut u64, b: &mut u64, t: Barrett) { + fn dit_inplace(&self, a: &mut u64, b: &mut u64, t: &Barrett) { debug_assert!(*a < 
self.four_q, "a:{} q:{}", a, self.four_q); debug_assert!(*b < self.four_q, "b:{} q:{}", b, self.four_q); a.reduce_once_assign(self.two_q); - let bt: u64 = self.prime.barrett.mul_external::(t, *b); + let bt: u64 = self.prime.barrett.mul_external::(t, b); *b = *a + self.two_q - bt; *a += bt; if !LAZY { @@ -233,41 +233,41 @@ impl Table { let psi: Barrett = self.prime.barrett.prepare( self.prime .barrett - .mul_external::(n_inv, self.psi_backward_rev[1].0), + .mul_external::(&n_inv, &self.psi_backward_rev[1].0), ); izip!(a.chunks_exact_mut(2 * size)).for_each(|a| { let (a, b) = a.split_at_mut(size); izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| { - self.dif_last_inplace::(&mut a[0], &mut b[0], psi, n_inv); - self.dif_last_inplace::(&mut a[1], &mut b[1], psi, n_inv); - self.dif_last_inplace::(&mut a[2], &mut b[2], psi, n_inv); - self.dif_last_inplace::(&mut a[3], &mut b[3], psi, n_inv); - self.dif_last_inplace::(&mut a[4], &mut b[4], psi, n_inv); - self.dif_last_inplace::(&mut a[5], &mut b[5], psi, n_inv); - self.dif_last_inplace::(&mut a[6], &mut b[6], psi, n_inv); - self.dif_last_inplace::(&mut a[7], &mut b[7], psi, n_inv); + self.dif_last_inplace::(&mut a[0], &mut b[0], &psi, &n_inv); + self.dif_last_inplace::(&mut a[1], &mut b[1], &psi, &n_inv); + self.dif_last_inplace::(&mut a[2], &mut b[2], &psi, &n_inv); + self.dif_last_inplace::(&mut a[3], &mut b[3], &psi, &n_inv); + self.dif_last_inplace::(&mut a[4], &mut b[4], &psi, &n_inv); + self.dif_last_inplace::(&mut a[5], &mut b[5], &psi, &n_inv); + self.dif_last_inplace::(&mut a[6], &mut b[6], &psi, &n_inv); + self.dif_last_inplace::(&mut a[7], &mut b[7], &psi, &n_inv); }); }); } else if t >= 16 { izip!(a.chunks_exact_mut(t), &self.psi_backward_rev[m..]).for_each(|(a, psi)| { let (a, b) = a.split_at_mut(size); izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| { - self.dif_inplace::(&mut a[0], &mut b[0], *psi); - self.dif_inplace::(&mut a[1], &mut b[1], *psi); - self.dif_inplace::(&mut a[2], &mut b[2], *psi); - self.dif_inplace::(&mut a[3], &mut b[3], *psi); - self.dif_inplace::(&mut a[4], &mut b[4], *psi); - self.dif_inplace::(&mut a[5], &mut b[5], *psi); - self.dif_inplace::(&mut a[6], &mut b[6], *psi); - self.dif_inplace::(&mut a[7], &mut b[7], *psi); + self.dif_inplace::(&mut a[0], &mut b[0], psi); + self.dif_inplace::(&mut a[1], &mut b[1], psi); + self.dif_inplace::(&mut a[2], &mut b[2], psi); + self.dif_inplace::(&mut a[3], &mut b[3], psi); + self.dif_inplace::(&mut a[4], &mut b[4], psi); + self.dif_inplace::(&mut a[5], &mut b[5], psi); + self.dif_inplace::(&mut a[6], &mut b[6], psi); + self.dif_inplace::(&mut a[7], &mut b[7], psi); }); }); } else { izip!(a.chunks_exact_mut(2 * size), &self.psi_backward_rev[m..]).for_each( |(a, psi)| { let (a, b) = a.split_at_mut(size); - izip!(a, b).for_each(|(a, b)| self.dif_inplace::(a, b, *psi)); + izip!(a, b).for_each(|(a, b)| self.dif_inplace::(a, b, psi)); }, ); } @@ -275,13 +275,13 @@ impl Table { } #[inline(always)] - fn dif_inplace(&self, a: &mut u64, b: &mut u64, t: Barrett) { + fn dif_inplace(&self, a: &mut u64, b: &mut u64, t: &Barrett) { debug_assert!(*a < self.two_q, "a:{} q:{}", a, self.two_q); debug_assert!(*b < self.two_q, "b:{} q:{}", b, self.two_q); let d: u64 = self .prime .barrett - .mul_external::(t, *a + self.two_q - *b); + .mul_external::(t, &(*a + self.two_q - *b)); *a = *a + *b; a.reduce_once_assign(self.two_q); *b = d; @@ -295,8 +295,8 @@ impl Table { &self, a: &mut u64, b: &mut u64, - psi: Barrett, - n_inv: Barrett, + psi: &Barrett, + 
n_inv: &Barrett, ) { debug_assert!(*a < self.two_q); debug_assert!(*b < self.two_q); @@ -304,15 +304,15 @@ impl Table { let d: u64 = self .prime .barrett - .mul_external::(psi, *a + self.two_q - *b); - *a = self.prime.barrett.mul_external::(n_inv, *a + *b); + .mul_external::(psi, &(*a + self.two_q - *b)); + *a = self.prime.barrett.mul_external::(n_inv, &(*a + *b)); *b = d; } else { let d: u64 = self .prime .barrett - .mul_external::(psi, *a + self.two_q - *b); - *a = self.prime.barrett.mul_external::(n_inv, *a + *b); + .mul_external::(psi, &(*a + self.two_q - *b)); + *a = self.prime.barrett.mul_external::(n_inv, &(*a + *b)); *b = d; } } diff --git a/math/src/modulus.rs b/math/src/modulus.rs index ec9e2e1..c7ae56c 100644 --- a/math/src/modulus.rs +++ b/math/src/modulus.rs @@ -107,37 +107,37 @@ pub trait ScalarOperations { ); // Assigns a * b to c. - fn sa_mont_mul_sb_into_sc( + fn sa_mul_sb_montgomery_into_sc( &self, - a: &montgomery::Montgomery, - b: &O, + a: &O, + b: &montgomery::Montgomery, c: &mut O, ); // Assigns a * b to b. - fn sa_mont_mul_sb_into_sb( + fn sa_mul_sb_montgomery_into_sa( &self, - a: &montgomery::Montgomery, - b: &mut O, + b: &montgomery::Montgomery, + a: &mut O, ); // Assigns a * b to c. - fn sa_barrett_mul_sb_into_sc( + fn sa_mul_sb_barrett_into_sc( &self, - a: &barrett::Barrett, - b: &O, + a: &O, + b: &barrett::Barrett, c: &mut O, ); - // Assigns a * b to b. - fn sa_barrett_mul_sb_into_sb( + // Assigns a * b to a. + fn sa_mul_sb_barrett_into_sa( &self, - a: &barrett::Barrett, - b: &mut O, + b: &barrett::Barrett, + a: &mut O, ); // Assigns (a + q - b) * c to d. - fn sa_sub_sb_mul_sc_into_sd( + fn sa_sub_sb_mul_sc_barrett_into_sd( &self, a: &O, b: &O, @@ -146,7 +146,7 @@ pub trait ScalarOperations { ); // Assigns (a + q - b) * c to b. - fn sa_sub_sb_mul_sc_into_sb( + fn sa_sub_sb_mul_sc_barrett_into_sb( &self, a: &u64, c: &barrett::Barrett, @@ -154,7 +154,7 @@ pub trait ScalarOperations { ); // Assigns (a + b) * c to a. - fn sa_add_sb_mul_sc_into_sa( + fn sa_add_sb_mul_sc_barrett_into_sa( &self, b: &u64, c: &barrett::Barrett, @@ -162,7 +162,7 @@ pub trait ScalarOperations { ); // Assigns (a + b) * c to d. - fn sa_add_sb_mul_sc_into_sd( + fn sa_add_sb_mul_sc_barrett_into_sd( &self, a: &u64, b: &u64, @@ -171,7 +171,7 @@ pub trait ScalarOperations { ); // Assigns (a - b + c) * d to e. - fn sb_sub_sa_add_sc_mul_sd_into_se( + fn sb_sub_sa_add_sc_mul_sd_barrett_into_se( &self, a: &u64, b: &u64, @@ -180,7 +180,7 @@ pub trait ScalarOperations { e: &mut u64, ); - fn sb_sub_sa_add_sc_mul_sd_into_sa( + fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa( &self, b: &u64, c: &u64, @@ -281,37 +281,41 @@ pub trait VectorOperations { ); // vec(c) <- vec(a) * vec(b). - fn va_mont_mul_vb_into_vc( + fn va_mul_vb_montgomery_into_vc( &self, - a: &[montgomery::Montgomery], - b: &[O], + a: &[O], + b: &[montgomery::Montgomery], c: &mut [O], ); - // vec(b) <- vec(a) * vec(b). - fn va_mont_mul_vb_into_vb( + // vec(a) <- vec(a) * vec(b). + fn va_mul_vb_montgomery_into_va( &self, - a: &[montgomery::Montgomery], - b: &mut [O], + b: &[montgomery::Montgomery], + a: &mut [O], ); - // vec(b) <- vec(b) * scalar(a). - fn sa_barrett_mul_vb_into_vb( + // vec(b) <- vec(a) * scalar(b). + fn va_mul_sb_barrett_into_va( &self, - a: &barrett::Barrett, - b: &mut [u64], + b: &barrett::Barrett, + a: &mut [u64], ); - // vec(c) <- vec(b) * scalar(a). - fn sa_barrett_mul_vb_into_vc( + // vec(c) <- vec(a) * scalar(b). 
+ fn va_mul_sb_barrett_into_vc( &self, - a: &barrett::Barrett, - b: &[u64], + a: &[u64], + b: &barrett::Barrett, c: &mut [u64], ); // vec(d) <- (vec(a) + VBRANGE * q - vec(b)) * scalar(c). - fn va_sub_vb_mul_sc_into_vd( + fn va_sub_vb_mul_sc_barrett_into_vd< + const CHUNK: usize, + const VBRANGE: u8, + const REDUCE: REDUCEMOD, + >( &self, a: &[u64], b: &[u64], @@ -320,7 +324,11 @@ pub trait VectorOperations { ); // vec(b) <- (vec(a) + VBRANGE * q - vec(b)) * scalar(c). - fn va_sub_vb_mul_sc_into_vb( + fn va_sub_vb_mul_sc_barrett_into_vb< + const CHUNK: usize, + const VBRANGE: u8, + const REDUCE: REDUCEMOD, + >( &self, a: &[u64], c: &barrett::Barrett, @@ -328,7 +336,7 @@ pub trait VectorOperations { ); // vec(c) <- (vec(a) + scalar(b)) * scalar(c). - fn va_add_sb_mul_sc_into_vd( + fn va_add_sb_mul_sc_barrett_into_vd( &self, va: &[u64], sb: &u64, @@ -337,7 +345,7 @@ pub trait VectorOperations { ); // vec(a) <- (vec(a) + scalar(b)) * scalar(c). - fn va_add_sb_mul_sc_into_va( + fn va_add_sb_mul_sc_barrett_into_va( &self, sb: &u64, sc: &barrett::Barrett, @@ -345,7 +353,7 @@ pub trait VectorOperations { ); // vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(e). - fn vb_sub_va_add_sc_mul_sd_into_ve< + fn vb_sub_va_add_sc_mul_sd_barrett_into_ve< const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD, @@ -359,7 +367,7 @@ pub trait VectorOperations { ); // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e). - fn vb_sub_va_add_sc_mul_sd_into_va< + fn vb_sub_va_add_sc_mul_sd_barrett_into_va< const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD, diff --git a/math/src/modulus/impl_u64/barrett.rs b/math/src/modulus/impl_u64/barrett.rs index 36bbb1b..ee008b4 100644 --- a/math/src/modulus/impl_u64/barrett.rs +++ b/math/src/modulus/impl_u64/barrett.rs @@ -63,14 +63,14 @@ impl BarrettPrecomp { } #[inline(always)] - pub fn mul_external(&self, lhs: Barrett, rhs: u64) -> u64 { - let mut r: u64 = rhs; + pub fn mul_external(&self, lhs: &Barrett, rhs: &u64) -> u64 { + let mut r: u64 = *rhs; self.mul_external_assign::(lhs, &mut r); r } #[inline(always)] - pub fn mul_external_assign(&self, lhs: Barrett, rhs: &mut u64) { + pub fn mul_external_assign(&self, lhs: &Barrett, rhs: &mut u64) { let t: u64 = ((*lhs.quotient() as u128 * *rhs as u128) >> 64) as _; *rhs = (rhs.wrapping_mul(*lhs.value())).wrapping_sub(self.q.wrapping_mul(t)); self.reduce_assign::(rhs); diff --git a/math/src/modulus/impl_u64/operations.rs b/math/src/modulus/impl_u64/operations.rs index 8fab036..1df16fe 100644 --- a/math/src/modulus/impl_u64/operations.rs +++ b/math/src/modulus/impl_u64/operations.rs @@ -4,7 +4,7 @@ use crate::modulus::prime::Prime; use crate::modulus::{ScalarOperations, VectorOperations}; use crate::modulus::{NONE, REDUCEMOD}; use crate::{ - apply_ssv, apply_sv, apply_svv, apply_v, apply_vsssvv, apply_vssv, apply_vsv, apply_vv, + apply_ssv, apply_sv, apply_v, apply_vsssvv, apply_vssv, apply_vsv, apply_vv, apply_vvssv, apply_vvsv, apply_vvv, }; use itertools::izip; @@ -109,37 +109,41 @@ impl ScalarOperations for Prime { } #[inline(always)] - fn sa_mont_mul_sb_into_sc( + fn sa_mul_sb_montgomery_into_sc( &self, - a: &Montgomery, - b: &u64, + a: &u64, + b: &Montgomery, c: &mut u64, ) { *c = self.montgomery.mul_external::(*a, *b); } #[inline(always)] - fn sa_mont_mul_sb_into_sb(&self, a: &Montgomery, b: &mut u64) { - self.montgomery.mul_external_assign::(*a, b); + fn sa_mul_sb_montgomery_into_sa( + &self, + b: &Montgomery, + a: &mut u64, + ) { + self.montgomery.mul_external_assign::(*b, a); } #[inline(always)] - fn 
sa_barrett_mul_sb_into_sc( + fn sa_mul_sb_barrett_into_sc( &self, - a: &Barrett, - b: &u64, + a: &u64, + b: &Barrett, c: &mut u64, ) { - *c = self.barrett.mul_external::(*a, *b); + *c = self.barrett.mul_external::(b, a); } #[inline(always)] - fn sa_barrett_mul_sb_into_sb(&self, a: &Barrett, b: &mut u64) { - self.barrett.mul_external_assign::(*a, b); + fn sa_mul_sb_barrett_into_sa(&self, b: &Barrett, a: &mut u64) { + self.barrett.mul_external_assign::(b, a); } #[inline(always)] - fn sa_sub_sb_mul_sc_into_sd( + fn sa_sub_sb_mul_sc_barrett_into_sd( &self, a: &u64, b: &u64, @@ -152,43 +156,43 @@ impl ScalarOperations for Prime { 4 => *d = a + self.four_q - b, _ => unreachable!("invalid SBRANGE argument"), } - self.barrett.mul_external_assign::(*c, d); + self.barrett.mul_external_assign::(c, d); } #[inline(always)] - fn sa_sub_sb_mul_sc_into_sb( + fn sa_sub_sb_mul_sc_barrett_into_sb( &self, a: &u64, c: &Barrett, b: &mut u64, ) { self.sa_sub_sb_into_sb::(a, b); - self.barrett.mul_external_assign::(*c, b); + self.barrett.mul_external_assign::(c, b); } #[inline(always)] - fn sa_add_sb_mul_sc_into_sd( + fn sa_add_sb_mul_sc_barrett_into_sd( &self, a: &u64, b: &u64, c: &Barrett, d: &mut u64, ) { - *d = self.barrett.mul_external::(*c, *a + *b); + *d = self.barrett.mul_external::(c, &(*a + b)); } #[inline(always)] - fn sa_add_sb_mul_sc_into_sa( + fn sa_add_sb_mul_sc_barrett_into_sa( &self, b: &u64, c: &Barrett, a: &mut u64, ) { - *a = self.barrett.mul_external::(*c, *a + *b); + *a = self.barrett.mul_external::(c, &(*a + b)); } #[inline(always)] - fn sb_sub_sa_add_sc_mul_sd_into_se( + fn sb_sub_sa_add_sc_mul_sd_barrett_into_se( &self, a: &u64, b: &u64, @@ -197,11 +201,11 @@ impl ScalarOperations for Prime { e: &mut u64, ) { self.sa_sub_sb_into_sc::(&(b + c), a, e); - self.barrett.mul_external_assign::(*d, e); + self.barrett.mul_external_assign::(d, e); } #[inline(always)] - fn sb_sub_sa_add_sc_mul_sd_into_sa( + fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa( &self, b: &u64, c: &u64, @@ -209,7 +213,7 @@ impl ScalarOperations for Prime { a: &mut u64, ) { self.sa_sub_sb_into_sb::(&(b + c), a); - self.barrett.mul_external_assign::(*d, a); + self.barrett.mul_external_assign::(d, a); } #[inline(always)] @@ -398,34 +402,15 @@ impl VectorOperations for Prime { } #[inline(always)] - fn va_mont_mul_vb_into_vc( + fn va_mul_vb_montgomery_into_vc( &self, a: &[Montgomery], b: &[u64], c: &mut [u64], ) { - apply_vvv!(self, Self::sa_mont_mul_sb_into_sc::, a, b, c, CHUNK); - } - - #[inline(always)] - fn va_mont_mul_vb_into_vb( - &self, - a: &[Montgomery], - b: &mut [u64], - ) { - apply_vv!(self, Self::sa_mont_mul_sb_into_sb::, a, b, CHUNK); - } - - #[inline(always)] - fn sa_barrett_mul_vb_into_vc( - &self, - a: &Barrett, - b: &[u64], - c: &mut [u64], - ) { - apply_svv!( + apply_vvv!( self, - Self::sa_barrett_mul_sb_into_sc::, + Self::sa_mul_sb_montgomery_into_sc::, a, b, c, @@ -434,15 +419,51 @@ impl VectorOperations for Prime { } #[inline(always)] - fn sa_barrett_mul_vb_into_vb( + fn va_mul_vb_montgomery_into_va( &self, - a: &Barrett, - b: &mut [u64], + b: &[Montgomery], + a: &mut [u64], ) { - apply_sv!(self, Self::sa_barrett_mul_sb_into_sb::, a, b, CHUNK); + apply_vv!( + self, + Self::sa_mul_sb_montgomery_into_sa::, + b, + a, + CHUNK + ); } - fn va_sub_vb_mul_sc_into_vd( + #[inline(always)] + fn va_mul_sb_barrett_into_vc( + &self, + a: &[u64], + b: &Barrett, + c: &mut [u64], + ) { + apply_vsv!( + self, + Self::sa_mul_sb_barrett_into_sc::, + a, + b, + c, + CHUNK + ); + } + + #[inline(always)] + fn va_mul_sb_barrett_into_va( 
+ &self, + b: &Barrett, + a: &mut [u64], + ) { + apply_sv!(self, Self::sa_mul_sb_barrett_into_sa::, b, a, CHUNK); + } + + fn va_sub_vb_mul_sc_barrett_into_vd< + const CHUNK: usize, + const VBRANGE: u8, + const REDUCE: REDUCEMOD, + >( &self, a: &[u64], b: &[u64], @@ -451,7 +472,7 @@ impl VectorOperations for Prime { ) { apply_vvsv!( self, - Self::sa_sub_sb_mul_sc_into_sd::, + Self::sa_sub_sb_mul_sc_barrett_into_sd::, a, b, c, @@ -460,7 +481,11 @@ impl VectorOperations for Prime { ); } - fn va_sub_vb_mul_sc_into_vb( + fn va_sub_vb_mul_sc_barrett_into_vb< + const CHUNK: usize, + const VBRANGE: u8, + const REDUCE: REDUCEMOD, + >( &self, a: &[u64], b: &Barrett, @@ -468,7 +493,7 @@ impl VectorOperations for Prime { ) { apply_vsv!( self, - Self::sa_sub_sb_mul_sc_into_sb::, + Self::sa_sub_sb_mul_sc_barrett_into_sb::, a, b, c, @@ -477,7 +502,7 @@ impl VectorOperations for Prime { } // vec(a) <- (vec(a) + scalar(b)) * scalar(c); - fn va_add_sb_mul_sc_into_va( + fn va_add_sb_mul_sc_barrett_into_va( &self, b: &u64, c: &Barrett, @@ -485,7 +510,7 @@ impl VectorOperations for Prime { ) { apply_ssv!( self, - Self::sa_add_sb_mul_sc_into_sa::, + Self::sa_add_sb_mul_sc_barrett_into_sa::, b, c, a, @@ -494,7 +519,7 @@ impl VectorOperations for Prime { } // vec(a) <- (vec(a) + scalar(b)) * scalar(c); - fn va_add_sb_mul_sc_into_vd( + fn va_add_sb_mul_sc_barrett_into_vd( &self, a: &[u64], b: &u64, @@ -503,7 +528,7 @@ impl VectorOperations for Prime { ) { apply_vssv!( self, - Self::sa_add_sb_mul_sc_into_sd::, + Self::sa_add_sb_mul_sc_barrett_into_sd::, a, b, c, @@ -513,7 +538,7 @@ impl VectorOperations for Prime { } // vec(e) <- (vec(a) - vec(b) + scalar(c)) * scalar(e). - fn vb_sub_va_add_sc_mul_sd_into_ve< + fn vb_sub_va_add_sc_mul_sd_barrett_into_ve< const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD, @@ -527,7 +552,7 @@ impl VectorOperations for Prime { ) { apply_vvssv!( self, - Self::sb_sub_sa_add_sc_mul_sd_into_se::, + Self::sb_sub_sa_add_sc_mul_sd_barrett_into_se::, va, vb, sc, @@ -538,7 +563,7 @@ impl VectorOperations for Prime { } // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e). 
- fn vb_sub_va_add_sc_mul_sd_into_va< + fn vb_sub_va_add_sc_mul_sd_barrett_into_va< const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD, @@ -551,7 +576,7 @@ impl VectorOperations for Prime { ) { apply_vssv!( self, - Self::sb_sub_sa_add_sc_mul_sd_into_sa::, + Self::sb_sub_sa_add_sc_mul_sd_barrett_into_sa::, vb, sc, sd, diff --git a/math/src/ring/impl_u64/ring.rs b/math/src/ring/impl_u64/ring.rs index c646fe2..73667f7 100644 --- a/math/src/ring/impl_u64/ring.rs +++ b/math/src/ring/impl_u64/ring.rs @@ -125,7 +125,7 @@ impl Ring { ) { debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n()); self.modulus - .va_add_sb_mul_sc_into_va::(b, c, &mut a.0); + .va_add_sb_mul_sc_barrett_into_va::(b, c, &mut a.0); } #[inline(always)] @@ -139,7 +139,7 @@ impl Ring { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(d.n() == self.n(), "c.n()={} != n={}", d.n(), self.n()); self.modulus - .va_add_sb_mul_sc_into_vd::(&a.0, b, c, &mut d.0); + .va_add_sb_mul_sc_barrett_into_vd::(&a.0, b, c, &mut d.0); } #[inline(always)] @@ -220,7 +220,7 @@ impl Ring { debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); self.modulus - .va_mont_mul_vb_into_vc::(&a.0, &b.0, &mut c.0); + .va_mul_vb_montgomery_into_vc::(&a.0, &b.0, &mut c.0); } #[inline(always)] @@ -232,7 +232,7 @@ impl Ring { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); self.modulus - .va_mont_mul_vb_into_vb::(&b.0, &mut a.0); + .va_mul_vb_montgomery_into_va::(&b.0, &mut a.0); } #[inline(always)] @@ -244,9 +244,9 @@ impl Ring { ) { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); - self.modulus.sa_barrett_mul_vb_into_vc::( - &self.modulus.barrett.prepare(*b), + self.modulus.va_mul_sb_barrett_into_vc::( &a.0, + &self.modulus.barrett.prepare(*b), &mut c.0, ); } @@ -254,7 +254,7 @@ impl Ring { #[inline(always)] pub fn a_mul_b_scalar_into_a(&self, b: &u64, a: &mut Poly) { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); - self.modulus.sa_barrett_mul_vb_into_vb::( + self.modulus.va_mul_sb_barrett_into_va::( &self .modulus .barrett @@ -271,19 +271,19 @@ impl Ring { ) { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); self.modulus - .sa_barrett_mul_vb_into_vb::(b, &mut a.0); + .va_mul_sb_barrett_into_va::(b, &mut a.0); } #[inline(always)] pub fn a_mul_b_scalar_barrett_into_c( &self, - a: &Barrett, - b: &Poly, + a: &Poly, + b: &Barrett, c: &mut Poly, ) { - debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); + debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); self.modulus - .sa_barrett_mul_vb_into_vc::(a, &b.0, &mut c.0); + .va_mul_sb_barrett_into_vc::(&a.0, b, &mut c.0); } #[inline(always)] @@ -298,7 +298,7 @@ impl Ring { debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n()); self.modulus - .va_sub_vb_mul_sc_into_vd::(&a.0, &b.0, c, &mut d.0); + .va_sub_vb_mul_sc_barrett_into_vd::(&a.0, &b.0, c, &mut d.0); } #[inline(always)] @@ -311,7 +311,7 @@ impl Ring { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); self.modulus - .va_sub_vb_mul_sc_into_vb::(&b.0, c, &mut a.0); + 
.va_sub_vb_mul_sc_barrett_into_vb::(&b.0, c, &mut a.0); } #[inline(always)] @@ -330,7 +330,9 @@ impl Ring { debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(e.n() == self.n(), "e.n()={} != n={}", e.n(), self.n()); self.modulus - .vb_sub_va_add_sc_mul_sd_into_ve::(&a.0, &b.0, c, d, &mut e.0); + .vb_sub_va_add_sc_mul_sd_barrett_into_ve::( + &a.0, &b.0, c, d, &mut e.0, + ); } #[inline(always)] @@ -347,7 +349,7 @@ impl Ring { debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); self.modulus - .vb_sub_va_add_sc_mul_sd_into_va::(&b.0, c, d, &mut a.0); + .vb_sub_va_add_sc_mul_sd_barrett_into_va::(&b.0, c, d, &mut a.0); } pub fn a_rsh_scalar_b_mask_scalar_c_into_d(
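Editor's note (not part of the patch): the refactor above renames the modular-arithmetic helpers to an operand-ordered scheme (`va_mul_sb_barrett_into_vc` reads as "vector a times prepared scalar b, result into vector c") and switches prepared `Barrett`/`Montgomery` constants to pass-by-reference, e.g. `mul_external(&self, lhs: &Barrett, rhs: &u64)`. The sketch below only illustrates that calling convention using a standard single-word Barrett (Shoup-style) constant multiplication; the struct layout, field names, modulus, and `main` driver are assumptions made for the example and are not the library's actual implementation.

    // --- Illustrative sketch (not part of the patch) -----------------------
    // Assumed, simplified stand-ins for the library's Barrett types, showing
    // the by-reference calling convention and the `va_*_into_vc` ordering.

    struct Barrett {
        value: u64,    // constant operand, already reduced mod q (assumption)
        quotient: u64, // floor(value * 2^64 / q), precomputed once (assumption)
    }

    struct BarrettPrecomp {
        q: u64,
    }

    impl BarrettPrecomp {
        fn prepare(&self, value: u64) -> Barrett {
            let quotient = ((value as u128) << 64) / self.q as u128;
            Barrett { value, quotient: quotient as u64 }
        }

        // Mirrors the patched signature `mul_external(&self, lhs: &Barrett, rhs: &u64) -> u64`:
        // returns lhs.value * rhs mod q using the precomputed quotient,
        // with one final conditional subtraction.
        fn mul_external(&self, lhs: &Barrett, rhs: &u64) -> u64 {
            let t = ((lhs.quotient as u128 * *rhs as u128) >> 64) as u64;
            let r = rhs.wrapping_mul(lhs.value).wrapping_sub(self.q.wrapping_mul(t));
            if r >= self.q { r - self.q } else { r }
        }

        // Vector variant in the spirit of the renamed `va_mul_sb_barrett_into_vc`:
        // c[i] = a[i] * b mod q, with the prepared scalar passed by reference.
        fn va_mul_sb_barrett_into_vc(&self, a: &[u64], b: &Barrett, c: &mut [u64]) {
            for (ai, ci) in a.iter().zip(c.iter_mut()) {
                *ci = self.mul_external(b, ai);
            }
        }
    }

    fn main() {
        // Example 64-bit prime modulus (assumption, chosen only for the demo).
        let p = BarrettPrecomp { q: 0xffff_ffff_0000_0001 };
        let b = p.prepare(3);
        let a = [1u64, 2, 5];
        let mut c = [0u64; 3];
        p.va_mul_sb_barrett_into_vc(&a, &b, &mut c);
        assert_eq!(c, [3, 6, 15]); // element-wise a[i] * 3 mod q
    }

Passing the prepared `(value, quotient)` pair by reference rather than by value, as the NTT butterflies (`dit_inplace`, `dif_inplace`) now do with `psi` and `n_inv`, presumably avoids copying the constant at every call in the hot loops; the sketch follows the same convention.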