some API refactoring

Jean-Philippe Bossuat
2025-01-21 13:41:41 +01:00
parent 3e13218791
commit 0cf1229be5
7 changed files with 197 additions and 162 deletions

View File

@@ -1,4 +1,4 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use criterion::{criterion_group, criterion_main, Criterion};
use math::poly::PolyRNS;
use math::ring::RingRNS;

View File

@@ -1,4 +1,4 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use criterion::{criterion_group, criterion_main, Criterion};
use math::poly::PolyRNS;
use math::ring::RingRNS;
use sampling::source::Source;

View File

@@ -124,7 +124,7 @@ impl Table<u64> {
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(
|(a, psi)| {
let (a, b) = a.split_at_mut(size);
self.dit_inplace::<false>(&mut a[0], &mut b[0], *psi);
self.dit_inplace::<false>(&mut a[0], &mut b[0], psi);
debug_assert!(
a[0] < self.two_q,
"forward_inplace_core::<LAZY=true> output {} > {} (2q-1)",
@@ -143,7 +143,7 @@ impl Table<u64> {
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(
|(a, psi)| {
let (a, b) = a.split_at_mut(size);
self.dit_inplace::<true>(&mut a[0], &mut b[0], *psi);
self.dit_inplace::<true>(&mut a[0], &mut b[0], psi);
self.prime.barrett.reduce_assign::<BARRETT>(&mut a[0]);
self.prime.barrett.reduce_assign::<BARRETT>(&mut b[0]);
debug_assert!(
@@ -165,31 +165,31 @@ impl Table<u64> {
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| {
let (a, b) = a.split_at_mut(size);
izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| {
self.dit_inplace::<true>(&mut a[0], &mut b[0], *psi);
self.dit_inplace::<true>(&mut a[1], &mut b[1], *psi);
self.dit_inplace::<true>(&mut a[2], &mut b[2], *psi);
self.dit_inplace::<true>(&mut a[3], &mut b[3], *psi);
self.dit_inplace::<true>(&mut a[4], &mut b[4], *psi);
self.dit_inplace::<true>(&mut a[5], &mut b[5], *psi);
self.dit_inplace::<true>(&mut a[6], &mut b[6], *psi);
self.dit_inplace::<true>(&mut a[7], &mut b[7], *psi);
self.dit_inplace::<true>(&mut a[0], &mut b[0], psi);
self.dit_inplace::<true>(&mut a[1], &mut b[1], psi);
self.dit_inplace::<true>(&mut a[2], &mut b[2], psi);
self.dit_inplace::<true>(&mut a[3], &mut b[3], psi);
self.dit_inplace::<true>(&mut a[4], &mut b[4], psi);
self.dit_inplace::<true>(&mut a[5], &mut b[5], psi);
self.dit_inplace::<true>(&mut a[6], &mut b[6], psi);
self.dit_inplace::<true>(&mut a[7], &mut b[7], psi);
});
});
} else {
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| {
let (a, b) = a.split_at_mut(size);
izip!(a, b).for_each(|(a, b)| self.dit_inplace::<true>(a, b, *psi));
izip!(a, b).for_each(|(a, b)| self.dit_inplace::<true>(a, b, psi));
});
}
}
}
#[inline(always)]
fn dit_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: Barrett<u64>) {
fn dit_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: &Barrett<u64>) {
debug_assert!(*a < self.four_q, "a:{} q:{}", a, self.four_q);
debug_assert!(*b < self.four_q, "b:{} q:{}", b, self.four_q);
a.reduce_once_assign(self.two_q);
let bt: u64 = self.prime.barrett.mul_external::<NONE>(t, *b);
let bt: u64 = self.prime.barrett.mul_external::<NONE>(t, b);
*b = *a + self.two_q - bt;
*a += bt;
if !LAZY {
@@ -233,41 +233,41 @@ impl Table<u64> {
let psi: Barrett<u64> = self.prime.barrett.prepare(
self.prime
.barrett
.mul_external::<ONCE>(n_inv, self.psi_backward_rev[1].0),
.mul_external::<ONCE>(&n_inv, &self.psi_backward_rev[1].0),
);
izip!(a.chunks_exact_mut(2 * size)).for_each(|a| {
let (a, b) = a.split_at_mut(size);
izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| {
self.dif_last_inplace::<LAZY>(&mut a[0], &mut b[0], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[1], &mut b[1], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[2], &mut b[2], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[3], &mut b[3], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[4], &mut b[4], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[5], &mut b[5], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[6], &mut b[6], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[7], &mut b[7], psi, n_inv);
self.dif_last_inplace::<LAZY>(&mut a[0], &mut b[0], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[1], &mut b[1], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[2], &mut b[2], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[3], &mut b[3], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[4], &mut b[4], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[5], &mut b[5], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[6], &mut b[6], &psi, &n_inv);
self.dif_last_inplace::<LAZY>(&mut a[7], &mut b[7], &psi, &n_inv);
});
});
} else if t >= 16 {
izip!(a.chunks_exact_mut(t), &self.psi_backward_rev[m..]).for_each(|(a, psi)| {
let (a, b) = a.split_at_mut(size);
izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| {
self.dif_inplace::<true>(&mut a[0], &mut b[0], *psi);
self.dif_inplace::<true>(&mut a[1], &mut b[1], *psi);
self.dif_inplace::<true>(&mut a[2], &mut b[2], *psi);
self.dif_inplace::<true>(&mut a[3], &mut b[3], *psi);
self.dif_inplace::<true>(&mut a[4], &mut b[4], *psi);
self.dif_inplace::<true>(&mut a[5], &mut b[5], *psi);
self.dif_inplace::<true>(&mut a[6], &mut b[6], *psi);
self.dif_inplace::<true>(&mut a[7], &mut b[7], *psi);
self.dif_inplace::<true>(&mut a[0], &mut b[0], psi);
self.dif_inplace::<true>(&mut a[1], &mut b[1], psi);
self.dif_inplace::<true>(&mut a[2], &mut b[2], psi);
self.dif_inplace::<true>(&mut a[3], &mut b[3], psi);
self.dif_inplace::<true>(&mut a[4], &mut b[4], psi);
self.dif_inplace::<true>(&mut a[5], &mut b[5], psi);
self.dif_inplace::<true>(&mut a[6], &mut b[6], psi);
self.dif_inplace::<true>(&mut a[7], &mut b[7], psi);
});
});
} else {
izip!(a.chunks_exact_mut(2 * size), &self.psi_backward_rev[m..]).for_each(
|(a, psi)| {
let (a, b) = a.split_at_mut(size);
izip!(a, b).for_each(|(a, b)| self.dif_inplace::<true>(a, b, *psi));
izip!(a, b).for_each(|(a, b)| self.dif_inplace::<true>(a, b, psi));
},
);
}
@@ -275,13 +275,13 @@ impl Table<u64> {
}
#[inline(always)]
fn dif_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: Barrett<u64>) {
fn dif_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: &Barrett<u64>) {
debug_assert!(*a < self.two_q, "a:{} q:{}", a, self.two_q);
debug_assert!(*b < self.two_q, "b:{} q:{}", b, self.two_q);
let d: u64 = self
.prime
.barrett
.mul_external::<NONE>(t, *a + self.two_q - *b);
.mul_external::<NONE>(t, &(*a + self.two_q - *b));
*a = *a + *b;
a.reduce_once_assign(self.two_q);
*b = d;
@@ -295,8 +295,8 @@ impl Table<u64> {
&self,
a: &mut u64,
b: &mut u64,
psi: Barrett<u64>,
n_inv: Barrett<u64>,
psi: &Barrett<u64>,
n_inv: &Barrett<u64>,
) {
debug_assert!(*a < self.two_q);
debug_assert!(*b < self.two_q);
@@ -304,15 +304,15 @@ impl Table<u64> {
let d: u64 = self
.prime
.barrett
.mul_external::<NONE>(psi, *a + self.two_q - *b);
*a = self.prime.barrett.mul_external::<NONE>(n_inv, *a + *b);
.mul_external::<NONE>(psi, &(*a + self.two_q - *b));
*a = self.prime.barrett.mul_external::<NONE>(n_inv, &(*a + *b));
*b = d;
} else {
let d: u64 = self
.prime
.barrett
.mul_external::<ONCE>(psi, *a + self.two_q - *b);
*a = self.prime.barrett.mul_external::<ONCE>(n_inv, *a + *b);
.mul_external::<ONCE>(psi, &(*a + self.two_q - *b));
*a = self.prime.barrett.mul_external::<ONCE>(n_inv, &(*a + *b));
*b = d;
}
}
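A note on the three butterflies above: they now borrow their precomputed twiddle factors as &Barrett<u64> instead of taking them by value, so the call sites pass psi and n_inv by reference. Ignoring the lazy 2q/4q coefficient ranges, the standalone sketch below (illustrative helpers, not the crate's API; it assumes q < 2^63 so none of the sums overflow) shows what the forward and inverse butterflies compute:

// Forward (decimation-in-time): (a, b) -> (a + t*b, a - t*b) mod q,
// mirroring dit_inplace where bt = t*b, b' = a + 2q - bt, a' = a + bt.
fn dit(a: u64, b: u64, t: u64, q: u64) -> (u64, u64) {
    let bt = ((b as u128 * t as u128) % q as u128) as u64;
    ((a + bt) % q, (a + q - bt) % q)
}

// Inverse (decimation-in-frequency): (a, b) -> (a + b, t*(a - b)) mod q,
// mirroring dif_inplace; dif_last_inplace folds the n^-1 scaling of the
// final stage into the same multiplications.
fn dif(a: u64, b: u64, t: u64, q: u64) -> (u64, u64) {
    let d = (((a + q - b) as u128 * t as u128) % q as u128) as u64;
    ((a + b) % q, d)
}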

View File

@@ -107,37 +107,37 @@ pub trait ScalarOperations<O> {
);
// Assigns a * b to c.
fn sa_mont_mul_sb_into_sc<const REDUCE: REDUCEMOD>(
fn sa_mul_sb_montgomery_into_sc<const REDUCE: REDUCEMOD>(
&self,
a: &montgomery::Montgomery<O>,
b: &O,
a: &O,
b: &montgomery::Montgomery<O>,
c: &mut O,
);
// Assigns a * b to b.
fn sa_mont_mul_sb_into_sb<const REDUCE: REDUCEMOD>(
fn sa_mul_sb_montgomery_into_sa<const REDUCE: REDUCEMOD>(
&self,
a: &montgomery::Montgomery<O>,
b: &mut O,
b: &montgomery::Montgomery<O>,
a: &mut O,
);
// Assigns a * b to c.
fn sa_barrett_mul_sb_into_sc<const REDUCE: REDUCEMOD>(
fn sa_mul_sb_barrett_into_sc<const REDUCE: REDUCEMOD>(
&self,
a: &barrett::Barrett<O>,
b: &O,
a: &O,
b: &barrett::Barrett<O>,
c: &mut O,
);
// Assigns a * b to b.
fn sa_barrett_mul_sb_into_sb<const REDUCE: REDUCEMOD>(
// Assigns a * b to a.
fn sa_mul_sb_barrett_into_sa<const REDUCE: REDUCEMOD>(
&self,
a: &barrett::Barrett<O>,
b: &mut O,
b: &barrett::Barrett<O>,
a: &mut O,
);
// Assigns (a + q - b) * c to d.
fn sa_sub_sb_mul_sc_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sa_sub_sb_mul_sc_barrett_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &O,
b: &O,
@@ -146,7 +146,7 @@ pub trait ScalarOperations<O> {
);
// Assigns (a + q - b) * c to b.
fn sa_sub_sb_mul_sc_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sa_sub_sb_mul_sc_barrett_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &u64,
c: &barrett::Barrett<u64>,
@@ -154,7 +154,7 @@ pub trait ScalarOperations<O> {
);
// Assigns (a + b) * c to a.
fn sa_add_sb_mul_sc_into_sa<const REDUCE: REDUCEMOD>(
fn sa_add_sb_mul_sc_barrett_into_sa<const REDUCE: REDUCEMOD>(
&self,
b: &u64,
c: &barrett::Barrett<u64>,
@@ -162,7 +162,7 @@ pub trait ScalarOperations<O> {
);
// Assigns (a + b) * c to d.
fn sa_add_sb_mul_sc_into_sd<const REDUCE: REDUCEMOD>(
fn sa_add_sb_mul_sc_barrett_into_sd<const REDUCE: REDUCEMOD>(
&self,
a: &u64,
b: &u64,
@@ -171,7 +171,7 @@ pub trait ScalarOperations<O> {
);
// Assigns (b - a + c) * d to e.
fn sb_sub_sa_add_sc_mul_sd_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sb_sub_sa_add_sc_mul_sd_barrett_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &u64,
b: &u64,
@@ -180,7 +180,7 @@ pub trait ScalarOperations<O> {
e: &mut u64,
);
fn sb_sub_sa_add_sc_mul_sd_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
b: &u64,
c: &u64,
@@ -281,37 +281,41 @@ pub trait VectorOperations<O> {
);
// vec(c) <- vec(a) * vec(b).
fn va_mont_mul_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_mul_vb_montgomery_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &[montgomery::Montgomery<O>],
b: &[O],
a: &[O],
b: &[montgomery::Montgomery<O>],
c: &mut [O],
);
// vec(b) <- vec(a) * vec(b).
fn va_mont_mul_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
// vec(a) <- vec(a) * vec(b).
fn va_mul_vb_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &[montgomery::Montgomery<O>],
b: &mut [O],
b: &[montgomery::Montgomery<O>],
a: &mut [O],
);
// vec(b) <- vec(b) * scalar(a).
fn sa_barrett_mul_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
// vec(a) <- vec(a) * scalar(b).
fn va_mul_sb_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &barrett::Barrett<u64>,
b: &mut [u64],
b: &barrett::Barrett<u64>,
a: &mut [u64],
);
// vec(c) <- vec(b) * scalar(a).
fn sa_barrett_mul_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
// vec(c) <- vec(a) * scalar(b).
fn va_mul_sb_barrett_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &barrett::Barrett<u64>,
b: &[u64],
a: &[u64],
b: &barrett::Barrett<u64>,
c: &mut [u64],
);
// vec(d) <- (vec(a) + VBRANGE * q - vec(b)) * scalar(c).
fn va_sub_vb_mul_sc_into_vd<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
fn va_sub_vb_mul_sc_barrett_into_vd<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
a: &[u64],
b: &[u64],
@@ -320,7 +324,11 @@ pub trait VectorOperations<O> {
);
// vec(b) <- (vec(a) + VBRANGE * q - vec(b)) * scalar(c).
fn va_sub_vb_mul_sc_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
fn va_sub_vb_mul_sc_barrett_into_vb<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
a: &[u64],
c: &barrett::Barrett<u64>,
@@ -328,7 +336,7 @@ pub trait VectorOperations<O> {
);
// vec(d) <- (vec(a) + scalar(b)) * scalar(c).
fn va_add_sb_mul_sc_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_add_sb_mul_sc_barrett_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
va: &[u64],
sb: &u64,
@@ -337,7 +345,7 @@ pub trait VectorOperations<O> {
);
// vec(a) <- (vec(a) + scalar(b)) * scalar(c).
fn va_add_sb_mul_sc_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_add_sb_mul_sc_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
sb: &u64,
sc: &barrett::Barrett<u64>,
@@ -345,7 +353,7 @@ pub trait VectorOperations<O> {
);
// vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(d).
fn vb_sub_va_add_sc_mul_sd_into_ve<
fn vb_sub_va_add_sc_mul_sd_barrett_into_ve<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
@@ -359,7 +367,7 @@ pub trait VectorOperations<O> {
);
// vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(d).
fn vb_sub_va_add_sc_mul_sd_into_va<
fn vb_sub_va_add_sc_mul_sd_barrett_into_va<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,

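The renames above follow a single convention: the method name lists the operands in argument order (sa/sb for scalars, va/vb for vectors), montgomery or barrett marks the operand passed in precomputed form, and the into_* suffix names the destination. A hedged usage sketch of the scalar Barrett variant, assuming a Prime<u64> is already constructed (not shown in this diff), that its barrett field and prepare are reachable from outside as they are used internally, and that the module paths below match the crate layout:

use math::modulus::barrett::Barrett;
use math::modulus::prime::Prime;
use math::modulus::{ScalarOperations, ONCE};

// c <- a * b mod q, with b supplied once in precomputed Barrett form.
fn scalar_mul(prime: &Prime<u64>, a: u64, b: u64) -> u64 {
    let b_prep: Barrett<u64> = prime.barrett.prepare(b); // reuse across many calls
    let mut c: u64 = 0;
    prime.sa_mul_sb_barrett_into_sc::<ONCE>(&a, &b_prep, &mut c);
    c
}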
View File

@@ -63,14 +63,14 @@ impl BarrettPrecomp<u64> {
}
#[inline(always)]
pub fn mul_external<const REDUCE: REDUCEMOD>(&self, lhs: Barrett<u64>, rhs: u64) -> u64 {
let mut r: u64 = rhs;
pub fn mul_external<const REDUCE: REDUCEMOD>(&self, lhs: &Barrett<u64>, rhs: &u64) -> u64 {
let mut r: u64 = *rhs;
self.mul_external_assign::<REDUCE>(lhs, &mut r);
r
}
#[inline(always)]
pub fn mul_external_assign<const REDUCE: REDUCEMOD>(&self, lhs: Barrett<u64>, rhs: &mut u64) {
pub fn mul_external_assign<const REDUCE: REDUCEMOD>(&self, lhs: &Barrett<u64>, rhs: &mut u64) {
let t: u64 = ((*lhs.quotient() as u128 * *rhs as u128) >> 64) as _;
*rhs = (rhs.wrapping_mul(*lhs.value())).wrapping_sub(self.q.wrapping_mul(t));
self.reduce_assign::<REDUCE>(rhs);
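mul_external and mul_external_assign now borrow the precomputed constant (&Barrett<u64>), and the non-assign form borrows its right-hand operand as well; the arithmetic is unchanged. With quotient ≈ floor(value * 2^64 / q), the intermediate result lands in [0, 2q) and the REDUCE step decides whether it is folded below q. A standalone sketch of the same computation with a single conditional subtraction (illustrative names, not the crate's API; assumes value < q < 2^63):

fn mul_precomputed(value: u64, quotient: u64, q: u64, x: u64) -> u64 {
    // quotient = floor(value * 2^64 / q), precomputed once per constant
    let t = ((quotient as u128 * x as u128) >> 64) as u64;
    let r = x.wrapping_mul(value).wrapping_sub(q.wrapping_mul(t)); // in [0, 2q)
    if r >= q { r - q } else { r }
}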

View File

@@ -4,7 +4,7 @@ use crate::modulus::prime::Prime;
use crate::modulus::{ScalarOperations, VectorOperations};
use crate::modulus::{NONE, REDUCEMOD};
use crate::{
apply_ssv, apply_sv, apply_svv, apply_v, apply_vsssvv, apply_vssv, apply_vsv, apply_vv,
apply_ssv, apply_sv, apply_v, apply_vsssvv, apply_vssv, apply_vsv, apply_vv,
apply_vvssv, apply_vvsv, apply_vvv,
};
use itertools::izip;
@@ -109,37 +109,41 @@ impl ScalarOperations<u64> for Prime<u64> {
}
#[inline(always)]
fn sa_mont_mul_sb_into_sc<const REDUCE: REDUCEMOD>(
fn sa_mul_sb_montgomery_into_sc<const REDUCE: REDUCEMOD>(
&self,
a: &Montgomery<u64>,
b: &u64,
a: &u64,
b: &Montgomery<u64>,
c: &mut u64,
) {
*c = self.montgomery.mul_external::<REDUCE>(*a, *b);
}
#[inline(always)]
fn sa_mont_mul_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &Montgomery<u64>, b: &mut u64) {
self.montgomery.mul_external_assign::<REDUCE>(*a, b);
fn sa_mul_sb_montgomery_into_sa<const REDUCE: REDUCEMOD>(
&self,
b: &Montgomery<u64>,
a: &mut u64,
) {
self.montgomery.mul_external_assign::<REDUCE>(*b, a);
}
#[inline(always)]
fn sa_barrett_mul_sb_into_sc<const REDUCE: REDUCEMOD>(
fn sa_mul_sb_barrett_into_sc<const REDUCE: REDUCEMOD>(
&self,
a: &Barrett<u64>,
b: &u64,
a: &u64,
b: &Barrett<u64>,
c: &mut u64,
) {
*c = self.barrett.mul_external::<REDUCE>(*a, *b);
*c = self.barrett.mul_external::<REDUCE>(b, a);
}
#[inline(always)]
fn sa_barrett_mul_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &Barrett<u64>, b: &mut u64) {
self.barrett.mul_external_assign::<REDUCE>(*a, b);
fn sa_mul_sb_barrett_into_sa<const REDUCE: REDUCEMOD>(&self, b: &Barrett<u64>, a: &mut u64) {
self.barrett.mul_external_assign::<REDUCE>(b, a);
}
#[inline(always)]
fn sa_sub_sb_mul_sc_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sa_sub_sb_mul_sc_barrett_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &u64,
b: &u64,
@@ -152,43 +156,43 @@ impl ScalarOperations<u64> for Prime<u64> {
4 => *d = a + self.four_q - b,
_ => unreachable!("invalid VBRANGE argument"),
}
self.barrett.mul_external_assign::<REDUCE>(*c, d);
self.barrett.mul_external_assign::<REDUCE>(c, d);
}
#[inline(always)]
fn sa_sub_sb_mul_sc_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sa_sub_sb_mul_sc_barrett_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &u64,
c: &Barrett<u64>,
b: &mut u64,
) {
self.sa_sub_sb_into_sb::<SBRANGE, NONE>(a, b);
self.barrett.mul_external_assign::<REDUCE>(*c, b);
self.barrett.mul_external_assign::<REDUCE>(c, b);
}
#[inline(always)]
fn sa_add_sb_mul_sc_into_sd<const REDUCE: REDUCEMOD>(
fn sa_add_sb_mul_sc_barrett_into_sd<const REDUCE: REDUCEMOD>(
&self,
a: &u64,
b: &u64,
c: &Barrett<u64>,
d: &mut u64,
) {
*d = self.barrett.mul_external::<REDUCE>(*c, *a + *b);
*d = self.barrett.mul_external::<REDUCE>(c, &(*a + b));
}
#[inline(always)]
fn sa_add_sb_mul_sc_into_sa<const REDUCE: REDUCEMOD>(
fn sa_add_sb_mul_sc_barrett_into_sa<const REDUCE: REDUCEMOD>(
&self,
b: &u64,
c: &Barrett<u64>,
a: &mut u64,
) {
*a = self.barrett.mul_external::<REDUCE>(*c, *a + *b);
*a = self.barrett.mul_external::<REDUCE>(c, &(*a + b));
}
#[inline(always)]
fn sb_sub_sa_add_sc_mul_sd_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sb_sub_sa_add_sc_mul_sd_barrett_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
a: &u64,
b: &u64,
@@ -197,11 +201,11 @@ impl ScalarOperations<u64> for Prime<u64> {
e: &mut u64,
) {
self.sa_sub_sb_into_sc::<SBRANGE, NONE>(&(b + c), a, e);
self.barrett.mul_external_assign::<REDUCE>(*d, e);
self.barrett.mul_external_assign::<REDUCE>(d, e);
}
#[inline(always)]
fn sb_sub_sa_add_sc_mul_sd_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
&self,
b: &u64,
c: &u64,
@@ -209,7 +213,7 @@ impl ScalarOperations<u64> for Prime<u64> {
a: &mut u64,
) {
self.sa_sub_sb_into_sb::<SBRANGE, NONE>(&(b + c), a);
self.barrett.mul_external_assign::<REDUCE>(*d, a);
self.barrett.mul_external_assign::<REDUCE>(d, a);
}
#[inline(always)]
@@ -398,34 +402,15 @@ impl VectorOperations<u64> for Prime<u64> {
}
#[inline(always)]
fn va_mont_mul_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_mul_vb_montgomery_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &[Montgomery<u64>],
b: &[u64],
c: &mut [u64],
) {
apply_vvv!(self, Self::sa_mont_mul_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
}
#[inline(always)]
fn va_mont_mul_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &[Montgomery<u64>],
b: &mut [u64],
) {
apply_vv!(self, Self::sa_mont_mul_sb_into_sb::<REDUCE>, a, b, CHUNK);
}
#[inline(always)]
fn sa_barrett_mul_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &Barrett<u64>,
b: &[u64],
c: &mut [u64],
) {
apply_svv!(
apply_vvv!(
self,
Self::sa_barrett_mul_sb_into_sc::<REDUCE>,
Self::sa_mul_sb_montgomery_into_sc::<REDUCE>,
a,
b,
c,
@@ -434,15 +419,51 @@ impl VectorOperations<u64> for Prime<u64> {
}
#[inline(always)]
fn sa_barrett_mul_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_mul_vb_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &Barrett<u64>,
b: &mut [u64],
b: &[Montgomery<u64>],
a: &mut [u64],
) {
apply_sv!(self, Self::sa_barrett_mul_sb_into_sb::<REDUCE>, a, b, CHUNK);
apply_vv!(
self,
Self::sa_mul_sb_montgomery_into_sa::<REDUCE>,
b,
a,
CHUNK
);
}
fn va_sub_vb_mul_sc_into_vd<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
#[inline(always)]
fn va_mul_sb_barrett_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &[u64],
b: &Barrett<u64>,
c: &mut [u64],
) {
apply_vsv!(
self,
Self::sa_mul_sb_barrett_into_sc::<REDUCE>,
a,
b,
c,
CHUNK
);
}
#[inline(always)]
fn va_mul_sb_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
b: &Barrett<u64>,
a: &mut [u64],
) {
apply_sv!(self, Self::sa_mul_sb_barrett_into_sa::<REDUCE>, b, a, CHUNK);
}
fn va_sub_vb_mul_sc_barrett_into_vd<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
a: &[u64],
b: &[u64],
@@ -451,7 +472,7 @@ impl VectorOperations<u64> for Prime<u64> {
) {
apply_vvsv!(
self,
Self::sa_sub_sb_mul_sc_into_sd::<VBRANGE, REDUCE>,
Self::sa_sub_sb_mul_sc_barrett_into_sd::<VBRANGE, REDUCE>,
a,
b,
c,
@@ -460,7 +481,11 @@ impl VectorOperations<u64> for Prime<u64> {
);
}
fn va_sub_vb_mul_sc_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
fn va_sub_vb_mul_sc_barrett_into_vb<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
>(
&self,
a: &[u64],
b: &Barrett<u64>,
@@ -468,7 +493,7 @@ impl VectorOperations<u64> for Prime<u64> {
) {
apply_vsv!(
self,
Self::sa_sub_sb_mul_sc_into_sb::<VBRANGE, REDUCE>,
Self::sa_sub_sb_mul_sc_barrett_into_sb::<VBRANGE, REDUCE>,
a,
b,
c,
@@ -477,7 +502,7 @@ impl VectorOperations<u64> for Prime<u64> {
}
// vec(a) <- (vec(a) + scalar(b)) * scalar(c);
fn va_add_sb_mul_sc_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_add_sb_mul_sc_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
b: &u64,
c: &Barrett<u64>,
@@ -485,7 +510,7 @@ impl VectorOperations<u64> for Prime<u64> {
) {
apply_ssv!(
self,
Self::sa_add_sb_mul_sc_into_sa::<REDUCE>,
Self::sa_add_sb_mul_sc_barrett_into_sa::<REDUCE>,
b,
c,
a,
@@ -494,7 +519,7 @@ impl VectorOperations<u64> for Prime<u64> {
}
// vec(d) <- (vec(a) + scalar(b)) * scalar(c).
fn va_add_sb_mul_sc_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
fn va_add_sb_mul_sc_barrett_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
&self,
a: &[u64],
b: &u64,
@@ -503,7 +528,7 @@ impl VectorOperations<u64> for Prime<u64> {
) {
apply_vssv!(
self,
Self::sa_add_sb_mul_sc_into_sd::<REDUCE>,
Self::sa_add_sb_mul_sc_barrett_into_sd::<REDUCE>,
a,
b,
c,
@@ -513,7 +538,7 @@ impl VectorOperations<u64> for Prime<u64> {
}
// vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(d).
fn vb_sub_va_add_sc_mul_sd_into_ve<
fn vb_sub_va_add_sc_mul_sd_barrett_into_ve<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
@@ -527,7 +552,7 @@ impl VectorOperations<u64> for Prime<u64> {
) {
apply_vvssv!(
self,
Self::sb_sub_sa_add_sc_mul_sd_into_se::<VBRANGE, REDUCE>,
Self::sb_sub_sa_add_sc_mul_sd_barrett_into_se::<VBRANGE, REDUCE>,
va,
vb,
sc,
@@ -538,7 +563,7 @@ impl VectorOperations<u64> for Prime<u64> {
}
// vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(d).
fn vb_sub_va_add_sc_mul_sd_into_va<
fn vb_sub_va_add_sc_mul_sd_barrett_into_va<
const CHUNK: usize,
const VBRANGE: u8,
const REDUCE: REDUCEMOD,
@@ -551,7 +576,7 @@ impl VectorOperations<u64> for Prime<u64> {
) {
apply_vssv!(
self,
Self::sb_sub_sa_add_sc_mul_sd_into_sa::<VBRANGE, REDUCE>,
Self::sb_sub_sa_add_sc_mul_sd_barrett_into_sa::<VBRANGE, REDUCE>,
vb,
sc,
sd,

View File

@@ -125,7 +125,7 @@ impl Ring<u64> {
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.va_add_sb_mul_sc_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
.va_add_sb_mul_sc_barrett_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
}
#[inline(always)]
@@ -139,7 +139,7 @@ impl Ring<u64> {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus
.va_add_sb_mul_sc_into_vd::<CHUNK, REDUCE>(&a.0, b, c, &mut d.0);
.va_add_sb_mul_sc_barrett_into_vd::<CHUNK, REDUCE>(&a.0, b, c, &mut d.0);
}
#[inline(always)]
@@ -220,7 +220,7 @@ impl Ring<u64> {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus
.va_mont_mul_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
.va_mul_vb_montgomery_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
}
#[inline(always)]
@@ -232,7 +232,7 @@ impl Ring<u64> {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_mont_mul_vb_into_vb::<CHUNK, REDUCE>(&b.0, &mut a.0);
.va_mul_vb_montgomery_into_va::<CHUNK, REDUCE>(&b.0, &mut a.0);
}
#[inline(always)]
@@ -244,9 +244,9 @@ impl Ring<u64> {
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(
&self.modulus.barrett.prepare(*b),
self.modulus.va_mul_sb_barrett_into_vc::<CHUNK, REDUCE>(
&a.0,
&self.modulus.barrett.prepare(*b),
&mut c.0,
);
}
@@ -254,7 +254,7 @@ impl Ring<u64> {
#[inline(always)]
pub fn a_mul_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(
self.modulus.va_mul_sb_barrett_into_va::<CHUNK, REDUCE>(
&self
.modulus
.barrett
@@ -271,19 +271,19 @@ impl Ring<u64> {
) {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(b, &mut a.0);
.va_mul_sb_barrett_into_va::<CHUNK, REDUCE>(b, &mut a.0);
}
#[inline(always)]
pub fn a_mul_b_scalar_barrett_into_c<const REDUCE: REDUCEMOD>(
&self,
a: &Barrett<u64>,
b: &Poly<u64>,
a: &Poly<u64>,
b: &Barrett<u64>,
c: &mut Poly<u64>,
) {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
self.modulus
.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
.va_mul_sb_barrett_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
}
#[inline(always)]
@@ -298,7 +298,7 @@ impl Ring<u64> {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_into_vd::<CHUNK, VBRANGE, REDUCE>(&a.0, &b.0, c, &mut d.0);
.va_sub_vb_mul_sc_barrett_into_vd::<CHUNK, VBRANGE, REDUCE>(&a.0, &b.0, c, &mut d.0);
}
#[inline(always)]
@@ -311,7 +311,7 @@ impl Ring<u64> {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.va_sub_vb_mul_sc_into_vb::<CHUNK, BRANGE, REDUCE>(&b.0, c, &mut a.0);
.va_sub_vb_mul_sc_barrett_into_vb::<CHUNK, BRANGE, REDUCE>(&b.0, c, &mut a.0);
}
#[inline(always)]
@@ -330,7 +330,9 @@ impl Ring<u64> {
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
debug_assert!(e.n() == self.n(), "e.n()={} != n={}", e.n(), self.n());
self.modulus
.vb_sub_va_add_sc_mul_sd_into_ve::<CHUNK, BRANGE, REDUCE>(&a.0, &b.0, c, d, &mut e.0);
.vb_sub_va_add_sc_mul_sd_barrett_into_ve::<CHUNK, BRANGE, REDUCE>(
&a.0, &b.0, c, d, &mut e.0,
);
}
#[inline(always)]
@@ -347,7 +349,7 @@ impl Ring<u64> {
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
self.modulus
.vb_sub_va_add_sc_mul_sd_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
.vb_sub_va_add_sc_mul_sd_barrett_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
}
pub fn a_rsh_scalar_b_mask_scalar_c_into_d(
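Taken together, the Ring-level wrappers now place the polynomial operand first and the prepared scalar second, mirroring the renamed vector methods. A hedged usage sketch of the reordered a_mul_b_scalar_barrett_into_c (ring and polynomial construction are not shown in this diff; the import paths, the visibility of the modulus field, and ONCE as the reduction mode are assumptions):

use math::modulus::ONCE;
use math::poly::Poly;
use math::ring::Ring;

// c <- a * b mod q, coefficient-wise, with the scalar b prepared once.
fn poly_scale(r: &Ring<u64>, a: &Poly<u64>, b: u64, c: &mut Poly<u64>) {
    let b_prep = r.modulus.barrett.prepare(b);
    r.a_mul_b_scalar_barrett_into_c::<ONCE>(a, &b_prep, c);
}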