mirror of
https://github.com/arnaucube/poulpy.git
synced 2026-02-10 05:06:44 +01:00
removed RNS backend (archived in branch archive_rns)
This commit is contained in:
@@ -1,34 +0,0 @@
|
||||
[package]
|
||||
name = "rns"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
num = "0.4.3"
|
||||
primality-test = "0.3.0"
|
||||
num-bigint = "0.4.6"
|
||||
num-traits = "0.2.19"
|
||||
num-integer ="0.1.46"
|
||||
prime_factorization = "1.0.5"
|
||||
sprs = "0.11.2"
|
||||
criterion = {workspace = true}
|
||||
itertools = {workspace = true}
|
||||
rand_distr = {workspace = true}
|
||||
sampling = { path = "../sampling" }
|
||||
utils = { path = "../utils" }
|
||||
|
||||
[[bench]]
|
||||
name = "ntt"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "operations"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "ring_rns"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "sampling"
|
||||
harness = false
|
||||
@@ -1,103 +0,0 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use rns::modulus::WordOps;
|
||||
use rns::poly::Poly;
|
||||
use rns::ring::Ring;
|
||||
|
||||
fn ntt(c: &mut Criterion) {
|
||||
fn runner<'a, const INPLACE: bool, const LAZY: bool>(
|
||||
ring: &'a Ring<u64>,
|
||||
) -> Box<dyn FnMut() + 'a> {
|
||||
let mut a: Poly<u64> = ring.new_poly();
|
||||
for i in 0..a.n() {
|
||||
a.0[i] = i as u64;
|
||||
}
|
||||
if INPLACE {
|
||||
Box::new(move || ring.ntt_inplace::<LAZY>(&mut a))
|
||||
} else {
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
Box::new(move || ring.ntt::<LAZY>(&a, &mut b))
|
||||
}
|
||||
}
|
||||
|
||||
let q: u64 = 0x1fffffffffe00001u64;
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("ntt");
|
||||
|
||||
for log_n in 10..17 {
|
||||
let ring = Ring::new(1 << log_n, q, 1);
|
||||
|
||||
let runners: [(String, Box<dyn FnMut()>); 4] = [
|
||||
(format!("inplace=true/LAZY=true/q={}", q.log2()), {
|
||||
runner::<true, true>(&ring)
|
||||
}),
|
||||
(format!("inplace=true/LAZY=false/q={}", q.log2()), {
|
||||
runner::<true, false>(&ring)
|
||||
}),
|
||||
(format!("inplace=false/LAZY=true/q={}", q.log2()), {
|
||||
runner::<false, true>(&ring)
|
||||
}),
|
||||
(format!("inplace=false/LAZY=false/q={}", q.log2()), {
|
||||
runner::<false, false>(&ring)
|
||||
}),
|
||||
];
|
||||
|
||||
for (name, mut runner) in runners {
|
||||
let id: BenchmarkId = BenchmarkId::new(name, format!("n={}", 1 << log_n));
|
||||
b.bench_with_input(id, &(), |b: &mut criterion::Bencher<'_>, _| {
|
||||
b.iter(&mut runner)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn intt(c: &mut Criterion) {
|
||||
fn runner<'a, const INPLACE: bool, const LAZY: bool>(
|
||||
ring: &'a Ring<u64>,
|
||||
) -> Box<dyn FnMut() + 'a> {
|
||||
let mut a: Poly<u64> = ring.new_poly();
|
||||
for i in 0..a.n() {
|
||||
a.0[i] = i as u64;
|
||||
}
|
||||
if INPLACE {
|
||||
Box::new(move || ring.intt_inplace::<LAZY>(&mut a))
|
||||
} else {
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
Box::new(move || ring.intt::<LAZY>(&a, &mut b))
|
||||
}
|
||||
}
|
||||
|
||||
let q: u64 = 0x1fffffffffe00001u64;
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("intt");
|
||||
|
||||
for log_n in 10..17 {
|
||||
let ring = Ring::new(1 << log_n, q, 1);
|
||||
|
||||
let runners: [(String, Box<dyn FnMut()>); 4] = [
|
||||
(format!("inplace=true/LAZY=true/q={}", q.log2()), {
|
||||
runner::<true, true>(&ring)
|
||||
}),
|
||||
(format!("inplace=true/LAZY=false/q={}", q.log2()), {
|
||||
runner::<true, false>(&ring)
|
||||
}),
|
||||
(format!("inplace=false/LAZY=true/q={}", q.log2()), {
|
||||
runner::<false, true>(&ring)
|
||||
}),
|
||||
(format!("inplace=false/LAZY=false/q={}", q.log2()), {
|
||||
runner::<false, false>(&ring)
|
||||
}),
|
||||
];
|
||||
|
||||
for (name, mut runner) in runners {
|
||||
let id: BenchmarkId = BenchmarkId::new(name, format!("n={}", 1 << log_n));
|
||||
b.bench_with_input(id, &(), |b: &mut criterion::Bencher<'_>, _| {
|
||||
b.iter(&mut runner)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, ntt, intt,);
|
||||
criterion_main!(benches);
|
||||
@@ -1,209 +0,0 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use rns::modulus::montgomery::Montgomery;
|
||||
use rns::modulus::{WordOps, ONCE};
|
||||
use rns::poly::Poly;
|
||||
use rns::ring::Ring;
|
||||
|
||||
fn a_add_b_into_b(c: &mut Criterion) {
|
||||
fn runner(ring: Ring<u64>) -> Box<dyn FnMut()> {
|
||||
let mut a: Poly<u64> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
for i in 0..ring.n() {
|
||||
a.0[i] = i as u64;
|
||||
b.0[i] = i as u64;
|
||||
}
|
||||
Box::new(move || {
|
||||
ring.a_add_b_into_b::<ONCE>(&a, &mut b);
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("a_add_b_into_b");
|
||||
for log_n in 11..17 {
|
||||
let n: usize = 1 << log_n as usize;
|
||||
let q_base: u64 = 0x1fffffffffe00001u64;
|
||||
let q_power: usize = 1usize;
|
||||
let r: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
|
||||
let runners = [("prime", { runner(r) })];
|
||||
for (name, mut runner) in runners {
|
||||
let id = BenchmarkId::new(name, n);
|
||||
b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn a_mul_b_montgomery_into_a(c: &mut Criterion) {
|
||||
fn runner(ring: Ring<u64>) -> Box<dyn FnMut()> {
|
||||
let mut a: Poly<Montgomery<u64>> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
for i in 0..ring.n() {
|
||||
a.0[i] = ring.modulus.montgomery.prepare::<ONCE>(i as u64);
|
||||
b.0[i] = i as u64;
|
||||
}
|
||||
Box::new(move || {
|
||||
ring.a_mul_b_montgomery_into_a::<ONCE>(&a, &mut b);
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("a_mul_b_montgomery_into_a");
|
||||
for log_n in 11..17 {
|
||||
let n: usize = 1 << log_n as usize;
|
||||
let q_base: u64 = 0x1fffffffffe00001u64;
|
||||
let q_power: usize = 1usize;
|
||||
let r: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
|
||||
let runners = [("prime", { runner(r) })];
|
||||
for (name, mut runner) in runners {
|
||||
let id = BenchmarkId::new(name, n);
|
||||
b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn a_mul_b_montgomery_into_c(c: &mut Criterion) {
|
||||
fn runner(ring: Ring<u64>) -> Box<dyn FnMut()> {
|
||||
let mut a: Poly<Montgomery<u64>> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
let mut c: Poly<u64> = ring.new_poly();
|
||||
for i in 0..ring.n() {
|
||||
a.0[i] = ring.modulus.montgomery.prepare::<ONCE>(i as u64);
|
||||
b.0[i] = i as u64;
|
||||
}
|
||||
Box::new(move || {
|
||||
ring.a_mul_b_montgomery_into_c::<ONCE>(&a, &b, &mut c);
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("a_mul_b_montgomery_into_c");
|
||||
for log_n in 11..17 {
|
||||
let n: usize = 1 << log_n as usize;
|
||||
let q_base: u64 = 0x1fffffffffe00001u64;
|
||||
let q_power: usize = 1usize;
|
||||
let r: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
|
||||
let runners = [("prime", { runner(r) })];
|
||||
for (name, mut runner) in runners {
|
||||
let id = BenchmarkId::new(name, n);
|
||||
b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn a_ith_digit_unsigned_base_scalar_b_into_c(c: &mut Criterion) {
|
||||
fn runner(ring: Ring<u64>, base: usize, d: usize) -> Box<dyn FnMut()> {
|
||||
let mut a: Poly<Montgomery<u64>> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
for i in 0..ring.n() {
|
||||
a.0[i] = i as u64;
|
||||
}
|
||||
Box::new(move || {
|
||||
(0..d).for_each(|i| {
|
||||
ring.a_ith_digit_unsigned_base_scalar_b_into_c(i, &a, &base, &mut b);
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("a_ith_digit_unsigned_base_scalar_b_into_c");
|
||||
for log_n in 11..12 {
|
||||
let n: usize = 1 << log_n as usize;
|
||||
let q_base: u64 = 0x1fffffffffe00001u64;
|
||||
let q_power: usize = 1usize;
|
||||
let ring: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
|
||||
let base: usize = 7;
|
||||
let logq: usize = ring.modulus.q.log2();
|
||||
let d: usize = (logq + base - 1) / base;
|
||||
let runners = [(format!("prime/logq={}/w={}/d={}", logq, base, d), {
|
||||
runner(ring, base, d)
|
||||
})];
|
||||
for (name, mut runner) in runners {
|
||||
let id = BenchmarkId::new(name, n);
|
||||
b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn a_ith_digit_signed_base_scalar_b_into_c_balanced_false(c: &mut Criterion) {
|
||||
fn runner(ring: Ring<u64>, base: usize, d: usize) -> Box<dyn FnMut()> {
|
||||
let mut a: Poly<Montgomery<u64>> = ring.new_poly();
|
||||
let mut carry: Poly<u64> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
for i in 0..ring.n() {
|
||||
a.0[i] = i as u64;
|
||||
}
|
||||
Box::new(move || {
|
||||
(0..d).for_each(|i| {
|
||||
ring.a_ith_digit_signed_base_scalar_b_into_c::<false>(
|
||||
i, &a, &base, &mut carry, &mut b,
|
||||
);
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("a_ith_digit_signed_base_scalar_b_into_c::<BALANCED=false>");
|
||||
for log_n in 11..12 {
|
||||
let n: usize = 1 << log_n as usize;
|
||||
let q_base: u64 = 0x1fffffffffe00001u64;
|
||||
let q_power: usize = 1usize;
|
||||
let ring: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
|
||||
let base: usize = 7;
|
||||
let logq: usize = ring.modulus.q.log2();
|
||||
let d: usize = (logq + base - 1) / base;
|
||||
let runners = [(format!("prime/logq={}/w={}/d={}", logq, base, d), {
|
||||
runner(ring, base, d)
|
||||
})];
|
||||
for (name, mut runner) in runners {
|
||||
let id = BenchmarkId::new(name, n);
|
||||
b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn a_ith_digit_signed_base_scalar_b_into_c_balanced_true(c: &mut Criterion) {
|
||||
fn runner(ring: Ring<u64>, base: usize, d: usize) -> Box<dyn FnMut()> {
|
||||
let mut a: Poly<Montgomery<u64>> = ring.new_poly();
|
||||
let mut carry: Poly<u64> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
for i in 0..ring.n() {
|
||||
a.0[i] = i as u64;
|
||||
}
|
||||
Box::new(move || {
|
||||
(0..d).for_each(|i| {
|
||||
ring.a_ith_digit_signed_base_scalar_b_into_c::<true>(
|
||||
i, &a, &base, &mut carry, &mut b,
|
||||
);
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("a_ith_digit_signed_base_scalar_b_into_c::<BALANCED=true>");
|
||||
for log_n in 11..12 {
|
||||
let n: usize = 1 << log_n as usize;
|
||||
let q_base: u64 = 0x1fffffffffe00001u64;
|
||||
let q_power: usize = 1usize;
|
||||
let ring: Ring<u64> = Ring::<u64>::new(n, q_base, q_power);
|
||||
let base: usize = 7;
|
||||
let logq: usize = ring.modulus.q.log2();
|
||||
let d: usize = (logq + base - 1) / base;
|
||||
let runners = [(format!("prime/logq={}/w={}/d={}", logq, base, d), {
|
||||
runner(ring, base, d)
|
||||
})];
|
||||
for (name, mut runner) in runners {
|
||||
let id = BenchmarkId::new(name, n);
|
||||
b.bench_with_input(id, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
a_add_b_into_b,
|
||||
a_mul_b_montgomery_into_a,
|
||||
a_mul_b_montgomery_into_c,
|
||||
a_ith_digit_unsigned_base_scalar_b_into_c,
|
||||
a_ith_digit_signed_base_scalar_b_into_c_balanced_false,
|
||||
a_ith_digit_signed_base_scalar_b_into_c_balanced_true,
|
||||
);
|
||||
criterion_main!(benches);
|
||||
@@ -1,38 +0,0 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use rns::poly::PolyRNS;
|
||||
use rns::ring::RingRNS;
|
||||
|
||||
fn div_floor_by_last_modulus_ntt_true(c: &mut Criterion) {
|
||||
fn runner(r: RingRNS<u64>) -> Box<dyn FnMut()> {
|
||||
let a: PolyRNS<u64> = r.new_polyrns();
|
||||
let mut b: [rns::poly::Poly<u64>; 2] = [r.new_poly(), r.new_poly()];
|
||||
let mut c: PolyRNS<u64> = r.new_polyrns();
|
||||
|
||||
Box::new(move || r.div_by_last_modulus::<false, true>(&a, &mut b, &mut c))
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("div_floor_by_last_modulus_ntt_true");
|
||||
for log_n in 11..18 {
|
||||
let n = 1 << log_n;
|
||||
let moduli: Vec<u64> = vec![
|
||||
0x1fffffffffe00001u64,
|
||||
0x1fffffffffc80001u64,
|
||||
0x1fffffffffb40001,
|
||||
0x1fffffffff500001,
|
||||
];
|
||||
|
||||
let ring_rns: RingRNS<u64> = RingRNS::new(n, moduli);
|
||||
|
||||
let runners = [(format!("prime/n={}/level={}", n, ring_rns.level()), {
|
||||
runner(ring_rns)
|
||||
})];
|
||||
|
||||
for (name, mut runner) in runners {
|
||||
b.bench_with_input(name, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, div_floor_by_last_modulus_ntt_true);
|
||||
criterion_main!(benches);
|
||||
@@ -1,41 +0,0 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use rns::poly::PolyRNS;
|
||||
use rns::ring::RingRNS;
|
||||
use sampling::source::Source;
|
||||
|
||||
fn fill_uniform(c: &mut Criterion) {
|
||||
fn runner(r: RingRNS<u64>) -> Box<dyn FnMut()> {
|
||||
let mut a: PolyRNS<u64> = r.new_polyrns();
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
|
||||
Box::new(move || {
|
||||
r.fill_uniform(&mut source, &mut a);
|
||||
})
|
||||
}
|
||||
|
||||
let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> =
|
||||
c.benchmark_group("fill_uniform");
|
||||
for log_n in 11..18 {
|
||||
let n = 1 << log_n;
|
||||
let moduli: Vec<u64> = vec![
|
||||
0x1fffffffffe00001u64,
|
||||
0x1fffffffffc80001u64,
|
||||
0x1fffffffffb40001,
|
||||
0x1fffffffff500001,
|
||||
];
|
||||
|
||||
let ring_rns: RingRNS<u64> = RingRNS::new(n, moduli);
|
||||
|
||||
let runners = [(format!("prime/n={}/level={}", n, ring_rns.level()), {
|
||||
runner(ring_rns)
|
||||
})];
|
||||
|
||||
for (name, mut runner) in runners {
|
||||
b.bench_with_input(name, &(), |b, _| b.iter(&mut runner));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, fill_uniform);
|
||||
criterion_main!(benches);
|
||||
@@ -1,50 +0,0 @@
|
||||
use rns::dft::ntt::Table;
|
||||
use rns::modulus::prime::Prime;
|
||||
use rns::ring::Ring;
|
||||
|
||||
fn main() {
|
||||
// Example usage of `Prime<u64>`
|
||||
let q_base: u64 = 65537; // Example prime base
|
||||
let q_power: usize = 1; // Example power
|
||||
let prime_instance: Prime<u64> = Prime::<u64>::new(q_base, q_power);
|
||||
|
||||
// Display the fields of `Prime` to verify
|
||||
println!("Prime instance created:");
|
||||
println!("q: {}", prime_instance.q());
|
||||
println!("q_base: {}", prime_instance.q_base());
|
||||
println!("q_power: {}", prime_instance.q_power());
|
||||
|
||||
let n: usize = 32;
|
||||
let nth_root: usize = n << 1;
|
||||
|
||||
let ntt_table: Table<u64> = Table::<u64>::new(prime_instance, nth_root);
|
||||
|
||||
let mut a: Vec<u64> = vec![0; (nth_root >> 1) as usize];
|
||||
|
||||
for i in 0..a.len() {
|
||||
a[i] = i as u64;
|
||||
}
|
||||
|
||||
println!("{:?}", a);
|
||||
|
||||
ntt_table.forward_inplace::<false>(&mut a);
|
||||
|
||||
println!("{:?}", a);
|
||||
|
||||
ntt_table.backward_inplace::<false>(&mut a);
|
||||
|
||||
println!("{:?}", a);
|
||||
|
||||
let r: Ring<u64> = Ring::<u64>::new(n as usize, q_base, q_power);
|
||||
|
||||
let mut p0: rns::poly::Poly<u64> = r.new_poly();
|
||||
let mut p1: rns::poly::Poly<u64> = r.new_poly();
|
||||
|
||||
for i in 0..p0.n() {
|
||||
p0.0[i] = i as u64
|
||||
}
|
||||
|
||||
r.a_apply_automorphism_native_into_b::<false>(&p0, 2 * r.n - 1, nth_root, &mut p1);
|
||||
|
||||
println!("{:?}", p1);
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
use crate::modulus::WordOps;
|
||||
use crate::ring::Ring;
|
||||
use num::Unsigned;
|
||||
use utils::map::Map;
|
||||
|
||||
pub struct AutoPermMap(Map<usize, AutoPerm>);
|
||||
|
||||
impl AutoPermMap {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
0: Map::<usize, AutoPerm>::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, perm: AutoPerm) -> usize {
|
||||
let gal_el: usize = perm.gal_el;
|
||||
self.0.insert(perm.gal_el, perm);
|
||||
gal_el
|
||||
}
|
||||
|
||||
pub fn gen<O: Unsigned, const NTT: bool>(
|
||||
&mut self,
|
||||
ring: &Ring<O>,
|
||||
gen_1: usize,
|
||||
gen_2: bool,
|
||||
) -> usize {
|
||||
self.insert(AutoPerm::new::<O, NTT>(ring, gen_1, gen_2))
|
||||
}
|
||||
|
||||
pub fn get(&self, gal_el: &usize) -> Option<&AutoPerm> {
|
||||
self.0.get(gal_el)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AutoPerm {
|
||||
pub ntt: bool,
|
||||
pub gal_el: usize,
|
||||
pub permutation: Vec<usize>,
|
||||
}
|
||||
|
||||
impl AutoPerm {
|
||||
/// Returns a lookup table for the automorphism X^{i} -> X^{i * k mod nth_root}.
|
||||
/// Method will panic if n or nth_root are not power-of-two.
|
||||
/// Method will panic if gal_el is not coprime with nth_root.
|
||||
pub fn new<O: Unsigned, const NTT: bool>(ring: &Ring<O>, gen_1: usize, gen_2: bool) -> Self {
|
||||
let n = ring.n();
|
||||
let cyclotomic_order = ring.cyclotomic_order();
|
||||
|
||||
let gal_el = ring.galois_element(gen_1, gen_2);
|
||||
|
||||
let mut permutation: Vec<usize> = Vec::with_capacity(n);
|
||||
|
||||
if NTT {
|
||||
let mask = cyclotomic_order - 1;
|
||||
let log_cyclotomic_order_half: u32 = cyclotomic_order.log2() as u32 - 1;
|
||||
for i in 0..n {
|
||||
let i_rev: usize = 2 * i.reverse_bits_msb(log_cyclotomic_order_half) + 1;
|
||||
let gal_el_i: usize = ((gal_el * i_rev) & mask) >> 1;
|
||||
permutation.push(gal_el_i.reverse_bits_msb(log_cyclotomic_order_half));
|
||||
}
|
||||
} else {
|
||||
let log_n: usize = n.log2();
|
||||
let mask: usize = (n - 1) as usize;
|
||||
for i in 0..n {
|
||||
let gal_el_i: usize = i as usize * gal_el;
|
||||
let sign: usize = (gal_el_i >> log_n) & 1;
|
||||
let i_out: usize = (gal_el_i & mask) | (sign << (usize::BITS - 1));
|
||||
permutation.push(i_out)
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
ntt: NTT,
|
||||
gal_el: gal_el,
|
||||
permutation: permutation,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
pub mod ntt;
|
||||
|
||||
pub trait DFT<O> {
|
||||
fn forward_inplace_lazy(&self, x: &mut [O]);
|
||||
fn forward_inplace(&self, x: &mut [O]);
|
||||
fn backward_inplace_lazy(&self, x: &mut [O]);
|
||||
fn backward_inplace(&self, x: &mut [O]);
|
||||
}
|
||||
@@ -1,343 +0,0 @@
|
||||
use crate::dft::DFT;
|
||||
use crate::modulus::barrett::Barrett;
|
||||
use crate::modulus::montgomery::Montgomery;
|
||||
use crate::modulus::prime::Prime;
|
||||
use crate::modulus::ReduceOnce;
|
||||
use crate::modulus::WordOps;
|
||||
use crate::modulus::{BARRETT, NONE, ONCE};
|
||||
use itertools::izip;
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct Table<O> {
|
||||
prime: Prime<O>,
|
||||
psi: O,
|
||||
psi_forward_rev: Vec<Barrett<u64>>,
|
||||
psi_backward_rev: Vec<Barrett<u64>>,
|
||||
q: O,
|
||||
two_q: O,
|
||||
four_q: O,
|
||||
}
|
||||
|
||||
impl Table<u64> {
|
||||
pub fn new(prime: Prime<u64>, nth_root: usize) -> Table<u64> {
|
||||
assert!(
|
||||
nth_root & (nth_root - 1) == 0,
|
||||
"invalid argument: nth_root = {} is not a power of two",
|
||||
nth_root
|
||||
);
|
||||
|
||||
let psi: u64 = prime.primitive_nth_root(nth_root as u64);
|
||||
|
||||
let psi_mont: Montgomery<u64> = prime.montgomery.prepare::<ONCE>(psi);
|
||||
let psi_inv_mont: Montgomery<u64> = prime.montgomery.pow(psi_mont, prime.phi - 1);
|
||||
|
||||
let mut psi_forward_rev: Vec<Barrett<u64>> = vec![Barrett(0, 0); (nth_root >> 1) as usize];
|
||||
let mut psi_backward_rev: Vec<Barrett<u64>> = vec![Barrett(0, 0); (nth_root >> 1) as usize];
|
||||
|
||||
psi_forward_rev[0] = prime.barrett.prepare(1);
|
||||
psi_backward_rev[0] = prime.barrett.prepare(1);
|
||||
|
||||
let log_nth_root_half: u32 = (nth_root >> 1).log2() as _;
|
||||
|
||||
let mut powers_forward: u64 = 1u64;
|
||||
let mut powers_backward: u64 = 1u64;
|
||||
|
||||
for i in 1..(nth_root >> 1) as usize {
|
||||
let i_rev: usize = i.reverse_bits_msb(log_nth_root_half);
|
||||
|
||||
prime
|
||||
.montgomery
|
||||
.mul_external_assign::<ONCE>(psi_mont, &mut powers_forward);
|
||||
prime
|
||||
.montgomery
|
||||
.mul_external_assign::<ONCE>(psi_inv_mont, &mut powers_backward);
|
||||
|
||||
psi_forward_rev[i_rev] = prime.barrett.prepare(powers_forward);
|
||||
psi_backward_rev[i_rev] = prime.barrett.prepare(powers_backward);
|
||||
}
|
||||
|
||||
let q: u64 = prime.q();
|
||||
|
||||
Self {
|
||||
prime: prime,
|
||||
psi: psi,
|
||||
psi_forward_rev: psi_forward_rev,
|
||||
psi_backward_rev: psi_backward_rev,
|
||||
q: q,
|
||||
two_q: q << 1,
|
||||
four_q: q << 2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DFT<u64> for Table<u64> {
|
||||
fn forward_inplace(&self, a: &mut [u64]) {
|
||||
self.forward_inplace::<false>(a)
|
||||
}
|
||||
|
||||
fn forward_inplace_lazy(&self, a: &mut [u64]) {
|
||||
self.forward_inplace::<true>(a)
|
||||
}
|
||||
|
||||
fn backward_inplace(&self, a: &mut [u64]) {
|
||||
self.backward_inplace::<false>(a)
|
||||
}
|
||||
|
||||
fn backward_inplace_lazy(&self, a: &mut [u64]) {
|
||||
self.backward_inplace::<true>(a)
|
||||
}
|
||||
}
|
||||
|
||||
impl Table<u64> {
|
||||
pub fn forward_inplace<const LAZY: bool>(&self, a: &mut [u64]) {
|
||||
self.forward_inplace_core::<LAZY, 0, 0>(a);
|
||||
}
|
||||
|
||||
pub fn forward_inplace_core<const LAZY: bool, const SKIPSTART: u8, const SKIPEND: u8>(
|
||||
&self,
|
||||
a: &mut [u64],
|
||||
) {
|
||||
let n: usize = a.len();
|
||||
assert!(
|
||||
n & n - 1 == 0,
|
||||
"invalid a.len()={} must be a power of two",
|
||||
n
|
||||
);
|
||||
|
||||
assert!(
|
||||
n <= self.psi_forward_rev.len(),
|
||||
"invalid a.len()={} > psi_forward_rev.len()={}",
|
||||
n,
|
||||
self.psi_forward_rev.len()
|
||||
);
|
||||
|
||||
let log_n: u32 = usize::BITS - ((n as usize) - 1).leading_zeros();
|
||||
|
||||
let start: u32 = SKIPSTART as u32;
|
||||
let end: u32 = log_n - (SKIPEND as u32);
|
||||
|
||||
for layer in start..end {
|
||||
let (m, size) = (1 << layer, 1 << (log_n - layer - 1));
|
||||
let t: usize = 2 * size;
|
||||
if layer == log_n - 1 {
|
||||
if LAZY {
|
||||
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(
|
||||
|(a, psi)| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
self.dit_inplace::<false>(&mut a[0], &mut b[0], psi);
|
||||
debug_assert!(
|
||||
a[0] < self.two_q,
|
||||
"forward_inplace_core::<LAZY=true> output {} > {} (2q-1)",
|
||||
a[0],
|
||||
self.two_q - 1
|
||||
);
|
||||
debug_assert!(
|
||||
b[0] < self.two_q,
|
||||
"forward_inplace_core::<LAZY=true> output {} > {} (2q-1)",
|
||||
b[0],
|
||||
self.two_q - 1
|
||||
);
|
||||
},
|
||||
);
|
||||
} else {
|
||||
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(
|
||||
|(a, psi)| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
self.dit_inplace::<true>(&mut a[0], &mut b[0], psi);
|
||||
self.prime.barrett.reduce_assign::<BARRETT>(&mut a[0]);
|
||||
self.prime.barrett.reduce_assign::<BARRETT>(&mut b[0]);
|
||||
debug_assert!(
|
||||
a[0] < self.q,
|
||||
"forward_inplace_core::<LAZY=false> output {} > {} (q-1)",
|
||||
a[0],
|
||||
self.q - 1
|
||||
);
|
||||
debug_assert!(
|
||||
b[0] < self.q,
|
||||
"forward_inplace_core::<LAZY=false> output {} > {} (q-1)",
|
||||
b[0],
|
||||
self.q - 1
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
} else if t >= 16 {
|
||||
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| {
|
||||
self.dit_inplace::<true>(&mut a[0], &mut b[0], psi);
|
||||
self.dit_inplace::<true>(&mut a[1], &mut b[1], psi);
|
||||
self.dit_inplace::<true>(&mut a[2], &mut b[2], psi);
|
||||
self.dit_inplace::<true>(&mut a[3], &mut b[3], psi);
|
||||
self.dit_inplace::<true>(&mut a[4], &mut b[4], psi);
|
||||
self.dit_inplace::<true>(&mut a[5], &mut b[5], psi);
|
||||
self.dit_inplace::<true>(&mut a[6], &mut b[6], psi);
|
||||
self.dit_inplace::<true>(&mut a[7], &mut b[7], psi);
|
||||
});
|
||||
});
|
||||
} else {
|
||||
izip!(a.chunks_exact_mut(t), &self.psi_forward_rev[m..]).for_each(|(a, psi)| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
izip!(a, b).for_each(|(a, b)| self.dit_inplace::<true>(a, b, psi));
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn dit_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: &Barrett<u64>) {
|
||||
debug_assert!(*a < self.four_q, "a:{} q:{}", a, self.four_q);
|
||||
debug_assert!(*b < self.four_q, "b:{} q:{}", b, self.four_q);
|
||||
a.reduce_once_assign(self.two_q);
|
||||
let bt: u64 = self.prime.barrett.mul_external::<NONE>(t, b);
|
||||
*b = *a + self.two_q - bt;
|
||||
*a += bt;
|
||||
if !LAZY {
|
||||
a.reduce_once_assign(self.two_q);
|
||||
b.reduce_once_assign(self.two_q);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn backward_inplace<const LAZY: bool>(&self, a: &mut [u64]) {
|
||||
self.backward_inplace_core::<LAZY, 0, 0>(a);
|
||||
}
|
||||
|
||||
pub fn backward_inplace_core<const LAZY: bool, const SKIPSTART: u8, const SKIPEND: u8>(
|
||||
&self,
|
||||
a: &mut [u64],
|
||||
) {
|
||||
let n: usize = a.len();
|
||||
assert!(
|
||||
n & n - 1 == 0,
|
||||
"invalid x.len()= {} must be a power of two",
|
||||
n
|
||||
);
|
||||
|
||||
assert!(
|
||||
n <= self.psi_backward_rev.len(),
|
||||
"invalid a.len()={} > psi_backward_rev.len()={}",
|
||||
n,
|
||||
self.psi_backward_rev.len()
|
||||
);
|
||||
|
||||
let log_n = usize::BITS - ((n as usize) - 1).leading_zeros();
|
||||
|
||||
let start: u32 = SKIPEND as u32;
|
||||
let end: u32 = log_n - (SKIPSTART as u32);
|
||||
|
||||
for layer in (start..end).rev() {
|
||||
let (m, size) = (1 << layer, 1 << (log_n - layer - 1));
|
||||
let t: usize = 2 * size;
|
||||
if layer == 0 {
|
||||
let n_inv: Barrett<u64> = self.prime.barrett.prepare(self.prime.inv(n as u64));
|
||||
let psi: Barrett<u64> = self.prime.barrett.prepare(
|
||||
self.prime
|
||||
.barrett
|
||||
.mul_external::<ONCE>(&n_inv, &self.psi_backward_rev[1].0),
|
||||
);
|
||||
|
||||
izip!(a.chunks_exact_mut(2 * size)).for_each(|a| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| {
|
||||
self.dif_last_inplace::<LAZY>(&mut a[0], &mut b[0], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[1], &mut b[1], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[2], &mut b[2], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[3], &mut b[3], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[4], &mut b[4], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[5], &mut b[5], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[6], &mut b[6], &psi, &n_inv);
|
||||
self.dif_last_inplace::<LAZY>(&mut a[7], &mut b[7], &psi, &n_inv);
|
||||
});
|
||||
});
|
||||
} else if t >= 16 {
|
||||
izip!(a.chunks_exact_mut(t), &self.psi_backward_rev[m..]).for_each(|(a, psi)| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
izip!(a.chunks_exact_mut(8), b.chunks_exact_mut(8)).for_each(|(a, b)| {
|
||||
self.dif_inplace::<true>(&mut a[0], &mut b[0], psi);
|
||||
self.dif_inplace::<true>(&mut a[1], &mut b[1], psi);
|
||||
self.dif_inplace::<true>(&mut a[2], &mut b[2], psi);
|
||||
self.dif_inplace::<true>(&mut a[3], &mut b[3], psi);
|
||||
self.dif_inplace::<true>(&mut a[4], &mut b[4], psi);
|
||||
self.dif_inplace::<true>(&mut a[5], &mut b[5], psi);
|
||||
self.dif_inplace::<true>(&mut a[6], &mut b[6], psi);
|
||||
self.dif_inplace::<true>(&mut a[7], &mut b[7], psi);
|
||||
});
|
||||
});
|
||||
} else {
|
||||
izip!(a.chunks_exact_mut(2 * size), &self.psi_backward_rev[m..]).for_each(
|
||||
|(a, psi)| {
|
||||
let (a, b) = a.split_at_mut(size);
|
||||
izip!(a, b).for_each(|(a, b)| self.dif_inplace::<true>(a, b, psi));
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn dif_inplace<const LAZY: bool>(&self, a: &mut u64, b: &mut u64, t: &Barrett<u64>) {
|
||||
debug_assert!(*a < self.two_q, "a:{} q:{}", a, self.two_q);
|
||||
debug_assert!(*b < self.two_q, "b:{} q:{}", b, self.two_q);
|
||||
let d: u64 = self
|
||||
.prime
|
||||
.barrett
|
||||
.mul_external::<NONE>(t, &(*a + self.two_q - *b));
|
||||
*a = *a + *b;
|
||||
a.reduce_once_assign(self.two_q);
|
||||
*b = d;
|
||||
if !LAZY {
|
||||
a.reduce_once_assign(self.q);
|
||||
b.reduce_once_assign(self.q);
|
||||
}
|
||||
}
|
||||
|
||||
fn dif_last_inplace<const LAZY: bool>(
|
||||
&self,
|
||||
a: &mut u64,
|
||||
b: &mut u64,
|
||||
psi: &Barrett<u64>,
|
||||
n_inv: &Barrett<u64>,
|
||||
) {
|
||||
debug_assert!(*a < self.two_q);
|
||||
debug_assert!(*b < self.two_q);
|
||||
if LAZY {
|
||||
let d: u64 = self
|
||||
.prime
|
||||
.barrett
|
||||
.mul_external::<NONE>(psi, &(*a + self.two_q - *b));
|
||||
*a = self.prime.barrett.mul_external::<NONE>(n_inv, &(*a + *b));
|
||||
*b = d;
|
||||
} else {
|
||||
let d: u64 = self
|
||||
.prime
|
||||
.barrett
|
||||
.mul_external::<ONCE>(psi, &(*a + self.two_q - *b));
|
||||
*a = self.prime.barrett.mul_external::<ONCE>(n_inv, &(*a + *b));
|
||||
*b = d;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_ntt() {
|
||||
let q_base: u64 = 0x800000000004001;
|
||||
let q_power: usize = 1;
|
||||
let prime_instance: Prime<u64> = Prime::<u64>::new(q_base, q_power);
|
||||
let n: usize = 32;
|
||||
let two_nth_root: usize = n << 1;
|
||||
let ntt_table: Table<u64> = Table::<u64>::new(prime_instance, two_nth_root);
|
||||
let mut a: Vec<u64> = vec![0; n as usize];
|
||||
for i in 0..a.len() {
|
||||
a[i] = i as u64;
|
||||
}
|
||||
|
||||
let b: Vec<u64> = a.clone();
|
||||
ntt_table.forward_inplace::<false>(&mut a);
|
||||
ntt_table.backward_inplace::<false>(&mut a);
|
||||
assert!(a == b);
|
||||
}
|
||||
}
|
||||
507
rns/src/lib.rs
507
rns/src/lib.rs
@@ -1,507 +0,0 @@
|
||||
#![feature(bigint_helper_methods)]
|
||||
#![feature(test)]
|
||||
|
||||
pub mod automorphism;
|
||||
pub mod dft;
|
||||
pub mod modulus;
|
||||
pub mod num_bigint;
|
||||
pub mod poly;
|
||||
pub mod ring;
|
||||
pub mod scalar;
|
||||
|
||||
pub const CHUNK: usize = 8;
|
||||
pub const GALOISGENERATOR: usize = 5;
|
||||
|
||||
pub mod macros {
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_v {
|
||||
($self:expr, $f:expr, $a:expr, $CHUNK:expr) => {
|
||||
match CHUNK {
|
||||
8 => {
|
||||
$a.chunks_exact_mut(8).for_each(|a| {
|
||||
$f(&$self, &mut a[0]);
|
||||
$f(&$self, &mut a[1]);
|
||||
$f(&$self, &mut a[2]);
|
||||
$f(&$self, &mut a[3]);
|
||||
$f(&$self, &mut a[4]);
|
||||
$f(&$self, &mut a[5]);
|
||||
$f(&$self, &mut a[6]);
|
||||
$f(&$self, &mut a[7]);
|
||||
});
|
||||
|
||||
let n: usize = $a.len();
|
||||
let m = n - (n & (CHUNK - 1));
|
||||
$a[m..].iter_mut().for_each(|a| {
|
||||
$f(&$self, a);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
$a.iter_mut().for_each(|a| {
|
||||
$f(&$self, a);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$b.len() == n,
|
||||
"invalid argument b: b.len() = {} != a.len() = {}",
|
||||
$b.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!($a.chunks_exact(8), $b.chunks_exact_mut(8)).for_each(|(a, b)| {
|
||||
$f(&$self, &a[0], &mut b[0]);
|
||||
$f(&$self, &a[1], &mut b[1]);
|
||||
$f(&$self, &a[2], &mut b[2]);
|
||||
$f(&$self, &a[3], &mut b[3]);
|
||||
$f(&$self, &a[4], &mut b[4]);
|
||||
$f(&$self, &a[5], &mut b[5]);
|
||||
$f(&$self, &a[6], &mut b[6]);
|
||||
$f(&$self, &a[7], &mut b[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & (CHUNK - 1));
|
||||
izip!($a[m..].iter(), $b[m..].iter_mut()).for_each(|(a, b)| {
|
||||
$f(&$self, a, b);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $b.iter_mut()).for_each(|(a, b)| {
|
||||
$f(&$self, a, b);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vvv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$b.len() == n,
|
||||
"invalid argument b: b.len() = {} != a.len() = {}",
|
||||
$b.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
$c.len() == n,
|
||||
"invalid argument c: b.len() = {} != a.len() = {}",
|
||||
$c.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!(
|
||||
$a.chunks_exact(8),
|
||||
$b.chunks_exact(8),
|
||||
$c.chunks_exact_mut(8)
|
||||
)
|
||||
.for_each(|(a, b, c)| {
|
||||
$f(&$self, &a[0], &b[0], &mut c[0]);
|
||||
$f(&$self, &a[1], &b[1], &mut c[1]);
|
||||
$f(&$self, &a[2], &b[2], &mut c[2]);
|
||||
$f(&$self, &a[3], &b[3], &mut c[3]);
|
||||
$f(&$self, &a[4], &b[4], &mut c[4]);
|
||||
$f(&$self, &a[5], &b[5], &mut c[5]);
|
||||
$f(&$self, &a[6], &b[6], &mut c[6]);
|
||||
$f(&$self, &a[7], &b[7], &mut c[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($a[m..].iter(), $b[m..].iter(), $c[m..].iter_mut()).for_each(
|
||||
|(a, b, c)| {
|
||||
$f(&$self, a, b, c);
|
||||
},
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $b.iter(), $c.iter_mut()).for_each(|(a, b, c)| {
|
||||
$f(&$self, a, b, c);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_sv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $CHUNK:expr) => {
|
||||
let n: usize = $b.len();
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!($b.chunks_exact_mut(8)).for_each(|b| {
|
||||
$f(&$self, $a, &mut b[0]);
|
||||
$f(&$self, $a, &mut b[1]);
|
||||
$f(&$self, $a, &mut b[2]);
|
||||
$f(&$self, $a, &mut b[3]);
|
||||
$f(&$self, $a, &mut b[4]);
|
||||
$f(&$self, $a, &mut b[5]);
|
||||
$f(&$self, $a, &mut b[6]);
|
||||
$f(&$self, $a, &mut b[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($b[m..].iter_mut()).for_each(|b| {
|
||||
$f(&$self, $a, b);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
izip!($b.iter_mut()).for_each(|b| {
|
||||
$f(&$self, $a, b);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_svv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $CHUNK:expr) => {
|
||||
let n: usize = $b.len();
|
||||
debug_assert!(
|
||||
$c.len() == n,
|
||||
"invalid argument c: c.len() = {} != b.len() = {}",
|
||||
$c.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!($b.chunks_exact(8), $c.chunks_exact_mut(8)).for_each(|(b, c)| {
|
||||
$f(&$self, $a, &b[0], &mut c[0]);
|
||||
$f(&$self, $a, &b[1], &mut c[1]);
|
||||
$f(&$self, $a, &b[2], &mut c[2]);
|
||||
$f(&$self, $a, &b[3], &mut c[3]);
|
||||
$f(&$self, $a, &b[4], &mut c[4]);
|
||||
$f(&$self, $a, &b[5], &mut c[5]);
|
||||
$f(&$self, $a, &b[6], &mut c[6]);
|
||||
$f(&$self, $a, &b[7], &mut c[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($b[m..].iter(), $c[m..].iter_mut()).for_each(|(b, c)| {
|
||||
$f(&$self, $a, b, c);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
izip!($b.iter(), $c.iter_mut()).for_each(|(b, c)| {
|
||||
$f(&$self, $a, b, c);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vvsv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $d:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$b.len() == n,
|
||||
"invalid argument b: b.len() = {} != a.len() = {}",
|
||||
$b.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
$d.len() == n,
|
||||
"invalid argument d: d.len() = {} != a.len() = {}",
|
||||
$d.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!(
|
||||
$a.chunks_exact(8),
|
||||
$b.chunks_exact(8),
|
||||
$d.chunks_exact_mut(8)
|
||||
)
|
||||
.for_each(|(a, b, d)| {
|
||||
$f(&$self, &a[0], &b[0], $c, &mut d[0]);
|
||||
$f(&$self, &a[1], &b[1], $c, &mut d[1]);
|
||||
$f(&$self, &a[2], &b[2], $c, &mut d[2]);
|
||||
$f(&$self, &a[3], &b[3], $c, &mut d[3]);
|
||||
$f(&$self, &a[4], &b[4], $c, &mut d[4]);
|
||||
$f(&$self, &a[5], &b[5], $c, &mut d[5]);
|
||||
$f(&$self, &a[6], &b[6], $c, &mut d[6]);
|
||||
$f(&$self, &a[7], &b[7], $c, &mut d[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($a[m..].iter(), $b[m..].iter(), $d[m..].iter_mut()).for_each(
|
||||
|(a, b, d)| {
|
||||
$f(&$self, a, b, $c, d);
|
||||
},
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $b.iter(), $d.iter_mut()).for_each(|(a, b, d)| {
|
||||
$f(&$self, a, b, $c, d);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vsv {
|
||||
($self:expr, $f:expr, $a:expr, $c:expr, $b:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$b.len() == n,
|
||||
"invalid argument b: b.len() = {} != a.len() = {}",
|
||||
$b.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!($a.chunks_exact(8), $b.chunks_exact_mut(8)).for_each(|(a, b)| {
|
||||
$f(&$self, &a[0], $c, &mut b[0]);
|
||||
$f(&$self, &a[1], $c, &mut b[1]);
|
||||
$f(&$self, &a[2], $c, &mut b[2]);
|
||||
$f(&$self, &a[3], $c, &mut b[3]);
|
||||
$f(&$self, &a[4], $c, &mut b[4]);
|
||||
$f(&$self, &a[5], $c, &mut b[5]);
|
||||
$f(&$self, &a[6], $c, &mut b[6]);
|
||||
$f(&$self, &a[7], $c, &mut b[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($a[m..].iter(), $b[m..].iter_mut()).for_each(|(a, b)| {
|
||||
$f(&$self, a, $c, b);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $b.iter_mut()).for_each(|(a, b)| {
|
||||
$f(&$self, a, $c, b);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vssv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $d:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$d.len() == n,
|
||||
"invalid argument d: d.len() = {} != a.len() = {}",
|
||||
$d.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!($a.chunks_exact(8), $d.chunks_exact_mut(8)).for_each(|(a, d)| {
|
||||
$f(&$self, &a[0], $b, $c, &mut d[0]);
|
||||
$f(&$self, &a[1], $b, $c, &mut d[1]);
|
||||
$f(&$self, &a[2], $b, $c, &mut d[2]);
|
||||
$f(&$self, &a[3], $b, $c, &mut d[3]);
|
||||
$f(&$self, &a[4], $b, $c, &mut d[4]);
|
||||
$f(&$self, &a[5], $b, $c, &mut d[5]);
|
||||
$f(&$self, &a[6], $b, $c, &mut d[6]);
|
||||
$f(&$self, &a[7], $b, $c, &mut d[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($a[m..].iter(), $d[m..].iter_mut()).for_each(|(a, d)| {
|
||||
$f(&$self, a, $b, $c, d);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $d.iter_mut()).for_each(|(a, d)| {
|
||||
$f(&$self, a, $b, $c, d);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_ssv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $CHUNK:expr) => {
|
||||
let n: usize = $c.len();
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!($c.chunks_exact_mut(8)).for_each(|c| {
|
||||
$f(&$self, $a, $b, &mut c[0]);
|
||||
$f(&$self, $a, $b, &mut c[1]);
|
||||
$f(&$self, $a, $b, &mut c[2]);
|
||||
$f(&$self, $a, $b, &mut c[3]);
|
||||
$f(&$self, $a, $b, &mut c[4]);
|
||||
$f(&$self, $a, $b, &mut c[5]);
|
||||
$f(&$self, $a, $b, &mut c[6]);
|
||||
$f(&$self, $a, $b, &mut c[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($c[m..].iter_mut()).for_each(|c| {
|
||||
$f(&$self, $a, $b, c);
|
||||
});
|
||||
}
|
||||
_ => {
|
||||
izip!($c.iter_mut()).for_each(|c| {
|
||||
$f(&$self, $a, $b, c);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vvssv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $d:expr, $e:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$b.len() == n,
|
||||
"invalid argument b: b.len() = {} != a.len() = {}",
|
||||
$b.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
$e.len() == n,
|
||||
"invalid argument e: e.len() = {} != a.len() = {}",
|
||||
$e.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!(
|
||||
$a.chunks_exact(8),
|
||||
$b.chunks_exact(8),
|
||||
$e.chunks_exact_mut(8)
|
||||
)
|
||||
.for_each(|(a, b, e)| {
|
||||
$f(&$self, &a[0], &b[0], $c, $d, &mut e[0]);
|
||||
$f(&$self, &a[1], &b[1], $c, $d, &mut e[1]);
|
||||
$f(&$self, &a[2], &b[2], $c, $d, &mut e[2]);
|
||||
$f(&$self, &a[3], &b[3], $c, $d, &mut e[3]);
|
||||
$f(&$self, &a[4], &b[4], $c, $d, &mut e[4]);
|
||||
$f(&$self, &a[5], &b[5], $c, $d, &mut e[5]);
|
||||
$f(&$self, &a[6], &b[6], $c, $d, &mut e[6]);
|
||||
$f(&$self, &a[7], &b[7], $c, $d, &mut e[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($a[m..].iter(), $b[m..].iter(), $e[m..].iter_mut()).for_each(
|
||||
|(a, b, e)| {
|
||||
$f(&$self, a, b, $c, $d, e);
|
||||
},
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $b.iter(), $e.iter_mut()).for_each(|(a, b, e)| {
|
||||
$f(&$self, a, b, $c, $d, e);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! apply_vsssvv {
|
||||
($self:expr, $f:expr, $a:expr, $b:expr, $c:expr, $d:expr, $e:expr, $g:expr, $CHUNK:expr) => {
|
||||
let n: usize = $a.len();
|
||||
debug_assert!(
|
||||
$e.len() == n,
|
||||
"invalid argument b: e.len() = {} != a.len() = {}",
|
||||
$e.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
$g.len() == n,
|
||||
"invalid argument g: g.len() = {} != a.len() = {}",
|
||||
$g.len(),
|
||||
n
|
||||
);
|
||||
debug_assert!(
|
||||
CHUNK & (CHUNK - 1) == 0,
|
||||
"invalid CHUNK const: not a power of two"
|
||||
);
|
||||
|
||||
match CHUNK {
|
||||
8 => {
|
||||
izip!(
|
||||
$a.chunks_exact(8),
|
||||
$e.chunks_exact_mut(8),
|
||||
$g.chunks_exact_mut(8)
|
||||
)
|
||||
.for_each(|(a, e, g)| {
|
||||
$f(&$self, &a[0], $b, $c, $d, &mut e[0], &mut g[0]);
|
||||
$f(&$self, &a[1], $b, $c, $d, &mut e[1], &mut g[1]);
|
||||
$f(&$self, &a[2], $b, $c, $d, &mut e[2], &mut g[2]);
|
||||
$f(&$self, &a[3], $b, $c, $d, &mut e[3], &mut g[3]);
|
||||
$f(&$self, &a[4], $b, $c, $d, &mut e[4], &mut g[4]);
|
||||
$f(&$self, &a[5], $b, $c, $d, &mut e[5], &mut g[5]);
|
||||
$f(&$self, &a[6], $b, $c, $d, &mut e[6], &mut g[6]);
|
||||
$f(&$self, &a[7], $b, $c, $d, &mut e[7], &mut g[7]);
|
||||
});
|
||||
|
||||
let m = n - (n & 7);
|
||||
izip!($a[m..].iter(), $e[m..].iter_mut(), $g[m..].iter_mut()).for_each(
|
||||
|(a, e, g)| {
|
||||
$f(&$self, a, $b, $c, $d, e, g);
|
||||
},
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
izip!($a.iter(), $e.iter_mut(), $g.iter_mut()).for_each(|(a, e, g)| {
|
||||
$f(&$self, a, $b, $c, $d, e, g);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,445 +0,0 @@
|
||||
pub mod barrett;
|
||||
pub mod impl_u64;
|
||||
pub mod montgomery;
|
||||
pub mod prime;
|
||||
|
||||
pub type REDUCEMOD = u8;
|
||||
|
||||
pub const NONE: REDUCEMOD = 0;
|
||||
pub const ONCE: REDUCEMOD = 1;
|
||||
pub const TWICE: REDUCEMOD = 2;
|
||||
pub const FOURTIMES: REDUCEMOD = 3;
|
||||
pub const BARRETT: REDUCEMOD = 4;
|
||||
pub const BARRETTLAZY: REDUCEMOD = 5;
|
||||
|
||||
pub trait WordOps<O> {
|
||||
fn log2(self) -> usize;
|
||||
fn reverse_bits_msb(self, n: u32) -> O;
|
||||
fn mask(self) -> O;
|
||||
}
|
||||
|
||||
impl WordOps<u64> for u64 {
|
||||
#[inline(always)]
|
||||
fn log2(self) -> usize {
|
||||
(u64::BITS - (self - 1).leading_zeros()) as _
|
||||
}
|
||||
#[inline(always)]
|
||||
fn reverse_bits_msb(self, n: u32) -> u64 {
|
||||
self.reverse_bits() >> (usize::BITS - n)
|
||||
}
|
||||
#[inline(always)]
|
||||
fn mask(self) -> u64 {
|
||||
(1 << self.log2()) - 1
|
||||
}
|
||||
}
|
||||
|
||||
impl WordOps<usize> for usize {
|
||||
#[inline(always)]
|
||||
fn log2(self) -> usize {
|
||||
(usize::BITS - (self - 1).leading_zeros()) as _
|
||||
}
|
||||
#[inline(always)]
|
||||
fn reverse_bits_msb(self, n: u32) -> usize {
|
||||
self.reverse_bits() >> (usize::BITS - n)
|
||||
}
|
||||
#[inline(always)]
|
||||
fn mask(self) -> usize {
|
||||
(1 << self.log2()) - 1
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ReduceOnce<O> {
|
||||
/// Assigns self-q to self if self >= q in constant time.
|
||||
/// User must ensure that 2q fits in O.
|
||||
fn reduce_once_constant_time_assign(&mut self, q: O);
|
||||
/// Returns self-q if self >= q else self in constant time.
|
||||
/// /// User must ensure that 2q fits in O.
|
||||
fn reduce_once_constant_time(&self, q: O) -> O;
|
||||
/// Assigns self-q to self if self >= q.
|
||||
/// /// User must ensure that 2q fits in O.
|
||||
fn reduce_once_assign(&mut self, q: O);
|
||||
/// Returns self-q if self >= q else self.
|
||||
/// /// User must ensure that 2q fits in O.
|
||||
fn reduce_once(&self, q: O) -> O;
|
||||
}
|
||||
|
||||
pub trait ScalarOperations<O> {
|
||||
// Applies a parameterized modular reduction.
|
||||
fn sa_reduce_into_sa<const REDUCE: REDUCEMOD>(&self, x: &mut O);
|
||||
|
||||
// Assigns a + b to c.
|
||||
fn sa_add_sb_into_sc<const REDUCE: REDUCEMOD>(&self, a: &O, b: &O, c: &mut O);
|
||||
|
||||
// Assigns a + b to b.
|
||||
fn sa_add_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);
|
||||
|
||||
// Assigns a - b to c.
|
||||
fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &O,
|
||||
b: &O,
|
||||
c: &mut O,
|
||||
);
|
||||
|
||||
// Assigns a - b to b.
|
||||
fn sa_sub_sb_into_sb<const SARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);
|
||||
|
||||
// Assigns a - b to a.
|
||||
fn sa_sub_sb_into_sa<const SARANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &O, a: &mut O);
|
||||
|
||||
// Assigns -a to a.
|
||||
fn sa_neg_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut O);
|
||||
|
||||
// Assigns -a to b.
|
||||
fn sa_neg_into_sb<const SARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut O);
|
||||
|
||||
// Assigns a * 2^64 to a.
|
||||
fn sa_prepare_montgomery_into_sa<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &mut montgomery::Montgomery<O>,
|
||||
);
|
||||
|
||||
// Assigns a * 2^64 to b.
|
||||
fn sa_prepare_montgomery_into_sb<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &O,
|
||||
b: &mut montgomery::Montgomery<O>,
|
||||
);
|
||||
|
||||
// Assigns a * b to c.
|
||||
fn sa_mul_sb_montgomery_into_sc<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &O,
|
||||
b: &montgomery::Montgomery<O>,
|
||||
c: &mut O,
|
||||
);
|
||||
|
||||
// Assigns a * b + c to c.
|
||||
fn sa_mul_sb_montgomery_add_sc_into_sc<const REDUCE1: REDUCEMOD, const REDUCE2: REDUCEMOD>(
|
||||
&self,
|
||||
a: &O,
|
||||
b: &montgomery::Montgomery<O>,
|
||||
c: &mut O,
|
||||
);
|
||||
|
||||
// Assigns a * b to b.
|
||||
fn sa_mul_sb_montgomery_into_sa<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &montgomery::Montgomery<O>,
|
||||
a: &mut O,
|
||||
);
|
||||
|
||||
// Assigns a * b to c.
|
||||
fn sa_mul_sb_barrett_into_sc<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &O,
|
||||
b: &barrett::Barrett<O>,
|
||||
c: &mut O,
|
||||
);
|
||||
|
||||
// Assigns a * b to a.
|
||||
fn sa_mul_sb_barrett_into_sa<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &barrett::Barrett<O>,
|
||||
a: &mut O,
|
||||
);
|
||||
|
||||
// Assigns (a + q - b) * c to d.
|
||||
fn sa_sub_sb_mul_sc_barrett_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &O,
|
||||
b: &O,
|
||||
c: &barrett::Barrett<O>,
|
||||
d: &mut O,
|
||||
);
|
||||
|
||||
// Assigns (a + q - b) * c to b.
|
||||
fn sa_sub_sb_mul_sc_barrett_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &u64,
|
||||
c: &barrett::Barrett<u64>,
|
||||
b: &mut u64,
|
||||
);
|
||||
|
||||
// Assigns (a + b) * c to a.
|
||||
fn sa_add_sb_mul_sc_barrett_into_sa<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &u64,
|
||||
c: &barrett::Barrett<u64>,
|
||||
a: &mut u64,
|
||||
);
|
||||
|
||||
// Assigns (a + b) * c to d.
|
||||
fn sa_add_sb_mul_sc_barrett_into_sd<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &u64,
|
||||
b: &u64,
|
||||
c: &barrett::Barrett<u64>,
|
||||
d: &mut u64,
|
||||
);
|
||||
|
||||
// Assigns (a - b + c) * d to e.
|
||||
fn sb_sub_sa_add_sc_mul_sd_barrett_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &u64,
|
||||
b: &u64,
|
||||
c: &u64,
|
||||
d: &barrett::Barrett<u64>,
|
||||
e: &mut u64,
|
||||
);
|
||||
|
||||
fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &u64,
|
||||
c: &u64,
|
||||
d: &barrett::Barrett<u64>,
|
||||
a: &mut u64,
|
||||
);
|
||||
|
||||
fn sa_rsh_sb_mask_sc_into_sa(&self, c: &usize, b: &u64, a: &mut u64);
|
||||
|
||||
fn sa_rsh_sb_mask_sc_into_sd(&self, a: &u64, b: &usize, c: &u64, d: &mut u64);
|
||||
|
||||
fn sa_rsh_sb_mask_sc_add_sd_into_sd(&self, a: &u64, b: &usize, c: &u64, d: &mut u64);
|
||||
|
||||
fn sa_signed_digit_into_sb<const CARRYOVERWRITE: bool, const BALANCED: bool>(
|
||||
&self,
|
||||
a: &u64,
|
||||
base: &u64,
|
||||
shift: &usize,
|
||||
mask: &u64,
|
||||
carry: &mut u64,
|
||||
b: &mut u64,
|
||||
);
|
||||
}
|
||||
|
||||
pub trait VectorOperations<O> {
|
||||
// Applies a parameterized modular reduction.
|
||||
fn va_reduce_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, x: &mut [O]);
|
||||
|
||||
// ADD
|
||||
// vec(c) <- vec(a) + vec(b).
|
||||
fn va_add_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
va: &[O],
|
||||
vb: &[O],
|
||||
vc: &mut [O],
|
||||
);
|
||||
|
||||
// vec(b) <- vec(a) + vec(b).
|
||||
fn va_add_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, a: &[O], b: &mut [O]);
|
||||
|
||||
// vec(c) <- vec(a) + scalar(b).
|
||||
fn va_add_sb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &O,
|
||||
c: &mut [O],
|
||||
);
|
||||
|
||||
// vec(b) <- vec(b) + scalar(a).
|
||||
fn va_add_sb_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, a: &O, b: &mut [O]);
|
||||
|
||||
// vec(b) <- vec(a) - vec(b).
|
||||
fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &mut [O],
|
||||
);
|
||||
|
||||
// vec(a) <- vec(a) - vec(b).
|
||||
fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &[O],
|
||||
a: &mut [O],
|
||||
);
|
||||
|
||||
// vec(c) <- vec(a) - vec(b).
|
||||
fn va_sub_vb_into_vc<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &[O],
|
||||
c: &mut [O],
|
||||
);
|
||||
|
||||
// vec(a) <- -vec(a).
|
||||
fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &mut [O],
|
||||
);
|
||||
|
||||
// vec(b) <- -vec(a).
|
||||
fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &mut [O],
|
||||
);
|
||||
|
||||
// vec(b) <- vec(a)
|
||||
fn va_prep_mont_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &mut [montgomery::Montgomery<O>],
|
||||
);
|
||||
|
||||
// vec(a) <- vec(a).
|
||||
fn va_prepare_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &mut [montgomery::Montgomery<O>],
|
||||
);
|
||||
|
||||
// vec(c) <- vec(a) * vec(b).
|
||||
fn va_mul_vb_montgomery_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &[montgomery::Montgomery<O>],
|
||||
c: &mut [O],
|
||||
);
|
||||
|
||||
// vec(c) <- vec(a) * vec(b) + vec(c).
|
||||
fn va_mul_vb_montgomery_add_vc_into_vc<
|
||||
const CHUNK: usize,
|
||||
const REDUCE1: REDUCEMOD,
|
||||
const REDUCE2: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
a: &[O],
|
||||
b: &[montgomery::Montgomery<O>],
|
||||
c: &mut [O],
|
||||
);
|
||||
|
||||
// vec(a) <- vec(a) * vec(b).
|
||||
fn va_mul_vb_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &[montgomery::Montgomery<O>],
|
||||
a: &mut [O],
|
||||
);
|
||||
|
||||
// vec(b) <- vec(a) * scalar(b).
|
||||
fn va_mul_sb_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &barrett::Barrett<u64>,
|
||||
a: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(c) <- vec(a) * scalar(b).
|
||||
fn va_mul_sb_barrett_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &[u64],
|
||||
b: &barrett::Barrett<u64>,
|
||||
c: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(d) <- (vec(a) + VBRANGE * q - vec(b)) * scalar(c).
|
||||
fn va_sub_vb_mul_sc_barrett_into_vd<
|
||||
const CHUNK: usize,
|
||||
const VBRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
a: &[u64],
|
||||
b: &[u64],
|
||||
c: &barrett::Barrett<u64>,
|
||||
d: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(b) <- (vec(a) + VBRANGE * q - vec(b)) * scalar(c).
|
||||
fn va_sub_vb_mul_sc_barrett_into_vb<
|
||||
const CHUNK: usize,
|
||||
const VBRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
a: &[u64],
|
||||
c: &barrett::Barrett<u64>,
|
||||
b: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(c) <- (vec(a) + scalar(b)) * scalar(c).
|
||||
fn va_add_sb_mul_sc_barrett_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
va: &[u64],
|
||||
sb: &u64,
|
||||
sc: &barrett::Barrett<u64>,
|
||||
vd: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(a) <- (vec(a) + scalar(b)) * scalar(c).
|
||||
fn va_add_sb_mul_sc_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
sb: &u64,
|
||||
sc: &barrett::Barrett<u64>,
|
||||
va: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
|
||||
fn vb_sub_va_add_sc_mul_sd_barrett_into_ve<
|
||||
const CHUNK: usize,
|
||||
const VBRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
va: &[u64],
|
||||
vb: &[u64],
|
||||
sc: &u64,
|
||||
sd: &barrett::Barrett<u64>,
|
||||
ve: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(e).
|
||||
fn vb_sub_va_add_sc_mul_sd_barrett_into_va<
|
||||
const CHUNK: usize,
|
||||
const VBRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
vb: &[u64],
|
||||
sc: &u64,
|
||||
sd: &barrett::Barrett<u64>,
|
||||
va: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(a) <- (vec(a)>>scalar(b)) & scalar(c).
|
||||
fn va_rsh_sb_mask_sc_into_va<const CHUNK: usize>(&self, sb: &usize, sc: &u64, va: &mut [u64]);
|
||||
|
||||
// vec(d) <- (vec(a)>>scalar(b)) & scalar(c).
|
||||
fn va_rsh_sb_mask_sc_into_vd<const CHUNK: usize>(
|
||||
&self,
|
||||
va: &[u64],
|
||||
sb: &usize,
|
||||
sc: &u64,
|
||||
vd: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(d) <- vec(d) + (vec(a)>>scalar(b)) & scalar(c).
|
||||
fn va_rsh_sb_mask_sc_add_vd_into_vd<const CHUNK: usize>(
|
||||
&self,
|
||||
va: &[u64],
|
||||
sb: &usize,
|
||||
sc: &u64,
|
||||
vd: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(c) <- i-th unsigned digit base 2^{sb} of vec(a).
|
||||
// vec(c) is ensured to be in the range [0, 2^{sb}-1[ with E[vec(c)] = 2^{sb}-1.
|
||||
fn va_ith_digit_unsigned_base_sb_into_vc<const CHUNK: usize>(
|
||||
&self,
|
||||
i: usize,
|
||||
va: &[u64],
|
||||
sb: &usize,
|
||||
vc: &mut [u64],
|
||||
);
|
||||
|
||||
// vec(c) <- i-th signed digit base 2^{w} of vec(a).
|
||||
// Reads the carry of the i-1-th iteration and write the carry on the i-th iteration on carry.
|
||||
// if i > 0, carry of the i-1th iteration must be provided.
|
||||
// if BALANCED: vec(c) is ensured to be [-2^{sb-1}, 2^{sb-1}[ with E[vec(c)] = 0, else E[vec(c)] = -0.5
|
||||
fn va_ith_digit_signed_base_sb_into_vc<const CHUNK: usize, const BALANCED: bool>(
|
||||
&self,
|
||||
i: usize,
|
||||
va: &[u64],
|
||||
sb: &usize,
|
||||
carry: &mut [u64],
|
||||
vc: &mut [u64],
|
||||
);
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct Barrett<O>(pub O, pub O);
|
||||
|
||||
impl<O> Barrett<O> {
|
||||
#[inline(always)]
|
||||
pub fn value(&self) -> &O {
|
||||
&self.0
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn quotient(&self) -> &O {
|
||||
&self.1
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct BarrettPrecomp<O> {
|
||||
pub q: O,
|
||||
pub two_q: O,
|
||||
pub four_q: O,
|
||||
pub lo: O,
|
||||
pub hi: O,
|
||||
pub one: Barrett<O>,
|
||||
}
|
||||
|
||||
impl<O> BarrettPrecomp<O> {
|
||||
#[inline(always)]
|
||||
pub fn value_hi(&self) -> &O {
|
||||
&self.hi
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn value_lo(&self) -> &O {
|
||||
&self.lo
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
use crate::modulus::barrett::{Barrett, BarrettPrecomp};
|
||||
use crate::modulus::ReduceOnce;
|
||||
use crate::modulus::{BARRETT, BARRETTLAZY, FOURTIMES, NONE, ONCE, REDUCEMOD, TWICE};
|
||||
|
||||
use num_bigint::BigUint;
|
||||
use num_traits::cast::ToPrimitive;
|
||||
|
||||
impl BarrettPrecomp<u64> {
|
||||
pub fn new(q: u64) -> BarrettPrecomp<u64> {
|
||||
let big_r: BigUint =
|
||||
(BigUint::from(1 as usize) << ((u64::BITS << 1) as usize)) / BigUint::from(q);
|
||||
let lo: u64 = (&big_r & BigUint::from(u64::MAX)).to_u64().unwrap();
|
||||
let hi: u64 = (big_r >> u64::BITS).to_u64().unwrap();
|
||||
let mut precomp: BarrettPrecomp<u64> = Self {
|
||||
q: q,
|
||||
two_q: q << 1,
|
||||
four_q: q << 2,
|
||||
lo: lo,
|
||||
hi: hi,
|
||||
one: Barrett(0, 0),
|
||||
};
|
||||
precomp.one = precomp.prepare(1);
|
||||
precomp
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn one(&self) -> Barrett<u64> {
|
||||
self.one
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn reduce_assign<const REDUCE: REDUCEMOD>(&self, x: &mut u64) {
|
||||
match REDUCE {
|
||||
NONE => {}
|
||||
ONCE => x.reduce_once_assign(self.q),
|
||||
TWICE => x.reduce_once_assign(self.two_q),
|
||||
FOURTIMES => x.reduce_once_assign(self.four_q),
|
||||
BARRETT => {
|
||||
let (_, mhi) = x.widening_mul(self.hi);
|
||||
*x = *x - mhi.wrapping_mul(self.q);
|
||||
x.reduce_once_assign(self.q);
|
||||
}
|
||||
BARRETTLAZY => {
|
||||
let (_, mhi) = x.widening_mul(self.hi);
|
||||
*x = *x - mhi.wrapping_mul(self.q)
|
||||
}
|
||||
_ => unreachable!("invalid REDUCE argument"),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn reduce<const REDUCE: REDUCEMOD>(&self, x: &u64) -> u64 {
|
||||
let mut r = *x;
|
||||
self.reduce_assign::<REDUCE>(&mut r);
|
||||
r
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn prepare(&self, v: u64) -> Barrett<u64> {
|
||||
debug_assert!(v < self.q);
|
||||
let quotient: u64 = (((v as u128) << 64) / self.q as u128) as _;
|
||||
Barrett(v, quotient)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn mul_external<const REDUCE: REDUCEMOD>(&self, lhs: &Barrett<u64>, rhs: &u64) -> u64 {
|
||||
let mut r: u64 = *rhs;
|
||||
self.mul_external_assign::<REDUCE>(lhs, &mut r);
|
||||
r
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn mul_external_assign<const REDUCE: REDUCEMOD>(&self, lhs: &Barrett<u64>, rhs: &mut u64) {
|
||||
let t: u64 = ((*lhs.quotient() as u128 * *rhs as u128) >> 64) as _;
|
||||
*rhs = (rhs.wrapping_mul(*lhs.value())).wrapping_sub(self.q.wrapping_mul(t));
|
||||
self.reduce_assign::<REDUCE>(rhs);
|
||||
}
|
||||
}
|
||||
@@ -1,143 +0,0 @@
|
||||
use crate::modulus::prime;
|
||||
|
||||
use prime::Prime;
|
||||
use primality_test::is_prime;
|
||||
|
||||
impl NTTFriendlyPrimesGenerator<u64> {
|
||||
|
||||
pub fn new(bit_size: u64, nth_root: u64) -> Self{
|
||||
let mut check_next_prime: bool = true;
|
||||
let mut check_prev_prime: bool = true;
|
||||
let next_prime = (1<<bit_size) + 1;
|
||||
let mut prev_prime = next_prime;
|
||||
|
||||
if next_prime > nth_root.wrapping_neg(){
|
||||
check_next_prime = false;
|
||||
}
|
||||
|
||||
if prev_prime < nth_root{
|
||||
check_prev_prime = false
|
||||
}
|
||||
|
||||
prev_prime -= nth_root;
|
||||
|
||||
Self{
|
||||
size: bit_size as f64,
|
||||
check_next_prime,
|
||||
check_prev_prime,
|
||||
nth_root,
|
||||
next_prime,
|
||||
prev_prime,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next_upstream_primes(&mut self, k: usize) -> Vec<Prime>{
|
||||
let mut primes: Vec<Prime> = Vec::with_capacity(k);
|
||||
for i in 0..k{
|
||||
primes.push(self.next_upstream_prime())
|
||||
}
|
||||
primes
|
||||
}
|
||||
|
||||
pub fn next_downstream_primes(&mut self, k: usize) -> Vec<Prime>{
|
||||
let mut primes: Vec<Prime> = Vec::with_capacity(k);
|
||||
for i in 0..k{
|
||||
primes.push(self.next_downstream_prime())
|
||||
}
|
||||
primes
|
||||
}
|
||||
|
||||
pub fn next_alternating_primes(&mut self, k: usize) -> Vec<Prime>{
|
||||
let mut primes: Vec<Prime> = Vec::with_capacity(k);
|
||||
for i in 0..k{
|
||||
primes.push(self.next_alternating_prime())
|
||||
}
|
||||
primes
|
||||
}
|
||||
|
||||
pub fn next_upstream_prime(&mut self) -> Prime{
|
||||
loop {
|
||||
if self.check_next_prime{
|
||||
if (self.next_prime as f64).log2() - self.size >= 0.5 || self.next_prime > 0xffff_ffff_ffff_ffff-self.nth_root{
|
||||
self.check_next_prime = false;
|
||||
panic!("prime list for upstream primes is exhausted (overlap with next bit-size or prime > 2^64)");
|
||||
}
|
||||
}else{
|
||||
if is_prime(self.next_prime) {
|
||||
let prime = Prime::new_unchecked(self.next_prime);
|
||||
self.next_prime += self.nth_root;
|
||||
return prime
|
||||
}
|
||||
self.next_prime += self.nth_root;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next_downstream_prime(&mut self) -> Prime{
|
||||
loop {
|
||||
if self.size - (self.prev_prime as f64).log2() >= 0.5 || self.prev_prime < self.nth_root{
|
||||
self.check_next_prime = false;
|
||||
panic!("prime list for downstream primes is exhausted (overlap with previous bit-size or prime < nth_root)")
|
||||
}else{
|
||||
if is_prime(self.prev_prime){
|
||||
let prime = Prime::new_unchecked(self.next_prime);
|
||||
self.prev_prime -= self.nth_root;
|
||||
return prime
|
||||
}
|
||||
self.prev_prime -= self.nth_root;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next_alternating_prime(&mut self) -> Prime{
|
||||
loop {
|
||||
if !(self.check_next_prime || self.check_prev_prime){
|
||||
panic!("prime list for upstream and downstream prime is exhausted for the (overlap with previous/next bit-size or NthRoot > prime > 2^64)")
|
||||
}
|
||||
|
||||
if self.check_next_prime{
|
||||
if (self.next_prime as f64).log2() - self.size >= 0.5 || self.next_prime > 0xffff_ffff_ffff_ffff-self.nth_root{
|
||||
self.check_next_prime = false;
|
||||
}else{
|
||||
if is_prime(self.next_prime){
|
||||
let prime = Prime::new_unchecked(self.next_prime);
|
||||
self.next_prime += self.nth_root;
|
||||
return prime
|
||||
}
|
||||
self.next_prime += self.nth_root;
|
||||
}
|
||||
}
|
||||
|
||||
if self.check_prev_prime {
|
||||
if self.size - (self.prev_prime as f64).log2() >= 0.5 || self.prev_prime < self.nth_root{
|
||||
self.check_prev_prime = false;
|
||||
}else{
|
||||
if is_prime(self.prev_prime){
|
||||
let prime = Prime::new_unchecked(self.prev_prime);
|
||||
self.prev_prime -= self.nth_root;
|
||||
return prime
|
||||
}
|
||||
self.prev_prime -= self.nth_root;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::modulus::prime_generator;

    /// Every prime produced by the generator must be NTT-friendly,
    /// i.e. congruent to 1 modulo the requested nth root of unity.
    #[test]
    fn prime_generation() {
        let nth_root: u64 = 1 << 16;
        let mut generator =
            prime_generator::NTTFriendlyPrimesGenerator::new(30, nth_root);

        let primes = generator.next_alternating_primes(10);
        println!("{:?}", primes);
        primes
            .iter()
            .for_each(|prime| assert!(prime.q() % nth_root == 1));
    }
}
|
||||
@@ -1,32 +0,0 @@
|
||||
pub mod barrett;
|
||||
pub mod montgomery;
|
||||
pub mod operations;
|
||||
pub mod prime;
|
||||
|
||||
use crate::modulus::ReduceOnce;
|
||||
|
||||
impl ReduceOnce<u64> for u64 {
|
||||
#[inline(always)]
|
||||
fn reduce_once_constant_time_assign(&mut self, q: u64) {
|
||||
debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
|
||||
*self -= (q.wrapping_sub(*self) >> 63) * q;
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn reduce_once_constant_time(&self, q: u64) -> u64 {
|
||||
debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
|
||||
self - (q.wrapping_sub(*self) >> 63) * q
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn reduce_once_assign(&mut self, q: u64) {
|
||||
debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
|
||||
*self = *self.min(&mut self.wrapping_sub(q))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn reduce_once(&self, q: u64) -> u64 {
|
||||
debug_assert!(q < 0x8000000000000000, "2q >= 2^64");
|
||||
*self.min(&mut self.wrapping_sub(q))
|
||||
}
|
||||
}
|
||||
@@ -1,214 +0,0 @@
|
||||
use crate::modulus::barrett::BarrettPrecomp;
|
||||
use crate::modulus::montgomery::{Montgomery, MontgomeryPrecomp};
|
||||
use crate::modulus::ReduceOnce;
|
||||
use crate::modulus::{ONCE, REDUCEMOD};
|
||||
extern crate test;
|
||||
|
||||
/// MontgomeryPrecomp is a set of methods implemented for MontgomeryPrecomp<u64>
|
||||
/// enabling Montgomery arithmetic over u64 values.
|
||||
/// MontgomeryPrecomp is a set of methods implemented for MontgomeryPrecomp<u64>
/// enabling Montgomery arithmetic over u64 values (radix R = 2^64).
impl MontgomeryPrecomp<u64> {
    /// Returns a new instance of MontgomeryPrecomp<u64>.
    /// This method will fail if gcd(q, 2^64) != 1 (i.e. if q is even).
    #[inline(always)]
    pub fn new(q: u64) -> MontgomeryPrecomp<u64> {
        assert!(
            q & 1 != 0,
            "Invalid argument: gcd(q={}, radix=2^64) != 1",
            q
        );
        // q^-1 mod 2^64 computed as q^(2^63 - 1): the odd residues mod 2^64
        // form a group whose exponent divides 2^62, so q^(2^63) == 1 and
        // q * q^(2^63 - 1) == 1 (mod 2^64). Each iteration multiplies in the
        // next repeated square of q.
        let mut q_inv: u64 = 1;
        let mut q_pow = q;
        for _i in 0..63 {
            q_inv = q_inv.wrapping_mul(q_pow);
            q_pow = q_pow.wrapping_mul(q_pow);
        }
        let mut precomp: MontgomeryPrecomp<u64> = Self {
            q: q,
            two_q: q << 1,
            four_q: q << 2,
            barrett: BarrettPrecomp::new(q),
            q_inv: q_inv,
            one: 0,       // placeholder, set just below via prepare()
            minus_one: 0, // placeholder, set just below
        };

        // 1 and -1 lifted into the Montgomery domain: 2^64 mod q and q - (2^64 mod q).
        precomp.one = precomp.prepare::<ONCE>(1);
        precomp.minus_one = q - precomp.one;

        precomp
    }

    /// Returns 2^64 mod q as a Montgomery<u64> (the Montgomery-domain 1).
    #[inline(always)]
    pub fn one(&self) -> Montgomery<u64> {
        self.one
    }

    /// Returns (q-1) * 2^64 mod q as a Montgomery<u64> (the Montgomery-domain -1).
    #[inline(always)]
    pub fn minus_one(&self) -> Montgomery<u64> {
        self.minus_one
    }

    /// Applies a modular reduction on x based on REDUCE:
    /// - LAZY: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - FULL: maps x to x mod q using Barrett reduction.
    #[inline(always)]
    pub fn reduce<const REDUCE: REDUCEMOD>(&self, x: u64) -> u64 {
        let mut r: u64 = x;
        self.reduce_assign::<REDUCE>(&mut r);
        r
    }

    /// Applies a modular reduction on x based on REDUCE:
    /// - LAZY: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - FULL: maps x to x mod q using Barrett reduction.
    #[inline(always)]
    pub fn reduce_assign<const REDUCE: REDUCEMOD>(&self, x: &mut u64) {
        // Delegates to the Barrett precomputation, which dispatches on REDUCE.
        self.barrett.reduce_assign::<REDUCE>(x);
    }

    /// Returns lhs * 2^64 mod q as a Montgomery<u64>.
    #[inline(always)]
    pub fn prepare<const REDUCE: REDUCEMOD>(&self, lhs: u64) -> Montgomery<u64> {
        let mut rhs: u64 = 0;
        self.prepare_assign::<REDUCE>(lhs, &mut rhs);
        rhs
    }

    /// Assigns lhs * 2^64 mod q to rhs.
    #[inline(always)]
    pub fn prepare_assign<const REDUCE: REDUCEMOD>(&self, lhs: u64, rhs: &mut Montgomery<u64>) {
        // Uses the Barrett constant (an approximation of 2^128 / q) to compute
        // floor(lhs * 2^128 / q) pieces, then folds them back with -q so that
        // the result is congruent to lhs * 2^64 mod q.
        let (_, mhi) = lhs.widening_mul(*self.barrett.value_lo());
        *rhs = (lhs.wrapping_mul(*self.barrett.value_hi()).wrapping_add(mhi))
            .wrapping_mul(self.q)
            .wrapping_neg();
        self.reduce_assign::<REDUCE>(rhs);
    }

    /// Returns lhs * (2^64)^-1 mod q as a u64 (leaves the Montgomery domain).
    #[inline(always)]
    pub fn unprepare<const REDUCE: REDUCEMOD>(&self, lhs: Montgomery<u64>) -> u64 {
        let mut rhs = 0u64;
        self.unprepare_assign::<REDUCE>(lhs, &mut rhs);
        rhs
    }

    /// Assigns lhs * (2^64)^-1 mod q to rhs.
    #[inline(always)]
    pub fn unprepare_assign<const REDUCE: REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: &mut u64) {
        // REDC step with a zero high half: r = hi(q * (lhs * q_inv mod 2^64)),
        // and lhs * 2^-64 == q - r (mod q).
        let (_, r) = self.q.widening_mul(lhs.wrapping_mul(self.q_inv));
        *rhs = self.reduce::<REDUCE>(self.q.wrapping_sub(r));
    }

    /// Returns lhs * rhs * (2^{64})^-1 mod q.
    #[inline(always)]
    pub fn mul_external<const REDUCE: REDUCEMOD>(&self, lhs: Montgomery<u64>, rhs: u64) -> u64 {
        let mut r: u64 = rhs;
        self.mul_external_assign::<REDUCE>(lhs, &mut r);
        r
    }

    /// Assigns lhs * rhs * (2^{64})^-1 mod q to rhs.
    #[inline(always)]
    pub fn mul_external_assign<const REDUCE: REDUCEMOD>(
        &self,
        lhs: Montgomery<u64>,
        rhs: &mut u64,
    ) {
        // Standard Montgomery REDC on the 128-bit product (mlo, mhi):
        // m = mlo * q_inv mod 2^64; result = mhi - hi(q * m) + q, which lies
        // in [0, 2q) before the final REDUCE dispatch.
        let (mlo, mhi) = lhs.widening_mul(*rhs);
        let (_, hhi) = self.q.widening_mul(mlo.wrapping_mul(self.q_inv));
        *rhs = self.reduce::<REDUCE>(mhi.wrapping_sub(hhi).wrapping_add(self.q));
    }

    /// Returns lhs * rhs * (2^{64})^-1 mod q in range [0, 2q-1].
    /// Both operands are in the Montgomery domain, and so is the result.
    #[inline(always)]
    pub fn mul_internal<const REDUCE: REDUCEMOD>(
        &self,
        lhs: Montgomery<u64>,
        rhs: Montgomery<u64>,
    ) -> Montgomery<u64> {
        // Same arithmetic as mul_external; only the domain bookkeeping differs.
        self.mul_external::<REDUCE>(lhs, rhs)
    }

    /// Assigns lhs * rhs * (2^{64})^-1 mod q to rhs (both in the Montgomery domain).
    #[inline(always)]
    pub fn mul_internal_assign<const REDUCE: REDUCEMOD>(
        &self,
        lhs: Montgomery<u64>,
        rhs: &mut Montgomery<u64>,
    ) {
        self.mul_external_assign::<REDUCE>(lhs, rhs);
    }

    /// Returns lhs + rhs without any reduction (caller manages the range).
    #[inline(always)]
    pub fn add_internal(&self, lhs: Montgomery<u64>, rhs: Montgomery<u64>) -> Montgomery<u64> {
        rhs + lhs
    }

    /// Assigns lhs + rhs to rhs (lazy: no reduction).
    #[inline(always)]
    pub fn add_internal_lazy_assign(&self, lhs: Montgomery<u64>, rhs: &mut Montgomery<u64>) {
        *rhs += lhs
    }

    /// Assigns lhs + rhs - q if (lhs + rhs) >= q to rhs.
    /// NOTE(review): the LAZY const parameter is currently unused — the
    /// reduction is always applied; confirm intended semantics.
    #[inline(always)]
    pub fn add_internal_reduce_once_assign<const LAZY: bool>(
        &self,
        lhs: Montgomery<u64>,
        rhs: &mut Montgomery<u64>,
    ) {
        self.add_internal_lazy_assign(lhs, rhs);
        rhs.reduce_once_assign(self.q);
    }

    /// Returns (x^exponent) * 2^64 mod q (square-and-multiply; x is in the
    /// Montgomery domain and so is the result).
    #[inline(always)]
    pub fn pow(&self, x: Montgomery<u64>, exponent: u64) -> Montgomery<u64> {
        let mut y: Montgomery<u64> = self.one();
        let mut x_mut: Montgomery<u64> = x;
        let mut i: u64 = exponent;
        while i > 0 {
            // Multiply the accumulator by the current square when the bit is set.
            if i & 1 == 1 {
                self.mul_internal_assign::<ONCE>(x_mut, &mut y);
            }
            self.mul_internal_assign::<ONCE>(x_mut, &mut x_mut);
            i >>= 1;
        }

        // Final conditional subtraction brings the accumulator into [0, q).
        y.reduce_once_assign(self.q);
        y
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::modulus::montgomery;
    use test::Bencher;

    /// Cross-checks Montgomery external multiplication against a
    /// schoolbook u128 modular product.
    #[test]
    fn test_mul_external() {
        let q: u64 = 0x1fffffffffe00001;
        let precomp = montgomery::MontgomeryPrecomp::new(q);
        let x: u64 = 0x5f876e514845cc8b;
        let y: u64 = 0xad726f98f24a761a;
        let y_mont = precomp.prepare::<ONCE>(y);
        let expected = (x as u128 * y as u128 % q as u128) as u64;
        assert!(precomp.mul_external::<ONCE>(y_mont, x) == expected);
    }

    /// Micro-benchmark for the in-place external multiplication.
    #[bench]
    fn bench_mul_external(b: &mut Bencher) {
        let q: u64 = 0x1fffffffffe00001;
        let precomp = montgomery::MontgomeryPrecomp::new(q);
        let mut x: u64 = 0x5f876e514845cc8b;
        let y: u64 = 0xad726f98f24a761a;
        let y_mont = precomp.prepare::<ONCE>(y);
        b.iter(|| precomp.mul_external_assign::<ONCE>(y_mont, &mut x));
    }
}
|
||||
@@ -1,707 +0,0 @@
|
||||
use crate::modulus::barrett::Barrett;
|
||||
use crate::modulus::montgomery::Montgomery;
|
||||
use crate::modulus::prime::Prime;
|
||||
use crate::modulus::{ScalarOperations, VectorOperations};
|
||||
use crate::modulus::{NONE, REDUCEMOD};
|
||||
use crate::{
|
||||
apply_ssv, apply_sv, apply_v, apply_vsssvv, apply_vssv, apply_vsv, apply_vv, apply_vvssv,
|
||||
apply_vvsv, apply_vvv,
|
||||
};
|
||||
use itertools::izip;
|
||||
|
||||
/// Scalar (single-word) modular arithmetic over the prime q, used as the
/// element-wise kernels behind the vector operations. The SBRANGE/VBRANGE
/// const parameters (1, 2 or 4) declare the caller's guarantee that the
/// subtrahend lies in [0, q), [0, 2q) or [0, 4q) respectively, so the
/// borrow can be avoided by adding the matching multiple of q.
impl ScalarOperations<u64> for Prime<u64> {
    /// Applies a modular reduction on x based on REDUCE:
    /// - LAZY: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - TWO: subtracts 2q if x >= 2q.
    /// - FOUR: subtracts 4q if x >= 4q.
    /// - BARRETT: maps x to x mod q using Barrett reduction.
    /// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1].
    #[inline(always)]
    fn sa_reduce_into_sa<const REDUCE: REDUCEMOD>(&self, a: &mut u64) {
        self.montgomery.reduce_assign::<REDUCE>(a);
    }

    /// c <- a + b, reduced according to REDUCE.
    #[inline(always)]
    fn sa_add_sb_into_sc<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &u64, c: &mut u64) {
        *c = a.wrapping_add(*b);
        self.sa_reduce_into_sa::<REDUCE>(c);
    }

    /// b <- a + b, reduced according to REDUCE.
    #[inline(always)]
    fn sa_add_sb_into_sb<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
        *b = a.wrapping_add(*b);
        self.sa_reduce_into_sa::<REDUCE>(b);
    }

    /// c <- a - b, with SBRANGE * q added first to keep the subtraction
    /// non-negative (requires b < SBRANGE * q), then reduced per REDUCE.
    #[inline(always)]
    fn sa_sub_sb_into_sc<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &mut u64,
    ) {
        match SBRANGE {
            1 => *c = *a + self.q - *b,
            2 => *c = *a + self.two_q - *b,
            4 => *c = *a + self.four_q - *b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(c)
    }

    /// a <- a - b (same range contract as sa_sub_sb_into_sc).
    #[inline(always)]
    fn sa_sub_sb_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut u64) {
        match SBRANGE {
            1 => *a = *a + self.q - *b,
            2 => *a = *a + self.two_q - *b,
            4 => *a = *a + self.four_q - *b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(a)
    }

    /// b <- a - b (same range contract as sa_sub_sb_into_sc).
    #[inline(always)]
    fn sa_sub_sb_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
        match SBRANGE {
            1 => *b = *a + self.q - *b,
            2 => *b = *a + self.two_q - *b,
            4 => *b = *a + self.four_q - *b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(b)
    }

    /// a <- -a, computed as SBRANGE * q - a (requires a < SBRANGE * q).
    #[inline(always)]
    fn sa_neg_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut u64) {
        match SBRANGE {
            1 => *a = self.q - *a,
            2 => *a = self.two_q - *a,
            4 => *a = self.four_q - *a,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(a)
    }

    /// b <- -a, computed as SBRANGE * q - a (requires a < SBRANGE * q).
    #[inline(always)]
    fn sa_neg_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut u64) {
        match SBRANGE {
            1 => *b = self.q - *a,
            2 => *b = self.two_q - *a,
            4 => *b = self.four_q - *a,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.sa_reduce_into_sa::<REDUCE>(b)
    }

    /// a <- a * 2^64 mod q (lift into the Montgomery domain, in place).
    #[inline(always)]
    fn sa_prepare_montgomery_into_sa<const REDUCE: REDUCEMOD>(&self, a: &mut Montgomery<u64>) {
        *a = self.montgomery.prepare::<REDUCE>(*a);
    }

    /// b <- a * 2^64 mod q (lift into the Montgomery domain).
    #[inline(always)]
    fn sa_prepare_montgomery_into_sb<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &mut Montgomery<u64>,
    ) {
        self.montgomery.prepare_assign::<REDUCE>(*a, b);
    }

    /// c <- a * b * 2^-64 mod q (b is in the Montgomery domain).
    #[inline(always)]
    fn sa_mul_sb_montgomery_into_sc<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &Montgomery<u64>,
        c: &mut u64,
    ) {
        *c = self.montgomery.mul_external::<REDUCE>(*a, *b);
    }

    /// c <- c + a * b * 2^-64 mod q; REDUCE1 applies to the product,
    /// REDUCE2 to the accumulated sum.
    #[inline(always)]
    fn sa_mul_sb_montgomery_add_sc_into_sc<const REDUCE1: REDUCEMOD, const REDUCE2: REDUCEMOD>(
        &self,
        a: &u64,
        b: &Montgomery<u64>,
        c: &mut u64,
    ) {
        *c += self.montgomery.mul_external::<REDUCE1>(*a, *b);
        self.sa_reduce_into_sa::<REDUCE2>(c);
    }

    /// a <- a * b * 2^-64 mod q (b is in the Montgomery domain).
    #[inline(always)]
    fn sa_mul_sb_montgomery_into_sa<const REDUCE: REDUCEMOD>(
        &self,
        b: &Montgomery<u64>,
        a: &mut u64,
    ) {
        self.montgomery.mul_external_assign::<REDUCE>(*b, a);
    }

    /// c <- a * b mod q where b carries its Barrett precomputation.
    #[inline(always)]
    fn sa_mul_sb_barrett_into_sc<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &Barrett<u64>,
        c: &mut u64,
    ) {
        *c = self.barrett.mul_external::<REDUCE>(b, a);
    }

    /// a <- a * b mod q where b carries its Barrett precomputation.
    #[inline(always)]
    fn sa_mul_sb_barrett_into_sa<const REDUCE: REDUCEMOD>(&self, b: &Barrett<u64>, a: &mut u64) {
        self.barrett.mul_external_assign::<REDUCE>(b, a);
    }

    /// d <- (a - b) * c mod q; VBRANGE bounds b as in sa_sub_sb_into_sc.
    #[inline(always)]
    fn sa_sub_sb_mul_sc_barrett_into_sd<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &Barrett<u64>,
        d: &mut u64,
    ) {
        match VBRANGE {
            1 => *d = a + self.q - b,
            2 => *d = a + self.two_q - b,
            4 => *d = a + self.four_q - b,
            _ => unreachable!("invalid SBRANGE argument"),
        }
        self.barrett.mul_external_assign::<REDUCE>(c, d);
    }

    /// b <- (a - b) * c mod q; SBRANGE bounds b as in sa_sub_sb_into_sb.
    #[inline(always)]
    fn sa_sub_sb_mul_sc_barrett_into_sb<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        c: &Barrett<u64>,
        b: &mut u64,
    ) {
        // NONE: the intermediate difference stays unreduced; the Barrett
        // multiply performs the final reduction.
        self.sa_sub_sb_into_sb::<SBRANGE, NONE>(a, b);
        self.barrett.mul_external_assign::<REDUCE>(c, b);
    }

    /// d <- (a + b) * c mod q.
    #[inline(always)]
    fn sa_add_sb_mul_sc_barrett_into_sd<const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &Barrett<u64>,
        d: &mut u64,
    ) {
        *d = self.barrett.mul_external::<REDUCE>(c, &(*a + b));
    }

    /// a <- (a + b) * c mod q.
    #[inline(always)]
    fn sa_add_sb_mul_sc_barrett_into_sa<const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        c: &Barrett<u64>,
        a: &mut u64,
    ) {
        *a = self.barrett.mul_external::<REDUCE>(c, &(*a + b));
    }

    /// e <- (b + c - a) * d mod q; SBRANGE bounds a.
    #[inline(always)]
    fn sb_sub_sa_add_sc_mul_sd_barrett_into_se<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &u64,
        b: &u64,
        c: &u64,
        d: &Barrett<u64>,
        e: &mut u64,
    ) {
        self.sa_sub_sb_into_sc::<SBRANGE, NONE>(&(b + c), a, e);
        self.barrett.mul_external_assign::<REDUCE>(d, e);
    }

    /// a <- (b + c - a) * d mod q; SBRANGE bounds a.
    #[inline(always)]
    fn sb_sub_sa_add_sc_mul_sd_barrett_into_sa<const SBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        c: &u64,
        d: &Barrett<u64>,
        a: &mut u64,
    ) {
        self.sa_sub_sb_into_sb::<SBRANGE, NONE>(&(b + c), a);
        self.barrett.mul_external_assign::<REDUCE>(d, a);
    }

    /// a <- (a >> b) & c (plain bit extraction, no modular arithmetic).
    #[inline(always)]
    fn sa_rsh_sb_mask_sc_into_sa(&self, b: &usize, c: &u64, a: &mut u64) {
        *a = (*a >> b) & c
    }

    /// d <- (a >> b) & c.
    #[inline(always)]
    fn sa_rsh_sb_mask_sc_into_sd(&self, a: &u64, b: &usize, c: &u64, d: &mut u64) {
        *d = (*a >> b) & c
    }

    /// d <- d + ((a >> b) & c).
    #[inline(always)]
    fn sa_rsh_sb_mask_sc_add_sd_into_sd(&self, a: &u64, b: &usize, c: &u64, d: &mut u64) {
        *d += (*a >> b) & c
    }

    /// b <- the signed digit of a at the given shift, in base `base = 2^shift`,
    /// propagating the running carry:
    /// - CARRYOVERWRITE: the extracted digit replaces the carry instead of
    ///   accumulating into it (used for the first digit).
    /// - BALANCED: digits are mapped into a balanced representation; ties at
    ///   base/2 are broken by the parity bit of a.
    /// On exit, b holds the digit encoded mod q (negative digits become
    /// carry - base + q) and carry holds the outgoing carry (0 or 1).
    #[inline(always)]
    fn sa_signed_digit_into_sb<const CARRYOVERWRITE: bool, const BALANCED: bool>(
        &self,
        a: &u64,
        base: &u64,
        shift: &usize,
        mask: &u64,
        carry: &mut u64,
        b: &mut u64,
    ) {
        if CARRYOVERWRITE {
            self.sa_rsh_sb_mask_sc_into_sd(a, shift, mask, carry);
        } else {
            self.sa_rsh_sb_mask_sc_add_sd_into_sd(a, shift, mask, carry);
        }

        // c = 1 when the digit must be re-centered (digit >= base/2),
        // i.e. when a borrow must be propagated to the next digit.
        let c: u64 = if BALANCED && *carry == base >> 1 {
            a & 1
        } else {
            ((*carry | (*carry << 1)) >> shift) & 1
        };

        // Negative digits are represented as digit - base, folded mod q.
        *b = *carry + (self.q - base) * c;
        *carry = c;
    }
}
|
||||
|
||||
/// Vectorized counterparts of the scalar operations: each method maps the
/// matching sa_* kernel over slices via the apply_* macros, processing CHUNK
/// elements per unrolled step. Naming mirrors the scalar layer (va/vb/… are
/// slices, sa/sb/… are scalars); range and REDUCE contracts are those of the
/// underlying scalar kernel.
impl VectorOperations<u64> for Prime<u64> {
    /// Applies a modular reduction on x based on REDUCE:
    /// - LAZY: no modular reduction.
    /// - ONCE: subtracts q if x >= q.
    /// - TWO: subtracts 2q if x >= 2q.
    /// - FOUR: subtracts 4q if x >= 4q.
    /// - BARRETT: maps x to x mod q using Barrett reduction.
    /// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1].
    #[inline(always)]
    fn va_reduce_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(&self, a: &mut [u64]) {
        apply_v!(self, Self::sa_reduce_into_sa::<REDUCE>, a, CHUNK);
    }

    /// vec(c) <- vec(a) + vec(b).
    #[inline(always)]
    fn va_add_vb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_add_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    /// vec(b) <- vec(a) + vec(b).
    #[inline(always)]
    fn va_add_vb_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_add_sb_into_sb::<REDUCE>, a, b, CHUNK);
    }

    /// vec(a) <- vec(a) + scalar(b).
    #[inline(always)]
    fn va_add_sb_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        a: &mut [u64],
    ) {
        apply_sv!(self, Self::sa_add_sb_into_sb::<REDUCE>, b, a, CHUNK);
    }

    /// vec(c) <- vec(a) + scalar(b).
    #[inline(always)]
    fn va_add_sb_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &u64,
        c: &mut [u64],
    ) {
        apply_vsv!(self, Self::sa_add_sb_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    /// vec(c) <- vec(a) - vec(b); VBRANGE bounds vec(b) (see scalar layer).
    #[inline(always)]
    fn va_sub_vb_into_vc<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_sub_sb_into_sc::<VBRANGE, REDUCE>, a, b, c, CHUNK);
    }

    /// vec(a) <- vec(a) - vec(b); VBRANGE bounds vec(b).
    #[inline(always)]
    fn va_sub_vb_into_va<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        b: &[u64],
        a: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_sub_sb_into_sa::<VBRANGE, REDUCE>, b, a, CHUNK);
    }

    /// vec(b) <- vec(a) - vec(b); VBRANGE bounds vec(b).
    #[inline(always)]
    fn va_sub_vb_into_vb<const CHUNK: usize, const VBRANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_sub_sb_into_sb::<VBRANGE, REDUCE>, a, b, CHUNK);
    }

    /// vec(a) <- -vec(a); VARANGE bounds vec(a).
    #[inline(always)]
    fn va_neg_into_va<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &mut [u64],
    ) {
        apply_v!(self, Self::sa_neg_into_sa::<VARANGE, REDUCE>, a, CHUNK);
    }

    /// vec(b) <- -vec(a); VARANGE bounds vec(a).
    #[inline(always)]
    fn va_neg_into_vb<const CHUNK: usize, const VARANGE: u8, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_neg_into_sb::<VARANGE, REDUCE>, a, b, CHUNK);
    }

    /// vec(b) <- vec(a) * 2^64 mod q (lift into the Montgomery domain).
    #[inline(always)]
    fn va_prep_mont_into_vb<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &mut [Montgomery<u64>],
    ) {
        apply_vv!(self, Self::sa_prepare_montgomery_into_sb::<REDUCE>, a, b, CHUNK);
    }

    /// vec(a) <- vec(a) * 2^64 mod q, in place.
    #[inline(always)]
    fn va_prepare_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &mut [Montgomery<u64>],
    ) {
        apply_v!(self, Self::sa_prepare_montgomery_into_sa::<REDUCE>, a, CHUNK);
    }

    /// vec(c) <- vec(a) * vec(b) * 2^-64 mod q (vec(a) in the Montgomery domain).
    #[inline(always)]
    fn va_mul_vb_montgomery_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[Montgomery<u64>],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_mul_sb_montgomery_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    /// vec(c) <- vec(c) + vec(a) * vec(b) * 2^-64 mod q
    /// (REDUCE1 on the product, REDUCE2 on the sum).
    #[inline(always)]
    fn va_mul_vb_montgomery_add_vc_into_vc<
        const CHUNK: usize,
        const REDUCE1: REDUCEMOD,
        const REDUCE2: REDUCEMOD,
    >(
        &self,
        a: &[Montgomery<u64>],
        b: &[u64],
        c: &mut [u64],
    ) {
        apply_vvv!(self, Self::sa_mul_sb_montgomery_add_sc_into_sc::<REDUCE1, REDUCE2>, a, b, c, CHUNK);
    }

    /// vec(a) <- vec(a) * vec(b) * 2^-64 mod q (vec(b) in the Montgomery domain).
    #[inline(always)]
    fn va_mul_vb_montgomery_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &[Montgomery<u64>],
        a: &mut [u64],
    ) {
        apply_vv!(self, Self::sa_mul_sb_montgomery_into_sa::<REDUCE>, b, a, CHUNK);
    }

    /// vec(c) <- vec(a) * scalar(b) mod q (b carries its Barrett precomputation).
    #[inline(always)]
    fn va_mul_sb_barrett_into_vc<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &Barrett<u64>,
        c: &mut [u64],
    ) {
        apply_vsv!(self, Self::sa_mul_sb_barrett_into_sc::<REDUCE>, a, b, c, CHUNK);
    }

    /// vec(a) <- vec(a) * scalar(b) mod q (b carries its Barrett precomputation).
    #[inline(always)]
    fn va_mul_sb_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &Barrett<u64>,
        a: &mut [u64],
    ) {
        apply_sv!(self, Self::sa_mul_sb_barrett_into_sa::<REDUCE>, b, a, CHUNK);
    }

    /// vec(d) <- (vec(a) - vec(b)) * scalar(c) mod q; VBRANGE bounds vec(b).
    fn va_sub_vb_mul_sc_barrett_into_vd<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        a: &[u64],
        b: &[u64],
        c: &Barrett<u64>,
        d: &mut [u64],
    ) {
        apply_vvsv!(self, Self::sa_sub_sb_mul_sc_barrett_into_sd::<VBRANGE, REDUCE>, a, b, c, d, CHUNK);
    }

    /// vec(c) <- (vec(a) - vec(c)) * scalar(b) mod q; VBRANGE bounds vec(c).
    fn va_sub_vb_mul_sc_barrett_into_vb<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        a: &[u64],
        b: &Barrett<u64>,
        c: &mut [u64],
    ) {
        apply_vsv!(self, Self::sa_sub_sb_mul_sc_barrett_into_sb::<VBRANGE, REDUCE>, a, b, c, CHUNK);
    }

    // vec(a) <- (vec(a) + scalar(b)) * scalar(c);
    fn va_add_sb_mul_sc_barrett_into_va<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        b: &u64,
        c: &Barrett<u64>,
        a: &mut [u64],
    ) {
        apply_ssv!(self, Self::sa_add_sb_mul_sc_barrett_into_sa::<REDUCE>, b, c, a, CHUNK);
    }

    // vec(d) <- (vec(a) + scalar(b)) * scalar(c);
    fn va_add_sb_mul_sc_barrett_into_vd<const CHUNK: usize, const REDUCE: REDUCEMOD>(
        &self,
        a: &[u64],
        b: &u64,
        c: &Barrett<u64>,
        d: &mut [u64],
    ) {
        apply_vssv!(self, Self::sa_add_sb_mul_sc_barrett_into_sd::<REDUCE>, a, b, c, d, CHUNK);
    }

    // vec(e) <- (vec(b) - vec(a) + scalar(c)) * scalar(d); VBRANGE bounds vec(a).
    fn vb_sub_va_add_sc_mul_sd_barrett_into_ve<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        va: &[u64],
        vb: &[u64],
        sc: &u64,
        sd: &Barrett<u64>,
        ve: &mut [u64],
    ) {
        apply_vvssv!(self, Self::sb_sub_sa_add_sc_mul_sd_barrett_into_se::<VBRANGE, REDUCE>, va, vb, sc, sd, ve, CHUNK);
    }

    // vec(a) <- (vec(b) - vec(a) + scalar(c)) * scalar(d); VBRANGE bounds vec(a).
    fn vb_sub_va_add_sc_mul_sd_barrett_into_va<
        const CHUNK: usize,
        const VBRANGE: u8,
        const REDUCE: REDUCEMOD,
    >(
        &self,
        vb: &[u64],
        sc: &u64,
        sd: &Barrett<u64>,
        va: &mut [u64],
    ) {
        apply_vssv!(self, Self::sb_sub_sa_add_sc_mul_sd_barrett_into_sa::<VBRANGE, REDUCE>, vb, sc, sd, va, CHUNK);
    }

    // vec(a) <- (vec(a)>>scalar(b)) & scalar(c).
    fn va_rsh_sb_mask_sc_into_va<const CHUNK: usize>(&self, sb: &usize, sc: &u64, va: &mut [u64]) {
        apply_ssv!(self, Self::sa_rsh_sb_mask_sc_into_sa, sb, sc, va, CHUNK);
    }

    // vec(d) <- (vec(a)>>scalar(b)) & scalar(c).
    fn va_rsh_sb_mask_sc_into_vd<const CHUNK: usize>(
        &self,
        va: &[u64],
        sb: &usize,
        sc: &u64,
        vd: &mut [u64],
    ) {
        apply_vssv!(self, Self::sa_rsh_sb_mask_sc_into_sd, va, sb, sc, vd, CHUNK);
    }

    // vec(d) <- vec(d) + (vec(a)>>scalar(b)) & scalar(c).
    fn va_rsh_sb_mask_sc_add_vd_into_vd<const CHUNK: usize>(
        &self,
        va: &[u64],
        sb: &usize,
        sc: &u64,
        vd: &mut [u64],
    ) {
        apply_vssv!(self, Self::sa_rsh_sb_mask_sc_add_sd_into_sd, va, sb, sc, vd, CHUNK);
    }

    // vec(c) <- i-th unsigned digit base 2^{sb} of vec(a).
    // vec(c) is ensured to be in the range [0, 2^{sb}-1[ with E[vec(c)] = 2^{sb}-1.
    fn va_ith_digit_unsigned_base_sb_into_vc<const CHUNK: usize>(
        &self,
        i: usize,
        va: &[u64],
        sb: &usize,
        vc: &mut [u64],
    ) {
        // Digit i occupies bits [i*sb, (i+1)*sb): shift then mask.
        self.va_rsh_sb_mask_sc_into_vd::<CHUNK>(va, &(i * sb), &((1 << sb) - 1), vc);
    }

    // vec(c) <- i-th signed digit base 2^{w} of vec(a).
    // Reads the carry of the i-1-th iteration and write the carry on the i-th iteration on carry.
    // if i > 0, carry of the i-1th iteration must be provided.
    // if BALANCED: vec(c) is ensured to be [-2^{sb-1}, 2^{sb-1}[ with E[vec(c)] = 0, else E[vec(c)] = -0.5
    fn va_ith_digit_signed_base_sb_into_vc<const CHUNK: usize, const BALANCED: bool>(
        &self,
        i: usize,
        va: &[u64],
        sb: &usize,
        carry: &mut [u64],
        vc: &mut [u64],
    ) {
        let base: u64 = 1 << sb;
        let mask: u64 = base - 1;
        // For the first digit the carry slice is overwritten (CARRYOVERWRITE =
        // true); subsequent digits accumulate the incoming carry into the
        // extracted digit before re-centering.
        if i == 0 {
            apply_vsssvv!(self, Self::sa_signed_digit_into_sb::<true, BALANCED>, va, &base, &(i * sb), &mask, carry, vc, CHUNK);
        } else {
            apply_vsssvv!(self, Self::sa_signed_digit_into_sb::<false, BALANCED>, va, &base, &(i * sb), &mask, carry, vc, CHUNK);
        }
    }
}
|
||||
@@ -1,228 +0,0 @@
|
||||
use crate::modulus::barrett::BarrettPrecomp;
|
||||
use crate::modulus::montgomery::{Montgomery, MontgomeryPrecomp};
|
||||
use crate::modulus::prime::Prime;
|
||||
use crate::modulus::ONCE;
|
||||
use primality_test::is_prime;
|
||||
use prime_factorization::Factorization;
|
||||
|
||||
impl Prime<u64> {
|
||||
/// Returns a new instance of Prime<u64>.
|
||||
/// Panics if q_base is not a prime > 2 and
|
||||
/// if q_base^q_power would overflow u64.
|
||||
pub fn new(q_base: u64, q_power: usize) -> Self {
|
||||
assert!(is_prime(q_base) && q_base > 2);
|
||||
Self::new_unchecked(q_base, q_power)
|
||||
}
|
||||
|
||||
/// Returns a new instance of Prime<u64>.
|
||||
/// Does not check if q_base is a prime > 2.
|
||||
/// Panics if q_base^q_power would overflow u64.
|
||||
pub fn new_unchecked(q_base: u64, q_power: usize) -> Self {
|
||||
let mut q = q_base;
|
||||
for _i in 1..q_power {
|
||||
q *= q_base
|
||||
}
|
||||
|
||||
assert!(q.next_power_of_two().ilog2() <= 61);
|
||||
|
||||
let mut phi = q_base - 1;
|
||||
for _i in 1..q_power {
|
||||
phi *= q_base
|
||||
}
|
||||
|
||||
let mut prime: Prime<u64> = Self {
|
||||
q: q,
|
||||
two_q: q << 1,
|
||||
four_q: q << 2,
|
||||
q_base: q_base,
|
||||
q_power: q_power,
|
||||
factors: Vec::new(),
|
||||
montgomery: MontgomeryPrecomp::new(q),
|
||||
barrett: BarrettPrecomp::new(q),
|
||||
phi: phi,
|
||||
};
|
||||
|
||||
prime.check_factors();
|
||||
|
||||
prime
|
||||
}
|
||||
|
||||
pub fn q(&self) -> u64 {
|
||||
self.q
|
||||
}
|
||||
|
||||
pub fn q_base(&self) -> u64 {
|
||||
self.q_base
|
||||
}
|
||||
|
||||
pub fn q_power(&self) -> usize {
|
||||
self.q_power
|
||||
}
|
||||
|
||||
/// Returns x^exponen mod q.
|
||||
#[inline(always)]
|
||||
pub fn pow(&self, x: u64, exponent: u64) -> u64 {
|
||||
let mut y_mont: Montgomery<u64> = self.montgomery.one();
|
||||
let mut x_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(x);
|
||||
let mut i: u64 = exponent;
|
||||
while i > 0 {
|
||||
if i & 1 == 1 {
|
||||
self.montgomery
|
||||
.mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
|
||||
}
|
||||
|
||||
self.montgomery
|
||||
.mul_internal_assign::<ONCE>(x_mont, &mut x_mont);
|
||||
|
||||
i >>= 1;
|
||||
}
|
||||
|
||||
self.montgomery.unprepare::<ONCE>(y_mont)
|
||||
}
|
||||
|
||||
/// Returns x^-1 mod q.
|
||||
/// User must ensure that x is not divisible by q_base.
|
||||
#[inline(always)]
|
||||
pub fn inv(&self, x: u64) -> u64 {
|
||||
self.pow(x, self.phi - 1)
|
||||
}
|
||||
}
|
||||
|
||||
impl Prime<u64> {
|
||||
/// Returns the smallest nth primitive root of q_base.
|
||||
pub fn primitive_root(&self) -> u64 {
|
||||
let mut candidate: u64 = 1u64;
|
||||
let mut not_found: bool = true;
|
||||
|
||||
while not_found {
|
||||
candidate += 1;
|
||||
|
||||
for &factor in &self.factors {
|
||||
if pow(candidate, (self.q_base - 1) / factor, self.q_base) == 1 {
|
||||
not_found = true;
|
||||
break;
|
||||
}
|
||||
not_found = false;
|
||||
}
|
||||
}
|
||||
|
||||
if not_found {
|
||||
panic!("failed to find a primitive root for q_base={}", self.q_base)
|
||||
}
|
||||
|
||||
candidate
|
||||
}
|
||||
|
||||
/// Returns an nth primitive root of unity modulo q = q_base^q_power.
///
/// NOTE(review): the value is returned in the plain domain
/// (`hensel_lift` unprepares it); the original comment said
/// "in Montgomery" — confirm against callers.
pub fn primitive_nth_root(&self, nth_root: u64) -> u64 {
    // `q & (nth_root - 1)` computes q mod nth_root, assuming nth_root
    // is a power of two (not asserted here — presumably guaranteed by
    // the caller; verify).
    assert!(
        self.q & (nth_root - 1) == 1,
        "invalid prime: q = {} % nth_root = {} = {} != 1",
        self.q,
        nth_root,
        self.q & (nth_root - 1)
    );

    // Generator of (Z/q_base)*.
    let psi: u64 = self.primitive_root();

    // nth primitive root mod q_base: psi^((q_base-1)/nth_root) mod q_base.
    let psi_nth_q_base: u64 = pow(psi, (self.q_base - 1) / nth_root, self.q_base);

    // Lifts the nth primitive root mod q_base to q = q_base^q_power.
    let psi_nth_q: u64 = self.hensel_lift(psi_nth_q_base, nth_root);

    // Sanity checks: order divides nth_root, and the half power is -1,
    // so the order is exactly nth_root.
    assert!(
        self.pow(psi_nth_q, nth_root) == 1,
        "invalid nth primitive root: psi^nth_root != 1 mod q"
    );
    assert!(
        self.pow(psi_nth_q, nth_root >> 1) == self.q - 1,
        "invalid nth primitive root: psi^(nth_root/2) != -1 mod q"
    );

    psi_nth_q
}
|
||||
|
||||
/// Checks if the field self.factor is populated.
|
||||
/// If not, factorize q_base-1 and populates self.factor.
|
||||
/// If yes, checks that it contains the unique factors of q_base-1.
|
||||
pub fn check_factors(&mut self) {
|
||||
if self.factors.len() == 0 {
|
||||
let factors = Factorization::run(self.q_base - 1).prime_factor_repr();
|
||||
let mut distincts_factors: Vec<u64> = Vec::with_capacity(factors.len());
|
||||
for factor in factors.iter() {
|
||||
distincts_factors.push(factor.0)
|
||||
}
|
||||
self.factors = distincts_factors
|
||||
} else {
|
||||
let mut q_base: u64 = self.q_base;
|
||||
|
||||
for &factor in &self.factors {
|
||||
if !is_prime(factor) {
|
||||
panic!("invalid factor list: factor {} is not prime", factor)
|
||||
}
|
||||
|
||||
while q_base % factor != 0 {
|
||||
q_base /= factor
|
||||
}
|
||||
}
|
||||
|
||||
if q_base != 1 {
|
||||
panic!("invalid factor list: does not fully divide q_base: q_base % (all factors) = {}", q_base)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns (psi + a * q_base)^{nth_root} = 1 mod q = q_base^q_power given psi^{nth_root} = 1 mod q_base.
/// Panics if psi^{nth_root} != 1 mod q_base.
fn hensel_lift(&self, psi: u64, nth_root: u64) -> u64 {
    assert!(
        pow(psi, nth_root, self.q_base) == 1,
        "invalid argument psi: psi^nth_root = {} != 1",
        pow(psi, nth_root, self.q_base)
    );

    let mut psi_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(psi);
    let nth_root_mont: Montgomery<u64> = self.montgomery.prepare::<ONCE>(nth_root);

    // Newton/Hensel iteration on f(x) = x^nth_root - 1:
    //   psi <- psi + (1 - psi^n) / (n * psi^(n-1))  (mod q),
    // performed entirely in the Montgomery domain. One step per extra
    // power of q_base (presumably each step lifts the precision by at
    // least one q_base-adic digit — confirm).
    for _i in 1..self.q_power {
        // psi^(n-1)
        let psi_pow: Montgomery<u64> = self.montgomery.pow(psi_mont, nth_root - 1);

        // num = 1 - psi^n, kept non-negative by adding q before the
        // subtraction.
        let num: Montgomery<u64> = self.montgomery.one() + self.q
            - self.montgomery.mul_internal::<ONCE>(psi_pow, psi_mont);

        // den = n * psi^(n-1) ...
        let mut den: Montgomery<u64> =
            self.montgomery.mul_internal::<ONCE>(nth_root_mont, psi_pow);

        // ... inverted via den^(phi-1) (Euler's theorem).
        den = self.montgomery.pow(den, self.phi - 1);

        psi_mont = self
            .montgomery
            .add_internal(psi_mont, self.montgomery.mul_internal::<ONCE>(num, den));
    }

    self.montgomery.unprepare::<ONCE>(psi_mont)
}
|
||||
}
|
||||
|
||||
/// Returns x^exponent mod q.
|
||||
/// This function internally instantiate a new MontgomeryPrecomp<u64>
|
||||
/// To be used when called only a few times and if there
|
||||
/// is no Prime instantiated with q.
|
||||
pub fn pow(x: u64, exponent: u64, q: u64) -> u64 {
|
||||
let montgomery: MontgomeryPrecomp<u64> = MontgomeryPrecomp::<u64>::new(q);
|
||||
let mut y_mont: Montgomery<u64> = montgomery.one();
|
||||
let mut x_mont: Montgomery<u64> = montgomery.prepare::<ONCE>(x);
|
||||
let mut i: u64 = exponent;
|
||||
while i > 0 {
|
||||
if i & 1 == 1 {
|
||||
montgomery.mul_internal_assign::<ONCE>(x_mont, &mut y_mont);
|
||||
}
|
||||
|
||||
montgomery.mul_internal_assign::<ONCE>(x_mont, &mut x_mont);
|
||||
|
||||
i >>= 1;
|
||||
}
|
||||
|
||||
montgomery.unprepare::<ONCE>(y_mont)
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
use crate::modulus::barrett::BarrettPrecomp;
|
||||
|
||||
/// Montgomery is a generic type alias marking
/// an element in the Montgomery domain.
///
/// NOTE(review): being a transparent alias, the compiler does not
/// distinguish Montgomery-domain values from plain residues — mixing
/// them up is not a type error.
pub type Montgomery<O> = O;

/// MontgomeryPrecomp is a generic struct storing
/// precomputations for Montgomery arithmetic.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MontgomeryPrecomp<O> {
    /// Modulus.
    pub q: O,
    /// 2 * q.
    pub two_q: O,
    /// 4 * q.
    pub four_q: O,
    /// Barrett precomputations for reductions mod q.
    pub barrett: BarrettPrecomp<O>,
    // Presumably -q^-1 mod 2^w (the Montgomery word inverse) — confirm
    // against the reduction routines.
    pub q_inv: O,
    /// 1 in the Montgomery domain.
    pub one: Montgomery<O>,
    /// -1 (i.e. q - 1) in the Montgomery domain.
    pub minus_one: Montgomery<O>,
}
|
||||
@@ -1,25 +0,0 @@
|
||||
use crate::modulus::barrett::BarrettPrecomp;
|
||||
use crate::modulus::montgomery::MontgomeryPrecomp;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Prime<O> {
    /// q_base^q_power (the doc comment was previously attached to
    /// `two_q` by mistake).
    pub q: O,
    /// 2 * q.
    pub two_q: O,
    /// 4 * q.
    pub four_q: O,
    /// Prime base of the modulus.
    pub q_base: O,
    /// Power the base is raised to.
    pub q_power: usize,
    /// Distinct prime factors of q_base - 1 (the doc comment was
    /// previously attached to `montgomery` by mistake).
    pub factors: Vec<O>,
    /// Montgomery arithmetic precomputations for q.
    pub montgomery: MontgomeryPrecomp<O>,
    /// Barrett reduction precomputations for q.
    pub barrett: BarrettPrecomp<O>,
    /// Euler totient phi(q) = (q_base - 1) * q_base^(q_power - 1).
    pub phi: O,
}
|
||||
|
||||
/// Iterator-like state for generating NTT-friendly primes
/// (presumably primes = 1 mod 2n around a target size — confirm
/// against the impl, which is outside this view).
pub struct NTTFriendlyPrimesGenerator<O> {
    /// Target bit-size of the generated primes.
    pub size: f64,
    /// Next candidate when scanning upward.
    pub next_prime: O,
    /// Next candidate when scanning downward.
    pub prev_prime: O,
    /// Whether the upward scan is still active.
    pub check_next_prime: bool,
    /// Whether the downward scan is still active.
    pub check_prev_prime: bool,
}
|
||||
@@ -1,31 +0,0 @@
|
||||
use num_bigint::BigInt;
|
||||
use num_integer::Integer;
|
||||
use num_traits::{One, Signed, Zero};
|
||||
|
||||
/// Rounded integer division variants.
pub trait Div {
    /// Quotient rounded toward negative infinity.
    fn div_floor(&self, other: &Self) -> Self;
    /// Quotient rounded to the nearest integer.
    fn div_round(&self, other: &Self) -> Self;
}
|
||||
|
||||
impl Div for BigInt {
|
||||
fn div_floor(&self, other: &Self) -> Self {
|
||||
let quo: BigInt = self / other;
|
||||
if self.sign() != other.sign() {
|
||||
return quo - BigInt::one();
|
||||
}
|
||||
return quo;
|
||||
}
|
||||
|
||||
fn div_round(&self, other: &Self) -> Self {
|
||||
let (quo, mut rem) = self.div_rem(other);
|
||||
rem <<= 1;
|
||||
if rem != BigInt::zero() && &rem.abs() > other {
|
||||
if self.sign() == other.sign() {
|
||||
return quo + BigInt::one();
|
||||
} else {
|
||||
return quo - BigInt::one();
|
||||
}
|
||||
}
|
||||
return quo;
|
||||
}
|
||||
}
|
||||
186
rns/src/poly.rs
186
rns/src/poly.rs
@@ -1,186 +0,0 @@
|
||||
pub mod poly;
|
||||
use std::cmp::min;
|
||||
use std::cmp::PartialEq;
|
||||
|
||||
/// A dense polynomial stored as a flat coefficient vector.
#[derive(Clone, Debug, Eq)]
pub struct Poly<O>(pub Vec<O>);

impl<O> Poly<O>
where
    O: Default + Clone + Copy,
{
    /// Allocates a polynomial with n default-initialized coefficients.
    pub fn new(n: usize) -> Self {
        Self(vec![O::default(); n])
    }

    /// Number of scalars a backing buffer must provide.
    pub fn buffer_size(&self) -> usize {
        self.0.len()
    }

    /// Replaces the coefficients with the first n entries of buf.
    /// Panics if buf holds fewer than n entries.
    pub fn from_buffer(&mut self, n: usize, buf: &mut [O]) {
        assert!(
            buf.len() >= n,
            "invalid buffer: buf.len()={} < n={}",
            buf.len(),
            n
        );
        self.0 = buf[..n].to_vec();
    }

    /// Ceil(log2(n)).
    /// NOTE(review): underflows (debug-panic) when n == 0 — confirm
    /// callers never ask for the log of an empty polynomial.
    pub fn log_n(&self) -> usize {
        (usize::BITS - (self.n() - 1).leading_zeros()) as usize
    }

    /// Number of coefficients.
    pub fn n(&self) -> usize {
        self.0.len()
    }

    /// Grows or shrinks to n coefficients, padding with defaults.
    pub fn resize(&mut self, n: usize) {
        self.0.resize(n, O::default());
    }

    /// Copies as many leading coefficients from v as fit.
    pub fn set(&mut self, v: &[O]) {
        let len: usize = min(v.len(), self.n());
        self.0[..len].copy_from_slice(&v[..len]);
    }

    /// Sets every coefficient to *v.
    pub fn fill(&mut self, v: &O) {
        self.0.fill(*v)
    }

    /// Sets every coefficient to the default value.
    pub fn zero(&mut self) {
        self.fill(&O::default())
    }

    /// Makes self an exact copy of other (no-op when self IS other).
    pub fn copy_from(&mut self, other: &Poly<O>) {
        if std::ptr::eq(self, other) {
            return;
        }
        self.resize(other.n());
        self.0.copy_from_slice(&other.0)
    }
}

impl<O: PartialEq> PartialEq for Poly<O> {
    /// Equal when same object, or element-wise equal.
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(self, other) || (self.0 == other.0)
    }
}

impl<O> Default for Poly<O> {
    /// An empty (degree-0) polynomial.
    fn default() -> Self {
        Poly(Vec::new())
    }
}
|
||||
|
||||
/// An RNS polynomial: one `Poly` per RNS level (one residue per modulus).
#[derive(Clone, Debug, Eq)]
pub struct PolyRNS<O>(pub Vec<Poly<O>>);
|
||||
|
||||
impl<O> PolyRNS<O>
|
||||
where
|
||||
O: Default + Clone + Copy,
|
||||
{
|
||||
pub fn new(n: usize, level: usize) -> Self {
|
||||
let mut polyrns: PolyRNS<O> = PolyRNS::<O>::default();
|
||||
let mut buf: Vec<O> = vec![O::default(); polyrns.buffer_size(n, level)];
|
||||
polyrns.from_buffer(n, level, &mut buf[..]);
|
||||
polyrns
|
||||
}
|
||||
|
||||
pub fn n(&self) -> usize {
|
||||
self.0[0].n()
|
||||
}
|
||||
|
||||
pub fn log_n(&self) -> usize {
|
||||
self.0[0].log_n()
|
||||
}
|
||||
|
||||
pub fn level(&self) -> usize {
|
||||
self.0.len() - 1
|
||||
}
|
||||
|
||||
pub fn buffer_size(&self, n: usize, level: usize) -> usize {
|
||||
n * (level + 1)
|
||||
}
|
||||
|
||||
pub fn from_buffer(&mut self, n: usize, level: usize, buf: &mut [O]) {
|
||||
assert!(
|
||||
buf.len() >= n * (level + 1),
|
||||
"invalid buffer: buf.len()={} < n * (level+1)={}",
|
||||
buf.len(),
|
||||
level + 1
|
||||
);
|
||||
self.0.clear();
|
||||
for chunk in buf.chunks_mut(n).take(level + 1) {
|
||||
let mut poly: Poly<O> = Poly(Vec::new());
|
||||
poly.from_buffer(n, chunk);
|
||||
self.0.push(poly);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, level: usize) {
|
||||
self.0.resize(level + 1, Poly::<O>::new(self.n()));
|
||||
}
|
||||
|
||||
pub fn split_at_mut(&mut self, level: usize) -> (&mut [Poly<O>], &mut [Poly<O>]) {
|
||||
self.0.split_at_mut(level)
|
||||
}
|
||||
|
||||
pub fn at(&self, level: usize) -> &Poly<O> {
|
||||
assert!(
|
||||
level <= self.level(),
|
||||
"invalid argument level: level={} > self.level()={}",
|
||||
level,
|
||||
self.level()
|
||||
);
|
||||
&self.0[level]
|
||||
}
|
||||
|
||||
pub fn at_mut(&mut self, level: usize) -> &mut Poly<O> {
|
||||
&mut self.0[level]
|
||||
}
|
||||
|
||||
pub fn fill(&mut self, v: &O) {
|
||||
(0..self.level() + 1).for_each(|i| self.at_mut(i).fill(v))
|
||||
}
|
||||
|
||||
pub fn zero(&mut self) {
|
||||
self.fill(&O::default())
|
||||
}
|
||||
|
||||
pub fn copy(&mut self, other: &PolyRNS<O>) {
|
||||
if std::ptr::eq(self, other) {
|
||||
return;
|
||||
}
|
||||
self.resize(other.level());
|
||||
self.copy_level(other.level(), other);
|
||||
}
|
||||
|
||||
pub fn copy_level(&mut self, level: usize, other: &PolyRNS<O>) {
|
||||
assert!(
|
||||
self.level() <= level,
|
||||
"invalid argument level: level={} > self.level()={}",
|
||||
level,
|
||||
self.level()
|
||||
);
|
||||
assert!(
|
||||
other.level() <= level,
|
||||
"invalid argument level: level={} > other.level()={}",
|
||||
level,
|
||||
other.level()
|
||||
);
|
||||
(0..level + 1).for_each(|i| self.at_mut(i).copy_from(other.at(i)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<O: PartialEq> PartialEq for PolyRNS<O> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
std::ptr::eq(self, other) && (self.0 == other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<O> Default for PolyRNS<O> {
|
||||
fn default() -> Self {
|
||||
Self { 0: Vec::new() }
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
pub mod impl_u64;
|
||||
use crate::dft::DFT;
|
||||
use crate::modulus::prime::Prime;
|
||||
use crate::modulus::WordOps;
|
||||
use crate::poly::{Poly, PolyRNS};
|
||||
use crate::GALOISGENERATOR;
|
||||
use num::traits::Unsigned;
|
||||
use std::rc::Rc;
|
||||
|
||||
/// A single-modulus polynomial ring with an attached NTT backend.
pub struct Ring<O: Unsigned> {
    /// Ring degree (number of coefficients).
    pub n: usize,
    /// Modulus q = q_base^q_power with its precomputations.
    pub modulus: Prime<O>,
    /// Set to 2n by `Ring::new` (negacyclic convention — presumably
    /// X^n + 1; confirm against the DFT table).
    pub cyclotomic_order: usize,
    /// NTT/DFT implementation used for forward/backward transforms.
    pub dft: Box<dyn DFT<O>>,
}
|
||||
|
||||
impl<O: Unsigned> Ring<O> {
|
||||
pub fn log_n(&self) -> usize {
|
||||
return self.n().log2();
|
||||
}
|
||||
|
||||
pub fn n(&self) -> usize {
|
||||
return self.n;
|
||||
}
|
||||
|
||||
pub fn new_poly(&self) -> Poly<u64> {
|
||||
Poly::<u64>::new(self.n())
|
||||
}
|
||||
|
||||
pub fn cyclotomic_order(&self) -> usize {
|
||||
self.cyclotomic_order
|
||||
}
|
||||
|
||||
// Returns GALOISGENERATOR^gen_1 * (-1)^gen_2 mod 2^log_nth_root.
|
||||
pub fn galois_element(&self, gen_1: usize, gen_2: bool) -> usize {
|
||||
let mut gal_el: usize = 1;
|
||||
let mut gen_1_pow: usize = GALOISGENERATOR;
|
||||
let mut e: usize = gen_1;
|
||||
while e > 0 {
|
||||
if e & 1 == 1 {
|
||||
gal_el = gal_el.wrapping_mul(gen_1_pow);
|
||||
}
|
||||
|
||||
gen_1_pow = gen_1_pow.wrapping_mul(gen_1_pow);
|
||||
e >>= 1;
|
||||
}
|
||||
|
||||
gal_el &= self.cyclotomic_order - 1;
|
||||
|
||||
if gen_2 {
|
||||
return self.cyclotomic_order - gal_el;
|
||||
}
|
||||
gal_el
|
||||
}
|
||||
}
|
||||
|
||||
/// A chain of shared single-modulus rings, one per RNS modulus.
pub struct RingRNS<O: Unsigned>(pub Vec<Rc<Ring<O>>>);
|
||||
|
||||
impl<O: Unsigned> RingRNS<O> {
|
||||
pub fn log_n(&self) -> usize {
|
||||
return self.n().log2();
|
||||
}
|
||||
|
||||
pub fn n(&self) -> usize {
|
||||
self.0[0].n()
|
||||
}
|
||||
|
||||
pub fn new_polyrns(&self) -> PolyRNS<u64> {
|
||||
PolyRNS::<u64>::new(self.n(), self.level())
|
||||
}
|
||||
|
||||
pub fn new_poly(&self) -> Poly<u64> {
|
||||
Poly::<u64>::new(self.n())
|
||||
}
|
||||
|
||||
pub fn max_level(&self) -> usize {
|
||||
self.0.len() - 1
|
||||
}
|
||||
|
||||
pub fn level(&self) -> usize {
|
||||
self.0.len() - 1
|
||||
}
|
||||
|
||||
pub fn at_level(&self, level: usize) -> RingRNS<O> {
|
||||
assert!(level <= self.0.len());
|
||||
RingRNS(self.0[..level + 1].to_vec())
|
||||
}
|
||||
}
|
||||
@@ -1,208 +0,0 @@
|
||||
use crate::automorphism::AutoPerm;
|
||||
use crate::modulus::{ScalarOperations, ONCE};
|
||||
use crate::modulus::{WordOps, REDUCEMOD};
|
||||
use crate::poly::Poly;
|
||||
use crate::ring::Ring;
|
||||
|
||||
impl Ring<u64> {
    // b <- auto(a): applies the Galois automorphism X -> X^gal_el,
    // overwriting b.
    pub fn a_apply_automorphism_native_into_b<const NTT: bool>(
        &self,
        a: &Poly<u64>,
        gal_el: usize,
        nth_root: usize,
        b: &mut Poly<u64>,
    ) {
        self.apply_automorphism_native_core::<0, ONCE, NTT>(a, gal_el, nth_root, b)
    }

    // b <- REDUCEMOD(b + auto(a))
    pub fn a_apply_automorphism_native_add_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
        &self,
        a: &Poly<u64>,
        gal_el: usize,
        nth_root: usize,
        b: &mut Poly<u64>,
    ) {
        self.apply_automorphism_native_core::<1, REDUCE, NTT>(a, gal_el, nth_root, b)
    }

    // b <- REDUCEMOD(b - auto(a))
    pub fn a_apply_automorphism_native_sub_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
        &self,
        a: &Poly<u64>,
        gal_el: usize,
        nth_root: usize,
        b: &mut Poly<u64>,
    ) {
        self.apply_automorphism_native_core::<2, REDUCE, NTT>(a, gal_el, nth_root, b)
    }

    // Shared implementation. MOD selects the combine mode:
    // 0 = overwrite, 1 = add into b, 2 = subtract into b.
    fn apply_automorphism_native_core<const MOD: u8, const REDUCE: REDUCEMOD, const NTT: bool>(
        &self,
        a: &Poly<u64>,
        gal_el: usize,
        nth_root: usize,
        b: &mut Poly<u64>,
    ) {
        debug_assert!(
            a.n() == b.n(),
            "invalid inputs: a.n() = {} != b.n() = {}",
            a.n(),
            b.n()
        );

        // gal_el must be odd to be invertible modulo the power-of-two
        // nth_root.
        assert!(
            gal_el & 1 == 1,
            "invalid gal_el={}: not coprime with nth_root={}",
            gal_el,
            nth_root
        );

        assert!(
            nth_root & (nth_root - 1) == 0,
            "invalid nth_root={}: not a power-of-two",
            nth_root
        );

        let b_vec: &mut Vec<u64> = &mut b.0;
        let a_vec: &Vec<u64> = &a.0;

        if NTT {
            // NTT domain: the automorphism permutes evaluation points.
            // Slots are assumed to be stored in bit-reversed order
            // (hence the reverse_bits_msb round-trips) and indexed by
            // the odd exponents 2*i_rev + 1 — TODO confirm against the
            // NTT table layout.
            let mask: usize = nth_root - 1;
            let log_nth_root_half: u32 = nth_root.log2() as u32 - 1;
            a_vec.iter().enumerate().for_each(|(i, ai)| {
                // Odd exponent represented by slot i.
                let i_rev: usize = 2 * i.reverse_bits_msb(log_nth_root_half) + 1;
                // Target odd exponent -> slot index (natural order).
                let gal_el_i: usize = (((gal_el * i_rev) & mask) - 1) >> 1;
                // Back to bit-reversed slot order.
                let idx: usize = gal_el_i.reverse_bits_msb(log_nth_root_half);
                match MOD {
                    0 => b_vec[idx] = *ai,
                    1 => self
                        .modulus
                        .sa_add_sb_into_sb::<REDUCE>(ai, &mut b_vec[idx]),
                    2 => self
                        .modulus
                        .sa_sub_sb_into_sa::<1, REDUCE>(ai, &mut b_vec[idx]),
                    _ => {
                        panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
                    }
                }
            });
        } else {
            // Coefficient domain: X^i -> X^(i*gal_el) = (+/-)X^(i*gal_el mod n),
            // with the sign given by the parity of (i*gal_el) / n
            // (negacyclic wrap-around).
            let n: usize = a.n();
            let mask: usize = n - 1;
            let log_n: usize = n.log2();
            let q: u64 = self.modulus.q();
            a_vec.iter().enumerate().for_each(|(i, ai)| {
                let gal_el_i: usize = i * gal_el;
                let sign: u64 = ((gal_el_i >> log_n) & 1) as u64;
                let i_out: usize = gal_el_i & mask;
                // Branch-free select of ai or q - ai.
                // NOTE(review): for ai == 0 and sign == 1 this yields q
                // rather than 0 — presumably absorbed by a later
                // reduction; confirm.
                let v: u64 = ai * (sign ^ 1) | (q - ai) * sign;
                match MOD {
                    0 => b_vec[i_out] = v,
                    1 => self
                        .modulus
                        .sa_add_sb_into_sb::<REDUCE>(&v, &mut b_vec[i_out]),
                    2 => self
                        .modulus
                        .sa_sub_sb_into_sa::<1, REDUCE>(&v, &mut b_vec[i_out]),
                    _ => {
                        panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
                    }
                }
            });
        }
    }

    // b <- auto(a), using a precomputed permutation table.
    pub fn a_apply_automorphism_from_perm_into_b<const NTT: bool>(
        &self,
        a: &Poly<u64>,
        auto_perm: &AutoPerm,
        b: &mut Poly<u64>,
    ) {
        self.automorphism_from_perm_core::<0, ONCE, NTT>(a, auto_perm, b)
    }

    // b <- REDUCEMOD(b + auto(a))
    pub fn a_apply_automorphism_from_perm_add_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
        &self,
        a: &Poly<u64>,
        auto_perm: &AutoPerm,
        b: &mut Poly<u64>,
    ) {
        self.automorphism_from_perm_core::<1, REDUCE, NTT>(a, auto_perm, b)
    }

    // b <- REDUCEMOD(b - auto(a))
    pub fn a_apply_automorphism_from_perm_sub_b_into_b<const REDUCE: REDUCEMOD, const NTT: bool>(
        &self,
        a: &Poly<u64>,
        auto_perm: &AutoPerm,
        b: &mut Poly<u64>,
    ) {
        self.automorphism_from_perm_core::<2, REDUCE, NTT>(a, auto_perm, b)
    }

    // Shared implementation over a precomputed AutoPerm.
    // MOD: 0 = overwrite, 1 = add into b, 2 = subtract into b.
    fn automorphism_from_perm_core<const MOD: u8, const REDUCE: REDUCEMOD, const NTT: bool>(
        &self,
        a: &Poly<u64>,
        auto_perm: &AutoPerm,
        b: &mut Poly<u64>,
    ) {
        debug_assert!(
            a.n() == b.n(),
            "invalid inputs: a.n() = {} != b.n() = {}",
            a.n(),
            b.n()
        );

        // The table is domain-specific; mixing NTT/coefficient tables
        // would silently scramble data, hence the hard assert.
        assert!(
            NTT == auto_perm.ntt,
            "missmatch between AutoPerm NTT flag={} and method NTT flag={}",
            auto_perm.ntt,
            NTT
        );

        let b_vec: &mut Vec<u64> = &mut b.0;
        let a_vec: &Vec<u64> = &a.0;

        let idx: &Vec<usize> = &auto_perm.permutation;

        if NTT {
            // NTT domain: a pure permutation, no sign handling needed.
            a_vec.iter().enumerate().for_each(|(i, ai)| match MOD {
                0 => b_vec[idx[i]] = *ai,
                1 => self
                    .modulus
                    .sa_add_sb_into_sb::<REDUCE>(ai, &mut b_vec[idx[i]]),
                2 => self
                    .modulus
                    .sa_sub_sb_into_sa::<1, REDUCE>(ai, &mut b_vec[idx[i]]),
                _ => {
                    panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
                }
            });
        } else {
            // Coefficient domain: the table packs the negation flag into
            // the top bit of each index (extracted below), with the real
            // index in the low bits.
            let n: usize = a.n();
            let mask: usize = n - 1;
            let q: u64 = self.modulus.q();
            a_vec.iter().enumerate().for_each(|(i, ai)| {
                let sign: u64 = (idx[i] >> usize::BITS - 1) as u64;
                // Branch-free select of ai or q - ai (see note in
                // apply_automorphism_native_core about ai == 0).
                let v: u64 = ai * (sign ^ 1) | (q - ai) * sign;
                match MOD {
                    0 => b_vec[idx[i] & mask] = v,
                    1 => self
                        .modulus
                        .sa_add_sb_into_sb::<REDUCE>(&v, &mut b_vec[idx[i] & mask]),
                    2 => self
                        .modulus
                        .sa_sub_sb_into_sa::<1, REDUCE>(&v, &mut b_vec[idx[i] & mask]),
                    _ => {
                        panic!("invalid const MOD should be 0, 1, or 2 but is {}", MOD)
                    }
                }
            });
        }
    }
}
|
||||
@@ -1,7 +0,0 @@
|
||||
pub mod automorphism;
|
||||
pub mod rescaling_rns;
|
||||
pub mod ring;
|
||||
pub mod ring_rns;
|
||||
pub mod ring_switch;
|
||||
pub mod sampling;
|
||||
pub mod utils;
|
||||
@@ -1,285 +0,0 @@
|
||||
use crate::modulus::barrett::Barrett;
|
||||
use crate::modulus::{BARRETT, NONE, ONCE};
|
||||
use crate::poly::{Poly, PolyRNS};
|
||||
use crate::ring::Ring;
|
||||
use crate::ring::RingRNS;
|
||||
use crate::scalar::ScalarRNS;
|
||||
extern crate test;
|
||||
|
||||
impl RingRNS<u64> {
    /// Updates b to floor(a / q[self.level()]), dropping the last
    /// modulus of the chain.
    /// buf is unused if <ROUND=false,NTT=false>.
    /// NOTE(review): the original doc said q[b.level()]; the code uses
    /// self.level() — confirm intended wording.
    pub fn div_by_last_modulus<const ROUND: bool, const NTT: bool>(
        &self,
        a: &PolyRNS<u64>,
        buf: &mut [Poly<u64>; 2],
        b: &mut PolyRNS<u64>,
    ) {
        debug_assert!(self.level() != 0, "invalid call: self.level()=0");
        debug_assert!(
            a.level() >= self.level(),
            "invalid input a: a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            b.level() >= self.level() - 1,
            "invalid input b: b.level()={} < self.level()-1={}",
            b.level(),
            self.level() - 1
        );

        let level = self.level();
        let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
        let r_last: &Ring<u64> = &self.0[level];

        if ROUND {
            // Rounded division: add q_last/2 to the last residue before
            // flooring, and compensate each remaining residue by
            // q_i - (q_last/2 mod q_i).
            let q_level_half: u64 = r_last.modulus.q >> 1;

            let (buf_q_scaling, buf_qi_scaling) = buf.split_at_mut(1);

            if NTT {
                // Leave the NTT domain to add the rounding constant.
                r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
                r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r_last.a_add_b_scalar_into_c::<NONE>(
                        &buf_q_scaling[0],
                        &(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
                        &mut buf_qi_scaling[0],
                    );
                    // NOTE(review): lazy flag is <true> here but <false>
                    // in div_by_last_modulus_inplace — confirm which is
                    // intended.
                    r.ntt_inplace::<true>(&mut buf_qi_scaling[0]);
                    // b_i = (a_i - buf) * (q_last^-1 mod q_i).
                    r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
                        &buf_qi_scaling[0],
                        a.at(i),
                        &rescaling_constants.0[i],
                        b.at_mut(i),
                    );
                }
            } else {
                r_last.a_add_b_scalar_into_c::<ONCE>(
                    a.at(self.level()),
                    &q_level_half,
                    &mut buf_q_scaling[0],
                );
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r_last.a_add_b_scalar_into_c::<NONE>(
                        &buf_q_scaling[0],
                        &(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
                        &mut buf_qi_scaling[0],
                    );
                    r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
                        &buf_qi_scaling[0],
                        a.at(i),
                        &rescaling_constants.0[i],
                        b.at_mut(i),
                    );
                }
            }
        } else {
            if NTT {
                let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.split_at_mut(1);
                self.0[level].intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
                    r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
                        &buf_ntt_qi_scaling[0],
                        a.at(i),
                        &rescaling_constants.0[i],
                        b.at_mut(i),
                    );
                }
            } else {
                // Coefficient domain, floor: subtract the last residue
                // directly and scale by q_last^-1 mod q_i.
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r.a_sub_b_mul_c_scalar_barrett_into_d::<2, ONCE>(
                        a.at(level),
                        a.at(i),
                        &rescaling_constants.0[i],
                        b.at_mut(i),
                    );
                }
            }
        }
    }

    /// Updates a to floor(a / q[self.level()]) in place.
    /// Expects a to be in the NTT domain when NTT=true.
    pub fn div_by_last_modulus_inplace<const ROUND: bool, const NTT: bool>(
        &self,
        buf: &mut [Poly<u64>; 2],
        a: &mut PolyRNS<u64>,
    ) {
        // NOTE(review): the format arguments are swapped relative to the
        // message (self.level() is printed as a.level() and vice versa).
        debug_assert!(
            self.level() <= a.level(),
            "invalid input a: a.level()={} < self.level()={}",
            self.level(),
            a.level()
        );

        let level = self.level();
        let rescaling_constants: ScalarRNS<Barrett<u64>> = self.rescaling_constant();
        let r_last: &Ring<u64> = &self.0[level];

        if ROUND {
            let q_level_half: u64 = r_last.modulus.q >> 1;
            let (buf_q_scaling, buf_qi_scaling) = buf.split_at_mut(1);

            if NTT {
                r_last.intt::<false>(a.at(level), &mut buf_q_scaling[0]);
                r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut buf_q_scaling[0]);
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r_last.a_add_b_scalar_into_c::<NONE>(
                        &buf_q_scaling[0],
                        &(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
                        &mut buf_qi_scaling[0],
                    );
                    // NOTE(review): lazy flag is <false> here but <true>
                    // in the out-of-place variant — confirm.
                    r.ntt_inplace::<false>(&mut buf_qi_scaling[0]);
                    r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
                        &buf_qi_scaling[0],
                        &rescaling_constants.0[i],
                        a.at_mut(i),
                    );
                }
            } else {
                // Split so the last residue can be read while the lower
                // residues are mutated.
                let (a_qi, a_q_last) = a.0.split_at_mut(self.level());
                r_last.a_add_b_scalar_into_a::<ONCE>(&q_level_half, &mut a_q_last[0]);
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<1, ONCE>(
                        &a_q_last[0],
                        &(r.modulus.q - r_last.modulus.barrett.reduce::<BARRETT>(&q_level_half)),
                        &rescaling_constants.0[i],
                        &mut a_qi[i],
                    );
                }
            }
        } else {
            if NTT {
                let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.split_at_mut(1);
                r_last.intt::<false>(a.at(level), &mut buf_ntt_q_scaling[0]);
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r.ntt::<true>(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
                    r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
                        &buf_ntt_qi_scaling[0],
                        &rescaling_constants.0[i],
                        a.at_mut(i),
                    );
                }
            } else {
                let (a_i, a_level) = a.0.split_at_mut(level);
                for (i, r) in self.0[0..level].iter().enumerate() {
                    r.b_sub_a_mul_c_scalar_barrett_into_a::<2, ONCE>(
                        &a_level[0],
                        &rescaling_constants.0[i],
                        &mut a_i[i],
                    );
                }
            }
        }
    }

    /// Updates c to floor(a / prod_{level - nb_moduli}^{level} q[i]).
    pub fn div_by_last_moduli<const ROUND: bool, const NTT: bool>(
        &self,
        nb_moduli_dropped: usize,
        a: &PolyRNS<u64>,
        buf0: &mut [Poly<u64>; 2],
        buf1: &mut PolyRNS<u64>,
        c: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            nb_moduli_dropped <= self.level(),
            "invalid input nb_moduli_dropped: nb_moduli_dropped={} > self.level()={}",
            nb_moduli_dropped,
            self.level()
        );
        debug_assert!(
            a.level() >= self.level(),
            "invalid input a: a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            buf1.level() >= self.level(),
            "invalid input buf: buf.level()={} < self.level()={}",
            buf1.level(),
            self.level()
        );
        debug_assert!(
            c.level() >= self.level() - nb_moduli_dropped,
            "invalid input c: c.level()={} < self.level()-nb_moduli_dropped={}",
            c.level(),
            self.level() - nb_moduli_dropped
        );

        if nb_moduli_dropped == 0 {
            if a != c {
                c.copy(a);
            }
        } else {
            if NTT {
                // Do all drops in the coefficient domain, then transform
                // back at the target level.
                self.intt::<false>(a, buf1);
                (0..nb_moduli_dropped).for_each(|i| {
                    self.at_level(self.level() - i)
                        .div_by_last_modulus_inplace::<ROUND, false>(buf0, buf1)
                });
                self.at_level(self.level() - nb_moduli_dropped)
                    .ntt::<false>(buf1, c);
            } else {
                // First drop goes a -> buf1, middle drops are in place,
                // last drop goes buf1 -> c.
                // NOTE(review): for nb_moduli_dropped == 1 this path
                // performs TWO drops (the initial call and the final
                // call both run); confirm whether callers ever pass 1
                // with NTT=false.
                self.div_by_last_modulus::<ROUND, false>(a, buf0, buf1);

                (1..nb_moduli_dropped - 1).for_each(|i| {
                    self.at_level(self.level() - i)
                        .div_by_last_modulus_inplace::<ROUND, false>(buf0, buf1);
                });

                self.at_level(self.level() - nb_moduli_dropped + 1)
                    .div_by_last_modulus::<ROUND, false>(buf1, buf0, c);
            }
        }
    }

    /// Updates a to floor(a / prod_{level - nb_moduli_dropped}^{level} q[i])
    /// in place.
    pub fn div_by_last_moduli_inplace<const ROUND: bool, const NTT: bool>(
        &self,
        nb_moduli_dropped: usize,
        buf0: &mut [Poly<u64>; 2],
        buf1: &mut PolyRNS<u64>,
        a: &mut PolyRNS<u64>,
    ) {
        debug_assert!(
            nb_moduli_dropped <= self.level(),
            "invalid input nb_moduli_dropped: nb_moduli_dropped={} > self.level()={}",
            nb_moduli_dropped,
            self.level()
        );
        debug_assert!(
            a.level() >= self.level(),
            "invalid input a: a.level()={} < self.level()={}",
            a.level(),
            self.level()
        );
        debug_assert!(
            buf1.level() >= self.level(),
            "invalid input buf: buf.level()={} < self.level()={}",
            buf1.level(),
            self.level()
        );
        if nb_moduli_dropped == 0 {
            return;
        }

        if NTT {
            // Leave the NTT domain once, drop all moduli, transform back.
            self.intt::<false>(a, buf1);
            (0..nb_moduli_dropped).for_each(|i| {
                self.at_level(self.level() - i)
                    .div_by_last_modulus_inplace::<ROUND, false>(buf0, buf1)
            });
            self.at_level(self.level() - nb_moduli_dropped)
                .ntt::<false>(buf1, a);
        } else {
            (0..nb_moduli_dropped).for_each(|i| {
                self.at_level(self.level() - i)
                    .div_by_last_modulus_inplace::<ROUND, false>(buf0, a)
            });
        }
    }
}
|
||||
@@ -1,451 +0,0 @@
|
||||
use crate::dft::ntt::Table;
|
||||
use crate::modulus::barrett::Barrett;
|
||||
use crate::modulus::montgomery::Montgomery;
|
||||
use crate::modulus::prime::Prime;
|
||||
use crate::modulus::{VectorOperations, ONCE};
|
||||
use crate::modulus::{BARRETT, REDUCEMOD};
|
||||
use crate::poly::Poly;
|
||||
use crate::ring::Ring;
|
||||
use crate::CHUNK;
|
||||
use num_bigint::BigInt;
|
||||
use num_traits::ToPrimitive;
|
||||
|
||||
impl Ring<u64> {
|
||||
pub fn new(n: usize, q_base: u64, q_power: usize) -> Self {
|
||||
let prime: Prime<u64> = Prime::<u64>::new(q_base, q_power);
|
||||
Self {
|
||||
n: n,
|
||||
modulus: prime.clone(),
|
||||
cyclotomic_order: n << 1,
|
||||
dft: Box::new(Table::<u64>::new(prime, n << 1)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_bigint(&self, coeffs: &[BigInt], step: usize, a: &mut Poly<u64>) {
|
||||
assert!(
|
||||
step <= a.n(),
|
||||
"invalid step: step={} > a.n()={}",
|
||||
step,
|
||||
a.n()
|
||||
);
|
||||
assert!(
|
||||
coeffs.len() <= a.n() / step,
|
||||
"invalid coeffs: coeffs.len()={} > a.n()/step={}",
|
||||
coeffs.len(),
|
||||
a.n() / step
|
||||
);
|
||||
let q_big: BigInt = BigInt::from(self.modulus.q);
|
||||
a.0.iter_mut()
|
||||
.step_by(step)
|
||||
.enumerate()
|
||||
.for_each(|(i, v)| *v = (&coeffs[i] % &q_big).to_u64().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
impl Ring<u64> {
|
||||
pub fn ntt_inplace<const LAZY: bool>(&self, poly: &mut Poly<u64>) {
|
||||
match LAZY {
|
||||
true => self.dft.forward_inplace_lazy(&mut poly.0),
|
||||
false => self.dft.forward_inplace(&mut poly.0),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn intt_inplace<const LAZY: bool>(&self, poly: &mut Poly<u64>) {
|
||||
match LAZY {
|
||||
true => self.dft.backward_inplace_lazy(&mut poly.0),
|
||||
false => self.dft.backward_inplace(&mut poly.0),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ntt<const LAZY: bool>(&self, poly_in: &Poly<u64>, poly_out: &mut Poly<u64>) {
|
||||
poly_out.0.copy_from_slice(&poly_in.0);
|
||||
match LAZY {
|
||||
true => self.dft.forward_inplace_lazy(&mut poly_out.0),
|
||||
false => self.dft.forward_inplace(&mut poly_out.0),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn intt<const LAZY: bool>(&self, poly_in: &Poly<u64>, poly_out: &mut Poly<u64>) {
|
||||
poly_out.0.copy_from_slice(&poly_in.0);
|
||||
match LAZY {
|
||||
true => self.dft.backward_inplace_lazy(&mut poly_out.0),
|
||||
false => self.dft.backward_inplace(&mut poly_out.0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Ring<u64> {
|
||||
#[inline(always)]
|
||||
pub fn a_add_b_into_b<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.va_add_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &Poly<u64>,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_add_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_add_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus.va_add_sb_into_va::<CHUNK, REDUCE>(b, &mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_add_b_scalar_into_c<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &u64,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_add_sb_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_add_scalar_b_mul_c_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &u64,
|
||||
c: &Barrett<u64>,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus
|
||||
.va_add_sb_mul_sc_barrett_into_va::<CHUNK, REDUCE>(b, c, &mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn add_scalar_then_mul_scalar_barrett<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &u64,
|
||||
c: &Barrett<u64>,
|
||||
d: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(d.n() == self.n(), "c.n()={} != n={}", d.n(), self.n());
|
||||
self.modulus
|
||||
.va_add_sb_mul_sc_barrett_into_vd::<CHUNK, REDUCE>(&a.0, b, c, &mut d.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.va_sub_vb_into_vb::<CHUNK, BRANGE, REDUCE>(&a.0, &mut b.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &Poly<u64>,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.va_sub_vb_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, &mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &Poly<u64>,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_sub_vb_into_vc::<CHUNK, BRANGE, REDUCE>(&a.0, &b.0, &mut c.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.va_neg_into_vb::<CHUNK, ARANGE, REDUCE>(&a.0, &mut b.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus
|
||||
.va_neg_into_va::<CHUNK, ARANGE, REDUCE>(&mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_prepare_montgomery_into_a<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &mut Poly<Montgomery<u64>>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus
|
||||
.va_prepare_montgomery_into_va::<CHUNK, REDUCE>(&mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_montgomery_into_c<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<Montgomery<u64>>,
|
||||
b: &Poly<u64>,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_mul_vb_montgomery_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_montgomery_add_c_into_c<const REDUCE1: REDUCEMOD, const REDUCE2: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<Montgomery<u64>>,
|
||||
b: &Poly<u64>,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_mul_vb_montgomery_add_vc_into_vc::<CHUNK, REDUCE1, REDUCE2>(&a.0, &b.0, &mut c.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_montgomery_into_a<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &Poly<Montgomery<u64>>,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.va_mul_vb_montgomery_into_va::<CHUNK, REDUCE>(&b.0, &mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_scalar_into_c<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &u64,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus.va_mul_sb_barrett_into_vc::<CHUNK, REDUCE>(
|
||||
&a.0,
|
||||
&self.modulus.barrett.prepare(*b),
|
||||
&mut c.0,
|
||||
);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_scalar_into_a<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut Poly<u64>) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus.va_mul_sb_barrett_into_va::<CHUNK, REDUCE>(
|
||||
&self
|
||||
.modulus
|
||||
.barrett
|
||||
.prepare(self.modulus.barrett.reduce::<BARRETT>(b)),
|
||||
&mut a.0,
|
||||
);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_scalar_barrett_into_a<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &Barrett<u64>,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus
|
||||
.va_mul_sb_barrett_into_va::<CHUNK, REDUCE>(b, &mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_mul_b_scalar_barrett_into_c<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &Barrett<u64>,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
self.modulus
|
||||
.va_mul_sb_barrett_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_mul_c_scalar_barrett_into_d<const VBRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &Poly<u64>,
|
||||
c: &Barrett<u64>,
|
||||
d: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
|
||||
self.modulus
|
||||
.va_sub_vb_mul_sc_barrett_into_vd::<CHUNK, VBRANGE, REDUCE>(&a.0, &b.0, c, &mut d.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn b_sub_a_mul_c_scalar_barrett_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &Poly<u64>,
|
||||
c: &Barrett<u64>,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.va_sub_vb_mul_sc_barrett_into_vb::<CHUNK, BRANGE, REDUCE>(&b.0, c, &mut a.0);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e<
|
||||
const BRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &Poly<u64>,
|
||||
c: &u64,
|
||||
d: &Barrett<u64>,
|
||||
e: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
debug_assert!(e.n() == self.n(), "e.n()={} != n={}", e.n(), self.n());
|
||||
self.modulus
|
||||
.vb_sub_va_add_sc_mul_sd_barrett_into_ve::<CHUNK, BRANGE, REDUCE>(
|
||||
&a.0, &b.0, c, d, &mut e.0,
|
||||
);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
|
||||
const BRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
b: &Poly<u64>,
|
||||
c: &u64,
|
||||
d: &Barrett<u64>,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
|
||||
self.modulus
|
||||
.vb_sub_va_add_sc_mul_sd_barrett_into_va::<CHUNK, BRANGE, REDUCE>(&b.0, c, d, &mut a.0);
|
||||
}
|
||||
|
||||
pub fn a_rsh_scalar_b_mask_scalar_c_into_d(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &usize,
|
||||
c: &u64,
|
||||
d: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
|
||||
self.modulus
|
||||
.va_rsh_sb_mask_sc_into_vd::<CHUNK>(&a.0, b, c, &mut d.0);
|
||||
}
|
||||
|
||||
pub fn a_rsh_scalar_b_mask_scalar_c_add_d_into_d(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
b: &usize,
|
||||
c: &u64,
|
||||
d: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
|
||||
self.modulus
|
||||
.va_rsh_sb_mask_sc_add_vd_into_vd::<CHUNK>(&a.0, b, c, &mut d.0);
|
||||
}
|
||||
|
||||
pub fn a_ith_digit_unsigned_base_scalar_b_into_c(
|
||||
&self,
|
||||
i: usize,
|
||||
a: &Poly<u64>,
|
||||
b: &usize,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_ith_digit_unsigned_base_sb_into_vc::<CHUNK>(i, &a.0, b, &mut c.0);
|
||||
}
|
||||
|
||||
pub fn a_ith_digit_signed_base_scalar_b_into_c<const BALANCED: bool>(
|
||||
&self,
|
||||
i: usize,
|
||||
a: &Poly<u64>,
|
||||
b: &usize,
|
||||
carry: &mut Poly<u64>,
|
||||
c: &mut Poly<u64>,
|
||||
) {
|
||||
debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
|
||||
debug_assert!(
|
||||
carry.n() == self.n(),
|
||||
"carry.n()={} != n={}",
|
||||
carry.n(),
|
||||
self.n()
|
||||
);
|
||||
debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
|
||||
self.modulus
|
||||
.va_ith_digit_signed_base_sb_into_vc::<CHUNK, BALANCED>(
|
||||
i,
|
||||
&a.0,
|
||||
b,
|
||||
&mut carry.0,
|
||||
&mut c.0,
|
||||
);
|
||||
}
|
||||
|
||||
pub fn a_mul_by_x_pow_b_into_a(&self, b: i32, a: &mut Poly<u64>) {
|
||||
let n: usize = self.n();
|
||||
let cyclotomic_order: usize = self.cyclotomic_order();
|
||||
|
||||
let b_0: usize = (b as usize).wrapping_add(cyclotomic_order) & (cyclotomic_order - 1);
|
||||
let b_1: usize = b as usize & (n - 1);
|
||||
|
||||
a.0.rotate_right(b_1);
|
||||
|
||||
if b_0 > b_1 {
|
||||
self.modulus
|
||||
.va_neg_into_va::<CHUNK, 1, ONCE>(&mut a.0[b_1..])
|
||||
} else {
|
||||
self.modulus
|
||||
.va_neg_into_va::<CHUNK, 1, ONCE>(&mut a.0[..b_1])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,456 +0,0 @@
|
||||
use crate::modulus::barrett::Barrett;
|
||||
use crate::modulus::montgomery::Montgomery;
|
||||
use crate::modulus::REDUCEMOD;
|
||||
use crate::poly::PolyRNS;
|
||||
use crate::ring::{Ring, RingRNS};
|
||||
use crate::scalar::ScalarRNS;
|
||||
use num_bigint::BigInt;
|
||||
use std::rc::Rc;
|
||||
|
||||
impl RingRNS<u64> {
|
||||
pub fn new(n: usize, moduli: Vec<u64>) -> Self {
|
||||
assert!(!moduli.is_empty(), "moduli cannot be empty");
|
||||
let rings: Vec<Rc<Ring<u64>>> = moduli
|
||||
.into_iter()
|
||||
.map(|prime| Rc::new(Ring::new(n, prime, 1)))
|
||||
.collect();
|
||||
return RingRNS(rings);
|
||||
}
|
||||
|
||||
pub fn modulus(&self) -> BigInt {
|
||||
let mut modulus = BigInt::from(1);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(_, r)| modulus *= BigInt::from(r.modulus.q));
|
||||
modulus
|
||||
}
|
||||
|
||||
pub fn rescaling_constant(&self) -> ScalarRNS<Barrett<u64>> {
|
||||
let level = self.level();
|
||||
let q_scale: u64 = self.0[level].modulus.q;
|
||||
ScalarRNS(
|
||||
(0..level)
|
||||
.map(|i| {
|
||||
self.0[i]
|
||||
.modulus
|
||||
.barrett
|
||||
.prepare(self.0[i].modulus.q - self.0[i].modulus.inv(q_scale))
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn from_bigint_inplace(&self, coeffs: &[BigInt], step: usize, a: &mut PolyRNS<u64>) {
|
||||
let level = self.level();
|
||||
assert!(
|
||||
level <= a.level(),
|
||||
"invalid level: level={} > a.level()={}",
|
||||
level,
|
||||
a.level()
|
||||
);
|
||||
(0..level).for_each(|i| self.0[i].from_bigint(coeffs, step, a.at_mut(i)));
|
||||
}
|
||||
|
||||
pub fn to_bigint_inplace(&self, a: &PolyRNS<u64>, step: usize, coeffs: &mut [BigInt]) {
|
||||
assert!(
|
||||
step <= a.n(),
|
||||
"invalid step: step={} > a.n()={}",
|
||||
step,
|
||||
a.n()
|
||||
);
|
||||
assert!(
|
||||
coeffs.len() <= a.n() / step,
|
||||
"invalid coeffs: coeffs.len()={} > a.n()/step={}",
|
||||
coeffs.len(),
|
||||
a.n() / step
|
||||
);
|
||||
|
||||
let mut inv_crt: Vec<BigInt> = vec![BigInt::default(); self.level() + 1];
|
||||
let q_big: BigInt = self.modulus();
|
||||
let q_big_half: BigInt = &q_big >> 1;
|
||||
|
||||
inv_crt.iter_mut().enumerate().for_each(|(i, a)| {
|
||||
let qi_big = BigInt::from(self.0[i].modulus.q);
|
||||
*a = &q_big / &qi_big;
|
||||
*a *= a.modinv(&qi_big).unwrap();
|
||||
});
|
||||
|
||||
(0..self.n()).step_by(step).enumerate().for_each(|(i, j)| {
|
||||
coeffs[j] = BigInt::from(a.at(0).0[i]) * &inv_crt[0];
|
||||
(1..self.level() + 1).for_each(|k| {
|
||||
coeffs[j] += BigInt::from(a.at(k).0[i] * &inv_crt[k]);
|
||||
});
|
||||
coeffs[j] %= &q_big;
|
||||
if &coeffs[j] >= &q_big_half {
|
||||
coeffs[j] -= &q_big;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl RingRNS<u64> {
|
||||
pub fn ntt_inplace<const LAZY: bool>(&self, a: &mut PolyRNS<u64>) {
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.ntt_inplace::<LAZY>(&mut a.0[i]));
|
||||
}
|
||||
|
||||
pub fn intt_inplace<const LAZY: bool>(&self, a: &mut PolyRNS<u64>) {
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.intt_inplace::<LAZY>(&mut a.0[i]));
|
||||
}
|
||||
|
||||
pub fn ntt<const LAZY: bool>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.ntt::<LAZY>(&a.0[i], &mut b.0[i]));
|
||||
}
|
||||
|
||||
pub fn intt<const LAZY: bool>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.intt::<LAZY>(&a.0[i], &mut b.0[i]));
|
||||
}
|
||||
}
|
||||
|
||||
impl RingRNS<u64> {
|
||||
#[inline(always)]
|
||||
pub fn a_add_b_into_c<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<u64>,
|
||||
b: &PolyRNS<u64>,
|
||||
c: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
c.level() >= self.level(),
|
||||
"c.level()={} < self.level()={}",
|
||||
c.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_add_b_into_c::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_add_b_into_b<const REDUCE: REDUCEMOD>(&self, a: &PolyRNS<u64>, b: &mut PolyRNS<u64>) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_add_b_into_b::<REDUCE>(&a.0[i], &mut b.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_into_c<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<u64>,
|
||||
b: &PolyRNS<u64>,
|
||||
c: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
c.level() >= self.level(),
|
||||
"c.level()={} < self.level()={}",
|
||||
c.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0.iter().enumerate().for_each(|(i, ring)| {
|
||||
ring.a_sub_b_into_c::<BRANGE, REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_into_b<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<u64>,
|
||||
b: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_sub_b_into_b::<BRANGE, REDUCE>(&a.0[i], &mut b.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_into_a<const BRANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
b: &PolyRNS<u64>,
|
||||
a: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_sub_b_into_a::<BRANGE, REDUCE>(&b.0[i], &mut a.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_neg_into_b<const ARANGE: u8, const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<u64>,
|
||||
b: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_neg_into_b::<ARANGE, REDUCE>(&a.0[i], &mut b.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_neg_into_a<const ARANGE: u8, const REDUCE: REDUCEMOD>(&self, a: &mut PolyRNS<u64>) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_neg_into_a::<ARANGE, REDUCE>(&mut a.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn mul_montgomery_external<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<Montgomery<u64>>,
|
||||
b: &PolyRNS<u64>,
|
||||
c: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
c.level() >= self.level(),
|
||||
"c.level()={} < self.level()={}",
|
||||
c.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0.iter().enumerate().for_each(|(i, ring)| {
|
||||
ring.a_mul_b_montgomery_into_c::<REDUCE>(&a.0[i], &b.0[i], &mut c.0[i])
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn mul_montgomery_external_inplace<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<Montgomery<u64>>,
|
||||
b: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_mul_b_montgomery_into_a::<REDUCE>(&a.0[i], &mut b.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn mul_scalar<const REDUCE: REDUCEMOD>(
|
||||
&self,
|
||||
a: &PolyRNS<u64>,
|
||||
b: &u64,
|
||||
c: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
c.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
c.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_mul_b_scalar_into_c::<REDUCE>(&a.0[i], b, &mut c.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn mul_scalar_inplace<const REDUCE: REDUCEMOD>(&self, b: &u64, a: &mut PolyRNS<u64>) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, ring)| ring.a_mul_b_scalar_into_a::<REDUCE>(b, &mut a.0[i]));
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn a_sub_b_add_scalar_mul_scalar_barrett_into_e<
|
||||
const BRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
a: &PolyRNS<u64>,
|
||||
b: &PolyRNS<u64>,
|
||||
c: &u64,
|
||||
d: &Barrett<u64>,
|
||||
e: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
e.level() >= self.level(),
|
||||
"e.level()={} < self.level()={}",
|
||||
e.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0.iter().enumerate().for_each(|(i, ring)| {
|
||||
ring.a_sub_b_add_c_scalar_mul_d_scalar_barrett_into_e::<BRANGE, REDUCE>(
|
||||
&a.0[i],
|
||||
&b.0[i],
|
||||
c,
|
||||
d,
|
||||
&mut e.0[i],
|
||||
)
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a<
|
||||
const BRANGE: u8,
|
||||
const REDUCE: REDUCEMOD,
|
||||
>(
|
||||
&self,
|
||||
b: &PolyRNS<u64>,
|
||||
c: &u64,
|
||||
d: &Barrett<u64>,
|
||||
a: &mut PolyRNS<u64>,
|
||||
) {
|
||||
debug_assert!(
|
||||
a.level() >= self.level(),
|
||||
"a.level()={} < self.level()={}",
|
||||
a.level(),
|
||||
self.level()
|
||||
);
|
||||
debug_assert!(
|
||||
b.level() >= self.level(),
|
||||
"b.level()={} < self.level()={}",
|
||||
b.level(),
|
||||
self.level()
|
||||
);
|
||||
self.0.iter().enumerate().for_each(|(i, ring)| {
|
||||
ring.b_sub_a_add_c_scalar_mul_d_scalar_barrett_into_a::<BRANGE, REDUCE>(
|
||||
&b.0[i],
|
||||
c,
|
||||
d,
|
||||
&mut a.0[i],
|
||||
)
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
use crate::poly::Poly;
|
||||
use crate::ring::Ring;
|
||||
|
||||
impl Ring<u64> {
|
||||
pub fn switch_degree<const NTT: bool>(
|
||||
&self,
|
||||
a: &Poly<u64>,
|
||||
buf: &mut Poly<u64>,
|
||||
b: &mut Poly<u64>,
|
||||
) {
|
||||
let (n_in, n_out) = (a.n(), b.n());
|
||||
|
||||
if n_in > n_out {
|
||||
let (gap_in, gap_out) = (1, n_in / n_out);
|
||||
if NTT {
|
||||
self.intt::<false>(&a, buf);
|
||||
b.0.iter_mut()
|
||||
.step_by(gap_in)
|
||||
.zip(buf.0.iter().step_by(gap_out))
|
||||
.for_each(|(x_out, x_in)| *x_out = *x_in);
|
||||
self.ntt_inplace::<false>(b);
|
||||
} else {
|
||||
b.0.iter_mut()
|
||||
.step_by(gap_in)
|
||||
.zip(a.0.iter().step_by(gap_out))
|
||||
.for_each(|(x_out, x_in)| *x_out = *x_in);
|
||||
}
|
||||
} else {
|
||||
let gap: usize = n_out / n_in;
|
||||
|
||||
if NTT {
|
||||
a.0.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, &c)| (0..gap).for_each(|j| b.0[i * gap + j] = c));
|
||||
} else {
|
||||
b.0.iter_mut()
|
||||
.step_by(gap)
|
||||
.zip(a.0.iter())
|
||||
.for_each(|(x_out, x_in)| *x_out = *x_in);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
use crate::modulus::WordOps;
|
||||
use crate::poly::{Poly, PolyRNS};
|
||||
use crate::ring::{Ring, RingRNS};
|
||||
use num::ToPrimitive;
|
||||
use rand_distr::{Distribution, Normal};
|
||||
use sampling::source::Source;
|
||||
|
||||
impl Ring<u64> {
|
||||
pub fn fill_uniform(&self, source: &mut Source, a: &mut Poly<u64>) {
|
||||
let max: u64 = self.modulus.q;
|
||||
let mask: u64 = max.mask();
|
||||
a.0.iter_mut()
|
||||
.for_each(|a| *a = source.next_u64n(max, mask));
|
||||
}
|
||||
|
||||
pub fn fill_dist_f64<T: Distribution<f64>>(
|
||||
&self,
|
||||
source: &mut Source,
|
||||
dist: T,
|
||||
bound: f64,
|
||||
a: &mut Poly<u64>,
|
||||
) {
|
||||
let max: u64 = self.modulus.q;
|
||||
a.0.iter_mut().for_each(|a| {
|
||||
let mut dist_f64: f64 = dist.sample(source);
|
||||
|
||||
while dist_f64.abs() > bound {
|
||||
dist_f64 = dist.sample(source)
|
||||
}
|
||||
|
||||
let dist_u64: u64 = (dist_f64 + 0.5).abs().to_u64().unwrap();
|
||||
let sign: u64 = dist_f64.to_bits() >> 63;
|
||||
|
||||
*a = (dist_u64 * sign) | (max - dist_u64) * (sign ^ 1)
|
||||
});
|
||||
}
|
||||
|
||||
pub fn fill_normal(&self, source: &mut Source, sigma: f64, bound: f64, a: &mut Poly<u64>) {
|
||||
self.fill_dist_f64(source, Normal::new(0.0, sigma).unwrap(), bound, a);
|
||||
}
|
||||
}
|
||||
|
||||
impl RingRNS<u64> {
|
||||
pub fn fill_uniform(&self, source: &mut Source, a: &mut PolyRNS<u64>) {
|
||||
self.0
|
||||
.iter()
|
||||
.enumerate()
|
||||
.for_each(|(i, r)| r.fill_uniform(source, a.at_mut(i)));
|
||||
}
|
||||
|
||||
pub fn fill_dist_f64<T: Distribution<f64>>(
|
||||
&self,
|
||||
source: &mut Source,
|
||||
dist: T,
|
||||
bound: f64,
|
||||
a: &mut PolyRNS<u64>,
|
||||
) {
|
||||
(0..a.n()).for_each(|j| {
|
||||
let mut dist_f64: f64 = dist.sample(source);
|
||||
|
||||
while dist_f64.abs() > bound {
|
||||
dist_f64 = dist.sample(source)
|
||||
}
|
||||
|
||||
let dist_u64: u64 = (dist_f64 + 0.5).abs().to_u64().unwrap();
|
||||
let sign: u64 = dist_f64.to_bits() >> 63;
|
||||
|
||||
self.0.iter().enumerate().for_each(|(i, r)| {
|
||||
a.at_mut(i).0[j] = (dist_u64 * sign) | (r.modulus.q - dist_u64) * (sign ^ 1);
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
use crate::modulus::ONCE;
|
||||
use crate::poly::Poly;
|
||||
use crate::ring::Ring;
|
||||
|
||||
impl Ring<u64> {
|
||||
// Generates a vector storing {X^{2^0}, X^{2^1}, .., X^{2^log_n}}.
|
||||
pub fn gen_x_pow_2<const NTT: bool, const INV: bool>(&self, log_n: usize) -> Vec<Poly<u64>> {
|
||||
let mut x_pow: Vec<Poly<u64>> = Vec::<Poly<u64>>::with_capacity(log_n);
|
||||
|
||||
(0..log_n).for_each(|i| {
|
||||
let mut idx: usize = 1 << i;
|
||||
|
||||
if INV {
|
||||
idx = self.n() - idx;
|
||||
}
|
||||
|
||||
x_pow.push(self.new_poly());
|
||||
|
||||
if i == 0 {
|
||||
x_pow[i].0[idx] = self.modulus.montgomery.one();
|
||||
self.ntt_inplace::<false>(&mut x_pow[i]);
|
||||
} else {
|
||||
let (left, right) = x_pow.split_at_mut(i);
|
||||
self.a_mul_b_montgomery_into_c::<ONCE>(&left[i - 1], &left[i - 1], &mut right[0]);
|
||||
}
|
||||
});
|
||||
|
||||
if INV {
|
||||
self.a_neg_into_a::<1, ONCE>(&mut x_pow[0]);
|
||||
}
|
||||
|
||||
if !NTT {
|
||||
x_pow.iter_mut().for_each(|x| self.intt_inplace::<false>(x));
|
||||
}
|
||||
|
||||
x_pow
|
||||
}
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct ScalarRNS<O>(pub Vec<O>);
|
||||
@@ -1,97 +0,0 @@
|
||||
use itertools::izip;
|
||||
use rns::automorphism::AutoPerm;
|
||||
use rns::poly::Poly;
|
||||
use rns::ring::Ring;
|
||||
|
||||
#[test]
|
||||
fn automorphism_u64() {
|
||||
let n: usize = 1 << 4;
|
||||
let nth_root: usize = n << 1;
|
||||
let q_base: u64 = 65537u64;
|
||||
let q_power: usize = 1usize;
|
||||
let ring: Ring<u64> = Ring::new(n, q_base, q_power);
|
||||
|
||||
sub_test("test_automorphism_native_u64::<NTT:false>", || {
|
||||
test_automorphism_native_u64::<false>(&ring, nth_root)
|
||||
});
|
||||
sub_test("test_automorphism_native_u64::<NTT:true>", || {
|
||||
test_automorphism_native_u64::<true>(&ring, nth_root)
|
||||
});
|
||||
|
||||
sub_test("test_automorphism_from_perm_u64::<NTT:false>", || {
|
||||
test_automorphism_from_perm_u64::<false>(&ring)
|
||||
});
|
||||
sub_test("test_automorphism_from_perm_u64::<NTT:true>", || {
|
||||
test_automorphism_from_perm_u64::<true>(&ring)
|
||||
});
|
||||
}
|
||||
|
||||
fn sub_test<F: FnOnce()>(name: &str, f: F) {
|
||||
println!("Running {}", name);
|
||||
f();
|
||||
}
|
||||
|
||||
fn test_automorphism_native_u64<const NTT: bool>(ring: &Ring<u64>, nth_root: usize) {
|
||||
let n: usize = ring.n();
|
||||
let q: u64 = ring.modulus.q;
|
||||
|
||||
let mut p0: Poly<u64> = ring.new_poly();
|
||||
let mut p1: Poly<u64> = ring.new_poly();
|
||||
|
||||
for i in 0..p0.n() {
|
||||
p0.0[i] = i as u64
|
||||
}
|
||||
|
||||
if NTT {
|
||||
ring.ntt_inplace::<false>(&mut p0);
|
||||
}
|
||||
|
||||
let gal_el: usize = 2 * nth_root - 1;
|
||||
|
||||
ring.a_apply_automorphism_native_into_b::<NTT>(&p0, gal_el, nth_root, &mut p1);
|
||||
|
||||
if NTT {
|
||||
ring.intt_inplace::<false>(&mut p1);
|
||||
}
|
||||
|
||||
p0.0[0] = 0;
|
||||
for i in 1..p0.n() {
|
||||
p0.0[i] = q - (n - i) as u64
|
||||
}
|
||||
|
||||
izip!(p0.0, p1.0).for_each(|(a, b)| assert_eq!(a, b));
|
||||
}
|
||||
|
||||
fn test_automorphism_from_perm_u64<const NTT: bool>(ring: &Ring<u64>) {
|
||||
let n: usize = ring.n();
|
||||
let q: u64 = ring.modulus.q;
|
||||
|
||||
let mut p0: Poly<u64> = ring.new_poly();
|
||||
let mut p1: Poly<u64> = ring.new_poly();
|
||||
|
||||
for i in 0..p0.n() {
|
||||
p0.0[i] = i as u64
|
||||
}
|
||||
|
||||
if NTT {
|
||||
ring.ntt_inplace::<false>(&mut p0);
|
||||
}
|
||||
|
||||
let gen_1 = 0;
|
||||
let gen_2 = true;
|
||||
|
||||
let auto_perm = AutoPerm::new::<_, NTT>(&ring, gen_1, gen_2);
|
||||
|
||||
ring.a_apply_automorphism_from_perm_into_b::<NTT>(&p0, &auto_perm, &mut p1);
|
||||
|
||||
if NTT {
|
||||
ring.intt_inplace::<false>(&mut p1);
|
||||
}
|
||||
|
||||
p0.0[0] = 0;
|
||||
for i in 1..p0.n() {
|
||||
p0.0[i] = q - (n - i) as u64
|
||||
}
|
||||
|
||||
izip!(p0.0, p1.0).for_each(|(a, b)| assert_eq!(a, b));
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
use itertools::izip;
|
||||
use rns::modulus::{WordOps, ONCE};
|
||||
use rns::poly::Poly;
|
||||
use rns::ring::Ring;
|
||||
use sampling::source::Source;
|
||||
|
||||
#[test]
|
||||
fn digit_decomposition() {
|
||||
let n: usize = 1 << 4;
|
||||
let q_base: u64 = 65537u64;
|
||||
let q_power: usize = 1usize;
|
||||
let ring: Ring<u64> = Ring::new(n, q_base, q_power);
|
||||
|
||||
sub_test("test_unsigned_digit_decomposition", || {
|
||||
test_unsigned_digit_decomposition(&ring)
|
||||
});
|
||||
|
||||
sub_test("test_signed_digit_decomposition::<BALANCED=false>", || {
|
||||
test_signed_digit_decomposition::<false>(&ring)
|
||||
});
|
||||
|
||||
sub_test("test_signed_digit_decomposition::<BALANCED=true>", || {
|
||||
test_signed_digit_decomposition::<true>(&ring)
|
||||
});
|
||||
}
|
||||
|
||||
fn sub_test<F: FnOnce()>(name: &str, f: F) {
|
||||
println!("Running {}", name);
|
||||
f();
|
||||
}
|
||||
|
||||
fn test_unsigned_digit_decomposition(ring: &Ring<u64>) {
|
||||
let mut a: Poly<u64> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
let mut c: Poly<u64> = ring.new_poly();
|
||||
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
ring.fill_uniform(&mut source, &mut a);
|
||||
|
||||
let base: usize = 8;
|
||||
let log_q: usize = ring.modulus.q.log2();
|
||||
let d: usize = ((log_q + base - 1) / base) as _;
|
||||
|
||||
(0..d).for_each(|i| {
|
||||
ring.a_ith_digit_unsigned_base_scalar_b_into_c(i, &a, &base, &mut b);
|
||||
ring.a_mul_b_scalar_into_a::<ONCE>(&(1 << (i * base)), &mut b);
|
||||
ring.a_add_b_into_b::<ONCE>(&b, &mut c);
|
||||
});
|
||||
|
||||
izip!(a.0, c.0).for_each(|(a, c)| assert_eq!(a, c));
|
||||
}
|
||||
|
||||
fn test_signed_digit_decomposition<const BALANCED: bool>(ring: &Ring<u64>) {
|
||||
let mut a: Poly<u64> = ring.new_poly();
|
||||
let mut b: Poly<u64> = ring.new_poly();
|
||||
let mut carry: Poly<u64> = ring.new_poly();
|
||||
let mut c: Poly<u64> = ring.new_poly();
|
||||
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
ring.fill_uniform(&mut source, &mut a);
|
||||
|
||||
let base: usize = 8;
|
||||
let log_q: usize = ring.modulus.q.log2();
|
||||
let d: usize = ((log_q + base - 1) / base) as _;
|
||||
|
||||
(0..d).for_each(|i| {
|
||||
ring.a_ith_digit_signed_base_scalar_b_into_c::<BALANCED>(i, &a, &base, &mut carry, &mut b);
|
||||
ring.a_mul_b_scalar_into_a::<ONCE>(&(1 << (i * base)), &mut b);
|
||||
ring.a_add_b_into_b::<ONCE>(&b, &mut c);
|
||||
});
|
||||
|
||||
izip!(a.0, c.0).for_each(|(a, c)| assert_eq!(a, c));
|
||||
}
|
||||
@@ -1,287 +0,0 @@
|
||||
use itertools::izip;
|
||||
use num_bigint::BigInt;
|
||||
use rns::num_bigint::Div;
|
||||
use rns::poly::{Poly, PolyRNS};
|
||||
use rns::ring::RingRNS;
|
||||
use sampling::source::Source;
|
||||
|
||||
#[test]
|
||||
fn rescaling_rns_u64() {
|
||||
let n = 1 << 10;
|
||||
let moduli: Vec<u64> = vec![
|
||||
0x1fffffffffc80001u64,
|
||||
0x1fffffffffe00001u64,
|
||||
0x1fffffffffb40001,
|
||||
0x1fffffffff500001,
|
||||
];
|
||||
let ring_rns: RingRNS<u64> = RingRNS::new(n, moduli);
|
||||
|
||||
sub_test("test_div_by_last_modulus::<ROUND:false, NTT:false>", || {
|
||||
test_div_by_last_modulus::<false, false>(&ring_rns)
|
||||
});
|
||||
sub_test("test_div_by_last_modulus::<ROUND:false, NTT:true>", || {
|
||||
test_div_by_last_modulus::<false, true>(&ring_rns)
|
||||
});
|
||||
sub_test("test_div_by_last_modulus::<ROUND:true, NTT:false>", || {
|
||||
test_div_by_last_modulus::<true, false>(&ring_rns)
|
||||
});
|
||||
sub_test("test_div_by_last_modulus::<ROUND:true, NTT:true>", || {
|
||||
test_div_by_last_modulus::<true, true>(&ring_rns)
|
||||
});
|
||||
sub_test(
|
||||
"test_div_by_last_modulus_inplace::<ROUND:false, NTT:false>",
|
||||
|| test_div_by_last_modulus_inplace::<false, false>(&ring_rns),
|
||||
);
|
||||
sub_test(
|
||||
"test_div_by_last_modulus_inplace::<ROUND:false, NTT:true>",
|
||||
|| test_div_by_last_modulus_inplace::<false, true>(&ring_rns),
|
||||
);
|
||||
sub_test(
|
||||
"test_div_by_last_modulus_inplace::<ROUND:true, NTT:true>",
|
||||
|| test_div_by_last_modulus_inplace::<true, true>(&ring_rns),
|
||||
);
|
||||
sub_test(
|
||||
"test_div_by_last_modulus_inplace::<ROUND:true, NTT:false>",
|
||||
|| test_div_by_last_modulus_inplace::<true, false>(&ring_rns),
|
||||
);
|
||||
sub_test("test_div_by_last_moduli::<ROUND:false, NTT:false>", || {
|
||||
test_div_by_last_moduli::<false, false>(&ring_rns)
|
||||
});
|
||||
sub_test("test_div_by_last_moduli::<ROUND:false, NTT:true>", || {
|
||||
test_div_by_last_moduli::<false, true>(&ring_rns)
|
||||
});
|
||||
sub_test("test_div_by_last_moduli::<ROUND:true, NTT:false>", || {
|
||||
test_div_by_last_moduli::<true, false>(&ring_rns)
|
||||
});
|
||||
sub_test("test_div_by_last_moduli::<ROUND:true, NTT:true>", || {
|
||||
test_div_by_last_moduli::<true, true>(&ring_rns)
|
||||
});
|
||||
sub_test(
|
||||
"test_div_by_last_moduli_inplace::<ROUND:false, NTT:false>",
|
||||
|| test_div_by_last_moduli_inplace::<false, false>(&ring_rns),
|
||||
);
|
||||
sub_test(
|
||||
"test_div_by_last_moduli_inplace::<ROUND:false, NTT:true>",
|
||||
|| test_div_by_last_moduli_inplace::<false, true>(&ring_rns),
|
||||
);
|
||||
sub_test(
|
||||
"test_div_by_last_moduli_inplace::<ROUND:true, NTT:false>",
|
||||
|| test_div_by_last_moduli_inplace::<true, false>(&ring_rns),
|
||||
);
|
||||
sub_test(
|
||||
"test_div_by_last_moduli_inplace::<ROUND:true, NTT:true>",
|
||||
|| test_div_by_last_moduli_inplace::<true, true>(&ring_rns),
|
||||
);
|
||||
}
|
||||
|
||||
fn sub_test<F: FnOnce()>(name: &str, f: F) {
|
||||
println!("Running {}", name);
|
||||
f();
|
||||
}
|
||||
|
||||
fn test_div_by_last_modulus<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
|
||||
let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
|
||||
let mut buf: [Poly<u64>; 2] = [ring_rns.new_poly(), ring_rns.new_poly()];
|
||||
let mut c: PolyRNS<u64> = ring_rns.at_level(ring_rns.level() - 1).new_polyrns();
|
||||
|
||||
// Allocates a random PolyRNS
|
||||
ring_rns.fill_uniform(&mut source, &mut a);
|
||||
|
||||
// Maps PolyRNS to [BigInt]
|
||||
let mut coeffs_a: Vec<BigInt> = (0..a.n()).map(|i| BigInt::from(i)).collect();
|
||||
ring_rns
|
||||
.at_level(a.level())
|
||||
.to_bigint_inplace(&a, 1, &mut coeffs_a);
|
||||
|
||||
// Performs c = intt(ntt(a) / q_level)
|
||||
if NTT {
|
||||
ring_rns.ntt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
ring_rns.div_by_last_modulus::<ROUND, NTT>(&a, &mut buf, &mut c);
|
||||
|
||||
if NTT {
|
||||
ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
|
||||
}
|
||||
|
||||
// Exports c to coeffs_c
|
||||
let mut coeffs_c = vec![BigInt::from(0); c.n()];
|
||||
ring_rns
|
||||
.at_level(c.level())
|
||||
.to_bigint_inplace(&c, 1, &mut coeffs_c);
|
||||
|
||||
// Performs floor division on a
|
||||
let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
|
||||
coeffs_a.iter_mut().for_each(|a| {
|
||||
if ROUND {
|
||||
*a = a.div_round(&scalar_big);
|
||||
} else {
|
||||
*a = a.div_floor(&scalar_big);
|
||||
}
|
||||
});
|
||||
|
||||
izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
|
||||
}
|
||||
|
||||
fn test_div_by_last_modulus_inplace<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
|
||||
let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
|
||||
let mut buf: [Poly<u64>; 2] = [ring_rns.new_poly(), ring_rns.new_poly()];
|
||||
|
||||
// Allocates a random PolyRNS
|
||||
ring_rns.fill_uniform(&mut source, &mut a);
|
||||
|
||||
// Maps PolyRNS to [BigInt]
|
||||
let mut coeffs_a: Vec<BigInt> = (0..a.n()).map(|i| BigInt::from(i)).collect();
|
||||
ring_rns
|
||||
.at_level(a.level())
|
||||
.to_bigint_inplace(&a, 1, &mut coeffs_a);
|
||||
|
||||
// Performs c = intt(ntt(a) / q_level)
|
||||
if NTT {
|
||||
ring_rns.ntt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
ring_rns.div_by_last_modulus_inplace::<ROUND, NTT>(&mut buf, &mut a);
|
||||
|
||||
if NTT {
|
||||
ring_rns
|
||||
.at_level(a.level() - 1)
|
||||
.intt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
// Exports c to coeffs_c
|
||||
let mut coeffs_c = vec![BigInt::from(0); a.n()];
|
||||
ring_rns
|
||||
.at_level(a.level() - 1)
|
||||
.to_bigint_inplace(&a, 1, &mut coeffs_c);
|
||||
|
||||
// Performs floor division on a
|
||||
let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
|
||||
coeffs_a.iter_mut().for_each(|a| {
|
||||
if ROUND {
|
||||
*a = a.div_round(&scalar_big);
|
||||
} else {
|
||||
*a = a.div_floor(&scalar_big);
|
||||
}
|
||||
});
|
||||
|
||||
izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
|
||||
}
|
||||
|
||||
fn test_div_by_last_moduli<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
|
||||
let nb_moduli_dropped: usize = ring_rns.level();
|
||||
|
||||
let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
|
||||
let mut buf0: [Poly<u64>; 2] = [ring_rns.new_poly(), ring_rns.new_poly()];
|
||||
let mut buf1: PolyRNS<u64> = ring_rns.new_polyrns();
|
||||
let mut c: PolyRNS<u64> = ring_rns
|
||||
.at_level(ring_rns.level() - nb_moduli_dropped)
|
||||
.new_polyrns();
|
||||
|
||||
// Allocates a random PolyRNS
|
||||
ring_rns.fill_uniform(&mut source, &mut a);
|
||||
|
||||
// Maps PolyRNS to [BigInt]
|
||||
let mut coeffs_a: Vec<BigInt> = (0..a.n()).map(|i| BigInt::from(i)).collect();
|
||||
ring_rns
|
||||
.at_level(a.level())
|
||||
.to_bigint_inplace(&a, 1, &mut coeffs_a);
|
||||
|
||||
// Performs c = intt(ntt(a) / q_level)
|
||||
if NTT {
|
||||
ring_rns.ntt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
ring_rns.div_by_last_moduli::<ROUND, NTT>(nb_moduli_dropped, &a, &mut buf0, &mut buf1, &mut c);
|
||||
|
||||
if NTT {
|
||||
ring_rns.at_level(c.level()).intt_inplace::<false>(&mut c);
|
||||
}
|
||||
|
||||
// Exports c to coeffs_c
|
||||
let mut coeffs_c = vec![BigInt::from(0); a.n()];
|
||||
ring_rns
|
||||
.at_level(c.level())
|
||||
.to_bigint_inplace(&c, 1, &mut coeffs_c);
|
||||
|
||||
// Performs floor division on a
|
||||
let mut scalar_big = BigInt::from(1);
|
||||
(0..nb_moduli_dropped)
|
||||
.for_each(|i| scalar_big *= BigInt::from(ring_rns.0[ring_rns.level() - i].modulus.q));
|
||||
coeffs_a.iter_mut().for_each(|a| {
|
||||
if ROUND {
|
||||
*a = a.div_round(&scalar_big);
|
||||
} else {
|
||||
*a = a.div_floor(&scalar_big);
|
||||
}
|
||||
});
|
||||
|
||||
izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
|
||||
}
|
||||
|
||||
fn test_div_by_last_moduli_inplace<const ROUND: bool, const NTT: bool>(ring_rns: &RingRNS<u64>) {
|
||||
let seed: [u8; 32] = [0; 32];
|
||||
let mut source: Source = Source::new(seed);
|
||||
|
||||
let nb_moduli_dropped: usize = ring_rns.level();
|
||||
|
||||
let mut a: PolyRNS<u64> = ring_rns.new_polyrns();
|
||||
let mut buf0: [Poly<u64>; 2] = [ring_rns.new_poly(), ring_rns.new_poly()];
|
||||
let mut buf1: PolyRNS<u64> = ring_rns.new_polyrns();
|
||||
|
||||
// Allocates a random PolyRNS
|
||||
ring_rns.fill_uniform(&mut source, &mut a);
|
||||
|
||||
// Maps PolyRNS to [BigInt]
|
||||
let mut coeffs_a: Vec<BigInt> = (0..a.n()).map(|i| BigInt::from(i)).collect();
|
||||
ring_rns
|
||||
.at_level(a.level())
|
||||
.to_bigint_inplace(&a, 1, &mut coeffs_a);
|
||||
|
||||
// Performs c = intt(ntt(a) / q_level)
|
||||
if NTT {
|
||||
ring_rns.ntt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
ring_rns.div_by_last_moduli_inplace::<ROUND, NTT>(
|
||||
nb_moduli_dropped,
|
||||
&mut buf0,
|
||||
&mut buf1,
|
||||
&mut a,
|
||||
);
|
||||
|
||||
if NTT {
|
||||
ring_rns
|
||||
.at_level(a.level() - nb_moduli_dropped)
|
||||
.intt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
// Exports c to coeffs_c
|
||||
let mut coeffs_c = vec![BigInt::from(0); a.n()];
|
||||
ring_rns
|
||||
.at_level(a.level() - nb_moduli_dropped)
|
||||
.to_bigint_inplace(&a, 1, &mut coeffs_c);
|
||||
|
||||
// Performs floor division on a
|
||||
let mut scalar_big = BigInt::from(1);
|
||||
(0..nb_moduli_dropped)
|
||||
.for_each(|i| scalar_big *= BigInt::from(ring_rns.0[ring_rns.level() - i].modulus.q));
|
||||
coeffs_a.iter_mut().for_each(|a| {
|
||||
if ROUND {
|
||||
*a = a.div_round(&scalar_big);
|
||||
} else {
|
||||
*a = a.div_floor(&scalar_big);
|
||||
}
|
||||
});
|
||||
|
||||
izip!(coeffs_a, coeffs_c).for_each(|(a, b)| assert_eq!(a, b));
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
use rns::poly::Poly;
|
||||
use rns::ring::Ring;
|
||||
|
||||
#[test]
|
||||
fn ring_switch_u64() {
|
||||
let n: usize = 1 << 4;
|
||||
let q_base: u64 = 65537u64;
|
||||
let q_power: usize = 1usize;
|
||||
let ring_small: Ring<u64> = Ring::new(n, q_base, q_power);
|
||||
let ring_large = Ring::new(2 * n, q_base, q_power);
|
||||
|
||||
sub_test("test_ring_switch_small_to_large_u64::<NTT:false>", || {
|
||||
test_ring_switch_small_to_large_u64::<false>(&ring_small, &ring_large)
|
||||
});
|
||||
sub_test("test_ring_switch_small_to_large_u64::<NTT:true>", || {
|
||||
test_ring_switch_small_to_large_u64::<true>(&ring_small, &ring_large)
|
||||
});
|
||||
sub_test("test_ring_switch_large_to_small_u64::<NTT:false>", || {
|
||||
test_ring_switch_large_to_small_u64::<false>(&ring_small, &ring_large)
|
||||
});
|
||||
sub_test("test_ring_switch_large_to_small_u64::<NTT:true>", || {
|
||||
test_ring_switch_large_to_small_u64::<true>(&ring_small, &ring_large)
|
||||
});
|
||||
}
|
||||
|
||||
fn sub_test<F: FnOnce()>(name: &str, f: F) {
|
||||
println!("Running {}", name);
|
||||
f();
|
||||
}
|
||||
|
||||
fn test_ring_switch_small_to_large_u64<const NTT: bool>(
|
||||
ring_small: &Ring<u64>,
|
||||
ring_large: &Ring<u64>,
|
||||
) {
|
||||
let mut a: Poly<u64> = ring_small.new_poly();
|
||||
let mut buf: Poly<u64> = ring_small.new_poly();
|
||||
let mut b: Poly<u64> = ring_large.new_poly();
|
||||
|
||||
a.0.iter_mut().enumerate().for_each(|(i, x)| *x = i as u64);
|
||||
|
||||
if NTT {
|
||||
ring_small.ntt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
ring_large.switch_degree::<NTT>(&a, &mut buf, &mut b);
|
||||
|
||||
if NTT {
|
||||
ring_small.intt_inplace::<false>(&mut a);
|
||||
ring_large.intt_inplace::<false>(&mut b);
|
||||
}
|
||||
|
||||
let gap: usize = ring_large.n() / ring_small.n();
|
||||
|
||||
b.0.iter()
|
||||
.step_by(gap)
|
||||
.zip(a.0.iter())
|
||||
.for_each(|(x_out, x_in)| assert_eq!(x_out, x_in));
|
||||
}
|
||||
|
||||
fn test_ring_switch_large_to_small_u64<const NTT: bool>(
|
||||
ring_small: &Ring<u64>,
|
||||
ring_large: &Ring<u64>,
|
||||
) {
|
||||
let mut a: Poly<u64> = ring_large.new_poly();
|
||||
let mut buf: Poly<u64> = ring_large.new_poly();
|
||||
let mut b: Poly<u64> = ring_small.new_poly();
|
||||
|
||||
a.0.iter_mut().enumerate().for_each(|(i, x)| *x = i as u64);
|
||||
|
||||
if NTT {
|
||||
ring_large.ntt_inplace::<false>(&mut a);
|
||||
}
|
||||
|
||||
ring_large.switch_degree::<NTT>(&a, &mut buf, &mut b);
|
||||
|
||||
if NTT {
|
||||
ring_large.intt_inplace::<false>(&mut a);
|
||||
ring_small.intt_inplace::<false>(&mut b);
|
||||
}
|
||||
|
||||
let gap: usize = ring_large.n() / ring_small.n();
|
||||
|
||||
a.0.iter()
|
||||
.step_by(gap)
|
||||
.zip(b.0.iter())
|
||||
.for_each(|(x_out, x_in)| assert_eq!(x_out, x_in));
|
||||
}
|
||||
Reference in New Issue
Block a user