From 681268c28e734596fc5d7d597925a729fdf3cdc6 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Bossuat Date: Mon, 6 Jan 2025 13:00:34 +0100 Subject: [PATCH] wip --- math/benches/operations.rs | 22 +-- math/benches/ring_rns.rs | 8 +- math/src/modulus.rs | 78 +++++---- math/src/modulus/impl_u64/operations.rs | 112 ++++++------ math/src/poly.rs | 9 +- math/src/ring.rs | 2 +- math/src/ring/impl_u64/rescaling_rns.rs | 220 +++++++++++++----------- math/src/ring/impl_u64/ring.rs | 43 +++-- math/tests/rescaling_rns.rs | 61 +++++++ 9 files changed, 335 insertions(+), 220 deletions(-) create mode 100644 math/tests/rescaling_rns.rs diff --git a/math/benches/operations.rs b/math/benches/operations.rs index b0ed1c9..8896dff 100644 --- a/math/benches/operations.rs +++ b/math/benches/operations.rs @@ -1,11 +1,11 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use math::ring::Ring; -use math::modulus::VecOperations; +use math::modulus::VectorOperations; use math::modulus::montgomery::Montgomery; use math::modulus::ONCE; use math::CHUNK; -fn vec_add_unary(c: &mut Criterion) { +fn va_add_vb_into_vb(c: &mut Criterion) { fn runner(r: Ring) -> Box { let mut p0: math::poly::Poly = r.new_poly(); @@ -15,11 +15,11 @@ fn vec_add_unary(c: &mut Criterion) { p1.0[i] = i as u64; } Box::new(move || { - r.modulus.vec_add_unary_assign::(&p0.0, &mut p1.0); + r.modulus.va_add_vb_into_vb::(&p0.0, &mut p1.0); }) } - let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("add_vec_unary"); + let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("va_add_vb_into_vb"); for log_n in 11..17 { let n: usize = 1<) -> Box { let mut p0: math::poly::Poly> = r.new_poly(); @@ -48,11 +48,11 @@ fn vec_mul_montgomery_external_unary_assign(c: &mut Criterion) { p1.0[i] = i as u64; } Box::new(move || { - r.modulus.vec_mul_montgomery_external_unary_assign::(&p0.0, &mut p1.0); + r.modulus.va_mont_mul_vb_into_vb::(&p0.0, &mut p1.0); }) } - let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("mul_vec_montgomery_external_unary_assign"); + let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("va_mont_mul_vb_into_vb"); for log_n in 11..17 { let n: usize = 1<) -> Box { let mut p0: math::poly::Poly> = r.new_poly(); @@ -82,11 +82,11 @@ fn vec_mul_montgomery_external_binary_assign(c: &mut Criterion) { p1.0[i] = i as u64; } Box::new(move || { - r.modulus.vec_mul_montgomery_external_binary_assign::(&p0.0, & p1.0, &mut p2.0); + r.modulus.va_mont_mul_vb_into_vc::(&p0.0, & p1.0, &mut p2.0); }) } - let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("mul_vec_montgomery_external_binary_assign"); + let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("va_mont_mul_vb_into_vc"); for log_n in 11..17 { let n: usize = 1<) -> Box { let a: PolyRNS = r.new_polyrns(); @@ -11,11 +11,11 @@ fn div_floor_by_last_modulus_ntt(c: &mut Criterion) { let mut c: PolyRNS = r.new_polyrns(); Box::new(move || { - r.div_floor_by_last_modulus_ntt(&a, &mut b, &mut c) + r.div_floor_by_last_modulus::(&a, &mut b, &mut c) }) } - let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("div_floor_by_last_modulus_ntt"); + let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("div_floor_by_last_modulus_ntt_true"); for log_n in 11..18 { let n = 1<{ fn 
reduce_once(&self, q:O) -> O; } -pub trait WordOperations{ +pub trait ScalarOperations{ // Applies a parameterized modular reduction. - fn word_reduce_assign(&self, x: &mut O); + fn sa_reduce_into_sa(&self, x: &mut O); // Assigns a + b to c. - fn word_add_binary_assign(&self, a: &O, b:&O, c: &mut O); + fn sa_add_sb_into_sc(&self, a: &O, b:&O, c: &mut O); // Assigns a + b to b. - fn word_add_unary_assign(&self, a: &O, b: &mut O); + fn sa_add_sb_into_sb(&self, a: &O, b: &mut O); // Assigns a - b to c. - fn word_sub_binary_assign(&self, a: &O, b:&O, c: &mut O); + fn sa_sub_sb_into_sc(&self, a: &O, b:&O, c: &mut O); // Assigns b - a to b. - fn word_sub_unary_assign(&self, a: &O, b: &mut O); + fn sa_sub_sb_into_sb(&self, a: &O, b: &mut O); // Assigns -a to a. - fn word_neg_unary_assign(&self, a:&mut O); + fn sa_neg_into_sa(&self, a:&mut O); // Assigns -a to b. - fn word_neg_binary_assign(&self, a: &O, b:&mut O); + fn sa_neg_into_sb(&self, a: &O, b:&mut O); // Assigns a * 2^64 to b. - fn word_prepare_montgomery_assign(&self, a: &O, b: &mut montgomery::Montgomery); + fn sa_prep_mont_into_sb(&self, a: &O, b: &mut montgomery::Montgomery); // Assigns a * b to c. - fn word_mul_montgomery_external_binary_assign(&self, a:&montgomery::Montgomery, b:&O, c: &mut O); + fn sa_mont_mul_sb_into_sc(&self, a:&montgomery::Montgomery, b:&O, c: &mut O); // Assigns a * b to b. - fn word_mul_montgomery_external_unary_assign(&self, a:&montgomery::Montgomery, b:&mut O); + fn sa_mont_mul_sb_into_sb(&self, a:&montgomery::Montgomery, b:&mut O); // Assigns a * b to c. - fn word_mul_barrett_binary_assign(&self, a: &barrett::Barrett, b:&O, c: &mut O); + fn sa_barrett_mul_sb_into_sc(&self, a: &barrett::Barrett, b:&O, c: &mut O); // Assigns a * b to b. - fn word_mul_barrett_unary_assign(&self, a:&barrett::Barrett, b:&mut O); + fn sa_barrett_mul_sb_into_sb(&self, a:&barrett::Barrett, b:&mut O); // Assigns (a + 2q - b) * c to d. - fn word_sum_aqqmb_prod_c_barrett_assign_d(&self, a: &O, b: &O, c: &barrett::Barrett, d: &mut O); + fn sa_sub_sb_mul_sc_into_sd(&self, a: &O, b: &O, c: &barrett::Barrett, d: &mut O); // Assigns (a + 2q - b) * c to b. - fn word_sum_aqqmb_prod_c_barrett_assign_b(&self, a: &u64, c: &barrett::Barrett, b: &mut u64); + fn sa_sub_sb_mul_sc_into_sb(&self, a: &u64, c: &barrett::Barrett, b: &mut u64); } -pub trait VecOperations{ +pub trait VectorOperations{ // Applies a parameterized modular reduction. - fn vec_reduce_assign(&self, x: &mut [O]); + fn va_reduce_into_va(&self, x: &mut [O]); + // ADD // Assigns a[i] + b[i] to c[i] - fn vec_add_binary_assign(&self, a: &[O], b:&[O], c: &mut [O]); + fn va_add_vb_into_vc(&self, a: &[O], b:&[O], c: &mut [O]); // Assigns a[i] + b[i] to b[i] - fn vec_add_unary_assign(&self, a: &[O], b: &mut [O]); + fn va_add_vb_into_vb(&self, a: &[O], b: &mut [O]); + + // Assigns a[i] + b to c[i] + fn va_add_sb_into_vc(&self, a: &[O], b:&O, c:&mut [O]); + + // Assigns b[i] + a to b[i] + fn sa_add_vb_into_vb(&self, a:&O, b:&mut [O]); + + // SUB + // Assigns a[i] - b[i] to b[i] + fn va_sub_vb_into_vb(&self, a: &[O], b: &mut [O]); // Assigns a[i] - b[i] to c[i] - fn vec_sub_binary_assign(&self, a: &[O], b:&[O], c: &mut [O]); + fn va_sub_vb_into_vc(&self, a: &[O], b:&[O], c: &mut [O]); - // Assigns a[i] - b[i] to b[i] - fn vec_sub_unary_assign(&self, a: &[O], b: &mut [O]); + // NEG + // Assigns -a[i] to a[i]. + fn va_neg_into_va(&self, a: &mut [O]); // Assigns -a[i] to a[i]. - fn vec_neg_unary_assign(&self, a: &mut [O]); - - // Assigns -a[i] to a[i]. 
- fn vec_neg_binary_assign(&self, a: &[O], b: &mut [O]); + fn va_neg_into_vb(&self, a: &[O], b: &mut [O]); + // MUL MONTGOMERY // Assigns a * 2^64 to b. - fn vec_prepare_montgomery_assign(&self, a: &[O], b: &mut [montgomery::Montgomery]); + fn va_prep_mont_into_vb(&self, a: &[O], b: &mut [montgomery::Montgomery]); // Assigns a[i] * b[i] to c[i]. - fn vec_mul_montgomery_external_binary_assign(&self, a:&[montgomery::Montgomery], b:&[O], c: &mut [O]); + fn va_mont_mul_vb_into_vc(&self, a:&[montgomery::Montgomery], b:&[O], c: &mut [O]); // Assigns a[i] * b[i] to b[i]. - fn vec_mul_montgomery_external_unary_assign(&self, a:&[montgomery::Montgomery], b:&mut [O]); + fn va_mont_mul_vb_into_vb(&self, a:&[montgomery::Montgomery], b:&mut [O]); + // MUL BARRETT // Assigns a * b[i] to b[i]. - fn vec_mul_scalar_barrett_external_unary_assign(&self, a:& barrett::Barrett, b:&mut [u64]); + fn sa_barrett_mul_vb_into_vb(&self, a:& barrett::Barrett, b:&mut [u64]); // Assigns a * b[i] to c[i]. - fn vec_mul_scalar_barrett_external_binary_assign(&self, a:& barrett::Barrett, b:&[u64], c: &mut [u64]); + fn sa_barrett_mul_vb_into_vc(&self, a:& barrett::Barrett, b:&[u64], c: &mut [u64]); + // OTHERS // Assigns (a[i] + 2q - b[i]) * c to d[i]. - fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_d(&self, a: &[u64], b: &[u64], c: &barrett::Barrett, d: &mut [u64]); + fn va_sub_vb_mul_sc_into_vd(&self, a: &[u64], b: &[u64], c: &barrett::Barrett, d: &mut [u64]); // Assigns (a[i] + 2q - b[i]) * c to b[i]. - fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_b(&self, a: &[u64], c: &barrett::Barrett, b: &mut [u64]); + fn va_sub_vb_mul_sc_into_vb(&self, a: &[u64], c: &barrett::Barrett, b: &mut [u64]); } diff --git a/math/src/modulus/impl_u64/operations.rs b/math/src/modulus/impl_u64/operations.rs index 6711a75..ac218c6 100644 --- a/math/src/modulus/impl_u64/operations.rs +++ b/math/src/modulus/impl_u64/operations.rs @@ -1,5 +1,5 @@ -use crate::modulus::{WordOperations, VecOperations}; +use crate::modulus::{ScalarOperations, VectorOperations}; use crate::modulus::prime::Prime; use crate::modulus::ReduceOnce; use crate::modulus::montgomery::Montgomery; @@ -8,7 +8,7 @@ use crate::modulus::REDUCEMOD; use crate::{apply_v, apply_vv, apply_vvv, apply_sv, apply_svv, apply_vvsv, apply_vsv}; use itertools::izip; -impl WordOperations for Prime{ +impl ScalarOperations for Prime{ /// Applies a modular reduction on x based on REDUCE: /// - LAZY: no modular reduction. @@ -18,83 +18,83 @@ impl WordOperations for Prime{ /// - BARRETT: maps x to x mod q using Barrett reduction. /// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1]. 
#[inline(always)] - fn word_reduce_assign(&self, x: &mut u64){ - self.montgomery.reduce_assign::(x); + fn sa_reduce_into_sa(&self, a: &mut u64){ + self.montgomery.reduce_assign::(a); } #[inline(always)] - fn word_add_binary_assign(&self, a: &u64, b: &u64, c: &mut u64){ + fn sa_add_sb_into_sc(&self, a: &u64, b: &u64, c: &mut u64){ *c = a.wrapping_add(*b); - self.word_reduce_assign::(c); + self.sa_reduce_into_sa::(c); } #[inline(always)] - fn word_add_unary_assign(&self, a: &u64, b: &mut u64){ + fn sa_add_sb_into_sb(&self, a: &u64, b: &mut u64){ *b = a.wrapping_add(*b); - self.word_reduce_assign::(b); + self.sa_reduce_into_sa::(b); } #[inline(always)] - fn word_sub_binary_assign(&self, a: &u64, b: &u64, c: &mut u64){ + fn sa_sub_sb_into_sc(&self, a: &u64, b: &u64, c: &mut u64){ *c = a.wrapping_add(self.q.wrapping_sub(*b)).reduce_once(self.q); } #[inline(always)] - fn word_sub_unary_assign(&self, a: &u64, b: &mut u64){ + fn sa_sub_sb_into_sb(&self, a: &u64, b: &mut u64){ *b = a.wrapping_add(self.q.wrapping_sub(*b)).reduce_once(self.q); } #[inline(always)] - fn word_neg_unary_assign(&self, a: &mut u64){ + fn sa_neg_into_sa(&self, a: &mut u64){ *a = self.q.wrapping_sub(*a); - self.word_reduce_assign::(a) + self.sa_reduce_into_sa::(a) } #[inline(always)] - fn word_neg_binary_assign(&self, a: &u64, b: &mut u64){ + fn sa_neg_into_sb(&self, a: &u64, b: &mut u64){ *b = self.q.wrapping_sub(*a); - self.word_reduce_assign::(b) + self.sa_reduce_into_sa::(b) } #[inline(always)] - fn word_prepare_montgomery_assign(&self, a: &u64, b: &mut Montgomery){ + fn sa_prep_mont_into_sb(&self, a: &u64, b: &mut Montgomery){ self.montgomery.prepare_assign::(*a, b); } #[inline(always)] - fn word_mul_montgomery_external_binary_assign(&self, a: &Montgomery, b:&u64, c: &mut u64){ + fn sa_mont_mul_sb_into_sc(&self, a: &Montgomery, b:&u64, c: &mut u64){ *c = self.montgomery.mul_external::(*a, *b); } #[inline(always)] - fn word_mul_montgomery_external_unary_assign(&self, lhs:&Montgomery, rhs:&mut u64){ - self.montgomery.mul_external_assign::(*lhs, rhs); + fn sa_mont_mul_sb_into_sb(&self, a:&Montgomery, b:&mut u64){ + self.montgomery.mul_external_assign::(*a, b); } #[inline(always)] - fn word_mul_barrett_binary_assign(&self, a: &Barrett, b:&u64, c: &mut u64){ + fn sa_barrett_mul_sb_into_sc(&self, a: &Barrett, b:&u64, c: &mut u64){ *c = self.barrett.mul_external::(*a, *b); } #[inline(always)] - fn word_mul_barrett_unary_assign(&self, a:&Barrett, b:&mut u64){ + fn sa_barrett_mul_sb_into_sb(&self, a:&Barrett, b:&mut u64){ self.barrett.mul_external_assign::(*a, b); } #[inline(always)] - fn word_sum_aqqmb_prod_c_barrett_assign_d(&self, a: &u64, b: &u64, c: &Barrett, d: &mut u64){ + fn sa_sub_sb_mul_sc_into_sd(&self, a: &u64, b: &u64, c: &Barrett, d: &mut u64){ *d = self.two_q.wrapping_sub(*b).wrapping_add(*a); self.barrett.mul_external_assign::(*c, d); } #[inline(always)] - fn word_sum_aqqmb_prod_c_barrett_assign_b(&self, a: &u64, c: &Barrett, b: &mut u64){ + fn sa_sub_sb_mul_sc_into_sb(&self, a: &u64, c: &Barrett, b: &mut u64){ *b = self.two_q.wrapping_sub(*b).wrapping_add(*a); self.barrett.mul_external_assign::(*c, b); } } -impl VecOperations for Prime{ +impl VectorOperations for Prime{ /// Applies a modular reduction on x based on REDUCE: /// - LAZY: no modular reduction. @@ -104,70 +104,80 @@ impl VecOperations for Prime{ /// - BARRETT: maps x to x mod q using Barrett reduction. /// - BARRETTLAZY: maps x to x mod q using Barrett reduction with values in [0, 2q-1]. 
#[inline(always)] - fn vec_reduce_assign(&self, x: &mut [u64]){ - apply_v!(self, Self::word_reduce_assign::, x, CHUNK); + fn va_reduce_into_va(&self, a: &mut [u64]){ + apply_v!(self, Self::sa_reduce_into_sa::, a, CHUNK); } #[inline(always)] - fn vec_add_binary_assign(&self, a: &[u64], b:&[u64], c:&mut [u64]){ - apply_vvv!(self, Self::word_add_binary_assign::, a, b, c, CHUNK); + fn va_add_vb_into_vc(&self, a: &[u64], b:&[u64], c:&mut [u64]){ + apply_vvv!(self, Self::sa_add_sb_into_sc::, a, b, c, CHUNK); } #[inline(always)] - fn vec_add_unary_assign(&self, a: &[u64], b:&mut [u64]){ - apply_vv!(self, Self::word_add_unary_assign::, a, b, CHUNK); + fn va_add_vb_into_vb(&self, a: &[u64], b:&mut [u64]){ + apply_vv!(self, Self::sa_add_sb_into_sb::, a, b, CHUNK); } #[inline(always)] - fn vec_sub_binary_assign(&self, a: &[u64], b:&[u64], c:&mut [u64]){ - apply_vvv!(self, Self::word_sub_binary_assign::, a, b, c, CHUNK); + fn va_add_sb_into_vc(&self, a: &[u64], b:&u64, c:&mut [u64]){ + apply_vsv!(self, Self::sa_add_sb_into_sc::, a, b, c, CHUNK); } #[inline(always)] - fn vec_sub_unary_assign(&self, a: &[u64], b:&mut [u64]){ - apply_vv!(self, Self::word_sub_unary_assign::, a, b, CHUNK); + fn sa_add_vb_into_vb(&self, a:&u64, b:&mut [u64]){ + apply_sv!(self, Self::sa_add_sb_into_sb::, a, b, CHUNK); } #[inline(always)] - fn vec_neg_unary_assign(&self, a: &mut [u64]){ - apply_v!(self, Self::word_neg_unary_assign::, a, CHUNK); + fn va_sub_vb_into_vc(&self, a: &[u64], b:&[u64], c:&mut [u64]){ + apply_vvv!(self, Self::sa_sub_sb_into_sc::, a, b, c, CHUNK); + } + + #[inline(always)] + fn va_sub_vb_into_vb(&self, a: &[u64], b:&mut [u64]){ + apply_vv!(self, Self::sa_sub_sb_into_sb::, a, b, CHUNK); } #[inline(always)] - fn vec_neg_binary_assign(&self, a: &[u64], b: &mut [u64]){ - apply_vv!(self, Self::word_neg_binary_assign::, a, b, CHUNK); + fn va_neg_into_va(&self, a: &mut [u64]){ + apply_v!(self, Self::sa_neg_into_sa::, a, CHUNK); } #[inline(always)] - fn vec_prepare_montgomery_assign(&self, a: &[u64], b: &mut [Montgomery]){ - apply_vv!(self, Self::word_prepare_montgomery_assign::, a, b, CHUNK); + fn va_neg_into_vb(&self, a: &[u64], b: &mut [u64]){ + apply_vv!(self, Self::sa_neg_into_sb::, a, b, CHUNK); } #[inline(always)] - fn vec_mul_montgomery_external_binary_assign(&self, a:& [Montgomery], b:&[u64], c: &mut [u64]){ - apply_vvv!(self, Self::word_mul_montgomery_external_binary_assign::, a, b, c, CHUNK); + fn va_prep_mont_into_vb(&self, a: &[u64], b: &mut [Montgomery]){ + apply_vv!(self, Self::sa_prep_mont_into_sb::, a, b, CHUNK); } #[inline(always)] - fn vec_mul_montgomery_external_unary_assign(&self, a:& [Montgomery], b:&mut [u64]){ - apply_vv!(self, Self::word_mul_montgomery_external_unary_assign::, a, b, CHUNK); + fn va_mont_mul_vb_into_vc(&self, a:& [Montgomery], b:&[u64], c: &mut [u64]){ + apply_vvv!(self, Self::sa_mont_mul_sb_into_sc::, a, b, c, CHUNK); } #[inline(always)] - fn vec_mul_scalar_barrett_external_binary_assign(&self, a:& Barrett, b:&[u64], c: &mut [u64]){ - apply_svv!(self, Self::word_mul_barrett_binary_assign::, a, b, c, CHUNK); + fn va_mont_mul_vb_into_vb(&self, a:& [Montgomery], b:&mut [u64]){ + apply_vv!(self, Self::sa_mont_mul_sb_into_sb::, a, b, CHUNK); } #[inline(always)] - fn vec_mul_scalar_barrett_external_unary_assign(&self, a:& Barrett, b:&mut [u64]){ - apply_sv!(self, Self::word_mul_barrett_unary_assign::, a, b, CHUNK); + fn sa_barrett_mul_vb_into_vc(&self, a:& Barrett, b:&[u64], c: &mut [u64]){ + apply_svv!(self, Self::sa_barrett_mul_sb_into_sc::, a, b, c, CHUNK); } - fn 
vec_sum_aqqmb_prod_c_scalar_barrett_assign_d(&self, a: &[u64], b: &[u64], c: &Barrett, d: &mut [u64]){ - apply_vvsv!(self, Self::word_sum_aqqmb_prod_c_barrett_assign_d::, a, b, c, d, CHUNK); + #[inline(always)] + fn sa_barrett_mul_vb_into_vb(&self, a:& Barrett, b:&mut [u64]){ + apply_sv!(self, Self::sa_barrett_mul_sb_into_sb::, a, b, CHUNK); } - fn vec_sum_aqqmb_prod_c_scalar_barrett_assign_b(&self, a: &[u64], c: &Barrett, b: &mut [u64]){ - apply_vsv!(self, Self::word_sum_aqqmb_prod_c_barrett_assign_b::, a, c, b, CHUNK); + fn va_sub_vb_mul_sc_into_vd(&self, a: &[u64], b: &[u64], c: &Barrett, d: &mut [u64]){ + apply_vvsv!(self, Self::sa_sub_sb_mul_sc_into_sd::, a, b, c, d, CHUNK); + } + + fn va_sub_vb_mul_sc_into_vb(&self, a: &[u64], b: &Barrett, c: &mut [u64]){ + apply_vsv!(self, Self::sa_sub_sb_mul_sc_into_sb::, a, b, c, CHUNK); } } diff --git a/math/src/poly.rs b/math/src/poly.rs index 286557c..41daca0 100644 --- a/math/src/poly.rs +++ b/math/src/poly.rs @@ -55,6 +55,12 @@ impl PartialEq for Poly { } } +impl Default for Poly { + fn default() -> Self { + Poly(Vec::new()) + } +} + #[derive(Clone, Debug, Eq)] pub struct PolyRNS(pub Vec>); @@ -143,7 +149,6 @@ impl PartialEq for PolyRNS { impl Default for PolyRNS{ fn default() -> Self{ - let polys:Vec> = Vec::new(); - Self{0:polys} + Self{0:Vec::new()} } } \ No newline at end of file diff --git a/math/src/ring.rs b/math/src/ring.rs index df0675d..6e92486 100644 --- a/math/src/ring.rs +++ b/math/src/ring.rs @@ -22,7 +22,7 @@ impl Ring{ } } -pub struct RingRNS<'a, O: Unsigned>(& 'a [Ring]); +pub struct RingRNS<'a, O: Unsigned>(pub & 'a [Ring]); impl RingRNS<'_, O> { diff --git a/math/src/ring/impl_u64/rescaling_rns.rs b/math/src/ring/impl_u64/rescaling_rns.rs index 0aecf61..9a82171 100644 --- a/math/src/ring/impl_u64/rescaling_rns.rs +++ b/math/src/ring/impl_u64/rescaling_rns.rs @@ -1,3 +1,4 @@ +use crate::ring::Ring; use crate::ring::RingRNS; use crate::poly::PolyRNS; use crate::modulus::barrett::Barrett; @@ -8,121 +9,134 @@ extern crate test; impl RingRNS<'_, u64>{ /// Updates b to floor(a / q[b.level()]). - /// Expects a and b to be in the NTT domain. - pub fn div_floor_by_last_modulus_ntt(&self, a: &PolyRNS, buf: &mut PolyRNS, b: &mut PolyRNS){ - assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1); - let level = self.level(); - self.0[level].intt::(a.at(level), buf.at_mut(0)); - let rescaling_constants: ScalarRNS> = self.rescaling_constant(); - let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1); - for (i, r) in self.0[0..level].iter().enumerate(){ - r.ntt::(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]); - r.sum_aqqmb_prod_c_scalar_barrett::(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants.0[i], b.at_mut(i)); - } - } + pub fn div_floor_by_last_modulus(&self, a: &PolyRNS, buf: &mut PolyRNS, b: &mut PolyRNS){ + debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level()); + debug_assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1); - /// Updates b to floor(b / q[b.level()]). - /// Expects b to be in the NTT domain. 
- pub fn div_floor_by_last_modulus_ntt_inplace(&self, buf: &mut PolyRNS, b: &mut PolyRNS){ - let level = self.level(); - self.0[level].intt::(b.at(level), buf.at_mut(0)); - let rescaling_constants: ScalarRNS> = self.rescaling_constant(); - let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1); - for (i, r) in self.0[0..level].iter().enumerate(){ - r.ntt::(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]); - r.sum_aqqmb_prod_c_scalar_barrett_inplace::(&buf_ntt_qi_scaling[0], &rescaling_constants.0[i], b.at_mut(i)); - } - } - - /// Updates b to floor(a / q[b.level()]). - pub fn div_floor_by_last_modulus(&self, a: &PolyRNS, b: &mut PolyRNS){ - assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1); - let level = self.level(); - let rescaling_constants:ScalarRNS> = self.rescaling_constant(); - for (i, r) in self.0[0..level].iter().enumerate(){ - r.sum_aqqmb_prod_c_scalar_barrett::(a.at(level), a.at(i), &rescaling_constants.0[i], b.at_mut(i)); - } - } - - /// Updates a to floor(b / q[b.level()]). - pub fn div_floor_by_last_modulus_inplace(&self, a: &mut PolyRNS){ let level = self.level(); let rescaling_constants: ScalarRNS> = self.rescaling_constant(); - let (a_i, a_level) = a.split_at_mut(level); - for (i, r) in self.0[0..level].iter().enumerate(){ - r.sum_aqqmb_prod_c_scalar_barrett_inplace::(&a_level[0], &rescaling_constants.0[i], &mut a_i[i]); - } - } - - pub fn div_floor_by_last_moduli(&self, nb_moduli:usize, a: &PolyRNS, b: &mut PolyRNS){ - if nb_moduli == 0{ - if a != b{ - b.copy(a); + + if NTT{ + let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1); + self.0[level].intt::(a.at(level), &mut buf_ntt_q_scaling[0]); + for (i, r) in self.0[0..level].iter().enumerate(){ + r.ntt::(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]); + r.sum_aqqmb_prod_c_scalar_barrett::(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants.0[i], b.at_mut(i)); } }else{ - self.div_floor_by_last_modulus(a, b); - (1..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace(b)}); + for (i, r) in self.0[0..level].iter().enumerate(){ + r.sum_aqqmb_prod_c_scalar_barrett::(a.at(level), a.at(i), &rescaling_constants.0[i], b.at_mut(i)); + } } } - pub fn div_floor_by_last_moduli_inplace(&self, nb_moduli:usize, a: &mut PolyRNS){ - (0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace(a)}); + /// Updates a to floor(a / q[b.level()]). + /// Expects a to be in the NTT domain. 
+    pub fn div_floor_by_last_modulus_inplace(&self, buf: &mut PolyRNS, a: &mut PolyRNS){
+        debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
+
+        let level = self.level();
+        let rescaling_constants: ScalarRNS> = self.rescaling_constant();
+
+        if NTT{
+            let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
+            self.0[level].intt::(a.at(level), &mut buf_ntt_q_scaling[0]);
+            for (i, r) in self.0[0..level].iter().enumerate(){
+                r.ntt::(&buf_ntt_q_scaling[0], &mut buf_ntt_qi_scaling[0]);
+                r.sum_aqqmb_prod_c_scalar_barrett_inplace::(&buf_ntt_qi_scaling[0], &rescaling_constants.0[i], a.at_mut(i));
+            }
+        }else{
+            // Coefficient-domain path: rescales a directly and does not use the scratch buffer.
+            let (a_i, a_level) = a.0.split_at_mut(level);
+            for (i, r) in self.0[0..level].iter().enumerate(){
+                r.sum_aqqmb_prod_c_scalar_barrett_inplace::(&a_level[0], &rescaling_constants.0[i], &mut a_i[i]);
+            }
+        }
     }
 
-    pub fn div_round_by_last_modulus_ntt(&self, a: &PolyRNS, buf: &mut PolyRNS, b: &mut PolyRNS){
+    /// Updates c to floor(a / prod_{i = level - nb_moduli + 1}^{level} q[i]).
+    pub fn div_floor_by_last_moduli(&self, nb_moduli:usize, a: &PolyRNS, buf: &mut PolyRNS, c: &mut PolyRNS){
+
+        debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
+        debug_assert!(c.level() >= a.level()-1, "invalid input c: c.level()={} < a.level()-1={}", c.level(), a.level()-1);
+        debug_assert!(nb_moduli <= a.level(), "invalid input nb_moduli: nb_moduli={} > a.level()={}", nb_moduli, a.level());
+
+        if nb_moduli == 0{
+            if a != c{
+                c.copy(a);
+            }
+        }else{
+            if NTT{
+                self.intt::(a, buf);
+                // The coefficient-domain in-place rescaling does not use its scratch buffer, so an empty PolyRNS is passed.
+                (0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::(&mut PolyRNS::::default(), buf)});
+                self.at_level(self.level()-nb_moduli).ntt::(buf, c);
+            }else{
+                self.div_floor_by_last_modulus::(a, buf, c);
+                (1..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::(buf, c)});
+            }
+        }
+    }
+
+    /// Updates a to floor(a / prod_{i = level - nb_moduli + 1}^{level} q[i]).
+    pub fn div_floor_by_last_moduli_inplace(&self, nb_moduli:usize, buf: &mut PolyRNS, a: &mut PolyRNS){
+        debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
+        debug_assert!(nb_moduli <= a.level(), "invalid input nb_moduli: nb_moduli={} > a.level()={}", nb_moduli, a.level());
+        if NTT{
+            self.intt::(a, buf);
+            (0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::(&mut PolyRNS::::default(), buf)});
+            self.at_level(self.level()-nb_moduli).ntt::(buf, a);
+        }else{
+            (0..nb_moduli).for_each(|i|{self.at_level(self.level()-i).div_floor_by_last_modulus_inplace::(buf, a)});
+        }
+    }
+
+    /// Updates b to round(a / q[b.level()]).
+    /// If NTT is true, expects a and b to be in the NTT domain.
+    pub fn div_round_by_last_modulus(&self, a: &PolyRNS, buf: &mut PolyRNS, b: &mut PolyRNS){
+        debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
+        debug_assert!(b.level() >= a.level()-1, "invalid input b: b.level()={} < a.level()-1={}", b.level(), a.level()-1);
+
+        let level: usize = self.level();
+        let r_last: &Ring = &self.0[level];
+        let q_level_half: u64 = r_last.modulus.q>>1;
+        let rescaling_constants: ScalarRNS> = self.rescaling_constant();
+        let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
+
+        if NTT{
+            // Rounding: add floor(q[level]/2) to the lifted last-limb coefficients so that the
+            // floor rescaling below yields round(a / q[level]).
+            r_last.intt::(a.at(level), &mut buf_ntt_q_scaling[0]);
+            r_last.add_scalar_inplace::(&q_level_half, &mut buf_ntt_q_scaling[0]);
+            for (i, r) in self.0[0..level].iter().enumerate(){
+                // Compensate the +q[level]/2 on the i-th limb: add q[i] - (q[level]/2 mod q[i]).
+                r_last.add_scalar::(&buf_ntt_q_scaling[0], &(r.modulus.q - (q_level_half % r.modulus.q)), &mut buf_ntt_qi_scaling[0]);
+                r.ntt_inplace::(&mut buf_ntt_qi_scaling[0]);
+                r.sum_aqqmb_prod_c_scalar_barrett::(&buf_ntt_qi_scaling[0], a.at(i), &rescaling_constants.0[i], b.at_mut(i));
+            }
+        }else{
+            // Coefficient-domain rounding path not implemented yet.
+            unimplemented!();
+        }
+
+    }
+
+    /// Updates a to round(a / q[a.level()]).
+    /// If NTT is true, expects a to be in the NTT domain.
+    pub fn div_round_by_last_modulus_inplace(&self, buf: &mut PolyRNS, a: &mut PolyRNS){
+        debug_assert!(self.level() <= a.level(), "invalid input a: self.level()={} > a.level()={}", self.level(), a.level());
+        let level = self.level();
+        let r_last: &Ring = &self.0[level];
+        let q_level_half: u64 = r_last.modulus.q>>1;
+        let rescaling_constants: ScalarRNS> = self.rescaling_constant();
+        let (buf_ntt_q_scaling, buf_ntt_qi_scaling) = buf.0.split_at_mut(1);
+
+        if NTT{
+            r_last.intt::(a.at(level), &mut buf_ntt_q_scaling[0]);
+            r_last.add_scalar_inplace::(&q_level_half, &mut buf_ntt_q_scaling[0]);
+            for (i, r) in self.0[0..level].iter().enumerate(){
+                // Same compensation as in div_round_by_last_modulus, then rescale the limb in place.
+                r_last.add_scalar::(&buf_ntt_q_scaling[0], &(r.modulus.q - (q_level_half % r.modulus.q)), &mut buf_ntt_qi_scaling[0]);
+                r.ntt_inplace::(&mut buf_ntt_qi_scaling[0]);
+                r.sum_aqqmb_prod_c_scalar_barrett_inplace::(&buf_ntt_qi_scaling[0], &rescaling_constants.0[i], a.at_mut(i));
+            }
+        }else{
+            // Coefficient-domain rounding path not implemented yet.
+            unimplemented!();
+        }
+    }
 }
-#[cfg(test)]
-mod tests {
-    use num_bigint::BigInt;
-    use num_bigint::Sign;
-    use crate::ring::Ring;
-    use crate::ring::impl_u64::ring_rns::new_rings;
-    use sampling::source::Source;
-    use super::*;
-
-    #[test]
-    fn test_div_floor_by_last_modulus_ntt() {
-        let n = 1<<10;
-        let moduli: Vec = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64];
-        let rings: Vec> = new_rings(n, moduli);
-        let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings);
-        let seed: [u8; 32] = [0;32];
-        let mut source: Source = Source::new(seed);
-
-        let mut a: PolyRNS = ring_rns.new_polyrns();
-        let mut b: PolyRNS = ring_rns.new_polyrns();
-        let mut c: PolyRNS = ring_rns.at_level(ring_rns.level()-1).new_polyrns();
-
-        // Allocates a random PolyRNS
-        ring_rns.fill_uniform(&mut source, &mut a);
-
-        // Maps PolyRNS to [BigInt]
-        let mut coeffs_a: Vec = (0..n).map(|i|{BigInt::from(i)}).collect();
-        ring_rns.at_level(a.level()).to_bigint_inplace(&a, 1, &mut coeffs_a);
-
-        // Performs c = intt(ntt(a) / q_level)
-        ring_rns.ntt_inplace::(&mut a);
-        ring_rns.div_floor_by_last_modulus_ntt(&a, &mut b, &mut c);
-        ring_rns.at_level(c.level()).intt_inplace::(&mut c);
-
-        // Exports c to coeffs_c
-        let mut coeffs_c = vec![BigInt::from(0);c.n()];
-        ring_rns.at_level(c.level()).to_bigint_inplace(&c, 1, &mut coeffs_c);
-
-        // Performs floor division on a
-        let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q);
-        coeffs_a.iter_mut().for_each(|a|{
-            // Emulates floor division in [0, q-1] and
maps to [-(q-1)/2, (q-1)/2-1] - *a /= &scalar_big; - if a.sign() == Sign::Minus { - *a -= 1; - } - }); - - assert!(coeffs_a == coeffs_c); - } -} \ No newline at end of file diff --git a/math/src/ring/impl_u64/ring.rs b/math/src/ring/impl_u64/ring.rs index 0b8cfa0..377d47b 100644 --- a/math/src/ring/impl_u64/ring.rs +++ b/math/src/ring/impl_u64/ring.rs @@ -5,7 +5,7 @@ use crate::modulus::montgomery::Montgomery; use crate::modulus::barrett::Barrett; use crate::poly::Poly; use crate::modulus::{REDUCEMOD, BARRETT}; -use crate::modulus::VecOperations; +use crate::modulus::VectorOperations; use num_bigint::BigInt; use num_traits::ToPrimitive; use crate::CHUNK; @@ -66,7 +66,7 @@ impl Ring{ pub fn add_inplace(&self, a: &Poly, b: &mut Poly){ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_add_unary_assign::(&a.0, &mut b.0); + self.modulus.va_add_vb_into_vb::(&a.0, &mut b.0); } #[inline(always)] @@ -74,14 +74,27 @@ impl Ring{ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); - self.modulus.vec_add_binary_assign::(&a.0, &b.0, &mut c.0); + self.modulus.va_add_vb_into_vc::(&a.0, &b.0, &mut c.0); + } + + #[inline(always)] + pub fn add_scalar_inplace(&self, a: &u64, b: &mut Poly){ + debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); + self.modulus.sa_add_vb_into_vb::(a, &mut b.0); + } + + #[inline(always)] + pub fn add_scalar(&self, a: &Poly, b: &u64, c: &mut Poly){ + debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); + debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); + self.modulus.va_add_sb_into_vc::(&a.0, b, &mut c.0); } #[inline(always)] pub fn sub_inplace(&self, a: &Poly, b: &mut Poly){ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_sub_unary_assign::(&a.0, &mut b.0); + self.modulus.va_sub_vb_into_vb::(&a.0, &mut b.0); } #[inline(always)] @@ -89,20 +102,20 @@ impl Ring{ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); - self.modulus.vec_sub_binary_assign::(&a.0, &b.0, &mut c.0); + self.modulus.va_sub_vb_into_vc::(&a.0, &b.0, &mut c.0); } #[inline(always)] pub fn neg(&self, a: &Poly, b: &mut Poly){ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_neg_binary_assign::(&a.0, &mut b.0); + self.modulus.va_neg_into_vb::(&a.0, &mut b.0); } #[inline(always)] pub fn neg_inplace(&self, a: &mut Poly){ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); - self.modulus.vec_neg_unary_assign::(&mut a.0); + self.modulus.va_neg_into_va::(&mut a.0); } #[inline(always)] @@ -110,39 +123,39 @@ impl Ring{ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); - self.modulus.vec_mul_montgomery_external_binary_assign::(&a.0, &b.0, &mut c.0); + self.modulus.va_mont_mul_vb_into_vc::(&a.0, &b.0, &mut c.0); } #[inline(always)] pub fn 
mul_montgomery_external_inplace(&self, a:&Poly>, b:&mut Poly){ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_mul_montgomery_external_unary_assign::(&a.0, &mut b.0); + self.modulus.va_mont_mul_vb_into_vb::(&a.0, &mut b.0); } #[inline(always)] pub fn mul_scalar(&self, a:&Poly, b: &u64, c:&mut Poly){ debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n()); debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n()); - self.modulus.vec_mul_scalar_barrett_external_binary_assign::(&self.modulus.barrett.prepare(*b), &a.0, &mut c.0); + self.modulus.sa_barrett_mul_vb_into_vc::(&self.modulus.barrett.prepare(*b), &a.0, &mut c.0); } #[inline(always)] pub fn mul_scalar_inplace(&self, a:&u64, b:&mut Poly){ debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_mul_scalar_barrett_external_unary_assign::(&self.modulus.barrett.prepare(self.modulus.barrett.reduce::(a)), &mut b.0); + self.modulus.sa_barrett_mul_vb_into_vb::(&self.modulus.barrett.prepare(self.modulus.barrett.reduce::(a)), &mut b.0); } #[inline(always)] pub fn mul_scalar_barrett_inplace(&self, a:&Barrett, b:&mut Poly){ debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_mul_scalar_barrett_external_unary_assign::(a, &mut b.0); + self.modulus.sa_barrett_mul_vb_into_vb::(a, &mut b.0); } #[inline(always)] pub fn mul_scalar_barrett(&self, a:&Barrett, b: &Poly, c:&mut Poly){ debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_mul_scalar_barrett_external_binary_assign::(a, &b.0, &mut c.0); + self.modulus.sa_barrett_mul_vb_into_vc::(a, &b.0, &mut c.0); } #[inline(always)] @@ -150,13 +163,13 @@ impl Ring{ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n()); - self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_d::(&a.0, &b.0, c, &mut d.0); + self.modulus.va_sub_vb_mul_sc_into_vd::(&a.0, &b.0, c, &mut d.0); } #[inline(always)] pub fn sum_aqqmb_prod_c_scalar_barrett_inplace(&self, a: &Poly, c: &Barrett, b: &mut Poly){ debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n()); debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n()); - self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_b::(&a.0, c, &mut b.0); + self.modulus.va_sub_vb_mul_sc_into_vb::(&a.0, c, &mut b.0); } } \ No newline at end of file diff --git a/math/tests/rescaling_rns.rs b/math/tests/rescaling_rns.rs new file mode 100644 index 0000000..84d8e29 --- /dev/null +++ b/math/tests/rescaling_rns.rs @@ -0,0 +1,61 @@ +use num_bigint::BigInt; +use num_bigint::Sign; +use math::ring::{Ring, RingRNS}; +use math::poly::PolyRNS; +use math::ring::impl_u64::ring_rns::new_rings; +use sampling::source::Source; + +#[test] +fn rescaling_rns_u64(){ + let n = 1<<10; + let moduli: Vec = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64]; + let rings: Vec> = new_rings(n, moduli); + let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings); + + test_div_floor_by_last_modulus::(&ring_rns); + test_div_floor_by_last_modulus::(&ring_rns); +} + +fn test_div_floor_by_last_modulus(ring_rns: &RingRNS) { + + let seed: [u8; 32] = [0;32]; + let mut source: Source = Source::new(seed); + + let mut a: PolyRNS = ring_rns.new_polyrns(); + let mut b: PolyRNS = ring_rns.new_polyrns(); + let mut c: 
PolyRNS = ring_rns.at_level(ring_rns.level()-1).new_polyrns(); + + // Allocates a random PolyRNS + ring_rns.fill_uniform(&mut source, &mut a); + + // Maps PolyRNS to [BigInt] + let mut coeffs_a: Vec = (0..a.n()).map(|i|{BigInt::from(i)}).collect(); + ring_rns.at_level(a.level()).to_bigint_inplace(&a, 1, &mut coeffs_a); + + // Performs c = intt(ntt(a) / q_level) + if NTT{ + ring_rns.ntt_inplace::(&mut a); + } + + ring_rns.div_floor_by_last_modulus::(&a, &mut b, &mut c); + + if NTT{ + ring_rns.at_level(c.level()).intt_inplace::(&mut c); + } + + // Exports c to coeffs_c + let mut coeffs_c = vec![BigInt::from(0);c.n()]; + ring_rns.at_level(c.level()).to_bigint_inplace(&c, 1, &mut coeffs_c); + + // Performs floor division on a + let scalar_big = BigInt::from(ring_rns.0[ring_rns.level()].modulus.q); + coeffs_a.iter_mut().for_each(|a|{ + // Emulates floor division in [0, q-1] and maps to [-(q-1)/2, (q-1)/2-1] + *a /= &scalar_big; + if a.sign() == Sign::Minus { + *a -= 1; + } + }); + + assert!(coeffs_a == coeffs_c); +} \ No newline at end of file
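
Usage sketch (not part of the diff): the snippet below pieces together, from math/benches/operations.rs and math/tests/rescaling_rns.rs, how the renamed element-wise API and the const-generic rescaling entry point are called after this patch. The polynomial size and moduli are the ones used by the new test; the <ONCE, CHUNK> const-generic argument order on the vector call is an assumption (the generic parameters are elided in the hunks above), so treat it as illustrative rather than authoritative.

    use math::ring::{Ring, RingRNS};
    use math::ring::impl_u64::ring_rns::new_rings;
    use math::poly::PolyRNS;
    use math::modulus::{VectorOperations, ONCE};
    use math::CHUNK;

    fn sketch() {
        // Same parameters as math/tests/rescaling_rns.rs: N = 2^10, two ~61-bit NTT-friendly primes.
        let n: usize = 1 << 10;
        let moduli: Vec<u64> = vec![0x1fffffffffc80001u64, 0x1fffffffffe00001u64];
        let rings: Vec<Ring<u64>> = new_rings(n, moduli);
        let ring_rns: RingRNS<'_, u64> = RingRNS::new(&rings);

        // Renamed element-wise API: va_add_vb_into_vb is the former vec_add_unary_assign.
        // (Const-generic order <ONCE, CHUNK> assumed here.)
        let r: &Ring<u64> = &ring_rns.0[0];
        let p0 = r.new_poly();
        let mut p1 = r.new_poly();
        r.modulus.va_add_vb_into_vb::<ONCE, CHUNK>(&p0.0, &mut p1.0);

        // Rescaling is now generic over the domain: <true> for NTT inputs, <false> for coefficient inputs.
        let a: PolyRNS<u64> = ring_rns.new_polyrns();
        let mut buf: PolyRNS<u64> = ring_rns.new_polyrns();
        let mut b: PolyRNS<u64> = ring_rns.at_level(ring_rns.level() - 1).new_polyrns();
        ring_rns.div_floor_by_last_modulus::<false>(&a, &mut buf, &mut b);
    }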