Commit "wip" in mirror of https://github.com/arnaucube/poulpy.git
@@ -5,7 +5,7 @@ use crate::modulus::montgomery::Montgomery;
 use crate::modulus::barrett::Barrett;
 use crate::poly::Poly;
 use crate::modulus::{REDUCEMOD, BARRETT};
-use crate::modulus::VecOperations;
+use crate::modulus::VectorOperations;
 use num_bigint::BigInt;
 use num_traits::ToPrimitive;
 use crate::CHUNK;
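
Note: besides renaming the VecOperations trait to VectorOperations, this commit moves the element-wise modular helpers to an operand-based naming grammar: va/vb/vc/vd are vector operands (polynomial coefficient slices) in argument order, sa/sb/sc are scalar operands, and the into_vX suffix names the operand that receives the result. A hypothetical single-function illustration of how such a name reads, using a plain scalar loop rather than the crate's actual generic implementation:

    // Illustrative stand-in only: "va_add_vb_into_vc" reads as
    // vc[i] = (va[i] + vb[i]) mod q.
    fn va_add_vb_into_vc(q: u64, va: &[u64], vb: &[u64], vc: &mut [u64]) {
        for ((a, b), c) in va.iter().zip(vb).zip(vc.iter_mut()) {
            // Assumes q < 2^63 and reduced inputs, so a + b cannot overflow u64.
            let t = a + b;
            *c = if t >= q { t - q } else { t };
        }
    }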
@@ -66,7 +66,7 @@ impl Ring<u64>{
     pub fn add_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_add_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
+        self.modulus.va_add_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }
 
     #[inline(always)]
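
Note: add_inplace keeps its signature; only the delegate changes, from vec_add_unary_assign to va_add_vb_into_vb. The CHUNK const generic hints that the inner loop runs over fixed-size chunks so the compiler can unroll and autovectorize it. A rough sketch of that shape, assuming n is a multiple of CHUNK (typical for power-of-two ring degrees) and q < 2^63; the real crate additionally selects the reduction strategy through the REDUCEMOD const generic:

    fn va_add_vb_into_vb<const CHUNK: usize>(q: u64, va: &[u64], vb: &mut [u64]) {
        debug_assert_eq!(va.len() % CHUNK, 0);
        for (ca, cb) in va.chunks_exact(CHUNK).zip(vb.chunks_exact_mut(CHUNK)) {
            // Fixed trip count per inner loop: friendly to the autovectorizer.
            for (a, b) in ca.iter().zip(cb.iter_mut()) {
                let t = a + *b;
                *b = if t >= q { t - q } else { t };
            }
        }
    }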
@@ -74,14 +74,27 @@ impl Ring<u64>{
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
-        self.modulus.vec_add_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
+        self.modulus.va_add_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
     }
 
     #[inline(always)]
+    pub fn add_scalar_inplace<const REDUCE: REDUCEMOD>(&self, a: &u64, b: &mut Poly<u64>){
+        debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
+        self.modulus.sa_add_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
+    }
+
+    #[inline(always)]
+    pub fn add_scalar<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &u64, c: &mut Poly<u64>){
+        debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
+        debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
+        self.modulus.va_add_sb_into_vc::<CHUNK, REDUCE>(&a.0, b, &mut c.0);
+    }
+
+    #[inline(always)]
     pub fn sub_inplace<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_sub_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
+        self.modulus.va_sub_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }
 
     #[inline(always)]
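
Note: add_scalar_inplace and add_scalar are new entry points added by this commit; both broadcast a single scalar across every coefficient (via sa_add_vb_into_vb and va_add_sb_into_vc respectively). A minimal sketch of the in-place variant, assuming the scalar is already reduced into [0, q):

    // vb[i] = (sa + vb[i]) mod q, with sa assumed to lie in [0, q).
    fn sa_add_vb_into_vb(q: u64, sa: u64, vb: &mut [u64]) {
        debug_assert!(sa < q);
        for b in vb.iter_mut() {
            let t = sa + *b;
            *b = if t >= q { t - q } else { t };
        }
    }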
@@ -89,20 +102,20 @@ impl Ring<u64>{
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
-        self.modulus.vec_sub_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
+        self.modulus.va_sub_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
     }
 
     #[inline(always)]
     pub fn neg<const REDUCE: REDUCEMOD>(&self, a: &Poly<u64>, b: &mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_neg_binary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
+        self.modulus.va_neg_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }
 
     #[inline(always)]
     pub fn neg_inplace<const REDUCE: REDUCEMOD>(&self, a: &mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
-        self.modulus.vec_neg_unary_assign::<CHUNK, REDUCE>(&mut a.0);
+        self.modulus.va_neg_into_va::<CHUNK, REDUCE>(&mut a.0);
     }
 
     #[inline(always)]
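
Note: for the negation pair, the usual subtlety is that q - 0 = q is not a reduced residue, so zero has to map to zero. A stand-in sketch of va_neg_into_vb under that convention (the crate's REDUCE parameter may handle this differently):

    // vb[i] = (q - va[i]) mod q; zero stays zero instead of becoming q.
    fn va_neg_into_vb(q: u64, va: &[u64], vb: &mut [u64]) {
        for (&a, b) in va.iter().zip(vb.iter_mut()) {
            *b = if a == 0 { 0 } else { q - a };
        }
    }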
@@ -110,39 +123,39 @@ impl Ring<u64>{
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
-        self.modulus.vec_mul_montgomery_external_binary_assign::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
+        self.modulus.va_mont_mul_vb_into_vc::<CHUNK, REDUCE>(&a.0, &b.0, &mut c.0);
     }
 
     #[inline(always)]
     pub fn mul_montgomery_external_inplace<const REDUCE:REDUCEMOD>(&self, a:&Poly<Montgomery<u64>>, b:&mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_mul_montgomery_external_unary_assign::<CHUNK, REDUCE>(&a.0, &mut b.0);
+        self.modulus.va_mont_mul_vb_into_vb::<CHUNK, REDUCE>(&a.0, &mut b.0);
     }
 
     #[inline(always)]
     pub fn mul_scalar<const REDUCE:REDUCEMOD>(&self, a:&Poly<u64>, b: &u64, c:&mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "b.n()={} != n={}", a.n(), self.n());
         debug_assert!(c.n() == self.n(), "c.n()={} != n={}", c.n(), self.n());
-        self.modulus.vec_mul_scalar_barrett_external_binary_assign::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(*b), &a.0, &mut c.0);
+        self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(*b), &a.0, &mut c.0);
     }
 
     #[inline(always)]
     pub fn mul_scalar_inplace<const REDUCE:REDUCEMOD>(&self, a:&u64, b:&mut Poly<u64>){
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_mul_scalar_barrett_external_unary_assign::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(self.modulus.barrett.reduce::<BARRETT>(a)), &mut b.0);
+        self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(&self.modulus.barrett.prepare(self.modulus.barrett.reduce::<BARRETT>(a)), &mut b.0);
     }
 
     #[inline(always)]
     pub fn mul_scalar_barrett_inplace<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b:&mut Poly<u64>){
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_mul_scalar_barrett_external_unary_assign::<CHUNK, REDUCE>(a, &mut b.0);
+        self.modulus.sa_barrett_mul_vb_into_vb::<CHUNK, REDUCE>(a, &mut b.0);
     }
 
     #[inline(always)]
     pub fn mul_scalar_barrett<const REDUCE:REDUCEMOD>(&self, a:&Barrett<u64>, b: &Poly<u64>, c:&mut Poly<u64>){
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_mul_scalar_barrett_external_binary_assign::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
+        self.modulus.sa_barrett_mul_vb_into_vc::<CHUNK, REDUCE>(a, &b.0, &mut c.0);
     }
 
     #[inline(always)]
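
Note: the multiplication block shows a prepare-once pattern. mul_scalar and mul_scalar_inplace call self.modulus.barrett.prepare(...) on every invocation, while mul_scalar_barrett and mul_scalar_barrett_inplace accept an already-prepared Barrett<u64>, the cheaper choice when one scalar multiplies many polynomials. Likewise, the mul_montgomery_external variants take their first operand pre-converted to Poly<Montgomery<u64>>. A sketch of the textbook Shoup/Barrett-style trick that a prepared scalar typically enables (assumptions: q < 2^62 and the scalar already reduced; this illustrates the technique, not necessarily poulpy's exact code):

    // Precompute w = floor(b * 2^64 / q). Then b*x mod q costs one high
    // multiply, one wrapping multiply-subtract, and one conditional subtract.
    struct PreparedScalar { b: u64, w: u64 }

    fn prepare(b: u64, q: u64) -> PreparedScalar {
        PreparedScalar { b, w: (((b as u128) << 64) / q as u128) as u64 }
    }

    fn mul(s: &PreparedScalar, x: u64, q: u64) -> u64 {
        let quot = ((s.w as u128 * x as u128) >> 64) as u64; // ~ floor(b*x/q)
        let r = s.b.wrapping_mul(x).wrapping_sub(quot.wrapping_mul(q));
        if r >= q { r - q } else { r } // r lies in [0, 2q) before this step
    }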
@@ -150,13 +163,13 @@ impl Ring<u64>{
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
         debug_assert!(d.n() == self.n(), "d.n()={} != n={}", d.n(), self.n());
-        self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_d::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
+        self.modulus.va_sub_vb_mul_sc_into_vd::<CHUNK, REDUCE>(&a.0, &b.0, c, &mut d.0);
     }
 
     #[inline(always)]
     pub fn sum_aqqmb_prod_c_scalar_barrett_inplace<const REDUCE:REDUCEMOD>(&self, a: &Poly<u64>, c: &Barrett<u64>, b: &mut Poly<u64>){
         debug_assert!(a.n() == self.n(), "a.n()={} != n={}", a.n(), self.n());
         debug_assert!(b.n() == self.n(), "b.n()={} != n={}", b.n(), self.n());
-        self.modulus.vec_sum_aqqmb_prod_c_scalar_barrett_assign_b::<CHUNK, REDUCE>(&a.0, c, &mut b.0);
+        self.modulus.va_sub_vb_mul_sc_into_vb::<CHUNK, REDUCE>(&a.0, c, &mut b.0);
     }
 }
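
Note: the final hunk renames the fused operation: va_sub_vb_mul_sc_into_vd computes d[i] = (a[i] - b[i]) * c mod q in one pass (the old aqqmb spelling suggests the difference was formed as a + 2q - b to avoid underflow under lazy reduction). A plain-u128 sketch of the semantics, with the crate's prepared Barrett scalar replaced by a direct reduction:

    // vd[i] = (va[i] - vb[i]) * sc mod q, fused so the intermediate
    // difference is never written back to memory. Assumes reduced inputs.
    fn va_sub_vb_mul_sc_into_vd(q: u64, va: &[u64], vb: &[u64], sc: u64, vd: &mut [u64]) {
        for ((&a, &b), d) in va.iter().zip(vb).zip(vd.iter_mut()) {
            let diff = if a >= b { a - b } else { a + q - b };
            *d = ((diff as u128 * sc as u128) % q as u128) as u64;
        }
    }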