Mirror of https://github.com/arnaucube/poulpy.git, synced 2026-02-10 05:06:44 +01:00

Commit: Added trace operation + test and renamed base2k to backend
.gitmodules (vendored), 4 changed lines

@@ -1,3 +1,3 @@
-[submodule "base2k/spqlios-arithmetic"]
-	path = base2k/spqlios-arithmetic
+[submodule "backend/spqlios-arithmetic"]
+	path = backend/spqlios-arithmetic
 	url = https://github.com/phantomzone-org/spqlios-arithmetic
.vscode/settings.json (vendored), 140 changed lines

@@ -1,71 +1,71 @@
 {
-    "cmake.sourceDirectory": "C:/Users/boss_/go/src/github.com/gausslabs/fhe/base2k/spqlios-arithmetic",
+    "cmake.sourceDirectory": "C:/Users/boss_/go/src/github.com/gausslabs/poulpy/backend/spqlios-arithmetic",
     "files.associations": {
         "vector": "cpp",
         "xstring": "cpp",
         "xutility": "cpp",
         "algorithm": "cpp",
         "atomic": "cpp",
         "bit": "cpp",
         "cctype": "cpp",
         "charconv": "cpp",
         "cinttypes": "cpp",
         "clocale": "cpp",
         "cmath": "cpp",
         "compare": "cpp",
         "complex": "cpp",
         "concepts": "cpp",
         "cstddef": "cpp",
         "cstdint": "cpp",
         "cstdio": "cpp",
         "cstdlib": "cpp",
         "cstring": "cpp",
         "ctime": "cpp",
         "cwchar": "cpp",
         "exception": "cpp",
         "format": "cpp",
         "initializer_list": "cpp",
         "ios": "cpp",
         "iosfwd": "cpp",
         "iostream": "cpp",
         "istream": "cpp",
         "iterator": "cpp",
         "limits": "cpp",
         "locale": "cpp",
         "memory": "cpp",
         "new": "cpp",
         "optional": "cpp",
         "ostream": "cpp",
         "random": "cpp",
         "sstream": "cpp",
         "stdexcept": "cpp",
         "streambuf": "cpp",
         "string": "cpp",
         "system_error": "cpp",
         "tuple": "cpp",
         "type_traits": "cpp",
         "typeinfo": "cpp",
         "utility": "cpp",
         "xfacet": "cpp",
         "xiosbase": "cpp",
         "xlocale": "cpp",
         "xlocbuf": "cpp",
         "xlocinfo": "cpp",
         "xlocmes": "cpp",
         "xlocmon": "cpp",
         "xlocnum": "cpp",
         "xloctime": "cpp",
         "xmemory": "cpp",
         "xtr1common": "cpp",
         "vec_znx_arithmetic_private.h": "c",
         "reim4_arithmetic.h": "c",
         "array": "c",
         "string_view": "c"
     },
     "github.copilot.enable": {
         "*": false,
         "plaintext": false,
         "markdown": false,
         "scminput": false
     }
 }
Cargo.lock (generated), 26 changed lines

@@ -36,7 +36,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973"
 
 [[package]]
-name = "base2k"
+name = "backend"
 version = "0.1.0"
 dependencies = [
  "criterion",

@@ -131,6 +131,18 @@ version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
 
+[[package]]
+name = "core"
+version = "0.1.0"
+dependencies = [
+ "backend",
+ "criterion",
+ "itertools 0.14.0",
+ "rand_distr",
+ "rug",
+ "sampling",
+]
+
 [[package]]
 name = "criterion"
 version = "0.5.1"

@@ -488,18 +500,6 @@ version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
-[[package]]
-name = "rlwe"
-version = "0.1.0"
-dependencies = [
- "base2k",
- "criterion",
- "itertools 0.14.0",
- "rand_distr",
- "rug",
- "sampling",
-]
-
 [[package]]
 name = "rug"
 version = "1.27.0"
Cargo.toml (workspace)

@@ -1,5 +1,5 @@
 [workspace]
-members = ["base2k", "core", "sampling", "utils"]
+members = ["backend", "core", "sampling", "utils"]
 resolver = "3"
 
 [workspace.dependencies]
@@ -1,11 +1,11 @@
 {
     "github.copilot.enable": {
         "*": false,
         "plaintext": false,
         "markdown": false,
         "scminput": false
     },
     "files.associations": {
         "random": "c"
     }
 }
backend/Cargo.toml

@@ -1,18 +1,18 @@
 [package]
-name = "base2k"
+name = "backend"
 version = "0.1.0"
 edition = "2024"
 
 [dependencies]
 rug = {workspace = true}
 criterion = {workspace = true}
 itertools = {workspace = true}
 rand = {workspace = true}
 rand_distr = {workspace = true}
 rand_core = {workspace = true}
 sampling = { path = "../sampling" }
 utils = { path = "../utils" }
 
 [[bench]]
 name = "fft"
 harness = false
@@ -1,12 +1,12 @@
 
 ## WSL/Ubuntu
 To use this crate you need to build spqlios-arithmetic, which is provided as a git submodule:
 1) Initialize the sub-module
-2) $ cd base2k/spqlios-arithmetic
+2) $ cd backend/spqlios-arithmetic
 3) mkdir build
 4) cd build
 5) cmake ..
 6) make
 
 ## Others
 Steps 3 to 6 might change depending on your platform. See [spqlios-arithmetic/wiki/build](https://github.com/tfhe/spqlios-arithmetic/wiki/build) for additional information and build options.
@@ -1,56 +1,56 @@
-use base2k::ffi::reim::*;
+use backend::ffi::reim::*;
 use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main};
 use std::ffi::c_void;
 
 fn fft(c: &mut Criterion) {
     fn forward<'a>(m: u32, log_bound: u32, reim_fft_precomp: *mut reim_fft_precomp, a: &'a [i64]) -> Box<dyn FnMut() + 'a> {
         unsafe {
             let buf_a: *mut f64 = reim_fft_precomp_get_buffer(reim_fft_precomp, 0);
             reim_from_znx64_simple(m as u32, log_bound as u32, buf_a as *mut c_void, a.as_ptr());
             Box::new(move || reim_fft(reim_fft_precomp, buf_a))
         }
     }
 
     fn backward<'a>(m: u32, log_bound: u32, reim_ifft_precomp: *mut reim_ifft_precomp, a: &'a [i64]) -> Box<dyn FnMut() + 'a> {
         Box::new(move || unsafe {
             let buf_a: *mut f64 = reim_ifft_precomp_get_buffer(reim_ifft_precomp, 0);
             reim_from_znx64_simple(m as u32, log_bound as u32, buf_a as *mut c_void, a.as_ptr());
             reim_ifft(reim_ifft_precomp, buf_a);
         })
     }
 
     let mut b: criterion::BenchmarkGroup<'_, criterion::measurement::WallTime> = c.benchmark_group("fft");
 
     for log_n in 10..17 {
         let n: usize = 1 << log_n;
         let m: usize = n >> 1;
         let log_bound: u32 = 19;
 
         let mut a: Vec<i64> = vec![i64::default(); n];
         a.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
 
         unsafe {
             let reim_fft_precomp: *mut reim_fft_precomp = new_reim_fft_precomp(m as u32, 1);
             let reim_ifft_precomp: *mut reim_ifft_precomp = new_reim_ifft_precomp(m as u32, 1);
 
             let runners: [(String, Box<dyn FnMut()>); 2] = [
                 (format!("forward"), {
                     forward(m as u32, log_bound, reim_fft_precomp, &a)
                 }),
                 (format!("backward"), {
                     backward(m as u32, log_bound, reim_ifft_precomp, &a)
                 }),
             ];
 
             for (name, mut runner) in runners {
                 let id: BenchmarkId = BenchmarkId::new(name, format!("n={}", 1 << log_n));
                 b.bench_with_input(id, &(), |b: &mut criterion::Bencher<'_>, _| {
                     b.iter(&mut runner)
                 });
             }
         }
     }
 }
 
 criterion_group!(benches, fft,);
 criterion_main!(benches);
@@ -1,13 +1,13 @@
 use std::path::absolute;
 
 fn main() {
     println!(
         "cargo:rustc-link-search=native={}",
         absolute("spqlios-arithmetic/build/spqlios")
             .unwrap()
             .to_str()
             .unwrap()
     );
     println!("cargo:rustc-link-lib=static=spqlios");
     // println!("cargo:rustc-link-lib=dylib=spqlios")
 }
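The build script above hard-codes the CMake output directory of the spqlios-arithmetic submodule. As an illustration only (not part of this commit), the same linking logic could honour an environment override; the variable name SPQLIOS_LIB_DIR below is hypothetical:

    // Hypothetical build.rs variant: allow SPQLIOS_LIB_DIR to override the
    // in-tree CMake build directory from the README, then link statically.
    use std::env;
    use std::path::absolute;

    fn main() {
        let dir = env::var("SPQLIOS_LIB_DIR")
            .unwrap_or_else(|_| "spqlios-arithmetic/build/spqlios".to_string());
        println!(
            "cargo:rustc-link-search=native={}",
            absolute(&dir).unwrap().to_str().unwrap()
        );
        println!("cargo:rustc-link-lib=static=spqlios");
        // Re-run the build script if the override changes.
        println!("cargo:rerun-if-env-changed=SPQLIOS_LIB_DIR");
    }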
@@ -1,56 +1,56 @@
-use base2k::ffi::reim::*;
+use backend::ffi::reim::*;
 use std::ffi::c_void;
 use std::time::Instant;
 
 fn main() {
     let log_bound: usize = 19;
 
     let n: usize = 2048;
     let m: usize = n >> 1;
 
     let mut a: Vec<i64> = vec![i64::default(); n];
     let mut b: Vec<i64> = vec![i64::default(); n];
     let mut c: Vec<i64> = vec![i64::default(); n];
 
     a.iter_mut().enumerate().for_each(|(i, x)| *x = i as i64);
     b[1] = 1;
 
     println!("{:?}", b);
 
     unsafe {
         let reim_fft_precomp = new_reim_fft_precomp(m as u32, 2);
         let reim_ifft_precomp = new_reim_ifft_precomp(m as u32, 1);
 
         let buf_a = reim_fft_precomp_get_buffer(reim_fft_precomp, 0);
         let buf_b = reim_fft_precomp_get_buffer(reim_fft_precomp, 1);
         let buf_c = reim_ifft_precomp_get_buffer(reim_ifft_precomp, 0);
 
         let now = Instant::now();
         (0..1024).for_each(|_| {
             reim_from_znx64_simple(m as u32, log_bound as u32, buf_a as *mut c_void, a.as_ptr());
             reim_fft(reim_fft_precomp, buf_a);
 
             reim_from_znx64_simple(m as u32, log_bound as u32, buf_b as *mut c_void, b.as_ptr());
             reim_fft(reim_fft_precomp, buf_b);
 
             reim_fftvec_mul_simple(
                 m as u32,
                 buf_c as *mut c_void,
                 buf_a as *mut c_void,
                 buf_b as *mut c_void,
             );
             reim_ifft(reim_ifft_precomp, buf_c);
 
             reim_to_znx64_simple(
                 m as u32,
                 m as f64,
                 log_bound as u32,
                 c.as_mut_ptr(),
                 buf_c as *mut c_void,
             )
         });
 
         println!("time: {}us", now.elapsed().as_micros());
         println!("{:?}", &c[..16]);
     }
 }
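The example above multiplies a(X) by b(X) = X through the reim FFT; spqlios-arithmetic works over the negacyclic ring Z[X]/(X^N + 1), so the expected product is a rotation of a with the wrapped coefficient negated. The following self-contained sketch (plain Rust, no FFI, illustrative sizes) computes the same product by schoolbook negacyclic convolution and can be used as a sanity check:

    // Schoolbook multiplication in Z[X]/(X^n + 1), for cross-checking the
    // FFT-based product above. O(n^2), fine for small sanity checks.
    fn negacyclic_mul(a: &[i64], b: &[i64]) -> Vec<i64> {
        let n = a.len();
        assert_eq!(n, b.len());
        let mut c = vec![0i64; n];
        for i in 0..n {
            for j in 0..n {
                if i + j < n {
                    c[i + j] += a[i] * b[j]; // X^{i+j} stays in range
                } else {
                    c[i + j - n] -= a[i] * b[j]; // X^n = -1 wraps with a sign flip
                }
            }
        }
        c
    }

    fn main() {
        let n = 8;
        let a: Vec<i64> = (0..n as i64).collect(); // a(X) = 0 + 1*X + 2*X^2 + ...
        let mut b = vec![0i64; n];
        b[1] = 1; // b(X) = X, as in the example
        // Multiplying by X rotates the coefficients and negates the wrapped one:
        // expected output is [-(n-1), 0, 1, ..., n-2].
        println!("{:?}", negacyclic_mul(&a, &b));
    }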
@@ -1,4 +1,4 @@
-use base2k::{
+use backend::{
     AddNormal, Decoding, Encoding, FFT64, FillUniform, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDft, ScalarZnxDftAlloc,
     ScalarZnxDftOps, ScratchOwned, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDft,
     VecZnxDftAlloc, VecZnxDftOps, VecZnxOps, ZnxInfos,

@@ -8,10 +8,10 @@ use sampling::source::Source;
 
 fn main() {
     let n: usize = 16;
-    let log_base2k: usize = 18;
+    let basek: usize = 18;
     let ct_size: usize = 3;
     let msg_size: usize = 2;
-    let log_scale: usize = msg_size * log_base2k - 5;
+    let log_scale: usize = msg_size * basek - 5;
     let module: Module<FFT64> = Module::<FFT64>::new(n);
 
     let mut scratch: ScratchOwned = ScratchOwned::new(module.vec_znx_big_normalize_tmp_bytes());

@@ -36,7 +36,7 @@ fn main() {
     );
 
     // Fill the second column with random values: ct = (0, a)
-    ct.fill_uniform(log_base2k, 1, ct_size, &mut source);
+    ct.fill_uniform(basek, 1, ct_size, &mut source);
 
     let mut buf_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct_size);
 

@@ -64,8 +64,8 @@ fn main() {
     let mut want: Vec<i64> = vec![0; n];
     want.iter_mut()
         .for_each(|x| *x = source.next_u64n(16, 15) as i64);
-    m.encode_vec_i64(0, log_base2k, log_scale, &want, 4);
-    module.vec_znx_normalize_inplace(log_base2k, &mut m, 0, scratch.borrow());
+    m.encode_vec_i64(0, basek, log_scale, &want, 4);
+    module.vec_znx_normalize_inplace(basek, &mut m, 0, scratch.borrow());
 
     // m - BIG(ct[1] * s)
     module.vec_znx_big_sub_small_b_inplace(

@@ -78,7 +78,7 @@ fn main() {
     // Normalizes back to VecZnx
     // ct[0] <- m - BIG(c1 * s)
     module.vec_znx_big_normalize(
-        log_base2k,
+        basek,
         &mut ct,
         0, // Selects the first column of ct (ct[0])
         &buf_big,

@@ -89,9 +89,9 @@ fn main() {
     // Add noise to ct[0]
     // ct[0] <- ct[0] + e
     ct.add_normal(
-        log_base2k,
+        basek,
         0, // Selects the first column of ct (ct[0])
-        log_base2k * ct_size, // Scaling of the noise: 2^{-log_base2k * limbs}
+        basek * ct_size, // Scaling of the noise: 2^{-basek * limbs}
         &mut source,
         3.2, // Standard deviation
         19.0, // Truncatation bound

@@ -118,13 +118,13 @@ fn main() {
 
     // m + e <- BIG(ct[1] * s + ct[0])
     let mut res = module.new_vec_znx(1, ct_size);
-    module.vec_znx_big_normalize(log_base2k, &mut res, 0, &buf_big, 0, scratch.borrow());
+    module.vec_znx_big_normalize(basek, &mut res, 0, &buf_big, 0, scratch.borrow());
 
     // have = m * 2^{log_scale} + e
     let mut have: Vec<i64> = vec![i64::default(); n];
-    res.decode_vec_i64(0, log_base2k, res.size() * log_base2k, &mut have);
+    res.decode_vec_i64(0, basek, res.size() * basek, &mut have);
 
-    let scale: f64 = (1 << (res.size() * log_base2k - log_scale)) as f64;
+    let scale: f64 = (1 << (res.size() * basek - log_scale)) as f64;
     izip!(want.iter(), have.iter())
         .enumerate()
         .for_each(|(i, (a, b))| {
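For orientation, this example builds a textbook RLWE sample and checks the decryption identity. Restating the code's own comments as equations (all products taken in the negacyclic polynomial ring, s the prepared secret, a the uniform polynomial held in column 1 of ct):

    ct = (ct[0], ct[1]) = (m - a*s + e, a)
    ct[1]*s + ct[0]     = m + e

Decoding the result at precision k = res.size() * basek then yields want shifted up by res.size()*basek - log_scale bits plus the noise, which is what the scale variable, 2^{res.size()*basek - log_scale}, accounts for when have is compared against want.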
@@ -11,23 +11,23 @@ pub trait Encoding {
     /// # Arguments
     ///
     /// * `col_i`: the index of the poly where to encode the data.
-    /// * `log_base2k`: base two negative logarithm decomposition of the receiver.
-    /// * `log_k`: base two negative logarithm of the scaling of the data.
+    /// * `basek`: base two negative logarithm decomposition of the receiver.
+    /// * `k`: base two negative logarithm of the scaling of the data.
     /// * `data`: data to encode on the receiver.
     /// * `log_max`: base two logarithm of the infinity norm of the input data.
-    fn encode_vec_i64(&mut self, col_i: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize);
+    fn encode_vec_i64(&mut self, col_i: usize, basek: usize, k: usize, data: &[i64], log_max: usize);
 
     /// encodes a single i64 on the receiver at the given index.
     ///
     /// # Arguments
     ///
     /// * `col_i`: the index of the poly where to encode the data.
-    /// * `log_base2k`: base two negative logarithm decomposition of the receiver.
-    /// * `log_k`: base two negative logarithm of the scaling of the data.
+    /// * `basek`: base two negative logarithm decomposition of the receiver.
+    /// * `k`: base two negative logarithm of the scaling of the data.
     /// * `i`: index of the coefficient on which to encode the data.
     /// * `data`: data to encode on the receiver.
     /// * `log_max`: base two logarithm of the infinity norm of the input data.
-    fn encode_coeff_i64(&mut self, col_i: usize, log_base2k: usize, log_k: usize, i: usize, data: i64, log_max: usize);
+    fn encode_coeff_i64(&mut self, col_i: usize, basek: usize, k: usize, i: usize, data: i64, log_max: usize);
 }
 
 pub trait Decoding {

@@ -36,70 +36,70 @@ pub trait Decoding {
     /// # Arguments
     ///
     /// * `col_i`: the index of the poly where to encode the data.
-    /// * `log_base2k`: base two negative logarithm decomposition of the receiver.
-    /// * `log_k`: base two logarithm of the scaling of the data.
+    /// * `basek`: base two negative logarithm decomposition of the receiver.
+    /// * `k`: base two logarithm of the scaling of the data.
     /// * `data`: data to decode from the receiver.
-    fn decode_vec_i64(&self, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]);
+    fn decode_vec_i64(&self, col_i: usize, basek: usize, k: usize, data: &mut [i64]);
 
     /// decode a vector of Float from the receiver.
     ///
     /// # Arguments
     /// * `col_i`: the index of the poly where to encode the data.
-    /// * `log_base2k`: base two negative logarithm decomposition of the receiver.
+    /// * `basek`: base two negative logarithm decomposition of the receiver.
     /// * `data`: data to decode from the receiver.
-    fn decode_vec_float(&self, col_i: usize, log_base2k: usize, data: &mut [Float]);
+    fn decode_vec_float(&self, col_i: usize, basek: usize, data: &mut [Float]);
 
     /// decode a single of i64 from the receiver at the given index.
     ///
     /// # Arguments
     ///
     /// * `col_i`: the index of the poly where to encode the data.
-    /// * `log_base2k`: base two negative logarithm decomposition of the receiver.
-    /// * `log_k`: base two negative logarithm of the scaling of the data.
+    /// * `basek`: base two negative logarithm decomposition of the receiver.
+    /// * `k`: base two negative logarithm of the scaling of the data.
     /// * `i`: index of the coefficient to decode.
     /// * `data`: data to decode from the receiver.
-    fn decode_coeff_i64(&self, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64;
+    fn decode_coeff_i64(&self, col_i: usize, basek: usize, k: usize, i: usize) -> i64;
 }
 
 impl<D: AsMut<[u8]> + AsRef<[u8]>> Encoding for VecZnx<D> {
-    fn encode_vec_i64(&mut self, col_i: usize, log_base2k: usize, log_k: usize, data: &[i64], log_max: usize) {
-        encode_vec_i64(self, col_i, log_base2k, log_k, data, log_max)
+    fn encode_vec_i64(&mut self, col_i: usize, basek: usize, k: usize, data: &[i64], log_max: usize) {
+        encode_vec_i64(self, col_i, basek, k, data, log_max)
     }
 
-    fn encode_coeff_i64(&mut self, col_i: usize, log_base2k: usize, log_k: usize, i: usize, value: i64, log_max: usize) {
-        encode_coeff_i64(self, col_i, log_base2k, log_k, i, value, log_max)
+    fn encode_coeff_i64(&mut self, col_i: usize, basek: usize, k: usize, i: usize, value: i64, log_max: usize) {
+        encode_coeff_i64(self, col_i, basek, k, i, value, log_max)
     }
 }
 
 impl<D: AsRef<[u8]>> Decoding for VecZnx<D> {
-    fn decode_vec_i64(&self, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
-        decode_vec_i64(self, col_i, log_base2k, log_k, data)
+    fn decode_vec_i64(&self, col_i: usize, basek: usize, k: usize, data: &mut [i64]) {
+        decode_vec_i64(self, col_i, basek, k, data)
     }
 
-    fn decode_vec_float(&self, col_i: usize, log_base2k: usize, data: &mut [Float]) {
-        decode_vec_float(self, col_i, log_base2k, data)
+    fn decode_vec_float(&self, col_i: usize, basek: usize, data: &mut [Float]) {
+        decode_vec_float(self, col_i, basek, data)
     }
 
-    fn decode_coeff_i64(&self, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
-        decode_coeff_i64(self, col_i, log_base2k, log_k, i)
+    fn decode_coeff_i64(&self, col_i: usize, basek: usize, k: usize, i: usize) -> i64 {
+        decode_coeff_i64(self, col_i, basek, k, i)
     }
 }
 
 fn encode_vec_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
     a: &mut VecZnx<D>,
     col_i: usize,
-    log_base2k: usize,
-    log_k: usize,
+    basek: usize,
+    k: usize,
     data: &[i64],
     log_max: usize,
 ) {
-    let size: usize = (log_k + log_base2k - 1) / log_base2k;
+    let size: usize = (k + basek - 1) / basek;
 
     #[cfg(debug_assertions)]
     {
         assert!(
             size <= a.size(),
-            "invalid argument log_k: (log_k + a.log_base2k - 1)/a.log_base2k={} > a.size()={}",
+            "invalid argument k: (k + a.basek - 1)/a.basek={} > a.size()={}",
             size,
             a.size()
         );

@@ -108,43 +108,43 @@ fn encode_vec_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
     }
 
     let data_len: usize = data.len();
-    let log_k_rem: usize = log_base2k - (log_k % log_base2k);
+    let k_rem: usize = basek - (k % basek);
 
     // Zeroes coefficients of the i-th column
     (0..a.size()).for_each(|i| unsafe {
         znx_zero_i64_ref(a.n() as u64, a.at_mut_ptr(col_i, i));
     });
 
-    // If 2^{log_base2k} * 2^{k_rem} < 2^{63}-1, then we can simply copy
+    // If 2^{basek} * 2^{k_rem} < 2^{63}-1, then we can simply copy
     // values on the last limb.
     // Else we decompose values base2k.
-    if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
+    if log_max + k_rem < 63 || k_rem == basek {
         a.at_mut(col_i, size - 1)[..data_len].copy_from_slice(&data[..data_len]);
     } else {
-        let mask: i64 = (1 << log_base2k) - 1;
-        let steps: usize = min(size, (log_max + log_base2k - 1) / log_base2k);
+        let mask: i64 = (1 << basek) - 1;
+        let steps: usize = min(size, (log_max + basek - 1) / basek);
         (size - steps..size)
             .rev()
             .enumerate()
             .for_each(|(i, i_rev)| {
-                let shift: usize = i * log_base2k;
+                let shift: usize = i * basek;
                 izip!(a.at_mut(col_i, i_rev).iter_mut(), data.iter()).for_each(|(y, x)| *y = (x >> shift) & mask);
             })
     }
 
     // Case where self.prec % self.k != 0.
-    if log_k_rem != log_base2k {
-        let steps: usize = min(size, (log_max + log_base2k - 1) / log_base2k);
+    if k_rem != basek {
+        let steps: usize = min(size, (log_max + basek - 1) / basek);
         (size - steps..size).rev().for_each(|i| {
             a.at_mut(col_i, i)[..data_len]
                 .iter_mut()
-                .for_each(|x| *x <<= log_k_rem);
+                .for_each(|x| *x <<= k_rem);
         })
     }
 }
 
-fn decode_vec_i64<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, log_k: usize, data: &mut [i64]) {
-    let size: usize = (log_k + log_base2k - 1) / log_base2k;
+fn decode_vec_i64<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, basek: usize, k: usize, data: &mut [i64]) {
+    let size: usize = (k + basek - 1) / basek;
     #[cfg(debug_assertions)]
     {
         assert!(

@@ -156,22 +156,22 @@ fn decode_vec_i64<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize
         assert!(col_i < a.cols());
     }
     data.copy_from_slice(a.at(col_i, 0));
-    let rem: usize = log_base2k - (log_k % log_base2k);
+    let rem: usize = basek - (k % basek);
     (1..size).for_each(|i| {
-        if i == size - 1 && rem != log_base2k {
-            let k_rem: usize = log_base2k - rem;
+        if i == size - 1 && rem != basek {
+            let k_rem: usize = basek - rem;
             izip!(a.at(col_i, i).iter(), data.iter_mut()).for_each(|(x, y)| {
                 *y = (*y << k_rem) + (x >> rem);
             });
         } else {
             izip!(a.at(col_i, i).iter(), data.iter_mut()).for_each(|(x, y)| {
-                *y = (*y << log_base2k) + x;
+                *y = (*y << basek) + x;
             });
         }
     })
 }
 
-fn decode_vec_float<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, data: &mut [Float]) {
+fn decode_vec_float<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, basek: usize, data: &mut [Float]) {
     let size: usize = a.size();
     #[cfg(debug_assertions)]
     {

@@ -184,12 +184,12 @@ fn decode_vec_float<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usi
         assert!(col_i < a.cols());
     }
 
-    let prec: u32 = (log_base2k * size) as u32;
+    let prec: u32 = (basek * size) as u32;
 
-    // 2^{log_base2k}
-    let base = Float::with_val(prec, (1 << log_base2k) as f64);
+    // 2^{basek}
+    let base = Float::with_val(prec, (1 << basek) as f64);
 
-    // y[i] = sum x[j][i] * 2^{-log_base2k*j}
+    // y[i] = sum x[j][i] * 2^{-basek*j}
     (0..size).for_each(|i| {
         if i == 0 {
             izip!(a.at(col_i, size - i - 1).iter(), data.iter_mut()).for_each(|(x, y)| {

@@ -208,73 +208,73 @@ fn decode_vec_float<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usi
 fn encode_coeff_i64<D: AsMut<[u8]> + AsRef<[u8]>>(
     a: &mut VecZnx<D>,
     col_i: usize,
-    log_base2k: usize,
-    log_k: usize,
+    basek: usize,
+    k: usize,
     i: usize,
     value: i64,
     log_max: usize,
 ) {
-    let size: usize = (log_k + log_base2k - 1) / log_base2k;
+    let size: usize = (k + basek - 1) / basek;
 
     #[cfg(debug_assertions)]
     {
         assert!(i < a.n());
         assert!(
             size <= a.size(),
-            "invalid argument log_k: (log_k + a.log_base2k - 1)/a.log_base2k={} > a.size()={}",
+            "invalid argument k: (k + a.basek - 1)/a.basek={} > a.size()={}",
             size,
             a.size()
         );
         assert!(col_i < a.cols());
     }
 
-    let log_k_rem: usize = log_base2k - (log_k % log_base2k);
+    let k_rem: usize = basek - (k % basek);
     (0..a.size()).for_each(|j| a.at_mut(col_i, j)[i] = 0);
 
-    // If 2^{log_base2k} * 2^{log_k_rem} < 2^{63}-1, then we can simply copy
+    // If 2^{basek} * 2^{k_rem} < 2^{63}-1, then we can simply copy
     // values on the last limb.
     // Else we decompose values base2k.
-    if log_max + log_k_rem < 63 || log_k_rem == log_base2k {
+    if log_max + k_rem < 63 || k_rem == basek {
         a.at_mut(col_i, size - 1)[i] = value;
     } else {
-        let mask: i64 = (1 << log_base2k) - 1;
-        let steps: usize = min(size, (log_max + log_base2k - 1) / log_base2k);
+        let mask: i64 = (1 << basek) - 1;
+        let steps: usize = min(size, (log_max + basek - 1) / basek);
         (size - steps..size)
             .rev()
             .enumerate()
             .for_each(|(j, j_rev)| {
-                a.at_mut(col_i, j_rev)[i] = (value >> (j * log_base2k)) & mask;
+                a.at_mut(col_i, j_rev)[i] = (value >> (j * basek)) & mask;
             })
     }
 
     // Case where prec % k != 0.
-    if log_k_rem != log_base2k {
-        let steps: usize = min(size, (log_max + log_base2k - 1) / log_base2k);
+    if k_rem != basek {
+        let steps: usize = min(size, (log_max + basek - 1) / basek);
         (size - steps..size).rev().for_each(|j| {
-            a.at_mut(col_i, j)[i] <<= log_k_rem;
+            a.at_mut(col_i, j)[i] <<= k_rem;
         })
     }
 }
 
-fn decode_coeff_i64<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, log_base2k: usize, log_k: usize, i: usize) -> i64 {
+fn decode_coeff_i64<D: AsRef<[u8]>>(a: &VecZnx<D>, col_i: usize, basek: usize, k: usize, i: usize) -> i64 {
     #[cfg(debug_assertions)]
     {
         assert!(i < a.n());
         assert!(col_i < a.cols())
     }
 
-    let cols: usize = (log_k + log_base2k - 1) / log_base2k;
+    let cols: usize = (k + basek - 1) / basek;
     let data: &[i64] = a.raw();
     let mut res: i64 = data[i];
-    let rem: usize = log_base2k - (log_k % log_base2k);
+    let rem: usize = basek - (k % basek);
     let slice_size: usize = a.n() * a.size();
     (1..cols).for_each(|i| {
         let x = data[i * slice_size];
-        if i == cols - 1 && rem != log_base2k {
-            let k_rem: usize = log_base2k - rem;
+        if i == cols - 1 && rem != basek {
+            let k_rem: usize = basek - rem;
             res = (res << k_rem) + (x >> rem);
         } else {
-            res = (res << log_base2k) + x;
+            res = (res << basek) + x;
         }
     });
     res

@@ -292,9 +292,9 @@ mod tests {
     fn test_set_get_i64_lo_norm() {
         let n: usize = 8;
         let module: Module<FFT64> = Module::<FFT64>::new(n);
-        let log_base2k: usize = 17;
+        let basek: usize = 17;
         let size: usize = 5;
-        let log_k: usize = size * log_base2k - 5;
+        let k: usize = size * basek - 5;
         let mut a: VecZnx<_> = module.new_vec_znx(2, size);
         let mut source: Source = Source::new([0u8; 32]);
         let raw: &mut [i64] = a.raw_mut();

@@ -303,9 +303,9 @@ mod tests {
         let mut have: Vec<i64> = vec![i64::default(); n];
         have.iter_mut()
             .for_each(|x| *x = (source.next_i64() << 56) >> 56);
-        a.encode_vec_i64(col_i, log_base2k, log_k, &have, 10);
+        a.encode_vec_i64(col_i, basek, k, &have, 10);
         let mut want: Vec<i64> = vec![i64::default(); n];
-        a.decode_vec_i64(col_i, log_base2k, log_k, &mut want);
+        a.decode_vec_i64(col_i, basek, k, &mut want);
         izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
         });
     }

@@ -314,9 +314,9 @@ mod tests {
     fn test_set_get_i64_hi_norm() {
         let n: usize = 8;
         let module: Module<FFT64> = Module::<FFT64>::new(n);
-        let log_base2k: usize = 17;
+        let basek: usize = 17;
         let size: usize = 5;
-        let log_k: usize = size * log_base2k - 5;
+        let k: usize = size * basek - 5;
         let mut a: VecZnx<_> = module.new_vec_znx(2, size);
         let mut source = Source::new([0u8; 32]);
         let raw: &mut [i64] = a.raw_mut();

@@ -324,9 +324,9 @@ mod tests {
     (0..a.cols()).for_each(|col_i| {
         let mut have: Vec<i64> = vec![i64::default(); n];
         have.iter_mut().for_each(|x| *x = source.next_i64());
-        a.encode_vec_i64(col_i, log_base2k, log_k, &have, 64);
+        a.encode_vec_i64(col_i, basek, k, &have, 64);
         let mut want = vec![i64::default(); n];
-        a.decode_vec_i64(col_i, log_base2k, log_k, &mut want);
+        a.decode_vec_i64(col_i, basek, k, &mut want);
         izip!(want, have).for_each(|(a, b)| assert_eq!(a, b, "{} != {}", a, b));
     })
 }
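To make the limb layout concrete, here is a small, self-contained sketch of the base-2^k split and Horner-style recomposition that encode_coeff_i64 and decode_coeff_i64 perform on a single coefficient. It is an illustration of the arithmetic only: it handles a non-negative value and ignores the column layout, the k % basek remainder shift, and the VecZnx storage that the real functions manage.

    // Split a non-negative value into `size` limbs of `basek` bits each
    // (most-significant limb first, matching the encoder's limb order).
    fn split_base2k(value: i64, basek: usize, size: usize) -> Vec<i64> {
        let mask: i64 = (1 << basek) - 1;
        (0..size).rev().map(|j| (value >> (j * basek)) & mask).collect()
    }

    // Recompose with the same Horner scheme as decode_coeff_i64:
    // res = (((limb0 << basek) + limb1) << basek) + limb2 ...
    fn recompose_base2k(limbs: &[i64], basek: usize) -> i64 {
        limbs.iter().fold(0i64, |acc, &x| (acc << basek) + x)
    }

    fn main() {
        let (basek, size) = (17, 3);
        let v: i64 = 123_456_789;
        let limbs = split_base2k(v, basek, size);
        assert_eq!(recompose_base2k(&limbs, basek), v);
        println!("{} -> {:?}", v, limbs);
    }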
@@ -1,7 +1,7 @@
 pub type CNV_PVEC_L = cnv_pvec_l_t;
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct cnv_pvec_r_t {
     _unused: [u8; 0],
 }
 pub type CNV_PVEC_R = cnv_pvec_r_t;
@@ -1,8 +1,8 @@
 pub mod module;
 pub mod reim;
 pub mod svp;
 pub mod vec_znx;
 pub mod vec_znx_big;
 pub mod vec_znx_dft;
 pub mod vmp;
 pub mod znx;
@@ -1,18 +1,18 @@
 pub struct module_info_t {
     _unused: [u8; 0],
 }
 
 pub type module_type_t = ::std::os::raw::c_uint;
 pub use self::module_type_t as MODULE_TYPE;
 
 pub type MODULE = module_info_t;
 
 unsafe extern "C" {
     pub unsafe fn new_module_info(N: u64, mode: MODULE_TYPE) -> *mut MODULE;
 }
 unsafe extern "C" {
     pub unsafe fn delete_module_info(module_info: *mut MODULE);
 }
 unsafe extern "C" {
     pub unsafe fn module_get_n(module: *const MODULE) -> u64;
 }
@@ -1,172 +1,172 @@
|
|||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_fft_precomp {
|
pub struct reim_fft_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_FFT_PRECOMP = reim_fft_precomp;
|
pub type REIM_FFT_PRECOMP = reim_fft_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_ifft_precomp {
|
pub struct reim_ifft_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_IFFT_PRECOMP = reim_ifft_precomp;
|
pub type REIM_IFFT_PRECOMP = reim_ifft_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_mul_precomp {
|
pub struct reim_mul_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_FFTVEC_MUL_PRECOMP = reim_mul_precomp;
|
pub type REIM_FFTVEC_MUL_PRECOMP = reim_mul_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_addmul_precomp {
|
pub struct reim_addmul_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_FFTVEC_ADDMUL_PRECOMP = reim_addmul_precomp;
|
pub type REIM_FFTVEC_ADDMUL_PRECOMP = reim_addmul_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_from_znx32_precomp {
|
pub struct reim_from_znx32_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_FROM_ZNX32_PRECOMP = reim_from_znx32_precomp;
|
pub type REIM_FROM_ZNX32_PRECOMP = reim_from_znx32_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_from_znx64_precomp {
|
pub struct reim_from_znx64_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_FROM_ZNX64_PRECOMP = reim_from_znx64_precomp;
|
pub type REIM_FROM_ZNX64_PRECOMP = reim_from_znx64_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_from_tnx32_precomp {
|
pub struct reim_from_tnx32_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_FROM_TNX32_PRECOMP = reim_from_tnx32_precomp;
|
pub type REIM_FROM_TNX32_PRECOMP = reim_from_tnx32_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_to_tnx32_precomp {
|
pub struct reim_to_tnx32_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_TO_TNX32_PRECOMP = reim_to_tnx32_precomp;
|
pub type REIM_TO_TNX32_PRECOMP = reim_to_tnx32_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_to_tnx_precomp {
|
pub struct reim_to_tnx_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_TO_TNX_PRECOMP = reim_to_tnx_precomp;
|
pub type REIM_TO_TNX_PRECOMP = reim_to_tnx_precomp;
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct reim_to_znx64_precomp {
|
pub struct reim_to_znx64_precomp {
|
||||||
_unused: [u8; 0],
|
_unused: [u8; 0],
|
||||||
}
|
}
|
||||||
pub type REIM_TO_ZNX64_PRECOMP = reim_to_znx64_precomp;
|
pub type REIM_TO_ZNX64_PRECOMP = reim_to_znx64_precomp;
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_fft_precomp(m: u32, num_buffers: u32) -> *mut REIM_FFT_PRECOMP;
|
pub unsafe fn new_reim_fft_precomp(m: u32, num_buffers: u32) -> *mut REIM_FFT_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_fft_precomp_get_buffer(tables: *const REIM_FFT_PRECOMP, buffer_index: u32) -> *mut f64;
|
pub unsafe fn reim_fft_precomp_get_buffer(tables: *const REIM_FFT_PRECOMP, buffer_index: u32) -> *mut f64;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_fft_buffer(m: u32) -> *mut f64;
|
pub unsafe fn new_reim_fft_buffer(m: u32) -> *mut f64;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn delete_reim_fft_buffer(buffer: *mut f64);
|
pub unsafe fn delete_reim_fft_buffer(buffer: *mut f64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_fft(tables: *const REIM_FFT_PRECOMP, data: *mut f64);
|
pub unsafe fn reim_fft(tables: *const REIM_FFT_PRECOMP, data: *mut f64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_ifft_precomp(m: u32, num_buffers: u32) -> *mut REIM_IFFT_PRECOMP;
|
pub unsafe fn new_reim_ifft_precomp(m: u32, num_buffers: u32) -> *mut REIM_IFFT_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_ifft_precomp_get_buffer(tables: *const REIM_IFFT_PRECOMP, buffer_index: u32) -> *mut f64;
|
pub unsafe fn reim_ifft_precomp_get_buffer(tables: *const REIM_IFFT_PRECOMP, buffer_index: u32) -> *mut f64;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_ifft(tables: *const REIM_IFFT_PRECOMP, data: *mut f64);
|
pub unsafe fn reim_ifft(tables: *const REIM_IFFT_PRECOMP, data: *mut f64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_fftvec_mul_precomp(m: u32) -> *mut REIM_FFTVEC_MUL_PRECOMP;
|
pub unsafe fn new_reim_fftvec_mul_precomp(m: u32) -> *mut REIM_FFTVEC_MUL_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_fftvec_mul(tables: *const REIM_FFTVEC_MUL_PRECOMP, r: *mut f64, a: *const f64, b: *const f64);
|
pub unsafe fn reim_fftvec_mul(tables: *const REIM_FFTVEC_MUL_PRECOMP, r: *mut f64, a: *const f64, b: *const f64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_fftvec_addmul_precomp(m: u32) -> *mut REIM_FFTVEC_ADDMUL_PRECOMP;
|
pub unsafe fn new_reim_fftvec_addmul_precomp(m: u32) -> *mut REIM_FFTVEC_ADDMUL_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_fftvec_addmul(tables: *const REIM_FFTVEC_ADDMUL_PRECOMP, r: *mut f64, a: *const f64, b: *const f64);
|
pub unsafe fn reim_fftvec_addmul(tables: *const REIM_FFTVEC_ADDMUL_PRECOMP, r: *mut f64, a: *const f64, b: *const f64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_from_znx32_precomp(m: u32, log2bound: u32) -> *mut REIM_FROM_ZNX32_PRECOMP;
|
pub unsafe fn new_reim_from_znx32_precomp(m: u32, log2bound: u32) -> *mut REIM_FROM_ZNX32_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_from_znx32(tables: *const REIM_FROM_ZNX32_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i32);
|
pub unsafe fn reim_from_znx32(tables: *const REIM_FROM_ZNX32_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i32);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_from_znx64(tables: *const REIM_FROM_ZNX64_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i64);
|
pub unsafe fn reim_from_znx64(tables: *const REIM_FROM_ZNX64_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_from_znx64_precomp(m: u32, maxbnd: u32) -> *mut REIM_FROM_ZNX64_PRECOMP;
|
pub unsafe fn new_reim_from_znx64_precomp(m: u32, maxbnd: u32) -> *mut REIM_FROM_ZNX64_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn reim_from_znx64_simple(m: u32, log2bound: u32, r: *mut ::std::os::raw::c_void, a: *const i64);
|
pub unsafe fn reim_from_znx64_simple(m: u32, log2bound: u32, r: *mut ::std::os::raw::c_void, a: *const i64);
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
|
||||||
pub unsafe fn new_reim_from_tnx32_precomp(m: u32) -> *mut REIM_FROM_TNX32_PRECOMP;
|
pub unsafe fn new_reim_from_tnx32_precomp(m: u32) -> *mut REIM_FROM_TNX32_PRECOMP;
|
||||||
}
|
}
|
||||||
unsafe extern "C" {
|
unsafe extern "C" {
    pub unsafe fn reim_from_tnx32(tables: *const REIM_FROM_TNX32_PRECOMP, r: *mut ::std::os::raw::c_void, a: *const i32);
}
unsafe extern "C" {
    pub unsafe fn new_reim_to_tnx32_precomp(m: u32, divisor: f64, log2overhead: u32) -> *mut REIM_TO_TNX32_PRECOMP;
}
unsafe extern "C" {
    pub unsafe fn reim_to_tnx32(tables: *const REIM_TO_TNX32_PRECOMP, r: *mut i32, a: *const ::std::os::raw::c_void);
}
unsafe extern "C" {
    pub unsafe fn new_reim_to_tnx_precomp(m: u32, divisor: f64, log2overhead: u32) -> *mut REIM_TO_TNX_PRECOMP;
}
unsafe extern "C" {
    pub unsafe fn reim_to_tnx(tables: *const REIM_TO_TNX_PRECOMP, r: *mut f64, a: *const f64);
}
unsafe extern "C" {
    pub unsafe fn reim_to_tnx_simple(m: u32, divisor: f64, log2overhead: u32, r: *mut f64, a: *const f64);
}
unsafe extern "C" {
    pub unsafe fn new_reim_to_znx64_precomp(m: u32, divisor: f64, log2bound: u32) -> *mut REIM_TO_ZNX64_PRECOMP;
}
unsafe extern "C" {
    pub unsafe fn reim_to_znx64(precomp: *const REIM_TO_ZNX64_PRECOMP, r: *mut i64, a: *const ::std::os::raw::c_void);
}
unsafe extern "C" {
    pub unsafe fn reim_to_znx64_simple(m: u32, divisor: f64, log2bound: u32, r: *mut i64, a: *const ::std::os::raw::c_void);
}
unsafe extern "C" {
    pub unsafe fn reim_fft_simple(m: u32, data: *mut ::std::os::raw::c_void);
}
unsafe extern "C" {
    pub unsafe fn reim_ifft_simple(m: u32, data: *mut ::std::os::raw::c_void);
}
unsafe extern "C" {
    pub unsafe fn reim_fftvec_mul_simple(
        m: u32,
        r: *mut ::std::os::raw::c_void,
        a: *const ::std::os::raw::c_void,
        b: *const ::std::os::raw::c_void,
    );
}
unsafe extern "C" {
    pub unsafe fn reim_fftvec_addmul_simple(
        m: u32,
        r: *mut ::std::os::raw::c_void,
        a: *const ::std::os::raw::c_void,
        b: *const ::std::os::raw::c_void,
    );
}
unsafe extern "C" {
    pub unsafe fn reim_from_znx32_simple(m: u32, log2bound: u32, r: *mut ::std::os::raw::c_void, x: *const i32);
}
unsafe extern "C" {
    pub unsafe fn reim_from_tnx32_simple(m: u32, r: *mut ::std::os::raw::c_void, x: *const i32);
}
unsafe extern "C" {
    pub unsafe fn reim_to_tnx32_simple(m: u32, divisor: f64, log2overhead: u32, r: *mut i32, x: *const ::std::os::raw::c_void);
}
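An illustrative sketch (not from the patch) of how the `_simple` entry points compose into a pointwise product in the FFT domain. The buffer layout assumed here — `m` real parts followed by `m` imaginary parts as contiguous `f64` values — is an assumption to check against the spqlios-arithmetic documentation.

unsafe fn reim_pointwise_mul_sketch(m: u32, a: &mut [f64], b: &mut [f64], r: &mut [f64]) {
    unsafe {
        // Forward FFT of both operands, in place.
        reim_fft_simple(m, a.as_mut_ptr() as *mut ::std::os::raw::c_void);
        reim_fft_simple(m, b.as_mut_ptr() as *mut ::std::os::raw::c_void);
        // Coefficient-wise product in the FFT domain, written into r.
        reim_fftvec_mul_simple(
            m,
            r.as_mut_ptr() as *mut ::std::os::raw::c_void,
            a.as_ptr() as *const ::std::os::raw::c_void,
            b.as_ptr() as *const ::std::os::raw::c_void,
        );
        // Back to the coefficient domain.
        reim_ifft_simple(m, r.as_mut_ptr() as *mut ::std::os::raw::c_void);
    }
}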
@@ -1,48 +1,48 @@
use crate::ffi::module::MODULE;
use crate::ffi::vec_znx_dft::VEC_ZNX_DFT;

#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct svp_ppol_t {
    _unused: [u8; 0],
}
pub type SVP_PPOL = svp_ppol_t;

unsafe extern "C" {
    pub unsafe fn bytes_of_svp_ppol(module: *const MODULE) -> u64;
}
unsafe extern "C" {
    pub unsafe fn new_svp_ppol(module: *const MODULE) -> *mut SVP_PPOL;
}
unsafe extern "C" {
    pub unsafe fn delete_svp_ppol(res: *mut SVP_PPOL);
}

unsafe extern "C" {
    pub unsafe fn svp_prepare(module: *const MODULE, ppol: *mut SVP_PPOL, pol: *const i64);
}

unsafe extern "C" {
    pub unsafe fn svp_apply_dft(
        module: *const MODULE,
        res: *const VEC_ZNX_DFT,
        res_size: u64,
        ppol: *const SVP_PPOL,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn svp_apply_dft_to_dft(
        module: *const MODULE,
        res: *const VEC_ZNX_DFT,
        res_size: u64,
        res_cols: u64,
        ppol: *const SVP_PPOL,
        a: *const VEC_ZNX_DFT,
        a_size: u64,
        a_cols: u64,
    );
}
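An illustrative sketch (not from the patch) of the SVP_PPOL lifecycle suggested by these declarations: allocate, prepare from an i64 polynomial, apply to a decomposed vector with the result in DFT form, then free. Ownership and aliasing rules of the underlying C calls are assumptions to verify.

unsafe fn svp_sketch(
    module: *const MODULE,
    pol: &[i64],
    res: *const VEC_ZNX_DFT,
    res_size: u64,
    a: &[i64],
    a_size: u64,
    a_sl: u64,
) {
    unsafe {
        // Allocate and pre-transform the scalar polynomial.
        let ppol: *mut SVP_PPOL = new_svp_ppol(module);
        svp_prepare(module, ppol, pol.as_ptr());
        // Multiply the decomposed vector `a` by the prepared polynomial.
        svp_apply_dft(module, res, res_size, ppol, a.as_ptr(), a_size, a_sl);
        delete_svp_ppol(ppol);
    }
}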
@@ -1,101 +1,101 @@
use crate::ffi::module::MODULE;

unsafe extern "C" {
    pub unsafe fn vec_znx_add(
        module: *const MODULE,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        b: *const i64,
        b_size: u64,
        b_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_automorphism(
        module: *const MODULE,
        p: i64,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_negate(
        module: *const MODULE,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_rotate(
        module: *const MODULE,
        p: i64,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_sub(
        module: *const MODULE,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        b: *const i64,
        b_size: u64,
        b_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_zero(module: *const MODULE, res: *mut i64, res_size: u64, res_sl: u64);
}
unsafe extern "C" {
    pub unsafe fn vec_znx_copy(
        module: *const MODULE,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_normalize_base2k(
        module: *const MODULE,
-        log2_base2k: u64,
+        base2k: u64,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        tmp_space: *mut u8,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_normalize_base2k_tmp_bytes(module: *const MODULE) -> u64;
}
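An illustrative sketch (not from the patch) of the normalization call pattern: query the scratch size with the companion `_tmp_bytes` function, then normalize `a` into `res`. Whether the scratch buffer must be specially aligned is an assumption left unchecked here.

unsafe fn normalize_sketch(
    module: *const MODULE,
    base2k: u64,
    res: &mut [i64],
    res_size: u64,
    res_sl: u64,
    a: &[i64],
    a_size: u64,
    a_sl: u64,
) {
    unsafe {
        // Size the scratch buffer as the library requires.
        let mut tmp: Vec<u8> = vec![0u8; vec_znx_normalize_base2k_tmp_bytes(module) as usize];
        vec_znx_normalize_base2k(
            module,
            base2k,
            res.as_mut_ptr(),
            res_size,
            res_sl,
            a.as_ptr(),
            a_size,
            a_sl,
            tmp.as_mut_ptr(),
        );
    }
}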
@@ -1,161 +1,161 @@
use crate::ffi::module::MODULE;

#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct vec_znx_big_t {
    _unused: [u8; 0],
}
pub type VEC_ZNX_BIG = vec_znx_big_t;

unsafe extern "C" {
    pub unsafe fn bytes_of_vec_znx_big(module: *const MODULE, size: u64) -> u64;
}
unsafe extern "C" {
    pub unsafe fn new_vec_znx_big(module: *const MODULE, size: u64) -> *mut VEC_ZNX_BIG;
}
unsafe extern "C" {
    pub unsafe fn delete_vec_znx_big(res: *mut VEC_ZNX_BIG);
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_add(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
        b: *const VEC_ZNX_BIG,
        b_size: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_big_add_small(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
        b: *const i64,
        b_size: u64,
        b_sl: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_big_add_small2(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        b: *const i64,
        b_size: u64,
        b_sl: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_big_sub(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
        b: *const VEC_ZNX_BIG,
        b_size: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_big_sub_small_b(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
        b: *const i64,
        b_size: u64,
        b_sl: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_big_sub_small_a(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        b: *const VEC_ZNX_BIG,
        b_size: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_big_sub_small2(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        b: *const i64,
        b_size: u64,
        b_sl: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_normalize_base2k_tmp_bytes(module: *const MODULE) -> u64;
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_normalize_base2k(
        module: *const MODULE,
        log2_base2k: u64,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_range_normalize_base2k(
        module: *const MODULE,
        log2_base2k: u64,
        res: *mut i64,
        res_size: u64,
        res_sl: u64,
        a: *const VEC_ZNX_BIG,
        a_range_begin: u64,
        a_range_xend: u64,
        a_range_step: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_range_normalize_base2k_tmp_bytes(module: *const MODULE) -> u64;
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_automorphism(
        module: *const MODULE,
        p: i64,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_big_rotate(
        module: *const MODULE,
        p: i64,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a: *const VEC_ZNX_BIG,
        a_size: u64,
    );
}
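An illustrative sketch (not from the patch) combining big-vector addition with the carry-propagating normalization declared above; error handling and scratch alignment are left out and are assumptions of the caller.

unsafe fn big_add_then_normalize_sketch(
    module: *const MODULE,
    log2_base2k: u64,
    a: *const VEC_ZNX_BIG,
    b: *const VEC_ZNX_BIG,
    size: u64,
    res: &mut [i64],
    res_size: u64,
    res_sl: u64,
) {
    unsafe {
        // c = a + b, accumulated without normalization.
        let c: *mut VEC_ZNX_BIG = new_vec_znx_big(module, size);
        vec_znx_big_add(module, c, size, a, size, b, size);
        // Carry-propagate back to a balanced base-2^k representation.
        let mut tmp: Vec<u8> = vec![0u8; vec_znx_big_normalize_base2k_tmp_bytes(module) as usize];
        vec_znx_big_normalize_base2k(module, log2_base2k, res.as_mut_ptr(), res_size, res_sl, c, size, tmp.as_mut_ptr());
        delete_vec_znx_big(c);
    }
}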
@@ -1,86 +1,86 @@
use crate::ffi::module::MODULE;
use crate::ffi::vec_znx_big::VEC_ZNX_BIG;

#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct vec_znx_dft_t {
    _unused: [u8; 0],
}
pub type VEC_ZNX_DFT = vec_znx_dft_t;

unsafe extern "C" {
    pub unsafe fn bytes_of_vec_znx_dft(module: *const MODULE, size: u64) -> u64;
}
unsafe extern "C" {
    pub unsafe fn new_vec_znx_dft(module: *const MODULE, size: u64) -> *mut VEC_ZNX_DFT;
}
unsafe extern "C" {
    pub unsafe fn delete_vec_znx_dft(res: *mut VEC_ZNX_DFT);
}

unsafe extern "C" {
    pub unsafe fn vec_dft_zero(module: *const MODULE, res: *mut VEC_ZNX_DFT, res_size: u64);
}
unsafe extern "C" {
    pub unsafe fn vec_dft_add(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        res_size: u64,
        a: *const VEC_ZNX_DFT,
        a_size: u64,
        b: *const VEC_ZNX_DFT,
        b_size: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_dft_sub(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        res_size: u64,
        a: *const VEC_ZNX_DFT,
        a_size: u64,
        b: *const VEC_ZNX_DFT,
        b_size: u64,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_dft(module: *const MODULE, res: *mut VEC_ZNX_DFT, res_size: u64, a: *const i64, a_size: u64, a_sl: u64);
}
unsafe extern "C" {
    pub unsafe fn vec_znx_idft(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a_dft: *const VEC_ZNX_DFT,
        a_size: u64,
        tmp: *mut u8,
    );
}
unsafe extern "C" {
    pub unsafe fn vec_znx_idft_tmp_bytes(module: *const MODULE) -> u64;
}
unsafe extern "C" {
    pub unsafe fn vec_znx_idft_tmp_a(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        res_size: u64,
        a_dft: *mut VEC_ZNX_DFT,
        a_size: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_dft_automorphism(
        module: *const MODULE,
        d: i64,
        res_dft: *mut VEC_ZNX_DFT,
        res_size: u64,
        a_dft: *const VEC_ZNX_DFT,
        a_size: u64,
        tmp: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vec_znx_dft_automorphism_tmp_bytes(module: *const MODULE) -> u64;
}
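An illustrative sketch (not from the patch) of a DFT/iDFT round trip; the big-vector destination is assumed to be allocated by the caller through the vec_znx_big constructors, and the scratch is sized with the library's own query.

unsafe fn dft_roundtrip_sketch(
    module: *const MODULE,
    a: &[i64],
    a_size: u64,
    a_sl: u64,
    res_big: *mut VEC_ZNX_BIG,
) {
    unsafe {
        // Forward DFT of the small vector.
        let a_dft: *mut VEC_ZNX_DFT = new_vec_znx_dft(module, a_size);
        vec_znx_dft(module, a_dft, a_size, a.as_ptr(), a_size, a_sl);
        // Inverse DFT into an (unnormalized) big vector.
        let mut tmp: Vec<u8> = vec![0u8; vec_znx_idft_tmp_bytes(module) as usize];
        vec_znx_idft(module, res_big, a_size, a_dft, a_size, tmp.as_mut_ptr());
        delete_vec_znx_dft(a_dft);
    }
}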
@@ -1,165 +1,165 @@
use crate::ffi::module::MODULE;
use crate::ffi::vec_znx_big::VEC_ZNX_BIG;
use crate::ffi::vec_znx_dft::VEC_ZNX_DFT;

#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct vmp_pmat_t {
    _unused: [u8; 0],
}

// [rows][cols] = [#Decomposition][#Limbs]
pub type VMP_PMAT = vmp_pmat_t;

unsafe extern "C" {
    pub unsafe fn bytes_of_vmp_pmat(module: *const MODULE, nrows: u64, ncols: u64) -> u64;
}
unsafe extern "C" {
    pub unsafe fn new_vmp_pmat(module: *const MODULE, nrows: u64, ncols: u64) -> *mut VMP_PMAT;
}
unsafe extern "C" {
    pub unsafe fn delete_vmp_pmat(res: *mut VMP_PMAT);
}

unsafe extern "C" {
    pub unsafe fn vmp_apply_dft(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        res_size: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        pmat: *const VMP_PMAT,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_apply_dft_add(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        res_size: u64,
        a: *const i64,
        a_size: u64,
        a_sl: u64,
        pmat: *const VMP_PMAT,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_apply_dft_tmp_bytes(module: *const MODULE, res_size: u64, a_size: u64, nrows: u64, ncols: u64) -> u64;
}

unsafe extern "C" {
    pub unsafe fn vmp_apply_dft_to_dft(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        res_size: u64,
        a_dft: *const VEC_ZNX_DFT,
        a_size: u64,
        pmat: *const VMP_PMAT,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_apply_dft_to_dft_add(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        res_size: u64,
        a_dft: *const VEC_ZNX_DFT,
        a_size: u64,
        pmat: *const VMP_PMAT,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_apply_dft_to_dft_tmp_bytes(
        module: *const MODULE,
        res_size: u64,
        a_size: u64,
        nrows: u64,
        ncols: u64,
    ) -> u64;
}

unsafe extern "C" {
    pub unsafe fn vmp_prepare_contiguous(
        module: *const MODULE,
        pmat: *mut VMP_PMAT,
        mat: *const i64,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_prepare_dblptr(
        module: *const MODULE,
        pmat: *mut VMP_PMAT,
        mat: *const *const i64,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_prepare_row(
        module: *const MODULE,
        pmat: *mut VMP_PMAT,
        row: *const i64,
        row_i: u64,
        nrows: u64,
        ncols: u64,
        tmp_space: *mut u8,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_prepare_row_dft(
        module: *const MODULE,
        pmat: *mut VMP_PMAT,
        row: *const VEC_ZNX_DFT,
        row_i: u64,
        nrows: u64,
        ncols: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_extract_row_dft(
        module: *const MODULE,
        res: *mut VEC_ZNX_DFT,
        pmat: *const VMP_PMAT,
        row_i: u64,
        nrows: u64,
        ncols: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_extract_row(
        module: *const MODULE,
        res: *mut VEC_ZNX_BIG,
        pmat: *const VMP_PMAT,
        row_i: u64,
        nrows: u64,
        ncols: u64,
    );
}

unsafe extern "C" {
    pub unsafe fn vmp_prepare_tmp_bytes(module: *const MODULE, nrows: u64, ncols: u64) -> u64;
}
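An illustrative sketch (not from the patch) of the prepare/apply workflow for vector-matrix products; the contiguous [rows][cols] packing of `mat` is taken from the comment on VMP_PMAT and is otherwise an assumption, as is the alignment of the scratch buffers.

unsafe fn vmp_sketch(
    module: *const MODULE,
    mat: &[i64],
    nrows: u64,
    ncols: u64,
    a: &[i64],
    a_size: u64,
    a_sl: u64,
    res: *mut VEC_ZNX_DFT,
    res_size: u64,
) {
    unsafe {
        // Prepare (pre-transform) the matrix once; it can then be applied many times.
        let pmat: *mut VMP_PMAT = new_vmp_pmat(module, nrows, ncols);
        let mut tmp_prep: Vec<u8> = vec![0u8; vmp_prepare_tmp_bytes(module, nrows, ncols) as usize];
        vmp_prepare_contiguous(module, pmat, mat.as_ptr(), nrows, ncols, tmp_prep.as_mut_ptr());
        // Apply the prepared matrix to the decomposed vector `a`; the result stays in DFT form.
        let mut tmp_apply: Vec<u8> = vec![0u8; vmp_apply_dft_tmp_bytes(module, res_size, a_size, nrows, ncols) as usize];
        vmp_apply_dft(module, res, res_size, a.as_ptr(), a_size, a_sl, pmat, nrows, ncols, tmp_apply.as_mut_ptr());
        delete_vmp_pmat(pmat);
    }
}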
@@ -1,76 +1,76 @@
use crate::ffi::module::MODULE;

unsafe extern "C" {
    pub unsafe fn znx_add_i64_ref(nn: u64, res: *mut i64, a: *const i64, b: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_add_i64_avx(nn: u64, res: *mut i64, a: *const i64, b: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_sub_i64_ref(nn: u64, res: *mut i64, a: *const i64, b: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_sub_i64_avx(nn: u64, res: *mut i64, a: *const i64, b: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_negate_i64_ref(nn: u64, res: *mut i64, a: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_negate_i64_avx(nn: u64, res: *mut i64, a: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_copy_i64_ref(nn: u64, res: *mut i64, a: *const i64);
}
unsafe extern "C" {
    pub unsafe fn znx_zero_i64_ref(nn: u64, res: *mut i64);
}
unsafe extern "C" {
    pub unsafe fn rnx_divide_by_m_ref(nn: u64, m: f64, res: *mut f64, a: *const f64);
}
unsafe extern "C" {
    pub unsafe fn rnx_divide_by_m_avx(nn: u64, m: f64, res: *mut f64, a: *const f64);
}
unsafe extern "C" {
    pub unsafe fn rnx_rotate_f64(nn: u64, p: i64, res: *mut f64, in_: *const f64);
}
unsafe extern "C" {
    pub unsafe fn znx_rotate_i64(nn: u64, p: i64, res: *mut i64, in_: *const i64);
}
unsafe extern "C" {
    pub unsafe fn rnx_rotate_inplace_f64(nn: u64, p: i64, res: *mut f64);
}
unsafe extern "C" {
    pub unsafe fn znx_rotate_inplace_i64(nn: u64, p: i64, res: *mut i64);
}
unsafe extern "C" {
    pub unsafe fn rnx_automorphism_f64(nn: u64, p: i64, res: *mut f64, in_: *const f64);
}
unsafe extern "C" {
    pub unsafe fn znx_automorphism_i64(nn: u64, p: i64, res: *mut i64, in_: *const i64);
}
unsafe extern "C" {
    pub unsafe fn rnx_automorphism_inplace_f64(nn: u64, p: i64, res: *mut f64);
}
unsafe extern "C" {
    pub unsafe fn znx_automorphism_inplace_i64(nn: u64, p: i64, res: *mut i64);
}
unsafe extern "C" {
    pub unsafe fn rnx_mul_xp_minus_one(nn: u64, p: i64, res: *mut f64, in_: *const f64);
}
unsafe extern "C" {
    pub unsafe fn znx_mul_xp_minus_one(nn: u64, p: i64, res: *mut i64, in_: *const i64);
}
unsafe extern "C" {
    pub unsafe fn rnx_mul_xp_minus_one_inplace(nn: u64, p: i64, res: *mut f64);
}
unsafe extern "C" {
    pub unsafe fn znx_normalize(nn: u64, base_k: u64, out: *mut i64, carry_out: *mut i64, in_: *const i64, carry_in: *const i64);
}

unsafe extern "C" {
    pub unsafe fn znx_small_single_product(module: *const MODULE, res: *mut i64, a: *const i64, b: *const i64, tmp: *mut u8);
}

unsafe extern "C" {
    pub unsafe fn znx_small_single_product_tmp_bytes(module: *const MODULE) -> u64;
}
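The `_ref`/`_avx` pairs above expose the portable and AVX kernels separately; whether the library also dispatches between them internally is not visible from the bindings. An illustrative caller-side dispatch (the exact feature bit the `_avx` kernels need — avx vs avx2 — is an assumption):

#[cfg(target_arch = "x86_64")]
unsafe fn znx_add_sketch(nn: u64, res: &mut [i64], a: &[i64], b: &[i64]) {
    unsafe {
        if std::is_x86_feature_detected!("avx2") {
            // Vectorized kernel on CPUs that support it.
            znx_add_i64_avx(nn, res.as_mut_ptr(), a.as_ptr(), b.as_ptr());
        } else {
            // Portable fallback.
            znx_add_i64_ref(nn, res.as_mut_ptr(), a.as_ptr(), b.as_ptr());
        }
    }
}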
@@ -310,10 +310,11 @@ impl MatZnxDftOps<FFT64> for Module<FFT64> {
    }

    fn vmp_apply_add<R, A, B>(&self, res: &mut R, a: &A, b: &B, scratch: &mut Scratch)
    where
        R: VecZnxDftToMut<FFT64>,
        A: VecZnxDftToRef<FFT64>,
-        B: MatZnxDftToRef<FFT64> {
+        B: MatZnxDftToRef<FFT64>,
+    {
        let mut res: VecZnxDft<&mut [u8], _> = res.to_mut();
        let a: VecZnxDft<&[u8], _> = a.to_ref();
        let b: MatZnxDft<&[u8], _> = b.to_ref();
@@ -375,7 +376,7 @@ mod tests {
    #[test]
    fn vmp_prepare_row() {
        let module: Module<FFT64> = Module::<FFT64>::new(16);
-        let log_base2k: usize = 8;
+        let basek: usize = 8;
        let mat_rows: usize = 4;
        let mat_cols_in: usize = 2;
        let mat_cols_out: usize = 2;
@@ -389,7 +390,7 @@ mod tests {
        for row_i in 0..mat_rows {
            let mut source: Source = Source::new([0u8; 32]);
            (0..mat_cols_out).for_each(|col_out| {
-                a.fill_uniform(log_base2k, col_out, mat_size, &mut source);
+                a.fill_uniform(basek, col_out, mat_size, &mut source);
                module.vec_znx_dft(&mut a_dft, col_out, &a, col_out);
            });
            module.vmp_prepare_row(&mut mat, row_i, col_in, &a_dft);
@@ -405,7 +406,7 @@ mod tests {
        let n: usize = 1 << log_n;

        let module: Module<FFT64> = Module::<FFT64>::new(n);
-        let log_base2k: usize = 15;
+        let basek: usize = 15;
        let a_size: usize = 5;
        let mat_size: usize = 6;
        let res_size: usize = 5;
@@ -470,7 +471,7 @@ mod tests {
        let mut res_have: VecZnx<Vec<u8>> = module.new_vec_znx(res_cols, res_size);
        (0..mat_cols_out).for_each(|i| {
            module.vec_znx_idft_tmp_a(&mut c_big, i, &mut c_dft, i);
-            module.vec_znx_big_normalize(log_base2k, &mut res_have, i, &c_big, i, scratch.borrow());
+            module.vec_znx_big_normalize(basek, &mut res_have, i, &c_big, i, scratch.borrow());
        });

        (0..mat_cols_out).for_each(|col_i| {
@@ -478,7 +479,7 @@ mod tests {
            (0..a_cols).for_each(|i| {
                res_want_vi64[(i + 1) + (1 + i * mat_cols_out + col_i)] = 1;
            });
-            res_have.decode_vec_i64(col_i, log_base2k, log_base2k * 3, &mut res_have_vi64);
+            res_have.decode_vec_i64(col_i, basek, basek * 3, &mut res_have_vi64);
            assert_eq!(res_have_vi64, res_want_vi64);
        });
    });
@@ -4,16 +4,16 @@ use rand_distr::{Distribution, Normal};
use sampling::source::Source;

pub trait FillUniform {
-    /// Fills the first `size` size with uniform values in \[-2^{log_base2k-1}, 2^{log_base2k-1}\]
+    /// Fills the first `size` limbs with uniform values in \[-2^{basek-1}, 2^{basek-1}\]
-    fn fill_uniform(&mut self, log_base2k: usize, col_i: usize, size: usize, source: &mut Source);
+    fn fill_uniform(&mut self, basek: usize, col_i: usize, size: usize, source: &mut Source);
}

pub trait FillDistF64 {
    fn fill_dist_f64<D: Distribution<f64>>(
        &mut self,
-        log_base2k: usize,
+        basek: usize,
        col_i: usize,
-        log_k: usize,
+        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
@@ -21,12 +21,12 @@ pub trait FillDistF64 {
}

pub trait AddDistF64 {
-    /// Adds vector sampled according to the provided distribution, scaled by 2^{-log_k} and bounded to \[-bound, bound\].
+    /// Adds a vector sampled according to the provided distribution, scaled by 2^{-k} and bounded to \[-bound, bound\].
    fn add_dist_f64<D: Distribution<f64>>(
        &mut self,
-        log_base2k: usize,
+        basek: usize,
        col_i: usize,
-        log_k: usize,
+        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
@@ -34,21 +34,21 @@ pub trait AddDistF64 {
}

pub trait FillNormal {
-    fn fill_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64);
+    fn fill_normal(&mut self, basek: usize, col_i: usize, k: usize, source: &mut Source, sigma: f64, bound: f64);
}

pub trait AddNormal {
-    /// Adds a discrete normal vector scaled by 2^{-log_k} with the provided standard deviation and bounded to \[-bound, bound\].
+    /// Adds a discrete normal vector scaled by 2^{-k} with the provided standard deviation and bounded to \[-bound, bound\].
-    fn add_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64);
+    fn add_normal(&mut self, basek: usize, col_i: usize, k: usize, source: &mut Source, sigma: f64, bound: f64);
}

impl<T> FillUniform for VecZnx<T>
where
    VecZnx<T>: VecZnxToMut,
{
-    fn fill_uniform(&mut self, log_base2k: usize, col_i: usize, size: usize, source: &mut Source) {
+    fn fill_uniform(&mut self, basek: usize, col_i: usize, size: usize, source: &mut Source) {
        let mut a: VecZnx<&mut [u8]> = self.to_mut();
-        let base2k: u64 = 1 << log_base2k;
+        let base2k: u64 = 1 << basek;
        let mask: u64 = base2k - 1;
        let base2k_half: i64 = (base2k >> 1) as i64;
        (0..size).for_each(|j| {
@@ -65,9 +65,9 @@ where
{
    fn fill_dist_f64<D: Distribution<f64>>(
        &mut self,
-        log_base2k: usize,
+        basek: usize,
        col_i: usize,
-        log_k: usize,
+        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
@@ -79,16 +79,16 @@ where
            (bound.log2().ceil() as i64)
        );

-        let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
+        let limb: usize = (k + basek - 1) / basek - 1;
-        let log_base2k_rem: usize = (limb + 1) * log_base2k - log_k;
+        let basek_rem: usize = (limb + 1) * basek - k;

-        if log_base2k_rem != 0 {
+        if basek_rem != 0 {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
                let mut dist_f64: f64 = dist.sample(source);
                while dist_f64.abs() > bound {
                    dist_f64 = dist.sample(source)
                }
-                *a = (dist_f64.round() as i64) << log_base2k_rem;
+                *a = (dist_f64.round() as i64) << basek_rem;
            });
        } else {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
@@ -108,9 +108,9 @@ where
{
    fn add_dist_f64<D: Distribution<f64>>(
        &mut self,
-        log_base2k: usize,
+        basek: usize,
        col_i: usize,
-        log_k: usize,
+        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
@@ -122,16 +122,16 @@ where
            (bound.log2().ceil() as i64)
        );

-        let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
+        let limb: usize = (k + basek - 1) / basek - 1;
-        let log_base2k_rem: usize = (limb + 1) * log_base2k - log_k;
+        let basek_rem: usize = (limb + 1) * basek - k;

-        if log_base2k_rem != 0 {
+        if basek_rem != 0 {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
                let mut dist_f64: f64 = dist.sample(source);
                while dist_f64.abs() > bound {
                    dist_f64 = dist.sample(source)
                }
-                *a += (dist_f64.round() as i64) << log_base2k_rem;
+                *a += (dist_f64.round() as i64) << basek_rem;
            });
        } else {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
@@ -149,11 +149,11 @@ impl<T> FillNormal for VecZnx<T>
where
    VecZnx<T>: VecZnxToMut,
{
-    fn fill_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
+    fn fill_normal(&mut self, basek: usize, col_i: usize, k: usize, source: &mut Source, sigma: f64, bound: f64) {
        self.fill_dist_f64(
-            log_base2k,
+            basek,
            col_i,
-            log_k,
+            k,
            source,
            Normal::new(0.0, sigma).unwrap(),
            bound,
@@ -165,11 +165,11 @@ impl<T> AddNormal for VecZnx<T>
where
    VecZnx<T>: VecZnxToMut,
{
-    fn add_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
+    fn add_normal(&mut self, basek: usize, col_i: usize, k: usize, source: &mut Source, sigma: f64, bound: f64) {
        self.add_dist_f64(
-            log_base2k,
+            basek,
            col_i,
-            log_k,
+            k,
            source,
            Normal::new(0.0, sigma).unwrap(),
            bound,
@@ -183,9 +183,9 @@ where
{
    fn fill_dist_f64<D: Distribution<f64>>(
        &mut self,
-        log_base2k: usize,
+        basek: usize,
        col_i: usize,
-        log_k: usize,
+        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
@@ -197,16 +197,16 @@ where
            (bound.log2().ceil() as i64)
        );

-        let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
+        let limb: usize = (k + basek - 1) / basek - 1;
-        let log_base2k_rem: usize = (limb + 1) * log_base2k - log_k;
+        let basek_rem: usize = (limb + 1) * basek - k;

-        if log_base2k_rem != 0 {
+        if basek_rem != 0 {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
                let mut dist_f64: f64 = dist.sample(source);
                while dist_f64.abs() > bound {
                    dist_f64 = dist.sample(source)
                }
-                *a = (dist_f64.round() as i64) << log_base2k_rem;
+                *a = (dist_f64.round() as i64) << basek_rem;
            });
        } else {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
@@ -226,9 +226,9 @@ where
{
    fn add_dist_f64<D: Distribution<f64>>(
        &mut self,
-        log_base2k: usize,
+        basek: usize,
        col_i: usize,
-        log_k: usize,
+        k: usize,
        source: &mut Source,
        dist: D,
        bound: f64,
@@ -240,16 +240,16 @@ where
            (bound.log2().ceil() as i64)
        );

-        let limb: usize = (log_k + log_base2k - 1) / log_base2k - 1;
+        let limb: usize = (k + basek - 1) / basek - 1;
-        let log_base2k_rem: usize = (limb + 1) * log_base2k - log_k;
+        let basek_rem: usize = (limb + 1) * basek - k;

-        if log_base2k_rem != 0 {
+        if basek_rem != 0 {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
                let mut dist_f64: f64 = dist.sample(source);
                while dist_f64.abs() > bound {
                    dist_f64 = dist.sample(source)
                }
-                *a += (dist_f64.round() as i64) << log_base2k_rem;
+                *a += (dist_f64.round() as i64) << basek_rem;
            });
        } else {
            a.at_mut(col_i, limb).iter_mut().for_each(|a| {
@@ -267,11 +267,11 @@ impl<T> FillNormal for VecZnxBig<T, FFT64>
where
    VecZnxBig<T, FFT64>: VecZnxBigToMut<FFT64>,
{
-    fn fill_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
+    fn fill_normal(&mut self, basek: usize, col_i: usize, k: usize, source: &mut Source, sigma: f64, bound: f64) {
        self.fill_dist_f64(
-            log_base2k,
+            basek,
            col_i,
-            log_k,
+            k,
            source,
            Normal::new(0.0, sigma).unwrap(),
            bound,
@@ -283,11 +283,11 @@ impl<T> AddNormal for VecZnxBig<T, FFT64>
where
    VecZnxBig<T, FFT64>: VecZnxBigToMut<FFT64>,
{
-    fn add_normal(&mut self, log_base2k: usize, col_i: usize, log_k: usize, source: &mut Source, sigma: f64, bound: f64) {
+    fn add_normal(&mut self, basek: usize, col_i: usize, k: usize, source: &mut Source, sigma: f64, bound: f64) {
        self.add_dist_f64(
-            log_base2k,
+            basek,
            col_i,
-            log_k,
+            k,
            source,
            Normal::new(0.0, sigma).unwrap(),
            bound,
@@ -307,7 +307,7 @@ mod tests {
    fn vec_znx_fill_uniform() {
        let n: usize = 4096;
        let module: Module<FFT64> = Module::<FFT64>::new(n);
-        let log_base2k: usize = 17;
+        let basek: usize = 17;
        let size: usize = 5;
        let mut source: Source = Source::new([0u8; 32]);
        let cols: usize = 2;
@@ -315,14 +315,14 @@ mod tests {
        let one_12_sqrt: f64 = 0.28867513459481287;
        (0..cols).for_each(|col_i| {
            let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
-            a.fill_uniform(log_base2k, col_i, size, &mut source);
+            a.fill_uniform(basek, col_i, size, &mut source);
            (0..cols).for_each(|col_j| {
                if col_j != col_i {
                    (0..size).for_each(|limb_i| {
                        assert_eq!(a.at(col_j, limb_i), zero);
                    })
                } else {
-                    let std: f64 = a.std(col_i, log_base2k);
+                    let std: f64 = a.std(col_i, basek);
                    assert!(
                        (std - one_12_sqrt).abs() < 0.01,
                        "std={} ~!= {}",
@@ -338,25 +338,25 @@ mod tests {
    fn vec_znx_add_normal() {
        let n: usize = 4096;
        let module: Module<FFT64> = Module::<FFT64>::new(n);
-        let log_base2k: usize = 17;
+        let basek: usize = 17;
-        let log_k: usize = 2 * 17;
+        let k: usize = 2 * 17;
        let size: usize = 5;
        let sigma: f64 = 3.2;
        let bound: f64 = 6.0 * sigma;
        let mut source: Source = Source::new([0u8; 32]);
        let cols: usize = 2;
        let zero: Vec<i64> = vec![0; n];
-        let k_f64: f64 = (1u64 << log_k as u64) as f64;
+        let k_f64: f64 = (1u64 << k as u64) as f64;
        (0..cols).for_each(|col_i| {
            let mut a: VecZnx<_> = module.new_vec_znx(cols, size);
-            a.add_normal(log_base2k, col_i, log_k, &mut source, sigma, bound);
+            a.add_normal(basek, col_i, k, &mut source, sigma, bound);
            (0..cols).for_each(|col_j| {
                if col_j != col_i {
                    (0..size).for_each(|limb_i| {
                        assert_eq!(a.at(col_j, limb_i), zero);
                    })
                } else {
-                    let std: f64 = a.std(col_i, log_base2k) * k_f64;
+                    let std: f64 = a.std(col_i, basek) * k_f64;
                    assert!((std - sigma).abs() < 0.1, "std={} ~!= {}", std, sigma);
                }
            })
@@ -1,32 +1,32 @@
use crate::znx_base::ZnxInfos;
use crate::{Decoding, VecZnx};
use rug::Float;
use rug::float::Round;
use rug::ops::{AddAssignRound, DivAssignRound, SubAssignRound};

pub trait Stats {
    /// Returns the standard deviation of the i-th polynomial.
-    fn std(&self, col_i: usize, log_base2k: usize) -> f64;
+    fn std(&self, col_i: usize, basek: usize) -> f64;
}

impl<D: AsRef<[u8]>> Stats for VecZnx<D> {
-    fn std(&self, col_i: usize, log_base2k: usize) -> f64 {
+    fn std(&self, col_i: usize, basek: usize) -> f64 {
-        let prec: u32 = (self.size() * log_base2k) as u32;
+        let prec: u32 = (self.size() * basek) as u32;
        let mut data: Vec<Float> = (0..self.n()).map(|_| Float::with_val(prec, 0)).collect();
-        self.decode_vec_float(col_i, log_base2k, &mut data);
+        self.decode_vec_float(col_i, basek, &mut data);
        // std = sqrt(sum((xi - avg)^2) / n)
        let mut avg: Float = Float::with_val(prec, 0);
        data.iter().for_each(|x| {
            avg.add_assign_round(x, Round::Nearest);
        });
        avg.div_assign_round(Float::with_val(prec, data.len()), Round::Nearest);
        data.iter_mut().for_each(|x| {
            x.sub_assign_round(&avg, Round::Nearest);
        });
        let mut std: Float = Float::with_val(prec, 0);
        data.iter().for_each(|x| std += x * x);
        std.div_assign_round(Float::with_val(prec, data.len()), Round::Nearest);
        std = std.sqrt();
        std.to_f64()
    }
}
@@ -1,6 +1,9 @@
|
|||||||
|
use itertools::izip;
|
||||||
|
|
||||||
use crate::DataView;
|
use crate::DataView;
|
||||||
use crate::DataViewMut;
|
use crate::DataViewMut;
|
||||||
use crate::ScalarZnx;
|
use crate::ScalarZnx;
|
||||||
|
use crate::Scratch;
|
||||||
use crate::ZnxSliceSize;
|
use crate::ZnxSliceSize;
|
||||||
use crate::ZnxZero;
|
use crate::ZnxZero;
|
||||||
use crate::alloc_aligned;
|
use crate::alloc_aligned;
|
||||||
@@ -68,29 +71,66 @@ impl<D: AsRef<[u8]>> ZnxView for VecZnx<D> {
    type Scalar = i64;
}

impl<D: AsRef<[u8]>> VecZnx<D> {
    pub fn rsh_scratch_space(n: usize) -> usize {
        n * std::mem::size_of::<i64>()
    }
}

impl<D: AsMut<[u8]> + AsRef<[u8]>> VecZnx<D> {
    /// Truncates the precision of the [VecZnx] by k bits.
    ///
    /// # Arguments
    ///
    /// * `log_base2k`: the base two logarithm of the coefficients decomposition.
    /// * `basek`: the base two logarithm of the coefficients decomposition.
    /// * `k`: the number of bits of precision to drop.
    pub fn trunc_pow2(&mut self, log_base2k: usize, k: usize, col: usize) {
    pub fn trunc_pow2(&mut self, basek: usize, k: usize, col: usize) {
        if k == 0 {
            return;
        }

        self.size -= k / log_base2k;
        self.size -= k / basek;

        let k_rem: usize = k % log_base2k;
        let k_rem: usize = k % basek;

        if k_rem != 0 {
            let mask: i64 = ((1 << (log_base2k - k_rem - 1)) - 1) << k_rem;
            let mask: i64 = ((1 << (basek - k_rem - 1)) - 1) << k_rem;
            self.at_mut(col, self.size() - 1)
                .iter_mut()
                .for_each(|x: &mut i64| *x &= mask)
        }
    }

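A minimal sketch of the same truncation on a bare limb vector, assuming limbs ordered from most to least significant in base 2^basek; the masking in the real method above differs slightly because it operates on signed, balanced digits:

// Drop `k` bits of precision: remove whole limbs, then clear the low `k % basek`
// bits of the last limb that is kept.
fn trunc_pow2_sketch(limbs: &mut Vec<i64>, basek: usize, k: usize) {
    let keep = limbs.len() - k / basek;
    limbs.truncate(keep);
    let k_rem = k % basek;
    if k_rem != 0 {
        if let Some(last) = limbs.last_mut() {
            *last &= !((1i64 << k_rem) - 1);
        }
    }
}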
    pub fn rsh(&mut self, basek: usize, k: usize, scratch: &mut Scratch) {
        let n: usize = self.n();
        let cols: usize = self.cols();
        let size: usize = self.size();
        let steps: usize = k / basek;

        self.raw_mut().rotate_right(n * steps * cols);
        (0..cols).for_each(|i| {
            (0..steps).for_each(|j| {
                self.zero_at(i, j);
            })
        });

        let k_rem: usize = k % basek;

        if k_rem != 0 {
            let (carry, _) = scratch.tmp_slice::<i64>(n);
            let shift = i64::BITS as usize - k_rem;
            (0..cols).for_each(|i| {
                carry.fill(0);
                (steps..size).for_each(|j| {
                    izip!(carry.iter_mut(), self.at_mut(i, j).iter_mut()).for_each(|(ci, xi)| {
                        *xi += *ci << basek;
                        *ci = (*xi << shift) >> shift;
                        *xi = (*xi - *ci) >> k_rem;
                    });
                });
            })
        }
    }
}
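The carry recurrence in `rsh` above, isolated for a single column of limbs (most significant first, signed digits in base 2^basek); a standalone sketch for illustration:

// Divide a limb sequence by 2^k_rem (k_rem < basek), pushing the dropped low bits
// of each limb down into the next, less significant limb.
fn rsh_rem_sketch(limbs: &mut [i64], basek: usize, k_rem: usize) {
    assert!(k_rem > 0 && k_rem < basek);
    let shift = i64::BITS as usize - k_rem;
    let mut carry: i64 = 0;
    for x in limbs.iter_mut() {
        *x += carry << basek;           // carry arriving from the more significant limb
        carry = (*x << shift) >> shift; // sign-extended low k_rem bits
        *x = (*x - carry) >> k_rem;     // exact arithmetic shift of what remains
    }
}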

impl<D: From<Vec<u8>>> VecZnx<D> {
@@ -165,7 +205,7 @@ fn normalize_tmp_bytes(n: usize) -> usize {
}

#[allow(dead_code)]
fn normalize<D: AsMut<[u8]> + AsRef<[u8]>>(log_base2k: usize, a: &mut VecZnx<D>, a_col: usize, tmp_bytes: &mut [u8]) {
fn normalize<D: AsMut<[u8]> + AsRef<[u8]>>(basek: usize, a: &mut VecZnx<D>, a_col: usize, tmp_bytes: &mut [u8]) {
    let n: usize = a.n();

    debug_assert!(
@@ -187,7 +227,7 @@ fn normalize<D: AsMut<[u8]> + AsRef<[u8]>>(log_base2k: usize, a: &mut VecZnx<D>,
    (0..a.size()).rev().for_each(|i| {
        znx::znx_normalize(
            n as u64,
            log_base2k as u64,
            basek as u64,
            a.at_mut_ptr(a_col, i),
            carry_i64.as_mut_ptr(),
            a.at_mut_ptr(a_col, i),
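For reference, the effect of `znx::znx_normalize` on one coefficient, sketched in plain Rust: starting from the least significant limb, every digit is folded into the balanced range [-2^(basek-1), 2^(basek-1)) and the excess is carried into the next, more significant limb. The FFI call above does this across the whole column at once; this sketch is illustrative, not the spqlios kernel:

// Normalize signed base-2^basek digits (most significant first) to balanced form.
fn normalize_sketch(limbs: &mut [i64], basek: usize) {
    let base = 1i64 << basek;
    let half = 1i64 << (basek - 1);
    let mut carry: i64 = 0;
    for x in limbs.iter_mut().rev() {
        let v = *x + carry;
        let mut digit = v & (base - 1); // low basek bits, in [0, 2^basek)
        if digit >= half {
            digit -= base;              // recenter into [-2^(basek-1), 2^(basek-1))
        }
        carry = (v - digit) >> basek;
        *x = digit;
    }
    // a non-zero final carry would exceed the available precision and is dropped here
}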
@@ -123,11 +123,11 @@ pub trait VecZnxBigOps<BACKEND: Backend> {
|
|||||||
///
|
///
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
///
|
///
|
||||||
/// * `log_base2k`: normalization basis.
|
/// * `basek`: normalization basis.
|
||||||
/// * `tmp_bytes`: scratch space of size at least [VecZnxBigOps::vec_znx_big_normalize].
|
/// * `tmp_bytes`: scratch space of size at least [VecZnxBigOps::vec_znx_big_normalize].
|
||||||
fn vec_znx_big_normalize<R, A>(
|
fn vec_znx_big_normalize<R, A>(
|
||||||
&self,
|
&self,
|
||||||
log_base2k: usize,
|
basek: usize,
|
||||||
res: &mut R,
|
res: &mut R,
|
||||||
res_col: usize,
|
res_col: usize,
|
||||||
a: &A,
|
a: &A,
|
||||||
@@ -532,7 +532,7 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
|
|||||||
|
|
||||||
fn vec_znx_big_normalize<R, A>(
|
fn vec_znx_big_normalize<R, A>(
|
||||||
&self,
|
&self,
|
||||||
log_base2k: usize,
|
basek: usize,
|
||||||
res: &mut R,
|
res: &mut R,
|
||||||
res_col: usize,
|
res_col: usize,
|
||||||
a: &A,
|
a: &A,
|
||||||
@@ -561,7 +561,7 @@ impl VecZnxBigOps<FFT64> for Module<FFT64> {
|
|||||||
unsafe {
|
unsafe {
|
||||||
vec_znx::vec_znx_normalize_base2k(
|
vec_znx::vec_znx_normalize_base2k(
|
||||||
self.ptr,
|
self.ptr,
|
||||||
log_base2k as u64,
|
basek as u64,
|
||||||
res.at_mut_ptr(res_col, 0),
|
res.at_mut_ptr(res_col, 0),
|
||||||
res.size() as u64,
|
res.size() as u64,
|
||||||
res.sl() as u64,
|
res.sl() as u64,
|
||||||
@@ -35,13 +35,13 @@ pub trait VecZnxAlloc {
|
|||||||
|
|
||||||
pub trait VecZnxOps {
|
pub trait VecZnxOps {
|
||||||
/// Normalizes the selected column of `a` and stores the result into the selected column of `res`.
|
/// Normalizes the selected column of `a` and stores the result into the selected column of `res`.
|
||||||
fn vec_znx_normalize<R, A>(&self, log_base2k: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch)
|
fn vec_znx_normalize<R, A>(&self, basek: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch)
|
||||||
where
|
where
|
||||||
R: VecZnxToMut,
|
R: VecZnxToMut,
|
||||||
A: VecZnxToRef;
|
A: VecZnxToRef;
|
||||||
|
|
||||||
/// Normalizes the selected column of `a`.
|
/// Normalizes the selected column of `a`.
|
||||||
fn vec_znx_normalize_inplace<A>(&self, log_base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch)
|
fn vec_znx_normalize_inplace<A>(&self, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch)
|
||||||
where
|
where
|
||||||
A: VecZnxToMut;
|
A: VecZnxToMut;
|
||||||
|
|
||||||
@@ -174,7 +174,7 @@ impl<B: Backend> VecZnxAlloc for Module<B> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<BACKEND: Backend> VecZnxOps for Module<BACKEND> {
|
impl<BACKEND: Backend> VecZnxOps for Module<BACKEND> {
|
||||||
fn vec_znx_normalize<R, A>(&self, log_base2k: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch)
|
fn vec_znx_normalize<R, A>(&self, basek: usize, res: &mut R, res_col: usize, a: &A, a_col: usize, scratch: &mut Scratch)
|
||||||
where
|
where
|
||||||
R: VecZnxToMut,
|
R: VecZnxToMut,
|
||||||
A: VecZnxToRef,
|
A: VecZnxToRef,
|
||||||
@@ -193,7 +193,7 @@ impl<BACKEND: Backend> VecZnxOps for Module<BACKEND> {
|
|||||||
unsafe {
|
unsafe {
|
||||||
vec_znx::vec_znx_normalize_base2k(
|
vec_znx::vec_znx_normalize_base2k(
|
||||||
self.ptr,
|
self.ptr,
|
||||||
log_base2k as u64,
|
basek as u64,
|
||||||
res.at_mut_ptr(res_col, 0),
|
res.at_mut_ptr(res_col, 0),
|
||||||
res.size() as u64,
|
res.size() as u64,
|
||||||
res.sl() as u64,
|
res.sl() as u64,
|
||||||
@@ -205,7 +205,7 @@ impl<BACKEND: Backend> VecZnxOps for Module<BACKEND> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn vec_znx_normalize_inplace<A>(&self, log_base2k: usize, a: &mut A, a_col: usize, scratch: &mut Scratch)
|
fn vec_znx_normalize_inplace<A>(&self, basek: usize, a: &mut A, a_col: usize, scratch: &mut Scratch)
|
||||||
where
|
where
|
||||||
A: VecZnxToMut,
|
A: VecZnxToMut,
|
||||||
{
|
{
|
||||||
@@ -221,7 +221,7 @@ impl<BACKEND: Backend> VecZnxOps for Module<BACKEND> {
|
|||||||
unsafe {
|
unsafe {
|
||||||
vec_znx::vec_znx_normalize_base2k(
|
vec_znx::vec_znx_normalize_base2k(
|
||||||
self.ptr,
|
self.ptr,
|
||||||
log_base2k as u64,
|
basek as u64,
|
||||||
a.at_mut_ptr(a_col, 0),
|
a.at_mut_ptr(a_col, 0),
|
||||||
a.size() as u64,
|
a.size() as u64,
|
||||||
a.sl() as u64,
|
a.sl() as u64,
|
||||||
@@ -150,7 +150,7 @@ impl Integer for i128 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//(Jay)Note: `rsh` impl. ignores the column
|
//(Jay)Note: `rsh` impl. ignores the column
|
||||||
pub fn rsh<V: ZnxZero>(k: usize, log_base2k: usize, a: &mut V, _a_col: usize, scratch: &mut Scratch)
|
pub fn rsh<V: ZnxZero>(k: usize, basek: usize, a: &mut V, _a_col: usize, scratch: &mut Scratch)
|
||||||
where
|
where
|
||||||
V::Scalar: From<usize> + Integer + Zero,
|
V::Scalar: From<usize> + Integer + Zero,
|
||||||
{
|
{
|
||||||
@@ -159,7 +159,7 @@ where
|
|||||||
let cols: usize = a.cols();
|
let cols: usize = a.cols();
|
||||||
|
|
||||||
let size: usize = a.size();
|
let size: usize = a.size();
|
||||||
let steps: usize = k / log_base2k;
|
let steps: usize = k / basek;
|
||||||
|
|
||||||
a.raw_mut().rotate_right(n * steps * cols);
|
a.raw_mut().rotate_right(n * steps * cols);
|
||||||
(0..cols).for_each(|i| {
|
(0..cols).for_each(|i| {
|
||||||
@@ -168,7 +168,7 @@ where
|
|||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
let k_rem: usize = k % log_base2k;
|
let k_rem: usize = k % basek;
|
||||||
|
|
||||||
if k_rem != 0 {
|
if k_rem != 0 {
|
||||||
let (carry, _) = scratch.tmp_slice::<V::Scalar>(rsh_tmp_bytes::<V::Scalar>(n));
|
let (carry, _) = scratch.tmp_slice::<V::Scalar>(rsh_tmp_bytes::<V::Scalar>(n));
|
||||||
@@ -177,14 +177,14 @@ where
|
|||||||
std::ptr::write_bytes(carry.as_mut_ptr(), 0, n * size_of::<V::Scalar>());
|
std::ptr::write_bytes(carry.as_mut_ptr(), 0, n * size_of::<V::Scalar>());
|
||||||
}
|
}
|
||||||
|
|
||||||
let log_base2k_t = V::Scalar::from(log_base2k);
|
let basek_t = V::Scalar::from(basek);
|
||||||
let shift = V::Scalar::from(V::Scalar::BITS as usize - k_rem);
|
let shift = V::Scalar::from(V::Scalar::BITS as usize - k_rem);
|
||||||
let k_rem_t = V::Scalar::from(k_rem);
|
let k_rem_t = V::Scalar::from(k_rem);
|
||||||
|
|
||||||
(0..cols).for_each(|i| {
|
(0..cols).for_each(|i| {
|
||||||
(steps..size).for_each(|j| {
|
(steps..size).for_each(|j| {
|
||||||
izip!(carry.iter_mut(), a.at_mut(i, j).iter_mut()).for_each(|(ci, xi)| {
|
izip!(carry.iter_mut(), a.at_mut(i, j).iter_mut()).for_each(|(ci, xi)| {
|
||||||
*xi += *ci << log_base2k_t;
|
*xi += *ci << basek_t;
|
||||||
*ci = (*xi << shift) >> shift;
|
*ci = (*xi << shift) >> shift;
|
||||||
*xi = (*xi - *ci) >> k_rem_t;
|
*xi = (*xi - *ci) >> k_rem_t;
|
||||||
});
|
});
|
||||||
@@ -1,12 +1,12 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "rlwe"
|
name = "core"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
rug = {workspace = true}
|
rug = {workspace = true}
|
||||||
criterion = {workspace = true}
|
criterion = {workspace = true}
|
||||||
base2k = {path="../base2k"}
|
backend = {path="../backend"}
|
||||||
sampling = {path="../sampling"}
|
sampling = {path="../sampling"}
|
||||||
rand_distr = {workspace = true}
|
rand_distr = {workspace = true}
|
||||||
itertools = {workspace = true}
|
itertools = {workspace = true}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{FFT64, Module, ScalarZnxAlloc, ScratchOwned};
|
use backend::{Module, ScalarZnx, ScalarZnxAlloc, ScratchOwned, FFT64};
|
||||||
use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
|
use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
|
||||||
use rlwe::{
|
use rlwe::{
|
||||||
elem::Infos,
|
elem::Infos,
|
||||||
@@ -32,10 +32,10 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
|
|||||||
let rows: usize = (p.k_ct_in + p.basek - 1) / p.basek;
|
let rows: usize = (p.k_ct_in + p.basek - 1) / p.basek;
|
||||||
let sigma: f64 = 3.2;
|
let sigma: f64 = 3.2;
|
||||||
|
|
||||||
let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
|
let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
|
||||||
let mut ct_rlwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_in, rank);
|
let mut ct_rlwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank);
|
||||||
let mut ct_rlwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_out, rank);
|
let mut ct_rlwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank);
|
||||||
let pt_rgsw: base2k::ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
|
let pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
|
||||||
|
|
||||||
let mut scratch = ScratchOwned::new(
|
let mut scratch = ScratchOwned::new(
|
||||||
GGSWCiphertext::encrypt_sk_scratch_space(&module, rank, ct_rgsw.size())
|
GGSWCiphertext::encrypt_sk_scratch_space(&module, rank, ct_rgsw.size())
|
||||||
@@ -53,9 +53,9 @@ fn bench_external_product_glwe_fft64(c: &mut Criterion) {
|
|||||||
let mut source_xe = Source::new([0u8; 32]);
|
let mut source_xe = Source::new([0u8; 32]);
|
||||||
let mut source_xa = Source::new([0u8; 32]);
|
let mut source_xa = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
ct_rgsw.encrypt_sk(
|
ct_rgsw.encrypt_sk(
|
||||||
@@ -127,9 +127,9 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
|
|||||||
let rows: usize = (p.k_ct + p.basek - 1) / p.basek;
|
let rows: usize = (p.k_ct + p.basek - 1) / p.basek;
|
||||||
let sigma: f64 = 3.2;
|
let sigma: f64 = 3.2;
|
||||||
|
|
||||||
let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
|
let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
|
||||||
let mut ct_rlwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_glwe, rank);
|
let mut ct_rlwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_glwe, rank);
|
||||||
let pt_rgsw: base2k::ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
|
let pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
|
||||||
|
|
||||||
let mut scratch = ScratchOwned::new(
|
let mut scratch = ScratchOwned::new(
|
||||||
GGSWCiphertext::encrypt_sk_scratch_space(&module, rank, ct_rgsw.size())
|
GGSWCiphertext::encrypt_sk_scratch_space(&module, rank, ct_rgsw.size())
|
||||||
@@ -141,9 +141,9 @@ fn bench_external_product_glwe_inplace_fft64(c: &mut Criterion) {
|
|||||||
let mut source_xe = Source::new([0u8; 32]);
|
let mut source_xe = Source::new([0u8; 32]);
|
||||||
let mut source_xa = Source::new([0u8; 32]);
|
let mut source_xa = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
ct_rgsw.encrypt_sk(
|
ct_rgsw.encrypt_sk(
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{FFT64, Module, ScratchOwned};
|
use backend::{FFT64, Module, ScratchOwned};
|
||||||
use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
|
use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
|
||||||
use rlwe::{
|
use rlwe::{
|
||||||
elem::Infos,
|
elem::Infos,
|
||||||
@@ -34,9 +34,9 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
|
|||||||
let rows: usize = (p.k_ct_in + p.basek - 1) / p.basek;
|
let rows: usize = (p.k_ct_in + p.basek - 1) / p.basek;
|
||||||
let sigma: f64 = 3.2;
|
let sigma: f64 = 3.2;
|
||||||
|
|
||||||
let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_grlwe, rows, rank_in, rank_out);
|
let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_grlwe, rows, rank_in, rank_out);
|
||||||
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_rlwe_in, rank_in);
|
let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_rlwe_in, rank_in);
|
||||||
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_rlwe_out, rank_out);
|
let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_rlwe_out, rank_out);
|
||||||
|
|
||||||
let mut scratch = ScratchOwned::new(
|
let mut scratch = ScratchOwned::new(
|
||||||
GLWESwitchingKey::encrypt_sk_scratch_space(&module, rank_out, ksk.size())
|
GLWESwitchingKey::encrypt_sk_scratch_space(&module, rank_out, ksk.size())
|
||||||
@@ -55,14 +55,14 @@ fn bench_keyswitch_glwe_fft64(c: &mut Criterion) {
|
|||||||
let mut source_xe = Source::new([0u8; 32]);
|
let mut source_xe = Source::new([0u8; 32]);
|
||||||
let mut source_xa = Source::new([0u8; 32]);
|
let mut source_xa = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in);
|
let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in);
|
||||||
sk_in.fill_ternary_prob(0.5, &mut source_xs);
|
sk_in.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in);
|
let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in);
|
||||||
sk_in_dft.dft(&module, &sk_in);
|
sk_in_dft.dft(&module, &sk_in);
|
||||||
|
|
||||||
let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out);
|
let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out);
|
||||||
sk_out.fill_ternary_prob(0.5, &mut source_xs);
|
sk_out.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out);
|
let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out);
|
||||||
sk_out_dft.dft(&module, &sk_out);
|
sk_out_dft.dft(&module, &sk_out);
|
||||||
|
|
||||||
ksk.encrypt_sk(
|
ksk.encrypt_sk(
|
||||||
@@ -135,8 +135,8 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
|
|||||||
let rows: usize = (p.k_ct + p.basek - 1) / p.basek;
|
let rows: usize = (p.k_ct + p.basek - 1) / p.basek;
|
||||||
let sigma: f64 = 3.2;
|
let sigma: f64 = 3.2;
|
||||||
|
|
||||||
let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank, rank);
|
let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank, rank);
|
||||||
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
|
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
|
||||||
|
|
||||||
let mut scratch = ScratchOwned::new(
|
let mut scratch = ScratchOwned::new(
|
||||||
GLWESwitchingKey::encrypt_sk_scratch_space(&module, rank, ksk.size())
|
GLWESwitchingKey::encrypt_sk_scratch_space(&module, rank, ksk.size())
|
||||||
@@ -148,14 +148,14 @@ fn bench_keyswitch_glwe_inplace_fft64(c: &mut Criterion) {
|
|||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk_in.fill_ternary_prob(0.5, &mut source_xs);
|
sk_in.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_in_dft.dft(&module, &sk_in);
|
sk_in_dft.dft(&module, &sk_in);
|
||||||
|
|
||||||
let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk_out.fill_ternary_prob(0.5, &mut source_xs);
|
sk_out.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_out_dft.dft(&module, &sk_out);
|
sk_out_dft.dft(&module, &sk_out);
|
||||||
|
|
||||||
ksk.encrypt_sk(
|
ksk.encrypt_sk(
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, MatZnxDft, MatZnxDftOps, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDftOps, ScalarZnxOps,
|
Backend, FFT64, MatZnxDft, MatZnxDftOps, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDftOps, ScalarZnxOps,
|
||||||
ScalarZnxToRef, Scratch, VecZnx, VecZnxBigAlloc, VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps,
|
ScalarZnxToRef, Scratch, VecZnx, VecZnxBigAlloc, VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps,
|
||||||
ZnxZero,
|
ZnxZero,
|
||||||
@@ -21,9 +21,9 @@ pub struct AutomorphismKey<Data, B: Backend> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl AutomorphismKey<Vec<u8>, FFT64> {
|
impl AutomorphismKey<Vec<u8>, FFT64> {
|
||||||
pub fn new(module: &Module<FFT64>, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
|
pub fn alloc(module: &Module<FFT64>, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
|
||||||
AutomorphismKey {
|
AutomorphismKey {
|
||||||
key: GLWESwitchingKey::new(module, basek, k, rows, rank, rank),
|
key: GLWESwitchingKey::alloc(module, basek, k, rows, rank, rank),
|
||||||
p: 0,
|
p: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -106,12 +106,12 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl AutomorphismKey<Vec<u8>, FFT64> {
|
impl AutomorphismKey<Vec<u8>, FFT64> {
|
||||||
pub fn encrypt_sk_scratch_space(module: &Module<FFT64>, rank: usize, size: usize) -> usize {
|
pub fn generate_from_sk_scratch_space(module: &Module<FFT64>, rank: usize, size: usize) -> usize {
|
||||||
GGLWECiphertext::encrypt_sk_scratch_space(module, rank, size)
|
GGLWECiphertext::generate_from_sk_scratch_space(module, rank, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn encrypt_pk_scratch_space(module: &Module<FFT64>, rank: usize, pk_size: usize) -> usize {
|
pub fn generate_from_pk_scratch_space(module: &Module<FFT64>, rank: usize, pk_size: usize) -> usize {
|
||||||
GGLWECiphertext::encrypt_pk_scratch_space(module, rank, pk_size)
|
GGLWECiphertext::generate_from_pk_scratch_space(module, rank, pk_size)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn keyswitch_scratch_space(
|
pub fn keyswitch_scratch_space(
|
||||||
@@ -170,7 +170,7 @@ impl<DataSelf> AutomorphismKey<DataSelf, FFT64>
|
|||||||
where
|
where
|
||||||
MatZnxDft<DataSelf, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataSelf, FFT64>: MatZnxDftToMut<FFT64> + MatZnxDftToRef<FFT64>,
|
||||||
{
|
{
|
||||||
pub fn encrypt_sk<DataSk>(
|
pub fn generate_from_sk<DataSk>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
p: i64,
|
p: i64,
|
||||||
@@ -228,7 +228,7 @@ where
|
|||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
lhs: &AutomorphismKey<DataLhs, FFT64>,
|
lhs: &AutomorphismKey<DataLhs, FFT64>,
|
||||||
rhs: &AutomorphismKey<DataRhs, FFT64>,
|
rhs: &AutomorphismKey<DataRhs, FFT64>,
|
||||||
scratch: &mut base2k::Scratch,
|
scratch: &mut Scratch,
|
||||||
) where
|
) where
|
||||||
MatZnxDft<DataLhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataLhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
@@ -341,7 +341,7 @@ where
|
|||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
lhs: &AutomorphismKey<DataLhs, FFT64>,
|
lhs: &AutomorphismKey<DataLhs, FFT64>,
|
||||||
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
||||||
scratch: &mut base2k::Scratch,
|
scratch: &mut Scratch,
|
||||||
) where
|
) where
|
||||||
MatZnxDft<DataLhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataLhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
@@ -352,12 +352,12 @@ where
|
|||||||
pub fn keyswitch_inplace<DataRhs>(
|
pub fn keyswitch_inplace<DataRhs>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
rhs: &AutomorphismKey<DataRhs, FFT64>,
|
||||||
scratch: &mut base2k::Scratch,
|
scratch: &mut Scratch,
|
||||||
) where
|
) where
|
||||||
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
{
|
{
|
||||||
self.key.keyswitch_inplace(module, &rhs, scratch);
|
self.key.keyswitch_inplace(module, &rhs.key, scratch);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn external_product<DataLhs, DataRhs>(
|
pub fn external_product<DataLhs, DataRhs>(
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{Backend, Module, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos};
|
use backend::{Backend, Module, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos};
|
||||||
|
|
||||||
use crate::utils::derive_size;
|
use crate::utils::derive_size;
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, MatZnxDft, MatZnxDftAlloc, MatZnxDftOps, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDft,
|
Backend, FFT64, MatZnxDft, MatZnxDftAlloc, MatZnxDftOps, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDft,
|
||||||
ScalarZnxDftToRef, ScalarZnxToRef, Scratch, VecZnxAlloc, VecZnxDftAlloc, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps, ZnxInfos,
|
ScalarZnxDftToRef, ScalarZnxToRef, Scratch, VecZnxAlloc, VecZnxDftAlloc, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps, ZnxInfos,
|
||||||
ZnxZero,
|
ZnxZero,
|
||||||
@@ -21,7 +21,7 @@ pub struct GGLWECiphertext<C, B: Backend> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<B: Backend> GGLWECiphertext<Vec<u8>, B> {
|
impl<B: Backend> GGLWECiphertext<Vec<u8>, B> {
|
||||||
pub fn new(module: &Module<B>, basek: usize, k: usize, rows: usize, rank_in: usize, rank_out: usize) -> Self {
|
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, rank_in: usize, rank_out: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_mat_znx_dft(rows, rank_in, rank_out + 1, derive_size(basek, k)),
|
data: module.new_mat_znx_dft(rows, rank_in, rank_out + 1, derive_size(basek, k)),
|
||||||
basek: basek,
|
basek: basek,
|
||||||
@@ -79,14 +79,14 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GGLWECiphertext<Vec<u8>, FFT64> {
|
impl GGLWECiphertext<Vec<u8>, FFT64> {
|
||||||
pub fn encrypt_sk_scratch_space(module: &Module<FFT64>, rank: usize, size: usize) -> usize {
|
pub fn generate_from_sk_scratch_space(module: &Module<FFT64>, rank: usize, size: usize) -> usize {
|
||||||
GLWECiphertext::encrypt_sk_scratch_space(module, size)
|
GLWECiphertext::encrypt_sk_scratch_space(module, size)
|
||||||
+ module.bytes_of_vec_znx(rank + 1, size)
|
+ module.bytes_of_vec_znx(rank + 1, size)
|
||||||
+ module.bytes_of_vec_znx(1, size)
|
+ module.bytes_of_vec_znx(1, size)
|
||||||
+ module.bytes_of_vec_znx_dft(rank + 1, size)
|
+ module.bytes_of_vec_znx_dft(rank + 1, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn encrypt_pk_scratch_space(_module: &Module<FFT64>, _rank: usize, _pk_size: usize) -> usize {
|
pub fn generate_from_pk_scratch_space(_module: &Module<FFT64>, _rank: usize, _pk_size: usize) -> usize {
|
||||||
unimplemented!()
|
unimplemented!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -95,7 +95,7 @@ impl<DataSelf> GGLWECiphertext<DataSelf, FFT64>
|
|||||||
where
|
where
|
||||||
MatZnxDft<DataSelf, FFT64>: MatZnxDftToMut<FFT64> + ZnxInfos,
|
MatZnxDft<DataSelf, FFT64>: MatZnxDftToMut<FFT64> + ZnxInfos,
|
||||||
{
|
{
|
||||||
pub fn encrypt_sk<DataPt, DataSk>(
|
pub fn generate_from_sk<DataPt, DataSk>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
pt: &ScalarZnx<DataPt>,
|
pt: &ScalarZnx<DataPt>,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, MatZnxDft, MatZnxDftAlloc, MatZnxDftOps, MatZnxDftScratch, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx,
|
Backend, FFT64, MatZnxDft, MatZnxDftAlloc, MatZnxDftOps, MatZnxDftScratch, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx,
|
||||||
ScalarZnxDft, ScalarZnxDftToRef, ScalarZnxToRef, Scratch, VecZnx, VecZnxAlloc, VecZnxBigAlloc, VecZnxBigOps,
|
ScalarZnxDft, ScalarZnxDftToRef, ScalarZnxToRef, Scratch, VecZnx, VecZnxAlloc, VecZnxBigAlloc, VecZnxBigOps,
|
||||||
VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps, VecZnxToMut,
|
VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps, VecZnxToMut,
|
||||||
@@ -25,7 +25,7 @@ pub struct GGSWCiphertext<C, B: Backend> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<B: Backend> GGSWCiphertext<Vec<u8>, B> {
|
impl<B: Backend> GGSWCiphertext<Vec<u8>, B> {
|
||||||
pub fn new(module: &Module<B>, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
|
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_mat_znx_dft(rows, rank + 1, rank + 1, derive_size(basek, k)),
|
data: module.new_mat_znx_dft(rows, rank + 1, rank + 1, derive_size(basek, k)),
|
||||||
basek: basek,
|
basek: basek,
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
AddNormal, Backend, FFT64, FillUniform, MatZnxDft, MatZnxDftOps, MatZnxDftScratch, MatZnxDftToRef, Module, ScalarZnxAlloc,
|
AddNormal, Backend, FFT64, FillUniform, MatZnxDft, MatZnxDftOps, MatZnxDftScratch, MatZnxDftToRef, Module, ScalarZnxAlloc,
|
||||||
ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScalarZnxDftToRef, Scratch, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc,
|
ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScalarZnxDftToRef, Scratch, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc,
|
||||||
VecZnxBigOps, VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps,
|
VecZnxBigOps, VecZnxBigScratch, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxOps,
|
||||||
VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxZero,
|
VecZnxToMut, VecZnxToRef, ZnxInfos, ZnxZero, copy_vec_znx_from,
|
||||||
};
|
};
|
||||||
use sampling::source::Source;
|
use sampling::source::Source;
|
||||||
|
|
||||||
@@ -25,7 +25,7 @@ pub struct GLWECiphertext<C> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GLWECiphertext<Vec<u8>> {
|
impl GLWECiphertext<Vec<u8>> {
|
||||||
pub fn new<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self {
|
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_vec_znx(rank + 1, derive_size(basek, k)),
|
data: module.new_vec_znx(rank + 1, derive_size(basek, k)),
|
||||||
basek,
|
basek,
|
||||||
@@ -281,6 +281,21 @@ where
|
|||||||
self.encrypt_pk_private(module, None, pk, source_xu, source_xe, sigma, scratch);
|
self.encrypt_pk_private(module, None, pk, source_xu, source_xe, sigma, scratch);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn copy<DataOther>(&mut self, other: &GLWECiphertext<DataOther>)
|
||||||
|
where
|
||||||
|
VecZnx<DataOther>: VecZnxToRef,
|
||||||
|
{
|
||||||
|
copy_vec_znx_from(&mut self.data.to_mut(), &other.to_ref());
|
||||||
|
self.k = other.k;
|
||||||
|
self.basek = other.basek;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn rsh(&mut self, k: usize, scratch: &mut Scratch) {
|
||||||
|
let basek: usize = self.basek();
|
||||||
|
let mut self_mut: VecZnx<&mut [u8]> = self.data.to_mut();
|
||||||
|
self_mut.rsh(basek, k, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
pub fn automorphism<DataLhs, DataRhs>(
|
pub fn automorphism<DataLhs, DataRhs>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
@@ -311,6 +326,33 @@ where
        })
    }

    pub fn automorphism_add<DataLhs, DataRhs>(
        &mut self,
        module: &Module<FFT64>,
        lhs: &GLWECiphertext<DataLhs>,
        rhs: &AutomorphismKey<DataRhs, FFT64>,
        scratch: &mut Scratch,
    ) where
        VecZnx<DataLhs>: VecZnxToRef,
        MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
    {
        Self::keyswitch_private(self, true, rhs.p(), module, lhs, &rhs.key, scratch);
    }

    pub fn automorphism_add_inplace<DataRhs>(
        &mut self,
        module: &Module<FFT64>,
        rhs: &AutomorphismKey<DataRhs, FFT64>,
        scratch: &mut Scratch,
    ) where
        MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
    {
        unsafe {
            let self_ptr: *mut GLWECiphertext<DataSelf> = self as *mut GLWECiphertext<DataSelf>;
            Self::keyswitch_private(self, true, rhs.p(), module, &*self_ptr, &rhs.key, scratch);
        }
    }

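A usage sketch for the two new entry points, assuming the API exactly as introduced in this diff (the ciphertexts, automorphism key and scratch are presumed to have been allocated and encrypted as in the benches and tests elsewhere in this change):

// Applies the automorphism encoded by `auto_key` to `ct_in` through the key-switch
// pipeline and adds `ct_in` back onto the result (keyswitch_private with add_self = true).
ct_out.automorphism_add(&module, &ct_in, &auto_key, scratch.borrow());

// Same operation, overwriting the input ciphertext in place.
ct_in.automorphism_add_inplace(&module, &auto_key, scratch.borrow());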
pub(crate) fn keyswitch_from_fourier<DataLhs, DataRhs>(
|
pub(crate) fn keyswitch_from_fourier<DataLhs, DataRhs>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
@@ -379,6 +421,21 @@ where
|
|||||||
) where
|
) where
|
||||||
VecZnx<DataLhs>: VecZnxToRef,
|
VecZnx<DataLhs>: VecZnxToRef,
|
||||||
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
|
{
|
||||||
|
Self::keyswitch_private(self, false, 0, module, lhs, rhs, scratch);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn keyswitch_private<DataLhs, DataRhs>(
|
||||||
|
&mut self,
|
||||||
|
add_self: bool,
|
||||||
|
apply_auto: i64,
|
||||||
|
module: &Module<FFT64>,
|
||||||
|
lhs: &GLWECiphertext<DataLhs>,
|
||||||
|
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
||||||
|
scratch: &mut Scratch,
|
||||||
|
) where
|
||||||
|
VecZnx<DataLhs>: VecZnxToRef,
|
||||||
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
{
|
{
|
||||||
let basek: usize = self.basek();
|
let basek: usize = self.basek();
|
||||||
|
|
||||||
@@ -422,6 +479,13 @@ where
|
|||||||
module.vec_znx_big_add_small_inplace(&mut res_big, 0, lhs, 0);
|
module.vec_znx_big_add_small_inplace(&mut res_big, 0, lhs, 0);
|
||||||
|
|
||||||
(0..cols_out).for_each(|i| {
|
(0..cols_out).for_each(|i| {
|
||||||
|
if apply_auto != 0 {
|
||||||
|
module.vec_znx_big_automorphism_inplace(apply_auto, &mut res_big, i);
|
||||||
|
}
|
||||||
|
|
||||||
|
if add_self {
|
||||||
|
module.vec_znx_big_add_small_inplace(&mut res_big, i, lhs, i);
|
||||||
|
}
|
||||||
module.vec_znx_big_normalize(basek, self, i, &res_big, i, scratch1);
|
module.vec_znx_big_normalize(basek, self, i, &res_big, i, scratch1);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
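The two flags of `keyswitch_private` select among the public wrappers: `add_self` re-adds the input ciphertext onto the key-switched result, and a non-zero `apply_auto` applies the Galois automorphism X -> X^apply_auto to every output column before normalization. The flag combinations visible in this diff, for reference:

// keyswitch(..)                -> keyswitch_private(self, false, 0,       module, lhs, rhs,      scratch)
// automorphism_add(..)         -> keyswitch_private(self, true,  rhs.p(), module, lhs, &rhs.key, scratch)
// automorphism_add_inplace(..) -> as above, with `lhs` aliased to `self` through a raw pointer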
@@ -520,8 +584,8 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let log_base2k: usize = self.basek();
|
let basek: usize = self.basek();
|
||||||
let log_k: usize = self.k();
|
let k: usize = self.k();
|
||||||
let size: usize = self.size();
|
let size: usize = self.size();
|
||||||
let cols: usize = self.rank() + 1;
|
let cols: usize = self.rank() + 1;
|
||||||
|
|
||||||
@@ -535,7 +599,7 @@ where
|
|||||||
let (mut ci_dft, scratch_2) = scratch_1.tmp_vec_znx_dft(module, 1, size);
|
let (mut ci_dft, scratch_2) = scratch_1.tmp_vec_znx_dft(module, 1, size);
|
||||||
|
|
||||||
// c[i] = uniform
|
// c[i] = uniform
|
||||||
self.data.fill_uniform(log_base2k, i, size, source_xa);
|
self.data.fill_uniform(basek, i, size, source_xa);
|
||||||
|
|
||||||
// c[i] = norm(IDFT(DFT(c[i]) * DFT(s[i])))
|
// c[i] = norm(IDFT(DFT(c[i]) * DFT(s[i])))
|
||||||
module.vec_znx_dft(&mut ci_dft, 0, self, i);
|
module.vec_znx_dft(&mut ci_dft, 0, self, i);
|
||||||
@@ -543,7 +607,7 @@ where
|
|||||||
let ci_big: VecZnxBig<&mut [u8], FFT64> = module.vec_znx_idft_consume(ci_dft);
|
let ci_big: VecZnxBig<&mut [u8], FFT64> = module.vec_znx_idft_consume(ci_dft);
|
||||||
|
|
||||||
// use c[0] as buffer, which is overwritten later by the normalization step
|
// use c[0] as buffer, which is overwritten later by the normalization step
|
||||||
module.vec_znx_big_normalize(log_base2k, self, 0, &ci_big, 0, scratch_2);
|
module.vec_znx_big_normalize(basek, self, 0, &ci_big, 0, scratch_2);
|
||||||
|
|
||||||
// c0_tmp = -c[i] * s[i] (use c[0] as buffer)
|
// c0_tmp = -c[i] * s[i] (use c[0] as buffer)
|
||||||
module.vec_znx_sub_ab_inplace(&mut c0_big, 0, self, 0);
|
module.vec_znx_sub_ab_inplace(&mut c0_big, 0, self, 0);
|
||||||
@@ -552,14 +616,14 @@ where
|
|||||||
if let Some((pt, col)) = pt {
|
if let Some((pt, col)) = pt {
|
||||||
if i == col {
|
if i == col {
|
||||||
module.vec_znx_add_inplace(self, i, pt, 0);
|
module.vec_znx_add_inplace(self, i, pt, 0);
|
||||||
module.vec_znx_normalize_inplace(log_base2k, self, i, scratch_2);
|
module.vec_znx_normalize_inplace(basek, self, i, scratch_2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// c[0] += e
|
// c[0] += e
|
||||||
c0_big.add_normal(log_base2k, 0, log_k, source_xe, sigma, sigma * SIX_SIGMA);
|
c0_big.add_normal(basek, 0, k, source_xe, sigma, sigma * SIX_SIGMA);
|
||||||
|
|
||||||
// c[0] += m if col = 0
|
// c[0] += m if col = 0
|
||||||
if let Some((pt, col)) = pt {
|
if let Some((pt, col)) = pt {
|
||||||
@@ -569,7 +633,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
// c[0] = norm(c[0])
|
// c[0] = norm(c[0])
|
||||||
module.vec_znx_normalize(log_base2k, self, 0, &c0_big, 0, scratch_1);
|
module.vec_znx_normalize(basek, self, 0, &c0_big, 0, scratch_1);
|
||||||
}
|
}
|
||||||
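Putting the comments above together, the secret-key encryption fills c[i] (i >= 1) with uniform limbs and accumulates c[0] = -sum_i c[i]*s[i] + m + e. A toy, non-cryptographic sketch of that shape over Z_q[X]/(X^n + 1) with naive negacyclic multiplication; all names, parameters and the modulus handling here are illustrative, not the crate's API:

// Toy GLWE-style encryption shape: c0 = -sum_i ci * si + m + e (mod q), ci uniform.
fn negacyclic_mul(a: &[i64], b: &[i64], q: i64) -> Vec<i64> {
    let n = a.len();
    let mut out = vec![0i64; n];
    for i in 0..n {
        for j in 0..n {
            // X^n = -1: the coefficient of X^(i+j) wraps around with a sign flip.
            let (k, sign) = if i + j < n { (i + j, 1i64) } else { (i + j - n, -1i64) };
            out[k] = (out[k] + sign * a[i] * b[j]).rem_euclid(q);
        }
    }
    out
}

fn toy_encrypt(m: &[i64], s: &[Vec<i64>], c: &[Vec<i64>], e: &[i64], q: i64) -> Vec<i64> {
    let n = m.len();
    let mut c0 = vec![0i64; n];
    for (ci, si) in c.iter().zip(s) {
        let prod = negacyclic_mul(ci, si, q);
        for k in 0..n {
            c0[k] = (c0[k] - prod[k]).rem_euclid(q); // c0 -= ci * si
        }
    }
    for k in 0..n {
        c0[k] = (c0[k] + m[k] + e[k]).rem_euclid(q); // c0 += m + e
    }
    c0
}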
|
|
||||||
pub(crate) fn encrypt_pk_private<DataPt, DataPk>(
|
pub(crate) fn encrypt_pk_private<DataPt, DataPk>(
|
||||||
@@ -597,7 +661,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let log_base2k: usize = pk.basek();
|
let basek: usize = pk.basek();
|
||||||
let size_pk: usize = pk.size();
|
let size_pk: usize = pk.size();
|
||||||
let cols: usize = self.rank() + 1;
|
let cols: usize = self.rank() + 1;
|
||||||
|
|
||||||
@@ -629,7 +693,7 @@ where
|
|||||||
let mut ci_big = module.vec_znx_idft_consume(ci_dft);
|
let mut ci_big = module.vec_znx_idft_consume(ci_dft);
|
||||||
|
|
||||||
// ci_big = u * pk[i] + e
|
// ci_big = u * pk[i] + e
|
||||||
ci_big.add_normal(log_base2k, 0, pk.k(), source_xe, sigma, sigma * SIX_SIGMA);
|
ci_big.add_normal(basek, 0, pk.k(), source_xe, sigma, sigma * SIX_SIGMA);
|
||||||
|
|
||||||
// ci_big = u * pk[i] + e + m (if col = i)
|
// ci_big = u * pk[i] + e + m (if col = i)
|
||||||
if let Some((pt, col)) = pt {
|
if let Some((pt, col)) = pt {
|
||||||
@@ -639,7 +703,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ct[i] = norm(ci_big)
|
// ct[i] = norm(ci_big)
|
||||||
module.vec_znx_big_normalize(log_base2k, self, i, &ci_big, 0, scratch_2);
|
module.vec_znx_big_normalize(basek, self, i, &ci_big, 0, scratch_2);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, MatZnxDft, MatZnxDftOps, MatZnxDftScratch, MatZnxDftToRef, Module, ScalarZnxDft, ScalarZnxDftOps,
|
Backend, FFT64, MatZnxDft, MatZnxDftOps, MatZnxDftScratch, MatZnxDftToRef, Module, ScalarZnxDft, ScalarZnxDftOps,
|
||||||
ScalarZnxDftToRef, Scratch, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDft,
|
ScalarZnxDftToRef, Scratch, VecZnx, VecZnxAlloc, VecZnxBig, VecZnxBigAlloc, VecZnxBigOps, VecZnxBigScratch, VecZnxDft,
|
||||||
VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxToMut, VecZnxToRef, ZnxZero,
|
VecZnxDftAlloc, VecZnxDftOps, VecZnxDftToMut, VecZnxDftToRef, VecZnxToMut, VecZnxToRef, ZnxZero,
|
||||||
@@ -17,7 +17,7 @@ pub struct GLWECiphertextFourier<C, B: Backend> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<B: Backend> GLWECiphertextFourier<Vec<u8>, B> {
|
impl<B: Backend> GLWECiphertextFourier<Vec<u8>, B> {
|
||||||
pub fn new(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self {
|
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_vec_znx_dft(rank + 1, derive_size(basek, k)),
|
data: module.new_vec_znx_dft(rank + 1, derive_size(basek, k)),
|
||||||
basek: basek,
|
basek: basek,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{Backend, Module, VecZnx, VecZnxAlloc, VecZnxToMut, VecZnxToRef};
|
use backend::{Backend, Module, VecZnx, VecZnxAlloc, VecZnxToMut, VecZnxToRef};
|
||||||
|
|
||||||
use crate::{elem::Infos, utils::derive_size};
|
use crate::{elem::Infos, utils::derive_size};
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GLWEPlaintext<Vec<u8>> {
|
impl GLWEPlaintext<Vec<u8>> {
|
||||||
pub fn new<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> Self {
|
pub fn alloc<B: Backend>(module: &Module<B>, basek: usize, k: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_vec_znx(1, derive_size(basek, k)),
|
data: module.new_vec_znx(1, derive_size(basek, k)),
|
||||||
basek: basek,
|
basek: basek,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScalarZnxDftToMut,
|
Backend, FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScalarZnxDftToMut,
|
||||||
ScalarZnxDftToRef, ScalarZnxToMut, ScalarZnxToRef, ScratchOwned, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos,
|
ScalarZnxDftToRef, ScalarZnxToMut, ScalarZnxToRef, ScratchOwned, VecZnxDft, VecZnxDftToMut, VecZnxDftToRef, ZnxInfos,
|
||||||
ZnxZero,
|
ZnxZero,
|
||||||
@@ -21,7 +21,7 @@ pub struct SecretKey<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SecretKey<Vec<u8>> {
|
impl SecretKey<Vec<u8>> {
|
||||||
pub fn new<B: Backend>(module: &Module<B>, rank: usize) -> Self {
|
pub fn alloc<B: Backend>(module: &Module<B>, rank: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_scalar_znx(rank),
|
data: module.new_scalar_znx(rank),
|
||||||
dist: SecretDistribution::NONE,
|
dist: SecretDistribution::NONE,
|
||||||
@@ -105,7 +105,7 @@ impl<DataSelf, B: Backend> SecretKeyFourier<DataSelf, B> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<B: Backend> SecretKeyFourier<Vec<u8>, B> {
|
impl<B: Backend> SecretKeyFourier<Vec<u8>, B> {
|
||||||
pub fn new(module: &Module<B>, rank: usize) -> Self {
|
pub fn alloc(module: &Module<B>, rank: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: module.new_scalar_znx_dft(rank),
|
data: module.new_scalar_znx_dft(rank),
|
||||||
dist: SecretDistribution::NONE,
|
dist: SecretDistribution::NONE,
|
||||||
@@ -114,7 +114,7 @@ impl<B: Backend> SecretKeyFourier<Vec<u8>, B> {
|
|||||||
|
|
||||||
pub fn dft<S>(&mut self, module: &Module<FFT64>, sk: &SecretKey<S>)
|
pub fn dft<S>(&mut self, module: &Module<FFT64>, sk: &SecretKey<S>)
|
||||||
where
|
where
|
||||||
SecretKeyFourier<Vec<u8>, B>: ScalarZnxDftToMut<base2k::FFT64>,
|
SecretKeyFourier<Vec<u8>, B>: ScalarZnxDftToMut<FFT64>,
|
||||||
SecretKey<S>: ScalarZnxToRef,
|
SecretKey<S>: ScalarZnxToRef,
|
||||||
{
|
{
|
||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
@@ -160,9 +160,9 @@ pub struct GLWEPublicKey<D, B: Backend> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<B: Backend> GLWEPublicKey<Vec<u8>, B> {
|
impl<B: Backend> GLWEPublicKey<Vec<u8>, B> {
|
||||||
pub fn new(module: &Module<B>, log_base2k: usize, log_k: usize, rank: usize) -> Self {
|
pub fn alloc(module: &Module<B>, basek: usize, k: usize, rank: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
data: GLWECiphertextFourier::new(module, log_base2k, log_k, rank),
|
data: GLWECiphertextFourier::alloc(module, basek, k, rank),
|
||||||
dist: SecretDistribution::NONE,
|
dist: SecretDistribution::NONE,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -209,7 +209,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<C> GLWEPublicKey<C, FFT64> {
|
impl<C> GLWEPublicKey<C, FFT64> {
|
||||||
pub fn generate<S>(
|
pub fn generate_from_sk<S>(
|
||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
sk_dft: &SecretKeyFourier<S, FFT64>,
|
sk_dft: &SecretKeyFourier<S, FFT64>,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, MatZnxDft, MatZnxDftOps, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDft, ScalarZnxDftToRef,
|
Backend, FFT64, MatZnxDft, MatZnxDftOps, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDft, ScalarZnxDftToRef,
|
||||||
ScalarZnxToRef, Scratch, VecZnxDftAlloc, VecZnxDftToMut, VecZnxDftToRef, ZnxZero,
|
ScalarZnxToRef, Scratch, VecZnxDftAlloc, VecZnxDftToMut, VecZnxDftToRef, ZnxZero,
|
||||||
};
|
};
|
||||||
@@ -15,8 +15,8 @@ use crate::{
|
|||||||
pub struct GLWESwitchingKey<Data, B: Backend>(pub(crate) GGLWECiphertext<Data, B>);
|
pub struct GLWESwitchingKey<Data, B: Backend>(pub(crate) GGLWECiphertext<Data, B>);
|
||||||
|
|
||||||
impl GLWESwitchingKey<Vec<u8>, FFT64> {
|
impl GLWESwitchingKey<Vec<u8>, FFT64> {
|
||||||
pub fn new(module: &Module<FFT64>, basek: usize, k: usize, rows: usize, rank_in: usize, rank_out: usize) -> Self {
|
pub fn alloc(module: &Module<FFT64>, basek: usize, k: usize, rows: usize, rank_in: usize, rank_out: usize) -> Self {
|
||||||
GLWESwitchingKey(GGLWECiphertext::new(
|
GLWESwitchingKey(GGLWECiphertext::alloc(
|
||||||
module, basek, k, rows, rank_in, rank_out,
|
module, basek, k, rows, rank_in, rank_out,
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
@@ -96,11 +96,11 @@ where
|
|||||||
|
|
||||||
impl GLWESwitchingKey<Vec<u8>, FFT64> {
|
impl GLWESwitchingKey<Vec<u8>, FFT64> {
|
||||||
pub fn encrypt_sk_scratch_space(module: &Module<FFT64>, rank: usize, size: usize) -> usize {
|
pub fn encrypt_sk_scratch_space(module: &Module<FFT64>, rank: usize, size: usize) -> usize {
|
||||||
GGLWECiphertext::encrypt_sk_scratch_space(module, rank, size)
|
GGLWECiphertext::generate_from_sk_scratch_space(module, rank, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn encrypt_pk_scratch_space(module: &Module<FFT64>, rank: usize, pk_size: usize) -> usize {
|
pub fn encrypt_pk_scratch_space(module: &Module<FFT64>, rank: usize, pk_size: usize) -> usize {
|
||||||
GGLWECiphertext::encrypt_pk_scratch_space(module, rank, pk_size)
|
GGLWECiphertext::generate_from_pk_scratch_space(module, rank, pk_size)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn keyswitch_scratch_space(
|
pub fn keyswitch_scratch_space(
|
||||||
@@ -164,7 +164,7 @@ where
|
|||||||
ScalarZnx<DataSkIn>: ScalarZnxToRef,
|
ScalarZnx<DataSkIn>: ScalarZnxToRef,
|
||||||
ScalarZnxDft<DataSkOut, FFT64>: ScalarZnxDftToRef<FFT64>,
|
ScalarZnxDft<DataSkOut, FFT64>: ScalarZnxDftToRef<FFT64>,
|
||||||
{
|
{
|
||||||
self.0.encrypt_sk(
|
self.0.generate_from_sk(
|
||||||
module,
|
module,
|
||||||
&sk_in.data,
|
&sk_in.data,
|
||||||
sk_out_dft,
|
sk_out_dft,
|
||||||
@@ -180,7 +180,7 @@ where
|
|||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
lhs: &GLWESwitchingKey<DataLhs, FFT64>,
|
lhs: &GLWESwitchingKey<DataLhs, FFT64>,
|
||||||
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
||||||
scratch: &mut base2k::Scratch,
|
scratch: &mut Scratch,
|
||||||
) where
|
) where
|
||||||
MatZnxDft<DataLhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataLhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
@@ -247,7 +247,7 @@ where
|
|||||||
&mut self,
|
&mut self,
|
||||||
module: &Module<FFT64>,
|
module: &Module<FFT64>,
|
||||||
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
rhs: &GLWESwitchingKey<DataRhs, FFT64>,
|
||||||
scratch: &mut base2k::Scratch,
|
scratch: &mut Scratch,
|
||||||
) where
|
) where
|
||||||
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
MatZnxDft<DataRhs, FFT64>: MatZnxDftToRef<FFT64>,
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ pub mod keyswitch_key;
|
|||||||
pub mod tensor_key;
|
pub mod tensor_key;
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test_fft64;
|
mod test_fft64;
|
||||||
|
pub mod trace;
|
||||||
mod utils;
|
mod utils;
|
||||||
|
|
||||||
pub(crate) const SIX_SIGMA: f64 = 6.0;
|
pub(crate) const SIX_SIGMA: f64 = 6.0;
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Backend, FFT64, MatZnxDft, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDft, ScalarZnxDftAlloc,
|
Backend, FFT64, MatZnxDft, MatZnxDftToMut, MatZnxDftToRef, Module, ScalarZnx, ScalarZnxDft, ScalarZnxDftAlloc,
|
||||||
ScalarZnxDftOps, ScalarZnxDftToRef, Scratch, VecZnxDftOps, VecZnxDftToRef,
|
ScalarZnxDftOps, ScalarZnxDftToRef, Scratch, VecZnxDftOps, VecZnxDftToRef,
|
||||||
};
|
};
|
||||||
@@ -15,11 +15,11 @@ pub struct TensorKey<C, B: Backend> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl TensorKey<Vec<u8>, FFT64> {
|
impl TensorKey<Vec<u8>, FFT64> {
|
||||||
pub fn new(module: &Module<FFT64>, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
|
pub fn alloc(module: &Module<FFT64>, basek: usize, k: usize, rows: usize, rank: usize) -> Self {
|
||||||
let mut keys: Vec<GLWESwitchingKey<Vec<u8>, FFT64>> = Vec::new();
|
let mut keys: Vec<GLWESwitchingKey<Vec<u8>, FFT64>> = Vec::new();
|
||||||
let pairs: usize = ((rank + 1) * rank) >> 1;
|
let pairs: usize = ((rank + 1) * rank) >> 1;
|
||||||
(0..pairs).for_each(|_| {
|
(0..pairs).for_each(|_| {
|
||||||
keys.push(GLWESwitchingKey::new(module, basek, k, rows, 1, rank));
|
keys.push(GLWESwitchingKey::alloc(module, basek, k, rows, 1, rank));
|
||||||
});
|
});
|
||||||
Self { keys: keys }
|
Self { keys: keys }
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
use base2k::{FFT64, Module, ScalarZnxOps, ScratchOwned, Stats, VecZnxOps};
|
use backend::{FFT64, Module, ScalarZnxOps, ScratchOwned, Stats, VecZnxOps};
|
||||||
use sampling::source::Source;
|
use sampling::source::Source;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -30,16 +30,16 @@ fn test_automorphism(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk: usize,
|
|||||||
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
||||||
let rows = (k_ksk + basek - 1) / basek;
|
let rows = (k_ksk + basek - 1) / basek;
|
||||||
|
|
||||||
let mut auto_key_in: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_ksk, rows, rank);
|
let mut auto_key_in: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_ksk, rows, rank);
|
||||||
let mut auto_key_out: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_ksk, rows, rank);
|
let mut auto_key_out: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_ksk, rows, rank);
|
||||||
let mut auto_key_apply: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_ksk, rows, rank);
|
let mut auto_key_apply: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_ksk, rows, rank);
|
||||||
|
|
||||||
let mut source_xs: Source = Source::new([0u8; 32]);
|
let mut source_xs: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned = ScratchOwned::new(
|
let mut scratch: ScratchOwned = ScratchOwned::new(
|
||||||
AutomorphismKey::encrypt_sk_scratch_space(&module, rank, auto_key_in.size())
|
AutomorphismKey::generate_from_sk_scratch_space(&module, rank, auto_key_in.size())
|
||||||
| GLWECiphertextFourier::decrypt_scratch_space(&module, auto_key_out.size())
|
| GLWECiphertextFourier::decrypt_scratch_space(&module, auto_key_out.size())
|
||||||
| AutomorphismKey::automorphism_scratch_space(
|
| AutomorphismKey::automorphism_scratch_space(
|
||||||
&module,
|
&module,
|
||||||
@@ -50,14 +50,14 @@ fn test_automorphism(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk: usize,
|
|||||||
),
|
),
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
|
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
// gglwe_{s1}(s0) = s0 -> s1
|
// gglwe_{s1}(s0) = s0 -> s1
|
||||||
auto_key_in.encrypt_sk(
|
auto_key_in.generate_from_sk(
|
||||||
&module,
|
&module,
|
||||||
p0,
|
p0,
|
||||||
&sk,
|
&sk,
|
||||||
@@ -68,7 +68,7 @@ fn test_automorphism(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk: usize,
|
|||||||
);
|
);
|
||||||
|
|
||||||
// gglwe_{s2}(s1) -> s1 -> s2
|
// gglwe_{s2}(s1) -> s1 -> s2
|
||||||
auto_key_apply.encrypt_sk(
|
auto_key_apply.generate_from_sk(
|
||||||
&module,
|
&module,
|
||||||
p1,
|
p1,
|
||||||
&sk,
|
&sk,
|
||||||
@@ -81,16 +81,16 @@ fn test_automorphism(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk: usize,
|
|||||||
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
|
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
|
||||||
auto_key_out.automorphism(&module, &auto_key_in, &auto_key_apply, scratch.borrow());
|
auto_key_out.automorphism(&module, &auto_key_in, &auto_key_apply, scratch.borrow());
|
||||||
|
|
||||||
let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ksk, rank);
|
let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ksk, rank);
|
||||||
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ksk);
|
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ksk);
|
||||||
|
|
||||||
let mut sk_auto: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk_auto: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
|
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
|
||||||
(0..rank).for_each(|i| {
|
(0..rank).for_each(|i| {
|
||||||
module.scalar_znx_automorphism(module.galois_element_inv(p0 * p1), &mut sk_auto, i, &sk, i);
|
module.scalar_znx_automorphism(module.galois_element_inv(p0 * p1), &mut sk_auto, i, &sk, i);
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut sk_auto_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_auto_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_auto_dft.dft(&module, &sk_auto);
|
sk_auto_dft.dft(&module, &sk_auto);
|
||||||
|
|
||||||
(0..auto_key_out.rank_in()).for_each(|col_i| {
|
(0..auto_key_out.rank_in()).for_each(|col_i| {
|
||||||
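The test above derives sk_auto by applying the automorphism X -> X^g, with g = galois_element_inv(p0 * p1), to each column of sk. A standalone sketch of that coefficient permutation on a negacyclic polynomial, illustrative only and not the spqlios routine; g is assumed odd and coprime to 2n:

// Apply X -> X^g to a polynomial in Z[X]/(X^n + 1), given as a coefficient slice.
// The coefficient of X^i moves to X^((i*g) mod n) and flips sign whenever i*g
// lands in [n, 2n) modulo 2n.
fn scalar_znx_automorphism_sketch(g: usize, a: &[i64]) -> Vec<i64> {
    let n = a.len();
    let mut out = vec![0i64; n];
    for (i, &coeff) in a.iter().enumerate() {
        let idx = (i * g) % (2 * n);
        if idx < n {
            out[idx] += coeff;
        } else {
            out[idx - n] -= coeff;
        }
    }
    out
}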
@@ -128,27 +128,27 @@ fn test_automorphism_inplace(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk
|
|||||||
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
||||||
let rows = (k_ksk + basek - 1) / basek;
|
let rows = (k_ksk + basek - 1) / basek;
|
||||||
|
|
||||||
let mut auto_key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_ksk, rows, rank);
|
let mut auto_key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_ksk, rows, rank);
|
||||||
let mut auto_key_apply: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_ksk, rows, rank);
|
let mut auto_key_apply: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_ksk, rows, rank);
|
||||||
|
|
||||||
let mut source_xs: Source = Source::new([0u8; 32]);
|
let mut source_xs: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned = ScratchOwned::new(
|
let mut scratch: ScratchOwned = ScratchOwned::new(
|
||||||
AutomorphismKey::encrypt_sk_scratch_space(&module, rank, auto_key.size())
|
AutomorphismKey::generate_from_sk_scratch_space(&module, rank, auto_key.size())
|
||||||
| GLWECiphertextFourier::decrypt_scratch_space(&module, auto_key.size())
|
| GLWECiphertextFourier::decrypt_scratch_space(&module, auto_key.size())
|
||||||
| AutomorphismKey::automorphism_inplace_scratch_space(&module, auto_key.size(), auto_key_apply.size(), rank),
|
| AutomorphismKey::automorphism_inplace_scratch_space(&module, auto_key.size(), auto_key_apply.size(), rank),
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
|
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
// gglwe_{s1}(s0) = s0 -> s1
|
// gglwe_{s1}(s0) = s0 -> s1
|
||||||
auto_key.encrypt_sk(
|
auto_key.generate_from_sk(
|
||||||
&module,
|
&module,
|
||||||
p0,
|
p0,
|
||||||
&sk,
|
&sk,
|
||||||
@@ -159,7 +159,7 @@ fn test_automorphism_inplace(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk
|
|||||||
);
|
);
|
||||||
|
|
||||||
// gglwe_{s2}(s1) -> s1 -> s2
|
// gglwe_{s2}(s1) -> s1 -> s2
|
||||||
auto_key_apply.encrypt_sk(
|
auto_key_apply.generate_from_sk(
|
||||||
&module,
|
&module,
|
||||||
p1,
|
p1,
|
||||||
&sk,
|
&sk,
|
||||||
@@ -172,16 +172,16 @@ fn test_automorphism_inplace(p0: i64, p1: i64, log_n: usize, basek: usize, k_ksk
|
|||||||
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
|
// gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
|
||||||
auto_key.automorphism_inplace(&module, &auto_key_apply, scratch.borrow());
|
auto_key.automorphism_inplace(&module, &auto_key_apply, scratch.borrow());
|
||||||
|
|
||||||
let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ksk, rank);
|
let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ksk, rank);
|
||||||
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ksk);
|
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ksk);
|
||||||
|
|
||||||
let mut sk_auto: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk_auto: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
|
sk_auto.fill_zero(); // Necessary to avoid panic of unfilled sk
|
||||||
(0..rank).for_each(|i| {
|
(0..rank).for_each(|i| {
|
||||||
module.scalar_znx_automorphism(module.galois_element_inv(p0 * p1), &mut sk_auto, i, &sk, i);
|
module.scalar_znx_automorphism(module.galois_element_inv(p0 * p1), &mut sk_auto, i, &sk, i);
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut sk_auto_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_auto_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_auto_dft.dft(&module, &sk_auto);
|
sk_auto_dft.dft(&module, &sk_auto);
|
||||||
|
|
||||||
(0..auto_key.rank_in()).for_each(|col_i| {
|
(0..auto_key.rank_in()).for_each(|col_i| {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
-use base2k::{FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxToMut, ScratchOwned, Stats, VecZnxOps, ZnxViewMut};
+use backend::{FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxToMut, ScratchOwned, Stats, VecZnxOps, ZnxViewMut};
 use sampling::source::Source;

 use crate::{
@@ -76,8 +76,8 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ksk: usize, sigma: f64, rank_in
 let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
 let rows = (k_ksk + basek - 1) / basek;

-let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_in, rank_out);
+let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_in, rank_out);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ksk);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ksk);

 let mut source_xs: Source = Source::new([0u8; 32]);
 let mut source_xe: Source = Source::new([0u8; 32]);
@@ -88,16 +88,16 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ksk: usize, sigma: f64, rank_in
 | GLWECiphertextFourier::decrypt_scratch_space(&module, ksk.size()),
 );

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in);
 sk_in.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in);
 sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out);
 sk_out.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out);
 sk_out_dft.dft(&module, &sk_out);

 ksk.encrypt_sk(
@@ -110,7 +110,7 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ksk: usize, sigma: f64, rank_in
 scratch.borrow(),
 );

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ksk, rank_out);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ksk, rank_out);

 (0..ksk.rank_in()).for_each(|col_i| {
 (0..ksk.rows()).for_each(|row_i| {
@@ -136,11 +136,11 @@ fn test_key_switch(
 let rows = (k_ksk + basek - 1) / basek;

 let mut ct_gglwe_s0s1: GLWESwitchingKey<Vec<u8>, FFT64> =
-GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_in_s0s1, rank_out_s0s1);
+GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_in_s0s1, rank_out_s0s1);
 let mut ct_gglwe_s1s2: GLWESwitchingKey<Vec<u8>, FFT64> =
-GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_out_s0s1, rank_out_s1s2);
+GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_out_s0s1, rank_out_s1s2);
 let mut ct_gglwe_s0s2: GLWESwitchingKey<Vec<u8>, FFT64> =
-GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_in_s0s1, rank_out_s1s2);
+GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_in_s0s1, rank_out_s1s2);

 let mut source_xs: Source = Source::new([0u8; 32]);
 let mut source_xe: Source = Source::new([0u8; 32]);
@@ -159,22 +159,22 @@ fn test_key_switch(
 ),
 );

-let mut sk0: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in_s0s1);
+let mut sk0: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in_s0s1);
 sk0.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk0_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in_s0s1);
+let mut sk0_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in_s0s1);
 sk0_dft.dft(&module, &sk0);

-let mut sk1: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out_s0s1);
+let mut sk1: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out_s0s1);
 sk1.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk1_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out_s0s1);
+let mut sk1_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out_s0s1);
 sk1_dft.dft(&module, &sk1);

-let mut sk2: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out_s1s2);
+let mut sk2: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out_s1s2);
 sk2.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk2_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out_s1s2);
+let mut sk2_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out_s1s2);
 sk2_dft.dft(&module, &sk2);

 // gglwe_{s1}(s0) = s0 -> s1
@@ -202,8 +202,8 @@ fn test_key_switch(
 // gglwe_{s1}(s0) (x) gglwe_{s2}(s1) = gglwe_{s2}(s0)
 ct_gglwe_s0s2.keyswitch(&module, &ct_gglwe_s0s1, &ct_gglwe_s1s2, scratch.borrow());

-let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ksk, rank_out_s1s2);
+let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ksk, rank_out_s1s2);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ksk);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ksk);

 (0..ct_gglwe_s0s2.rank_in()).for_each(|col_i| {
 (0..ct_gglwe_s0s2.rows()).for_each(|row_i| {
@@ -240,9 +240,9 @@ fn test_key_switch_inplace(log_n: usize, basek: usize, k_ksk: usize, sigma: f64,
 let rows: usize = (k_ksk + basek - 1) / basek;

 let mut ct_gglwe_s0s1: GLWESwitchingKey<Vec<u8>, FFT64> =
-GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_in_s0s1, rank_out_s0s1);
+GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_in_s0s1, rank_out_s0s1);
 let mut ct_gglwe_s1s2: GLWESwitchingKey<Vec<u8>, FFT64> =
-GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_out_s0s1, rank_out_s0s1);
+GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_out_s0s1, rank_out_s0s1);

 let mut source_xs: Source = Source::new([0u8; 32]);
 let mut source_xe: Source = Source::new([0u8; 32]);
@@ -259,22 +259,22 @@ fn test_key_switch_inplace(log_n: usize, basek: usize, k_ksk: usize, sigma: f64,
 ),
 );

-let mut sk0: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in_s0s1);
+let mut sk0: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in_s0s1);
 sk0.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk0_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in_s0s1);
+let mut sk0_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in_s0s1);
 sk0_dft.dft(&module, &sk0);

-let mut sk1: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out_s0s1);
+let mut sk1: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out_s0s1);
 sk1.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk1_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out_s0s1);
+let mut sk1_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out_s0s1);
 sk1_dft.dft(&module, &sk1);

-let mut sk2: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out_s0s1);
+let mut sk2: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out_s0s1);
 sk2.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk2_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out_s0s1);
+let mut sk2_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out_s0s1);
 sk2_dft.dft(&module, &sk2);

 // gglwe_{s1}(s0) = s0 -> s1
@@ -304,8 +304,8 @@ fn test_key_switch_inplace(log_n: usize, basek: usize, k_ksk: usize, sigma: f64,

 let ct_gglwe_s0s2: GLWESwitchingKey<Vec<u8>, FFT64> = ct_gglwe_s0s1;

-let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ksk, rank_out_s0s1);
+let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ksk, rank_out_s0s1);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ksk);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ksk);

 (0..ct_gglwe_s0s2.rank_in()).for_each(|col_i| {
 (0..ct_gglwe_s0s2.rows()).for_each(|row_i| {
@@ -342,9 +342,9 @@ fn test_external_product(log_n: usize, basek: usize, k: usize, sigma: f64, rank_

 let rows: usize = (k + basek - 1) / basek;

-let mut ct_gglwe_in: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k, rows, rank_in, rank_out);
+let mut ct_gglwe_in: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k, rows, rank_in, rank_out);
-let mut ct_gglwe_out: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k, rows, rank_in, rank_out);
+let mut ct_gglwe_out: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k, rows, rank_in, rank_out);
-let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank_out);
+let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank_out);

 let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

@@ -369,16 +369,16 @@ fn test_external_product(log_n: usize, basek: usize, k: usize, sigma: f64, rank_

 pt_rgsw.to_mut().raw_mut()[r] = 1; // X^{r}

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in);
 sk_in.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in);
 sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out);
 sk_out.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out);
 sk_out_dft.dft(&module, &sk_out);

 // gglwe_{s1}(s0) = s0 -> s1
@@ -418,8 +418,8 @@ fn test_external_product(log_n: usize, basek: usize, k: usize, sigma: f64, rank_
 | GGSWCiphertext::encrypt_sk_scratch_space(&module, rank_out, ct_rgsw.size()),
 );

-let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank_out);
+let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank_out);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);

 (0..rank_in).for_each(|i| {
 module.vec_znx_rotate_inplace(r as i64, &mut sk_in.data, i); // * X^{r}
@@ -469,8 +469,8 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k: usize, sigma: f6

 let rows: usize = (k + basek - 1) / basek;

-let mut ct_gglwe: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k, rows, rank_in, rank_out);
+let mut ct_gglwe: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k, rows, rank_in, rank_out);
-let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank_out);
+let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank_out);

 let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

@@ -489,16 +489,16 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k: usize, sigma: f6

 pt_rgsw.to_mut().raw_mut()[r] = 1; // X^{r}

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in);
 sk_in.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in);
 sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out);
 sk_out.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out);
 sk_out_dft.dft(&module, &sk_out);

 // gglwe_{s1}(s0) = s0 -> s1
@@ -525,8 +525,8 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k: usize, sigma: f6
 // gglwe_(m) (x) RGSW_(X^k) = gglwe_(m * X^k)
 ct_gglwe.external_product_inplace(&module, &ct_rgsw, scratch.borrow());

-let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank_out);
+let mut ct_glwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank_out);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);

 (0..rank_in).for_each(|i| {
 module.vec_znx_rotate_inplace(r as i64, &mut sk_in.data, i); // * X^{r}

@@ -1,4 +1,4 @@
-use base2k::{
+use backend::{
 FFT64, Module, ScalarZnx, ScalarZnxAlloc, ScalarZnxDftOps, ScalarZnxOps, ScratchOwned, Stats, VecZnxBig, VecZnxBigAlloc,
 VecZnxBigOps, VecZnxDft, VecZnxDftAlloc, VecZnxDftOps, VecZnxOps, VecZnxToMut, ZnxViewMut, ZnxZero,
 };
@@ -78,9 +78,9 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ggsw: usize, sigma: f64, rank:

 let rows: usize = (k_ggsw + basek - 1) / basek;

-let mut ct: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ggsw);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ggsw);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ggsw);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ggsw);
 let mut pt_scalar: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

 let mut source_xs: Source = Source::new([0u8; 32]);
@@ -94,10 +94,10 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ggsw: usize, sigma: f64, rank:
 | GLWECiphertextFourier::decrypt_scratch_space(&module, ct.size()),
 );

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_dft.dft(&module, &sk);

 ct.encrypt_sk(
@@ -110,7 +110,7 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ggsw: usize, sigma: f64, rank:
 scratch.borrow(),
 );

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ggsw, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ggsw, rank);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct.size());

@@ -144,12 +144,12 @@ fn test_keyswitch(log_n: usize, basek: usize, k: usize, rank: usize, sigma: f64)
 let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
 let rows: usize = (k + basek - 1) / basek;

-let mut ct_in: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank);
+let mut ct_in: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank);
-let mut ct_out: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank);
+let mut ct_out: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank);
-let mut tsk: TensorKey<Vec<u8>, FFT64> = TensorKey::new(&module, basek, k, rows, rank);
+let mut tsk: TensorKey<Vec<u8>, FFT64> = TensorKey::alloc(&module, basek, k, rows, rank);
-let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k, rows, rank, rank);
+let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k, rows, rank, rank);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
 let mut pt_scalar: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

 let mut source_xs: Source = Source::new([0u8; 32]);
@@ -173,16 +173,16 @@ fn test_keyswitch(log_n: usize, basek: usize, k: usize, rank: usize, sigma: f64)

 let var_xs: f64 = 0.5;

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk_in.fill_ternary_prob(var_xs, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk_out.fill_ternary_prob(var_xs, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_out_dft.dft(&module, &sk_out);

 ksk.encrypt_sk(
@@ -217,7 +217,7 @@ fn test_keyswitch(log_n: usize, basek: usize, k: usize, rank: usize, sigma: f64)

 ct_out.keyswitch(&module, &ct_in, &ksk, &tsk, scratch.borrow());

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct_out.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct_out.size());

@@ -271,11 +271,11 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k: usize, rank: usize, sig
 let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
 let rows: usize = (k + basek - 1) / basek;

-let mut ct: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank);
+let mut ct: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank);
-let mut tsk: TensorKey<Vec<u8>, FFT64> = TensorKey::new(&module, basek, k, rows, rank);
+let mut tsk: TensorKey<Vec<u8>, FFT64> = TensorKey::alloc(&module, basek, k, rows, rank);
-let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k, rows, rank, rank);
+let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k, rows, rank, rank);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
 let mut pt_scalar: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

 let mut source_xs: Source = Source::new([0u8; 32]);
@@ -292,16 +292,16 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k: usize, rank: usize, sig

 let var_xs: f64 = 0.5;

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk_in.fill_ternary_prob(var_xs, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk_out.fill_ternary_prob(var_xs, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_out_dft.dft(&module, &sk_out);

 ksk.encrypt_sk(
@@ -336,7 +336,7 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k: usize, rank: usize, sig

 ct.keyswitch_inplace(&module, &ksk, &tsk, scratch.borrow());

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct.size());

@@ -439,12 +439,12 @@ fn test_automorphism(p: i64, log_n: usize, basek: usize, k: usize, rank: usize,
 let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
 let rows: usize = (k + basek - 1) / basek;

-let mut ct_in: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank);
+let mut ct_in: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank);
-let mut ct_out: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank);
+let mut ct_out: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank);
-let mut tensor_key: TensorKey<Vec<u8>, FFT64> = TensorKey::new(&module, basek, k, rows, rank);
+let mut tensor_key: TensorKey<Vec<u8>, FFT64> = TensorKey::alloc(&module, basek, k, rows, rank);
-let mut auto_key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k, rows, rank);
+let mut auto_key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k, rows, rank);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
 let mut pt_scalar: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

 let mut source_xs: Source = Source::new([0u8; 32]);
@@ -454,7 +454,7 @@ fn test_automorphism(p: i64, log_n: usize, basek: usize, k: usize, rank: usize,
 let mut scratch: ScratchOwned = ScratchOwned::new(
 GGSWCiphertext::encrypt_sk_scratch_space(&module, rank, ct_in.size())
 | GLWECiphertextFourier::decrypt_scratch_space(&module, ct_out.size())
-| AutomorphismKey::encrypt_sk_scratch_space(&module, rank, auto_key.size())
+| AutomorphismKey::generate_from_sk_scratch_space(&module, rank, auto_key.size())
 | TensorKey::encrypt_sk_scratch_space(&module, rank, tensor_key.size())
 | GGSWCiphertext::automorphism_scratch_space(
 &module,
@@ -468,13 +468,13 @@ fn test_automorphism(p: i64, log_n: usize, basek: usize, k: usize, rank: usize,

 let var_xs: f64 = 0.5;

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk.fill_ternary_prob(var_xs, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_dft.dft(&module, &sk);

-auto_key.encrypt_sk(
+auto_key.generate_from_sk(
 &module,
 p,
 &sk,
@@ -508,7 +508,7 @@ fn test_automorphism(p: i64, log_n: usize, basek: usize, k: usize, rank: usize,

 module.scalar_znx_automorphism_inplace(p, &mut pt_scalar, 0);

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct_out.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct_out.size());

@@ -560,11 +560,11 @@ fn test_automorphism_inplace(p: i64, log_n: usize, basek: usize, k: usize, rank:
 let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
 let rows: usize = (k + basek - 1) / basek;

-let mut ct: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k, rows, rank);
+let mut ct: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k, rows, rank);
-let mut tensor_key: TensorKey<Vec<u8>, FFT64> = TensorKey::new(&module, basek, k, rows, rank);
+let mut tensor_key: TensorKey<Vec<u8>, FFT64> = TensorKey::alloc(&module, basek, k, rows, rank);
-let mut auto_key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k, rows, rank);
+let mut auto_key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k, rows, rank);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
 let mut pt_scalar: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

 let mut source_xs: Source = Source::new([0u8; 32]);
@@ -574,20 +574,20 @@ fn test_automorphism_inplace(p: i64, log_n: usize, basek: usize, k: usize, rank:
 let mut scratch: ScratchOwned = ScratchOwned::new(
 GGSWCiphertext::encrypt_sk_scratch_space(&module, rank, ct.size())
 | GLWECiphertextFourier::decrypt_scratch_space(&module, ct.size())
-| AutomorphismKey::encrypt_sk_scratch_space(&module, rank, auto_key.size())
+| AutomorphismKey::generate_from_sk_scratch_space(&module, rank, auto_key.size())
 | TensorKey::encrypt_sk_scratch_space(&module, rank, tensor_key.size())
 | GGSWCiphertext::automorphism_inplace_scratch_space(&module, ct.size(), auto_key.size(), tensor_key.size(), rank),
 );

 let var_xs: f64 = 0.5;

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk.fill_ternary_prob(var_xs, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_dft.dft(&module, &sk);

-auto_key.encrypt_sk(
+auto_key.generate_from_sk(
 &module,
 p,
 &sk,
@@ -621,7 +621,7 @@ fn test_automorphism_inplace(p: i64, log_n: usize, basek: usize, k: usize, rank:

 module.scalar_znx_automorphism_inplace(p, &mut pt_scalar, 0);

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct.size());

@@ -674,9 +674,9 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, rank: usize,

 let rows: usize = (k_ggsw + basek - 1) / basek;

-let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct_ggsw_lhs_in: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_ggsw_lhs_in: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct_ggsw_lhs_out: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_ggsw_lhs_out: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
 let mut pt_ggsw_lhs: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
 let mut pt_ggsw_rhs: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

@@ -702,10 +702,10 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, rank: usize,
 ),
 );

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_dft.dft(&module, &sk);

 ct_ggsw_rhs.encrypt_sk(
@@ -730,11 +730,11 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, rank: usize,

 ct_ggsw_lhs_out.external_product(&module, &ct_ggsw_lhs_in, &ct_ggsw_rhs, scratch.borrow());

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ggsw, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ggsw, rank);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ggsw);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ggsw);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct_ggsw_lhs_out.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct_ggsw_lhs_out.size());
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ggsw);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ggsw);

 module.vec_znx_rotate_inplace(k as i64, &mut pt_ggsw_lhs, 0);

@@ -793,8 +793,8 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, rank
 let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
 let rows: usize = (k_ggsw + basek - 1) / basek;

-let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_ggsw_rhs: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct_ggsw_lhs: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_ggsw_lhs: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
 let mut pt_ggsw_lhs: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
 let mut pt_ggsw_rhs: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);

@@ -815,10 +815,10 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, rank
 | GGSWCiphertext::external_product_inplace_scratch_space(&module, ct_ggsw_lhs.size(), ct_ggsw_rhs.size(), rank),
 );

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
 sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
 sk_dft.dft(&module, &sk);

 ct_ggsw_rhs.encrypt_sk(
@@ -843,11 +843,11 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, rank

 ct_ggsw_lhs.external_product_inplace(&module, &ct_ggsw_rhs, scratch.borrow());

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ggsw, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ggsw, rank);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ggsw);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ggsw);
 let mut pt_dft: VecZnxDft<Vec<u8>, FFT64> = module.new_vec_znx_dft(1, ct_ggsw_lhs.size());
 let mut pt_big: VecZnxBig<Vec<u8>, FFT64> = module.new_vec_znx_big(1, ct_ggsw_lhs.size());
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ggsw);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ggsw);

 module.vec_znx_rotate_inplace(k as i64, &mut pt_ggsw_lhs, 0);

@@ -1,4 +1,4 @@
|
|||||||
use base2k::{
|
use backend::{
|
||||||
Decoding, Encoding, FFT64, FillUniform, Module, ScalarZnx, ScalarZnxAlloc, ScratchOwned, Stats, VecZnxOps, VecZnxToMut,
|
Decoding, Encoding, FFT64, FillUniform, Module, ScalarZnx, ScalarZnxAlloc, ScratchOwned, Stats, VecZnxOps, VecZnxToMut,
|
||||||
ZnxViewMut, ZnxZero,
|
ZnxViewMut, ZnxZero,
|
||||||
};
|
};
|
||||||
@@ -94,8 +94,8 @@ fn automorphism() {
|
|||||||
fn test_encrypt_sk(log_n: usize, basek: usize, k_ct: usize, k_pt: usize, sigma: f64, rank: usize) {
|
fn test_encrypt_sk(log_n: usize, basek: usize, k_ct: usize, k_pt: usize, sigma: f64, rank: usize) {
|
||||||
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
||||||
|
|
||||||
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
|
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
|
||||||
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_pt);
|
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_pt);
|
||||||
|
|
||||||
let mut source_xs: Source = Source::new([0u8; 32]);
|
let mut source_xs: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
@@ -105,10 +105,10 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ct: usize, k_pt: usize, sigma:
|
|||||||
GLWECiphertext::encrypt_sk_scratch_space(&module, ct.size()) | GLWECiphertext::decrypt_scratch_space(&module, ct.size()),
|
GLWECiphertext::encrypt_sk_scratch_space(&module, ct.size()) | GLWECiphertext::decrypt_scratch_space(&module, ct.size()),
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
|
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
let mut data_want: Vec<i64> = vec![0i64; module.n()];
|
let mut data_want: Vec<i64> = vec![0i64; module.n()];
|
||||||
@@ -154,18 +154,18 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k_ct: usize, k_pt: usize, sigma:
|
|||||||
fn test_encrypt_zero_sk(log_n: usize, basek: usize, k_ct: usize, sigma: f64, rank: usize) {
|
fn test_encrypt_zero_sk(log_n: usize, basek: usize, k_ct: usize, sigma: f64, rank: usize) {
|
||||||
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
||||||
|
|
||||||
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
|
let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
|
||||||
|
|
||||||
let mut source_xs: Source = Source::new([0u8; 32]);
|
let mut source_xs: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xe: Source = Source::new([1u8; 32]);
|
let mut source_xe: Source = Source::new([1u8; 32]);
|
||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
let mut ct_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ct, rank);
|
let mut ct_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ct, rank);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned = ScratchOwned::new(
|
let mut scratch: ScratchOwned = ScratchOwned::new(
|
||||||
GLWECiphertextFourier::decrypt_scratch_space(&module, ct_dft.size())
|
GLWECiphertextFourier::decrypt_scratch_space(&module, ct_dft.size())
|
||||||
@@ -188,21 +188,21 @@ fn test_encrypt_zero_sk(log_n: usize, basek: usize, k_ct: usize, sigma: f64, ran
|
|||||||
fn test_encrypt_pk(log_n: usize, basek: usize, k_ct: usize, k_pk: usize, sigma: f64, rank: usize) {
|
fn test_encrypt_pk(log_n: usize, basek: usize, k_ct: usize, k_pk: usize, sigma: f64, rank: usize) {
|
||||||
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
|
||||||
|
|
||||||
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
|
let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
|
||||||
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
|
let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
|
||||||
|
|
||||||
let mut source_xs: Source = Source::new([0u8; 32]);
|
let mut source_xs: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xe: Source = Source::new([0u8; 32]);
|
let mut source_xe: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xa: Source = Source::new([0u8; 32]);
|
let mut source_xa: Source = Source::new([0u8; 32]);
|
||||||
let mut source_xu: Source = Source::new([0u8; 32]);
|
let mut source_xu: Source = Source::new([0u8; 32]);
|
||||||
|
|
||||||
let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
|
let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
|
||||||
sk.fill_ternary_prob(0.5, &mut source_xs);
|
sk.fill_ternary_prob(0.5, &mut source_xs);
|
||||||
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
|
let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
|
||||||
sk_dft.dft(&module, &sk);
|
sk_dft.dft(&module, &sk);
|
||||||
|
|
||||||
let mut pk: GLWEPublicKey<Vec<u8>, FFT64> = GLWEPublicKey::new(&module, basek, k_pk, rank);
|
let mut pk: GLWEPublicKey<Vec<u8>, FFT64> = GLWEPublicKey::alloc(&module, basek, k_pk, rank);
|
||||||
pk.generate(&module, &sk_dft, &mut source_xa, &mut source_xe, sigma);
|
pk.generate_from_sk(&module, &sk_dft, &mut source_xa, &mut source_xe, sigma);
|
||||||
|
|
||||||
let mut scratch: ScratchOwned = ScratchOwned::new(
|
let mut scratch: ScratchOwned = ScratchOwned::new(
|
||||||
GLWECiphertext::encrypt_sk_scratch_space(&module, ct.size())
|
GLWECiphertext::encrypt_sk_scratch_space(&module, ct.size())
|
||||||
@@ -228,7 +228,7 @@ fn test_encrypt_pk(log_n: usize, basek: usize, k_ct: usize, k_pk: usize, sigma:
|
|||||||
scratch.borrow(),
|
scratch.borrow(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
|
let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
|
||||||
|
|
||||||
ct.decrypt(&module, &mut pt_have, &sk_dft, scratch.borrow());
|
ct.decrypt(&module, &mut pt_have, &sk_dft, scratch.borrow());
|
||||||
|
|
||||||
@@ -258,11 +258,11 @@ fn test_keyswitch(
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct_in + basek - 1) / basek;

-let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_keyswitch, rows, rank_in, rank_out);
+let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_keyswitch, rows, rank_in, rank_out);
-let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_in, rank_in);
+let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank_in);
-let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_out, rank_out);
+let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank_out);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_in);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_in);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_out);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_out);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -287,16 +287,16 @@ fn test_keyswitch(
),
);

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in);
sk_in.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in);
sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out);
sk_out.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out);
sk_out_dft.dft(&module, &sk_out);

ksk.encrypt_sk(
@@ -351,10 +351,10 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k_ksk: usize, k_ct: usize,
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct + basek - 1) / basek;

-let mut ct_grlwe: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank, rank);
+let mut ct_grlwe: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank, rank);
-let mut ct_rlwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
+let mut ct_rlwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -372,16 +372,16 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k_ksk: usize, k_ct: usize,
| GLWECiphertext::keyswitch_inplace_scratch_space(&module, ct_rlwe.size(), rank, ct_grlwe.size()),
);

-let mut sk0: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk0: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk0.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk0_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk0_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk0_dft.dft(&module, &sk0);

-let mut sk1: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk1: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk1.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk1_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk1_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk1_dft.dft(&module, &sk1);

ct_grlwe.encrypt_sk(
@@ -445,11 +445,11 @@ fn test_automorphism(
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct_in + basek - 1) / basek;

-let mut autokey: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_autokey, rows, rank);
+let mut autokey: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_autokey, rows, rank);
-let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_in, rank);
+let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank);
-let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_out, rank);
+let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_in);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_in);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_out);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_out);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -466,13 +466,13 @@ fn test_automorphism(
| GLWECiphertext::automorphism_scratch_space(&module, ct_out.size(), rank, ct_in.size(), autokey.size()),
);

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

-autokey.encrypt_sk(
+autokey.generate_from_sk(
&module,
p,
&sk,
@@ -527,10 +527,10 @@ fn test_automorphism_inplace(log_n: usize, basek: usize, p: i64, k_autokey: usiz
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct + basek - 1) / basek;

-let mut autokey: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::new(&module, basek, k_autokey, rows, rank);
+let mut autokey: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_autokey, rows, rank);
-let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
+let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -548,13 +548,13 @@ fn test_automorphism_inplace(log_n: usize, basek: usize, p: i64, k_autokey: usiz
| GLWECiphertext::automorphism_inplace_scratch_space(&module, ct.size(), rank, autokey.size()),
);

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

-autokey.encrypt_sk(
+autokey.generate_from_sk(
&module,
p,
&sk,
@@ -607,12 +607,12 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, k_ct_in: usi

let rows: usize = (k_ct_in + basek - 1) / basek;

-let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct_rlwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_in, rank);
+let mut ct_rlwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank);
-let mut ct_rlwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_out, rank);
+let mut ct_rlwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_in);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_in);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_out);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_out);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -642,10 +642,10 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, k_ct_in: usi
),
);

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

ct_rgsw.encrypt_sk(
@@ -711,11 +711,11 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, k_ct
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct + basek - 1) / basek;

-let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct_rlwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
+let mut ct_rlwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -739,10 +739,10 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, k_ct
| GLWECiphertext::external_product_inplace_scratch_space(&module, ct_rlwe.size(), ct_rgsw.size(), rank),
);

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

ct_rgsw.encrypt_sk(
@@ -8,7 +8,7 @@ use crate::{
keyswitch_key::GLWESwitchingKey,
test_fft64::{gglwe::log2_std_noise_gglwe_product, ggsw::noise_ggsw_product},
};
-use base2k::{FFT64, FillUniform, Module, ScalarZnx, ScalarZnxAlloc, ScratchOwned, Stats, VecZnxOps, VecZnxToMut, ZnxViewMut};
+use backend::{FFT64, FillUniform, Module, ScalarZnx, ScalarZnxAlloc, ScratchOwned, Stats, VecZnxOps, VecZnxToMut, ZnxViewMut};
use sampling::source::Source;

#[test]
@@ -59,14 +59,14 @@ fn test_keyswitch(

let rows: usize = (k_ct_in + basek - 1) / basek;

-let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank_in, rank_out);
+let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank_in, rank_out);
-let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_in, rank_in);
+let mut ct_glwe_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank_in);
-let mut ct_glwe_dft_in: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ct_in, rank_in);
+let mut ct_glwe_dft_in: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ct_in, rank_in);
-let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_out, rank_out);
+let mut ct_glwe_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank_out);
let mut ct_glwe_dft_out: GLWECiphertextFourier<Vec<u8>, FFT64> =
-GLWECiphertextFourier::new(&module, basek, k_ct_out, rank_out);
+GLWECiphertextFourier::alloc(&module, basek, k_ct_out, rank_out);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_in);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_in);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_out);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_out);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -91,16 +91,16 @@ fn test_keyswitch(
),
);

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_in);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_in);
sk_in.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_in);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_in);
sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank_out);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank_out);
sk_out.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank_out);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank_out);
sk_out_dft.dft(&module, &sk_out);

ksk.encrypt_sk(
@@ -157,11 +157,11 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k_ksk: usize, k_ct: usize,
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct + basek - 1) / basek;

-let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::new(&module, basek, k_ksk, rows, rank, rank);
+let mut ksk: GLWESwitchingKey<Vec<u8>, FFT64> = GLWESwitchingKey::alloc(&module, basek, k_ksk, rows, rank, rank);
-let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
+let mut ct_glwe: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
-let mut ct_rlwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ct, rank);
+let mut ct_rlwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ct, rank);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -179,16 +179,16 @@ fn test_keyswitch_inplace(log_n: usize, basek: usize, k_ksk: usize, k_ct: usize,
| GLWECiphertextFourier::keyswitch_inplace_scratch_space(&module, ct_rlwe_dft.size(), ksk.size(), rank),
);

-let mut sk_in: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk_in: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk_in.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_in_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_in_dft.dft(&module, &sk_in);

-let mut sk_out: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk_out: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk_out.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_out_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_out_dft.dft(&module, &sk_out);

ksk.encrypt_sk(
@@ -246,14 +246,14 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, k_ct_in: usi

let rows: usize = (k_ct_in + basek - 1) / basek;

-let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_rgsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_in, rank);
+let mut ct_in: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_in, rank);
-let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct_out, rank);
+let mut ct_out: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct_out, rank);
-let mut ct_in_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ct_in, rank);
+let mut ct_in_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ct_in, rank);
-let mut ct_out_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ct_out, rank);
+let mut ct_out_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ct_out, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_in);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_in);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct_out);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct_out);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -277,10 +277,10 @@ fn test_external_product(log_n: usize, basek: usize, k_ggsw: usize, k_ct_in: usi
| GLWECiphertextFourier::external_product_scratch_space(&module, ct_out.size(), ct_in.size(), ct_rgsw.size(), rank),
);

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

ct_rgsw.encrypt_sk(
@@ -348,12 +348,12 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, k_ct
let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
let rows: usize = (k_ct + basek - 1) / basek;

-let mut ct_ggsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::new(&module, basek, k_ggsw, rows, rank);
+let mut ct_ggsw: GGSWCiphertext<Vec<u8>, FFT64> = GGSWCiphertext::alloc(&module, basek, k_ggsw, rows, rank);
-let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::new(&module, basek, k_ct, rank);
+let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k_ct, rank);
-let mut ct_rlwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k_ct, rank);
+let mut ct_rlwe_dft: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k_ct, rank);
let mut pt_rgsw: ScalarZnx<Vec<u8>> = module.new_scalar_znx(1);
-let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);
-let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k_ct);
+let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k_ct);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -377,10 +377,10 @@ fn test_external_product_inplace(log_n: usize, basek: usize, k_ggsw: usize, k_ct
| GLWECiphertextFourier::external_product_inplace_scratch_space(&module, ct.size(), ct_ggsw.size(), rank),
);

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

ct_ggsw.encrypt_sk(
@@ -4,3 +4,5 @@ mod ggsw;
mod glwe;
mod glwe_fourier;
mod tensor_key;
+
+mod trace;
@@ -1,4 +1,4 @@
-use base2k::{FFT64, Module, ScalarZnx, ScalarZnxDftAlloc, ScalarZnxDftOps, ScratchOwned, Stats, VecZnxDftOps, VecZnxOps};
+use backend::{Module, ScalarZnx, ScalarZnxDft, ScalarZnxDftAlloc, ScalarZnxDftOps, ScratchOwned, Stats, VecZnxDftOps, VecZnxOps, FFT64};
use sampling::source::Source;

use crate::{
@@ -22,7 +22,7 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k: usize, sigma: f64, rank: usize

let rows: usize = (k + basek - 1) / basek;

-let mut tensor_key: TensorKey<Vec<u8>, FFT64> = TensorKey::new(&module, basek, k, rows, rank);
+let mut tensor_key: TensorKey<Vec<u8>, FFT64> = TensorKey::alloc(&module, basek, k, rows, rank);

let mut source_xs: Source = Source::new([0u8; 32]);
let mut source_xe: Source = Source::new([0u8; 32]);
@@ -34,10 +34,10 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k: usize, sigma: f64, rank: usize
tensor_key.size(),
));

-let mut sk: SecretKey<Vec<u8>> = SecretKey::new(&module, rank);
+let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
sk.fill_ternary_prob(0.5, &mut source_xs);

-let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::new(&module, rank);
+let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
sk_dft.dft(&module, &sk);

tensor_key.encrypt_sk(
@@ -49,12 +49,12 @@ fn test_encrypt_sk(log_n: usize, basek: usize, k: usize, sigma: f64, rank: usize
scratch.borrow(),
);

-let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::new(&module, basek, k, rank);
+let mut ct_glwe_fourier: GLWECiphertextFourier<Vec<u8>, FFT64> = GLWECiphertextFourier::alloc(&module, basek, k, rank);
-let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::new(&module, basek, k);
+let mut pt: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);

(0..rank).for_each(|i| {
(0..rank).for_each(|j| {
-let mut sk_ij_dft: base2k::ScalarZnxDft<Vec<u8>, FFT64> = module.new_scalar_znx_dft(1);
+let mut sk_ij_dft: ScalarZnxDft<Vec<u8>, FFT64> = module.new_scalar_znx_dft(1);
module.svp_apply(&mut sk_ij_dft, 0, &sk_dft.data, i, &sk_dft.data, j);
let sk_ij: ScalarZnx<Vec<u8>> = module
.vec_znx_idft_consume(sk_ij_dft.as_vec_znx_dft())
116 core/src/test_fft64/trace.rs Normal file
@@ -0,0 +1,116 @@
+use std::collections::HashMap;
+
+use backend::{FFT64, FillUniform, Module, ScratchOwned, Stats, VecZnxOps, ZnxView, ZnxViewMut};
+use sampling::source::Source;
+
+use crate::{
+    automorphism::AutomorphismKey,
+    elem::Infos,
+    glwe_ciphertext::GLWECiphertext,
+    glwe_plaintext::GLWEPlaintext,
+    keys::{SecretKey, SecretKeyFourier},
+    test_fft64::gglwe::var_noise_gglwe_product,
+};
+
+#[test]
+fn trace_inplace() {
+    (1..4).for_each(|rank| {
+        println!("test trace_inplace rank: {}", rank);
+        test_trace_inplace(11, 8, 54, 3.2, rank);
+    });
+}
+
+fn test_trace_inplace(log_n: usize, basek: usize, k: usize, sigma: f64, rank: usize) {
+    let module: Module<FFT64> = Module::<FFT64>::new(1 << log_n);
+
+    let k_autokey: usize = k + basek;
+
+    let rows: usize = (k + basek - 1) / basek;
+
+    let mut ct: GLWECiphertext<Vec<u8>> = GLWECiphertext::alloc(&module, basek, k, rank);
+    let mut pt_want: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
+    let mut pt_have: GLWEPlaintext<Vec<u8>> = GLWEPlaintext::alloc(&module, basek, k);
+
+    let mut source_xs: Source = Source::new([0u8; 32]);
+    let mut source_xe: Source = Source::new([0u8; 32]);
+    let mut source_xa: Source = Source::new([0u8; 32]);
+
+    let mut scratch: ScratchOwned = ScratchOwned::new(
+        GLWECiphertext::encrypt_sk_scratch_space(&module, ct.size())
+            | GLWECiphertext::decrypt_scratch_space(&module, ct.size())
+            | AutomorphismKey::generate_from_sk_scratch_space(&module, rank, k_autokey)
+            | GLWECiphertext::trace_inplace_scratch_space(&module, ct.size(), k_autokey, rank),
+    );
+
+    let mut sk: SecretKey<Vec<u8>> = SecretKey::alloc(&module, rank);
+    sk.fill_ternary_prob(0.5, &mut source_xs);
+
+    let mut sk_dft: SecretKeyFourier<Vec<u8>, FFT64> = SecretKeyFourier::alloc(&module, rank);
+    sk_dft.dft(&module, &sk);
+
+    let mut data_want: Vec<i64> = vec![0i64; module.n()];
+
+    data_want
+        .iter_mut()
+        .for_each(|x| *x = source_xa.next_i64() & 0xFF);
+
+    pt_have
+        .data
+        .fill_uniform(basek, 0, pt_have.size(), &mut source_xa);
+
+    ct.encrypt_sk(
+        &module,
+        &pt_have,
+        &sk_dft,
+        &mut source_xa,
+        &mut source_xe,
+        sigma,
+        scratch.borrow(),
+    );
+
+    let mut auto_keys: HashMap<i64, AutomorphismKey<Vec<u8>, FFT64>> = HashMap::new();
+    let gal_els: Vec<i64> = GLWECiphertext::trace_galois_elements(&module);
+    gal_els.iter().for_each(|gal_el| {
+        let mut key: AutomorphismKey<Vec<u8>, FFT64> = AutomorphismKey::alloc(&module, basek, k_autokey, rows, rank);
+        key.generate_from_sk(
+            &module,
+            *gal_el,
+            &sk,
+            &mut source_xa,
+            &mut source_xe,
+            sigma,
+            scratch.borrow(),
+        );
+        auto_keys.insert(*gal_el, key);
+    });
+
+    ct.trace_inplace(&module, 0, 5, &auto_keys, scratch.borrow());
+    ct.trace_inplace(&module, 5, log_n, &auto_keys, scratch.borrow());
+
+    (0..pt_want.size()).for_each(|i| pt_want.data.at_mut(0, i)[0] = pt_have.data.at(0, i)[0]);
+
+    ct.decrypt(&module, &mut pt_have, &sk_dft, scratch.borrow());
+
+    module.vec_znx_sub_ab_inplace(&mut pt_want, 0, &pt_have, 0);
+    module.vec_znx_normalize_inplace(basek, &mut pt_want, 0, scratch.borrow());
+
+    let noise_have = pt_want.data.std(0, basek).log2();
+
+    let mut noise_want: f64 = var_noise_gglwe_product(
+        module.n() as f64,
+        basek,
+        0.5,
+        0.5,
+        1.0 / 12.0,
+        sigma * sigma,
+        0.0,
+        rank as f64,
+        k,
+        k_autokey,
+    );
+    noise_want += sigma * sigma * (-2.0 * (k) as f64).exp2();
+    noise_want += module.n() as f64 * 1.0 / 12.0 * 0.5 * rank as f64 * (-2.0 * (k) as f64).exp2();
+    noise_want = noise_want.sqrt().log2();
+
+    assert!((noise_have - noise_want).abs() < 1.0);
+}
82 core/src/trace.rs Normal file
@@ -0,0 +1,82 @@
+use std::collections::HashMap;
+
+use backend::{FFT64, MatZnxDft, MatZnxDftToRef, Module, Scratch, VecZnx, VecZnxToMut, VecZnxToRef};
+
+use crate::{automorphism::AutomorphismKey, glwe_ciphertext::GLWECiphertext};
+
+impl GLWECiphertext<Vec<u8>> {
+    pub fn trace_galois_elements(module: &Module<FFT64>) -> Vec<i64> {
+        let mut gal_els: Vec<i64> = Vec::new();
+        (0..module.log_n()).for_each(|i| {
+            if i == 0 {
+                gal_els.push(-1);
+            } else {
+                gal_els.push(module.galois_element(1 << (i - 1)));
+            }
+        });
+        gal_els
+    }
+
+    pub fn trace_scratch_space(
+        module: &Module<FFT64>,
+        out_size: usize,
+        in_size: usize,
+        autokey_size: usize,
+        rank: usize,
+    ) -> usize {
+        Self::automorphism_inplace_scratch_space(module, out_size.max(in_size), rank, autokey_size)
+    }
+
+    pub fn trace_inplace_scratch_space(module: &Module<FFT64>, out_size: usize, autokey_size: usize, rank: usize) -> usize {
+        Self::automorphism_inplace_scratch_space(module, out_size, rank, autokey_size)
+    }
+}
+
+impl<DataSelf> GLWECiphertext<DataSelf>
+where
+    VecZnx<DataSelf>: VecZnxToMut + VecZnxToRef,
+{
+    pub fn trace<DataLhs, DataAK>(
+        &mut self,
+        module: &Module<FFT64>,
+        start: usize,
+        end: usize,
+        lhs: &GLWECiphertext<DataLhs>,
+        auto_keys: &HashMap<i64, AutomorphismKey<DataAK, FFT64>>,
+        scratch: &mut Scratch,
+    ) where
+        VecZnx<DataLhs>: VecZnxToRef,
+        MatZnxDft<DataAK, FFT64>: MatZnxDftToRef<FFT64>,
+    {
+        self.copy(lhs);
+        self.trace_inplace(module, start, end, auto_keys, scratch);
+    }
+
+    pub fn trace_inplace<DataAK>(
+        &mut self,
+        module: &Module<FFT64>,
+        start: usize,
+        end: usize,
+        auto_keys: &HashMap<i64, AutomorphismKey<DataAK, FFT64>>,
+        scratch: &mut Scratch,
+    ) where
+        MatZnxDft<DataAK, FFT64>: MatZnxDftToRef<FFT64>,
+    {
+        (start..end).for_each(|i| {
+            self.rsh(1, scratch);
+
+            let p: i64;
+            if i == 0 {
+                p = -1;
+            } else {
+                p = module.galois_element(1 << (i - 1));
+            }
+
+            if let Some(key) = auto_keys.get(&p) {
+                self.automorphism_add_inplace(module, key, scratch);
+            } else {
+                panic!("auto_keys[{}] is empty", p)
+            }
+        });
+    }
+}
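A minimal sketch of what the loop in trace_inplace computes, assuming rsh(1) halves the ciphertext and automorphism_add_inplace adds the image of the ciphertext under X -> X^p (neither function is shown in this diff): step i folds the ciphertext as

\[
c \;\leftarrow\; \tfrac{1}{2}\bigl(c + \sigma_{p_i}(c)\bigr),
\qquad p_0 = -1,\quad p_i = \mathrm{galois\_element}(2^{\,i-1}) \ \text{for } i \ge 1,
\]

so running the full range start = 0, end = log_n composes to the scaled trace over Z[X]/(X^N + 1),

\[
c \;\mapsto\; \frac{1}{N}\,\mathrm{Tr}(c),
\qquad \frac{1}{N}\,\mathrm{Tr}\Bigl(\textstyle\sum_j a_j X^j\Bigr) = a_0,
\]

i.e. only the constant coefficient of the encrypted polynomial survives. This matches the test above, which builds pt_want from coefficient 0 of pt_have only.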