
stuff

master
Mara Mihali committed 2 years ago (committed by maramihali)
commit dda7a6fb46
19 changed files with 345 additions and 298 deletions
  1. Cargo.toml (+6 -1)
  2. README.md (+36 -20)
  3. benches/nizk.rs (+0 -1)
  4. examples/cubic.rs (+3 -3)
  5. src/commitments.rs (+5 -3)
  6. src/dense_mlpoly.rs (+25 -23)
  7. src/group.rs (+116 -102)
  8. src/lib.rs (+5 -4)
  9. src/nizk/bullet.rs (+6 -4)
  10. src/nizk/mod.rs (+30 -30)
  11. src/product_tree.rs (+6 -5)
  12. src/r1csinstance.rs (+9 -8)
  13. src/r1csproof.rs (+9 -8)
  14. src/random.rs (+3 -3)
  15. src/scalar/mod.rs (+37 -36)
  16. src/scalar/ristretto255.rs (+8 -8)
  17. src/sparse_mlpoly.rs (+16 -15)
  18. src/sumcheck.rs (+4 -3)
  19. src/unipoly.rs (+21 -21)

Cargo.toml (+6 -1)

@@ -13,7 +13,6 @@ keywords = ["zkSNARKs", "cryptography", "proofs"]
 [dependencies]
 curve25519-dalek = {version = "3.2.0", features = ["serde", "simd_backend"]}
 merlin = "3.0.0"
-rand = "0.7.3"
 digest = "0.8.1"
 sha3 = "0.8.2"
 byteorder = "1.3.4"
@@ -27,6 +26,12 @@ itertools = "0.10.0"
 colored = "2.0.0"
 flate2 = "1.0.14"
 thiserror = "1.0"
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0"}
+ark-bls12-377 = { version = "^0.3.0", features = ["r1cs","curve"] }
+ark-serialize = { version = "^0.3.0", features = ["derive"] }
+lazy_static = "1.4.0"
 [dev-dependencies]
 criterion = "0.3.1"

README.md (+36 -20)

@@ -1,33 +1,37 @@
 # Spartan: High-speed zkSNARKs without trusted setup
 ![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg)
-[![](https://img.shields.io/crates/v/spartan.svg)]((https://crates.io/crates/spartan))
+[![](https://img.shields.io/crates/v/spartan.svg)](<(https://crates.io/crates/spartan)>)
 Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libspartan,` a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model.
 A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details.
-Note that this library has *not* received a security review or audit.
+Note that this library has _not_ received a security review or audit.
 ## Highlights
 We now highlight Spartan's distinctive features.
-* **No "toxic" waste:** Spartan is a *transparent* zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
+- **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
-* **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exists efficient transformations and compiler toolchains from high-level programs of interest.
+- **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exists efficient transformations and compiler toolchains from high-level programs of interest.
-* **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).
+- **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).
-* **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.
+- **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.
-* **State-of-the-art performance:**
-Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.
+- **State-of-the-art performance:**
+  Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.
 ### Implementation details
 `libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library.
 ## Examples
 To import `libspartan` into your Rust project, add the following dependency to `Cargo.toml`:
 ```text
 spartan = "0.4.1"
 ```
@@ -70,6 +74,7 @@ Some of our public APIs' style is inspired by the underlying crates we use.
 ```
 Here is another example to use the NIZK variant of the Spartan proof system:
+
 ```rust
 # extern crate libspartan;
 # extern crate merlin;
@@ -101,6 +106,7 @@ Here is another example to use the NIZK variant of the Spartan proof system:
 ```
 Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance
+
 ```rust
 #![allow(non_snake_case)]
 # extern crate curve25519_dalek;
@@ -182,7 +188,7 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
 // a variable that holds a byte representation of 1
 let one = Scalar::one().to_bytes();
 // R1CS is a set of three sparse matrices A B C, where is a row for every
 // constraint and a column for every entry in z = (vars, 1, inputs)
 // An R1CS instance is satisfiable iff:
 // Az \circ Bz = Cz, where z = (vars, 1, inputs)
@@ -210,11 +216,11 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
 let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
 // compute a satisfying assignment
-let mut csprng: OsRng = OsRng;
-let i0 = Scalar::random(&mut csprng);
-let i1 = Scalar::random(&mut csprng);
-let z0 = Scalar::random(&mut csprng);
-let z1 = Scalar::random(&mut csprng);
+let mut rng = ark_std::rand::thread_rng();
+let i0 = Scalar::rand(&mut rng);
+let i1 = Scalar::rand(&mut rng);
+let z0 = Scalar::rand(&mut rng);
+let z1 = Scalar::rand(&mut rng);
 let z2 = (z0 + z1) * i0; // constraint 0
 let z3 = (z0 + i1) * z2; // constraint 1
 let z4 = Scalar::zero(); //constraint 2
@@ -233,7 +239,7 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
 inputs[0] = i0.to_bytes();
 inputs[1] = i1.to_bytes();
 let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
 // check if the instance we created is satisfiable
 let res = inst.is_sat(&assignment_vars, &assignment_inputs);
 assert_eq!(res.unwrap(), true);
@@ -253,30 +259,36 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
 For more examples, see [`examples/`](examples) directory in this repo.
 ## Building `libspartan`
 Install [`rustup`](https://rustup.rs/)
 Switch to nightly Rust using `rustup`:
 ```text
 rustup default nightly
 ```
 Clone the repository:
 ```text
 git clone https://github.com/Microsoft/Spartan
 cd Spartan
 ```
 To build docs for public APIs of `libspartan`:
 ```text
 cargo doc
 ```
 To run tests:
 ```text
 RUSTFLAGS="-C target_cpu=native" cargo test
 ```
 To build `libspartan`:
 ```text
 RUSTFLAGS="-C target_cpu=native" cargo build --release
 ```
@@ -284,19 +296,23 @@ RUSTFLAGS="-C target_cpu=native" cargo build --release
 > NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build remove the "simd_backend" feature argument in `Cargo.toml`.
 ### Supported features
-* `profile`: enables fine-grained profiling information (see below for its use)
+- `profile`: enables fine-grained profiling information (see below for its use)
 ## Performance
 ### End-to-end benchmarks
 `libspartan` includes two benches: `benches/nizk.rs` and `benches/snark.rs`. If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below).
 To run end-to-end benchmarks:
 ```text
 RUSTFLAGS="-C target_cpu=native" cargo bench
 ```
 ### Fine-grained profiling
 Build `libspartan` with `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`.
 These profilers report performance as depicted below (for varying R1CS instance sizes). The reported
@@ -304,7 +320,7 @@ performance is from running the profilers on a Microsoft Surface Laptop 3 on a s
 See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature.
 ```text
 $ ./target/release/snark
 Profiler:: SNARK
 * number_of_constraints 1048576
 * number_of_variables 1048576
@@ -355,7 +371,7 @@ Profiler:: SNARK
 ```
 ```text
 $ ./target/release/nizk
 Profiler:: NIZK
 * number_of_constraints 1048576
 * number_of_variables 1048576
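
The README's examples above swap dalek's `OsRng`-based `Scalar::random` for arkworks' `UniformRand`. A minimal sketch of the new pattern, assuming the ark-* 0.3 crates added in `Cargo.toml` (this is illustration, not code from the commit):

```rust
// Drawing random field elements after the port: `Scalar` is now
// `ark_bls12_377::Fr`, and `ark_std` re-exports the `rand` crate,
// so the direct `rand = "0.7.3"` dependency can be dropped.
use ark_bls12_377::Fr as Scalar;
use ark_std::UniformRand;

fn main() {
    // before: let mut csprng: OsRng = OsRng; let i0 = Scalar::random(&mut csprng);
    let mut rng = ark_std::rand::thread_rng();
    let i0 = Scalar::rand(&mut rng);
    let i1 = Scalar::rand(&mut rng);
    // field arithmetic is unchanged by the swap
    let z2 = (i0 + i1) * i0;
    assert_eq!(z2, i0 * i0 + i1 * i0);
}
```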

benches/nizk.rs (+0 -1)

@@ -4,7 +4,6 @@ extern crate criterion;
 extern crate digest;
 extern crate libspartan;
 extern crate merlin;
-extern crate rand;
 extern crate sha3;
 use libspartan::{Instance, NIZKGens, NIZK};

examples/cubic.rs (+3 -3)

@@ -8,7 +8,7 @@
 //! `(Z3 + 5) * 1 - I0 = 0`
 //!
 //! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
-use curve25519_dalek::scalar::Scalar;
+use ark_bls12_377::Fr as Scalar;
 use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
 use merlin::Transcript;
 use rand::rngs::OsRng;
@@ -71,8 +71,8 @@ fn produce_r1cs() -> (
 let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
 // compute a satisfying assignment
-let mut csprng: OsRng = OsRng;
-let z0 = Scalar::random(&mut csprng);
+let mut rng = ark_std::rand::thread_rng();
+let z0 = Scalar::rand(&mut rng);
 let z1 = z0 * z0; // constraint 0
 let z2 = z1 * z0; // constraint 1
 let z3 = z2 + z0; // constraint 2
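
The doc comments in this example encode the statement x³ + x + 5 = y. A quick standalone sanity check of that assignment over the new field, written against the diff's own constraint equations (a sketch, not part of the commit):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let z0 = Scalar::rand(&mut rng);
    let z1 = z0 * z0;                 // constraint 0: Z0 * Z0 - Z1 = 0
    let z2 = z1 * z0;                 // constraint 1: Z1 * Z0 - Z2 = 0
    let z3 = z2 + z0;                 // constraint 2: Z2 + Z0 - Z3 = 0
    let i0 = z3 + Scalar::from(5u64); // constraint 3: (Z3 + 5) * 1 - I0 = 0
    assert_eq!(i0, z0 * z0 * z0 + z0 + Scalar::from(5u64));
}
```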

src/commitments.rs (+5 -3)

@@ -1,8 +1,10 @@
-use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT_COMPRESSED};
+use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT};
 use super::scalar::Scalar;
 use digest::{ExtendableOutput, Input};
 use sha3::Shake256;
 use std::io::Read;
+use ark_ff::fields::{Field};
+use ark_ec::{ProjectiveCurve};
 #[derive(Debug)]
 pub struct MultiCommitGens {
@@ -15,14 +17,14 @@ impl MultiCommitGens {
 pub fn new(n: usize, label: &[u8]) -> Self {
 let mut shake = Shake256::default();
 shake.input(label);
-shake.input(GROUP_BASEPOINT_COMPRESSED.as_bytes());
+shake.input(GROUP_BASEPOINT.as_bytes());
 let mut reader = shake.xof_result();
 let mut gens: Vec<GroupElement> = Vec::new();
 let mut uniform_bytes = [0u8; 64];
 for _ in 0..n + 1 {
 reader.read_exact(&mut uniform_bytes).unwrap();
-gens.push(GroupElement::from_uniform_bytes(&uniform_bytes));
+gens.push(GroupElement::from_random_bytes(&uniform_bytes));
 }
 MultiCommitGens {
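
A caveat on the hunk above: in ark-ec 0.3 `from_random_bytes` appears to live on the affine type and returns an `Option` (not every byte string maps to a curve point), unlike dalek's total `from_uniform_bytes` on `RistrettoPoint`. A hedged sketch of what the generator derivation might look like under that assumption, with a rejection loop and cofactor clearing (an assumed pattern, not this commit's final code):

```rust
// Sketch: hashing a label to generators with arkworks 0.3.
use ark_bls12_377::{G1Affine, G1Projective};
use ark_ec::AffineCurve;
use digest::{ExtendableOutput, Input};
use sha3::Shake256;
use std::io::Read;

fn generators(n: usize, label: &[u8]) -> Vec<G1Projective> {
    let mut shake = Shake256::default();
    shake.input(label);
    let mut reader = shake.xof_result();
    let mut buf = [0u8; 64];
    let mut gens = Vec::new();
    while gens.len() < n {
        reader.read_exact(&mut buf).unwrap();
        // not every 64-byte string decodes to a point; skip failures
        if let Some(p) = G1Affine::from_random_bytes(&buf) {
            // clear the cofactor to land in the prime-order subgroup
            gens.push(p.mul_by_cofactor().into_projective());
        }
    }
    gens
}
```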

src/dense_mlpoly.rs (+25 -23)

@@ -9,7 +9,10 @@ use super::scalar::Scalar;
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use core::ops::Index;
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One,Zero};
 #[cfg(feature = "multicore")]
 use rayon::prelude::*;
@@ -38,12 +41,12 @@ pub struct PolyCommitmentBlinds {
 blinds: Vec<Scalar>,
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct PolyCommitment {
 C: Vec<CompressedGroup>,
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct ConstPolyCommitment {
 C: CompressedGroup,
 }
@@ -296,7 +299,7 @@ impl AppendToTranscript for PolyCommitment {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct PolyEvalProof {
 proof: DotProductProofLog,
 }
@@ -402,9 +405,8 @@ impl PolyEvalProof {
 #[cfg(test)]
 mod tests {
-use super::super::scalar::ScalarFromPrimitives;
 use super::*;
-use rand::rngs::OsRng;
+use ark_std::{UniformRand};
 fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
 let eq = EqPolynomial::new(r.to_vec());
@@ -432,19 +434,19 @@ mod tests {
 // Z = [1, 2, 1, 4]
 let Z = vec![
 Scalar::one(),
-(2_usize).to_scalar(),
-(1_usize).to_scalar(),
-(4_usize).to_scalar(),
+Scalar::from(2),
+Scalar::from(1),
+Scalar::from(4)
 ];
 // r = [4,3]
-let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
+let r = vec![Scalar::from(4), Scalar::from(3)];
 let eval_with_LR = evaluate_with_LR(&Z, &r);
 let poly = DensePolynomial::new(Z);
 let eval = poly.evaluate(&r);
-assert_eq!(eval, (28_usize).to_scalar());
+assert_eq!(eval, Scalar::from(28));
 assert_eq!(eval_with_LR, eval);
 }
@@ -518,12 +520,12 @@ mod tests {
 #[test]
 fn check_memoized_chis() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let s = 10;
 let mut r: Vec<Scalar> = Vec::new();
 for _i in 0..s {
-r.push(Scalar::random(&mut csprng));
+r.push(Scalar::rand(&mut rng));
 }
 let chis = tests::compute_chis_at_r(&r);
 let chis_m = EqPolynomial::new(r).evals();
@@ -532,12 +534,12 @@ mod tests {
 #[test]
 fn check_factored_chis() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let s = 10;
 let mut r: Vec<Scalar> = Vec::new();
 for _i in 0..s {
-r.push(Scalar::random(&mut csprng));
+r.push(Scalar::rand(&mut rng));
 }
 let chis = EqPolynomial::new(r.clone()).evals();
 let (L, R) = EqPolynomial::new(r).compute_factored_evals();
@@ -547,12 +549,12 @@ mod tests {
 #[test]
 fn check_memoized_factored_chis() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let s = 10;
 let mut r: Vec<Scalar> = Vec::new();
 for _i in 0..s {
-r.push(Scalar::random(&mut csprng));
+r.push(Scalar::rand(&mut rng));
 }
 let (L, R) = tests::compute_factored_chis_at_r(&r);
 let eq = EqPolynomial::new(r);
@@ -564,17 +566,17 @@ mod tests {
 #[test]
 fn check_polynomial_commit() {
 let Z = vec![
-(1_usize).to_scalar(),
-(2_usize).to_scalar(),
-(1_usize).to_scalar(),
-(4_usize).to_scalar(),
+Scalar::from(1),
+Scalar::from(2),
+Scalar::from(1),
+Scalar::from(4)
 ];
 let poly = DensePolynomial::new(Z);
 // r = [4,3]
-let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
+let r = vec![Scalar::from(4), Scalar::from(3)];
 let eval = poly.evaluate(&r);
-assert_eq!(eval, (28_usize).to_scalar());
+assert_eq!(eval, Scalar::from(28));
 let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
 let (poly_commitment, blinds) = poly.commit(&gens, None);
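
For reference, the 28 the tests check falls straight out of the multilinear extension: with Z = [1, 2, 1, 4] and r = [4, 3], eval = Σᵢ Z[i]·χᵢ(r). A standalone sketch over the new field, with r[0] as the most significant variable (illustration only, not commit code):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::One;

fn main() {
    let z = [Scalar::from(1u64), Scalar::from(2u64), Scalar::from(1u64), Scalar::from(4u64)];
    let (r0, r1) = (Scalar::from(4u64), Scalar::from(3u64));
    let one = Scalar::one();
    // chi_{b0 b1}(r) = prod_j (r_j if b_j = 1 else 1 - r_j)
    let chis = [
        (one - r0) * (one - r1), // chi_00
        (one - r0) * r1,         // chi_01
        r0 * (one - r1),         // chi_10
        r0 * r1,                 // chi_11
    ];
    let mut eval = Scalar::from(0u64);
    for (zi, ci) in z.iter().zip(chis.iter()) {
        eval += *zi * *ci;
    }
    // (-3)(-2)*1 + (-3)(3)*2 + (4)(-2)*1 + (4*3)*4 = 6 - 18 - 8 + 48 = 28
    assert_eq!(eval, Scalar::from(28u64));
}
```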

src/group.rs (+116 -102)

@@ -1,117 +1,131 @@
-use super::errors::ProofVerifyError;
-use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
+use lazy_static::lazy_static;
+use super::scalar::{Scalar};
 use core::borrow::Borrow;
 use core::ops::{Mul, MulAssign};
+use ark_ec::{ProjectiveCurve, AffineCurve};
-pub type GroupElement = curve25519_dalek::ristretto::RistrettoPoint;
-pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;
+pub use ark_bls12_377::G1Projective as GroupElement;
+pub use ark_bls12_377::G1Affine as AffineGroupElement;
-pub trait CompressedGroupExt {
-type Group;
-fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
-}
-impl CompressedGroupExt for CompressedGroup {
-type Group = curve25519_dalek::ristretto::RistrettoPoint;
-fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
-self
-.decompress()
-.ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
-}
-}
-pub const GROUP_BASEPOINT_COMPRESSED: CompressedGroup =
-curve25519_dalek::constants::RISTRETTO_BASEPOINT_COMPRESSED;
+// pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;
+// pub trait CompressedGroupExt {
+// type Group;
+// fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
+// }
+// what I should prolly do is implement compression and decompression operation on the GroupAffine
+// impl CompressedGroupExt for CompressedGroup {
+// type Group = curve25519_dalek::ristretto::RistrettoPoint;
+// fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
+// self
+// .decompress()
+// .ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
+// }
+// }
+// ????
+lazy_static! {
+pub static ref GROUP_BASEPOINT: GroupElement = GroupElement::prime_subgroup_generator();
+}
-impl<'b> MulAssign<&'b Scalar> for GroupElement {
-fn mul_assign(&mut self, scalar: &'b Scalar) {
-let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
-*self = result;
-}
-}
+// impl<'b> MulAssign<&'b Scalar> for GroupElement {
+// fn mul_assign(&mut self, scalar: &'b Scalar) {
+// let result = (self as &GroupElement).mul( scalar.into_repr());
+// *self = result;
+// }
+// }
-impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
-type Output = GroupElement;
-fn mul(self, scalar: &'b Scalar) -> GroupElement {
-self * Scalar::decompress_scalar(scalar)
-}
-}
-impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
-type Output = GroupElement;
-fn mul(self, point: &'b GroupElement) -> GroupElement {
-Scalar::decompress_scalar(self) * point
-}
-}
+// // This game happens because dalek works with scalars as bytes representation but we want people to have an easy life and not care about this
+// impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
+// type Output = GroupElement;
+// fn mul(self, scalar: &'b Scalar) -> GroupElement {
+// self * Scalar::into_repr(scalar)
+// }
+// }
+// impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
+// type Output = GroupElement;
+// fn mul(self, point: &'b GroupElement) -> GroupElement {
+// Scalar::into_repr(self) * point
+// }
+// }
-macro_rules! define_mul_variants {
-(LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
-impl<'b> Mul<&'b $rhs> for $lhs {
-type Output = $out;
-fn mul(self, rhs: &'b $rhs) -> $out {
-&self * rhs
-}
-}
-impl<'a> Mul<$rhs> for &'a $lhs {
-type Output = $out;
-fn mul(self, rhs: $rhs) -> $out {
-self * &rhs
-}
-}
-impl Mul<$rhs> for $lhs {
-type Output = $out;
-fn mul(self, rhs: $rhs) -> $out {
-&self * &rhs
-}
-}
-};
-}
-macro_rules! define_mul_assign_variants {
-(LHS = $lhs:ty, RHS = $rhs:ty) => {
-impl MulAssign<$rhs> for $lhs {
-fn mul_assign(&mut self, rhs: $rhs) {
-*self *= &rhs;
-}
-}
-};
-}
-define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
-define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
-define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
+// macro_rules! define_mul_variants {
+// (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
+// impl<'b> Mul<&'b $rhs> for $lhs {
+// type Output = $out;
+// fn mul(self, rhs: &'b $rhs) -> $out {
+// &self * rhs
+// }
+// }
+// impl<'a> Mul<$rhs> for &'a $lhs {
+// type Output = $out;
+// fn mul(self, rhs: $rhs) -> $out {
+// self * &rhs
+// }
+// }
+// impl Mul<$rhs> for $lhs {
+// type Output = $out;
+// fn mul(self, rhs: $rhs) -> $out {
+// &self * &rhs
+// }
+// }
+// };
+// }
+// macro_rules! define_mul_assign_variants {
+// (LHS = $lhs:ty, RHS = $rhs:ty) => {
+// impl MulAssign<$rhs> for $lhs {
+// fn mul_assign(&mut self, rhs: $rhs) {
+// *self *= &rhs;
+// }
+// }
+// };
+// }
+// define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
+// define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
+// define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
-pub trait VartimeMultiscalarMul {
-type Scalar;
-fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
-where
-I: IntoIterator,
-I::Item: Borrow<Self::Scalar>,
-J: IntoIterator,
-J::Item: Borrow<Self>,
-Self: Clone;
-}
-impl VartimeMultiscalarMul for GroupElement {
-type Scalar = super::scalar::Scalar;
-fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
-where
-I: IntoIterator,
-I::Item: Borrow<Self::Scalar>,
-J: IntoIterator,
-J::Item: Borrow<Self>,
-Self: Clone,
-{
-use curve25519_dalek::traits::VartimeMultiscalarMul;
-<Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
-scalars
-.into_iter()
-.map(|s| Scalar::decompress_scalar(s.borrow()))
-.collect::<Vec<ScalarBytes>>(),
-points,
-)
-}
-}
+// TODO
+// pub trait VartimeMultiscalarMul {
+// type Scalar;
+// fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
+// where
+// I: IntoIterator,
+// I::Item: Borrow<Self::Scalar>,
+// J: IntoIterator,
+// J::Item: Borrow<Self>,
+// Self: Clone;
+// }
+// impl VartimeMultiscalarMul for GroupElement {
+// type Scalar = super::scalar::Scalar;
+// fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
+// where
+// I: IntoIterator,
+// I::Item: Borrow<Self::Scalar>,
+// J: IntoIterator,
+// J::Item: Borrow<Self>,
+// Self: Clone,
+// {
+// // use curve25519_dalek::traits::VartimeMultiscalarMul;
+// <Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
+// scalars
+// .into_iter()
+// .map(|s| Scalar::into_repr(s.borrow()))
+// .collect::<Vec<Scalar>>(),
+// points,
+// )
+// }
+// }
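
The "what I should prolly do" note above marks the open design question: `ristretto255` had a native compressed point encoding, while a BLS12-377 `G1Projective` does not. One plausible shape, using ark-serialize's canonical (compressed) encoding of the affine form, is sketched below; this is an assumption about where the port is headed, not this commit's final design:

```rust
use ark_bls12_377::{G1Affine, G1Projective};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

/// Stand-in for dalek's CompressedRistretto: the canonical
/// compressed bytes of the affine form of a point.
pub struct CompressedGroup(pub Vec<u8>);

pub fn compress(p: &G1Projective) -> CompressedGroup {
    let mut bytes = Vec::new();
    // `serialize` emits the compressed encoding in ark-serialize 0.3
    p.into_affine().serialize(&mut bytes).unwrap();
    CompressedGroup(bytes)
}

pub fn unpack(c: &CompressedGroup) -> Option<G1Projective> {
    // deserialization validates the point, mirroring dalek's `decompress`
    G1Affine::deserialize(&c.0[..]).ok().map(|p| p.into_projective())
}

fn main() {
    let g = G1Projective::prime_subgroup_generator();
    assert_eq!(unpack(&compress(&g)), Some(g));
}
```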

src/lib.rs (+5 -4)

@@ -9,9 +9,10 @@ extern crate core;
 extern crate curve25519_dalek;
 extern crate digest;
 extern crate merlin;
-extern crate rand;
 extern crate sha3;
 extern crate test;
+extern crate lazy_static;
+extern crate ark_std;
 #[cfg(feature = "multicore")]
 extern crate rayon;
@@ -42,7 +43,7 @@ use r1csinstance::{
 use r1csproof::{R1CSGens, R1CSProof};
 use random::RandomTape;
 use scalar::Scalar;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
 use timer::Timer;
 use transcript::{AppendToTranscript, ProofTranscript};
@@ -307,7 +308,7 @@ impl SNARKGens {
 }
 /// `SNARK` holds a proof produced by Spartan SNARK
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct SNARK {
 r1cs_sat_proof: R1CSProof,
 inst_evals: (Scalar, Scalar, Scalar),
@@ -484,7 +485,7 @@ impl NIZKGens {
 }
 /// `NIZK` holds a proof produced by Spartan NIZK
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct NIZK {
 r1cs_sat_proof: R1CSProof,
 r: (Vec<Scalar>, Vec<Scalar>),
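
`SNARK` and `NIZK` now derive `CanonicalSerialize`/`CanonicalDeserialize` rather than serde's traits, which changes how callers persist proofs. A round-trip sketch on a single field element, assuming the ark-serialize 0.3 API (the same two calls apply to any deriving type):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::rand::thread_rng();
    let x = Scalar::rand(&mut rng);

    // serde + bincode/json is replaced by the canonical byte encoding
    let mut bytes = Vec::new();
    x.serialize(&mut bytes).unwrap();
    let y = Scalar::deserialize(&bytes[..]).unwrap();
    assert_eq!(x, y);
}
```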

src/nizk/bullet.rs (+6 -4)

@@ -9,9 +9,11 @@ use super::super::scalar::Scalar;
 use super::super::transcript::ProofTranscript;
 use core::iter;
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{Field, fields};
+use ark_std::{One, Zero};
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct BulletReductionProof {
 L_vec: Vec<CompressedGroup>,
 R_vec: Vec<CompressedGroup>,
@@ -99,7 +101,7 @@ impl BulletReductionProof {
 transcript.append_point(b"R", &R.compress());
 let u = transcript.challenge_scalar(b"u");
-let u_inv = u.invert().unwrap();
+let u_inv = u.inverse().unwrap();
 for i in 0..n {
 a_L[i] = a_L[i] * u + u_inv * a_R[i];
@@ -158,7 +160,7 @@ impl BulletReductionProof {
 // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
 let mut challenges_inv = challenges.clone();
-let allinv = Scalar::batch_invert(&mut challenges_inv);
+let allinv = ark_ff::fields::batch_inversion(&mut challenges_inv);
 // 3. Compute u_i^2 and (1/u_i)^2
 for i in 0..lg_n {
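
One subtlety in the last hunk: dalek's `Scalar::batch_invert` inverts in place *and* returns the product of all inverses, while ark-ff's `batch_inversion` returns `()`, so `allinv` as written would bind the unit type. A sketch of recovering the dalek behaviour under ark-ff 0.3 (a suggested helper, not commit code):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::{batch_inversion, One};

// In-place batch inversion that also returns the product of the
// inverses, matching what dalek's `batch_invert` handed back.
fn batch_invert_with_product(v: &mut [Scalar]) -> Scalar {
    batch_inversion(v);
    v.iter().fold(Scalar::one(), |acc, x| acc * *x)
}

fn main() {
    let mut v = [Scalar::from(2u64), Scalar::from(4u64)];
    let allinv = batch_invert_with_product(&mut v);
    assert_eq!(allinv, v[0] * v[1]); // (1/2) * (1/4) = 1/8
}
```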

src/nizk/mod.rs (+30 -30)

@@ -6,12 +6,12 @@ use super::random::RandomTape;
 use super::scalar::Scalar;
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
 mod bullet;
 use bullet::BulletReductionProof;
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct KnowledgeProof {
 alpha: CompressedGroup,
 z1: Scalar,
@@ -73,7 +73,7 @@ impl KnowledgeProof {
 }
 }
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct EqualityProof {
 alpha: CompressedGroup,
 z: Scalar,
@@ -142,7 +142,7 @@ impl EqualityProof {
 }
 }
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct ProductProof {
 alpha: CompressedGroup,
 beta: CompressedGroup,
@@ -288,7 +288,7 @@ impl ProductProof {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DotProductProof {
 delta: CompressedGroup,
 beta: CompressedGroup,
@@ -416,7 +416,7 @@ impl DotProductProofGens {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DotProductProofLog {
 bullet_reduction_proof: BulletReductionProof,
 delta: CompressedGroup,
@@ -567,15 +567,15 @@ impl DotProductProofLog {
 #[cfg(test)]
 mod tests {
 use super::*;
-use rand::rngs::OsRng;
+use ark_std::{UniformRand};
 #[test]
 fn check_knowledgeproof() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
-let x = Scalar::random(&mut csprng);
-let r = Scalar::random(&mut csprng);
+let x = Scalar::rand(&mut rng);
+let r = Scalar::rand(&mut rng);
 let mut random_tape = RandomTape::new(b"proof");
 let mut prover_transcript = Transcript::new(b"example");
@@ -590,13 +590,13 @@ mod tests {
 #[test]
 fn check_equalityproof() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
-let v1 = Scalar::random(&mut csprng);
+let v1 = Scalar::rand(&mut rng);
 let v2 = v1;
-let s1 = Scalar::random(&mut csprng);
-let s2 = Scalar::random(&mut csprng);
+let s1 = Scalar::rand(&mut rng);
+let s2 = Scalar::rand(&mut rng);
 let mut random_tape = RandomTape::new(b"proof");
 let mut prover_transcript = Transcript::new(b"example");
@@ -618,15 +618,15 @@ mod tests {
 #[test]
 fn check_productproof() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let gens_1 = MultiCommitGens::new(1, b"test-productproof");
-let x = Scalar::random(&mut csprng);
-let rX = Scalar::random(&mut csprng);
-let y = Scalar::random(&mut csprng);
-let rY = Scalar::random(&mut csprng);
+let x = Scalar::rand(&mut rng);
+let rX = Scalar::rand(&mut rng);
+let y = Scalar::rand(&mut rng);
+let rY = Scalar::rand(&mut rng);
 let z = x * y;
-let rZ = Scalar::random(&mut csprng);
+let rZ = Scalar::rand(&mut rng);
 let mut random_tape = RandomTape::new(b"proof");
 let mut prover_transcript = Transcript::new(b"example");
@@ -650,7 +650,7 @@ mod tests {
 #[test]
 fn check_dotproductproof() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let n = 1024;
@@ -660,12 +660,12 @@ mod tests {
 let mut x: Vec<Scalar> = Vec::new();
 let mut a: Vec<Scalar> = Vec::new();
 for _ in 0..n {
-x.push(Scalar::random(&mut csprng));
-a.push(Scalar::random(&mut csprng));
+x.push(Scalar::rand(&mut rng));
+a.push(Scalar::rand(&mut rng));
 }
 let y = DotProductProofLog::compute_dotproduct(&x, &a);
-let r_x = Scalar::random(&mut csprng);
-let r_y = Scalar::random(&mut csprng);
+let r_x = Scalar::rand(&mut rng);
+let r_y = Scalar::rand(&mut rng);
 let mut random_tape = RandomTape::new(b"proof");
 let mut prover_transcript = Transcript::new(b"example");
@@ -689,18 +689,18 @@ mod tests {
 #[test]
 fn check_dotproductproof_log() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let n = 1024;
 let gens = DotProductProofGens::new(n, b"test-1024");
-let x: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
-let a: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
+let x: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
+let a: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
 let y = DotProductProof::compute_dotproduct(&x, &a);
-let r_x = Scalar::random(&mut csprng);
-let r_y = Scalar::random(&mut csprng);
+let r_x = Scalar::rand(&mut rng);
+let r_y = Scalar::rand(&mut rng);
 let mut random_tape = RandomTape::new(b"proof");
 let mut prover_transcript = Transcript::new(b"example");

src/product_tree.rs (+6 -5)

@@ -5,7 +5,8 @@ use super::scalar::Scalar;
 use super::sumcheck::SumcheckInstanceProof;
 use super::transcript::ProofTranscript;
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_std::{One};
 #[derive(Debug)]
 pub struct ProductCircuit {
@@ -107,7 +108,7 @@ impl DotProductCircuit {
 }
 #[allow(dead_code)]
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct LayerProof {
 pub proof: SumcheckInstanceProof,
 pub claims: Vec<Scalar>,
@@ -130,7 +131,7 @@ impl LayerProof {
 }
 #[allow(dead_code)]
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct LayerProofBatched {
 pub proof: SumcheckInstanceProof,
 pub claims_prod_left: Vec<Scalar>,
@@ -153,12 +154,12 @@ impl LayerProofBatched {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct ProductCircuitEvalProof {
 proof: Vec<LayerProof>,
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct ProductCircuitEvalProofBatched {
 proof: Vec<LayerProofBatched>,
 claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),

src/r1csinstance.rs (+9 -8)

@@ -12,10 +12,11 @@ use super::sparse_mlpoly::{
 use super::timer::Timer;
 use flate2::{write::ZlibEncoder, Compression};
 use merlin::Transcript;
-use rand::rngs::OsRng;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_std::{One, Zero, UniformRand};
+use ark_ff::{Field};
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct R1CSInstance {
 num_cons: usize,
 num_vars: usize,
@@ -55,7 +56,7 @@ impl R1CSCommitmentGens {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct R1CSCommitment {
 num_cons: usize,
 num_vars: usize,
@@ -164,7 +165,7 @@ impl R1CSInstance {
 Timer::print(&format!("number_of_variables {}", num_vars));
 Timer::print(&format!("number_of_inputs {}", num_inputs));
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 // assert num_cons and num_vars are power of 2
 assert_eq!((num_cons.log2() as usize).pow2(), num_cons);
@@ -179,7 +180,7 @@ impl R1CSInstance {
 // produce a random satisfying assignment
 let Z = {
 let mut Z: Vec<Scalar> = (0..size_z)
-.map(|_i| Scalar::random(&mut csprng))
+.map(|_i| Scalar::rand(&mut rng))
 .collect::<Vec<Scalar>>();
 Z[num_vars] = Scalar::one(); // set the constant term to 1
 Z
@@ -206,7 +207,7 @@ impl R1CSInstance {
 C.push(SparseMatEntry::new(
 i,
 C_idx,
-AB_val * C_val.invert().unwrap(),
+AB_val * C_val.inverse().unwrap(),
 ));
 }
 }
@@ -319,7 +320,7 @@ impl R1CSInstance {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct R1CSEvalProof {
 proof: SparseMatPolyEvalProof,
 }

src/r1csproof.rs (+9 -8)

@@ -15,9 +15,10 @@ use super::timer::Timer;
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use core::iter;
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_std::{Zero, One};
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct R1CSProof {
 comm_vars: PolyCommitment,
 sc_proof_phase1: ZKSumcheckInstanceProof,
@@ -492,7 +493,7 @@ impl R1CSProof {
 #[cfg(test)]
 mod tests {
 use super::*;
-use rand::rngs::OsRng;
+use ark_std::{UniformRand};
 fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
 // three constraints over five variables Z1, Z2, Z3, Z4, and Z5
@@ -528,11 +529,11 @@ mod tests {
 let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
 // compute a satisfying assignment
-let mut csprng: OsRng = OsRng;
-let i0 = Scalar::random(&mut csprng);
-let i1 = Scalar::random(&mut csprng);
-let z1 = Scalar::random(&mut csprng);
-let z2 = Scalar::random(&mut csprng);
+let mut rng = ark_std::rand::thread_rng();
+let i0 = Scalar::rand(&mut rng);
+let i1 = Scalar::rand(&mut rng);
+let z1 = Scalar::rand(&mut rng);
+let z2 = Scalar::rand(&mut rng);
 let z3 = (z1 + z2) * i0; // constraint 1: (Z1 + Z2) * I0 - Z3 = 0;
 let z4 = (z1 + i1) * z3; // constraint 2: (Z1 + I1) * (Z3) - Z4 = 0
 let z5 = Scalar::zero(); //constraint 3

src/random.rs (+3 -3)

@@ -1,7 +1,7 @@
 use super::scalar::Scalar;
 use super::transcript::ProofTranscript;
 use merlin::Transcript;
-use rand::rngs::OsRng;
+use ark_std::{UniformRand};
 pub struct RandomTape {
 tape: Transcript,
@@ -10,9 +10,9 @@ pub struct RandomTape {
 impl RandomTape {
 pub fn new(name: &'static [u8]) -> Self {
 let tape = {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let mut tape = Transcript::new(name);
-tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
+tape.append_scalar(b"init_randomness", &Scalar::rand(&mut rng));
 tape
 };
 Self { tape }
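
`RandomTape` is small but load-bearing: as the README explains, the prover seeds one private merlin transcript from the OS RNG and then derives all internal blinds from it, instead of threading `OsRng` through the code. A self-contained sketch of the same idea in plain merlin 3.x calls, using the pre-port `rand` crate for the seed (the real implementation goes through the library's `ProofTranscript` helpers):

```rust
use merlin::Transcript;
use rand::rngs::OsRng;
use rand::RngCore;

fn main() {
    // seed a private transcript once, instead of carrying an RNG around
    let mut seed = [0u8; 32];
    OsRng.fill_bytes(&mut seed);
    let mut tape = Transcript::new(b"proof");
    tape.append_message(b"init_randomness", &seed);

    // every later request for private randomness is a transcript challenge
    let mut blind = [0u8; 32];
    tape.challenge_bytes(b"challenge", &mut blind);
}
```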

src/scalar/mod.rs (+37 -36)

@@ -1,43 +1,44 @@
-mod ristretto255;
+pub use ark_bls12_377::Fr as Scalar;
+// mod ristretto255;
-pub type Scalar = ristretto255::Scalar;
-pub type ScalarBytes = curve25519_dalek::scalar::Scalar;
+// pub type Scalar = ristretto255::Scalar;
+// pub type ScalarBytes = curve25519_dalek::scalar::Scalar;
-pub trait ScalarFromPrimitives {
-fn to_scalar(self) -> Scalar;
-}
+// pub trait ScalarFromPrimitives {
+// fn to_scalar(self) -> Scalar;
+// }
-impl ScalarFromPrimitives for usize {
-#[inline]
-fn to_scalar(self) -> Scalar {
-(0..self).map(|_i| Scalar::one()).sum()
-}
-}
+// impl ScalarFromPrimitives for usize {
+// #[inline]
+// fn to_scalar(self) -> Scalar {
+// (0..self).map(|_i| Scalar::one()).sum()
+// }
+// }
-impl ScalarFromPrimitives for bool {
-#[inline]
-fn to_scalar(self) -> Scalar {
-if self {
-Scalar::one()
-} else {
-Scalar::zero()
-}
-}
-}
+// impl ScalarFromPrimitives for bool {
+// #[inline]
+// fn to_scalar(self) -> Scalar {
+// if self {
+// Scalar::one()
+// } else {
+// Scalar::zero()
+// }
+// }
+// }
-pub trait ScalarBytesFromScalar {
-fn decompress_scalar(s: &Scalar) -> ScalarBytes;
-fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
-}
+// pub trait ScalarBytesFromScalar {
+// fn decompress_scalar(s: &Scalar) -> ScalarBytes;
+// fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
+// }
-impl ScalarBytesFromScalar for Scalar {
-fn decompress_scalar(s: &Scalar) -> ScalarBytes {
-ScalarBytes::from_bytes_mod_order(s.to_bytes())
-}
+// impl ScalarBytesFromScalar for Scalar {
+// fn decompress_scalar(s: &Scalar) -> ScalarBytes {
+// ScalarBytes::from_bytes_mod_order(s.to_bytes())
+// }
-fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
-(0..s.len())
-.map(|i| Scalar::decompress_scalar(&s[i]))
-.collect::<Vec<ScalarBytes>>()
-}
-}
+// fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
+// (0..s.len())
+// .map(|i| Scalar::decompress_scalar(&s[i]))
+// .collect::<Vec<ScalarBytes>>()
+// }
+// }

src/scalar/ristretto255.rs (+8 -8)

@@ -11,7 +11,7 @@ use core::fmt;
 use core::iter::{Product, Sum};
 use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
 use rand_core::{CryptoRng, RngCore};
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
 use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
 use zeroize::Zeroize;
@@ -196,7 +196,7 @@ macro_rules! impl_binops_multiplicative {
 // The internal representation of this type is four 64-bit unsigned
 // integers in little-endian order. `Scalar` values are always in
 // Montgomery form; i.e., Scalar(a) = aR mod q, with R = 2^256.
-#[derive(Clone, Copy, Eq, Serialize, Deserialize)]
+#[derive(Clone, Copy, Eq, CanonicalSerialize, CanonicalDeserialize)]
 pub struct Scalar(pub(crate) [u64; 4]);
 impl fmt::Debug for Scalar {
@@ -634,7 +634,7 @@ impl Scalar {
 debug_assert!(acc != Scalar::zero());
 // Compute the inverse of all products
-acc = acc.invert().unwrap();
+acc = acc.inverse().unwrap();
 // We need to return the product of all inverses later
 let ret = acc;
@@ -1140,14 +1140,14 @@ mod tests {
 #[test]
 fn test_inversion() {
-assert_eq!(Scalar::zero().invert().is_none().unwrap_u8(), 1);
-assert_eq!(Scalar::one().invert().unwrap(), Scalar::one());
-assert_eq!((-&Scalar::one()).invert().unwrap(), -&Scalar::one());
+assert_eq!(Scalar::zero().inverse().is_none().unwrap_u8(), 1);
+assert_eq!(Scalar::one().inverse().unwrap(), Scalar::one());
+assert_eq!((-&Scalar::one()).inverse().unwrap(), -&Scalar::one());
 let mut tmp = R2;
 for _ in 0..100 {
-let mut tmp2 = tmp.invert().unwrap();
+let mut tmp2 = tmp.inverse().unwrap();
 tmp2.mul_assign(&tmp);
 assert_eq!(tmp2, Scalar::one());
@@ -1170,7 +1170,7 @@ mod tests {
 let mut r3 = R;
 for _ in 0..100 {
-r1 = r1.invert().unwrap();
+r1 = r1.inverse().unwrap();
 r2 = r2.pow_vartime(&q_minus_2);
 r3 = r3.pow(&q_minus_2);

src/sparse_mlpoly.rs (+16 -15)

@@ -14,9 +14,10 @@ use super::timer::Timer;
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use core::cmp::Ordering;
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One, Zero};
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SparseMatEntry {
 row: usize,
 col: usize,
@@ -29,7 +30,7 @@ impl SparseMatEntry {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SparseMatPolynomial {
 num_vars_x: usize,
 num_vars_y: usize,
@@ -42,7 +43,7 @@ pub struct Derefs {
 comb: DensePolynomial,
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DerefsCommitment {
 comm_ops_val: PolyCommitment,
 }
@@ -71,7 +72,7 @@ impl Derefs {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct DerefsEvalProof {
 proof_derefs: PolyEvalProof,
 }
@@ -321,7 +322,7 @@ impl SparseMatPolyCommitmentGens {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SparseMatPolyCommitment {
 batch_size: usize,
 num_ops: usize,
@@ -687,7 +688,7 @@ impl PolyEvalNetwork {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 struct HashLayerProof {
 eval_row: (Vec<Scalar>, Vec<Scalar>, Scalar),
 eval_col: (Vec<Scalar>, Vec<Scalar>, Scalar),
@@ -1035,7 +1036,7 @@ impl HashLayerProof {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 struct ProductLayerProof {
 eval_row: (Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
 eval_col: (Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
@@ -1325,7 +1326,7 @@ impl ProductLayerProof {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 struct PolyEvalNetworkProof {
 proof_prod_layer: ProductLayerProof,
 proof_hash_layer: HashLayerProof,
@@ -1439,7 +1440,7 @@ impl PolyEvalNetworkProof {
 }
 }
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SparseMatPolyEvalProof {
 comm_derefs: DerefsCommitment,
 poly_eval_network_proof: PolyEvalNetworkProof,
@@ -1626,11 +1627,11 @@ impl SparsePolynomial {
 #[cfg(test)]
 mod tests {
 use super::*;
-use rand::rngs::OsRng;
+use ark_std::{UniformRand};
 use rand::RngCore;
 #[test]
 fn check_sparse_polyeval_proof() {
-let mut csprng: OsRng = OsRng;
+let mut rng = ark_std::rand::thread_rng();
 let num_nz_entries: usize = 256;
 let num_rows: usize = 256;
@@ -1644,7 +1645,7 @@ mod tests {
 M.push(SparseMatEntry::new(
 (csprng.next_u64() % (num_rows as u64)) as usize,
 (csprng.next_u64() % (num_cols as u64)) as usize,
-Scalar::random(&mut csprng),
+Scalar::rand(&mut rng),
 ));
 }
@@ -1662,10 +1663,10 @@ mod tests {
 // evaluation
 let rx: Vec<Scalar> = (0..num_vars_x)
-.map(|_i| Scalar::random(&mut csprng))
+.map(|_i| Scalar::rand(&mut rng))
 .collect::<Vec<Scalar>>();
 let ry: Vec<Scalar> = (0..num_vars_y)
-.map(|_i| Scalar::random(&mut csprng))
+.map(|_i| Scalar::rand(&mut rng))
 .collect::<Vec<Scalar>>();
 let eval = SparseMatPolynomial::multi_evaluate(&[&poly_M], &rx, &ry);
 let evals = vec![eval[0], eval[0], eval[0]];

src/sumcheck.rs (+4 -3)

@@ -12,9 +12,10 @@ use super::unipoly::{CompressedUniPoly, UniPoly};
 use core::iter;
 use itertools::izip;
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One,Zero};
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct SumcheckInstanceProof {
 compressed_polys: Vec<CompressedUniPoly>,
 }
@@ -61,7 +62,7 @@ impl SumcheckInstanceProof {
 }
 }
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct ZKSumcheckInstanceProof {
 comm_polys: Vec<CompressedGroup>,
 comm_evals: Vec<CompressedGroup>,

src/unipoly.rs (+21 -21)

@@ -1,10 +1,10 @@
 use super::commitments::{Commitments, MultiCommitGens};
 use super::group::GroupElement;
-use super::scalar::{Scalar, ScalarFromPrimitives};
+use super::scalar::{Scalar};
 use super::transcript::{AppendToTranscript, ProofTranscript};
 use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One, Zero, Field};
 // ax^2 + bx + c stored as vec![c,b,a]
 // ax^3 + bx^2 + cx + d stored as vec![d,c,b,a]
 #[derive(Debug)]
@@ -14,7 +14,7 @@ pub struct UniPoly {
 // ax^2 + bx + c stored as vec![c,a]
 // ax^3 + bx^2 + cx + d stored as vec![d,b,a]
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
 pub struct CompressedUniPoly {
 coeffs_except_linear_term: Vec<Scalar>,
 }
@@ -25,7 +25,7 @@ impl UniPoly {
 assert!(evals.len() == 3 || evals.len() == 4);
 let coeffs = if evals.len() == 3 {
 // ax^2 + bx + c
-let two_inv = (2_usize).to_scalar().invert().unwrap();
+let two_inv = Scalar::from(2).inverse().unwrap();
 let c = evals[0];
 let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
@@ -33,8 +33,8 @@ impl UniPoly {
 vec![c, b, a]
 } else {
 // ax^3 + bx^2 + cx + d
-let two_inv = (2_usize).to_scalar().invert().unwrap();
-let six_inv = (6_usize).to_scalar().invert().unwrap();
+let two_inv = Scalar::from(2).inverse().unwrap();
+let six_inv = Scalar::from(6).inverse().unwrap();
 let d = evals[0];
 let a = six_inv
@@ -128,8 +128,8 @@ mod tests {
 fn test_from_evals_quad() {
 // polynomial is 2x^2 + 3x + 1
 let e0 = Scalar::one();
-let e1 = (6_usize).to_scalar();
-let e2 = (15_usize).to_scalar();
+let e1 = Scalar::from(6);
+let e2 = Scalar::from(15);
 let evals = vec![e0, e1, e2];
 let poly = UniPoly::from_evals(&evals);
@@ -137,8 +137,8 @@ mod tests {
 assert_eq!(poly.eval_at_one(), e1);
 assert_eq!(poly.coeffs.len(), 3);
 assert_eq!(poly.coeffs[0], Scalar::one());
-assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
-assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
+assert_eq!(poly.coeffs[1], Scalar::from(3));
+assert_eq!(poly.coeffs[2], Scalar::from(2));
 let hint = e0 + e1;
 let compressed_poly = poly.compress();
@@ -147,17 +147,17 @@ mod tests {
 assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
 }
-let e3 = (28_usize).to_scalar();
-assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3);
+let e3 = Scalar::from(28);
+assert_eq!(poly.evaluate(&Scalar::from(3)), e3);
 }
 #[test]
 fn test_from_evals_cubic() {
 // polynomial is x^3 + 2x^2 + 3x + 1
 let e0 = Scalar::one();
-let e1 = (7_usize).to_scalar();
-let e2 = (23_usize).to_scalar();
-let e3 = (55_usize).to_scalar();
+let e1 = Scalar::from(7);
+let e2 = Scalar::from(23);
+let e3 = Scalar::from(55);
 let evals = vec![e0, e1, e2, e3];
 let poly = UniPoly::from_evals(&evals);
@@ -165,9 +165,9 @@ mod tests {
 assert_eq!(poly.eval_at_one(), e1);
 assert_eq!(poly.coeffs.len(), 4);
 assert_eq!(poly.coeffs[0], Scalar::one());
-assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
-assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
-assert_eq!(poly.coeffs[3], (1_usize).to_scalar());
+assert_eq!(poly.coeffs[1], Scalar::from(3));
+assert_eq!(poly.coeffs[2], Scalar::from(2));
+assert_eq!(poly.coeffs[3], Scalar::from(1));
 let hint = e0 + e1;
 let compressed_poly = poly.compress();
@@ -176,7 +176,7 @@ mod tests {
 assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
 }
-let e4 = (109_usize).to_scalar();
-assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4);
+let e4 = Scalar::from(109);
+assert_eq!(poly.evaluate(&Scalar::from(4)), e4);
 }
 }
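
As a cross-check of the interpolation in `from_evals`: for 2x² + 3x + 1 the evaluations at x = 0, 1, 2 are [1, 6, 15], and the coefficient formulas used above recover (c, b, a) = (1, 3, 2). A standalone sketch with the same formulas over the new field (illustration only):

```rust
use ark_bls12_377::Fr as Scalar;
use ark_ff::Field;

fn main() {
    let e = [Scalar::from(1u64), Scalar::from(6u64), Scalar::from(15u64)];
    let two_inv = Scalar::from(2u64).inverse().unwrap();
    let c = e[0];
    let a = two_inv * (e[2] - e[1] - e[1] + c); // (e2 - 2*e1 + e0) / 2 = 2
    let b = e[1] - c - a;                       // e1 - c - a = 3
    assert_eq!((c, b, a), (Scalar::from(1u64), Scalar::from(3u64), Scalar::from(2u64)));
}
```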
