diff --git a/Cargo.toml b/Cargo.toml
index 487dbc6..608dcd5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,6 @@ keywords = ["zkSNARKs", "cryptography", "proofs"]

[dependencies]
curve25519-dalek = {version = "3.2.0", features = ["serde", "simd_backend"]}
merlin = "3.0.0"
-rand = "0.7.3"
digest = "0.8.1"
sha3 = "0.8.2"
byteorder = "1.3.4"
@@ -27,6 +26,12 @@ itertools = "0.10.0"
colored = "2.0.0"
flate2 = "1.0.14"
thiserror = "1.0"
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0" }
+ark-bls12-377 = { version = "^0.3.0", features = ["r1cs", "curve"] }
+ark-serialize = { version = "^0.3.0", features = ["derive"] }
+lazy_static = "1.4.0"

[dev-dependencies]
criterion = "0.3.1"
diff --git a/README.md b/README.md
index e86921e..ba069af 100644
--- a/README.md
+++ b/README.md
@@ -1,33 +1,37 @@
# Spartan: High-speed zkSNARKs without trusted setup

![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg)
-[![](https://img.shields.io/crates/v/spartan.svg)]((https://crates.io/crates/spartan))
+[![](https://img.shields.io/crates/v/spartan.svg)](https://crates.io/crates/spartan)

Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libspartan`, a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model.

A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details.

-Note that this library has *not* received a security review or audit.
+Note that this library has _not_ received a security review or audit.

## Highlights
+
We now highlight Spartan's distinctive features.

-* **No "toxic" waste:** Spartan is a *transparent* zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
+- **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.

-* **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exists efficient transformations and compiler toolchains from high-level programs of interest.
+- **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exist efficient transformations and compiler toolchains from high-level programs of interest.

-* **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).
+- **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).

-* **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.
+- **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.

-* **State-of-the-art performance:**
-Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.
+- **State-of-the-art performance:**
+  Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.

### Implementation details
-`libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library.
+
+`libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library.
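
The sketch below illustrates that idea (an editor's illustration with simplified names, not the library's exact API; the real type lives in `src/random.rs`): seed a private `merlin` transcript once from the system RNG, then squeeze per-use randomness out of it as transcript challenges.

```rust
use ark_std::rand::RngCore;
use merlin::Transcript;

struct RandomTape {
  tape: Transcript,
}

impl RandomTape {
  fn new(name: &'static [u8]) -> Self {
    // seed the private transcript once with fresh system randomness
    let mut seed = [0u8; 32];
    ark_std::rand::thread_rng().fill_bytes(&mut seed);
    let mut tape = Transcript::new(name);
    tape.append_message(b"init_randomness", &seed);
    Self { tape }
  }

  // each call absorbs the label and squeezes 32 fresh pseudorandom bytes,
  // so the prover never touches the OS RNG again after construction
  fn random_bytes(&mut self, label: &'static [u8]) -> [u8; 32] {
    let mut buf = [0u8; 32];
    self.tape.challenge_bytes(label, &mut buf);
    buf
  }
}
```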
## Examples
+
To import `libspartan` into your Rust project, add the following dependency to `Cargo.toml`:
+
```text
spartan = "0.4.1"
```
@@ -70,6 +74,7 @@ Some of our public APIs' style is inspired by the underlying crates we use.
```

Here is another example to use the NIZK variant of the Spartan proof system:
+
```rust
# extern crate libspartan;
# extern crate merlin;
@@ -101,6 +106,7 @@ Here is another example to use the NIZK variant of the Spartan proof system:
```

Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance
+
```rust
#![allow(non_snake_case)]
# extern crate curve25519_dalek;
# extern crate libspartan;
# extern crate merlin;
@@ -182,7 +188,7 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
  // a variable that holds a byte representation of 1
  let one = Scalar::one().to_bytes();

-  // R1CS is a set of three sparse matrices A B C, where is a row for every
+  // R1CS is a set of three sparse matrices A B C, where there is a row for every
  // constraint and a column for every entry in z = (vars, 1, inputs)
  // An R1CS instance is satisfiable iff:
  // Az \circ Bz = Cz, where z = (vars, 1, inputs)
@@ -210,11 +216,11 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
  let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();

  // compute a satisfying assignment
-  let mut csprng: OsRng = OsRng;
-  let i0 = Scalar::random(&mut csprng);
-  let i1 = Scalar::random(&mut csprng);
-  let z0 = Scalar::random(&mut csprng);
-  let z1 = Scalar::random(&mut csprng);
+  let mut rng = ark_std::rand::thread_rng();
+  let i0 = Scalar::rand(&mut rng);
+  let i1 = Scalar::rand(&mut rng);
+  let z0 = Scalar::rand(&mut rng);
+  let z1 = Scalar::rand(&mut rng);
  let z2 = (z0 + z1) * i0; // constraint 0
  let z3 = (z0 + i1) * z2; // constraint 1
  let z4 = Scalar::zero(); //constraint 2
@@ -233,7 +239,7 @@ Finally, we provide an example that specifies a custom R1CS instance instead of
  inputs[0] = i0.to_bytes();
  inputs[1] = i1.to_bytes();
  let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
-
+
  // check if the instance we created is satisfiable
  let res = inst.is_sat(&assignment_vars, &assignment_inputs);
  assert_eq!(res.unwrap(), true);
@@ -253,30 +259,36 @@ Finally, we provide an example that specifies a custom R1CS instance instead of

For more examples, see [`examples/`](examples) directory in this repo.

## Building `libspartan`
+
Install [`rustup`](https://rustup.rs/)

Switch to nightly Rust using `rustup`:
+
```text
rustup default nightly
```

Clone the repository:
+
```text
git clone https://github.com/Microsoft/Spartan
cd Spartan
```

To build docs for public APIs of `libspartan`:
+
```text
cargo doc
```

To run tests:
+
```text
RUSTFLAGS="-C target_cpu=native" cargo test
```

To build `libspartan`:
+
```text
RUSTFLAGS="-C target_cpu=native" cargo build --release
```
@@ -284,19 +296,23 @@ RUSTFLAGS="-C target_cpu=native" cargo build --release

> NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build remove the "simd_backend" feature argument in `Cargo.toml`.

### Supported features
-* `profile`: enables fine-grained profiling information (see below for its use)
+
+- `profile`: enables fine-grained profiling information (see below for its use; an example build command follows this list)
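For instance, to build with the `profile` feature enabled (this exact invocation is the editor's illustration using standard Cargo feature syntax; the README itself does not spell out the command):

```text
RUSTFLAGS="-C target_cpu=native" cargo build --release --features profile
```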

## Performance

### End-to-end benchmarks
+
`libspartan` includes two benches: `benches/nizk.rs` and `benches/snark.rs`. If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below).

To run end-to-end benchmarks:
-```text
+
+```text
RUSTFLAGS="-C target_cpu=native" cargo bench
```

### Fine-grained profiling
+
Build `libspartan` with the `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`.

These profilers report performance as depicted below (for varying R1CS instance sizes). The reported
@@ -304,7 +320,7 @@ performance is from running the profilers on a Microsoft Surface Laptop 3 on a s
See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature.

```text
-$ ./target/release/snark
+$ ./target/release/snark
Profiler:: SNARK
  * number_of_constraints 1048576
  * number_of_variables 1048576
@@ -355,7 +371,7 @@ Profiler:: SNARK
```

```text
-$ ./target/release/nizk
+$ ./target/release/nizk
Profiler:: NIZK
  * number_of_constraints 1048576
  * number_of_variables 1048576
diff --git a/benches/nizk.rs b/benches/nizk.rs
index ff67f02..0cdfe72 100644
--- a/benches/nizk.rs
+++ b/benches/nizk.rs
@@ -4,7 +4,6 @@ extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
-extern crate rand;
extern crate sha3;

use libspartan::{Instance, NIZKGens, NIZK};
diff --git a/examples/cubic.rs b/examples/cubic.rs
index dcfa7b4..ad8a7fb 100644
--- a/examples/cubic.rs
+++ b/examples/cubic.rs
@@ -8,7 +8,7 @@
//! `(Z3 + 5) * 1 - I0 = 0`
//!
//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
-use curve25519_dalek::scalar::Scalar;
+use ark_bls12_377::Fr as Scalar;
use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
use merlin::Transcript;
-use rand::rngs::OsRng;
@@ -71,8 +71,8 @@ fn produce_r1cs() -> (
  let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();

  // compute a satisfying assignment
-  let mut csprng: OsRng = OsRng;
-  let z0 = Scalar::random(&mut csprng);
+  let mut rng = ark_std::rand::thread_rng();
+  let z0 = Scalar::rand(&mut rng);
  let z1 = z0 * z0; // constraint 0
  let z2 = z1 * z0; // constraint 1
  let z3 = z2 + z0; // constraint 2
diff --git a/src/commitments.rs b/src/commitments.rs
index d3caf7f..3a26ce6 100644
--- a/src/commitments.rs
+++ b/src/commitments.rs
@@ -1,8 +1,10 @@
-use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT_COMPRESSED};
+use super::group::{AffineGroupElement, GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT};
use super::scalar::Scalar;
use digest::{ExtendableOutput, Input};
use sha3::Shake256;
use std::io::Read;
+use ark_ff::fields::Field;
+use ark_ec::{ProjectiveCurve, AffineCurve};
+use ark_serialize::CanonicalSerialize;

#[derive(Debug)]
pub struct MultiCommitGens {
@@ -15,14 +17,14 @@ impl MultiCommitGens {
  pub fn new(n: usize, label: &[u8]) -> Self {
    let mut shake = Shake256::default();
    shake.input(label);
-    shake.input(GROUP_BASEPOINT_COMPRESSED.as_bytes());
+    // arkworks points expose no `as_bytes` view, so hash in the canonical
+    // serialization of the basepoint instead
+    let mut basepoint_bytes = Vec::new();
+    GROUP_BASEPOINT.into_affine().serialize(&mut basepoint_bytes).unwrap();
+    shake.input(&basepoint_bytes);
    let mut reader = shake.xof_result();
    let mut gens: Vec<GroupElement> = Vec::new();
    let mut uniform_bytes = [0u8; 64];
    for _ in 0..n + 1 {
-      reader.read_exact(&mut uniform_bytes).unwrap();
-      gens.push(GroupElement::from_uniform_bytes(&uniform_bytes));
+      // `from_random_bytes` lives on the affine type and may fail, so
+      // resample from the XOF until it yields a valid point
+      loop {
+        reader.read_exact(&mut uniform_bytes).unwrap();
+        if let Some(el) = AffineGroupElement::from_random_bytes(&uniform_bytes) {
+          gens.push(el.into_projective());
+          break;
+        }
+      }
    }

    MultiCommitGens {
diff --git a/src/dense_mlpoly.rs b/src/dense_mlpoly.rs
index 1dbf47c..e107f86 100644
--- a/src/dense_mlpoly.rs
+++ b/src/dense_mlpoly.rs
@@ -9,7 +9,10 @@ use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use core::ops::Index;
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One, Zero};
+

#[cfg(feature = "multicore")]
use rayon::prelude::*;
@@ -38,12 +41,12 @@ pub struct PolyCommitmentBlinds {
  blinds: Vec<Scalar>,
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct PolyCommitment {
  C: Vec<CompressedGroup>,
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ConstPolyCommitment {
  C: CompressedGroup,
}
@@ -296,7 +299,7 @@ impl AppendToTranscript for PolyCommitment {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct PolyEvalProof {
  proof: DotProductProofLog,
}
@@ -402,9 +405,8 @@ impl PolyEvalProof {

#[cfg(test)]
mod tests {
-  use super::super::scalar::ScalarFromPrimitives;
  use super::*;
-  use rand::rngs::OsRng;
+  use ark_std::UniformRand;

  fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
    let eq = EqPolynomial::new(r.to_vec());
@@ -432,19 +434,19 @@ mod tests {
    // Z = [1, 2, 1, 4]
    let Z = vec![
      Scalar::one(),
-      (2_usize).to_scalar(),
-      (1_usize).to_scalar(),
-      (4_usize).to_scalar(),
+      Scalar::from(2),
+      Scalar::from(1),
+      Scalar::from(4),
    ];

    // r = [4,3]
-    let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
+    let r = vec![Scalar::from(4), Scalar::from(3)];

    let eval_with_LR = evaluate_with_LR(&Z, &r);
    let poly = DensePolynomial::new(Z);

    let eval = poly.evaluate(&r);
-    assert_eq!(eval, (28_usize).to_scalar());
+    assert_eq!(eval, Scalar::from(28));
    assert_eq!(eval_with_LR, eval);
  }
@@ -518,12 +520,12 @@

  #[test]
  fn check_memoized_chis() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
-      r.push(Scalar::random(&mut csprng));
+      r.push(Scalar::rand(&mut rng));
    }
    let chis = tests::compute_chis_at_r(&r);
    let chis_m = EqPolynomial::new(r).evals();
@@ -532,12 +534,12 @@

  #[test]
  fn check_factored_chis() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
-      r.push(Scalar::random(&mut csprng));
+      r.push(Scalar::rand(&mut rng));
    }
    let chis = EqPolynomial::new(r.clone()).evals();
    let (L, R) = EqPolynomial::new(r).compute_factored_evals();
@@ -547,12 +549,12 @@

  #[test]
  fn check_memoized_factored_chis() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
-      r.push(Scalar::random(&mut csprng));
+      r.push(Scalar::rand(&mut rng));
    }
    let (L, R) = tests::compute_factored_chis_at_r(&r);
    let eq = EqPolynomial::new(r);
@@ -564,17 +566,17 @@

  #[test]
  fn check_polynomial_commit() {
    let Z = vec![
-      (1_usize).to_scalar(),
-      (2_usize).to_scalar(),
-      (1_usize).to_scalar(),
-      (4_usize).to_scalar(),
+      Scalar::from(1),
+      Scalar::from(2),
+      Scalar::from(1),
+      Scalar::from(4),
    ];
    let poly = DensePolynomial::new(Z);

    // r = [4,3]
-    let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
+    let r = vec![Scalar::from(4), Scalar::from(3)];
    let eval = poly.evaluate(&r);
-    assert_eq!(eval, (28_usize).to_scalar());
+    assert_eq!(eval, Scalar::from(28));

    let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
    let (poly_commitment, blinds) = poly.commit(&gens, None);
diff --git a/src/group.rs b/src/group.rs
index ee8b770..bc17d18 100644
--- a/src/group.rs
+++ b/src/group.rs
@@ -1,117 +1,131 @@
-use super::errors::ProofVerifyError;
-use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
+use lazy_static::lazy_static;
+use super::scalar::{Scalar};
use core::borrow::Borrow;
use core::ops::{Mul, MulAssign};
+use ark_ec::{ProjectiveCurve, AffineCurve};

-pub type GroupElement = curve25519_dalek::ristretto::RistrettoPoint;
-pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;
+pub use ark_bls12_377::G1Projective as GroupElement;
+pub use ark_bls12_377::G1Affine as AffineGroupElement;

-pub trait CompressedGroupExt {
-  type Group;
-  fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
-}
-impl CompressedGroupExt for CompressedGroup {
-  type Group = curve25519_dalek::ristretto::RistrettoPoint;
-  fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
-    self
-      .decompress()
-      .ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
-  }
-}
-pub const GROUP_BASEPOINT_COMPRESSED: CompressedGroup =
-  curve25519_dalek::constants::RISTRETTO_BASEPOINT_COMPRESSED;
+// pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;

-impl<'b> MulAssign<&'b Scalar> for GroupElement {
-  fn mul_assign(&mut self, scalar: &'b Scalar) {
-    let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
-    *self = result;
-  }
-}
+// pub trait CompressedGroupExt {
+//   type Group;
+//   fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
+// }

-impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
-  type Output = GroupElement;
-  fn mul(self, scalar: &'b Scalar) -> GroupElement {
-    self * Scalar::decompress_scalar(scalar)
-  }
-}
-impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
-  type Output = GroupElement;
+// TODO: implement compression and decompression operations on the affine
+// group representation (see the sketch below).

-  fn mul(self, point: &'b GroupElement) -> GroupElement {
-    Scalar::decompress_scalar(self) * point
-  }
-}
+// impl CompressedGroupExt for CompressedGroup {
+//   type Group = curve25519_dalek::ristretto::RistrettoPoint;
+//   fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
+//     self
+//       .decompress()
+//       .ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
+//   }
+// }

-macro_rules! define_mul_variants {
-  (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
-    impl<'b> Mul<&'b $rhs> for $lhs {
-      type Output = $out;
-      fn mul(self, rhs: &'b $rhs) -> $out {
-        &self * rhs
-      }
-    }
-
-    impl<'a> Mul<$rhs> for &'a $lhs {
-      type Output = $out;
-      fn mul(self, rhs: $rhs) -> $out {
-        self * &rhs
-      }
-    }
-
-    impl Mul<$rhs> for $lhs {
-      type Output = $out;
-      fn mul(self, rhs: $rhs) -> $out {
-        &self * &rhs
-      }
-    }
-  };
+// The basepoint is materialized lazily because the group generator cannot be
+// computed in a const context.
+lazy_static! {
+  pub static ref GROUP_BASEPOINT: GroupElement = GroupElement::prime_subgroup_generator();
}
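+
+// A sketch of the compression TODO above — an editor's illustration assuming
+// arkworks 0.3 APIs (`serialize` on an affine point writes the compressed
+// encoding); it is not yet wired into the rest of this file:
+//
+// use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
+//
+// pub fn compress_group_element(p: &GroupElement) -> Result<Vec<u8>, SerializationError> {
+//   let mut bytes = Vec::new();
+//   p.into_affine().serialize(&mut bytes)?;
+//   Ok(bytes)
+// }
+//
+// pub fn decompress_group_element(bytes: &[u8]) -> Result<GroupElement, SerializationError> {
+//   let affine = AffineGroupElement::deserialize(bytes)?;
+//   Ok(affine.into_projective())
+// }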
-macro_rules! define_mul_assign_variants {
-  (LHS = $lhs:ty, RHS = $rhs:ty) => {
-    impl MulAssign<$rhs> for $lhs {
-      fn mul_assign(&mut self, rhs: $rhs) {
-        *self *= &rhs;
-      }
-    }
-  };
-}
-define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
-define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
-define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
-
-pub trait VartimeMultiscalarMul {
-  type Scalar;
-  fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
-  where
-    I: IntoIterator,
-    I::Item: Borrow<Self::Scalar>,
-    J: IntoIterator,
-    J::Item: Borrow<Self>,
-    Self: Clone;
-}
+// impl<'b> MulAssign<&'b Scalar> for GroupElement {
+//   fn mul_assign(&mut self, scalar: &'b Scalar) {
+//     let result = (self as &GroupElement).mul(scalar.into_repr());
+//     *self = result;
+//   }
+// }

-impl VartimeMultiscalarMul for GroupElement {
-  type Scalar = super::scalar::Scalar;
-  fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
-  where
-    I: IntoIterator,
-    I::Item: Borrow<Self::Scalar>,
-    J: IntoIterator,
-    J::Item: Borrow<Self>,
-    Self: Clone,
-  {
-    use curve25519_dalek::traits::VartimeMultiscalarMul;
-    <Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
-      scalars
-        .into_iter()
-        .map(|s| Scalar::decompress_scalar(s.borrow()))
-        .collect::<Vec<ScalarBytes>>(),
-      points,
-    )
-  }
-}
+// // These impls existed because dalek represents scalars as raw bytes; they
+// // let callers multiply by scalars without caring about that representation.
+// impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
+//   type Output = GroupElement;
+//   fn mul(self, scalar: &'b Scalar) -> GroupElement {
+//     self * Scalar::into_repr(scalar)
+//   }
+// }
+
+// impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
+//   type Output = GroupElement;
+
+//   fn mul(self, point: &'b GroupElement) -> GroupElement {
+//     Scalar::into_repr(self) * point
+//   }
+// }
+
+// macro_rules! define_mul_variants {
+//   (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
+//     impl<'b> Mul<&'b $rhs> for $lhs {
+//       type Output = $out;
+//       fn mul(self, rhs: &'b $rhs) -> $out {
+//         &self * rhs
+//       }
+//     }
+
+//     impl<'a> Mul<$rhs> for &'a $lhs {
+//       type Output = $out;
+//       fn mul(self, rhs: $rhs) -> $out {
+//         self * &rhs
+//       }
+//     }
+
+//     impl Mul<$rhs> for $lhs {
+//       type Output = $out;
+//       fn mul(self, rhs: $rhs) -> $out {
+//         &self * &rhs
+//       }
+//     }
+//   };
+// }
+
+// macro_rules! define_mul_assign_variants {
+//   (LHS = $lhs:ty, RHS = $rhs:ty) => {
+//     impl MulAssign<$rhs> for $lhs {
+//       fn mul_assign(&mut self, rhs: $rhs) {
+//         *self *= &rhs;
+//       }
+//     }
+//   };
+// }
+
+// define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
+// define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
+// define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
+
+
+// TODO
+// pub trait VartimeMultiscalarMul {
+//   type Scalar;
+//   fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
+//   where
+//     I: IntoIterator,
+//     I::Item: Borrow<Self::Scalar>,
+//     J: IntoIterator,
+//     J::Item: Borrow<Self>,
+//     Self: Clone;
+// }
+
+// impl VartimeMultiscalarMul for GroupElement {
+//   type Scalar = super::scalar::Scalar;
+//   fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
+//   where
+//     I: IntoIterator,
+//     I::Item: Borrow<Self::Scalar>,
+//     J: IntoIterator,
+//     J::Item: Borrow<Self>,
+//     Self: Clone,
+//   {
+//     // use curve25519_dalek::traits::VartimeMultiscalarMul;
+//     <Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
+//       scalars
+//         .into_iter()
+//         .map(|s| Scalar::into_repr(s.borrow()))
+//         .collect::<Vec<_>>(),
+//       points,
+//     )
+//   }
+// }
diff --git a/src/lib.rs b/src/lib.rs
index e48eecf..634aa23 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,9 +9,10 @@ extern crate core;
extern crate curve25519_dalek;
extern crate digest;
extern crate merlin;
-extern crate rand;
extern crate sha3;
extern crate test;
+extern crate lazy_static;
+extern crate ark_std;

#[cfg(feature = "multicore")]
extern crate rayon;
@@ -42,7 +43,7 @@ use r1csinstance::{
use r1csproof::{R1CSGens, R1CSProof};
use random::RandomTape;
use scalar::Scalar;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
use timer::Timer;
use transcript::{AppendToTranscript, ProofTranscript};
@@ -307,7 +308,7 @@ impl SNARKGens {
}

/// `SNARK` holds a proof produced by Spartan SNARK
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct SNARK {
  r1cs_sat_proof: R1CSProof,
  inst_evals: (Scalar, Scalar, Scalar),
@@ -484,7 +485,7 @@ impl NIZKGens {
}

/// `NIZK` holds a proof produced by Spartan NIZK
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct NIZK {
  r1cs_sat_proof: R1CSProof,
  r: (Vec<Scalar>, Vec<Scalar>),
diff --git a/src/nizk/bullet.rs b/src/nizk/bullet.rs
index 3f8951a..728dd33 100644
--- a/src/nizk/bullet.rs
+++ b/src/nizk/bullet.rs
@@ -9,9 +9,11 @@ use super::super::scalar::Scalar;
use super::super::transcript::ProofTranscript;
use core::iter;
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::Field;
+use ark_std::{One, Zero};

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct BulletReductionProof {
  L_vec: Vec<CompressedGroup>,
  R_vec: Vec<CompressedGroup>,
@@ -99,7 +101,7 @@ impl BulletReductionProof {
      transcript.append_point(b"R", &R.compress());

      let u = transcript.challenge_scalar(b"u");
-      let u_inv = u.invert().unwrap();
+      let u_inv = u.inverse().unwrap();

      for i in 0..n {
        a_L[i] = a_L[i] * u + u_inv * a_R[i];
@@ -158,7 +160,7 @@

    // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
    let mut challenges_inv = challenges.clone();
-    let allinv = Scalar::batch_invert(&mut challenges_inv);
+    // `batch_inversion` inverts the slice in place and returns `()`, so the
+    // product of all inverses (the value dalek's `batch_invert` returned) is
+    // recomputed from the inverted slice
+    ark_ff::fields::batch_inversion(&mut challenges_inv);
+    let allinv = challenges_inv.iter().fold(Scalar::one(), |acc, x| acc * x);

    // 3. Compute u_i^2 and (1/u_i)^2
    for i in 0..lg_n {
diff --git a/src/nizk/mod.rs b/src/nizk/mod.rs
index e80f2b1..00a6c4a 100644
--- a/src/nizk/mod.rs
+++ b/src/nizk/mod.rs
@@ -6,12 +6,12 @@ use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;

mod bullet;
use bullet::BulletReductionProof;

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct KnowledgeProof {
  alpha: CompressedGroup,
  z1: Scalar,
@@ -73,7 +73,7 @@ impl KnowledgeProof {
  }
}

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct EqualityProof {
  alpha: CompressedGroup,
  z: Scalar,
@@ -142,7 +142,7 @@ impl EqualityProof {
  }
}

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct ProductProof {
  alpha: CompressedGroup,
  beta: CompressedGroup,
@@ -288,7 +288,7 @@ impl ProductProof {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DotProductProof {
  delta: CompressedGroup,
  beta: CompressedGroup,
@@ -416,7 +416,7 @@ impl DotProductProofGens {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DotProductProofLog {
  bullet_reduction_proof: BulletReductionProof,
  delta: CompressedGroup,
@@ -567,15 +567,15 @@ impl DotProductProofLog {
#[cfg(test)]
mod tests {
  use super::*;
-  use rand::rngs::OsRng;
+  use ark_std::UniformRand;

  #[test]
  fn check_knowledgeproof() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");

-    let x = Scalar::random(&mut csprng);
-    let r = Scalar::random(&mut csprng);
+    let x = Scalar::rand(&mut rng);
+    let r = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
@@ -590,13 +590,13 @@

  #[test]
  fn check_equalityproof() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");

-    let v1 = Scalar::random(&mut csprng);
+    let v1 = Scalar::rand(&mut rng);
    let v2 = v1;
-    let s1 = Scalar::random(&mut csprng);
-    let s2 = Scalar::random(&mut csprng);
+    let s1 = Scalar::rand(&mut rng);
+    let s2 = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
@@ -618,15 +618,15 @@

  #[test]
  fn check_productproof() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let gens_1 = MultiCommitGens::new(1, b"test-productproof");

-    let x = Scalar::random(&mut csprng);
-    let rX = Scalar::random(&mut csprng);
-    let y = Scalar::random(&mut csprng);
-    let rY = Scalar::random(&mut csprng);
+    let x = Scalar::rand(&mut rng);
+    let rX = Scalar::rand(&mut rng);
+    let y = Scalar::rand(&mut rng);
+    let rY = Scalar::rand(&mut rng);
    let z = x * y;
-    let rZ = Scalar::random(&mut csprng);
+    let rZ = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
@@ -650,7 +650,7 @@

  #[test]
  fn check_dotproductproof() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let n = 1024;
@@ -660,12 +660,12 @@

    let mut x: Vec<Scalar> = Vec::new();
    let mut a: Vec<Scalar> = Vec::new();
    for _ in 0..n {
-      x.push(Scalar::random(&mut csprng));
-      a.push(Scalar::random(&mut csprng));
+      x.push(Scalar::rand(&mut rng));
+      a.push(Scalar::rand(&mut rng));
    }
    let y = DotProductProofLog::compute_dotproduct(&x, &a);
-    let r_x = Scalar::random(&mut csprng);
-    let r_y = Scalar::random(&mut csprng);
+    let r_x = Scalar::rand(&mut rng);
+    let r_y = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
@@ -689,18 +689,18 @@

  #[test]
  fn check_dotproductproof_log() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let n = 1024;

    let gens = DotProductProofGens::new(n, b"test-1024");

-    let x: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
-    let a: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
+    let x: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
+    let a: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
    let y = DotProductProof::compute_dotproduct(&x, &a);

-    let r_x = Scalar::random(&mut csprng);
-    let r_y = Scalar::random(&mut csprng);
+    let r_x = Scalar::rand(&mut rng);
+    let r_y = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
diff --git a/src/product_tree.rs b/src/product_tree.rs
index 59ef964..e72434b 100644
--- a/src/product_tree.rs
+++ b/src/product_tree.rs
@@ -5,7 +5,8 @@ use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use super::transcript::ProofTranscript;
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_std::{One};

#[derive(Debug)]
pub struct ProductCircuit {
@@ -107,7 +108,7 @@ impl DotProductCircuit {
}

#[allow(dead_code)]
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct LayerProof {
  pub proof: SumcheckInstanceProof,
  pub claims: Vec<Scalar>,
@@ -130,7 +131,7 @@ impl LayerProof {
}

#[allow(dead_code)]
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct LayerProofBatched {
  pub proof: SumcheckInstanceProof,
  pub claims_prod_left: Vec<Scalar>,
@@ -153,12 +154,12 @@ impl LayerProofBatched {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProductCircuitEvalProof {
  proof: Vec<LayerProof>,
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProductCircuitEvalProofBatched {
  proof: Vec<LayerProofBatched>,
  claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
diff --git a/src/r1csinstance.rs b/src/r1csinstance.rs
index 6a3885a..a0de322 100644
--- a/src/r1csinstance.rs
+++ b/src/r1csinstance.rs
@@ -12,10 +12,11 @@ use super::sparse_mlpoly::{
use super::timer::Timer;
use flate2::{write::ZlibEncoder, Compression};
use merlin::Transcript;
-use rand::rngs::OsRng;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_std::{One, Zero, UniformRand};
+use ark_ff::{Field};

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CSInstance {
  num_cons: usize,
  num_vars: usize,
@@ -55,7 +56,7 @@ impl R1CSCommitmentGens {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CSCommitment {
  num_cons: usize,
  num_vars: usize,
@@ -164,7 +165,7 @@ impl R1CSInstance {
    Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs)); - let mut csprng: OsRng = OsRng; + let mut rng = ark_std::rand::thread_rng(); // assert num_cons and num_vars are power of 2 assert_eq!((num_cons.log2() as usize).pow2(), num_cons); @@ -179,7 +180,7 @@ impl R1CSInstance { // produce a random satisfying assignment let Z = { let mut Z: Vec = (0..size_z) - .map(|_i| Scalar::random(&mut csprng)) + .map(|_i| Scalar::rand(&mut rng)) .collect::>(); Z[num_vars] = Scalar::one(); // set the constant term to 1 Z @@ -206,7 +207,7 @@ impl R1CSInstance { C.push(SparseMatEntry::new( i, C_idx, - AB_val * C_val.invert().unwrap(), + AB_val * C_val.inverse().unwrap(), )); } } @@ -319,7 +320,7 @@ impl R1CSInstance { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] pub struct R1CSEvalProof { proof: SparseMatPolyEvalProof, } diff --git a/src/r1csproof.rs b/src/r1csproof.rs index 60c6366..bd2d93d 100644 --- a/src/r1csproof.rs +++ b/src/r1csproof.rs @@ -15,9 +15,10 @@ use super::timer::Timer; use super::transcript::{AppendToTranscript, ProofTranscript}; use core::iter; use merlin::Transcript; -use serde::{Deserialize, Serialize}; +use ark_serialize::*; +use ark_std::{Zero, One}; -#[derive(Serialize, Deserialize, Debug)] +#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] pub struct R1CSProof { comm_vars: PolyCommitment, sc_proof_phase1: ZKSumcheckInstanceProof, @@ -492,7 +493,7 @@ impl R1CSProof { #[cfg(test)] mod tests { use super::*; - use rand::rngs::OsRng; +use ark_std::{UniformRand}; fn produce_tiny_r1cs() -> (R1CSInstance, Vec, Vec) { // three constraints over five variables Z1, Z2, Z3, Z4, and Z5 @@ -528,11 +529,11 @@ mod tests { let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C); // compute a satisfying assignment - let mut csprng: OsRng = OsRng; - let i0 = Scalar::random(&mut csprng); - let i1 = Scalar::random(&mut csprng); - let z1 = Scalar::random(&mut csprng); - let z2 = Scalar::random(&mut csprng); + let mut rng = ark_std::rand::thread_rng(); + let i0 = Scalar::rand(&mut rng); + let i1 = Scalar::rand(&mut rng); + let z1 = Scalar::rand(&mut rng); + let z2 = Scalar::rand(&mut rng); let z3 = (z1 + z2) * i0; // constraint 1: (Z1 + Z2) * I0 - Z3 = 0; let z4 = (z1 + i1) * z3; // constraint 2: (Z1 + I1) * (Z3) - Z4 = 0 let z5 = Scalar::zero(); //constraint 3 diff --git a/src/random.rs b/src/random.rs index 2a1a3a2..4337f27 100644 --- a/src/random.rs +++ b/src/random.rs @@ -1,7 +1,7 @@ use super::scalar::Scalar; use super::transcript::ProofTranscript; use merlin::Transcript; -use rand::rngs::OsRng; +use ark_std::{UniformRand}; pub struct RandomTape { tape: Transcript, @@ -10,9 +10,9 @@ pub struct RandomTape { impl RandomTape { pub fn new(name: &'static [u8]) -> Self { let tape = { - let mut csprng: OsRng = OsRng; + let mut rng = ark_std::rand::thread_rng(); let mut tape = Transcript::new(name); - tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng)); + tape.append_scalar(b"init_randomness", &Scalar::rand(&mut rng)); tape }; Self { tape } diff --git a/src/scalar/mod.rs b/src/scalar/mod.rs index f2cfd7a..b6182ee 100644 --- a/src/scalar/mod.rs +++ b/src/scalar/mod.rs @@ -1,43 +1,44 @@ -mod ristretto255; +pub use ark_bls12_377::Fr as Scalar; +// mod ristretto255; -pub type Scalar = ristretto255::Scalar; -pub type ScalarBytes = curve25519_dalek::scalar::Scalar; +// pub type Scalar = ristretto255::Scalar; +// pub type ScalarBytes = curve25519_dalek::scalar::Scalar; -pub trait ScalarFromPrimitives { - fn 
-  fn to_scalar(self) -> Scalar;
-}
+// pub trait ScalarFromPrimitives {
+//   fn to_scalar(self) -> Scalar;
+// }

-impl ScalarFromPrimitives for usize {
-  #[inline]
-  fn to_scalar(self) -> Scalar {
-    (0..self).map(|_i| Scalar::one()).sum()
-  }
-}
+// impl ScalarFromPrimitives for usize {
+//   #[inline]
+//   fn to_scalar(self) -> Scalar {
+//     (0..self).map(|_i| Scalar::one()).sum()
+//   }
+// }

-impl ScalarFromPrimitives for bool {
-  #[inline]
-  fn to_scalar(self) -> Scalar {
-    if self {
-      Scalar::one()
-    } else {
-      Scalar::zero()
-    }
-  }
-}
+// impl ScalarFromPrimitives for bool {
+//   #[inline]
+//   fn to_scalar(self) -> Scalar {
+//     if self {
+//       Scalar::one()
+//     } else {
+//       Scalar::zero()
+//     }
+//   }
+// }

-pub trait ScalarBytesFromScalar {
-  fn decompress_scalar(s: &Scalar) -> ScalarBytes;
-  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
-}
+// pub trait ScalarBytesFromScalar {
+//   fn decompress_scalar(s: &Scalar) -> ScalarBytes;
+//   fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
+// }

-impl ScalarBytesFromScalar for Scalar {
-  fn decompress_scalar(s: &Scalar) -> ScalarBytes {
-    ScalarBytes::from_bytes_mod_order(s.to_bytes())
-  }
+// impl ScalarBytesFromScalar for Scalar {
+//   fn decompress_scalar(s: &Scalar) -> ScalarBytes {
+//     ScalarBytes::from_bytes_mod_order(s.to_bytes())
+//   }

-  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
-    (0..s.len())
-      .map(|i| Scalar::decompress_scalar(&s[i]))
-      .collect::<Vec<ScalarBytes>>()
-  }
-}
+//   fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
+//     (0..s.len())
+//       .map(|i| Scalar::decompress_scalar(&s[i]))
+//       .collect::<Vec<ScalarBytes>>()
+//   }
+// }
diff --git a/src/scalar/ristretto255.rs b/src/scalar/ristretto255.rs
index e8e33c8..08d2779 100755
--- a/src/scalar/ristretto255.rs
+++ b/src/scalar/ristretto255.rs
@@ -11,7 +11,7 @@ use core::fmt;
use core::iter::{Product, Sum};
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use rand_core::{CryptoRng, RngCore};
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
use zeroize::Zeroize;

@@ -196,7 +196,7 @@ macro_rules! impl_binops_multiplicative {
// The internal representation of this type is four 64-bit unsigned
// integers in little-endian order. `Scalar` values are always in
// Montgomery form; i.e., Scalar(a) = aR mod q, with R = 2^256.
-#[derive(Clone, Copy, Eq, Serialize, Deserialize)]
+#[derive(Clone, Copy, Eq, CanonicalSerialize, CanonicalDeserialize)]
pub struct Scalar(pub(crate) [u64; 4]);

impl fmt::Debug for Scalar {
@@ -634,7 +634,7 @@ impl Scalar {
    debug_assert!(acc != Scalar::zero());

    // Compute the inverse of all products
-    acc = acc.invert().unwrap();
+    acc = acc.inverse().unwrap();

    // We need to return the product of all inverses later
    let ret = acc;
@@ -1140,14 +1140,14 @@ mod tests {

  #[test]
  fn test_inversion() {
-    assert_eq!(Scalar::zero().invert().is_none().unwrap_u8(), 1);
-    assert_eq!(Scalar::one().invert().unwrap(), Scalar::one());
-    assert_eq!((-&Scalar::one()).invert().unwrap(), -&Scalar::one());
+    assert_eq!(Scalar::zero().inverse().is_none().unwrap_u8(), 1);
+    assert_eq!(Scalar::one().inverse().unwrap(), Scalar::one());
+    assert_eq!((-&Scalar::one()).inverse().unwrap(), -&Scalar::one());

    let mut tmp = R2;
    for _ in 0..100 {
-      let mut tmp2 = tmp.invert().unwrap();
+      let mut tmp2 = tmp.inverse().unwrap();
      tmp2.mul_assign(&tmp);

      assert_eq!(tmp2, Scalar::one());
@@ -1170,7 +1170,7 @@
    let mut r3 = R;

    for _ in 0..100 {
-      r1 = r1.invert().unwrap();
+      r1 = r1.inverse().unwrap();
      r2 = r2.pow_vartime(&q_minus_2);
      r3 = r3.pow(&q_minus_2);
diff --git a/src/sparse_mlpoly.rs b/src/sparse_mlpoly.rs
index 00a86ae..8f757d3 100644
--- a/src/sparse_mlpoly.rs
+++ b/src/sparse_mlpoly.rs
@@ -14,9 +14,10 @@ use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use core::cmp::Ordering;
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One, Zero};

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct SparseMatEntry {
  row: usize,
  col: usize,
@@ -29,7 +30,7 @@ impl SparseMatEntry {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct SparseMatPolynomial {
  num_vars_x: usize,
  num_vars_y: usize,
@@ -42,7 +43,7 @@
pub struct Derefs {
  comb: DensePolynomial,
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DerefsCommitment {
  comm_ops_val: PolyCommitment,
}
@@ -71,7 +72,7 @@ impl Derefs {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DerefsEvalProof {
  proof_derefs: PolyEvalProof,
}
@@ -321,7 +322,7 @@ impl SparseMatPolyCommitmentGens {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct SparseMatPolyCommitment {
  batch_size: usize,
  num_ops: usize,
@@ -687,7 +688,7 @@ impl PolyEvalNetwork {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
struct HashLayerProof {
  eval_row: (Vec<Scalar>, Vec<Scalar>, Scalar),
  eval_col: (Vec<Scalar>, Vec<Scalar>, Scalar),
@@ -1035,7 +1036,7 @@ impl HashLayerProof {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
struct ProductLayerProof {
  eval_row: (Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
  eval_col: (Scalar, Vec<Scalar>, Vec<Scalar>, Scalar),
@@ -1325,7 +1326,7 @@ impl ProductLayerProof {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
struct PolyEvalNetworkProof {
  proof_prod_layer: ProductLayerProof,
  proof_hash_layer: HashLayerProof,
@@ -1439,7 +1440,7 @@ impl PolyEvalNetworkProof {
  }
}

-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct SparseMatPolyEvalProof {
  comm_derefs: DerefsCommitment,
  poly_eval_network_proof: PolyEvalNetworkProof,
@@ -1626,11 +1627,11 @@ impl SparsePolynomial {
#[cfg(test)]
mod tests {
  use super::*;
-  use rand::rngs::OsRng;
-  use rand::RngCore;
+  use ark_std::rand::RngCore;
+  use ark_std::UniformRand;

  #[test]
  fn check_sparse_polyeval_proof() {
-    let mut csprng: OsRng = OsRng;
+    let mut rng = ark_std::rand::thread_rng();

    let num_nz_entries: usize = 256;
    let num_rows: usize = 256;
@@ -1644,7 +1645,7 @@
      M.push(SparseMatEntry::new(
-        (csprng.next_u64() % (num_rows as u64)) as usize,
-        (csprng.next_u64() % (num_cols as u64)) as usize,
-        Scalar::random(&mut csprng),
+        // sample the row/column indices and the value from the same thread_rng
+        (rng.next_u64() % (num_rows as u64)) as usize,
+        (rng.next_u64() % (num_cols as u64)) as usize,
+        Scalar::rand(&mut rng),
      ));
    }

@@ -1662,10 +1663,10 @@

    // evaluation
    let rx: Vec<Scalar> = (0..num_vars_x)
-      .map(|_i| Scalar::random(&mut csprng))
+      .map(|_i| Scalar::rand(&mut rng))
      .collect::<Vec<Scalar>>();
    let ry: Vec<Scalar> = (0..num_vars_y)
-      .map(|_i| Scalar::random(&mut csprng))
+      .map(|_i| Scalar::rand(&mut rng))
      .collect::<Vec<Scalar>>();
    let eval = SparseMatPolynomial::multi_evaluate(&[&poly_M], &rx, &ry);
    let evals = vec![eval[0], eval[0], eval[0]];
diff --git a/src/sumcheck.rs b/src/sumcheck.rs
index a077c10..cc45114 100644
--- a/src/sumcheck.rs
+++ b/src/sumcheck.rs
@@ -12,9 +12,10 @@ use super::unipoly::{CompressedUniPoly, UniPoly};
use core::iter;
use itertools::izip;
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
+use ark_serialize::*;
+use ark_ff::{One, Zero};

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct SumcheckInstanceProof {
  compressed_polys: Vec<CompressedUniPoly>,
}
@@ -61,7 +62,7 @@ impl SumcheckInstanceProof {
  }
}

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct ZKSumcheckInstanceProof {
  comm_polys: Vec<CompressedGroup>,
  comm_evals: Vec<CompressedGroup>,
diff --git a/src/unipoly.rs b/src/unipoly.rs
index dcc3918..2f94aad 100644
--- a/src/unipoly.rs
+++ b/src/unipoly.rs
@@ -1,10 +1,10 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::group::GroupElement;
-use super::scalar::{Scalar, ScalarFromPrimitives};
+use super::scalar::{Scalar};
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
-use serde::{Deserialize, Serialize};
-
+use ark_serialize::*;
+use ark_ff::{One, Zero, Field};
// ax^2 + bx + c stored as vec![c,b,a]
// ax^3 + bx^2 + cx + d stored as vec![d,c,b,a]
#[derive(Debug)]
pub struct UniPoly {
@@ -14,7 +14,7 @@

// ax^2 + bx + c stored as vec![c,a]
// ax^3 + bx^2 + cx + d stored as vec![d,b,a]
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct CompressedUniPoly {
  coeffs_except_linear_term: Vec<Scalar>,
}
@@ -25,7 +25,7 @@ impl UniPoly {
    assert!(evals.len() == 3 || evals.len() == 4);
    let coeffs = if evals.len() == 3 {
      // ax^2 + bx + c
-      let two_inv = (2_usize).to_scalar().invert().unwrap();
+      let two_inv = Scalar::from(2).inverse().unwrap();

      let c = evals[0];
      let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
@@ -33,8 +33,8 @@
      vec![c, b, a]
    } else {
      // ax^3 + bx^2 + cx + d
-      let two_inv = (2_usize).to_scalar().invert().unwrap();
-      let six_inv = (6_usize).to_scalar().invert().unwrap();
+      let two_inv = Scalar::from(2).inverse().unwrap();
+      let six_inv = Scalar::from(6).inverse().unwrap();

      let d = evals[0];
      let a = six_inv
@@ -128,8 +128,8 @@
  fn test_from_evals_quad() {
    // polynomial is 2x^2 + 3x + 1
    let e0 = Scalar::one();
-    let e1 = (6_usize).to_scalar();
-    let e2 = (15_usize).to_scalar();
+    let e1 = Scalar::from(6);
+    let e2 = Scalar::from(15);

    let evals = vec![e0, e1, e2];
    let poly = UniPoly::from_evals(&evals);
@@ -137,8 +137,8 @@
    assert_eq!(poly.eval_at_one(), e1);
    assert_eq!(poly.coeffs.len(), 3);
    assert_eq!(poly.coeffs[0], Scalar::one());
-    assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
-    assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
+    assert_eq!(poly.coeffs[1], Scalar::from(3));
+    assert_eq!(poly.coeffs[2], Scalar::from(2));

    let hint = e0 + e1;
    let compressed_poly = poly.compress();
@@ -147,17 +147,17 @@
      assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
    }

-    let e3 = (28_usize).to_scalar();
-    assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3);
+    let e3 = Scalar::from(28);
+    assert_eq!(poly.evaluate(&Scalar::from(3)), e3);
  }

  #[test]
  fn test_from_evals_cubic() {
    // polynomial is x^3 + 2x^2 + 3x + 1
    let e0 = Scalar::one();
-    let e1 = (7_usize).to_scalar();
-    let e2 = (23_usize).to_scalar();
-    let e3 = (55_usize).to_scalar();
+    let e1 = Scalar::from(7);
+    let e2 = Scalar::from(23);
+    let e3 = Scalar::from(55);

    let evals = vec![e0, e1, e2, e3];
    let poly = UniPoly::from_evals(&evals);
@@ -165,9 +165,9 @@
    assert_eq!(poly.eval_at_one(), e1);
    assert_eq!(poly.coeffs.len(), 4);
    assert_eq!(poly.coeffs[0], Scalar::one());
-    assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
-    assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
-    assert_eq!(poly.coeffs[3], (1_usize).to_scalar());
+    assert_eq!(poly.coeffs[1], Scalar::from(3));
+    assert_eq!(poly.coeffs[2], Scalar::from(2));
+    assert_eq!(poly.coeffs[3], Scalar::from(1));

    let hint = e0 + e1;
    let compressed_poly = poly.compress();
@@ -176,7 +176,7 @@
      assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
    }

-    let e4 = (109_usize).to_scalar();
-    assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4);
+    let e4 = Scalar::from(109);
+    assert_eq!(poly.evaluate(&Scalar::from(4)), e4);
  }
}
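A quick way to sanity-check the `ark-serialize` derives that replace `serde` throughout this diff (a reviewer's sketch, not part of the change itself) is to round-trip a BLS12-377 scalar through `CanonicalSerialize`/`CanonicalDeserialize`:

```rust
use ark_bls12_377::Fr as Scalar;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::UniformRand;

fn main() {
  let mut rng = ark_std::rand::thread_rng();
  let x = Scalar::rand(&mut rng);

  // `serialize` writes the canonical (compressed) byte encoding
  let mut bytes = Vec::new();
  x.serialize(&mut bytes).unwrap();

  // deserializing the same bytes must reproduce the element
  let y = Scalar::deserialize(&bytes[..]).unwrap();
  assert_eq!(x, y);
}
```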