From 7db2d30972ce72ee7622070a1debc3b72580f4c7 Mon Sep 17 00:00:00 2001
From: Nicolas Gailly
Date: Wed, 22 Mar 2023 23:48:28 +0100
Subject: [PATCH] PST/SQRT + Benches (#35)

* first version of the sqrt PST without the MIPP
* snarkpack integration
* snarkpack integration
* adding mipp as submodule directly
* snarkpack integration
* finalizing
* snarkpack integration
* update mipp with latest optimisations and add preliminary documentation
* improve codebase documentation
* remove unused imports and apply cargo fix changes
* passing v0.4
* adding gh action
* correct workflow item
* correct working dir and msrv
* remove unnecessary stuff
* wip
* wip
* remove circuit in fq as it's not needed now
* done for tonight
* wip
* wip
* wip
* parallelise commitment and groth16 verification
* finalise comments for mipp
* wip
* finalise comments
* wip
* compiling but test failing
* putting back non random blinds
* using absorb when we can
* absorbing scalar
* with bls12-381
* stuff
* trying to bring ark-blst to testudo
* correcting random implementation
* with square in place
* works with blst
* works with blst
* fix: don't require nightly Rust

With the `test` feature removed, the crate can also be built with a stable
Rust release and doesn't require a nightly Rust version.

* using ark-blst main branch
* started cleanup and added testudo benchmark
* add testudo snark and nizk in separate files
* rename functions that perform setups and add comments
* prototyping
* explain testudo-nizk
* add support for odd case in sqrt_pst
* add missing constraints and correct proof size for benchmarks
* add support for odd case in sqrt_pst
* fix typo in comment
* Documentation #31
* fix typo in comment
* Fix Cargo.toml and add benchmark for sqrt pst (#34)
* add benchmark for sqrt pst
* fix typo in comment
* add README
* comment from readme not executing

---------

Co-authored-by: Mara Mihali
Co-authored-by: Mara Mihali
Co-authored-by: Volker Mische
---
 .cargo/config                 |    5 +-
 .github/workflows/testudo.yml |   52 +-
 Cargo.toml                    |   63 +-
 README.md                     |  424 +----
 benches/nizk.rs               |  151 --
 benches/pst.rs                |   98 +
 benches/r1cs.rs               |   72 -
 benches/snark.rs              |  137 --
 benches/testudo.rs            |  127 ++
 examples/cubic.rs             |  280 +--
 profiler/nizk.rs              |   52 -
 profiler/snark.rs             |   63 -
 profiler/testudo.rs           |   92 +
 rustfmt.toml                  |    4 +
 src/commitments.rs            |  145 +-
 src/constraints.rs            |  835 +++++----
 src/dense_mlpoly.rs           | 1358 +++++++-------
 src/errors.rs                 |   38 +-
 src/group.rs                  |   80 -
 src/lib.rs                    |  994 +++--------
 src/macros.rs                 |   56 +
 src/math.rs                   |   52 +-
 src/mipp.rs                   |  410 +++++
 src/nizk/bullet.rs            |  490 ++---
 src/nizk/mod.rs               |  917 ++--------
 src/parameters.rs             | 2354 +++++++++++++++++++++++-
 src/poseidon_transcript.rs    |  158 +-
 src/product_tree.rs           |  862 +++++----
 src/r1csinstance.rs           |  699 ++++----
 src/r1csproof.rs              | 1111 ++++++------
 src/random.rs                 |   28 -
 src/scalar/mod.rs             |   44 -
 src/sparse_mlpoly.rs          | 3143 ++++++++++++++++-----------------
 src/sqrt_pst.rs               |  343 ++++
 src/sumcheck.rs               | 1277 +++++---------
 src/testudo_nizk.rs           |  202 +++
 src/testudo_snark.rs          |  377 ++++
 src/timer.rs                  |  111 +-
 src/transcript.rs             |   77 +-
 src/unipoly.rs                |  331 ++--
 40 files changed, 9773 insertions(+), 8339 deletions(-)
 delete mode 100644 benches/nizk.rs
 create mode 100644 benches/pst.rs
 delete mode 100644 benches/r1cs.rs
 delete mode 100644 benches/snark.rs
 create mode 100644 benches/testudo.rs
 delete mode 100644 profiler/nizk.rs
 delete mode 100644 profiler/snark.rs
 create mode 100644 profiler/testudo.rs
 create mode 100644 rustfmt.toml
 delete mode 100644 src/group.rs
 create mode 100644 src/macros.rs
 create
mode 100644 src/mipp.rs delete mode 100644 src/random.rs delete mode 100644 src/scalar/mod.rs create mode 100644 src/sqrt_pst.rs create mode 100644 src/testudo_nizk.rs create mode 100644 src/testudo_snark.rs diff --git a/.cargo/config b/.cargo/config index 3a420e9..8b13789 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,4 +1 @@ -[build] -rustflags = [ - "-C", "target-cpu=native", -] \ No newline at end of file + diff --git a/.github/workflows/testudo.yml b/.github/workflows/testudo.yml index 781bd98..892a051 100644 --- a/.github/workflows/testudo.yml +++ b/.github/workflows/testudo.yml @@ -1,37 +1,27 @@ name: Build and Test Testudo -on: - push: - branches: [master] - pull_request: - branches: [master] -# The crate ark-ff uses the macro llvm_asm! when emitting asm which returns an -# error because it was deprecated in favour of asm!. We temporarily overcome -# this problem by setting the environment variable below (until the crate -# is updated). -env: - RUSTFLAGS: "--emit asm -C llvm-args=-x86-asm-syntax=intel" +on: [push, pull_request] jobs: - build_nightly: + cargo-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Install - run: rustup default nightly - - name: Install rustfmt Components - run: rustup component add rustfmt - # - name: Install clippy - # run: rustup component add clippy - - name: Build - run: cargo build --verbose - - name: Run tests - run: cargo test --verbose - - name: Build examples - run: cargo build --examples --verbose - - name: Check Rustfmt Code Style - run: cargo fmt --all -- --check - # cargo clippy uses cargo check which returns an error when asm is emitted - # we want to emit asm for ark-ff operations so we avoid using clippy for # now - # - name: Check clippy warnings - # run: cargo clippy --all-targets --all-features + - name: Checkout sources + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Install toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + override: true + + - uses: Swatinem/rust-cache@v2 + with: + shared-key: cache-${{ hashFiles('**/Cargo.lock') }} + cache-on-failure: true + + - name: cargo test + run: RUST_LOG=info cargo test --all --all-features -- --nocapture \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index e5d5b4a..11a3736 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,20 +18,26 @@ itertools = "0.10.0" colored = "2.0.0" thiserror = "1.0" json = "0.12.4" -ark-ff = { version = "^0.3.0", default-features = false } -ark-ec = { version = "^0.3.0", default-features = false } -ark-std = { version = "^0.3.0"} -ark-bls12-377 = { version = "^0.3.0", features = ["r1cs","curve"] } -ark-serialize = { version = "^0.3.0", features = ["derive"] } -ark-sponge = { version = "^0.3.0" , features = ["r1cs"] } -ark-crypto-primitives = { version = "^0.3.0", default-features = true } -ark-r1cs-std = { version = "^0.3.0", default-features = false } -ark-nonnative-field = { version = "0.3.0", default-features = false } -ark-relations = { version = "^0.3.0", default-features = false, optional = true } -ark-groth16 = { version = "^0.3.0", features = ["r1cs"] } -ark-bw6-761 = { version = "^0.3.0" } -ark-poly-commit = { version = "^0.3.0" } -ark-poly = {version = "^0.3.0"} +ark-ff = { version = "0.4.0", default-features = false } +ark-ec = { version = "0.4.0", default-features = false } +ark-std = { version = "0.4.0"} +ark-bls12-377 = { version = "0.4.0", features = ["r1cs","curve"] } +ark-bls12-381 = { version = "0.4.0", features = ["curve"] } +ark-blst = { 
git = "https://github.com/nikkolasg/ark-blst" } +ark-serialize = { version = "0.4.0", features = ["derive"] } +ark-crypto-primitives = {version = "0.4.0", features = ["sponge","r1cs","snark"] } +ark-r1cs-std = { version = "0.4.0", default-features = false } +ark-relations = { version = "0.4.0", default-features = false, optional = true } +ark-snark = { version = "0.4.0", default-features = false } +ark-groth16 = { version = "0.3.0" } +ark-bw6-761 = { version = "0.4.0" } +ark-poly-commit = { version = "0.4.0" } +ark-poly = {version = "0.4.0"} + +poseidon-paramgen = { git = "https://github.com/nikkolasg/poseidon377", branch = "feat/v0.4" } +poseidon-parameters = { git = "https://github.com/nikkolasg/poseidon377", branch = "feat/v0.4" } +# Needed for ark-blst +blstrs = { version = "^0.6.1", features = ["__private_bench"] } lazy_static = "1.4.0" rand = { version = "0.8", features = [ "std", "std_rng" ] } @@ -46,29 +52,20 @@ csv = "1.1.5" criterion = "0.3.6" [lib] -name = "libspartan" +name = "libtestudo" path = "src/lib.rs" [[bin]] -name = "snark" -path = "profiler/snark.rs" - -[[bin]] -name = "nizk" -path = "profiler/nizk.rs" - -[[bench]] -name = "snark" -harness = false +name = "testudo" +path = "profiler/testudo.rs" [[bench]] -name = "nizk" +name = "testudo" harness = false [[bench]] -name = "r1cs" +name = "pst" harness = false -debug = true [features] multicore = ["rayon"] @@ -79,6 +76,10 @@ parallel = [ "std", "ark-ff/parallel", "ark-std/parallel", "ark-ec/parallel", "a std = ["ark-ff/std", "ark-ec/std", "ark-std/std", "ark-relations/std", "ark-serialize/std"] [patch.crates-io] -ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std/", rev = "a2a5ac491ae005ba2afd03fd21b7d3160d794a83"} -ark-poly-commit = {git = "https://github.com/maramihali/poly-commit"} - +ark-poly-commit = {git = "https://github.com/cryptonetlab/ark-polycommit", branch="feat/variable-crs"} +ark-groth16 = { git = "https://github.com/arkworks-rs/groth16" } +blstrs = { git = "https://github.com/nikkolasg/blstrs", branch = "feat/arkwork" } +ark-ec = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" } +ark-ff = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" } +ark-poly = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" } +ark-serialize = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" } \ No newline at end of file diff --git a/README.md b/README.md index 0eed4a7..fd7e65c 100644 --- a/README.md +++ b/README.md @@ -1,421 +1,27 @@ -# Spartan: High-speed zkSNARKs without trusted setup +# Testudo -![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg) -[![](https://img.shields.io/crates/v/spartan.svg)](<(https://crates.io/crates/spartan)>) +[![Build and Test Testudo](https://github.com/cryptonetlab/testudo/actions/workflows/testudo.yml/badge.svg?branch=master)](https://github.com/cryptonetlab/testudo/actions/workflows/testudo.yml) -Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libspartan,` a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. 
-The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model.
+Testudo is a linear-time prover SNARK with a small and universal trusted setup. For a deep dive, please refer to [this](https://www.notion.so/pl-strflt/Testudo-Blog-Post-Final-a18db71f8e634ebbb9f68383f7904c51) blog post.
-A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details.
+At its current stage, the repository contains:
-Note that this library has _not_ received a security review or audit.
+- a modified version of [Spartan](https://github.com/microsoft/Spartan) using [arkworks](https://github.com/arkworks-rs), with the sumchecks verified using Groth16
+- a fast version of the [PST](https://eprint.iacr.org/2011/587.pdf) commitment scheme with a square-root trusted setup
+- support for an arkworks wrapper ([repo](https://github.com/nikkolasg/ark-blst)) around the fast blst library, with GPU integration
-## Highlights
+## Building `testudo`
-We now highlight Spartan's distinctive features.
+Testudo builds with stable Rust.
-- **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
+Run `cargo build` to build the repository and `cargo test` to run the tests.
-- **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exists efficient transformations and compiler toolchains from high-level programs of interest.
+To run the current benchmarks on BLS12-377:
-- **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).
-
-- **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.
-
-- **State-of-the-art performance:**
-  Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.
-### Implementation details
-
-`libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform.
We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library. - -## Examples - -To import `libspartan` into your Rust project, add the following dependency to `Cargo.toml`: - -```text -spartan = "0.7.1" -``` - -The following example shows how to use `libspartan` to create and verify a SNARK proof. -Some of our public APIs' style is inspired by the underlying crates we use. - -```rust -# extern crate libspartan; -# extern crate merlin; -# use libspartan::{Instance, SNARKGens, SNARK}; -# use libspartan::poseidon_transcript::PoseidonTranscript; -# use libspartan::parameters::poseidon_params; -# fn main() { - // specify the size of an R1CS instance - let num_vars = 1024; - let num_cons = 1024; - let num_inputs = 10; - let num_non_zero_entries = 1024; - - // produce public parameters - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); - - // ask the library to produce a synthentic R1CS instance - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // create a commitment to the R1CS instance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - let params = poseidon_params(); - - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove(&inst, &comm, &decomm, vars, &inputs, &gens, &mut prover_transcript); - - // verify the proof of satisfiability - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&comm, &inputs, &mut verifier_transcript, &gens) - .is_ok()); - println!("proof verification successful!"); -# } -``` - -Here is another example to use the NIZK variant of the Spartan proof system: - -```rust -# extern crate libspartan; -# extern crate merlin; -# use libspartan::{Instance, NIZKGens, NIZK}; -# use libspartan::poseidon_transcript::PoseidonTranscript; -# use libspartan::parameters::poseidon_params; -# fn main() { - // specify the size of an R1CS instance - let num_vars = 1024; - let num_cons = 1024; - let num_inputs = 10; - - // produce public parameters - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - - // ask the library to produce a synthentic R1CS instance - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - let params = poseidon_params(); - - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); - - // verify the proof of satisfiability - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&inst, &inputs, &mut verifier_transcript, &gens) - .is_ok()); - println!("proof verification successful!"); -# } -``` - -Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance - -```rust -#![allow(non_snake_case)] -# extern crate ark_std; -# extern crate libspartan; -# extern crate merlin; -# mod scalar; -# use scalar::Scalar; -# use libspartan::parameters::poseidon_params; -# use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK}; -# use libspartan::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; -# -# use 
ark_ff::{PrimeField, Field, BigInteger}; -# use ark_std::{One, Zero, UniformRand}; -# fn main() { - // produce a tiny instance - let ( - num_cons, - num_vars, - num_inputs, - num_non_zero_entries, - inst, - assignment_vars, - assignment_inputs, - ) = produce_tiny_r1cs(); - - // produce public parameters - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); - - // create a commitment to the R1CS instance - let (comm, decomm) = SNARK::encode(&inst, &gens); - let params = poseidon_params(); - - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove( - &inst, - &comm, - &decomm, - assignment_vars, - &assignment_inputs, - &gens, - &mut prover_transcript, - ); - - // verify the proof of satisfiability - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) - .is_ok()); - println!("proof verification successful!"); -# } - -# fn produce_tiny_r1cs() -> ( -# usize, -# usize, -# usize, -# usize, -# Instance, -# VarsAssignment, -# InputsAssignment, -# ) { - // We will use the following example, but one could construct any R1CS instance. - // Our R1CS instance is three constraints over five variables and two public inputs - // (Z0 + Z1) * I0 - Z2 = 0 - // (Z0 + I1) * Z2 - Z3 = 0 - // Z4 * 1 - 0 = 0 - - // parameters of the R1CS instance rounded to the nearest power of two - let num_cons = 4; - let num_vars = 5; - let num_inputs = 2; - let num_non_zero_entries = 5; - - // We will encode the above constraints into three matrices, where - // the coefficients in the matrix are in the little-endian byte order - let mut A: Vec<(usize, usize, Vec)> = Vec::new(); - let mut B: Vec<(usize, usize, Vec)> = Vec::new(); - let mut C: Vec<(usize, usize, Vec)> = Vec::new(); - - // The constraint system is defined over a finite field, which in our case is - // the scalar field of ristreeto255/curve25519 i.e., p = 2^{252}+27742317777372353535851937790883648493 - // To construct these matrices, we will use `curve25519-dalek` but one can use any other method. - - // a variable that holds a byte representation of 1 - let one = Scalar::one().into_repr().to_bytes_le(); - - // R1CS is a set of three sparse matrices A B C, where is a row for every - // constraint and a column for every entry in z = (vars, 1, inputs) - // An R1CS instance is satisfiable iff: - // Az \circ Bz = Cz, where z = (vars, 1, inputs) - - // constraint 0 entries in (A,B,C) - // constraint 0 is (Z0 + Z1) * I0 - Z2 = 0. 
- // We set 1 in matrix A for columns that correspond to Z0 and Z1 - // We set 1 in matrix B for column that corresponds to I0 - // We set 1 in matrix C for column that corresponds to Z2 - A.push((0, 0, one.clone())); - A.push((0, 1, one.clone())); - B.push((0, num_vars + 1, one.clone())); - C.push((0, 2, one.clone())); - - // constraint 1 entries in (A,B,C) - A.push((1, 0, one.clone())); - A.push((1, num_vars + 2, one.clone())); - B.push((1, 2, one.clone())); - C.push((1, 3, one.clone())); - - // constraint 3 entries in (A,B,C) - A.push((2, 4, one.clone())); - B.push((2, num_vars, one.clone())); - - let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); - - // compute a satisfying assignment -let mut rng = ark_std::rand::thread_rng(); - let i0 = Scalar::rand(&mut rng); - let i1 = Scalar::rand(&mut rng); - let z0 = Scalar::rand(&mut rng); - let z1 = Scalar::rand(&mut rng); - let z2 = (z0 + z1) * i0; // constraint 0 - let z3 = (z0 + i1) * z2; // constraint 1 - let z4 = Scalar::zero(); //constraint 2 - - // create a VarsAssignment - let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars]; - vars[0] = z0.into_repr().to_bytes_le(); - vars[1] = z1.into_repr().to_bytes_le(); - vars[2] = z2.into_repr().to_bytes_le(); - vars[3] = z3.into_repr().to_bytes_le(); - vars[4] = z4.into_repr().to_bytes_le(); - let assignment_vars = VarsAssignment::new(&vars).unwrap(); - - // create an InputsAssignment - let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs]; - inputs[0] = i0.into_repr().to_bytes_le(); - inputs[1] = i1.into_repr().to_bytes_le(); - let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); - - // check if the instance we created is satisfiable - let res = inst.is_sat(&assignment_vars, &assignment_inputs); - assert_eq!(res.unwrap(), true); - - ( - num_cons, - num_vars, - num_inputs, - num_non_zero_entries, - inst, - assignment_vars, - assignment_inputs, - ) -# } -``` - -For more examples, see [`examples/`](examples) directory in this repo. - -## Building `libspartan` - -Install [`rustup`](https://rustup.rs/) - -Switch to nightly Rust using `rustup`: - -```text -rustup default nightly +```console +cargo bench --bench testudo --all-features release -- --nocapture ``` -Clone the repository: - -```text -git clone https://github.com/Microsoft/Spartan -cd Spartan -``` - -To build docs for public APIs of `libspartan`: - -```text -cargo doc -``` - -To run tests: - -```text -RUSTFLAGS="-C target_cpu=native" cargo test -``` - -To build `libspartan`: - -```text -RUSTFLAGS="-C target_cpu=native" cargo build --release -``` - -> NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build remove the "simd_backend" feature argument in `Cargo.toml`. - -### Supported features - -- `profile`: enables fine-grained profiling information (see below for its use) - -## Performance - -### End-to-end benchmarks - -`libspartan` includes two benches: `benches/nizk.rs` and `benches/snark.rs`. If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below). - -To run end-to-end benchmarks: - -```text -RUSTFLAGS="-C target_cpu=native" cargo bench -``` - -### Fine-grained profiling - -Build `libspartan` with `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`. - -These profilers report performance as depicted below (for varying R1CS instance sizes). 
The reported -performance is from running the profilers on a Microsoft Surface Laptop 3 on a single CPU core of Intel Core i7-1065G7 running Ubuntu 20.04 (atop WSL2 on Windows 10). -See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature. - -```text -$ ./target/release/snark -Profiler:: SNARK - * number_of_constraints 1048576 - * number_of_variables 1048576 - * number_of_inputs 10 - * number_non-zero_entries_A 1048576 - * number_non-zero_entries_B 1048576 - * number_non-zero_entries_C 1048576 - * SNARK::encode - * SNARK::encode 14.2644201s - * SNARK::prove - * R1CSProof::prove - * polycommit - * polycommit 2.7175848s - * prove_sc_phase_one - * prove_sc_phase_one 683.7481ms - * prove_sc_phase_two - * prove_sc_phase_two 846.1056ms - * polyeval - * polyeval 193.4216ms - * R1CSProof::prove 4.4416193s - * len_r1cs_sat_proof 47024 - * eval_sparse_polys - * eval_sparse_polys 377.357ms - * R1CSEvalProof::prove - * commit_nondet_witness - * commit_nondet_witness 14.4507331s - * build_layered_network - * build_layered_network 3.4360521s - * evalproof_layered_network - * len_product_layer_proof 64712 - * evalproof_layered_network 15.5708066s - * R1CSEvalProof::prove 34.2930559s - * len_r1cs_eval_proof 133720 - * SNARK::prove 39.1297568s - * SNARK::proof_compressed_len 141768 - * SNARK::verify - * verify_sat_proof - * verify_sat_proof 20.0828ms - * verify_eval_proof - * verify_polyeval_proof - * verify_prod_proof - * verify_prod_proof 1.1847ms - * verify_hash_proof - * verify_hash_proof 81.06ms - * verify_polyeval_proof 82.3583ms - * verify_eval_proof 82.8937ms - * SNARK::verify 103.0536ms -``` - -```text -$ ./target/release/nizk -Profiler:: NIZK - * number_of_constraints 1048576 - * number_of_variables 1048576 - * number_of_inputs 10 - * number_non-zero_entries_A 1048576 - * number_non-zero_entries_B 1048576 - * number_non-zero_entries_C 1048576 - * NIZK::prove - * R1CSProof::prove - * polycommit - * polycommit 2.7220635s - * prove_sc_phase_one - * prove_sc_phase_one 722.5487ms - * prove_sc_phase_two - * prove_sc_phase_two 862.6796ms - * polyeval - * polyeval 190.2233ms - * R1CSProof::prove 4.4982305s - * len_r1cs_sat_proof 47024 - * NIZK::prove 4.5139888s - * NIZK::proof_compressed_len 48134 - * NIZK::verify - * eval_sparse_polys - * eval_sparse_polys 395.0847ms - * verify_sat_proof - * verify_sat_proof 19.286ms - * NIZK::verify 414.5102ms -``` - -## LICENSE - -See [LICENSE](./LICENSE) - -## Contributing +## Join us! -See [CONTRIBUTING](./CONTRIBUTING.md) +If you want to contribute, reach out to the Discord server of [cryptonet](https://discord.com/invite/CFnTSkVTCk). 
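+
+## Example
+
+A minimal end-to-end sketch of the `TestudoSnark` API introduced in this patch (adapted from `examples/cubic.rs` and `benches/testudo.rs` below; the synthetic-instance sizes are illustrative). Note that the square-root trusted setup means the PST commitment key is built over roughly half the multilinear variables, mirroring the ⌈num_vars/2⌉-variable `MultilinearPC` setup in `benches/pst.rs`.
+
+```rust
+use libtestudo::parameters::poseidon_params;
+use libtestudo::poseidon_transcript::PoseidonTranscript;
+use libtestudo::testudo_snark::{TestudoSnark, TestudoSnarkGens};
+use libtestudo::Instance;
+
+type E = ark_bls12_377::Bls12_377;
+
+fn main() {
+  // Poseidon parameters for the Fiat-Shamir transcript.
+  let params = poseidon_params();
+
+  // A synthetic R1CS instance; sizes are illustrative powers of two.
+  let (num_cons, num_vars, num_inputs) = (1024, 1024, 10);
+  let (inst, vars, inputs) =
+    Instance::<ark_bls12_377::Fr>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
+
+  // Universal setup sized to the instance (num_cons doubles as the
+  // number of non-zero entries here, as in the benchmarks).
+  let gens =
+    TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, params.clone());
+
+  // Commit to the R1CS instance, then prove and verify satisfiability.
+  let (comm, decomm) = TestudoSnark::encode(&inst, &gens);
+  let mut prover_transcript = PoseidonTranscript::new(&params);
+  let proof = TestudoSnark::prove(
+    &inst, &comm, &decomm, vars, &inputs, &gens,
+    &mut prover_transcript, params.clone(),
+  )
+  .unwrap();
+
+  let mut verifier_transcript = PoseidonTranscript::new(&params);
+  assert!(proof
+    .verify(&gens, &comm, &inputs, &mut verifier_transcript, params)
+    .is_ok());
+}
+```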
diff --git a/benches/nizk.rs b/benches/nizk.rs deleted file mode 100644 index 8fdd340..0000000 --- a/benches/nizk.rs +++ /dev/null @@ -1,151 +0,0 @@ -extern crate core; -extern crate criterion; -extern crate digest; -extern crate libspartan; -extern crate merlin; -extern crate sha3; - -use std::time::{Duration, SystemTime}; - -use libspartan::{ - parameters::POSEIDON_PARAMETERS_FR_377, poseidon_transcript::PoseidonTranscript, Instance, - NIZKGens, NIZK, -}; - -use criterion::*; - -fn nizk_prove_benchmark(c: &mut Criterion) { - for &s in [24, 28, 30].iter() { - let mut group = c.benchmark_group("R1CS_prove_benchmark"); - - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - let num_inputs = 10; - let start = SystemTime::now(); - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - let end = SystemTime::now(); - let duration = end.duration_since(start).unwrap(); - println!( - "Generating r1cs instance with {} constraints took {} ms", - num_cons, - duration.as_millis() - ); - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - - let name = format!("R1CS_prove_{}", num_vars); - group - .measurement_time(Duration::from_secs(60)) - .bench_function(&name, move |b| { - b.iter(|| { - let mut prover_transcript = - PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - NIZK::prove( - black_box(&inst), - black_box(vars.clone()), - black_box(&inputs), - black_box(&gens), - black_box(&mut prover_transcript), - ); - }); - }); - group.finish(); - } -} - -fn nizk_verify_benchmark(c: &mut Criterion) { - for &s in [4, 6, 8, 10, 12, 16, 20, 24, 28, 30].iter() { - let mut group = c.benchmark_group("R1CS_verify_benchmark"); - - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - // these are the public io - let num_inputs = 10; - let start = SystemTime::now(); - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - let end = SystemTime::now(); - let duration = end.duration_since(start).unwrap(); - println!( - "Generating r1cs instance with {} constraints took {} ms", - num_cons, - duration.as_millis() - ); - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); - - let name = format!("R1CS_verify_{}", num_cons); - group - .measurement_time(Duration::from_secs(60)) - .bench_function(&name, move |b| { - b.iter(|| { - let mut verifier_transcript = - PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - assert!(proof - .verify( - black_box(&inst), - black_box(&inputs), - black_box(&mut verifier_transcript), - black_box(&gens), - ) - .is_ok()); - }); - }); - group.finish(); - } -} - -fn nizk_verify_groth16_benchmark(c: &mut Criterion) { - for &s in [4, 6, 8, 10, 12, 16, 20, 24, 28, 30].iter() { - let mut group = c.benchmark_group("R1CS_verify_groth16_benchmark"); - - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - // these are the public io - let num_inputs = 10; - let start = SystemTime::now(); - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - let end = SystemTime::now(); - let duration = end.duration_since(start).unwrap(); - println!( - "Generating r1cs instance with {} constraints took {} ms", - num_cons, - duration.as_millis() - ); - // produce a proof of satisfiability - let mut prover_transcript = 
PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); - - let name = format!("R1CS_verify_groth16_{}", num_cons); - group - .measurement_time(Duration::from_secs(60)) - .bench_function(&name, move |b| { - b.iter(|| { - let mut verifier_transcript = - PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - assert!(proof - .verify_groth16( - black_box(&inst), - black_box(&inputs), - black_box(&mut verifier_transcript), - black_box(&gens) - ) - .is_ok()); - }); - }); - group.finish(); - } -} - -fn set_duration() -> Criterion { - Criterion::default().sample_size(2) -} - -criterion_group! { -name = benches_nizk; -config = set_duration(); -targets = nizk_prove_benchmark, nizk_verify_benchmark, nizk_verify_groth16_benchmark -} - -criterion_main!(benches_nizk); diff --git a/benches/pst.rs b/benches/pst.rs new file mode 100644 index 0000000..a9b821a --- /dev/null +++ b/benches/pst.rs @@ -0,0 +1,98 @@ +use std::time::Instant; + +use ark_poly_commit::multilinear_pc::MultilinearPC; +use ark_serialize::CanonicalSerialize; +use libtestudo::{ + parameters::PoseidonConfiguration, poseidon_transcript::PoseidonTranscript, sqrt_pst::Polynomial, +}; +use serde::Serialize; +type F = ark_bls12_377::Fr; +type E = ark_bls12_377::Bls12_377; +use ark_std::UniformRand; + +#[derive(Default, Clone, Serialize)] +struct BenchmarkResults { + power: usize, + commit_time: u128, + opening_time: u128, + verification_time: u128, + proof_size: usize, + commiter_key_size: usize, +} +fn main() { + let params = ark_bls12_377::Fr::poseidon_params(); + + let mut writer = csv::Writer::from_path("sqrt_pst.csv").expect("unable to open csv writer"); + for &s in [4, 5, 20, 27].iter() { + println!("Running for {} inputs", s); + let mut rng = ark_std::test_rng(); + let mut br = BenchmarkResults::default(); + br.power = s; + let num_vars = s; + let len = 2_usize.pow(num_vars as u32); + let z: Vec = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect(); + let r: Vec = (0..num_vars) + .into_iter() + .map(|_| F::rand(&mut rng)) + .collect(); + + let setup_vars = (num_vars as f32 / 2.0).ceil() as usize; + let gens = MultilinearPC::::setup((num_vars as f32 / 2.0).ceil() as usize, &mut rng); + let (ck, vk) = MultilinearPC::::trim(&gens, setup_vars); + + let mut cks = Vec::::new(); + ck.serialize_with_mode(&mut cks, ark_serialize::Compress::Yes) + .unwrap(); + br.commiter_key_size = cks.len(); + + let mut pl = Polynomial::from_evaluations(&z.clone()); + + let v = pl.eval(&r); + + let start = Instant::now(); + let (comm_list, t) = pl.commit(&ck); + let duration = start.elapsed().as_millis(); + br.commit_time = duration; + + let mut prover_transcript = PoseidonTranscript::new(¶ms); + + let start = Instant::now(); + let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t); + let duration = start.elapsed().as_millis(); + br.opening_time = duration; + + let mut p1 = Vec::::new(); + let mut p2 = Vec::::new(); + pst_proof + .serialize_with_mode(&mut p1, ark_serialize::Compress::Yes) + .unwrap(); + + mipp_proof + .serialize_with_mode(&mut p2, ark_serialize::Compress::Yes) + .unwrap(); + + br.proof_size = p1.len() + p2.len(); + + let mut verifier_transcript = PoseidonTranscript::new(¶ms); + + let start = Instant::now(); + let res = Polynomial::verify( + &mut verifier_transcript, + &vk, + &u, + &r, + v, + &pst_proof, + &mipp_proof, + &t, + ); + let duration = 
start.elapsed().as_millis(); + br.verification_time = duration; + assert!(res == true); + + writer + .serialize(br) + .expect("unable to write results to csv"); + writer.flush().expect("wasn't able to flush"); + } +} diff --git a/benches/r1cs.rs b/benches/r1cs.rs deleted file mode 100644 index 48b93ef..0000000 --- a/benches/r1cs.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::time::Instant; - -use libspartan::{ - parameters::POSEIDON_PARAMETERS_FR_377, poseidon_transcript::PoseidonTranscript, Instance, - NIZKGens, NIZK, -}; -use serde::Serialize; - -#[derive(Default, Clone, Serialize)] -struct BenchmarkResults { - power: usize, - input_constraints: usize, - spartan_verifier_circuit_constraints: usize, - r1cs_instance_generation_time: u128, - spartan_proving_time: u128, - groth16_setup_time: u128, - groth16_proving_time: u128, - testudo_verification_time: u128, - testudo_proving_time: u128, -} - -fn main() { - let mut writer = csv::Writer::from_path("testudo.csv").expect("unable to open csv writer"); - // for &s in [ - // 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - // ] - // .iter() - // For testing purposes we currently bench on very small instance to ensure - // correctness and then on biggest one for timings. - for &s in [4, 26].iter() { - println!("Running for {} inputs", s); - let mut br = BenchmarkResults::default(); - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - br.power = s; - br.input_constraints = num_cons; - let num_inputs = 10; - - let start = Instant::now(); - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - let duration = start.elapsed().as_millis(); - br.r1cs_instance_generation_time = duration; - let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - - let start = Instant::now(); - let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); - let duration = start.elapsed().as_millis(); - br.spartan_proving_time = duration; - - let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - let res = proof.verify(&inst, &inputs, &mut verifier_transcript, &gens); - assert!(res.is_ok()); - br.spartan_verifier_circuit_constraints = res.unwrap(); - - let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377); - let res = proof.verify_groth16(&inst, &inputs, &mut verifier_transcript, &gens); - assert!(res.is_ok()); - - let (ds, dp, dv) = res.unwrap(); - br.groth16_setup_time = ds; - br.groth16_proving_time = dp; - - br.testudo_proving_time = br.spartan_proving_time + br.groth16_proving_time; - br.testudo_verification_time = dv; - writer - .serialize(br) - .expect("unable to write results to csv"); - writer.flush().expect("wasn't able to flush"); - } -} diff --git a/benches/snark.rs b/benches/snark.rs deleted file mode 100644 index 2bf7861..0000000 --- a/benches/snark.rs +++ /dev/null @@ -1,137 +0,0 @@ -extern crate libspartan; -extern crate merlin; - -use libspartan::{ - parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, Instance, SNARKGens, - SNARK, -}; - -use criterion::*; - -fn snark_encode_benchmark(c: &mut Criterion) { - for &s in [10, 12, 16].iter() { - let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("SNARK_encode_benchmark"); - group.plot_config(plot_config); - - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - let 
num_inputs = 10; - let (inst, _vars, _inputs) = - Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // produce public parameters - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); - - // produce a commitment to R1CS instance - let name = format!("SNARK_encode_{}", num_cons); - group.bench_function(&name, move |b| { - b.iter(|| { - SNARK::encode(black_box(&inst), black_box(&gens)); - }); - }); - group.finish(); - } -} - -fn snark_prove_benchmark(c: &mut Criterion) { - for &s in [10, 12, 16].iter() { - let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("SNARK_prove_benchmark"); - group.plot_config(plot_config); - - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - let num_inputs = 10; - - let params = poseidon_params(); - - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // produce public parameters - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); - - // produce a commitment to R1CS instance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - // produce a proof - let name = format!("SNARK_prove_{}", num_cons); - group.bench_function(&name, move |b| { - b.iter(|| { - let mut prover_transcript = PoseidonTranscript::new(¶ms); - SNARK::prove( - black_box(&inst), - black_box(&comm), - black_box(&decomm), - black_box(vars.clone()), - black_box(&inputs), - black_box(&gens), - black_box(&mut prover_transcript), - ); - }); - }); - group.finish(); - } -} - -fn snark_verify_benchmark(c: &mut Criterion) { - for &s in [10, 12, 16].iter() { - let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("SNARK_verify_benchmark"); - group.plot_config(plot_config); - - let params = poseidon_params(); - - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - let num_inputs = 10; - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // produce public parameters - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); - - // produce a commitment to R1CS instance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove( - &inst, - &comm, - &decomm, - vars, - &inputs, - &gens, - &mut prover_transcript, - ); - - // verify the proof - let name = format!("SNARK_verify_{}", num_cons); - group.bench_function(&name, move |b| { - b.iter(|| { - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify( - black_box(&comm), - black_box(&inputs), - black_box(&mut verifier_transcript), - black_box(&gens) - ) - .is_ok()); - }); - }); - group.finish(); - } -} - -fn set_duration() -> Criterion { - Criterion::default().sample_size(10) -} - -criterion_group! 
{ -name = benches_snark; -config = set_duration(); -targets = snark_verify_benchmark -} - -criterion_main!(benches_snark); diff --git a/benches/testudo.rs b/benches/testudo.rs new file mode 100644 index 0000000..bd9cc75 --- /dev/null +++ b/benches/testudo.rs @@ -0,0 +1,127 @@ +use std::time::Instant; + +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; +use ark_ff::PrimeField; +use ark_serialize::*; +use libtestudo::parameters::PoseidonConfiguration; +use libtestudo::{ + poseidon_transcript::PoseidonTranscript, + testudo_snark::{TestudoSnark, TestudoSnarkGens}, + Instance, +}; +use serde::Serialize; + +#[derive(Default, Clone, Serialize)] +struct BenchmarkResults { + power: usize, + input_constraints: usize, + testudo_proving_time: u128, + testudo_verification_time: u128, + sat_proof_size: usize, + eval_proof_size: usize, + total_proof_size: usize, +} + +fn main() { + bench_with_bls12_377(); + // bench_with_bls12_381(); + // bench_with_ark_blst(); +} + +fn bench_with_ark_blst() { + let params = ark_blst::Scalar::poseidon_params(); + testudo_snark_bench::(params, "testudo_blst"); +} + +fn bench_with_bls12_377() { + let params = ark_bls12_377::Fr::poseidon_params(); + testudo_snark_bench::(params, "testudo_bls12_377"); +} + +fn bench_with_bls12_381() { + let params = ark_bls12_381::Fr::poseidon_params(); + testudo_snark_bench::(params, "testudo_bls12_381"); +} + +fn testudo_snark_bench(params: PoseidonConfig, file_name: &str) +where + E: Pairing, + E::ScalarField: PrimeField, + E::ScalarField: Absorb, +{ + let mut writer = csv::Writer::from_path(file_name).expect("unable to open csv writer"); + for &s in [4, 5, 10, 12, 14, 16, 18, 20, 22, 24, 26].iter() { + println!("Running for {} inputs", s); + let mut br = BenchmarkResults::default(); + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + br.power = s; + br.input_constraints = num_cons; + let num_inputs = 10; + + let (inst, vars, inputs) = + Instance::::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + let mut prover_transcript = PoseidonTranscript::new(¶ms.clone()); + + let gens = + TestudoSnarkGens::::setup(num_cons, num_vars, num_inputs, num_cons, params.clone()); + + let (comm, decomm) = TestudoSnark::::encode(&inst, &gens); + + let start = Instant::now(); + let proof = TestudoSnark::prove( + &inst, + &comm, + &decomm, + vars, + &inputs, + &gens, + &mut prover_transcript, + params.clone(), + ) + .unwrap(); + let duration = start.elapsed().as_millis(); + br.testudo_proving_time = duration; + + let mut sat_proof = Vec::::new(); + proof + .r1cs_verifier_proof + .serialize_with_mode(&mut sat_proof, Compress::Yes) + .unwrap(); + br.sat_proof_size = sat_proof.len(); + + let mut eval_proof = Vec::::new(); + proof + .r1cs_eval_proof + .serialize_with_mode(&mut eval_proof, Compress::Yes) + .unwrap(); + br.eval_proof_size = eval_proof.len(); + + let mut total_proof = Vec::::new(); + proof + .serialize_with_mode(&mut total_proof, Compress::Yes) + .unwrap(); + br.total_proof_size = total_proof.len(); + + let mut verifier_transcript = PoseidonTranscript::new(¶ms.clone()); + let start = Instant::now(); + + let res = proof.verify( + &gens, + &comm, + &inputs, + &mut verifier_transcript, + params.clone(), + ); + assert!(res.is_ok()); + let duration = start.elapsed().as_millis(); + br.testudo_verification_time = duration; + + writer + .serialize(br) + .expect("unable to write results to csv"); + writer.flush().expect("wasn't able to flush"); + 
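+    // Each CSV row records millisecond timings and byte sizes for one
+    // instance size. The full serialized proof contains both the
+    // satisfiability and evaluation components, so total_proof_size is
+    // expected to be roughly sat_proof_size + eval_proof_size.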
} +} diff --git a/examples/cubic.rs b/examples/cubic.rs index 2f52cd6..dcc69eb 100644 --- a/examples/cubic.rs +++ b/examples/cubic.rs @@ -8,143 +8,163 @@ //! `(Z3 + 5) * 1 - I0 = 0` //! //! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649 -use ark_bls12_377::Fr as Scalar; +use ark_ec::pairing::Pairing; use ark_ff::{BigInteger, PrimeField}; use ark_std::{One, UniformRand, Zero}; -use libspartan::{ - parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, InputsAssignment, - Instance, SNARKGens, VarsAssignment, SNARK, +use libtestudo::testudo_snark::{TestudoSnark, TestudoSnarkGens}; +use libtestudo::{ + parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, InputsAssignment, Instance, + VarsAssignment, }; #[allow(non_snake_case)] -fn produce_r1cs() -> ( - usize, - usize, - usize, - usize, - Instance, - VarsAssignment, - InputsAssignment, +fn produce_r1cs() -> ( + usize, + usize, + usize, + usize, + Instance, + VarsAssignment, + InputsAssignment, ) { - // parameters of the R1CS instance - let num_cons = 4; - let num_vars = 4; - let num_inputs = 1; - let num_non_zero_entries = 8; - - // We will encode the above constraints into three matrices, where - // the coefficients in the matrix are in the little-endian byte order - let mut A: Vec<(usize, usize, Vec)> = Vec::new(); - let mut B: Vec<(usize, usize, Vec)> = Vec::new(); - let mut C: Vec<(usize, usize, Vec)> = Vec::new(); - - let one = Scalar::one().into_repr().to_bytes_le(); - - // R1CS is a set of three sparse matrices A B C, where is a row for every - // constraint and a column for every entry in z = (vars, 1, inputs) - // An R1CS instance is satisfiable iff: - // Az \circ Bz = Cz, where z = (vars, 1, inputs) - - // constraint 0 entries in (A,B,C) - // constraint 0 is Z0 * Z0 - Z1 = 0. - A.push((0, 0, one.clone())); - B.push((0, 0, one.clone())); - C.push((0, 1, one.clone())); - - // constraint 1 entries in (A,B,C) - // constraint 1 is Z1 * Z0 - Z2 = 0. - A.push((1, 1, one.clone())); - B.push((1, 0, one.clone())); - C.push((1, 2, one.clone())); - - // constraint 2 entries in (A,B,C) - // constraint 2 is (Z2 + Z0) * 1 - Z3 = 0. - A.push((2, 2, one.clone())); - A.push((2, 0, one.clone())); - B.push((2, num_vars, one.clone())); - C.push((2, 3, one.clone())); - - // constraint 3 entries in (A,B,C) - // constraint 3 is (Z3 + 5) * 1 - I0 = 0. 
- A.push((3, 3, one.clone())); - A.push((3, num_vars, Scalar::from(5u32).into_repr().to_bytes_le())); - B.push((3, num_vars, one.clone())); - C.push((3, num_vars + 1, one)); - - let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); - - // compute a satisfying assignment - let mut rng = ark_std::rand::thread_rng(); - let z0 = Scalar::rand(&mut rng); - let z1 = z0 * z0; // constraint 0 - let z2 = z1 * z0; // constraint 1 - let z3 = z2 + z0; // constraint 2 - let i0 = z3 + Scalar::from(5u32); // constraint 3 - - // create a VarsAssignment - let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars]; - vars[0] = z0.into_repr().to_bytes_le(); - vars[1] = z1.into_repr().to_bytes_le(); - vars[2] = z2.into_repr().to_bytes_le(); - vars[3] = z3.into_repr().to_bytes_le(); - let assignment_vars = VarsAssignment::new(&vars).unwrap(); - - // create an InputsAssignment - let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs]; - inputs[0] = i0.into_repr().to_bytes_le(); - let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); - - // check if the instance we created is satisfiable - let res = inst.is_sat(&assignment_vars, &assignment_inputs); - assert!(res.unwrap(), "should be satisfied"); - - ( - num_cons, - num_vars, - num_inputs, - num_non_zero_entries, - inst, - assignment_vars, - assignment_inputs, - ) + // parameters of the R1CS instance + let num_cons = 4; + let num_vars = 4; + let num_inputs = 1; + let num_non_zero_entries = 8; + + // We will encode the above constraints into three matrices, where + // the coefficients in the matrix are in the little-endian byte order + let mut A: Vec<(usize, usize, Vec)> = Vec::new(); + let mut B: Vec<(usize, usize, Vec)> = Vec::new(); + let mut C: Vec<(usize, usize, Vec)> = Vec::new(); + + let one = E::ScalarField::one().into_bigint().to_bytes_le(); + + // R1CS is a set of three sparse matrices A B C, where is a row for every + // constraint and a column for every entry in z = (vars, 1, inputs) + // An R1CS instance is satisfiable iff: + // Az \circ Bz = Cz, where z = (vars, 1, inputs) + + // constraint 0 entries in (A,B,C) + // constraint 0 is Z0 * Z0 - Z1 = 0. + A.push((0, 0, one.clone())); + B.push((0, 0, one.clone())); + C.push((0, 1, one.clone())); + + // constraint 1 entries in (A,B,C) + // constraint 1 is Z1 * Z0 - Z2 = 0. + A.push((1, 1, one.clone())); + B.push((1, 0, one.clone())); + C.push((1, 2, one.clone())); + + // constraint 2 entries in (A,B,C) + // constraint 2 is (Z2 + Z0) * 1 - Z3 = 0. + A.push((2, 2, one.clone())); + A.push((2, 0, one.clone())); + B.push((2, num_vars, one.clone())); + C.push((2, 3, one.clone())); + + // constraint 3 entries in (A,B,C) + // constraint 3 is (Z3 + 5) * 1 - I0 = 0. 
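+  // Concretely: row 3 of A gets a 1 in Z3's column and a 5 in the constant
+  // column (index num_vars), row 3 of B gets a 1 in the constant column, and
+  // row 3 of C gets a 1 in I0's column (index num_vars + 1), so that
+  // Az \circ Bz = Cz enforces (Z3 + 5) * 1 = I0.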
+ A.push((3, 3, one.clone())); + A.push(( + 3, + num_vars, + E::ScalarField::from(5u32).into_bigint().to_bytes_le(), + )); + B.push((3, num_vars, one.clone())); + C.push((3, num_vars + 1, one)); + + let inst = Instance::::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); + + // compute a satisfying assignment + let mut rng = ark_std::rand::thread_rng(); + let z0 = E::ScalarField::rand(&mut rng); + let z1 = z0 * z0; // constraint 0 + let z2 = z1 * z0; // constraint 1 + let z3 = z2 + z0; // constraint 2 + let i0 = z3 + E::ScalarField::from(5u32); // constraint 3 + + // create a VarsAssignment + let mut vars = vec![E::ScalarField::zero().into_bigint().to_bytes_le(); num_vars]; + vars[0] = z0.into_bigint().to_bytes_le(); + vars[1] = z1.into_bigint().to_bytes_le(); + vars[2] = z2.into_bigint().to_bytes_le(); + vars[3] = z3.into_bigint().to_bytes_le(); + let assignment_vars = VarsAssignment::new(&vars).unwrap(); + + // create an InputsAssignment + let mut inputs = vec![E::ScalarField::zero().into_bigint().to_bytes_le(); num_inputs]; + inputs[0] = i0.into_bigint().to_bytes_le(); + let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); + + // check if the instance we created is satisfiable + let res = inst.is_sat(&assignment_vars, &assignment_inputs); + assert!(res.unwrap(), "should be satisfied"); + + ( + num_cons, + num_vars, + num_inputs, + num_non_zero_entries, + inst, + assignment_vars, + assignment_inputs, + ) } +type E = ark_bls12_377::Bls12_377; fn main() { - // produce an R1CS instance - let ( - num_cons, - num_vars, - num_inputs, - num_non_zero_entries, - inst, - assignment_vars, - assignment_inputs, - ) = produce_r1cs(); - - let params = poseidon_params(); - - // produce public parameters - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); - - // create a commitment to the R1CS instance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove( - &inst, - &comm, - &decomm, - assignment_vars, - &assignment_inputs, - &gens, - &mut prover_transcript, - ); - - // verify the proof of satisfiability - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) - .is_ok()); - println!("proof verification successful!"); + // produce an R1CS instance + let ( + num_cons, + num_vars, + num_inputs, + num_non_zero_entries, + inst, + assignment_vars, + assignment_inputs, + ) = produce_r1cs::(); + + let params = poseidon_params(); + + // produce public parameters + let gens = TestudoSnarkGens::::setup( + num_cons, + num_vars, + num_inputs, + num_non_zero_entries, + params.clone(), + ); + + // create a commitment to the R1CS instance + let (comm, decomm) = TestudoSnark::encode(&inst, &gens); + + // produce a proof of satisfiability + let mut prover_transcript = PoseidonTranscript::new(¶ms); + let proof = TestudoSnark::prove( + &inst, + &comm, + &decomm, + assignment_vars, + &assignment_inputs, + &gens, + &mut prover_transcript, + params.clone(), + ) + .unwrap(); + + // verify the proof of satisfiability + let mut verifier_transcript = PoseidonTranscript::new(¶ms); + assert!(proof + .verify( + &gens, + &comm, + &assignment_inputs, + &mut verifier_transcript, + params + ) + .is_ok()); + println!("proof verification successful!"); } diff --git a/profiler/nizk.rs b/profiler/nizk.rs deleted file mode 100644 index ce5afe7..0000000 --- a/profiler/nizk.rs 
+++ /dev/null @@ -1,52 +0,0 @@ -#![allow(non_snake_case)] -#![allow(clippy::assertions_on_result_states)] - -extern crate libspartan; -extern crate merlin; -extern crate rand; - -use ark_serialize::*; -use libspartan::parameters::poseidon_params; -use libspartan::poseidon_transcript::PoseidonTranscript; -use libspartan::{Instance, NIZKGens, NIZK}; - -fn print(msg: &str) { - let star = "* "; - println!("{:indent$}{}{}", "", star, msg, indent = 2); -} - -pub fn main() { - // the list of number of variables (and constraints) in an R1CS instance - let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; - - println!("Profiler:: NIZK"); - for &s in inst_sizes.iter() { - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - let num_inputs = 10; - - // produce a synthetic R1CSInstance - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // produce public generators - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - - let params = poseidon_params(); - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); - - let mut proof_encoded = Vec::new(); - proof.serialize(&mut proof_encoded).unwrap(); - let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len()); - print(&msg_proof_len); - - // verify the proof of satisfiability - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&inst, &inputs, &mut verifier_transcript, &gens) - .is_ok()); - - println!(); - } -} diff --git a/profiler/snark.rs b/profiler/snark.rs deleted file mode 100644 index 17ceab5..0000000 --- a/profiler/snark.rs +++ /dev/null @@ -1,63 +0,0 @@ -#![allow(non_snake_case)] -#![allow(clippy::assertions_on_result_states)] - -extern crate libspartan; -extern crate merlin; - -use ark_serialize::*; -use libspartan::parameters::poseidon_params; -use libspartan::poseidon_transcript::PoseidonTranscript; -use libspartan::{Instance, SNARKGens, SNARK}; - -fn print(msg: &str) { - let star = "* "; - println!("{:indent$}{}{}", "", star, msg, indent = 2); -} - -pub fn main() { - // the list of number of variables (and constraints) in an R1CS instance - let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; - - println!("Profiler:: SNARK"); - for &s in inst_sizes.iter() { - let num_vars = (2_usize).pow(s as u32); - let num_cons = num_vars; - let num_inputs = 10; - - // produce a synthetic R1CSInstance - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // produce public generators - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); - - // create a commitment to R1CSInstance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - let params = poseidon_params(); - - // produce a proof of satisfiability - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove( - &inst, - &comm, - &decomm, - vars, - &inputs, - &gens, - &mut prover_transcript, - ); - - let mut proof_encoded = Vec::new(); - proof.serialize(&mut proof_encoded).unwrap(); - let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len()); - print(&msg_proof_len); - - // verify the proof of satisfiability - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&comm, &inputs, &mut verifier_transcript, &gens) - .is_ok()); - - println!(); - } -} diff --git a/profiler/testudo.rs 
b/profiler/testudo.rs new file mode 100644 index 0000000..92f7c26 --- /dev/null +++ b/profiler/testudo.rs @@ -0,0 +1,92 @@ +#![allow(non_snake_case)] +#![allow(clippy::assertions_on_result_states)] + +extern crate libtestudo; +extern crate merlin; +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; +use ark_ff::PrimeField; +use ark_serialize::*; +use libtestudo::parameters::PoseidonConfiguration; +use libtestudo::poseidon_transcript::PoseidonTranscript; +use libtestudo::{ + testudo_snark::{TestudoSnark, TestudoSnarkGens}, + Instance, +}; + +fn print(msg: &str) { + let star = "* "; + println!("{:indent$}{}{}", "", star, msg, indent = 2); +} + +fn main() { + let params = ark_bls12_377::Fr::poseidon_params(); + profiler::(params); +} + +fn profiler(params: PoseidonConfig) +where + E: Pairing, + E::ScalarField: PrimeField, + E::ScalarField: Absorb, +{ + // the list of number of variables (and constraints) in an R1CS instance + let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; + + println!("Profiler:: SNARK"); + for &s in inst_sizes.iter() { + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + + // produce a synthetic R1CSInstance + let (inst, vars, inputs) = + Instance::::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // produce public generators + let gens = + TestudoSnarkGens::::setup(num_cons, num_vars, num_inputs, num_cons, params.clone()); + + // create a commitment to R1CSInstance + let (comm, decomm) = TestudoSnark::encode(&inst, &gens); + + // produce a proof of satisfiability + let mut prover_transcript = PoseidonTranscript::new(¶ms.clone()); + let proof = TestudoSnark::prove( + &inst, + &comm, + &decomm, + vars, + &inputs, + &gens, + &mut prover_transcript, + params.clone(), + ) + .unwrap(); + + let mut proof_encoded = Vec::new(); + proof + .serialize_with_mode(&mut proof_encoded, Compress::Yes) + .unwrap(); + let msg_proof_len = format!( + "TestudoSnark::proof_compressed_len {:?}", + proof_encoded.len() + ); + print(&msg_proof_len); + + // verify the proof of satisfiability + let mut verifier_transcript = PoseidonTranscript::new(¶ms.clone()); + assert!(proof + .verify( + &gens, + &comm, + &inputs, + &mut verifier_transcript, + params.clone() + ) + .is_ok()); + + println!(); + } +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..7b20d96 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,4 @@ +edition = "2018" +tab_spaces = 2 +newline_style = "Unix" +use_try_shorthand = true diff --git a/src/commitments.rs b/src/commitments.rs index 5633504..458a839 100644 --- a/src/commitments.rs +++ b/src/commitments.rs @@ -1,92 +1,87 @@ -use super::group::{GroupElement, GroupElementAffine, VartimeMultiscalarMul, GROUP_BASEPOINT}; -use super::scalar::Scalar; -use crate::group::CompressGroupElement; +use crate::ark_std::UniformRand; use crate::parameters::*; -use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::PrimeField; - -use ark_sponge::poseidon::PoseidonSponge; -use ark_sponge::CryptographicSponge; +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use ark_crypto_primitives::sponge::CryptographicSponge; +use ark_ec::{CurveGroup, VariableBaseMSM}; +use rand::SeedableRng; +use std::ops::Mul; #[derive(Debug, Clone)] -pub struct MultiCommitGens { - pub n: usize, - pub G: Vec, - pub h: GroupElement, +pub struct MultiCommitGens { + pub n: usize, + pub G: Vec, + pub h: G::Affine, } -impl MultiCommitGens { - pub 
-  pub fn new(n: usize, label: &[u8]) -> Self {
-    let params = poseidon_params();
-    let mut sponge = PoseidonSponge::new(&params);
-    sponge.absorb(&label);
-    sponge.absorb(&GROUP_BASEPOINT.compress().0);
+impl<G: CurveGroup> MultiCommitGens<G> {
+  pub fn new(n: usize, label: &[u8]) -> Self {
+    let params = poseidon_params();
+    let mut sponge = PoseidonSponge::new(&params);
+    sponge.absorb(&label);
+    let mut b = Vec::new();
+    G::generator().serialize_compressed(&mut b).unwrap();
+    sponge.absorb(&b);
 
-    let mut gens: Vec<GroupElement> = Vec::new();
-    for _ in 0..n + 1 {
-      let mut el_aff: Option<GroupElementAffine> = None;
-      while el_aff.is_none() {
-        let uniform_bytes = sponge.squeeze_bytes(64);
-        el_aff = GroupElementAffine::from_random_bytes(&uniform_bytes);
-      }
-      let el = el_aff.unwrap().mul_by_cofactor_to_projective();
-      gens.push(el);
-    }
+    let gens = (0..=n)
+      .map(|_| {
+        let mut uniform_bytes = [0u8; 32];
+        uniform_bytes.copy_from_slice(&sponge.squeeze_bytes(32)[..]);
+        let mut prng = rand::rngs::StdRng::from_seed(uniform_bytes);
+        G::Affine::rand(&mut prng)
+      })
+      .collect::<Vec<_>>();
 
-    MultiCommitGens {
-      n,
-      G: gens[..n].to_vec(),
-      h: gens[n],
-    }
+    MultiCommitGens {
+      n,
+      G: gens[..n].to_vec(),
+      h: gens[n],
     }
   }
 
-  pub fn clone(&self) -> MultiCommitGens {
-    MultiCommitGens {
-      n: self.n,
-      h: self.h,
-      G: self.G.clone(),
-    }
+  pub fn clone(&self) -> Self {
+    MultiCommitGens {
+      n: self.n,
+      h: self.h,
+      G: self.G.clone(),
     }
   }
 
-  pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
-    let (G1, G2) = self.G.split_at(mid);
+  pub fn split_at(&self, mid: usize) -> (Self, Self) {
+    let (G1, G2) = self.G.split_at(mid);
 
-    (
-      MultiCommitGens {
-        n: G1.len(),
-        G: G1.to_vec(),
-        h: self.h,
-      },
-      MultiCommitGens {
-        n: G2.len(),
-        G: G2.to_vec(),
-        h: self.h,
-      },
-    )
-  }
+    (
+      MultiCommitGens {
+        n: G1.len(),
+        G: G1.to_vec(),
+        h: self.h,
+      },
+      MultiCommitGens {
+        n: G2.len(),
+        G: G2.to_vec(),
+        h: self.h,
+      },
+    )
+  }
 }
 
-pub trait Commitments {
-  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
-}
+pub struct PedersenCommit;
 
-impl Commitments for Scalar {
-  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
-    assert_eq!(gens_n.n, 1);
-    GroupElement::vartime_multiscalar_mul(&[*self, *blind], &[gens_n.G[0], gens_n.h])
-  }
-}
+impl PedersenCommit {
+  pub fn commit_scalar<G: CurveGroup>(
+    scalar: &G::ScalarField,
+    blind: &G::ScalarField,
+    gens_n: &MultiCommitGens<G>,
+  ) -> G {
+    assert_eq!(gens_n.n, 1);
+    <G as VariableBaseMSM>::msm_unchecked(&[gens_n.G[0], gens_n.h], &[*scalar, *blind])
+  }
 
-impl Commitments for Vec<Scalar> {
-  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
-    assert_eq!(gens_n.n, self.len());
-    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
-  }
-}
-
-impl Commitments for [Scalar] {
-  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
-    assert_eq!(gens_n.n, self.len());
-    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
-  }
+  pub fn commit_slice<G: CurveGroup>(
+    scalars: &[G::ScalarField],
+    blind: &G::ScalarField,
+    gens_n: &MultiCommitGens<G>,
+  ) -> G {
+    assert_eq!(scalars.len(), gens_n.n);
+    <G as VariableBaseMSM>::msm_unchecked(&gens_n.G, scalars) + gens_n.h.mul(blind)
+  }
 }
diff --git a/src/constraints.rs b/src/constraints.rs
index 89f14f0..931d3f0 100644
--- a/src/constraints.rs
+++ b/src/constraints.rs
@@ -1,488 +1,479 @@
-use std::{borrow::Borrow, vec};
+use ark_ec::pairing::Pairing;
+use std::borrow::Borrow;
 
-use super::scalar::Scalar;
 use crate::{
-  group::Fq,
-  math::Math,
-
sparse_mlpoly::{SparsePolyEntry, SparsePolynomial}, - unipoly::UniPoly, -}; -use ark_bls12_377::{constraints::PairingVar as IV, Bls12_377 as I, Fr}; -use ark_crypto_primitives::{ - snark::BooleanInputVar, CircuitSpecificSetupSNARK, SNARKGadget, SNARK, + math::Math, + sparse_mlpoly::{SparsePolyEntry, SparsePolynomial}, + unipoly::UniPoly, }; -use ark_ff::{BitIteratorLE, PrimeField, Zero}; -use ark_groth16::{ - constraints::{Groth16VerifierGadget, PreparedVerifyingKeyVar, ProofVar}, - Groth16, PreparedVerifyingKey, Proof as GrothProof, -}; +use ark_ff::PrimeField; +use ark_crypto_primitives::sponge::{ + constraints::CryptographicSpongeVar, + poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig}, +}; +use ark_poly_commit::multilinear_pc::data_structures::Commitment; use ark_r1cs_std::{ - alloc::{AllocVar, AllocationMode}, - fields::fp::FpVar, - prelude::{Boolean, EqGadget, FieldVar}, + alloc::{AllocVar, AllocationMode}, + fields::fp::FpVar, + prelude::{EqGadget, FieldVar}, }; use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError}; -use ark_sponge::{ - constraints::CryptographicSpongeVar, - poseidon::{constraints::PoseidonSpongeVar, PoseidonParameters}, -}; -use rand::{CryptoRng, Rng}; -pub struct PoseidonTranscripVar { - pub cs: ConstraintSystemRef, - pub sponge: PoseidonSpongeVar, - pub params: PoseidonParameters, +pub struct PoseidonTranscripVar +where + F: PrimeField, +{ + pub cs: ConstraintSystemRef, + pub sponge: PoseidonSpongeVar, } -impl PoseidonTranscripVar { - fn new( - cs: ConstraintSystemRef, - params: &PoseidonParameters, - challenge: Option, - ) -> Self { - let mut sponge = PoseidonSpongeVar::new(cs.clone(), params); - - if let Some(c) = challenge { - let c_var = FpVar::::new_witness(cs.clone(), || Ok(c)).unwrap(); - sponge.absorb(&c_var).unwrap(); - } - - Self { - cs, - sponge, - params: params.clone(), - } - } +impl PoseidonTranscripVar +where + F: PrimeField, +{ + fn new(cs: ConstraintSystemRef, params: &PoseidonConfig, c_var: FpVar) -> Self { + let mut sponge = PoseidonSpongeVar::new(cs.clone(), params); - fn append(&mut self, input: &FpVar) -> Result<(), SynthesisError> { - self.sponge.absorb(&input) - } + sponge.absorb(&c_var).unwrap(); - fn append_vector(&mut self, input_vec: &[FpVar]) -> Result<(), SynthesisError> { - for input in input_vec.iter() { - self.append(input)?; - } - Ok(()) - } + Self { cs, sponge } + } - fn challenge(&mut self) -> Result, SynthesisError> { - let c_var = self.sponge.squeeze_field_elements(1).unwrap().remove(0); + fn append(&mut self, input: &FpVar) -> Result<(), SynthesisError> { + self.sponge.absorb(&input) + } - Ok(c_var) + fn append_vector(&mut self, input_vec: &[FpVar]) -> Result<(), SynthesisError> { + for input in input_vec.iter() { + self.append(input)?; } + Ok(()) + } - fn challenge_vector(&mut self, len: usize) -> Result>, SynthesisError> { - let c_vars = self.sponge.squeeze_field_elements(len).unwrap(); + fn challenge(&mut self) -> Result, SynthesisError> { + Ok(self.sponge.squeeze_field_elements(1).unwrap().remove(0)) + } - Ok(c_vars) - } + fn challenge_scalar_vec(&mut self, len: usize) -> Result>, SynthesisError> { + let c_vars = self.sponge.squeeze_field_elements(len).unwrap(); + Ok(c_vars) + } } +/// Univariate polynomial in constraint system #[derive(Clone)] -pub struct UniPolyVar { - pub coeffs: Vec>, +pub struct UniPolyVar { + pub coeffs: Vec>, } -impl AllocVar for UniPolyVar { - fn new_variable>( - cs: impl Into>, - f: impl FnOnce() -> Result, - mode: AllocationMode, - ) -> Result 
{ - f().and_then(|c| { - let cs = cs.into(); - let cp: &UniPoly = c.borrow(); - let mut coeffs_var = Vec::new(); - for coeff in cp.coeffs.iter() { - let coeff_var = FpVar::::new_variable(cs.clone(), || Ok(coeff), mode)?; - coeffs_var.push(coeff_var); - } - Ok(Self { coeffs: coeffs_var }) - }) - } +impl AllocVar, F> for UniPolyVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|c| { + let cs = cs.into(); + let cp: &UniPoly = c.borrow(); + let mut coeffs_var = Vec::new(); + for coeff in cp.coeffs.iter() { + let coeff_var = FpVar::::new_variable(cs.clone(), || Ok(coeff), mode)?; + coeffs_var.push(coeff_var); + } + Ok(Self { coeffs: coeffs_var }) + }) + } } -impl UniPolyVar { - pub fn eval_at_zero(&self) -> FpVar { - self.coeffs[0].clone() - } +impl UniPolyVar { + pub fn eval_at_zero(&self) -> FpVar { + self.coeffs[0].clone() + } - pub fn eval_at_one(&self) -> FpVar { - let mut res = self.coeffs[0].clone(); - for i in 1..self.coeffs.len() { - res = &res + &self.coeffs[i]; - } - res + pub fn eval_at_one(&self) -> FpVar { + let mut res = self.coeffs[0].clone(); + for i in 1..self.coeffs.len() { + res = &res + &self.coeffs[i]; } + res + } - // mul without reduce - pub fn evaluate(&self, r: &FpVar) -> FpVar { - let mut eval = self.coeffs[0].clone(); - let mut power = r.clone(); + // TODO check if mul without reduce can help + pub fn evaluate(&self, r: &FpVar) -> FpVar { + let mut eval = self.coeffs[0].clone(); + let mut power = r.clone(); - for i in 1..self.coeffs.len() { - eval += &power * &self.coeffs[i]; - power *= r; - } - eval + for i in 1..self.coeffs.len() { + eval += &power * &self.coeffs[i]; + power *= r; } + eval + } } +/// Circuit gadget that implements the sumcheck verifier #[derive(Clone)] -pub struct SumcheckVerificationCircuit { - pub polys: Vec, +pub struct SumcheckVerificationCircuit { + pub polys: Vec>, } -impl SumcheckVerificationCircuit { - fn verifiy_sumcheck( - &self, - poly_vars: &[UniPolyVar], - claim_var: &FpVar, - transcript_var: &mut PoseidonTranscripVar, - ) -> Result<(FpVar, Vec>), SynthesisError> { - let mut e_var = claim_var.clone(); - let mut r_vars: Vec> = Vec::new(); - - for (poly_var, _poly) in poly_vars.iter().zip(self.polys.iter()) { - let res = poly_var.eval_at_one() + poly_var.eval_at_zero(); - res.enforce_equal(&e_var)?; - transcript_var.append_vector(&poly_var.coeffs)?; - let r_i_var = transcript_var.challenge()?; - r_vars.push(r_i_var.clone()); - e_var = poly_var.evaluate(&r_i_var.clone()); - } - - Ok((e_var, r_vars)) +impl SumcheckVerificationCircuit { + fn verifiy_sumcheck( + &self, + poly_vars: &[UniPolyVar], + claim_var: &FpVar, + transcript_var: &mut PoseidonTranscripVar, + ) -> Result<(FpVar, Vec>), SynthesisError> { + let mut e_var = claim_var.clone(); + let mut r_vars: Vec> = Vec::new(); + + for (poly_var, _poly) in poly_vars.iter().zip(self.polys.iter()) { + let res = poly_var.eval_at_one() + poly_var.eval_at_zero(); + res.enforce_equal(&e_var)?; + transcript_var.append_vector(&poly_var.coeffs)?; + let r_i_var = transcript_var.challenge()?; + r_vars.push(r_i_var.clone()); + e_var = poly_var.evaluate(&r_i_var.clone()); } + + Ok((e_var, r_vars)) + } } #[derive(Clone)] -pub struct SparsePolyEntryVar { - idx: usize, - val_var: FpVar, +pub struct SparsePolyEntryVar { + idx: usize, + val_var: FpVar, } -impl AllocVar for SparsePolyEntryVar { - fn new_variable>( - cs: impl Into>, - f: impl FnOnce() -> Result, - _mode: AllocationMode, - ) -> Result { - f().and_then(|s| { - let 
cs = cs.into(); - let spe: &SparsePolyEntry = s.borrow(); - let val_var = FpVar::::new_witness(cs, || Ok(spe.val))?; - Ok(Self { - idx: spe.idx, - val_var, - }) - }) - } +impl AllocVar, F> for SparsePolyEntryVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + _mode: AllocationMode, + ) -> Result { + f().and_then(|s| { + let cs = cs.into(); + let spe: &SparsePolyEntry = s.borrow(); + let val_var = FpVar::::new_witness(cs, || Ok(spe.val))?; + Ok(Self { + idx: spe.idx, + val_var, + }) + }) + } } #[derive(Clone)] -pub struct SparsePolynomialVar { - num_vars: usize, - Z_var: Vec, +pub struct SparsePolynomialVar { + Z_var: Vec>, } -impl AllocVar for SparsePolynomialVar { - fn new_variable>( - cs: impl Into>, - f: impl FnOnce() -> Result, - mode: AllocationMode, - ) -> Result { - f().and_then(|s| { - let cs = cs.into(); - let sp: &SparsePolynomial = s.borrow(); - let mut Z_var = Vec::new(); - for spe in sp.Z.iter() { - let spe_var = SparsePolyEntryVar::new_variable(cs.clone(), || Ok(spe), mode)?; - Z_var.push(spe_var); - } - Ok(Self { - num_vars: sp.num_vars, - Z_var, - }) - }) - } +impl AllocVar, F> for SparsePolynomialVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + f().and_then(|s| { + let cs = cs.into(); + let sp: &SparsePolynomial = s.borrow(); + let mut Z_var = Vec::new(); + for spe in sp.Z.iter() { + let spe_var = SparsePolyEntryVar::new_variable(cs.clone(), || Ok(spe), mode)?; + Z_var.push(spe_var); + } + Ok(Self { Z_var }) + }) + } } -impl SparsePolynomialVar { - fn compute_chi(a: &[bool], r_vars: &[FpVar]) -> FpVar { - let mut chi_i_var = FpVar::::one(); - let one = FpVar::::one(); - for (i, r_var) in r_vars.iter().enumerate() { - if a[i] { - chi_i_var *= r_var; - } else { - chi_i_var *= &one - r_var; - } - } - chi_i_var +impl SparsePolynomialVar { + fn compute_chi(a: &[bool], r_vars: &[FpVar]) -> FpVar { + let mut chi_i_var = FpVar::::one(); + let one = FpVar::::one(); + for (i, r_var) in r_vars.iter().enumerate() { + if a[i] { + chi_i_var *= r_var; + } else { + chi_i_var *= &one - r_var; + } } - - pub fn evaluate(&self, r_var: &[FpVar]) -> FpVar { - let mut sum = FpVar::::zero(); - for spe_var in self.Z_var.iter() { - // potential problem - let bits = &spe_var.idx.get_bits(r_var.len()); - sum += SparsePolynomialVar::compute_chi(bits, r_var) * &spe_var.val_var; - } - sum + chi_i_var + } + + pub fn evaluate(&self, r_var: &[FpVar]) -> FpVar { + let mut sum = FpVar::::zero(); + for spe_var in self.Z_var.iter() { + // potential problem + let bits = &spe_var.idx.get_bits(r_var.len()); + sum += SparsePolynomialVar::compute_chi(bits, r_var) * &spe_var.val_var; } + sum + } } #[derive(Clone)] -pub struct R1CSVerificationCircuit { - pub num_vars: usize, - pub num_cons: usize, - pub input: Vec, - pub input_as_sparse_poly: SparsePolynomial, - pub evals: (Fr, Fr, Fr), - pub params: PoseidonParameters, - pub prev_challenge: Fr, - pub claims_phase2: (Scalar, Scalar, Scalar, Scalar), - pub eval_vars_at_ry: Fr, - pub sc_phase1: SumcheckVerificationCircuit, - pub sc_phase2: SumcheckVerificationCircuit, - // The point on which the polynomial was evaluated by the prover. 
- pub claimed_ry: Vec, - pub claimed_transcript_sat_state: Scalar, +pub struct R1CSVerificationCircuit { + pub num_vars: usize, + pub num_cons: usize, + pub input: Vec, + pub input_as_sparse_poly: SparsePolynomial, + pub evals: (F, F, F), + pub params: PoseidonConfig, + pub prev_challenge: F, + pub claims_phase2: (F, F, F, F), + pub eval_vars_at_ry: F, + pub sc_phase1: SumcheckVerificationCircuit, + pub sc_phase2: SumcheckVerificationCircuit, + // The point on which the polynomial was evaluated by the prover. + pub claimed_rx: Vec, + pub claimed_ry: Vec, + pub claimed_transcript_sat_state: F, } -impl R1CSVerificationCircuit { - fn new(config: &VerifierConfig) -> Self { - Self { - num_vars: config.num_vars, - num_cons: config.num_cons, - input: config.input.clone(), - input_as_sparse_poly: config.input_as_sparse_poly.clone(), - evals: config.evals, - params: config.params.clone(), - prev_challenge: config.prev_challenge, - claims_phase2: config.claims_phase2, - eval_vars_at_ry: config.eval_vars_at_ry, - sc_phase1: SumcheckVerificationCircuit { - polys: config.polys_sc1.clone(), - }, - sc_phase2: SumcheckVerificationCircuit { - polys: config.polys_sc2.clone(), - }, - claimed_ry: config.ry.clone(), - claimed_transcript_sat_state: config.transcript_sat_state, - } +impl R1CSVerificationCircuit { + pub fn new>(config: &VerifierConfig) -> Self { + Self { + num_vars: config.num_vars, + num_cons: config.num_cons, + input: config.input.clone(), + input_as_sparse_poly: config.input_as_sparse_poly.clone(), + evals: config.evals, + params: config.params.clone(), + prev_challenge: config.prev_challenge, + claims_phase2: config.claims_phase2, + eval_vars_at_ry: config.eval_vars_at_ry, + sc_phase1: SumcheckVerificationCircuit { + polys: config.polys_sc1.clone(), + }, + sc_phase2: SumcheckVerificationCircuit { + polys: config.polys_sc2.clone(), + }, + claimed_rx: config.rx.clone(), + claimed_ry: config.ry.clone(), + claimed_transcript_sat_state: config.transcript_sat_state, } + } } -impl ConstraintSynthesizer for R1CSVerificationCircuit { - fn generate_constraints(self, cs: ConstraintSystemRef) -> ark_relations::r1cs::Result<()> { - let mut transcript_var = - PoseidonTranscripVar::new(cs.clone(), &self.params, Some(self.prev_challenge)); - - let poly_sc1_vars = self - .sc_phase1 - .polys - .iter() - .map(|p| { - UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap() - }) - .collect::>(); - - let poly_sc2_vars = self - .sc_phase2 - .polys - .iter() - .map(|p| { - UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap() - }) - .collect::>(); - - let input_vars = self - .input - .iter() - .map(|i| { - FpVar::::new_variable(cs.clone(), || Ok(i), AllocationMode::Witness).unwrap() - }) - .collect::>>(); - - let claimed_ry_vars = self - .claimed_ry - .iter() - .map(|r| { - FpVar::::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap() - }) - .collect::>>(); - - transcript_var.append_vector(&input_vars)?; - - let num_rounds_x = self.num_cons.log_2(); - let _num_rounds_y = (2 * self.num_vars).log_2(); - - let tau_vars = transcript_var.challenge_vector(num_rounds_x)?; - - let claim_phase1_var = FpVar::::new_witness(cs.clone(), || Ok(Fr::zero()))?; - - let (claim_post_phase1_var, rx_var) = self.sc_phase1.verifiy_sumcheck( - &poly_sc1_vars, - &claim_phase1_var, - &mut transcript_var, - )?; - - let (Az_claim, Bz_claim, Cz_claim, prod_Az_Bz_claims) = &self.claims_phase2; - - let Az_claim_var = FpVar::::new_input(cs.clone(), || Ok(Az_claim))?; - let 
Bz_claim_var = FpVar::::new_input(cs.clone(), || Ok(Bz_claim))?; - let Cz_claim_var = FpVar::::new_input(cs.clone(), || Ok(Cz_claim))?; - let prod_Az_Bz_claim_var = FpVar::::new_input(cs.clone(), || Ok(prod_Az_Bz_claims))?; - let one = FpVar::::one(); - let prod_vars: Vec> = (0..rx_var.len()) - .map(|i| (&rx_var[i] * &tau_vars[i]) + (&one - &rx_var[i]) * (&one - &tau_vars[i])) - .collect(); - let mut taus_bound_rx_var = FpVar::::one(); - - for p_var in prod_vars.iter() { - taus_bound_rx_var *= p_var; - } - - let expected_claim_post_phase1_var = - (&prod_Az_Bz_claim_var - &Cz_claim_var) * &taus_bound_rx_var; - - claim_post_phase1_var.enforce_equal(&expected_claim_post_phase1_var)?; - - let r_A_var = transcript_var.challenge()?; - let r_B_var = transcript_var.challenge()?; - let r_C_var = transcript_var.challenge()?; - - let claim_phase2_var = - &r_A_var * &Az_claim_var + &r_B_var * &Bz_claim_var + &r_C_var * &Cz_claim_var; - - let (claim_post_phase2_var, ry_var) = self.sc_phase2.verifiy_sumcheck( - &poly_sc2_vars, - &claim_phase2_var, - &mut transcript_var, - )?; - - // Because the verifier checks the commitment opening on point ry outside - // the circuit, the prover needs to send ry to the verifier (making the - // proof size O(log n)). As this point is normally obtained by the verifier - // from the second round of sumcheck, the circuit needs to ensure the - // claimed point, coming from the prover, is actually the point derived - // inside the circuit. These additional checks will be removed - // when the commitment verification is done inside the circuit. - for (i, r) in claimed_ry_vars.iter().enumerate() { - ry_var[i].enforce_equal(r)?; - } - - let input_as_sparse_poly_var = SparsePolynomialVar::new_variable( - cs.clone(), - || Ok(&self.input_as_sparse_poly), - AllocationMode::Witness, - )?; - - let poly_input_eval_var = input_as_sparse_poly_var.evaluate(&ry_var[1..]); - - let eval_vars_at_ry_var = FpVar::::new_input(cs.clone(), || Ok(&self.eval_vars_at_ry))?; - - let eval_Z_at_ry_var = (FpVar::::one() - &ry_var[0]) * &eval_vars_at_ry_var - + &ry_var[0] * &poly_input_eval_var; - - let (eval_A_r, eval_B_r, eval_C_r) = self.evals; - - let eval_A_r_var = FpVar::::new_witness(cs.clone(), || Ok(eval_A_r))?; - let eval_B_r_var = FpVar::::new_witness(cs.clone(), || Ok(eval_B_r))?; - let eval_C_r_var = FpVar::::new_witness(cs.clone(), || Ok(eval_C_r))?; - - let scalar_var = - &r_A_var * &eval_A_r_var + &r_B_var * &eval_B_r_var + &r_C_var * &eval_C_r_var; - - let expected_claim_post_phase2_var = eval_Z_at_ry_var * scalar_var; - claim_post_phase2_var.enforce_equal(&expected_claim_post_phase2_var)?; - - let expected_transcript_state_var = transcript_var.challenge()?; - let claimed_transcript_state_var = - FpVar::::new_input(cs, || Ok(self.claimed_transcript_sat_state))?; +/// This section implements the sumcheck verification part of Spartan +impl ConstraintSynthesizer for R1CSVerificationCircuit { + fn generate_constraints(self, cs: ConstraintSystemRef) -> ark_relations::r1cs::Result<()> { + let initial_challenge_var = FpVar::::new_input(cs.clone(), || Ok(self.prev_challenge))?; + let mut transcript_var = + PoseidonTranscripVar::new(cs.clone(), &self.params, initial_challenge_var); + + let poly_sc1_vars = self + .sc_phase1 + .polys + .iter() + .map(|p| UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap()) + .collect::>>(); + + let poly_sc2_vars = self + .sc_phase2 + .polys + .iter() + .map(|p| UniPolyVar::new_variable(cs.clone(), || Ok(p), 
AllocationMode::Witness).unwrap()) + .collect::>>(); + + let input_vars = self + .input + .iter() + .map(|i| FpVar::::new_variable(cs.clone(), || Ok(i), AllocationMode::Input).unwrap()) + .collect::>>(); + + let claimed_rx_vars = self + .claimed_rx + .iter() + .map(|r| FpVar::::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap()) + .collect::>>(); + + let claimed_ry_vars = self + .claimed_ry + .iter() + .map(|r| FpVar::::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap()) + .collect::>>(); + + transcript_var.append_vector(&input_vars)?; + + let num_rounds_x = self.num_cons.log_2(); + let _num_rounds_y = (2 * self.num_vars).log_2(); + + let tau_vars = transcript_var.challenge_scalar_vec(num_rounds_x)?; + + let claim_phase1_var = FpVar::::new_witness(cs.clone(), || Ok(F::zero()))?; + + let (claim_post_phase1_var, rx_var) = + self + .sc_phase1 + .verifiy_sumcheck(&poly_sc1_vars, &claim_phase1_var, &mut transcript_var)?; + + // The prover sends (rx, ry) to the verifier for the evaluation proof so + // the constraints need to ensure it is indeed the result from the first + // round of sumcheck verification. + for (i, r) in claimed_rx_vars.iter().enumerate() { + rx_var[i].enforce_equal(r)?; + } - // Ensure that the prover and verifier transcipt views are consistent at - // the end of the satisfiability proof. - expected_transcript_state_var.enforce_equal(&claimed_transcript_state_var)?; + let (Az_claim, Bz_claim, Cz_claim, prod_Az_Bz_claims) = &self.claims_phase2; - Ok(()) - } -} + let Az_claim_var = FpVar::::new_witness(cs.clone(), || Ok(Az_claim))?; + let Bz_claim_var = FpVar::::new_witness(cs.clone(), || Ok(Bz_claim))?; + let Cz_claim_var = FpVar::::new_witness(cs.clone(), || Ok(Cz_claim))?; + let prod_Az_Bz_claim_var = FpVar::::new_witness(cs.clone(), || Ok(prod_Az_Bz_claims))?; + let one = FpVar::::one(); + let prod_vars: Vec> = (0..rx_var.len()) + .map(|i| (&rx_var[i] * &tau_vars[i]) + (&one - &rx_var[i]) * (&one - &tau_vars[i])) + .collect(); + let mut taus_bound_rx_var = FpVar::::one(); -#[derive(Clone)] -pub struct VerifierConfig { - pub num_vars: usize, - pub num_cons: usize, - pub input: Vec, - pub input_as_sparse_poly: SparsePolynomial, - pub evals: (Fr, Fr, Fr), - pub params: PoseidonParameters, - pub prev_challenge: Fr, - pub claims_phase2: (Fr, Fr, Fr, Fr), - pub eval_vars_at_ry: Fr, - pub polys_sc1: Vec, - pub polys_sc2: Vec, - pub ry: Vec, - pub transcript_sat_state: Scalar, -} -#[derive(Clone)] -pub struct VerifierCircuit { - pub inner_circuit: R1CSVerificationCircuit, - pub inner_proof: GrothProof, - pub inner_vk: PreparedVerifyingKey, - pub eval_vars_at_ry: Fr, - pub claims_phase2: (Fr, Fr, Fr, Fr), - pub ry: Vec, - pub transcript_sat_state: Scalar, -} + for p_var in prod_vars.iter() { + taus_bound_rx_var *= p_var; + } -impl VerifierCircuit { - pub fn new( - config: &VerifierConfig, - mut rng: &mut R, - ) -> Result { - let inner_circuit = R1CSVerificationCircuit::new(config); - let (pk, vk) = Groth16::::setup(inner_circuit.clone(), &mut rng).unwrap(); - let proof = Groth16::::prove(&pk, inner_circuit.clone(), &mut rng)?; - let pvk = Groth16::::process_vk(&vk).unwrap(); - Ok(Self { - inner_circuit, - inner_proof: proof, - inner_vk: pvk, - eval_vars_at_ry: config.eval_vars_at_ry, - claims_phase2: config.claims_phase2, - ry: config.ry.clone(), - transcript_sat_state: config.transcript_sat_state, - }) + let expected_claim_post_phase1_var = + (&prod_Az_Bz_claim_var - &Cz_claim_var) * &taus_bound_rx_var; + + 
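+    // A sketch of the check enforced below, in the notation of the
+    // surrounding code (tau are the verifier challenges, rx the point
+    // produced by the first sumcheck): the final phase-one claim must equal
+    //   (Az~(rx) * Bz~(rx) - Cz~(rx)) * eq(tau, rx),
+    // where eq(tau, rx) = prod_i (tau_i * rx_i + (1 - tau_i) * (1 - rx_i))
+    // is exactly the product accumulated in `taus_bound_rx_var` above.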
+    claim_post_phase1_var.enforce_equal(&expected_claim_post_phase1_var)?;
+
+    let r_A_var = transcript_var.challenge()?;
+    let r_B_var = transcript_var.challenge()?;
+    let r_C_var = transcript_var.challenge()?;
+
+    let claim_phase2_var =
+      &r_A_var * &Az_claim_var + &r_B_var * &Bz_claim_var + &r_C_var * &Cz_claim_var;
+
+    let (claim_post_phase2_var, ry_var) =
+      self
+        .sc_phase2
+        .verifiy_sumcheck(&poly_sc2_vars, &claim_phase2_var, &mut transcript_var)?;
+
+    // Because the verifier checks the commitment opening on point ry outside
+    // the circuit, the prover needs to send ry to the verifier (making the
+    // proof size O(log n)). As this point is normally obtained by the verifier
+    // from the second round of sumcheck, the circuit needs to ensure the
+    // claimed point, coming from the prover, is actually the point derived
+    // inside the circuit. These additional checks will be removed
+    // when the commitment verification is done inside the circuit.
+    // Moreover, (rx, ry) will be used in the evaluation proof.
+    for (i, r) in claimed_ry_vars.iter().enumerate() {
+      ry_var[i].enforce_equal(r)?;
+    }
+
+    let input_as_sparse_poly_var = SparsePolynomialVar::new_variable(
+      cs.clone(),
+      || Ok(&self.input_as_sparse_poly),
+      AllocationMode::Witness,
+    )?;
+
+    let poly_input_eval_var = input_as_sparse_poly_var.evaluate(&ry_var[1..]);
+
+    let eval_vars_at_ry_var = FpVar::<F>::new_input(cs.clone(), || Ok(&self.eval_vars_at_ry))?;
+
+    let eval_Z_at_ry_var =
+      (FpVar::<F>::one() - &ry_var[0]) * &eval_vars_at_ry_var + &ry_var[0] * &poly_input_eval_var;
+
+    let (eval_A_r, eval_B_r, eval_C_r) = self.evals;
+
+    let eval_A_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_A_r))?;
+    let eval_B_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_B_r))?;
+    let eval_C_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_C_r))?;
+
+    let scalar_var = &r_A_var * &eval_A_r_var + &r_B_var * &eval_B_r_var + &r_C_var * &eval_C_r_var;
+
+    let expected_claim_post_phase2_var = eval_Z_at_ry_var * scalar_var;
+    claim_post_phase2_var.enforce_equal(&expected_claim_post_phase2_var)?;
+    let expected_transcript_state_var = transcript_var.challenge()?;
+    let claimed_transcript_state_var =
+      FpVar::<F>::new_input(cs, || Ok(self.claimed_transcript_sat_state))?;
+
+    // Ensure that the prover and verifier transcript views are consistent at
+    // the end of the satisfiability proof.
+    expected_transcript_state_var.enforce_equal(&claimed_transcript_state_var)?;
+    Ok(())
+  }
 }
-impl ConstraintSynthesizer<Fq> for VerifierCircuit {
-  fn generate_constraints(self, cs: ConstraintSystemRef<Fq>) -> ark_relations::r1cs::Result<()> {
-    let proof_var =
-      ProofVar::<I, IV>::new_witness(cs.clone(), || Ok(self.inner_proof.clone()))?;
-    let (v_A, v_B, v_C, v_AB) = self.claims_phase2;
-    let mut pubs = vec![];
-    pubs.extend(self.ry);
-    pubs.extend(vec![v_A, v_B, v_C, v_AB]);
-    pubs.extend(vec![self.eval_vars_at_ry, self.transcript_sat_state]);
-
-    let bits = pubs
-      .iter()
-      .map(|c| {
-        let bits: Vec<bool> = BitIteratorLE::new(c.into_repr().as_ref().to_vec()).collect();
-        Vec::new_witness(cs.clone(), || Ok(bits))
-      })
-      .collect::<Result<Vec<_>, _>>()?;
-    let input_var = BooleanInputVar::<Fr, Fq>::new(bits);
-
-    let vk_var = PreparedVerifyingKeyVar::new_witness(cs, || Ok(self.inner_vk.clone()))?;
-    Groth16VerifierGadget::verify_with_processed_vk(&vk_var, &input_var, &proof_var)?
- .enforce_equal(&Boolean::constant(true))?; - Ok(()) - } +#[derive(Clone)] +pub struct VerifierConfig { + pub comm: Commitment, + pub num_vars: usize, + pub num_cons: usize, + pub input: Vec, + pub input_as_sparse_poly: SparsePolynomial, + pub evals: (E::ScalarField, E::ScalarField, E::ScalarField), + pub params: PoseidonConfig, + pub prev_challenge: E::ScalarField, + pub claims_phase2: ( + E::ScalarField, + E::ScalarField, + E::ScalarField, + E::ScalarField, + ), + pub eval_vars_at_ry: E::ScalarField, + pub polys_sc1: Vec>, + pub polys_sc2: Vec>, + pub rx: Vec, + pub ry: Vec, + pub transcript_sat_state: E::ScalarField, } + +// Skeleton for the polynomial commitment verification circuit +// #[derive(Clone)] +// pub struct VerifierCircuit { +// pub inner_circuit: R1CSVerificationCircuit, +// pub inner_proof: GrothProof, +// pub inner_vk: PreparedVerifyingKey, +// pub eval_vars_at_ry: Fr, +// pub claims_phase2: (Fr, Fr, Fr, Fr), +// pub ry: Vec, +// pub transcript_sat_state: Scalar, +// } + +// impl VerifierCircuit { +// pub fn new( +// config: &VerifierConfig, +// mut rng: &mut R, +// ) -> Result { +// let inner_circuit = R1CSVerificationCircuit::new(config); +// let (pk, vk) = Groth16::::setup(inner_circuit.clone(), &mut rng).unwrap(); +// let proof = Groth16::::prove(&pk, inner_circuit.clone(), &mut rng)?; +// let pvk = Groth16::::process_vk(&vk).unwrap(); +// Ok(Self { +// inner_circuit, +// inner_proof: proof, +// inner_vk: pvk, +// eval_vars_at_ry: config.eval_vars_at_ry, +// claims_phase2: config.claims_phase2, +// ry: config.ry.clone(), +// transcript_sat_state: config.transcript_sat_state, +// }) +// } +// } + +// impl ConstraintSynthesizer for VerifierCircuit { +// fn generate_constraints(self, cs: ConstraintSystemRef) -> ark_relations::r1cs::Result<()> { +// let proof_var = ProofVar::::new_witness(cs.clone(), || Ok(self.inner_proof.clone()))?; +// let (v_A, v_B, v_C, v_AB) = self.claims_phase2; +// let mut pubs = vec![]; +// pubs.extend(self.ry); +// pubs.extend(vec![v_A, v_B, v_C, v_AB]); +// pubs.extend(vec![self.eval_vars_at_ry, self.transcript_sat_state]); +// let bits = pubs +// .iter() +// .map(|c| { +// let bits: Vec = BitIteratorLE::new(c.into_bigint().as_ref().to_vec()).collect(); +// Vec::new_witness(cs.clone(), || Ok(bits)) +// }) +// .collect::, _>>()?; +// let input_var = BooleanInputVar::::new(bits); +// let vk_var = PreparedVerifyingKeyVar::new_witness(cs, || Ok(self.inner_vk.clone()))?; +// Groth16VerifierGadget::verify_with_processed_vk(&vk_var, &input_var, &proof_var)? 
+// .enforce_equal(&Boolean::constant(true))?; +// Ok(()) +// } +// } diff --git a/src/dense_mlpoly.rs b/src/dense_mlpoly.rs index e2a4081..50af1b6 100644 --- a/src/dense_mlpoly.rs +++ b/src/dense_mlpoly.rs @@ -1,788 +1,768 @@ #![allow(clippy::too_many_arguments)] -use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; - -use super::commitments::{Commitments, MultiCommitGens}; +use super::commitments::{MultiCommitGens, PedersenCommit}; use super::errors::ProofVerifyError; -use super::group::{ - CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement, - VartimeMultiscalarMul, -}; use super::math::Math; use super::nizk::{DotProductProofGens, DotProductProofLog}; -use super::random::RandomTape; -use super::scalar::Scalar; -use super::transcript::{AppendToTranscript, ProofTranscript}; -use ark_bls12_377::Bls12_377 as I; -use ark_ff::{One, UniformRand, Zero}; +use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter}; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::scalar_mul::variable_base::VariableBaseMSM; +use ark_ec::{pairing::Pairing, CurveGroup}; +use ark_ff::{PrimeField, Zero}; use ark_poly::MultilinearExtension; use ark_poly_commit::multilinear_pc::data_structures::{CommitterKey, VerifierKey}; use ark_poly_commit::multilinear_pc::MultilinearPC; use ark_serialize::*; use core::ops::Index; -use merlin::Transcript; -use std::ops::{Add, AddAssign, Neg, Sub, SubAssign}; - #[cfg(feature = "multicore")] use rayon::prelude::*; - +use std::ops::{Add, AddAssign, Neg, Sub, SubAssign}; // TODO: integrate the DenseMultilinearExtension(and Sparse) https://github.com/arkworks-rs/algebra/tree/master/poly/src/evaluations/multivariate/multilinear from arkworks into Spartan. This requires moving the specific Spartan functionalities in separate traits. 
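// A minimal usage sketch for the representation below, with values borrowed
// from the tests at the end of this file (F is the test scalar field): the
// polynomial is stored as its 2^num_vars evaluations over the Boolean
// hypercube, and evaluating at an arbitrary point r is a dot product with
// the basis eq(i, r) produced by EqPolynomial::evals.
//
//   let poly = DensePolynomial::new(vec![F::one(), F::from(2), F::from(1), F::from(4)]);
//   // Z~(4, 3) = sum_i eq(bits(i), [4, 3]) * Z[i] = 28
//   assert_eq!(poly.evaluate(&[F::from(4), F::from(3)]), F::from(28));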
#[derive(Debug, Clone, Eq, PartialEq, Hash, CanonicalDeserialize, CanonicalSerialize)] -pub struct DensePolynomial { - num_vars: usize, // the number of variables in the multilinear polynomial - len: usize, - Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs +pub struct DensePolynomial { + pub num_vars: usize, // the number of variables in the multilinear polynomial + pub len: usize, + pub Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } -impl MultilinearExtension for DensePolynomial { - fn num_vars(&self) -> usize { - self.get_num_vars() - } +impl MultilinearExtension for DensePolynomial { + fn num_vars(&self) -> usize { + self.get_num_vars() + } - fn evaluate(&self, point: &[Scalar]) -> Option { - if point.len() == self.num_vars { - Some(self.evaluate(&point)) - } else { - None - } + fn evaluate(&self, point: &[F]) -> Option { + if point.len() == self.num_vars { + Some(self.evaluate(&point)) + } else { + None } + } - fn rand(num_vars: usize, rng: &mut R) -> Self { - let evals = (0..(1 << num_vars)).map(|_| Scalar::rand(rng)).collect(); - Self { - num_vars: num_vars, - len: 1 << num_vars, - Z: evals, - } + fn rand(num_vars: usize, rng: &mut R) -> Self { + let evals = (0..(1 << num_vars)).map(|_| F::rand(rng)).collect(); + Self { + num_vars, + len: 1 << num_vars, + Z: evals, } + } - fn relabel(&self, _a: usize, _b: usize, _k: usize) -> Self { - unimplemented!() - } + fn relabel(&self, _a: usize, _b: usize, _k: usize) -> Self { + unimplemented!() + } - fn fix_variables(&self, _partial_point: &[Scalar]) -> Self { - unimplemented!() - } + fn fix_variables(&self, _partial_point: &[F]) -> Self { + unimplemented!() + } - fn to_evaluations(&self) -> Vec { - self.Z.to_vec() - } + fn to_evaluations(&self) -> Vec { + self.Z.to_vec() + } } -impl Zero for DensePolynomial { - fn zero() -> Self { - Self { - num_vars: 0, - len: 1, - Z: vec![Scalar::zero()], - } +impl Zero for DensePolynomial { + fn zero() -> Self { + Self { + num_vars: 0, + len: 1, + Z: vec![F::zero()], } + } - fn is_zero(&self) -> bool { - self.num_vars == 0 && self.len == 1 && self.Z[0].is_zero() - } + fn is_zero(&self) -> bool { + self.num_vars == 0 && self.len == 1 && self.Z[0].is_zero() + } } -impl Add for DensePolynomial { - type Output = DensePolynomial; - fn add(self, other: Self) -> Self { - &self + &other - } +impl Add for DensePolynomial { + type Output = DensePolynomial; + fn add(self, other: Self) -> Self { + &self + &other + } } // function needed because the result might have a different lifetime than the // operands -impl<'a, 'b> Add<&'a DensePolynomial> for &'b DensePolynomial { - type Output = DensePolynomial; - - fn add(self, other: &'a DensePolynomial) -> Self::Output { - if other.is_zero() { - return self.clone(); - } - if self.is_zero() { - return other.clone(); - } - assert_eq!(self.num_vars, other.num_vars); - - let res: Vec = self - .Z - .iter() - .zip(other.Z.iter()) - .map(|(a, b)| *a + *b) - .collect(); - Self::Output { - num_vars: self.num_vars, - len: self.len, - Z: res, - } - } +impl<'a, 'b, F: PrimeField> Add<&'a DensePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + fn add(self, other: &'a DensePolynomial) -> Self::Output { + if other.is_zero() { + return self.clone(); + } + if self.is_zero() { + return other.clone(); + } + assert_eq!(self.num_vars, other.num_vars); + + let res = self + .Z + .iter() + .zip(other.Z.iter()) + .map(|(a, b)| *a + *b) + .collect(); + Self::Output { + num_vars: self.num_vars, + len: self.len, + 
Z: res, + } + } } -impl AddAssign for DensePolynomial { - fn add_assign(&mut self, other: Self) { - *self = &*self + &other; - } +impl AddAssign for DensePolynomial { + fn add_assign(&mut self, other: Self) { + *self = &*self + &other; + } } -impl<'a, 'b> AddAssign<&'a DensePolynomial> for DensePolynomial { - fn add_assign(&mut self, other: &'a DensePolynomial) { - *self = &*self + other; - } +impl<'a, 'b, F: PrimeField> AddAssign<&'a DensePolynomial> for DensePolynomial { + fn add_assign(&mut self, other: &'a DensePolynomial) { + *self = &*self + other; + } } -impl<'a, 'b> AddAssign<(Scalar, &'a DensePolynomial)> for DensePolynomial { - fn add_assign(&mut self, (scalar, other): (Scalar, &'a DensePolynomial)) { - let other = Self { - num_vars: other.num_vars, - len: 1 << other.num_vars, - Z: other.Z.iter().map(|x| scalar * x).collect(), - }; - *self = &*self + &other; - } +impl<'a, 'b, F: PrimeField> AddAssign<(F, &'a DensePolynomial)> for DensePolynomial { + fn add_assign(&mut self, (scalar, other): (F, &'a DensePolynomial)) { + let other = Self { + num_vars: other.num_vars, + len: 1 << other.num_vars, + Z: other.Z.iter().map(|x| scalar * x).collect(), + }; + *self = &*self + &other; + } } -impl Neg for DensePolynomial { - type Output = DensePolynomial; +impl Neg for DensePolynomial { + type Output = DensePolynomial; - fn neg(self) -> Self::Output { - Self::Output { - num_vars: self.num_vars, - len: self.len, - Z: self.Z.iter().map(|x| -*x).collect(), - } + fn neg(self) -> Self::Output { + Self::Output { + num_vars: self.num_vars, + len: self.len, + Z: self.Z.iter().map(|x| -*x).collect(), } + } } -impl Sub for DensePolynomial { - type Output = DensePolynomial; +impl Sub for DensePolynomial { + type Output = DensePolynomial; - fn sub(self, other: Self) -> Self::Output { - &self - &other - } + fn sub(self, other: Self) -> Self::Output { + &self - &other + } } -impl<'a, 'b> Sub<&'a DensePolynomial> for &'b DensePolynomial { - type Output = DensePolynomial; +impl<'a, 'b, F: PrimeField> Sub<&'a DensePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; - fn sub(self, other: &'a DensePolynomial) -> Self::Output { - self + &other.clone().neg() - } + fn sub(self, other: &'a DensePolynomial) -> Self::Output { + self + &other.clone().neg() + } } -impl SubAssign for DensePolynomial { - fn sub_assign(&mut self, other: Self) { - *self = &*self - &other; - } +impl SubAssign for DensePolynomial { + fn sub_assign(&mut self, other: Self) { + *self = &*self - &other; + } } -impl<'a, 'b> SubAssign<&'a DensePolynomial> for DensePolynomial { - fn sub_assign(&mut self, other: &'a DensePolynomial) { - *self = &*self - other; - } +impl<'a, 'b, F: PrimeField> SubAssign<&'a DensePolynomial> for DensePolynomial { + fn sub_assign(&mut self, other: &'a DensePolynomial) { + *self = &*self - other; + } } #[derive(Clone)] -pub struct PolyCommitmentGens { - pub gens: DotProductProofGens, - pub ck: CommitterKey, - pub vk: VerifierKey, +pub struct PolyCommitmentGens { + pub gens: DotProductProofGens, + pub ck: CommitterKey, + pub vk: VerifierKey, } -impl PolyCommitmentGens { - // num vars is the number of variables in the multilinear polynomial - // this gives the maximum degree bound - pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens { - let (_left, right) = EqPolynomial::compute_factored_lens(num_vars); - let gens = DotProductProofGens::new(right.pow2(), label); - - // Generates the SRS and trims it based on the number of variables in the - // multilinear polynomial. 
- let mut rng = ark_std::test_rng(); - let pst_gens = MultilinearPC::::setup(num_vars, &mut rng); - let (ck, vk) = MultilinearPC::::trim(&pst_gens, num_vars); - - PolyCommitmentGens { gens, ck, vk } - } +impl PolyCommitmentGens { + // num vars is the number of variables in the multilinear polynomial + // this gives the maximum degree bound + pub fn setup(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens { + let (_left, right) = EqPolynomial::::compute_factored_lens(num_vars); + let gens = DotProductProofGens::new(right.pow2(), label); + let odd = if num_vars % 2 == 1 { 1 } else { 0 }; + // Generates the SRS and trims it based on the number of variables in the + // multilinear polynomial. + // If num_vars is odd, a crs of size num_vars/2 + 1 will be needed for the + // polynomial commitment. + let mut rng = ark_std::test_rng(); + let pst_gens = MultilinearPC::::setup(num_vars / 2 + odd, &mut rng); + let (ck, vk) = MultilinearPC::::trim(&pst_gens, num_vars / 2 + odd); + + PolyCommitmentGens { gens, ck, vk } + } } -pub struct PolyCommitmentBlinds { - blinds: Vec, +pub struct PolyCommitmentBlinds { + blinds: Vec, } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct PolyCommitment { - C: Vec, +pub struct PolyCommitment { + C: Vec, } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct ConstPolyCommitment { - C: CompressedGroup, +pub struct ConstPolyCommitment { + C: G, } -pub struct EqPolynomial { - r: Vec, +pub struct EqPolynomial { + r: Vec, } -impl EqPolynomial { - pub fn new(r: Vec) -> Self { - EqPolynomial { r } - } - - pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { - assert_eq!(self.r.len(), rx.len()); - (0..rx.len()) - .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i])) - .product() - } - - pub fn evals(&self) -> Vec { - let ell = self.r.len(); - - let mut evals: Vec = vec![Scalar::one(); ell.pow2()]; - let mut size = 1; - for j in 0..ell { - // in each iteration, we double the size of chis - size *= 2; - // TODO: this reverse causes inconsistent evaluation in comparison to the - //evaluation function in ark-poly-commit, we should look into this to - // avoid the extra constraints in the circuit - for i in (0..size).rev().step_by(2) { - // copy each element from the prior iteration twice - let scalar = evals[i / 2]; - evals[i] = scalar * self.r[j]; - evals[i - 1] = scalar - evals[i]; - } - } - evals - } - - pub fn compute_factored_lens(ell: usize) -> (usize, usize) { - (ell / 2, ell - ell / 2) - } - - pub fn compute_factored_evals(&self) -> (Vec, Vec) { - let ell = self.r.len(); - let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell); - - let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals(); - let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals(); - - (L, R) - } +impl EqPolynomial { + pub fn new(r: Vec) -> Self { + EqPolynomial { r } + } + + pub fn evaluate(&self, rx: &[F]) -> F { + assert_eq!(self.r.len(), rx.len()); + (0..rx.len()) + .map(|i| self.r[i] * rx[i] + (F::one() - self.r[i]) * (F::one() - rx[i])) + .product() + } + + pub fn evals(&self) -> Vec { + let ell = self.r.len(); + + let mut evals: Vec = vec![F::one(); ell.pow2()]; + let mut size = 1; + for j in 0..ell { + // in each iteration, we double the size of chis + size *= 2; + // TODO: this reverse causes inconsistent evaluation in comparison to the + //evaluation function in ark-poly-commit, we should look into this to + // avoid the extra constraints in the circuit + for i in 
(0..size).rev().step_by(2) { + // copy each element from the prior iteration twice + let scalar = evals[i / 2]; + evals[i] = scalar * self.r[j]; + evals[i - 1] = scalar - evals[i]; + } + } + evals + } + + pub fn compute_factored_lens(ell: usize) -> (usize, usize) { + (ell / 2, ell - ell / 2) + } + + pub fn compute_factored_evals(&self) -> (Vec, Vec) { + let ell = self.r.len(); + let (left_num_vars, _right_num_vars) = EqPolynomial::::compute_factored_lens(ell); + + let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals(); + let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals(); + + (L, R) + } } pub struct IdentityPolynomial { - size_point: usize, + size_point: usize, } impl IdentityPolynomial { - pub fn new(size_point: usize) -> Self { - IdentityPolynomial { size_point } - } - - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - let len = r.len(); - assert_eq!(len, self.size_point); - (0..len) - .map(|i| Scalar::from((len - i - 1).pow2() as u64) * r[i]) - .sum() - } + pub fn new(size_point: usize) -> Self { + IdentityPolynomial { size_point } + } + + pub fn evaluate(&self, r: &[F]) -> F { + let len = r.len(); + assert_eq!(len, self.size_point); + (0..len) + .map(|i| F::from((len - i - 1).pow2() as u64) * r[i]) + .sum() + } } -impl DensePolynomial { - pub fn new(Z: Vec) -> Self { - DensePolynomial { - num_vars: Z.len().log_2(), - len: Z.len(), - Z, - } - } - - pub fn get_num_vars(&self) -> usize { - self.num_vars - } - - pub fn len(&self) -> usize { - self.len - } - - pub fn clone(&self) -> DensePolynomial { - DensePolynomial::new(self.Z[0..self.len].to_vec()) - } - - pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) { - assert!(idx < self.len()); - ( - DensePolynomial::new(self.Z[..idx].to_vec()), - DensePolynomial::new(self.Z[idx..2 * idx].to_vec()), - ) - } - - #[cfg(feature = "multicore")] - fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment { - let L_size = blinds.len(); - let R_size = self.Z.len() / L_size; - assert_eq!(L_size * R_size, self.Z.len()); - let C = (0..L_size) - .into_par_iter() - .map(|i| { - self.Z[R_size * i..R_size * (i + 1)] - .commit(&blinds[i], gens) - .compress() - }) - .collect(); - PolyCommitment { C } - } - - #[cfg(not(feature = "multicore"))] - fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment { - let L_size = blinds.len(); - let R_size = self.Z.len() / L_size; - assert_eq!(L_size * R_size, self.Z.len()); - let C = (0..L_size) - .map(|i| { - self.Z[R_size * i..R_size * (i + 1)] - .commit(&blinds[i], gens) - .compress() - }) - .collect(); - PolyCommitment { C } - } - - pub fn commit( - &self, - gens: &PolyCommitmentGens, - random_tape: Option<&mut RandomTape>, - ) -> (PolyCommitment, PolyCommitmentBlinds) { - let n = self.Z.len(); - let ell = self.get_num_vars(); - assert_eq!(n, ell.pow2()); - - let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell); - let L_size = left_num_vars.pow2(); - let R_size = right_num_vars.pow2(); - assert_eq!(L_size * R_size, n); - - let blinds = if let Some(t) = random_tape { - PolyCommitmentBlinds { - blinds: t.random_vector(b"poly_blinds", L_size), - } - } else { - PolyCommitmentBlinds { - blinds: vec![Scalar::zero(); L_size], - } - }; - - (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds) - } - - pub fn bound(&self, L: &[Scalar]) -> Vec { - let (left_num_vars, right_num_vars) = - EqPolynomial::compute_factored_lens(self.get_num_vars()); - let L_size = left_num_vars.pow2(); - let 
R_size = right_num_vars.pow2(); - (0..R_size) - .map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum()) - .collect() - } - - pub fn bound_poly_var_top(&mut self, r: &Scalar) { - let n = self.len() / 2; - for i in 0..n { - self.Z[i] = self.Z[i] + (self.Z[i + n] - self.Z[i]) * r; - } - self.num_vars -= 1; - self.len = n; - } - - pub fn bound_poly_var_bot(&mut self, r: &Scalar) { - let n = self.len() / 2; - for i in 0..n { - self.Z[i] = self.Z[2 * i] + (self.Z[2 * i + 1] - self.Z[2 * i]) * r; - } - self.num_vars -= 1; - self.len = n; - } - - // returns Z(r) in O(n) time - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - // r must have a value for each variable - assert_eq!(r.len(), self.get_num_vars()); - let chis = EqPolynomial::new(r.to_vec()).evals(); - assert_eq!(chis.len(), self.Z.len()); - DotProductProofLog::compute_dotproduct(&self.Z, &chis) - } - - fn vec(&self) -> &Vec { - &self.Z - } - - pub fn extend(&mut self, other: &DensePolynomial) { - // TODO: allow extension even when some vars are bound - assert_eq!(self.Z.len(), self.len); - let other_vec = other.vec(); - assert_eq!(other_vec.len(), self.len); - self.Z.extend(other_vec); - self.num_vars += 1; - self.len *= 2; - assert_eq!(self.Z.len(), self.len); - } - - pub fn merge<'a, I>(polys: I) -> DensePolynomial - where - I: IntoIterator, - { - let mut Z: Vec = Vec::new(); - for poly in polys.into_iter() { - Z.extend(poly.vec()); - } - - // pad the polynomial with zero polynomial at the end - Z.resize(Z.len().next_power_of_two(), Scalar::zero()); - - DensePolynomial::new(Z) - } - - pub fn from_usize(Z: &[usize]) -> Self { - DensePolynomial::new( - (0..Z.len()) - .map(|i| Scalar::from(Z[i] as u64)) - .collect::>(), - ) - } +impl DensePolynomial { + pub fn new(Z: Vec) -> Self { + DensePolynomial { + num_vars: Z.len().log_2(), + len: Z.len(), + Z, + } + } + + pub fn get_num_vars(&self) -> usize { + self.num_vars + } + + pub fn len(&self) -> usize { + self.len + } + + pub fn clone(&self) -> Self { + DensePolynomial::new(self.Z[0..self.len].to_vec()) + } + + pub fn split(&self, idx: usize) -> (Self, Self) { + assert!(idx < self.len()); + ( + DensePolynomial::new(self.Z[..idx].to_vec()), + DensePolynomial::new(self.Z[idx..2 * idx].to_vec()), + ) + } + + #[cfg(feature = "multicore")] + fn commit_inner(&self, blinds: &[F], gens: &MultiCommitGens) -> PolyCommitment + where + G: CurveGroup, + { + let L_size = blinds.len(); + let R_size = self.Z.len() / L_size; + assert_eq!(L_size * R_size, self.Z.len()); + let C = (0..L_size) + .into_par_iter() + .map(|i| { + PedersenCommit::commit_slice(&self.Z[R_size * i..R_size * (i + 1)], &blinds[i], gens) + }) + .collect(); + PolyCommitment { C } + } + + #[cfg(not(feature = "multicore"))] + fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment + where + G: CurveGroup, + { + let L_size = blinds.len(); + let R_size = self.Z.len() / L_size; + assert_eq!(L_size * R_size, self.Z.len()); + let C = (0..L_size) + .map(|i| { + self.Z[R_size * i..R_size * (i + 1)] + .commit(&blinds[i], gens) + .compress() + }) + .collect(); + PolyCommitment { C } + } + + pub fn commit( + &self, + gens: &PolyCommitmentGens, + random_blinds: bool, + ) -> (PolyCommitment, PolyCommitmentBlinds) + where + E: Pairing, + { + let n = self.Z.len(); + let ell = self.get_num_vars(); + assert_eq!(n, ell.pow2()); + let (left_num_vars, right_num_vars) = + EqPolynomial::::compute_factored_lens(ell); + let L_size = left_num_vars.pow2(); + let R_size = right_num_vars.pow2(); + assert_eq!(L_size * R_size, 
n); + + let blinds = PolyCommitmentBlinds { + blinds: if random_blinds { + (0..L_size) + .map(|_| F::rand(&mut rand::thread_rng())) + .collect::>() + } else { + (0..L_size).map(|_| F::zero()).collect::>() + }, + }; + + (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds) + } + + pub fn bound(&self, L: &[F]) -> Vec { + let (left_num_vars, right_num_vars) = + EqPolynomial::::compute_factored_lens(self.get_num_vars()); + let L_size = left_num_vars.pow2(); + let R_size = right_num_vars.pow2(); + (0..R_size) + .map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum()) + .collect() + } + + pub fn bound_poly_var_top(&mut self, r: &F) { + let n = self.len() / 2; + for i in 0..n { + self.Z[i] = self.Z[i] + (self.Z[i + n] - self.Z[i]) * r; + } + self.num_vars -= 1; + self.len = n; + } + + pub fn bound_poly_var_bot(&mut self, r: &F) { + let n = self.len() / 2; + for i in 0..n { + self.Z[i] = self.Z[2 * i] + (self.Z[2 * i + 1] - self.Z[2 * i]) * r; + } + self.num_vars -= 1; + self.len = n; + } + + // returns Z(r) in O(n) time + pub fn evaluate(&self, r: &[F]) -> F { + // r must have a value for each variable + assert_eq!(r.len(), self.get_num_vars()); + let chis = EqPolynomial::new(r.to_vec()).evals(); + assert_eq!(chis.len(), self.Z.len()); + crate::dot_product(&self.Z, &chis) + } + + fn vec(&self) -> &Vec { + &self.Z + } + + pub fn extend(&mut self, other: &DensePolynomial) { + // TODO: allow extension even when some vars are bound + assert_eq!(self.Z.len(), self.len); + let other_vec = other.vec(); + assert_eq!(other_vec.len(), self.len); + self.Z.extend(other_vec); + self.num_vars += 1; + self.len *= 2; + assert_eq!(self.Z.len(), self.len); + } + + pub fn merge<'a, I>(polys: I) -> DensePolynomial + where + I: IntoIterator>, + { + let mut Z: Vec = Vec::new(); + for poly in polys.into_iter() { + Z.extend(poly.vec()); + } + + // pad the polynomial with zero polynomial at the end + Z.resize(Z.len().next_power_of_two(), F::zero()); + + DensePolynomial::new(Z) + } + + pub fn from_usize(Z: &[usize]) -> Self { + DensePolynomial::new( + (0..Z.len()) + .map(|i| F::from(Z[i] as u64)) + .collect::>(), + ) + } } -impl Index for DensePolynomial { - type Output = Scalar; +impl Index for DensePolynomial { + type Output = F; - #[inline(always)] - fn index(&self, _index: usize) -> &Scalar { - &(self.Z[_index]) - } + #[inline(always)] + fn index(&self, _index: usize) -> &F { + &(self.Z[_index]) + } } -impl AppendToTranscript for PolyCommitment { - fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { - transcript.append_message(label, b"poly_commitment_begin"); - for i in 0..self.C.len() { - transcript.append_point(b"poly_commitment_share", &self.C[i]); - } - transcript.append_message(label, b"poly_commitment_end"); - } -} - -impl AppendToPoseidon for PolyCommitment { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) { - for i in 0..self.C.len() { - transcript.append_point(&self.C[i]); - } +impl TranscriptWriter for PolyCommitment { + fn write_to_transcript(&self, transcript: &mut PoseidonTranscript) { + for i in 0..self.C.len() { + transcript.append_point(b"", &self.C[i]); } + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct PolyEvalProof { - proof: DotProductProofLog, +pub struct PolyEvalProof { + proof: DotProductProofLog, } -impl PolyEvalProof { - fn protocol_name() -> &'static [u8] { - b"polynomial evaluation proof" - } - - pub fn prove( - poly: &DensePolynomial, - blinds_opt: Option<&PolyCommitmentBlinds>, - r: 
&[Scalar], // point at which the polynomial is evaluated - Zr: &Scalar, // evaluation of \widetilde{Z}(r) - blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr - gens: &PolyCommitmentGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - ) -> (PolyEvalProof, CompressedGroup) { - // transcript.append_protocol_name(PolyEvalProof::protocol_name()); - - // assert vectors are of the right size - assert_eq!(poly.get_num_vars(), r.len()); - - let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len()); - let L_size = left_num_vars.pow2(); - let R_size = right_num_vars.pow2(); - - let default_blinds = PolyCommitmentBlinds { - blinds: vec![Scalar::zero(); L_size], - }; - let blinds = blinds_opt.map_or(&default_blinds, |p| p); - - assert_eq!(blinds.blinds.len(), L_size); - - let zero = Scalar::zero(); - let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); - - // compute the L and R vectors - let eq = EqPolynomial::new(r.to_vec()); - let (L, R) = eq.compute_factored_evals(); - assert_eq!(L.len(), L_size); - assert_eq!(R.len(), R_size); - - // compute the vector underneath L*Z and the L*blinds - // compute vector-matrix product between L and Z viewed as a matrix - let LZ = poly.bound(&L); - let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); - - // a dot product proof of size R_size - let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove( - &gens.gens, - transcript, - random_tape, - &LZ, - &LZ_blind, - &R, - Zr, - blind_Zr, - ); - - (PolyEvalProof { proof }, C_Zr_prime) - } - - pub fn verify( - &self, - gens: &PolyCommitmentGens, - transcript: &mut PoseidonTranscript, - r: &[Scalar], // point at which the polynomial is evaluated - C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r) - comm: &PolyCommitment, - ) -> Result<(), ProofVerifyError> { - // transcript.append_protocol_name(PolyEvalProof::protocol_name()); - - // compute L and R - let eq = EqPolynomial::new(r.to_vec()); - let (L, R) = eq.compute_factored_evals(); - - // compute a weighted sum of commitments and L - let C_decompressed = comm - .C - .iter() - .map(|pt| GroupElement::decompress(pt).unwrap()) - .collect::>(); - - let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed.as_slice()).compress(); - - self.proof - .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr) - } - - pub fn verify_plain( - &self, - gens: &PolyCommitmentGens, - transcript: &mut PoseidonTranscript, - r: &[Scalar], // point at which the polynomial is evaluated - Zr: &Scalar, // evaluation \widetilde{Z}(r) - comm: &PolyCommitment, - ) -> Result<(), ProofVerifyError> { - // compute a commitment to Zr with a blind of zero - let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress(); - - self.verify(gens, transcript, r, &C_Zr, comm) - } +impl PolyEvalProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + pub fn prove( + poly: &DensePolynomial, + blinds_opt: Option<&PolyCommitmentBlinds>, + r: &[E::ScalarField], // point at which the polynomial is evaluated + Zr: &E::ScalarField, // evaluation of \widetilde{Z}(r) + blind_Zr_opt: Option<&E::ScalarField>, // specifies a blind for Zr + gens: &PolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> (PolyEvalProof, E::G1) { + // transcript.append_protocol_name(PolyEvalProof::protocol_name()); + + // assert vectors are of the right size + assert_eq!(poly.get_num_vars(), r.len()); + + let (left_num_vars, right_num_vars) = + EqPolynomial::::compute_factored_lens(r.len()); + let L_size = left_num_vars.pow2(); 
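+    // A sketch of the factored evaluation this proof relies on (following
+    // compute_factored_evals above): with ell = r.len() variables split into
+    // left and right halves, Z is viewed as an L_size x R_size matrix M with
+    // M[j][i] = Z[j * R_size + i], and
+    //   Z~(r) = L^T * M * R,
+    // so opening \widetilde{Z}(r) reduces to a log-size dot-product proof
+    // between LZ = L^T * M (computed by `bound` below) and R.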
+ let R_size = right_num_vars.pow2(); + + let default_blinds = PolyCommitmentBlinds { + blinds: vec![E::ScalarField::zero(); L_size], + }; + let blinds = blinds_opt.map_or(&default_blinds, |p| p); + + assert_eq!(blinds.blinds.len(), L_size); + + let zero = E::ScalarField::zero(); + let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); + + // compute the L and R vectors + let eq = EqPolynomial::new(r.to_vec()); + let (L, R) = eq.compute_factored_evals(); + assert_eq!(L.len(), L_size); + assert_eq!(R.len(), R_size); + + // compute the vector underneath L*Z and the L*blinds + // compute vector-matrix product between L and Z viewed as a matrix + let LZ = poly.bound(&L); + let LZ_blind: E::ScalarField = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); + + // a dot product proof of size R_size + let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove( + &gens.gens, + transcript, + LZ.as_slice(), + &LZ_blind, + &R, + Zr, + blind_Zr, + ); + + (PolyEvalProof { proof }, C_Zr_prime) + } + + pub fn verify( + &self, + gens: &PolyCommitmentGens, + transcript: &mut PoseidonTranscript, + r: &[E::ScalarField], // point at which the polynomial is evaluated + C_Zr: &E::G1, // commitment to \widetilde{Z}(r) + comm: &PolyCommitment, + ) -> Result<(), ProofVerifyError> { + // transcript.append_protocol_name(PolyEvalProof::protocol_name()); + + // compute L and R + let eq = EqPolynomial::new(r.to_vec()); + let (L, R) = eq.compute_factored_evals(); + + // compute a weighted sum of commitments and L + let C_decompressed = &comm.C; + + let C_LZ = + ::msm(&::normalize_batch(C_decompressed), &L) + .unwrap(); + + self + .proof + .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr) + } + + pub fn verify_plain( + &self, + gens: &PolyCommitmentGens, + transcript: &mut PoseidonTranscript, + r: &[E::ScalarField], // point at which the polynomial is evaluated + Zr: &E::ScalarField, // evaluation \widetilde{Z}(r) + comm: &PolyCommitment, + ) -> Result<(), ProofVerifyError> { + // compute a commitment to Zr with a blind of zero + let C_Zr = PedersenCommit::commit_scalar(Zr, &E::ScalarField::zero(), &gens.gens.gens_1); + + self.verify(gens, transcript, r, &C_Zr, comm) + } } #[cfg(test)] mod tests { - use crate::parameters::poseidon_params; - - use super::*; - use ark_std::UniformRand; - - fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar { - let eq = EqPolynomial::new(r.to_vec()); - let (L, R) = eq.compute_factored_evals(); - - let ell = r.len(); - // ensure ell is even - assert!(ell % 2 == 0); - // compute n = 2^\ell - let n = ell.pow2(); - // compute m = sqrt(n) = 2^{\ell/2} - let m = n.square_root(); - - // compute vector-matrix product between L and Z viewed as a matrix - let LZ = (0..m) - .map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum()) - .collect::>(); - - // compute dot product between LZ and R - DotProductProofLog::compute_dotproduct(&LZ, &R) - } - - #[test] - fn check_polynomial_evaluation() { - // Z = [1, 2, 1, 4] - let Z = vec![ - Scalar::one(), - Scalar::from(2), - Scalar::from(1), - Scalar::from(4), - ]; - - // r = [4,3] - let r = vec![Scalar::from(4), Scalar::from(3)]; - - let eval_with_LR = evaluate_with_LR(&Z, &r); - let poly = DensePolynomial::new(Z); - - let eval = poly.evaluate(&r); - assert_eq!(eval, Scalar::from(28)); - assert_eq!(eval_with_LR, eval); - } - - pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec, Vec) { - let mut L: Vec = Vec::new(); - let mut R: Vec = Vec::new(); - - let ell = r.len(); - assert!(ell % 2 == 0); // ensure ell is even - let n = ell.pow2(); - let m = 
n.square_root(); - - // compute row vector L - for i in 0..m { - let mut chi_i = Scalar::one(); - for j in 0..ell / 2 { - let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0; - if bit_j { - chi_i *= r[j]; - } else { - chi_i *= Scalar::one() - r[j]; - } - } - L.push(chi_i); - } - - // compute column vector R - for i in 0..m { - let mut chi_i = Scalar::one(); - for j in ell / 2..ell { - let bit_j = (i & (1 << (r.len() - j - 1))) > 0; - if bit_j { - chi_i *= r[j]; - } else { - chi_i *= Scalar::one() - r[j]; - } - } - R.push(chi_i); - } - (L, R) - } - - pub fn compute_chis_at_r(r: &[Scalar]) -> Vec { - let ell = r.len(); - let n = ell.pow2(); - let mut chis: Vec = Vec::new(); - for i in 0..n { - let mut chi_i = Scalar::one(); - for j in 0..r.len() { - let bit_j = (i & (1 << (r.len() - j - 1))) > 0; - if bit_j { - chi_i *= r[j]; - } else { - chi_i *= Scalar::one() - r[j]; - } - } - chis.push(chi_i); - } - chis - } - - pub fn compute_outerproduct(L: Vec, R: Vec) -> Vec { - assert_eq!(L.len(), R.len()); - (0..L.len()) - .map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::>()) - .collect::>>() - .into_iter() - .flatten() - .collect::>() - } - - #[test] - fn check_memoized_chis() { - let mut rng = ark_std::rand::thread_rng(); - - let s = 10; - let mut r: Vec = Vec::new(); - for _i in 0..s { - r.push(Scalar::rand(&mut rng)); + use crate::ark_std::One; + use crate::parameters::poseidon_params; + + use super::*; + use ark_std::UniformRand; + + type F = ark_bls12_377::Fr; + type E = ark_bls12_377::Bls12_377; + + fn evaluate_with_LR(Z: &[F], r: &[F]) -> F { + let eq = EqPolynomial::new(r.to_vec()); + let (L, R) = eq.compute_factored_evals(); + + let ell = r.len(); + // ensure ell is even + assert!(ell % 2 == 0); + // compute n = 2^\ell + let n = ell.pow2(); + // compute m = sqrt(n) = 2^{\ell/2} + let m = n.square_root(); + + // compute vector-matrix product between L and Z viewed as a matrix + let LZ = (0..m) + .map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum()) + .collect::>(); + + // compute dot product between LZ and R + crate::dot_product(&LZ, &R) + } + + #[test] + fn check_polynomial_evaluation() { + // Z = [1, 2, 1, 4] + let Z = vec![F::one(), F::from(2), F::from(1), F::from(4)]; + + // r = [4,3] + let r = vec![F::from(4), F::from(3)]; + + let eval_with_LR = evaluate_with_LR(&Z, &r); + let poly = DensePolynomial::new(Z); + + let eval = poly.evaluate(&r); + assert_eq!(eval, F::from(28)); + assert_eq!(eval_with_LR, eval); + } + + pub fn compute_factored_chis_at_r(r: &[F]) -> (Vec, Vec) { + let mut L: Vec = Vec::new(); + let mut R: Vec = Vec::new(); + + let ell = r.len(); + assert!(ell % 2 == 0); // ensure ell is even + let n = ell.pow2(); + let m = n.square_root(); + + // compute row vector L + for i in 0..m { + let mut chi_i = F::one(); + for j in 0..ell / 2 { + let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0; + if bit_j { + chi_i *= r[j]; + } else { + chi_i *= F::one() - r[j]; } - let chis = tests::compute_chis_at_r(&r); - let chis_m = EqPolynomial::new(r).evals(); - assert_eq!(chis, chis_m); + } + L.push(chi_i); } - #[test] - fn check_factored_chis() { - let mut rng = ark_std::rand::thread_rng(); - - let s = 10; - let mut r: Vec = Vec::new(); - for _i in 0..s { - r.push(Scalar::rand(&mut rng)); + // compute column vector R + for i in 0..m { + let mut chi_i = F::one(); + for j in ell / 2..ell { + let bit_j = (i & (1 << (r.len() - j - 1))) > 0; + if bit_j { + chi_i *= r[j]; + } else { + chi_i *= F::one() - r[j]; } - let chis = EqPolynomial::new(r.clone()).evals(); - let (L, R) = 
EqPolynomial::new(r).compute_factored_evals(); - let O = compute_outerproduct(L, R); - assert_eq!(chis, O); - } - - #[test] - fn check_memoized_factored_chis() { - let mut rng = ark_std::rand::thread_rng(); - - let s = 10; - let mut r: Vec = Vec::new(); - for _i in 0..s { - r.push(Scalar::rand(&mut rng)); + } + R.push(chi_i); + } + (L, R) + } + + pub fn compute_chis_at_r(r: &[F]) -> Vec { + let ell = r.len(); + let n = ell.pow2(); + let mut chis: Vec = Vec::new(); + for i in 0..n { + let mut chi_i = F::one(); + for j in 0..r.len() { + let bit_j = (i & (1 << (r.len() - j - 1))) > 0; + if bit_j { + chi_i *= r[j]; + } else { + chi_i *= F::one() - r[j]; } - let (L, R) = tests::compute_factored_chis_at_r(&r); - let eq = EqPolynomial::new(r); - let (L2, R2) = eq.compute_factored_evals(); - assert_eq!(L, L2); - assert_eq!(R, R2); - } - - #[test] - fn check_polynomial_commit() { - let Z = vec![ - Scalar::from(1), - Scalar::from(2), - Scalar::from(1), - Scalar::from(4), - ]; - let poly = DensePolynomial::new(Z); - - // r = [4,3] - let r = vec![Scalar::from(4), Scalar::from(3)]; - let eval = poly.evaluate(&r); - assert_eq!(eval, Scalar::from(28)); - - let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two"); - let (poly_commitment, blinds) = poly.commit(&gens, None); - - let mut random_tape = RandomTape::new(b"proof"); - let params = poseidon_params(); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let (proof, C_Zr) = PolyEvalProof::prove( - &poly, - Some(&blinds), - &r, - &eval, - None, - &gens, - &mut prover_transcript, - &mut random_tape, - ); - - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment) - .is_ok()); - } + } + chis.push(chi_i); + } + chis + } + + pub fn compute_outerproduct(L: Vec, R: Vec) -> Vec { + assert_eq!(L.len(), R.len()); + (0..L.len()) + .map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::>()) + .collect::>>() + .into_iter() + .flatten() + .collect::>() + } + + #[test] + fn check_memoized_chis() { + let mut rng = ark_std::rand::thread_rng(); + + let s = 10; + let mut r: Vec = Vec::new(); + for _i in 0..s { + r.push(F::rand(&mut rng)); + } + let chis = tests::compute_chis_at_r(&r); + let chis_m = EqPolynomial::new(r).evals(); + assert_eq!(chis, chis_m); + } + + #[test] + fn check_factored_chis() { + let mut rng = ark_std::rand::thread_rng(); + + let s = 10; + let mut r: Vec = Vec::new(); + for _i in 0..s { + r.push(F::rand(&mut rng)); + } + let chis = EqPolynomial::new(r.clone()).evals(); + let (L, R) = EqPolynomial::new(r).compute_factored_evals(); + let O = compute_outerproduct(L, R); + assert_eq!(chis, O); + } + + #[test] + fn check_memoized_factored_chis() { + let mut rng = ark_std::rand::thread_rng(); + + let s = 10; + let mut r: Vec = Vec::new(); + for _i in 0..s { + r.push(F::rand(&mut rng)); + } + let (L, R) = tests::compute_factored_chis_at_r(&r); + let eq = EqPolynomial::new(r); + let (L2, R2) = eq.compute_factored_evals(); + assert_eq!(L, L2); + assert_eq!(R, R2); + } + + #[test] + fn check_polynomial_commit() { + let Z = vec![F::from(1), F::from(2), F::from(1), F::from(4)]; + let poly = DensePolynomial::new(Z); + + // r = [4,3] + let r = vec![F::from(4), F::from(3)]; + let eval = poly.evaluate(&r); + assert_eq!(eval, F::from(28)); + + let gens = PolyCommitmentGens::setup(poly.get_num_vars(), b"test-two"); + let (poly_commitment, blinds) = poly.commit(&gens, false); + + let params = poseidon_params(); + let mut prover_transcript = 
PoseidonTranscript::new(¶ms); + let (proof, C_Zr) = PolyEvalProof::::prove( + &poly, + Some(&blinds), + &r, + &eval, + None, + &gens, + &mut prover_transcript, + ); + + let mut verifier_transcript = PoseidonTranscript::new(¶ms); + assert!(proof + .verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment) + .is_ok()); + } } diff --git a/src/errors.rs b/src/errors.rs index 1dce8e2..2a7dbe9 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -3,30 +3,30 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum ProofVerifyError { - #[error("Proof verification failed")] - InternalError, - #[error("Compressed group element failed to decompress: {0:?}")] - DecompressionError(Vec), + #[error("Proof verification failed")] + InternalError, + #[error("Compressed group element failed to decompress: {0:?}")] + DecompressionError(Vec), } impl Default for ProofVerifyError { - fn default() -> Self { - ProofVerifyError::InternalError - } + fn default() -> Self { + ProofVerifyError::InternalError + } } #[derive(Clone, Debug, Eq, PartialEq)] pub enum R1CSError { - /// returned if the number of constraints is not a power of 2 - NonPowerOfTwoCons, - /// returned if the number of variables is not a power of 2 - NonPowerOfTwoVars, - /// returned if a wrong number of inputs in an assignment are supplied - InvalidNumberOfInputs, - /// returned if a wrong number of variables in an assignment are supplied - InvalidNumberOfVars, - /// returned if a [u8;32] does not parse into a valid Scalar in the field of ristretto255 - InvalidScalar, - /// returned if the supplied row or col in (row,col,val) tuple is out of range - InvalidIndex, + /// returned if the number of constraints is not a power of 2 + NonPowerOfTwoCons, + /// returned if the number of variables is not a power of 2 + NonPowerOfTwoVars, + /// returned if a wrong number of inputs in an assignment are supplied + InvalidNumberOfInputs, + /// returned if a wrong number of variables in an assignment are supplied + InvalidNumberOfVars, + /// returned if a [u8;32] does not parse into a valid Scalar in the field of ristretto255 + InvalidScalar, + /// returned if the supplied row or col in (row,col,val) tuple is out of range + InvalidIndex, } diff --git a/src/group.rs b/src/group.rs deleted file mode 100644 index 0b5087c..0000000 --- a/src/group.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::errors::ProofVerifyError; -use ark_ec::msm::VariableBaseMSM; -use ark_ff::PrimeField; - -use lazy_static::lazy_static; - -use super::scalar::Scalar; - -use ark_ec::ProjectiveCurve; -use ark_serialize::*; -use core::borrow::Borrow; - -pub type GroupElement = ark_bls12_377::G1Projective; -pub type GroupElementAffine = ark_bls12_377::G1Affine; -pub type Fq = ark_bls12_377::Fq; -pub type Fr = ark_bls12_377::Fr; - -#[derive(Clone, Eq, PartialEq, Hash, Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct CompressedGroup(pub Vec); - -lazy_static! 
{ - pub static ref GROUP_BASEPOINT: GroupElement = GroupElement::prime_subgroup_generator(); -} - -pub trait CompressGroupElement { - fn compress(&self) -> CompressedGroup; -} - -pub trait DecompressGroupElement { - fn decompress(encoded: &CompressedGroup) -> Option; -} - -pub trait UnpackGroupElement { - fn unpack(&self) -> Result; -} - -impl CompressGroupElement for GroupElement { - fn compress(&self) -> CompressedGroup { - let mut point_encoding = Vec::new(); - self.serialize(&mut point_encoding).unwrap(); - CompressedGroup(point_encoding) - } -} - -impl DecompressGroupElement for GroupElement { - fn decompress(encoded: &CompressedGroup) -> Option { - let res = GroupElement::deserialize(&*encoded.0); - if let Ok(r) = res { - Some(r) - } else { - println!("{:?}", res); - None - } - } -} - -impl UnpackGroupElement for CompressedGroup { - fn unpack(&self) -> Result { - let encoded = self.0.clone(); - GroupElement::decompress(self).ok_or(ProofVerifyError::DecompressionError(encoded)) - } -} - -pub trait VartimeMultiscalarMul { - fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement; -} - -impl VartimeMultiscalarMul for GroupElement { - fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement { - let repr_scalars = scalars - .iter() - .map(|S| S.borrow().into_repr()) - .collect::::BigInt>>(); - let aff_points = points - .iter() - .map(|P| P.borrow().into_affine()) - .collect::>(); - VariableBaseMSM::multi_scalar_mul(aff_points.as_slice(), repr_scalars.as_slice()) - } -} diff --git a/src/lib.rs b/src/lib.rs index bb3a7ca..44893a9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,5 @@ #![allow(non_snake_case)] #![doc = include_str!("../README.md")] -#![feature(test)] #![allow(clippy::assertions_on_result_states)] extern crate ark_std; @@ -10,7 +9,6 @@ extern crate lazy_static; extern crate merlin; extern crate rand; extern crate sha3; -extern crate test; #[macro_use] extern crate json; @@ -21,18 +19,21 @@ extern crate rayon; mod commitments; mod dense_mlpoly; mod errors; -mod group; +#[macro_use] +pub(crate) mod macros; mod math; +pub(crate) mod mipp; mod nizk; mod product_tree; mod r1csinstance; mod r1csproof; -mod random; -mod scalar; mod sparse_mlpoly; +pub mod sqrt_pst; mod sumcheck; +pub mod testudo_nizk; +pub mod testudo_snark; mod timer; -mod transcript; +pub(crate) mod transcript; mod unipoly; pub mod parameters; @@ -40,804 +41,303 @@ pub mod parameters; mod constraints; pub mod poseidon_transcript; -use ark_ff::Field; - -use ark_serialize::*; -use ark_std::Zero; use core::cmp::max; -use errors::{ProofVerifyError, R1CSError}; +use errors::R1CSError; -use poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; -use r1csinstance::{ - R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance, -}; -use r1csproof::{R1CSGens, R1CSProof}; -use random::RandomTape; -use scalar::Scalar; +use r1csinstance::{R1CSCommitment, R1CSDecommitment, R1CSInstance}; -use timer::Timer; +use ark_ec::CurveGroup; /// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS) -pub struct ComputationCommitment { - comm: R1CSCommitment, +pub struct ComputationCommitment { + comm: R1CSCommitment, } +use ark_ff::PrimeField; /// `ComputationDecommitment` holds information to decommit `ComputationCommitment` -pub struct ComputationDecommitment { - decomm: R1CSDecommitment, +pub struct ComputationDecommitment { + decomm: R1CSDecommitment, } /// `Assignment` holds an assignment of values to either the 
inputs or variables in an `Instance` #[derive(Clone)] -pub struct Assignment { - assignment: Vec, +pub struct Assignment { + assignment: Vec, } -impl Assignment { - /// Constructs a new `Assignment` from a vector - pub fn new(assignment: &Vec>) -> Result { - let bytes_to_scalar = |vec: &Vec>| -> Result, R1CSError> { - let mut vec_scalar: Vec = Vec::new(); - for v in vec { - let val = Scalar::from_random_bytes(v.as_slice()); - if let Some(v) = val { - vec_scalar.push(v); - } else { - return Err(R1CSError::InvalidScalar); - } - } - Ok(vec_scalar) - }; - - let assignment_scalar = bytes_to_scalar(assignment); - - // check for any parsing errors - if assignment_scalar.is_err() { - return Err(R1CSError::InvalidScalar); +impl Assignment { + /// Constructs a new `Assignment` from a vector + pub fn new(assignment: &Vec>) -> Result { + let bytes_to_scalar = |vec: &Vec>| -> Result, R1CSError> { + let mut vec_scalar: Vec = Vec::new(); + for v in vec { + let val = F::from_random_bytes(v.as_slice()); + if let Some(v) = val { + vec_scalar.push(v); + } else { + return Err(R1CSError::InvalidScalar); } + } + Ok(vec_scalar) + }; + + let assignment_scalar = bytes_to_scalar(assignment); - Ok(Assignment { - assignment: assignment_scalar.unwrap(), - }) + // check for any parsing errors + if assignment_scalar.is_err() { + return Err(R1CSError::InvalidScalar); } - /// pads Assignment to the specified length - fn pad(&self, len: usize) -> VarsAssignment { - // check that the new length is higher than current length - assert!(len > self.assignment.len()); + Ok(Assignment { + assignment: assignment_scalar.unwrap(), + }) + } - let padded_assignment = { - let mut padded_assignment = self.assignment.clone(); - padded_assignment.extend(vec![Scalar::zero(); len - self.assignment.len()]); - padded_assignment - }; + /// pads Assignment to the specified length + fn pad(&self, len: usize) -> VarsAssignment { + // check that the new length is higher than current length + assert!(len > self.assignment.len()); - VarsAssignment { - assignment: padded_assignment, - } + let padded_assignment = { + let mut padded_assignment = self.assignment.clone(); + padded_assignment.extend(vec![F::zero(); len - self.assignment.len()]); + padded_assignment + }; + + VarsAssignment { + assignment: padded_assignment, } + } } /// `VarsAssignment` holds an assignment of values to variables in an `Instance` -pub type VarsAssignment = Assignment; +pub type VarsAssignment = Assignment; /// `InputsAssignment` holds an assignment of values to variables in an `Instance` -pub type InputsAssignment = Assignment; +pub type InputsAssignment = Assignment; /// `Instance` holds the description of R1CS matrices and a hash of the matrices -#[derive(Debug)] -pub struct Instance { - inst: R1CSInstance, - digest: Vec, +pub struct Instance { + inst: R1CSInstance, + digest: Vec, } -impl Instance { - /// Constructs a new `Instance` and an associated satisfying assignment - pub fn new( - num_cons: usize, - num_vars: usize, - num_inputs: usize, - A: &[(usize, usize, Vec)], - B: &[(usize, usize, Vec)], - C: &[(usize, usize, Vec)], - ) -> Result { - let (num_vars_padded, num_cons_padded) = { - let num_vars_padded = { - let mut num_vars_padded = num_vars; - - // ensure that num_inputs + 1 <= num_vars - num_vars_padded = max(num_vars_padded, num_inputs + 1); - - // ensure that num_vars_padded a power of two - if num_vars_padded.next_power_of_two() != num_vars_padded { - num_vars_padded = num_vars_padded.next_power_of_two(); - } - num_vars_padded - }; - - let num_cons_padded = { 
- let mut num_cons_padded = num_cons; - - // ensure that num_cons_padded is at least 2 - if num_cons_padded == 0 || num_cons_padded == 1 { - num_cons_padded = 2; - } - - // ensure that num_cons_padded is power of 2 - if num_cons.next_power_of_two() != num_cons { - num_cons_padded = num_cons.next_power_of_two(); - } - num_cons_padded - }; - - (num_vars_padded, num_cons_padded) - }; - - let bytes_to_scalar = - |tups: &[(usize, usize, Vec)]| -> Result, R1CSError> { - let mut mat: Vec<(usize, usize, Scalar)> = Vec::new(); - for (row, col, val_bytes) in tups { - // row must be smaller than num_cons - if *row >= num_cons { - return Err(R1CSError::InvalidIndex); - } - - // col must be smaller than num_vars + 1 + num_inputs - if *col >= num_vars + 1 + num_inputs { - return Err(R1CSError::InvalidIndex); - } - - let val = Scalar::from_random_bytes(val_bytes.as_slice()); - if let Some(v) = val { - // if col >= num_vars, it means that it is referencing a 1 or input in the satisfying - // assignment - if *col >= num_vars { - mat.push((*row, *col + num_vars_padded - num_vars, v)); - } else { - mat.push((*row, *col, v)); - } - } else { - return Err(R1CSError::InvalidScalar); - } - } - - // pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1 - // we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol - if num_cons == 0 || num_cons == 1 { - for i in tups.len()..num_cons_padded { - mat.push((i, num_vars, Scalar::zero())); - } - } - - Ok(mat) - }; - - let A_scalar = bytes_to_scalar(A); - if A_scalar.is_err() { - return Err(A_scalar.err().unwrap()); - } - - let B_scalar = bytes_to_scalar(B); - if B_scalar.is_err() { - return Err(B_scalar.err().unwrap()); +impl Instance { + /// Constructs a new `Instance` and an associated satisfying assignment + pub fn new( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + A: &[(usize, usize, Vec)], + B: &[(usize, usize, Vec)], + C: &[(usize, usize, Vec)], + ) -> Result { + let (num_vars_padded, num_cons_padded) = { + let num_vars_padded = { + let mut num_vars_padded = num_vars; + + // ensure that num_inputs + 1 <= num_vars + num_vars_padded = max(num_vars_padded, num_inputs + 1); + + // ensure that num_vars_padded a power of two + if num_vars_padded.next_power_of_two() != num_vars_padded { + num_vars_padded = num_vars_padded.next_power_of_two(); } + num_vars_padded + }; - let C_scalar = bytes_to_scalar(C); - if C_scalar.is_err() { - return Err(C_scalar.err().unwrap()); - } - - let inst = R1CSInstance::new( - num_cons_padded, - num_vars_padded, - num_inputs, - &A_scalar.unwrap(), - &B_scalar.unwrap(), - &C_scalar.unwrap(), - ); - - let digest = inst.get_digest(); + let num_cons_padded = { + let mut num_cons_padded = num_cons; - Ok(Instance { inst, digest }) - } - - /// Checks if a given R1CSInstance is satisfiable with a given variables and inputs assignments - pub fn is_sat( - &self, - vars: &VarsAssignment, - inputs: &InputsAssignment, - ) -> Result { - if vars.assignment.len() > self.inst.get_num_vars() { - return Err(R1CSError::InvalidNumberOfInputs); + // ensure that num_cons_padded is at least 2 + if num_cons_padded == 0 || num_cons_padded == 1 { + num_cons_padded = 2; } - if inputs.assignment.len() != self.inst.get_num_inputs() { - return Err(R1CSError::InvalidNumberOfInputs); + // ensure that num_cons_padded is power of 2 + if num_cons.next_power_of_two() != num_cons { + num_cons_padded = num_cons.next_power_of_two(); } - - // we might need to pad variables - let 
padded_vars = { - let num_padded_vars = self.inst.get_num_vars(); - let num_vars = vars.assignment.len(); - if num_padded_vars > num_vars { - vars.pad(num_padded_vars) + num_cons_padded + }; + + (num_vars_padded, num_cons_padded) + }; + + let bytes_to_scalar = + |tups: &[(usize, usize, Vec)]| -> Result, R1CSError> { + let mut mat: Vec<(usize, usize, F)> = Vec::new(); + for (row, col, val_bytes) in tups { + // row must be smaller than num_cons + if *row >= num_cons { + return Err(R1CSError::InvalidIndex); + } + + // col must be smaller than num_vars + 1 + num_inputs + if *col >= num_vars + 1 + num_inputs { + return Err(R1CSError::InvalidIndex); + } + + let val = F::from_random_bytes(val_bytes.as_slice()); + if let Some(v) = val { + // if col >= num_vars, it means that it is referencing a 1 or input in the satisfying + // assignment + if *col >= num_vars { + mat.push((*row, *col + num_vars_padded - num_vars, v)); } else { - vars.clone() + mat.push((*row, *col, v)); } - }; - - Ok(self - .inst - .is_sat(&padded_vars.assignment, &inputs.assignment)) - } - - /// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment - pub fn produce_synthetic_r1cs( - num_cons: usize, - num_vars: usize, - num_inputs: usize, - ) -> (Instance, VarsAssignment, InputsAssignment) { - let (inst, vars, inputs) = - R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - let digest = inst.get_digest(); - ( - Instance { inst, digest }, - VarsAssignment { assignment: vars }, - InputsAssignment { assignment: inputs }, - ) - } -} - -/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK -pub struct SNARKGens { - gens_r1cs_sat: R1CSGens, - gens_r1cs_eval: R1CSCommitmentGens, -} + } else { + return Err(R1CSError::InvalidScalar); + } + } -impl SNARKGens { - /// Constructs a new `SNARKGens` given the size of the R1CS statement - /// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices - pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self { - let num_vars_padded = { - let mut num_vars_padded = max(num_vars, num_inputs + 1); - if num_vars_padded != num_vars_padded.next_power_of_two() { - num_vars_padded = num_vars_padded.next_power_of_two(); - } - num_vars_padded - }; - - let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded); - let gens_r1cs_eval = R1CSCommitmentGens::new( - b"gens_r1cs_eval", - num_cons, - num_vars_padded, - num_inputs, - num_nz_entries, - ); - SNARKGens { - gens_r1cs_sat, - gens_r1cs_eval, + // pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1 + // we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol + if num_cons == 0 || num_cons == 1 { + for i in tups.len()..num_cons_padded { + mat.push((i, num_vars, F::zero())); + } } - } -} -/// `SNARK` holds a proof produced by Spartan SNARK -#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct SNARK { - r1cs_sat_proof: R1CSProof, - inst_evals: (Scalar, Scalar, Scalar), - r1cs_eval_proof: R1CSEvalProof, - rx: Vec, - ry: Vec, -} + Ok(mat) + }; -impl SNARK { - fn protocol_name() -> &'static [u8] { - b"Spartan SNARK proof" + let A_scalar = bytes_to_scalar(A); + if A_scalar.is_err() { + return Err(A_scalar.err().unwrap()); } - /// A public computation to create a commitment to an R1CS instance - pub fn encode( - inst: &Instance, - gens: &SNARKGens, - ) -> 
(ComputationCommitment, ComputationDecommitment) { - let timer_encode = Timer::new("SNARK::encode"); - let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval); - timer_encode.stop(); - ( - ComputationCommitment { comm }, - ComputationDecommitment { decomm }, - ) + let B_scalar = bytes_to_scalar(B); + if B_scalar.is_err() { + return Err(B_scalar.err().unwrap()); } - /// A method to produce a SNARK proof of the satisfiability of an R1CS instance - pub fn prove( - inst: &Instance, - comm: &ComputationCommitment, - decomm: &ComputationDecommitment, - vars: VarsAssignment, - inputs: &InputsAssignment, - gens: &SNARKGens, - transcript: &mut PoseidonTranscript, - ) -> Self { - let timer_prove = Timer::new("SNARK::prove"); - - // we create a Transcript object seeded with a random Scalar - // to aid the prover produce its randomness - let mut random_tape = RandomTape::new(b"proof"); - - // transcript.append_protocol_name(SNARK::protocol_name()); - comm.comm.append_to_poseidon(transcript); - - let (r1cs_sat_proof, rx, ry) = { - let (proof, rx, ry) = { - // we might need to pad variables - let padded_vars = { - let num_padded_vars = inst.inst.get_num_vars(); - let num_vars = vars.assignment.len(); - if num_padded_vars > num_vars { - vars.pad(num_padded_vars) - } else { - vars - } - }; - - R1CSProof::prove( - &inst.inst, - padded_vars.assignment, - &inputs.assignment, - &gens.gens_r1cs_sat, - transcript, - // &mut random_tape, - ) - }; - - let mut proof_encoded: Vec = Vec::new(); - proof.serialize(&mut proof_encoded).unwrap(); - Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len())); - - (proof, rx, ry) - }; - - // We need to reset the transcript state before starting the evaluation - // proof and share this state with the verifier because, on the verifier's - // side all the previous updates are done on the transcript - // circuit variable and the transcript outside the circuit will be - // inconsistent wrt to the prover's. 
- transcript.new_from_state(&r1cs_sat_proof.transcript_sat_state); - - // We send evaluations of A, B, C at r = (rx, ry) as claims - // to enable the verifier complete the first sum-check - let timer_eval = Timer::new("eval_sparse_polys"); - let inst_evals = { - let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry); - transcript.append_scalar(&Ar); - transcript.append_scalar(&Br); - transcript.append_scalar(&Cr); - (Ar, Br, Cr) - }; - timer_eval.stop(); - - let r1cs_eval_proof = { - let proof = R1CSEvalProof::prove( - &decomm.decomm, - &rx, - &ry, - &inst_evals, - &gens.gens_r1cs_eval, - transcript, - &mut random_tape, - ); - - let mut proof_encoded: Vec = Vec::new(); - proof.serialize(&mut proof_encoded).unwrap(); - Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len())); - proof - }; - - timer_prove.stop(); - SNARK { - r1cs_sat_proof, - inst_evals, - r1cs_eval_proof, - rx, - ry, - } + let C_scalar = bytes_to_scalar(C); + if C_scalar.is_err() { + return Err(C_scalar.err().unwrap()); } - /// A method to verify the SNARK proof of the satisfiability of an R1CS instance - pub fn verify( - &self, - comm: &ComputationCommitment, - input: &InputsAssignment, - transcript: &mut PoseidonTranscript, - gens: &SNARKGens, - ) -> Result<(u128, u128, u128), ProofVerifyError> { - let timer_verify = Timer::new("SNARK::verify"); - // transcript.append_protocol_name(SNARK::protocol_name()); - - // append a commitment to the computation to the transcript - comm.comm.append_to_poseidon(transcript); - - let timer_sat_proof = Timer::new("verify_sat_proof"); - assert_eq!(input.assignment.len(), comm.comm.get_num_inputs()); - // let (rx, ry) = - let res = self.r1cs_sat_proof.verify_groth16( - comm.comm.get_num_vars(), - comm.comm.get_num_cons(), - &input.assignment, - &self.inst_evals, - transcript, - &gens.gens_r1cs_sat, - )?; - timer_sat_proof.stop(); - - let timer_eval_proof = Timer::new("verify_eval_proof"); - // Reset the transcript using the state sent by the prover. - // TODO: find a way to retrieve this state from the circuit. Currently - // the API for generating constraints doesn't support returning values - // computed inside the circuit. 
- transcript.new_from_state(&self.r1cs_sat_proof.transcript_sat_state); - - let (Ar, Br, Cr) = &self.inst_evals; - transcript.append_scalar(&Ar); - transcript.append_scalar(&Br); - transcript.append_scalar(&Cr); - - self.r1cs_eval_proof.verify( - &comm.comm, - &self.rx, - &self.ry, - &self.inst_evals, - &gens.gens_r1cs_eval, - transcript, - )?; - timer_eval_proof.stop(); - timer_verify.stop(); - Ok(res) + let inst = R1CSInstance::new( + num_cons_padded, + num_vars_padded, + num_inputs, + &A_scalar.unwrap(), + &B_scalar.unwrap(), + &C_scalar.unwrap(), + ); + + let digest = inst.get_digest(); + + Ok(Instance { inst, digest }) + } + + /// Checks if a given R1CSInstance is satisfiable with a given variables and inputs assignments + pub fn is_sat( + &self, + vars: &VarsAssignment, + inputs: &InputsAssignment, + ) -> Result { + if vars.assignment.len() > self.inst.get_num_vars() { + return Err(R1CSError::InvalidNumberOfInputs); } -} - -#[derive(Clone)] -/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK -pub struct NIZKGens { - gens_r1cs_sat: R1CSGens, -} -impl NIZKGens { - /// Constructs a new `NIZKGens` given the size of the R1CS statement - pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self { - let num_vars_padded = { - let mut num_vars_padded = max(num_vars, num_inputs + 1); - if num_vars_padded != num_vars_padded.next_power_of_two() { - num_vars_padded = num_vars_padded.next_power_of_two(); - } - num_vars_padded - }; - - let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded); - NIZKGens { gens_r1cs_sat } + if inputs.assignment.len() != self.inst.get_num_inputs() { + return Err(R1CSError::InvalidNumberOfInputs); } -} -/// `NIZK` holds a proof produced by Spartan NIZK -#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct NIZK { - r1cs_sat_proof: R1CSProof, - r: (Vec, Vec), + // we might need to pad variables + let padded_vars = { + let num_padded_vars = self.inst.get_num_vars(); + let num_vars = vars.assignment.len(); + if num_padded_vars > num_vars { + vars.pad(num_padded_vars) + } else { + vars.clone() + } + }; + + Ok( + self + .inst + .is_sat(&padded_vars.assignment, &inputs.assignment), + ) + } + + /// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment + pub fn produce_synthetic_r1cs( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + ) -> (Instance, VarsAssignment, InputsAssignment) { + let (inst, vars, inputs) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + let digest = inst.get_digest(); + ( + Instance { inst, digest }, + VarsAssignment { assignment: vars }, + InputsAssignment { assignment: inputs }, + ) + } } -impl NIZK { - fn protocol_name() -> &'static [u8] { - b"Spartan NIZK proof" - } - - /// A method to produce a NIZK proof of the satisfiability of an R1CS instance - pub fn prove( - inst: &Instance, - vars: VarsAssignment, - input: &InputsAssignment, - gens: &NIZKGens, - transcript: &mut PoseidonTranscript, - ) -> Self { - let timer_prove = Timer::new("NIZK::prove"); - // we create a Transcript object seeded with a random Scalar - // to aid the prover produce its randomness - let _random_tape = RandomTape::new(b"proof"); - - // transcript.append_protocol_name(NIZK::protocol_name()); - transcript.append_bytes(&inst.digest); - - let (r1cs_sat_proof, rx, ry) = { - // we might need to pad variables - let padded_vars = { - let num_padded_vars = inst.inst.get_num_vars(); - let num_vars = vars.assignment.len(); - 
if num_padded_vars > num_vars { - vars.pad(num_padded_vars) - } else { - vars - } - }; - - let (proof, rx, ry) = R1CSProof::prove( - &inst.inst, - padded_vars.assignment, - &input.assignment, - &gens.gens_r1cs_sat, - transcript, - // &mut random_tape, - ); - let mut proof_encoded = Vec::new(); - proof.serialize(&mut proof_encoded).unwrap(); - Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len())); - (proof, rx, ry) - }; - - timer_prove.stop(); - NIZK { - r1cs_sat_proof, - r: (rx, ry), - } - } - - /// A method to verify a NIZK proof of the satisfiability of an R1CS instance - pub fn verify( - &self, - inst: &Instance, - input: &InputsAssignment, - transcript: &mut PoseidonTranscript, - gens: &NIZKGens, - ) -> Result { - let timer_verify = Timer::new("NIZK::verify"); - - transcript.append_bytes(&inst.digest); - - // We send evaluations of A, B, C at r = (rx, ry) as claims - // to enable the verifier complete the first sum-check - let timer_eval = Timer::new("eval_sparse_polys"); - let (claimed_rx, claimed_ry) = &self.r; - let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry); - timer_eval.stop(); - - let timer_sat_proof = Timer::new("verify_sat_proof"); - assert_eq!(input.assignment.len(), inst.inst.get_num_inputs()); - // let (rx, ry) = - let nc = self.r1cs_sat_proof.circuit_size( - inst.inst.get_num_vars(), - inst.inst.get_num_cons(), - &input.assignment, - &inst_evals, - transcript, - &gens.gens_r1cs_sat, - )?; - - // verify if claimed rx and ry are correct - // assert_eq!(rx, *claimed_rx); - // assert_eq!(ry, *claimed_ry); - timer_sat_proof.stop(); - timer_verify.stop(); - - Ok(nc) - } - - /// A method to verify a NIZK proof of the satisfiability of an R1CS instance with Groth16 - pub fn verify_groth16( - &self, - inst: &Instance, - input: &InputsAssignment, - transcript: &mut PoseidonTranscript, - gens: &NIZKGens, - ) -> Result<(u128, u128, u128), ProofVerifyError> { - let timer_verify = Timer::new("NIZK::verify"); - - // transcript.append_protocol_name(NIZK::protocol_name()); - transcript.append_bytes(&inst.digest); - - // We send evaluations of A, B, C at r = (rx, ry) as claims - // to enable the verifier complete the first sum-check - let timer_eval = Timer::new("eval_sparse_polys"); - let (claimed_rx, claimed_ry) = &self.r; - let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry); - timer_eval.stop(); - - let timer_sat_proof = Timer::new("verify_sat_proof"); - assert_eq!(input.assignment.len(), inst.inst.get_num_inputs()); - // let (rx, ry) = - let (ds, dp, dv) = self.r1cs_sat_proof.verify_groth16( - inst.inst.get_num_vars(), - inst.inst.get_num_cons(), - &input.assignment, - &inst_evals, - transcript, - &gens.gens_r1cs_sat, - )?; - - // verify if claimed rx and ry are correct - // assert_eq!(rx, *claimed_rx); - // assert_eq!(ry, *claimed_ry); - timer_sat_proof.stop(); - timer_verify.stop(); - - Ok((ds, dp, dv)) - } +#[inline] +pub(crate) fn dot_product(a: &[F], b: &[F]) -> F { + let mut res = F::zero(); + for i in 0..a.len() { + res += a[i] * &b[i]; + } + res } #[cfg(test)] mod tests { - use crate::parameters::poseidon_params; - - use super::*; - use ark_ff::{BigInteger, One, PrimeField}; - - #[test] - pub fn check_snark() { - let num_vars = 256; - let num_cons = num_vars; - let num_inputs = 10; - - // produce public generators - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); - - // produce a synthetic R1CSInstance - let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); - - // create a 
commitment to R1CSInstance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - let params = poseidon_params(); - - // produce a proof - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove( - &inst, - &comm, - &decomm, - vars, - &inputs, - &gens, - &mut prover_transcript, - ); - - // verify the proof - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&comm, &inputs, &mut verifier_transcript, &gens) - .is_ok()); - } - - #[test] - pub fn check_r1cs_invalid_index() { - let num_cons = 4; - let num_vars = 8; - let num_inputs = 1; - - let zero: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]; - let A = vec![(0, 0, zero.to_vec())]; - let B = vec![(100, 1, zero.to_vec())]; - let C = vec![(1, 1, zero.to_vec())]; - - let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C); - assert!(inst.is_err()); - assert_eq!(inst.err(), Some(R1CSError::InvalidIndex)); - } - - #[test] - pub fn check_r1cs_invalid_scalar() { - let num_cons = 4; - let num_vars = 8; - let num_inputs = 1; - - let zero: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]; - - let larger_than_mod = [ - 3, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, - 216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115, - ]; - - let A = vec![(0, 0, zero.to_vec())]; - let B = vec![(1, 1, larger_than_mod.to_vec())]; - let C = vec![(1, 1, zero.to_vec())]; - - let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C); - assert!(inst.is_err()); - assert_eq!(inst.err(), Some(R1CSError::InvalidScalar)); - } - - #[test] - fn test_padded_constraints() { - // parameters of the R1CS instance - let num_cons = 1; - let num_vars = 0; - let num_inputs = 3; - let num_non_zero_entries = 3; - - // We will encode the above constraints into three matrices, where - // the coefficients in the matrix are in the little-endian byte order - let mut A: Vec<(usize, usize, Vec)> = Vec::new(); - let mut B: Vec<(usize, usize, Vec)> = Vec::new(); - let mut C: Vec<(usize, usize, Vec)> = Vec::new(); - - // Create a^2 + b + 13 - A.push((0, num_vars + 2, (Scalar::one().into_repr().to_bytes_le()))); // 1*a - B.push((0, num_vars + 2, Scalar::one().into_repr().to_bytes_le())); // 1*a - C.push((0, num_vars + 1, Scalar::one().into_repr().to_bytes_le())); // 1*z - C.push(( - 0, - num_vars, - (-Scalar::from(13u64)).into_repr().to_bytes_le(), - )); // -13*1 - C.push((0, num_vars + 3, (-Scalar::one()).into_repr().to_bytes_le())); // -1*b - - // Var Assignments (Z_0 = 16 is the only output) - let vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars]; - - // create an InputsAssignment (a = 1, b = 2) - let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs]; - inputs[0] = Scalar::from(16u64).into_repr().to_bytes_le(); - inputs[1] = Scalar::from(1u64).into_repr().to_bytes_le(); - inputs[2] = Scalar::from(2u64).into_repr().to_bytes_le(); - - let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); - let assignment_vars = VarsAssignment::new(&vars).unwrap(); - - // Check if instance is satisfiable - let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); - let res = inst.is_sat(&assignment_vars, &assignment_inputs); - assert!(res.unwrap(), "should be satisfied"); - - // SNARK public params - let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); - - // 
create a commitment to the R1CS instance - let (comm, decomm) = SNARK::encode(&inst, &gens); - - let params = poseidon_params(); - - // produce a SNARK - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SNARK::prove( - &inst, - &comm, - &decomm, - assignment_vars.clone(), - &assignment_inputs, - &gens, - &mut prover_transcript, - ); - - // verify the SNARK - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) - .is_ok()); - - // NIZK public params - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - - let params = poseidon_params(); - - // produce a NIZK - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = NIZK::prove( - &inst, - assignment_vars, - &assignment_inputs, - &gens, - &mut prover_transcript, - ); - - // verify the NIZK - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify_groth16(&inst, &assignment_inputs, &mut verifier_transcript, &gens) - .is_ok()); - } + use super::*; + + type F = ark_bls12_377::Fr; + + #[test] + pub fn check_r1cs_invalid_index() { + let num_cons = 4; + let num_vars = 8; + let num_inputs = 1; + + let zero: [u8; 32] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ]; + + let A = vec![(0, 0, zero.to_vec())]; + let B = vec![(100, 1, zero.to_vec())]; + let C = vec![(1, 1, zero.to_vec())]; + + let inst = Instance::::new(num_cons, num_vars, num_inputs, &A, &B, &C); + assert!(inst.is_err()); + assert_eq!(inst.err(), Some(R1CSError::InvalidIndex)); + } + + #[test] + pub fn check_r1cs_invalid_scalar() { + let num_cons = 4; + let num_vars = 8; + let num_inputs = 1; + + let zero: [u8; 32] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ]; + + let larger_than_mod = [ + 3, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216, + 57, 51, 72, 125, 157, 41, 83, 167, 237, 115, + ]; + + let A = vec![(0, 0, zero.to_vec())]; + let B = vec![(1, 1, larger_than_mod.to_vec())]; + let C = vec![(1, 1, zero.to_vec())]; + + let inst = Instance::::new(num_cons, num_vars, num_inputs, &A, &B, &C); + assert!(inst.is_err()); + assert_eq!(inst.err(), Some(R1CSError::InvalidScalar)); + } } diff --git a/src/macros.rs b/src/macros.rs new file mode 100644 index 0000000..f3aac90 --- /dev/null +++ b/src/macros.rs @@ -0,0 +1,56 @@ +macro_rules! try_par { + ($(let $name:ident = $f:expr),+) => { + $( + let mut $name = None; + )+ + rayon::scope(|s| { + $( + let $name = &mut $name; + s.spawn(move |_| { + *$name = Some($f); + });)+ + }); + $( + let $name = $name.unwrap()?; + )+ + }; +} + +macro_rules! 
par { + ($(let $name:ident = $f:expr),+) => { + $( + let mut $name = None; + )+ + rayon::scope(|s| { + $( + let $name = &mut $name; + s.spawn(move |_| { + *$name = Some($f); + });)+ + }); + $( + let $name = $name.unwrap(); + )+ + }; + + ($(let ($name1:ident, $name2:ident) = $f:block),+) => { + $( + let mut $name1 = None; + let mut $name2 = None; + )+ + rayon::scope(|s| { + $( + let $name1 = &mut $name1; + let $name2 = &mut $name2; + s.spawn(move |_| { + let (a, b) = $f; + *$name1 = Some(a); + *$name2 = Some(b); + });)+ + }); + $( + let $name1 = $name1.unwrap(); + let $name2 = $name2.unwrap(); + )+ + } +} diff --git a/src/math.rs b/src/math.rs index c0d1634..33e9e14 100644 --- a/src/math.rs +++ b/src/math.rs @@ -1,36 +1,36 @@ pub trait Math { - fn square_root(self) -> usize; - fn pow2(self) -> usize; - fn get_bits(self, num_bits: usize) -> Vec; - fn log_2(self) -> usize; + fn square_root(self) -> usize; + fn pow2(self) -> usize; + fn get_bits(self, num_bits: usize) -> Vec; + fn log_2(self) -> usize; } impl Math for usize { - #[inline] - fn square_root(self) -> usize { - (self as f64).sqrt() as usize - } + #[inline] + fn square_root(self) -> usize { + (self as f64).sqrt() as usize + } - #[inline] - fn pow2(self) -> usize { - let base: usize = 2; - base.pow(self as u32) - } + #[inline] + fn pow2(self) -> usize { + let base: usize = 2; + base.pow(self as u32) + } - /// Returns the num_bits from n in a canonical order - fn get_bits(self, num_bits: usize) -> Vec { - (0..num_bits) - .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0)) - .collect::>() - } + /// Returns the num_bits from n in a canonical order + fn get_bits(self, num_bits: usize) -> Vec { + (0..num_bits) + .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0)) + .collect::>() + } - fn log_2(self) -> usize { - assert_ne!(self, 0); + fn log_2(self) -> usize { + assert_ne!(self, 0); - if self.is_power_of_two() { - (1usize.leading_zeros() - self.leading_zeros()) as usize - } else { - (0usize.leading_zeros() - self.leading_zeros()) as usize - } + if self.is_power_of_two() { + (1usize.leading_zeros() - self.leading_zeros()) as usize + } else { + (0usize.leading_zeros() - self.leading_zeros()) as usize } + } } diff --git a/src/mipp.rs b/src/mipp.rs new file mode 100644 index 0000000..93f7a9c --- /dev/null +++ b/src/mipp.rs @@ -0,0 +1,410 @@ +use crate::poseidon_transcript::PoseidonTranscript; +use crate::transcript::Transcript; +use ark_ec::scalar_mul::variable_base::VariableBaseMSM; +use ark_ec::CurveGroup; +use ark_ec::{pairing::Pairing, AffineRepr}; +use ark_ff::{Field, PrimeField}; +use ark_poly::DenseMultilinearExtension; +use ark_poly_commit::multilinear_pc::data_structures::{ + CommitmentG2, CommitterKey, ProofG1, VerifierKey, +}; +use ark_poly_commit::multilinear_pc::MultilinearPC; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::One; +use ark_std::Zero; +use rayon::iter::ParallelIterator; +use rayon::prelude::IntoParallelIterator; +use rayon::prelude::*; +use std::ops::{AddAssign, Mul, MulAssign}; +use thiserror::Error; + +#[derive(Debug, Clone, CanonicalDeserialize, CanonicalSerialize)] +pub struct MippProof { + pub comms_t: Vec<(::TargetField, ::TargetField)>, + pub comms_u: Vec<(E::G1Affine, E::G1Affine)>, + pub final_a: E::G1Affine, + pub final_h: E::G2Affine, + pub pst_proof_h: ProofG1, +} + +impl MippProof { + pub fn prove( + transcript: &mut PoseidonTranscript, + ck: &CommitterKey, + a: Vec, + y: Vec, + h: Vec, + U: &E::G1Affine, + _T: 
diff --git a/src/mipp.rs b/src/mipp.rs
new file mode 100644
index 0000000..93f7a9c
--- /dev/null
+++ b/src/mipp.rs
@@ -0,0 +1,410 @@
+use crate::poseidon_transcript::PoseidonTranscript;
+use crate::transcript::Transcript;
+use ark_ec::scalar_mul::variable_base::VariableBaseMSM;
+use ark_ec::CurveGroup;
+use ark_ec::{pairing::Pairing, AffineRepr};
+use ark_ff::{Field, PrimeField};
+use ark_poly::DenseMultilinearExtension;
+use ark_poly_commit::multilinear_pc::data_structures::{
+  CommitmentG2, CommitterKey, ProofG1, VerifierKey,
+};
+use ark_poly_commit::multilinear_pc::MultilinearPC;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
+use ark_std::One;
+use ark_std::Zero;
+use rayon::iter::ParallelIterator;
+use rayon::prelude::IntoParallelIterator;
+use rayon::prelude::*;
+use std::ops::{AddAssign, Mul, MulAssign};
+use thiserror::Error;
+
+#[derive(Debug, Clone, CanonicalDeserialize, CanonicalSerialize)]
+pub struct MippProof<E: Pairing> {
+  pub comms_t: Vec<(<E as Pairing>::TargetField, <E as Pairing>::TargetField)>,
+  pub comms_u: Vec<(E::G1Affine, E::G1Affine)>,
+  pub final_a: E::G1Affine,
+  pub final_h: E::G2Affine,
+  pub pst_proof_h: ProofG1<E>,
+}
+
+impl<E: Pairing> MippProof<E> {
+  pub fn prove(
+    transcript: &mut PoseidonTranscript<E::ScalarField>,
+    ck: &CommitterKey<E>,
+    a: Vec<E::G1Affine>,
+    y: Vec<E::ScalarField>,
+    h: Vec<E::G2Affine>,
+    U: &E::G1Affine,
+    _T: &<E as Pairing>::TargetField,
+  ) -> Result<MippProof<E>, Error> {
+    // the values of vectors A and y rescaled at each step of the loop
+    let (mut m_a, mut m_y) = (a.clone(), y.clone());
+    // the values of the commitment keys h for the vector A rescaled at
+    // each step of the loop
+    let mut m_h = h.clone();
+
+    // storing the cross commitments for inclusion in the proof
+    let mut comms_t = Vec::new();
+    let mut comms_u = Vec::new();
+
+    // the transcript challenges
+    let mut xs: Vec<E::ScalarField> = Vec::new();
+    let mut xs_inv: Vec<E::ScalarField> = Vec::new();
+
+    // we append only U here; the aggregated commitment T has already been
+    // appended to the transcript
+    transcript.append(b"U", U);
+
+    while m_a.len() > 1 {
+      // recursive step
+      // Recurse with problem of half size
+      let split = m_a.len() / 2;
+
+      // MIPP where n' = split
+      // a[:n'] a[n':]
+      let (a_l, a_r) = m_a.split_at_mut(split);
+      // y[:n'] y[n':]
+      let (y_l, y_r) = m_y.split_at_mut(split);
+      // h[:n'] h[n':]
+      let (h_l, h_r) = m_h.split_at_mut(split);
+
+      // since we do this in parallel we take references first so they can
+      // be moved within the macro's rayon scope.
+      let (_rh_l, _rh_r) = (&h_l, &h_r);
+      let (ra_l, ra_r) = (&a_l, &a_r);
+      let (ry_l, ry_r) = (&y_l, &y_r);
+
+      try_par! {
+        // MIPP part
+        // Compute cross commitments
+        // u_l = a[n':] ^ y[:n']
+        // TODO: replace with bitsf_multiexp
+        let comm_u_l = multiexponentiation(ra_l, &ry_r),
+        // u_r = a[:n'] ^ y[n':]
+        let comm_u_r = multiexponentiation(ra_r, &ry_l)
+      };
+
+      par! {
+        // Compute the cross pairing products over the distinct halves of A
+        // t_l = a[n':] * h[:n']
+        let comm_t_l = pairings_product::<E>(&a_l, h_r),
+        // t_r = a[:n'] * h[n':]
+        let comm_t_r = pairings_product::<E>(&a_r, h_l)
+      };
+
+      // Fiat-Shamir challenge
+      transcript.append(b"comm_u_l", &comm_u_l);
+      transcript.append(b"comm_u_r", &comm_u_r);
+      transcript.append(b"comm_t_l", &comm_t_l);
+      transcript.append(b"comm_t_r", &comm_t_r);
+      let c_inv = transcript.challenge_scalar::<E::ScalarField>(b"challenge_i");
+
+      // Optimization for the multiexponentiation: rescale the G2 elements
+      // with the 128-bit challenge. Swap 'c' and 'c_inv' since we
+      // can't control the bit size of c_inv
+      let c = c_inv.inverse().unwrap();
+
+      // Set up values for the next step of the recursion by compressing as follows
+      // a[n':] + a[:n']^x
+      compress(&mut m_a, split, &c);
+      // y[n':] + y[:n']^x_inv
+      compress_field(&mut m_y, split, &c_inv);
+      // h[n':] + h[:n']^x_inv
+      compress(&mut m_h, split, &c_inv);
+
+      comms_t.push((comm_t_l, comm_t_r));
+      comms_u.push((comm_u_l.into_affine(), comm_u_r.into_affine()));
+      xs.push(c);
+      xs_inv.push(c_inv);
+    }
+    assert!(m_a.len() == 1 && m_y.len() == 1 && m_h.len() == 1);
+
+    let final_a = m_a[0];
+    let final_h = m_h[0];
+
+    // get the structured polynomial p_h for which final_h = h^{p_h(\vec{t})}
+    // is the PST commitment given generator h and toxic waste \vec{t}
+    let poly = DenseMultilinearExtension::<E::ScalarField>::from_evaluations_vec(
+      xs_inv.len(),
+      Self::polynomial_evaluations_from_transcript::<E::ScalarField>(&xs_inv),
+    );
+    let c = MultilinearPC::<E>::commit_g2(ck, &poly);
+    debug_assert!(c.h_product == final_h);
+
+    // generate a proof of the opening of final_h at the random point rs
+    // drawn from the transcript
+    let rs: Vec<E::ScalarField> = (0..poly.num_vars)
+      .into_iter()
+      .map(|_| transcript.challenge_scalar::<E::ScalarField>(b"random_point"))
+      .collect();
+
+    let pst_proof_h = MultilinearPC::<E>::open_g1(ck, &poly, &rs);
+
+    Ok(MippProof {
+      comms_t,
+      comms_u,
+      final_a,
+      final_h,
+      pst_proof_h,
+    })
+  }
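As a small worked example of the helper that follows: for $m = 2$ challenge inverses $(\bar{c}_0, \bar{c}_1)$, the evaluation list produced over the hypercube $\{0,1\}^2$ (indexed lsb-first) is $[1, \bar{c}_1, \bar{c}_0, \bar{c}_0 \bar{c}_1]$, which is exactly $\prod_i(1 - z_i + \bar{c}_{m-i-1} z_i)$ evaluated at $(z_0, z_1) = (0,0), (1,0), (0,1), (1,1)$.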
+
+  // builds the polynomial p_h in Lagrange basis which uses the
+  // inverses of the transcript challenges; this is the following
+  // structured polynomial $\prod_i(1 - z_i + cs_inv[m - i - 1] * z_i)$
+  // where m is the length of cs_inv and z_i is the unknown
+  fn polynomial_evaluations_from_transcript<F: Field>(cs_inv: &[F]) -> Vec<F> {
+    let m = cs_inv.len();
+    let pow_m = 2_usize.pow(m as u32);
+
+    // constructs the list of evaluations over the boolean hypercube \{0,1\}^m
+    let evals = (0..pow_m)
+      .into_par_iter()
+      .map(|i| {
+        let mut res = F::one();
+        for j in 0..m {
+          // we iterate from lsb to msb and, in case the bit is 1,
+          // we multiply by the corresponding challenge, i.e. the one whose
+          // index corresponds to the bit's position
+          if (i >> j) & 1 == 1 {
+            res *= cs_inv[m - j - 1];
+          }
+        }
+        res
+      })
+      .collect();
+    evals
+  }
+
+  pub fn verify(
+    vk: &VerifierKey<E>,
+    transcript: &mut PoseidonTranscript<E::ScalarField>,
+    proof: &MippProof<E>,
+    point: Vec<E::ScalarField>,
+    U: &E::G1Affine,
+    T: &<E as Pairing>::TargetField,
+  ) -> bool {
+    let comms_u = proof.comms_u.clone();
+    let comms_t = proof.comms_t.clone();
+
+    let mut xs = Vec::new();
+    let mut xs_inv = Vec::new();
+    let mut final_y = E::ScalarField::one();
+
+    let mut final_res = MippTU {
+      tc: T.clone(),
+      uc: U.into_group(),
+    };
+
+    transcript.append(b"U", U);
+
+    // Challenges need to be generated first, in sequential order, so that
+    // the prover and the verifier have a consistent view of the transcript
+    for (i, (comm_u, comm_t)) in comms_u.iter().zip(comms_t.iter()).enumerate() {
+      let (comm_u_l, comm_u_r) = comm_u;
+      let (comm_t_l, comm_t_r) = comm_t;
+
+      // Fiat-Shamir challenge
+      transcript.append(b"comm_u_l", comm_u_l);
+      transcript.append(b"comm_u_r", comm_u_r);
+      transcript.append(b"comm_t_l", comm_t_l);
+      transcript.append(b"comm_t_r", comm_t_r);
+      let c_inv = transcript.challenge_scalar::<E::ScalarField>(b"challenge_i");
+      let c = c_inv.inverse().unwrap();
+
+      xs.push(c);
+      xs_inv.push(c_inv);
+
+      // the verifier computes final_y on its own: these are field
+      // operations, so they are fast and parallelisation would not bring
+      // much improvement
+      final_y *= E::ScalarField::one() + c_inv.mul(point[i]) - point[i];
+    }
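Written multiplicatively for both groups, the merge phase below computes $T' = T \cdot \prod_i t_{l,i}^{x_i^{-1}} t_{r,i}^{x_i}$ and $U' = U \cdot \prod_i u_{l,i}^{x_i^{-1}} u_{r,i}^{x_i}$ over the received cross terms, and the proof is accepted only if $T' = e(final\_a, final\_h)$, $U' = final\_a^{final\_y}$, and the PST opening of $final\_h$ at $rs$ verifies; this is a summary of the checks the code that follows performs, not an extra check.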
+
+    // First, each entry of T and U is multiplied independently by its
+    // respective challenge (this is done in parallel) and, at the end,
+    // the results are merged together, for each vector, with its
+    // corresponding merge operation.
+    enum Op<'a, E: Pairing> {
+      TC(&'a E::TargetField, <E::ScalarField as PrimeField>::BigInt),
+      UC(&'a E::G1Affine, &'a E::ScalarField),
+    }
+
+    let res = comms_t
+      .par_iter()
+      .zip(comms_u.par_iter())
+      .zip(xs.par_iter().zip(xs_inv.par_iter()))
+      .flat_map(|((comm_t, comm_u), (c, c_inv))| {
+        let (comm_t_l, comm_t_r) = comm_t;
+        let (comm_u_l, comm_u_r) = comm_u;
+
+        // we multiply the left side by x^-1 and the right side by x
+        vec![
+          Op::TC::<E>(comm_t_l, c_inv.into_bigint()),
+          Op::TC(comm_t_r, c.into_bigint()),
+          Op::UC(comm_u_l, c_inv),
+          Op::UC(comm_u_r, c),
+        ]
+      })
+      .fold(MippTU::<E>::default, |mut res, op: Op<E>| {
+        match op {
+          Op::TC(tx, c) => {
+            let tx: E::TargetField = tx.pow(c);
+            res.tc.mul_assign(&tx);
+          }
+          Op::UC(zx, c) => {
+            let uxp: E::G1 = zx.mul(c);
+            res.uc.add_assign(&uxp);
+          }
+        }
+        res
+      })
+      .reduce(MippTU::default, |mut acc_res, res| {
+        acc_res.merge(&res);
+        acc_res
+      });
+
+    // the initial values of T and U are also merged to get the final result
+    let ref_final_res = &mut final_res;
+    ref_final_res.merge(&res);
+
+    // get the point rs from the transcript, used by the prover to generate
+    // the PST proof
+    let mut rs: Vec<E::ScalarField> = Vec::new();
+    let m = xs_inv.len();
+    for _i in 0..m {
+      let r = transcript.challenge_scalar::<E::ScalarField>(b"random_point");
+      rs.push(r);
+    }
+
+    // Given that p_h is structured as defined above, the verifier can
+    // compute p_h(rs) on its own in O(m) time
+    let v = (0..m)
+      .into_par_iter()
+      .map(|i| E::ScalarField::one() + rs[i].mul(xs_inv[m - i - 1]) - rs[i])
+      .product();
+
+    let comm_h = CommitmentG2 {
+      nv: m,
+      h_product: proof.final_h,
+    };
+
+    // final_h is the commitment of p_h, so the verifier can perform
+    // a PST verification at the random point rs, given the PST proof
+    // received from the prover
+    let check_h = MultilinearPC::<E>::check_2(vk, &comm_h, &rs, v, &proof.pst_proof_h);
+    assert!(check_h);
+
+    let final_u = proof.final_a.mul(final_y);
+    let final_t: <E as Pairing>::TargetField = E::pairing(proof.final_a, proof.final_h).0;
+
+    let check_t = ref_final_res.tc == final_t;
+    assert!(check_t);
+
+    let check_u = ref_final_res.uc == final_u;
+    assert!(check_u);
+
+    check_h & check_u
+  }
+}
+
+/// MippTU keeps track of the variables that have been sent by the prover and
+/// must be multiplied together by the verifier.
+struct MippTU<E: Pairing> {
+  pub tc: E::TargetField,
+  pub uc: E::G1,
+}
+
+impl<E> Default for MippTU<E>
+where
+  E: Pairing,
+{
+  fn default() -> Self {
+    Self {
+      tc: E::TargetField::one(),
+      uc: E::G1::zero(),
+    }
+  }
+}
+
+impl<E> MippTU<E>
+where
+  E: Pairing,
+{
+  fn merge(&mut self, other: &Self) {
+    self.tc.mul_assign(&other.tc);
+    self.uc.add_assign(&other.uc);
+  }
+}
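The `compress` helpers below fold the upper half of a vector into its lower half. A minimal usage sketch of the field variant (hypothetical test code, assuming `compress_field` from this module is in scope):

```rust
use ark_bls12_377::Fr;

#[test]
fn compress_field_halves_in_place() {
  let c = Fr::from(3u64);
  let mut v: Vec<Fr> = [1u64, 2, 3, 4].iter().map(|&x| Fr::from(x)).collect();
  compress_field(&mut v, 2, &c);
  // v[0] = 1 + 3 * 3 = 10, v[1] = 2 + 3 * 4 = 14, and v now has length 2
  assert_eq!(v, vec![Fr::from(10u64), Fr::from(14u64)]);
}
```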
+pub fn compress<C: AffineRepr>(vec: &mut Vec<C>, split: usize, scaler: &C::ScalarField) {
+  let (left, right) = vec.split_at_mut(split);
+  left
+    .par_iter_mut()
+    .zip(right.par_iter())
+    .for_each(|(a_l, a_r)| {
+      // TODO remove that with master version
+      let mut x = a_r.mul(scaler);
+      x.add_assign(a_l.into_group());
+      *a_l = x.into_affine();
+    });
+  let len = left.len();
+  vec.resize(len, C::zero());
+}
+
+// TODO make that generic with points as well
+pub fn compress_field<F: Field>(vec: &mut Vec<F>, split: usize, scaler: &F) {
+  let (left, right) = vec.split_at_mut(split);
+  assert!(left.len() == right.len());
+  left
+    .par_iter_mut()
+    .zip(right.par_iter_mut())
+    .for_each(|(a_l, a_r)| {
+      // TODO remove copy
+      a_r.mul_assign(scaler);
+      a_l.add_assign(a_r.clone());
+    });
+  let len = left.len();
+  vec.resize(len, F::zero());
+}
+
+pub fn multiexponentiation<G: AffineRepr>(
+  left: &[G],
+  right: &[G::ScalarField],
+) -> Result<G::Group, Error> {
+  if left.len() != right.len() {
+    return Err(Error::InvalidIPVectorLength);
+  }
+
+  Ok(<G::Group as VariableBaseMSM>::msm_unchecked(left, right))
+}
+
+pub fn pairings_product<E: Pairing>(gs: &[E::G1Affine], hs: &[E::G2Affine]) -> E::TargetField {
+  E::multi_pairing(gs, hs).0
+}
+
+#[derive(Debug, Error)]
+pub enum Error {
+  #[error("Serialization error: {0}")]
+  Serialization(#[from] SerializationError),
+
+  #[error("Vector lengths do not match for inner product (IP)")]
+  InvalidIPVectorLength,
+  // #[error("Commitment key length invalid")]
+  // InvalidKeyLength,
+
+  // #[error("Invalid pairing result")]
+  // InvalidPairing,
+
+  // #[error("Invalid SRS: {0}")]
+  // InvalidSRS(String),
+
+  // #[error("Invalid proof: {0}")]
+  // InvalidProof(String),
+
+  // #[error("Malformed Groth16 verifying key")]
+  // MalformedVerifyingKey,
+}
diff --git a/src/nizk/bullet.rs b/src/nizk/bullet.rs
index abb8b88..5d84b67 100644
--- a/src/nizk/bullet.rs
+++ b/src/nizk/bullet.rs
@@ -3,259 +3,261 @@
 #![allow(non_snake_case)]
 #![allow(clippy::type_complexity)]
 #![allow(clippy::too_many_arguments)]
+use super::super::errors::ProofVerifyError;
 use crate::math::Math;
 use crate::poseidon_transcript::PoseidonTranscript;
-
-use super::super::errors::ProofVerifyError;
-use super::super::group::{
-  CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement,
-  VartimeMultiscalarMul,
-};
-use super::super::scalar::Scalar;
+use crate::transcript::Transcript;
+use ark_ec::AffineRepr;
+use ark_ec::CurveGroup;
 use ark_ff::Field;
 use ark_serialize::*;
 use ark_std::{One, Zero};
 use core::iter;
+use std::ops::Mul;
 use std::ops::MulAssign;
 
 #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
-pub struct BulletReductionProof {
-  L_vec: Vec<CompressedGroup>,
-  R_vec: Vec<CompressedGroup>,
+pub struct BulletReductionProof<G: CurveGroup> {
+  L_vec: Vec<G>,
+  R_vec: Vec<G>,
 }
 
-impl BulletReductionProof {
-  /// Create an inner-product proof.
-  ///
-  /// The proof is created with respect to the bases \\(G\\).
-  ///
-  /// The `transcript` is passed in as a parameter so that the
-  /// challenges depend on the *entire* transcript (including parent
-  /// protocols).
-  ///
-  /// The lengths of the vectors must all be the same, and must all be
-  /// either 0 or a power of 2.
-  pub fn prove(
-    transcript: &mut PoseidonTranscript,
-    Q: &GroupElement,
-    G_vec: &[GroupElement],
-    H: &GroupElement,
-    a_vec: &[Scalar],
-    b_vec: &[Scalar],
-    blind: &Scalar,
-    blinds_vec: &[(Scalar, Scalar)],
-  ) -> (
-    BulletReductionProof,
-    GroupElement,
-    Scalar,
-    Scalar,
-    GroupElement,
-    Scalar,
-  ) {
-    // Create slices G, H, a, b backed by their respective
-    // vectors. This lets us reslice as we compress the lengths
-    // of the vectors in the main loop below.
-    let mut G = &mut G_vec.to_owned()[..];
-    let mut a = &mut a_vec.to_owned()[..];
-    let mut b = &mut b_vec.to_owned()[..];
-
-    // All of the input vectors must have a length that is a power of two.
-    let mut n = G.len();
-    assert!(n.is_power_of_two());
-    let lg_n = n.log_2();
-
-    // All of the input vectors must have the same length.
-    assert_eq!(G.len(), n);
-    assert_eq!(a.len(), n);
-    assert_eq!(b.len(), n);
-    assert_eq!(blinds_vec.len(), 2 * lg_n);
-
-    let mut L_vec = Vec::with_capacity(lg_n);
-    let mut R_vec = Vec::with_capacity(lg_n);
-    let mut blinds_iter = blinds_vec.iter();
-    let mut blind_fin = *blind;
-
-    while n != 1 {
-      n /= 2;
-      let (a_L, a_R) = a.split_at_mut(n);
-      let (b_L, b_R) = b.split_at_mut(n);
-      let (G_L, G_R) = G.split_at_mut(n);
-
-      let c_L = inner_product(a_L, b_R);
-      let c_R = inner_product(a_R, b_L);
-
-      let (blind_L, blind_R) = blinds_iter.next().unwrap();
-
-      let L = GroupElement::vartime_multiscalar_mul(
-        a_L.iter()
-          .chain(iter::once(&c_L))
-          .chain(iter::once(blind_L))
-          .copied()
-          .collect::<Vec<Scalar>>()
-          .as_slice(),
-        G_R.iter()
-          .chain(iter::once(Q))
-          .chain(iter::once(H))
-          .copied()
-          .collect::<Vec<GroupElement>>()
-          .as_slice(),
-      );
-
-      let R = GroupElement::vartime_multiscalar_mul(
-        a_R.iter()
-          .chain(iter::once(&c_R))
-          .chain(iter::once(blind_R))
-          .copied()
-          .collect::<Vec<Scalar>>()
-          .as_slice(),
-        G_L.iter()
-          .chain(iter::once(Q))
-          .chain(iter::once(H))
-          .copied()
-          .collect::<Vec<GroupElement>>()
-          .as_slice(),
-      );
-
-      transcript.append_point(&L.compress());
-      transcript.append_point(&R.compress());
-
-      let u = transcript.challenge_scalar();
-      let u_inv = u.inverse().unwrap();
-
-      for i in 0..n {
-        a_L[i] = a_L[i] * u + u_inv * a_R[i];
-        b_L[i] = b_L[i] * u_inv + u * b_R[i];
-        G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
-      }
-
-      blind_fin = blind_fin + u * u * blind_L + u_inv * u_inv * blind_R;
-
-      L_vec.push(L.compress());
-      R_vec.push(R.compress());
-
-      a = a_L;
-      b = b_L;
-      G = G_L;
-    }
-
-    let Gamma_hat =
-      GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_fin], &[G[0], *Q, *H]);
-
-    (
-      BulletReductionProof { L_vec, R_vec },
-      Gamma_hat,
-      a[0],
-      b[0],
-      G[0],
-      blind_fin,
-    )
+impl<G: CurveGroup> BulletReductionProof<G> {
+  /// Create an inner-product proof.
+  ///
+  /// The proof is created with respect to the bases \\(G\\).
+  ///
+  /// The `transcript` is passed in as a parameter so that the
+  /// challenges depend on the *entire* transcript (including parent
+  /// protocols).
+  ///
+  /// The lengths of the vectors must all be the same, and must all be
+  /// either 0 or a power of 2.
+  pub fn prove(
+    transcript: &mut PoseidonTranscript<G::ScalarField>,
+    Q: &G::Affine,
+    G_vec: &[G::Affine],
+    H: &G::Affine,
+    a_vec: &[G::ScalarField],
+    b_vec: &[G::ScalarField],
+    blind: &G::ScalarField,
+    blinds_vec: &[(G::ScalarField, G::ScalarField)],
+  ) -> (
+    BulletReductionProof<G>,
+    G,
+    G::ScalarField,
+    G::ScalarField,
+    G,
+    G::ScalarField,
+  ) {
+    // Create slices G, H, a, b backed by their respective
+    // vectors. This lets us reslice as we compress the lengths
+    // of the vectors in the main loop below.
+    let mut G = &mut G_vec.to_owned()[..];
+    let mut a = &mut a_vec.to_owned()[..];
+    let mut b = &mut b_vec.to_owned()[..];
+
+    // All of the input vectors must have a length that is a power of two.
+    let mut n = G.len();
+    assert!(n.is_power_of_two());
+    let lg_n = n.log_2();
+
+    // All of the input vectors must have the same length.
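+    // (Size example: with n = 1024 the main loop below runs lg_n = 10
+    // rounds, halving n each time and pushing one L and one R per round,
+    // so L_vec and R_vec each end up holding 10 group elements.)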
+ assert_eq!(G.len(), n); + assert_eq!(a.len(), n); + assert_eq!(b.len(), n); + assert_eq!(blinds_vec.len(), 2 * lg_n); + + let mut L_vec = Vec::with_capacity(lg_n); + let mut R_vec = Vec::with_capacity(lg_n); + let mut blinds_iter = blinds_vec.iter(); + let mut blind_fin = *blind; + + while n != 1 { + n /= 2; + let (a_L, a_R) = a.split_at_mut(n); + let (b_L, b_R) = b.split_at_mut(n); + let (G_L, G_R) = G.split_at_mut(n); + + let c_L = inner_product(a_L, b_R); + let c_R = inner_product(a_R, b_L); + + let (blind_L, blind_R) = blinds_iter.next().unwrap(); + let gright_vec = G_R + .iter() + .chain(iter::once(Q)) + .chain(iter::once(H)) + .cloned() + .collect::>(); + + let L = G::msm_unchecked( + &gright_vec, + a_L + .iter() + .chain(iter::once(&c_L)) + .chain(iter::once(blind_L)) + .copied() + .collect::>() + .as_slice(), + ); + let gl_vec = G_L + .iter() + .chain(iter::once(Q)) + .chain(iter::once(H)) + .cloned() + .collect::>(); + let R = G::msm_unchecked( + &gl_vec, + a_R + .iter() + .chain(iter::once(&c_R)) + .chain(iter::once(blind_R)) + .copied() + .collect::>() + .as_slice(), + ); + + transcript.append_point(b"", &L); + transcript.append_point(b"", &R); + + let u: G::ScalarField = transcript.challenge_scalar(b""); + let u_inv = u.inverse().unwrap(); + + for i in 0..n { + a_L[i] = a_L[i] * u + u_inv * a_R[i]; + b_L[i] = b_L[i] * u_inv + u * b_R[i]; + G_L[i] = (G_L[i].mul(u_inv) + G_R[i].mul(u)).into_affine(); + } + + blind_fin = blind_fin + u * u * blind_L + u_inv * u_inv * blind_R; + + L_vec.push(L); + R_vec.push(R); + + a = a_L; + b = b_L; + G = G_L; } - /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication - /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. - /// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof. - fn verification_scalars( - &self, - n: usize, - transcript: &mut PoseidonTranscript, - ) -> Result<(Vec, Vec, Vec), ProofVerifyError> { - let lg_n = self.L_vec.len(); - if lg_n >= 32 { - // 4 billion multiplications should be enough for anyone - // and this check prevents overflow in 1< = challenges.clone(); - - ark_ff::fields::batch_inversion(&mut challenges_inv); - let mut allinv: Scalar = Scalar::one(); - for c in challenges.iter().filter(|s| !s.is_zero()) { - allinv.mul_assign(c); - } - allinv = allinv.inverse().unwrap(); - - // 3. Compute u_i^2 and (1/u_i)^2 - for i in 0..lg_n { - challenges[i] = challenges[i].square(); - challenges_inv[i] = challenges_inv[i].square(); - } - let challenges_sq = challenges; - let challenges_inv_sq = challenges_inv; - - // 4. Compute s values inductively. 
- let mut s = Vec::with_capacity(n); - s.push(allinv); - for i in 1..n { - let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; - let k = 1 << lg_i; - // The challenges are stored in "creation order" as [u_k,...,u_1], - // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i - let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; - s.push(s[i - k] * u_lg_i_sq); - } - - Ok((challenges_sq, challenges_inv_sq, s)) + let Gamma_hat = G::msm_unchecked(&[G[0], *Q, *H], &[a[0], a[0] * b[0], blind_fin]); + + ( + BulletReductionProof { L_vec, R_vec }, + Gamma_hat, + a[0], + b[0], + G[0].into_group(), + blind_fin, + ) + } + + /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication + /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. + /// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof. + fn verification_scalars( + &self, + n: usize, + transcript: &mut PoseidonTranscript, + ) -> Result< + ( + Vec, + Vec, + Vec, + ), + ProofVerifyError, + > { + let lg_n = self.L_vec.len(); + if lg_n >= 32 { + // 4 billion multiplications should be enough for anyone + // and this check prevents overflow in 1< Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> { - let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?; - - let Ls = self - .L_vec - .iter() - .map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError)) - .collect::, _>>()?; - - let Rs = self - .R_vec - .iter() - .map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError)) - .collect::, _>>()?; + // 1. Recompute x_k,...,x_1 based on the proof transcript + let mut challenges = Vec::with_capacity(lg_n); + for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { + transcript.append_point(b"", L); + transcript.append_point(b"", R); + challenges.push(transcript.challenge_scalar(b"")); + } - let G_hat = GroupElement::vartime_multiscalar_mul(s.as_slice(), G); - let a_hat = inner_product(a, &s); + // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1 + let mut challenges_inv: Vec = challenges.clone(); - let Gamma_hat = GroupElement::vartime_multiscalar_mul( - u_sq.iter() - .chain(u_inv_sq.iter()) - .chain(iter::once(&Scalar::one())) - .copied() - .collect::>() - .as_slice(), - Ls.iter() - .chain(Rs.iter()) - .chain(iter::once(Gamma)) - .copied() - .collect::>() - .as_slice(), - ); + ark_ff::fields::batch_inversion(&mut challenges_inv); + let mut allinv = G::ScalarField::one(); + for c in challenges.iter().filter(|s| !s.is_zero()) { + allinv.mul_assign(c); + } + allinv = allinv.inverse().unwrap(); - Ok((G_hat, Gamma_hat, a_hat)) + // 3. Compute u_i^2 and (1/u_i)^2 + for i in 0..lg_n { + challenges[i] = challenges[i].square(); + challenges_inv[i] = challenges_inv[i].square(); + } + let challenges_sq = challenges; + let challenges_inv_sq = challenges_inv; + + // 4. Compute s values inductively. 
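+    // (Worked example: for n = 4, lg_n = 2 and challenges stored in
+    // creation order as [u_2, u_1], this yields
+    //   s = [1/(u_2*u_1), u_1/u_2, u_2/u_1, u_2*u_1],
+    // i.e. s_i = prod_j u_j^{±1}, with the sign of each exponent given by
+    // bit j of i.)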
+    let mut s = Vec::with_capacity(n);
+    s.push(allinv);
+    for i in 1..n {
+      let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
+      let k = 1 << lg_i;
+      // The challenges are stored in "creation order" as [u_k, ..., u_1],
+      // so u_{lg(i)+1} is indexed by (lg_n - 1) - lg_i
+      let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
+      s.push(s[i - k] * u_lg_i_sq);
+    }
+
+    Ok((challenges_sq, challenges_inv_sq, s))
+  }
+
+  /// This method is for testing that proof generation works,
+  /// but for efficiency the actual protocols would use the
+  /// `verification_scalars` method to combine inner product verification
+  /// with other checks in a single multiscalar multiplication.
+  pub fn verify(
+    &self,
+    n: usize,
+    a: &[G::ScalarField],
+    transcript: &mut PoseidonTranscript<G::ScalarField>,
+    Gamma: &G,
+    Gs: &[G::Affine],
+  ) -> Result<(G, G, G::ScalarField), ProofVerifyError> {
+    let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
+
+    let Ls = &self.L_vec;
+    let Rs = &self.R_vec;
+
+    let G_hat = G::msm(Gs, s.as_slice()).map_err(|_| ProofVerifyError::InternalError)?;
+    let a_hat = inner_product(a, &s);
+
+    let Gamma_hat = G::msm(
+      &G::normalize_batch(
+        &Ls
+          .iter()
+          .chain(Rs.iter())
+          .chain(iter::once(Gamma))
+          .copied()
+          .collect::<Vec<G>>(),
+      ),
+      u_sq
+        .iter()
+        .chain(u_inv_sq.iter())
+        .chain(iter::once(&G::ScalarField::one()))
+        .copied()
+        .collect::<Vec<G::ScalarField>>()
+        .as_slice(),
+    )
+    .map_err(|_| ProofVerifyError::InternalError)?;
+
+    Ok((G_hat, Gamma_hat, a_hat))
+  }
 }
 
 /// Computes an inner product of two vectors
@@ -263,14 +265,14 @@ impl BulletReductionProof {
 /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
 /// \\]
 /// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
-pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
-  assert!(
-    a.len() == b.len(),
-    "inner_product(a,b): lengths of vectors do not match"
-  );
-  let mut out = Scalar::zero();
-  for i in 0..a.len() {
-    out += a[i] * b[i];
-  }
-  out
+fn inner_product<F: Field>(a: &[F], b: &[F]) -> F {
+  assert!(
+    a.len() == b.len(),
+    "inner_product(a,b): lengths of vectors do not match"
+  );
+  let mut out = F::zero();
+  for i in 0..a.len() {
+    out += a[i] * b[i];
+  }
+  out
 }
diff --git a/src/nizk/mod.rs b/src/nizk/mod.rs
index 26bcc74..ffafabb 100644
--- a/src/nizk/mod.rs
+++ b/src/nizk/mod.rs
@@ -1,760 +1,217 @@
 #![allow(clippy::too_many_arguments)]
+use super::commitments::{MultiCommitGens, PedersenCommit};
+use super::errors::ProofVerifyError;
+use crate::ark_std::UniformRand;
 use crate::math::Math;
-use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
+use crate::poseidon_transcript::PoseidonTranscript;
+use crate::transcript::Transcript;
+use ark_crypto_primitives::sponge::Absorb;
+use ark_ec::CurveGroup;
 
-use super::commitments::{Commitments, MultiCommitGens};
-use super::errors::ProofVerifyError;
-use super::group::{
-  CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement, UnpackGroupElement,
-};
-use super::random::RandomTape;
-use super::scalar::Scalar;
-use ark_ec::ProjectiveCurve;
-use ark_ff::PrimeField;
 use ark_serialize::*;
+use std::ops::Mul;
 
 mod bullet;
 use bullet::BulletReductionProof;
 
-#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
-pub struct KnowledgeProof {
-  alpha: CompressedGroup,
-  z1: Scalar,
-  z2: Scalar,
-}
-
-impl KnowledgeProof {
-  fn protocol_name() -> &'static [u8] {
-    b"knowledge proof"
-  }
-
-  pub fn prove(
-    gens_n: &MultiCommitGens,
-    transcript: &mut PoseidonTranscript,
-    random_tape: &mut
RandomTape, - x: &Scalar, - r: &Scalar, - ) -> (KnowledgeProof, CompressedGroup) { - // transcript.append_protocol_name(KnowledgeProof::protocol_name()); - - // produce two random Scalars - let t1 = random_tape.random_scalar(b"t1"); - let t2 = random_tape.random_scalar(b"t2"); - - let C = x.commit(r, gens_n).compress(); - C.append_to_poseidon(transcript); - - let alpha = t1.commit(&t2, gens_n).compress(); - alpha.append_to_poseidon(transcript); - - let c = transcript.challenge_scalar(); - - let z1 = c * x + t1; - let z2 = c * r + t2; - - (KnowledgeProof { alpha, z1, z2 }, C) - } - - pub fn verify( - &self, - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - C: &CompressedGroup, - ) -> Result<(), ProofVerifyError> { - // transcript.append_protocol_name(KnowledgeProof::protocol_name()); - C.append_to_poseidon(transcript); - self.alpha.append_to_poseidon(transcript); - - let c = transcript.challenge_scalar(); - - let lhs = self.z1.commit(&self.z2, gens_n).compress(); - let rhs = (C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?).compress(); - - if lhs == rhs { - Ok(()) - } else { - Err(ProofVerifyError::InternalError) - } - } -} - -#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct EqualityProof { - alpha: CompressedGroup, - z: Scalar, -} - -impl EqualityProof { - fn protocol_name() -> &'static [u8] { - b"equality proof" - } - - pub fn prove( - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - v1: &Scalar, - s1: &Scalar, - v2: &Scalar, - s2: &Scalar, - ) -> (EqualityProof, CompressedGroup, CompressedGroup) { - // transcript.append_protocol_name(EqualityProof::protocol_name()); - - // produce a random Scalar - let r = random_tape.random_scalar(b"r"); - - let C1 = v1.commit(s1, gens_n).compress(); - transcript.append_point(&C1); - - let C2 = v2.commit(s2, gens_n).compress(); - transcript.append_point(&C2); - - let alpha = gens_n.h.mul(r.into_repr()).compress(); - transcript.append_point(&alpha); - - let c = transcript.challenge_scalar(); - - let z = c * ((*s1) - s2) + r; - - (EqualityProof { alpha, z }, C1, C2) - } - - pub fn verify( - &self, - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - C1: &CompressedGroup, - C2: &CompressedGroup, - ) -> Result<(), ProofVerifyError> { - // transcript.append_protocol_name(EqualityProof::protocol_name()); - - transcript.append_point(C1); - transcript.append_point(C2); - transcript.append_point(&self.alpha); - - let c = transcript.challenge_scalar(); - let rhs = { - let C = C1.unpack()? 
- C2.unpack()?; - (C.mul(c.into_repr()) + self.alpha.unpack()?).compress() - }; - println!("rhs {:?}", rhs); - - let lhs = gens_n.h.mul(self.z.into_repr()).compress(); - println!("lhs {:?}", lhs); - if lhs == rhs { - Ok(()) - } else { - Err(ProofVerifyError::InternalError) - } - } -} - -#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct ProductProof { - alpha: CompressedGroup, - beta: CompressedGroup, - delta: CompressedGroup, - z: Vec, -} - -impl ProductProof { - fn protocol_name() -> &'static [u8] { - b"product proof" - } - - pub fn prove( - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - x: &Scalar, - rX: &Scalar, - y: &Scalar, - rY: &Scalar, - z: &Scalar, - rZ: &Scalar, - ) -> ( - ProductProof, - CompressedGroup, - CompressedGroup, - CompressedGroup, - ) { - // transcript.append_protocol_name(ProductProof::protocol_name()); - - // produce five random Scalar - let b1 = random_tape.random_scalar(b"b1"); - let b2 = random_tape.random_scalar(b"b2"); - let b3 = random_tape.random_scalar(b"b3"); - let b4 = random_tape.random_scalar(b"b4"); - let b5 = random_tape.random_scalar(b"b5"); - - let X_unc = x.commit(rX, gens_n); - - let X = X_unc.compress(); - transcript.append_point(&X); - let X_new = GroupElement::decompress(&X); - - assert_eq!(X_unc, X_new.unwrap()); - - let Y = y.commit(rY, gens_n).compress(); - transcript.append_point(&Y); - - let Z = z.commit(rZ, gens_n).compress(); - transcript.append_point(&Z); - - let alpha = b1.commit(&b2, gens_n).compress(); - transcript.append_point(&alpha); - - let beta = b3.commit(&b4, gens_n).compress(); - transcript.append_point(&beta); - - let delta = { - let gens_X = &MultiCommitGens { - n: 1, - G: vec![GroupElement::decompress(&X).unwrap()], - h: gens_n.h, - }; - b3.commit(&b5, gens_X).compress() - }; - transcript.append_point(&delta); - - let c = transcript.challenge_scalar(); - - let z1 = b1 + c * x; - let z2 = b2 + c * rX; - let z3 = b3 + c * y; - let z4 = b4 + c * rY; - let z5 = b5 + c * ((*rZ) - (*rX) * y); - let z = [z1, z2, z3, z4, z5].to_vec(); - - ( - ProductProof { - alpha, - beta, - delta, - z, - }, - X, - Y, - Z, - ) - } - - fn check_equality( - P: &CompressedGroup, - X: &CompressedGroup, - c: &Scalar, - gens_n: &MultiCommitGens, - z1: &Scalar, - z2: &Scalar, - ) -> bool { - println!("{:?}", X); - let lhs = (GroupElement::decompress(P).unwrap() - + GroupElement::decompress(X).unwrap().mul(c.into_repr())) - .compress(); - let rhs = z1.commit(z2, gens_n).compress(); - - lhs == rhs - } - - pub fn verify( - &self, - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - X: &CompressedGroup, - Y: &CompressedGroup, - Z: &CompressedGroup, - ) -> Result<(), ProofVerifyError> { - // transcript.append_protocol_name(ProductProof::protocol_name()); - - X.append_to_poseidon(transcript); - Y.append_to_poseidon(transcript); - Z.append_to_poseidon(transcript); - self.alpha.append_to_poseidon(transcript); - self.beta.append_to_poseidon(transcript); - self.delta.append_to_poseidon(transcript); - - let z1 = self.z[0]; - let z2 = self.z[1]; - let z3 = self.z[2]; - let z4 = self.z[3]; - let z5 = self.z[4]; - - let c = transcript.challenge_scalar(); - - if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2) - && ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4) - && ProductProof::check_equality( - &self.delta, - Z, - &c, - &MultiCommitGens { - n: 1, - G: vec![X.unpack()?], - h: gens_n.h, - }, - &z3, - &z5, - ) - { - Ok(()) - } else { - 
Err(ProofVerifyError::InternalError) - } - } -} - -#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct DotProductProof { - delta: CompressedGroup, - beta: CompressedGroup, - z: Vec, - z_delta: Scalar, - z_beta: Scalar, -} - -impl DotProductProof { - fn protocol_name() -> &'static [u8] { - b"dot product proof" - } - - pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar { - assert_eq!(a.len(), b.len()); - (0..a.len()).map(|i| a[i] * b[i]).sum() - } - - pub fn prove( - gens_1: &MultiCommitGens, - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - x_vec: &[Scalar], - blind_x: &Scalar, - a_vec: &[Scalar], - y: &Scalar, - blind_y: &Scalar, - ) -> (DotProductProof, CompressedGroup, CompressedGroup) { - // transcript.append_protocol_name(DotProductProof::protocol_name()); - - let n = x_vec.len(); - assert_eq!(x_vec.len(), a_vec.len()); - assert_eq!(gens_n.n, a_vec.len()); - assert_eq!(gens_1.n, 1); - - // produce randomness for the proofs - let d_vec = random_tape.random_vector(b"d_vec", n); - let r_delta = random_tape.random_scalar(b"r_delta"); - let r_beta = random_tape.random_scalar(b"r_beta"); - - let Cx = x_vec.commit(blind_x, gens_n).compress(); - Cx.append_to_poseidon(transcript); - - let Cy = y.commit(blind_y, gens_1).compress(); - Cy.append_to_poseidon(transcript); - - transcript.append_scalar_vector(a_vec); - - let delta = d_vec.commit(&r_delta, gens_n).compress(); - delta.append_to_poseidon(transcript); - - let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec); - - let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress(); - beta.append_to_poseidon(transcript); - - let c = transcript.challenge_scalar(); - - let z = (0..d_vec.len()) - .map(|i| c * x_vec[i] + d_vec[i]) - .collect::>(); - - let z_delta = c * blind_x + r_delta; - let z_beta = c * blind_y + r_beta; - - ( - DotProductProof { - delta, - beta, - z, - z_delta, - z_beta, - }, - Cx, - Cy, - ) - } - - pub fn verify( - &self, - gens_1: &MultiCommitGens, - gens_n: &MultiCommitGens, - transcript: &mut PoseidonTranscript, - a: &[Scalar], - Cx: &CompressedGroup, - Cy: &CompressedGroup, - ) -> Result<(), ProofVerifyError> { - assert_eq!(gens_n.n, a.len()); - assert_eq!(gens_1.n, 1); - - // transcript.append_protocol_name(DotProductProof::protocol_name()); - Cx.append_to_poseidon(transcript); - Cy.append_to_poseidon(transcript); - transcript.append_scalar_vector(a); - self.delta.append_to_poseidon(transcript); - self.beta.append_to_poseidon(transcript); - - let c = transcript.challenge_scalar(); - - let mut result = Cx.unpack()?.mul(c.into_repr()) + self.delta.unpack()? - == self.z.commit(&self.z_delta, gens_n); - - let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a); - result &= Cy.unpack()?.mul(c.into_repr()) + self.beta.unpack()? 
- == dotproduct_z_a.commit(&self.z_beta, gens_1); - if result { - Ok(()) - } else { - Err(ProofVerifyError::InternalError) - } - } -} - #[derive(Clone)] -pub struct DotProductProofGens { - n: usize, - pub gens_n: MultiCommitGens, - pub gens_1: MultiCommitGens, +pub struct DotProductProofGens { + n: usize, + pub gens_n: MultiCommitGens, + pub gens_1: MultiCommitGens, } -impl DotProductProofGens { - pub fn new(n: usize, label: &[u8]) -> Self { - let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n); - DotProductProofGens { n, gens_n, gens_1 } - } +impl DotProductProofGens { + pub fn new(n: usize, label: &[u8]) -> Self { + let (gens_n, gens_1) = MultiCommitGens::::new(n + 1, label).split_at(n); + DotProductProofGens { n, gens_n, gens_1 } + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct DotProductProofLog { - bullet_reduction_proof: BulletReductionProof, - delta: CompressedGroup, - beta: CompressedGroup, - z1: Scalar, - z2: Scalar, +pub struct DotProductProofLog { + bullet_reduction_proof: BulletReductionProof, + delta: G, + beta: G, + z1: G::ScalarField, + z2: G::ScalarField, } -impl DotProductProofLog { - fn protocol_name() -> &'static [u8] { - b"dot product proof (log)" - } - - pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar { - assert_eq!(a.len(), b.len()); - (0..a.len()).map(|i| a[i] * b[i]).sum() - } - - pub fn prove( - gens: &DotProductProofGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - x_vec: &[Scalar], - blind_x: &Scalar, - a_vec: &[Scalar], - y: &Scalar, - blind_y: &Scalar, - ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) { - // transcript.append_protocol_name(DotProductProofLog::protocol_name()); - - let n = x_vec.len(); - assert_eq!(x_vec.len(), a_vec.len()); - assert_eq!(gens.n, n); - - // produce randomness for generating a proof - let d = random_tape.random_scalar(b"d"); - let r_delta = random_tape.random_scalar(b"r_delta"); - let r_beta = random_tape.random_scalar(b"r_delta"); - let blinds_vec = { - let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2()); - let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2()); - (0..v1.len()) - .map(|i| (v1[i], v2[i])) - .collect::>() - }; - - let Cx = x_vec.commit(blind_x, &gens.gens_n).compress(); - transcript.append_point(&Cx); - - let Cy = y.commit(blind_y, &gens.gens_1).compress(); - transcript.append_point(&Cy); - transcript.append_scalar_vector(a_vec); - - let blind_Gamma = (*blind_x) + blind_y; - let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) = - BulletReductionProof::prove( - transcript, - &gens.gens_1.G[0], - &gens.gens_n.G, - &gens.gens_n.h, - x_vec, - a_vec, - &blind_Gamma, - &blinds_vec, - ); - let y_hat = x_hat * a_hat; - - let delta = { - let gens_hat = MultiCommitGens { - n: 1, - G: vec![g_hat], - h: gens.gens_1.h, - }; - d.commit(&r_delta, &gens_hat).compress() - }; - transcript.append_point(&delta); - - let beta = d.commit(&r_beta, &gens.gens_1).compress(); - transcript.append_point(&beta); - - let c = transcript.challenge_scalar(); - - let z1 = d + c * y_hat; - let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta; - - ( - DotProductProofLog { - bullet_reduction_proof, - delta, - beta, - z1, - z2, - }, - Cx, - Cy, - ) - } - - pub fn verify( - &self, - n: usize, - gens: &DotProductProofGens, - transcript: &mut PoseidonTranscript, - a: &[Scalar], - Cx: &CompressedGroup, - Cy: &CompressedGroup, - ) -> Result<(), ProofVerifyError> { - assert_eq!(gens.n, n); - 
assert_eq!(a.len(), n); - - // transcript.append_protocol_name(DotProductProofLog::protocol_name()); - // Cx.append_to_poseidon( transcript); - // Cy.append_to_poseidon( transcript); - // a.append_to_poseidon( transcript); - - transcript.append_point(Cx); - transcript.append_point(Cy); - transcript.append_scalar_vector(a); - - let Gamma = Cx.unpack()? + Cy.unpack()?; - - let (g_hat, Gamma_hat, a_hat) = - self.bullet_reduction_proof - .verify(n, a, transcript, &Gamma, &gens.gens_n.G)?; - // self.delta.append_to_poseidon( transcript); - // self.beta.append_to_poseidon( transcript); - - transcript.append_point(&self.delta); - transcript.append_point(&self.beta); - - let c = transcript.challenge_scalar(); - - let c_s = &c; - let beta_s = self.beta.unpack()?; - let a_hat_s = &a_hat; - let delta_s = self.delta.unpack()?; - let z1_s = &self.z1; - let z2_s = &self.z2; - - let lhs = ((Gamma_hat.mul(c_s.into_repr()) + beta_s).mul(a_hat_s.into_repr()) + delta_s) - .compress(); - let rhs = ((g_hat + gens.gens_1.G[0].mul(a_hat_s.into_repr())).mul(z1_s.into_repr()) - + gens.gens_1.h.mul(z2_s.into_repr())) - .compress(); - - assert_eq!(lhs, rhs); - - if lhs == rhs { - Ok(()) - } else { - Err(ProofVerifyError::InternalError) - } - } +impl DotProductProofLog +where + G: CurveGroup, + G::ScalarField: Absorb, +{ + pub fn prove( + gens: &DotProductProofGens, + transcript: &mut PoseidonTranscript, + x_vec: &[G::ScalarField], + blind_x: &G::ScalarField, + a_vec: &[G::ScalarField], + y: &G::ScalarField, + blind_y: &G::ScalarField, + ) -> (Self, G, G) { + // transcript.append_protocol_name(DotProductProofLog::protocol_name()); + + let n = x_vec.len(); + assert_eq!(x_vec.len(), a_vec.len()); + assert_eq!(gens.n, n); + + // produce randomness for generating a proof + let d = G::ScalarField::rand(&mut rand::thread_rng()); + let r_delta = G::ScalarField::rand(&mut rand::thread_rng()).into(); + let r_beta = G::ScalarField::rand(&mut rand::thread_rng()).into(); + let blinds_vec = { + (0..2 * n.log_2()) + .map(|_| { + ( + G::ScalarField::rand(&mut rand::thread_rng()).into(), + G::ScalarField::rand(&mut rand::thread_rng()).into(), + ) + }) + .collect::>() + }; + + let Cx = PedersenCommit::commit_slice(x_vec, blind_x, &gens.gens_n); + transcript.append_point(b"", &Cx); + + let Cy = PedersenCommit::commit_scalar(y, blind_y, &gens.gens_1); + transcript.append_point(b"", &Cy); + transcript.append_scalar_vector(b"", &a_vec); + + let blind_Gamma = (*blind_x) + blind_y; + let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) = + BulletReductionProof::::prove( + transcript, + &gens.gens_1.G[0], + &gens.gens_n.G, + &gens.gens_n.h, + x_vec, + a_vec, + &blind_Gamma, + &blinds_vec, + ); + let y_hat = x_hat * a_hat; + + let delta = { + let gens_hat = MultiCommitGens { + n: 1, + G: vec![g_hat.into_affine()], + h: gens.gens_1.h, + }; + PedersenCommit::commit_scalar(&d, &r_delta, &gens_hat) + }; + transcript.append_point(b"", &delta); + + let beta = PedersenCommit::commit_scalar(&d, &r_beta, &gens.gens_1); + transcript.append_point(b"", &beta); + + let c: G::ScalarField = transcript.challenge_scalar(b""); + + let z1 = d + c * y_hat; + let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta; + + ( + Self { + bullet_reduction_proof, + delta, + beta, + z1, + z2, + }, + Cx, + Cy, + ) + } + + pub fn verify( + &self, + n: usize, + gens: &DotProductProofGens, + transcript: &mut PoseidonTranscript, + a: &[G::ScalarField], + Cx: &G, + Cy: &G, + ) -> Result<(), ProofVerifyError> { + assert_eq!(gens.n, n); + assert_eq!(a.len(), 
n); + + // transcript.append_protocol_name(DotProductProofLog::protocol_name()); + // Cx.write_to_transcript( transcript); + // Cy.write_to_transcript( transcript); + // a.write_to_transcript( transcript); + + transcript.append_point(b"", Cx); + transcript.append_point(b"", Cy); + transcript.append_scalar_vector(b"", &a); + + let Gamma = Cx.add(Cy); + + let (g_hat, Gamma_hat, a_hat) = + self + .bullet_reduction_proof + .verify(n, a, transcript, &Gamma, &gens.gens_n.G)?; + // self.delta.write_to_transcript( transcript); + // self.beta.write_to_transcript( transcript); + + transcript.append_point(b"", &self.delta); + transcript.append_point(b"", &self.beta); + + let c = transcript.challenge_scalar(b""); + + let c_s = &c; + let beta_s = self.beta; + let a_hat_s = &a_hat; + let delta_s = self.delta; + let z1_s = &self.z1; + let z2_s = &self.z2; + + let lhs = (Gamma_hat.mul(c_s) + beta_s).mul(a_hat_s) + delta_s; + let rhs = (g_hat + gens.gens_1.G[0].mul(a_hat_s)).mul(z1_s) + gens.gens_1.h.mul(z2_s); + + assert_eq!(lhs, rhs); + + if lhs == rhs { + Ok(()) + } else { + Err(ProofVerifyError::InternalError) + } + } } #[cfg(test)] mod tests { - use crate::parameters::poseidon_params; + use crate::parameters::poseidon_params; - use super::*; - use ark_std::UniformRand; - #[test] - fn check_knowledgeproof() { - let mut rng = ark_std::rand::thread_rng(); + use super::*; + use ark_std::UniformRand; + type F = ark_bls12_377::Fr; + type G = ark_bls12_377::G1Projective; - let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof"); + #[test] + fn check_dotproductproof_log() { + let mut rng = ark_std::rand::thread_rng(); - let x = Scalar::rand(&mut rng); - let r = Scalar::rand(&mut rng); + let n = 1024; - let params = poseidon_params(); + let gens = DotProductProofGens::::new(n, b"test-1024"); - let mut random_tape = RandomTape::new(b"proof"); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let (proof, committed_value) = - KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r); + let x: Vec = (0..n).map(|_i| F::rand(&mut rng)).collect(); + let a: Vec = (0..n).map(|_i| F::rand(&mut rng)).collect(); + let y = crate::dot_product(&x, &a); - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&gens_1, &mut verifier_transcript, &committed_value) - .is_ok()); - } + let r_x = F::rand(&mut rng); + let r_y = F::rand(&mut rng); - #[test] - fn check_equalityproof() { - let mut rng = ark_std::rand::thread_rng(); - let params = poseidon_params(); - - let gens_1 = MultiCommitGens::new(1, b"test-equalityproof"); - let v1 = Scalar::rand(&mut rng); - let v2 = v1; - let s1 = Scalar::rand(&mut rng); - let s2 = Scalar::rand(&mut rng); - - let mut random_tape = RandomTape::new(b"proof"); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let (proof, C1, C2) = EqualityProof::prove( - &gens_1, - &mut prover_transcript, - &mut random_tape, - &v1, - &s1, - &v2, - &s2, - ); - - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&gens_1, &mut verifier_transcript, &C1, &C2) - .is_ok()); - } + let params = poseidon_params(); + let mut prover_transcript = PoseidonTranscript::new(¶ms); + let (proof, Cx, Cy) = + DotProductProofLog::::prove(&gens, &mut prover_transcript, &x, &r_x, &a, &y, &r_y); - #[test] - fn check_productproof() { - let mut rng = ark_std::rand::thread_rng(); - let pt = GroupElement::rand(&mut rng); - let pt_c = pt.compress(); - let pt2 = GroupElement::decompress(&pt_c).unwrap(); - assert_eq!(pt, pt2); - 
let params = poseidon_params(); - - let gens_1 = MultiCommitGens::new(1, b"test-productproof"); - let x = Scalar::rand(&mut rng); - let rX = Scalar::rand(&mut rng); - let y = Scalar::rand(&mut rng); - let rY = Scalar::rand(&mut rng); - let z = x * y; - let rZ = Scalar::rand(&mut rng); - - let mut random_tape = RandomTape::new(b"proof"); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let (proof, X, Y, Z) = ProductProof::prove( - &gens_1, - &mut prover_transcript, - &mut random_tape, - &x, - &rX, - &y, - &rY, - &z, - &rZ, - ); - - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z) - .is_ok()); - } - - #[test] - fn check_dotproductproof() { - let mut rng = ark_std::rand::thread_rng(); - - let n = 1024; - - let gens_1 = MultiCommitGens::new(1, b"test-two"); - let gens_1024 = MultiCommitGens::new(n, b"test-1024"); - let params = poseidon_params(); - - let mut x: Vec = Vec::new(); - let mut a: Vec = Vec::new(); - for _ in 0..n { - x.push(Scalar::rand(&mut rng)); - a.push(Scalar::rand(&mut rng)); - } - let y = DotProductProofLog::compute_dotproduct(&x, &a); - let r_x = Scalar::rand(&mut rng); - let r_y = Scalar::rand(&mut rng); - - let mut random_tape = RandomTape::new(b"proof"); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let (proof, Cx, Cy) = DotProductProof::prove( - &gens_1, - &gens_1024, - &mut prover_transcript, - &mut random_tape, - &x, - &r_x, - &a, - &y, - &r_y, - ); - - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy) - .is_ok()); - } - - #[test] - fn check_dotproductproof_log() { - let mut rng = ark_std::rand::thread_rng(); - - let n = 1024; - - let gens = DotProductProofGens::new(n, b"test-1024"); - - let x: Vec = (0..n).map(|_i| Scalar::rand(&mut rng)).collect(); - let a: Vec = (0..n).map(|_i| Scalar::rand(&mut rng)).collect(); - let y = DotProductProof::compute_dotproduct(&x, &a); - - let r_x = Scalar::rand(&mut rng); - let r_y = Scalar::rand(&mut rng); - - let params = poseidon_params(); - let mut random_tape = RandomTape::new(b"proof"); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let (proof, Cx, Cy) = DotProductProofLog::prove( - &gens, - &mut prover_transcript, - &mut random_tape, - &x, - &r_x, - &a, - &y, - &r_y, - ); - - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy) - .is_ok()); - } + let mut verifier_transcript = PoseidonTranscript::new(¶ms); + assert!(proof + .verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy) + .is_ok()); + } } diff --git a/src/parameters.rs b/src/parameters.rs index 5cf3761..ce5b5a6 100644 --- a/src/parameters.rs +++ b/src/parameters.rs @@ -1,11 +1,10 @@ +use ark_bls12_377::Fr; +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; use std::str::FromStr; - -use ark_sponge::poseidon::PoseidonParameters; // Copyright: https://github.com/nikkolasg/ark-dkg/blob/main/src/parameters.rs use json::JsonValue; use lazy_static::lazy_static; -use crate::group::Fr; lazy_static! { // bls12377_rate2_constraints_fr: pub static ref FR: JsonValue = object! 
{
@@ -145,32 +144,2333 @@ array!["228517621981785468369663538305998424621845824654552006112396193307208970
 }
 
 /// TODO
-pub fn poseidon_params() -> PoseidonParameters<Fr> {
-  let arks = FR["ark"]
-    .members()
-    .map(|ark| {
-      ark.members()
-        .map(|v| Fr::from_str(v.as_str().unwrap()).unwrap())
-        .collect::<Vec<Fr>>()
-    })
-    .collect::<Vec<Vec<Fr>>>();
-  let mds = FR["mds"]
+pub fn poseidon_params() -> PoseidonConfig<Fr> {
+  let arks = FR["ark"]
+    .members()
+    .map(|ark| {
+      ark
         .members()
-        .map(|m| {
-          m.members()
-            .map(|v| Fr::from_str(v.as_str().unwrap()).unwrap())
-            .collect::<Vec<Fr>>()
-        })
-        .collect::<Vec<Vec<Fr>>>();
-  PoseidonParameters::new(
-    FR["full_rounds"].as_u32().unwrap(),
-    FR["partial_rounds"].as_u32().unwrap(),
-    FR["alpha"].as_u64().unwrap(),
-    mds,
-    arks,
-  )
+        .map(|v| Fr::from_str(v.as_str().unwrap()).unwrap())
+        .collect::<Vec<Fr>>()
+    })
+    .collect::<Vec<Vec<Fr>>>();
+  let mds = FR["mds"]
+    .members()
+    .map(|m| {
+      m.members()
+        .map(|v| Fr::from_str(v.as_str().unwrap()).unwrap())
+        .collect::<Vec<Fr>>()
+    })
+    .collect::<Vec<Vec<Fr>>>();
+  PoseidonConfig::new(
+    FR["full_rounds"].as_usize().unwrap(),
+    FR["partial_rounds"].as_usize().unwrap(),
+    FR["alpha"].as_u64().unwrap(),
+    mds,
+    arks,
+    FR["rate"].as_usize().unwrap(),
+    // TODO (nikkolasg): check out the concrete parameters for the capacity;
+    // so far taken from https://github.com/AleoHQ/snarkVM/blob/d6ce2d3540b9355b59ef580db998188c786f8599/fields/src/traits/poseidon_default.rs#L43
+    1,
+  )
 }
 
+// Generated from poseidon_transcript::test::poseidon_parameters_generation
+pub fn poseidon_params_bls12381() -> PoseidonConfig<ark_bls12_381::Fr> {
+  use ark_ff::PrimeField;
+
+  use poseidon_parameters::{
+    Alpha, ArcMatrix, Matrix, MatrixOperations, MdsMatrix, OptimizedArcMatrix,
+    OptimizedMdsMatrices, PoseidonParameters, RoundNumbers, SquareMatrix,
+  };
+
+  /// Parameters for the rate-1 instance of Poseidon.
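+  /// (As instantiated below: 128-bit security target `M`, width `t = 2`,
+  /// i.e. rate 1 plus capacity 1, S-box `alpha = 5`, and `r_F = 8` full /
+  /// `r_P = 56` partial rounds.)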
+ pub fn rate_1() -> PoseidonParameters { + PoseidonParameters { + M: 128, + t: 2, + arc: ArcMatrix::new( + 64, + 2, + vec![ + F::from_str( + "35174739893055911104493616029378130908017657834702731071195911003169112450229", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40230473166484073181383530626136429631051240172158259323832118663695222064618", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "44839971797550275719608927493602345619671796219131839128512847210863471539893", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "48611564661854252146943435349048894917868766542696555144699929229862960415826", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40685922923883326639435460208454299274336241847181916712959630054792010757353", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "15664326207051802832937062272185115402684860397390155009854115660846132411502", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "20394588293088308460232807257084465383971903994422965392216628021702933756783", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "38072899122370719412288433912125900148799524273752876684473712699262778897654", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "49322204581160802920728528441668898478821361914594574984125707955891751339030", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "50347305345520436535964839331484028706021264874956603553067798037836173651466", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40078972002964969619442410831168416589272403782380052533737439660325916494912", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16924481893633890556036089675091788745304335123156116162143840931183063196275", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "15950945481350597658451757244282476321967934435937826665895833437077276040754", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "45461282993637819573288749652453263783235945959670912694991729940283965985733", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "28038644046234298370628400737789907143817173874662134250914598498795544431933", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "11068658063110546729562922745621277134925014371838931507832785600289614789661", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "52268179647931159024439236070026950641875512180000134500458099574638256905396", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "20086107503624605748636970696926057193402564249603453473832198318657465264844", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "43167135986193729211374724389523185950396183466709666189982348666853925219274", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "17447884039561508187156464597183128324504945258768782636135974167343387840370", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "38143921110340159188228723493420793611575037275078276538202087394517162729554", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16552274712964434889263219772539968802934308159216951923060313539515542031786", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "21533690197859269486445042768281999749203728740955226853531960445800814390872", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "21880324978302337713295482389160476412101864070449056420604485964938425631874", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "21087213863282472584378891060158648179244240590829455065271622458534461188836", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + 
"45441431536584624993024537766051601076632677727170866925607963222280486713049", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "30366589203056029534723552859270912699187398046791858542022387243548834105416", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16733270300941292512227942400499003353179417300568446496228472021958136869720", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "47689344912249729511524931558254404476237521452234110431416696572010445584377", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "51082398733030351856006960204664584892075227924560364715540908659573989781750", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "43167631539924493632724725819354890041217481730115306669306358893967667398874", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "26279180116818040201102703456273647693759458841914982758720296510206405428021", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "46216367353381802394196440660508655919466089439065744128194487821032228754652", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "7697386669681950056820540614309128742475760814776187777185453179049760288818", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "21817782321868614563882198267973607652180292878221625474094565055913249527776", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "32913105459022632076500291209667002113801444807962815598956143828295422498435", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "8502502120080628515364514243099640525167810254369523636419786457595198827016", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "30648576830725883184207011877575066156796295678736109722327320104219004516016", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "45671339585608301452963125193632555070872004563546575584248952737167410364255", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "2451562695720526255410159479987351458212942773001104345088347268337017907959", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "7231969043003068614870036828648884326376597612869950504826116686399812612615", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "47273307399297813289148635743141600935869642174900722409535446469272321692250", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "27329929942264081140373255677952719498722443205980316837000158653641048193545", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "34885886867965298185213628215610826501969641592969627478599392659366672373049", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "25721535552417582691507498115295027536875255063383254204046680789623453672738", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "33423368799112636093951957186824418831319616314165017049161108153912788138739", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "25189833401427047672367010746537756277335149535290712725530069868987176912248", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "47958321445457642291354706357838648199395839243428939094705320859005837779683", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "31774563161791760397918769926476984438505538732495587514473389056037994152917", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "47850858374935538132163048932011117808042772861034650362901884832239133015769", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "12199128582337530870446335645513552144450426981759317374083089598580379977396", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + 
"47383053307574057321551022954108360871160286114060097754143644961632203800586", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "13803873123545025453120373087635256640332504728831881021333511396025254579362", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "23837178543685639949465643421241187863068824855019641916163779619876342616994", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "31189587782086980076115943766865860302487147542493494232203123657276644861038", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "46499582074722273407549664254855207560785631035023694347608062453482015876732", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "26662253988013080389152043229142922449414668235746968994211290964651963062577", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "7640762545216115926527336572554187073301504782874830986070234716899751469743", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "34498963117721665288552413822781095260286143856801028464976598055529802364121", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "36341838182182173408654982018067474537982027919615372559077851240339996099909", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "48300815548868869899847665404889851174998730453507260888165473018017728774364", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "32747300835280625438818285868260843662581362309780466961516941514877133787742", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "45303055704904829718227319208556302736092241137651995389242696979297340573498", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "4026222424264187292280393735543795340594832329968061359731987906507864963350", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "31228142597140675203545792153829318135052985461159014806691885654670181871080", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16750970802520682106296025351756788120283612961882144708385294780405339907653", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "20590763839978944194181943371062517541785283010773701162056751611994399836929", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "37594126822410828138420253267022641722217448081718524151329800547846230255554", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "2394917091132363277251637490654694317375647018617394697153359156851524846663", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "19737567843628339977867209886872321409023634418218069475717765230353858108711", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "42204066697221303743688912704913307336835691602781490921152392223146514657878", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "38876322214460807353380405022935203128264986058834077654885314090533204762902", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40247991658021514679970964566577204929744204347802761928836742882323686245178", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "39561906161264753200012194261000443020947453081833838411624410505039816555019", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "11166460550589161376470931941636300531100498757195643048019370497750388675069", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "24790565769125580275808568537357949817511203246184311136698467928472412457388", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "11554811916664964398531930051533768691042015283409338870777253388332997487366", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + 
"29075039934313065791873216420662295250443756892492849722546053940305352208592", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "39006987007077517708714677464659567763889315959219170058379373515871687446558", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "10061028159773233365380812282944217591122833800994251428395880422048305719205", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "25031744215108526550081827540392247511207429766528399786265447570055547629537", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "10547501786444119997366494351393198125463151669258024373934193854191592243660", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "41670235909630407351693051359364867863450981844765151303749913478896054145311", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "1550363730816174450383908114106000535080436631518311072819950124158318191772", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "45637974422753200101239746614094187617113326582060147125507840703682309645118", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "6370060789642013553710100441744454816236009933679880713369392969145674474801", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "35620803994636505121581975698616513950631274399330479421894994077745439358719", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16067112068611935345405777398473590697557771847332060708439576094222338198471", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "30951410852793823879089954146089097398960021512400590702302072057244085482851", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "12276812530571706718883327429408937606433993990196477479137283768845651925632", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "1005720997907928078270582364714493795153874104882338052955430808063076949858", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "9432539242737555038548151091729061683667954775317685872580932218706555557193", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "48144091886915459055979584468520005030090312584666359911350135788203438616374", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "22450925011805294935671299553107487783565275020715623196582033415826118494250", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "28149684753511832845415363706857286714650553416094912809755106801999479476946", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "18984416380626314263255929984317834766173910658882331424593963038930830788043", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "48109900230700383470215980955557162221434150244334962594952526792484549956555", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40552374456316069085238865117122876015343906448762540220517521948829052005624", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "11526656751459710456216830286109707102065783384712439837497260759522383867216", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "49248089956591852389575948260598336493822205882486787578128339029114849462975", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "39372448145844604168126556278211607335704246721513006643620494230495045549807", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "47966243235447948688248399775419397863458889989786324048551282844713998917659", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "10668975765352037241237726978611677229760273562923261307501184329169652545894", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + 
"39507142063581063677057438880300974612179697912640172055190488555355094497813", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "15638468735129173771932513149618051284064233358516491068231561062569214055646", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "17353431924571233181583986362517131255570540171642397316569828436646526591477", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "1113151614126806805734456303419241558692549524321573289125509096780876830226", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "9984420714488068655842637440690086256585448839019918893900233218326556751996", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "35272971102390074745676114574216945810721613586667868556864593470832260078394", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "41799703040810741398331928206395151226132289978178889483708245720764196041488", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "23792232833587856422363054609833448590404370086820676679702830143363402049151", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "23400519399894650768950735628474893615670522380725820954092838104223338024750", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "27249609507238299376315178191811683563055616067419555246827701486258799476504", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "31460888976162844111664970745263960397533358231300142520392677905396078538854", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "50541111392701415266654707208995846306449617779331389560072929586596066181460", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16849402175596403120072436766532798469469816167994479896578935023255475833387", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "42001541432800637111202252596131403836308927391584450209899896684682298556999", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "7555678132801484327842862804202549162661136010229184979905673538160644182471", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16973896084242861113146860522670036493260039473958217656803484585799718732593", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "42770539705351951841624093062193706601998280964803890045059967451602239435405", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "32967596628774896802370445267419978233370846153035552840999780942126744328522", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "29885680756072762644563275495535410002799284565481770605563275267677242393534", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "6767565503770123230340610856538459014642791435134695578527084133222911388089", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "38034555907959016273994483610140537209819290169171432036495630086575144181148", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "35085508904879414050180652811899618801153816228166027426392174635054369363148", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "36657441819968029240636827111992013950645228690585008802597958876784513528202", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "1322579115549556048043817488502303168290944210869541203532619083870097554885", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16882844793625225132625646093399829180269843611668936510323341639265456546672", + ) + .map_err(|_| ()) + .unwrap(), + ], + ), + mds: MdsMatrix::from_elements(vec![ + F::from_str( + "26217937587563095239723870254092982918845276250263818911301829349969290592257", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + 
"34957250116750793652965160338790643891793701667018425215069105799959054123009", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123009", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "39326906381344642859585805381139474378267914375395728366952744024953935888385", + ) + .map_err(|_| ()) + .unwrap(), + ]), + alpha: Alpha::Exponent(5), + rounds: RoundNumbers { r_P: 56, r_F: 8 }, + optimized_mds: OptimizedMdsMatrices { + M_hat: SquareMatrix::new( + 1, + 1, + vec![F::from_str( + "39326906381344642859585805381139474378267914375395728366952744024953935888385", + ) + .map_err(|_| ()) + .unwrap()], + ), + v: Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123009", + ) + .map_err(|_| ()) + .unwrap()], + ), + w: Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123009", + ) + .map_err(|_| ()) + .unwrap()], + ), + M_prime: SquareMatrix::new( + 2, + 2, + vec![ + F::from_str("1").map_err(|_| ()).unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "39326906381344642859585805381139474378267914375395728366952744024953935888385", + ) + .map_err(|_| ()) + .unwrap(), + ], + ), + M_doubleprime: SquareMatrix::new( + 2, + 2, + vec![ + F::from_str( + "26217937587563095239723870254092982918845276250263818911301829349969290592257", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123009", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123010", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("1").map_err(|_| ()).unwrap(), + ], + ), + M_inverse: SquareMatrix::new( + 2, + 2, + vec![ + F::from_str("18").map_err(|_| ()).unwrap(), + F::from_str( + "52435875175126190479447740508185965837690552500527637822603658699938581184489", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "52435875175126190479447740508185965837690552500527637822603658699938581184489", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("36").map_err(|_| ()).unwrap(), + ], + ), + M_hat_inverse: SquareMatrix::new(1, 1, vec![F::from_str("4").map_err(|_| ()).unwrap()]), + M_00: F::from_str( + "26217937587563095239723870254092982918845276250263818911301829349969290592257", + ) + .map_err(|_| ()) + .unwrap(), + M_i: Matrix::new( + 2, + 2, + vec![ + F::from_str("1").map_err(|_| ()).unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "18844142621030738849315867221324380626272463491487726140277535101490822753350", + ) + .map_err(|_| ()) + .unwrap(), + ], + ), + v_collection: vec![ + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123009", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "48066218910532341272827095465837135351216339792150334670720020474943699419137", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "51343461108977728177792579247598758216071999323433312034632749143689860743169", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52162771658589074904033950193039163932285914206254056375610931310876401074177", + ) + .map_err(|_| ()) + .unwrap()], + ), + 
Matrix::new( + 1, + 1, + vec![F::from_str( + "52367599295991911585594292929399265361339392926959242460855476852673036156929", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52418806205342620755984378613489290718602762607135538982166613238122194927617", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52431607932680298048581900034511797057918605027179613112494397334484484620289", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52434808364514717371731280389767423642747565632190631645076343358575057043457", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435608472473322202518625478581330288954805783443386278221829864597700149249", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435808499462973410215461750784806950506615821256574936508201491103360925697", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435858506210386212139670818835676115894568330709872101079794397729776119809", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435871007897239412620723085848393407241556458073196392222692624386379918337", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435874133318952712740986152601572730078303489914027465008417181050530867969", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435874914674381037771051919289867560787490247874235233204848320216568605377", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435875110013238119028568360961941268464786937364287175253956105008078039729", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435875158847952389342947471379959695384111109736800160766233051205955398317", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435875171056630956921542248984464302113942152829928407144302287755424737964", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "13108968792764157739230385562246116075528485538207482101786075571938856184491", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "16386210991972587054669531517608020478304759509683779981097433567969359342251", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17205521541774694383529318006448496578998828002552854450925273066976985131691", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17410349179225221215744264628658615604172345125770123068382232941728891579051", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17461556088587852923798001284211145360465724406574440222746472910416868190891", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17474357815928510850811435448099277799539069226775519511337532902588862343851", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17477558247763675332564793989071310909307405431825789333485297900631860882091", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478358355722466453003133624314319186749489483088356789022239150142610516651", + ) + .map_err(|_| ()) + 
.unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478558382712164233112718533125071256110010495903998652906474462520297925291", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478608389459588678140114760327759273450140749107909118877533290614719777451", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478620891146444789396963817128431277785173312408886735370297997638325240491", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478624016568158817211176081328599278868931453234131139493489174394226606251", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478624797923587324164729147378641279139870988440442240524286968583201947691", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478624993262444450903117413891151779207605872242020015781986417130445783051", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478625042097158732587714480519279404224539593192414459596411279267256741891", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "17478625054305837303008863747176311310478773023430013070550017494801459481601", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "43696562644921102185338021317933552205887607631253231634590248398654300758785", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "50251047042574918405920310710622862429739816283209036275600306124617511078081", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "51889668141988372461065883058795189985702868446197987435852820556108313657905", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52299323416841735974852276145838271874693631486945225225915949163981014302861", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52401737235555076853298874417599042346941322247132034673431731315949189464100", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "13100434308888769213324718604399760586735330561783008668357932828987297366025", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "42602014958566835162916985032239414524951747015841480534042227232200760229891", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "23759472533423256410591181385106345090660574879092279589161471483034835353601", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "45266774514700456962233600727416060650933058095168798264243111895712644726785", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "50643600010019757100144205562993489541001178899187927933013521998882097070081", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "51987806383849582134621856771887846763518209100192710350206124524674460155905", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52323857977307038393241269574111436069147466650443905954504275156122550927361", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52407870875671402457896122774667333395554781038006704855578812813984573620225", + ) + 
.map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52428874100262493474059836074806307727156609634897404580847447228450079293441", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52434124906410266228100764399841051310057066784120079512164605832066455711745", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435437607947209416610996481099737205782181071425748244993895482970549816321", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435765783331445213738554501414408679713459643252165428201217895696573342465", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435847827177504163020444006493076548196279286208769724003048498878079224001", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435868338139018900340916382762743515316984196947920797953506149673455694385", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435873465879397584671034476830160257097160424632708566441120562372299811981", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "52435874747814492255753564000347014442542204481553905508563024165547010841380", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "13108968686953623063938391000086753610635551120388476377140756041386752710345", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "42604148553083048625570403131161162780926802155492847461237933035300624065971", + ) + .map_err(|_| ()) + .unwrap()], + ), + ], + w_hat_collection: vec![ + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123010", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123014", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123030", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123094", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054123350", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054124374", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054128470", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054144854", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054210390", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959054472534", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + 
"34957250116750793652965160338790643891793701667018425215069105799959055521110", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959059715414", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959076492630", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959143601494", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799959412036950", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799960485778774", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799964780746070", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105799981960615254", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105800050680091990", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105800325557998934", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105801425069626710", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105805823116137814", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105823415302182230", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069105893784046359894", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069106175259023070550", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069107301158929913174", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069111804758557283670", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069129819157066765654", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069201876751104693590", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215069490107127256405334", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215070643028631863252310", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + 
vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215075254714650290640214", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215093701458724000191830", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215167488435018838398294", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425215462636340198191224150", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425216643227960915602527574", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425221365594443785247741270", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425240255060375263828596054", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425315812924101178152015190", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018425618044379004835445691734", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018426826970198619464620397910", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018431662673477077981319222614", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018451005486590912048114521430", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018528376739046248315295716694", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667018837861748867593384020497750", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667020075801788152973658919621974", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667025027561945294494758516118870", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667044834602573860579156902106454", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667124062765088124916750446056790", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701667440975415145182267124621858134", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701668708626015373411668621325063510", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701673779228416286329274608137885014", + ) + .map_err(|_| ()) + .unwrap()], + ), + 
Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701694061638019937999698555389171030", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793701775191276434544681394344394315094", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793702099709830092971408177500414891350", + ) + .map_err(|_| ()) + .unwrap()], + ), + Matrix::new( + 1, + 1, + vec![F::from_str( + "34957250116750793652965160338790643891793703397784044726678315310124497196374", + ) + .map_err(|_| ()) + .unwrap()], + ), + ], + }, + optimized_arc: OptimizedArcMatrix::new( + 64, + 2, + vec![ + F::from_str( + "35174739893055911104493616029378130908017657834702731071195911003169112450229", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40230473166484073181383530626136429631051240172158259323832118663695222064618", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "44839971797550275719608927493602345619671796219131839128512847210863471539893", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "48611564661854252146943435349048894917868766542696555144699929229862960415826", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "40685922923883326639435460208454299274336241847181916712959630054792010757353", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "15664326207051802832937062272185115402684860397390155009854115660846132411502", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "20394588293088308460232807257084465383971903994422965392216628021702933756783", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "38072899122370719412288433912125900148799524273752876684473712699262778897654", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "49322204581160802920728528441668898478821361914594574984125707955891751339030", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "11546720471100324356589604343765555296510298649382063718189403564461171492863", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "36110500355375093385871324309901194334631381714439614527255597369650392849542", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "3756733109852649396584228778466872196362658360207834645541184763961143083412", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "20251607517129761800297717300935980834606026680555472569470067927197814159058", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "5239353656737021436485173734268526002414965521861452100759532275936501964328", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "21300582910621453970075769970860650148943961329345636625818996955442242699131", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "21774765045116391148518722696232516163747969484119487964534360379873683242474", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "32722020057382082921329627018209709241475223852823289284428837414575534351309", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "33794442154315934695685025730187059000350644284482738453096808368436585150449", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + 
"33356374759281451907993729503609407203078972387590420010346940719085856704600", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "16536766136174515568748693372704332161755684369233290734723311101392999671135", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "8155200347229826422648196061952312662995020845960673032039978897344425954944", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "41985238868657599524996779043300115443521336708491497391680518882421278207729", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "2063843216042971340057236348680603623021991141192327802421586125773152594966", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "19356199834277755257103978861422834427804178278770847736951931659601323041542", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "29407672119923084920129945390192575072371921764906862646182735700097611522286", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "23210564968750306632117638368081722417964237341952338866982652674742521848757", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "8919995046928353282196218782858660366384850716988440303338558248538040137799", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "19043131128131902196810002314394958855517905924691850071099334468401919991484", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "25372870640639931691894472507697938527971989245889491446494524936601040512930", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "28282246530559666639248292418150612779488812032571757345639609704112262688983", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "4783363587969676537071285052282175798266183157943408226768189618569177474295", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "8076640637611150676334686135767012090042738255090463961889429353861598391372", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "24983375671346681151260101878347553784741288686089866276697910000786751968362", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "31553063832521567697657525429909509843798042381162189406532027953053159867207", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "19397104375854813327815726265263423471142957336141256772200649485967743774458", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "16725724823339561221215536793048178667667680016815450011449579154320911341709", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "29033205376761356766961278958992230640915887129936846622211162138860547139883", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "38925727287210137779737577678032857716007633840925015412872642147337070197265", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| 
()).unwrap(), + F::from_str( + "13054654952792030513819618373016885831657266413665857293819158146045635067324", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "11180520252272331206279504788346032423267894347729733535225909704781507388571", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "7829946038227950055107016972108651685558876858831437098551135271673396051934", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "12701392201570649781537742561621512715498088373227761999475504609950261026304", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "23110137476929231098068588344230895315869860683132386592399445163537587938902", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "34695385667367102489869345055605545198093190593541852158466006956870675273759", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "12473323874126447804974523548032043130473262290187841938491511255009575125315", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "33244171987325668610688838482180693579479913109349074619790699084518395114481", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "27800393704277881477423349646564407645504532574407056486674972536299508573197", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "38745941841134632283836831790084585919738726247813020052320436485069697160134", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "4222833134514534672312302121120063621259334942025065036721108691056782902710", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "2485253499287676466887039925919700883876238304552596149921625457687783981441", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "25376906928048350822232005681485535724988673469685171030423527493301156576940", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "50342721676243810030803571465468547773052568618511402750629061381943937849683", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "50049567928418632800795449103016632592486776140307034477479632433833610837266", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "23363897037620979716239578483167524838213385084153054827737272694281707133655", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "42704349801891305094989489174556449021777358796153454937762648266288696875551", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "15237078269491179837022602841512824373125754318519659312626032716476523314968", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "7834537534669842390771199937679522681049234016864851395164092776167283317372", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "40845753633682491334484157504358463583021595599904903653237711863475293554700", + ) + .map_err(|_| ()) + .unwrap(), + 
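+ // editorial note: the interleaved zeros are expected; in the optimized
+ // round-constant layout each partial round needs only one nonzero
+ // constant for the width-2 state (a general property of optimized
+ // Poseidon parameters, assumed to hold for this generator's output).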
F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "30415461155839336041159980430244598228679743472933918944484463593999625263250", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "45446374854682768122749806214133032507584129094447282144188920433140420665338", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "44511327229393971496561348546051674374277199034903648290060869960535543887325", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "31765384944890114175945271689093192004082637054270389724882306488698748316643", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "11724725910654336698843657420057198898582989110234170277283715189016057454856", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "35333898667082217702239272910553250687762082480432049530115838213528273162930", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "13139429039691322549933623030015220158389702383341486257474725505081327320136", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str("0").map_err(|_| ()).unwrap(), + F::from_str( + "32967596628774896802370445267419978233370846153035552840999780942126744328522", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "29885680756072762644563275495535410002799284565481770605563275267677242393534", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "6767565503770123230340610856538459014642791435134695578527084133222911388089", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "38034555907959016273994483610140537209819290169171432036495630086575144181148", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "35085508904879414050180652811899618801153816228166027426392174635054369363148", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "36657441819968029240636827111992013950645228690585008802597958876784513528202", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "1322579115549556048043817488502303168290944210869541203532619083870097554885", + ) + .map_err(|_| ()) + .unwrap(), + F::from_str( + "16882844793625225132625646093399829180269843611668936510323341639265456546672", + ) + .map_err(|_| ()) + .unwrap(), + ], + ), + } + } + let rate1_config = rate_1::(); + let arkwork_mds = (0..rate1_config.mds.0 .0.n_rows) + .map(|i| rate1_config.mds.0 .0.row_vector(i).elements) + .collect::>>(); + let arkwork_ark = (0..rate1_config.arc.0.n_rows) + .map(|i| rate1_config.arc.0.row_vector(i).elements) + .collect::>>(); + PoseidonConfig { + full_rounds: rate1_config.rounds.r_F, + partial_rounds: rate1_config.rounds.r_P, + alpha: match rate1_config.alpha { + poseidon_parameters::Alpha::Exponent(alpha) => alpha as u64, + _ => panic!("Alpha is not exponent"), + }, + ark: arkwork_ark, + mds: arkwork_mds, + rate: 1, // only hash one at a time + capacity: 1, // ?? 
+ } +} + +pub trait PoseidonConfiguration: ark_ff::PrimeField { + fn poseidon_params() -> PoseidonConfig; +} + +impl PoseidonConfiguration for ark_bls12_381::Fr { + fn poseidon_params() -> PoseidonConfig { + poseidon_params_bls12381() + } +} + +impl PoseidonConfiguration for ark_bls12_377::Fr { + fn poseidon_params() -> PoseidonConfig { + poseidon_params() + } +} + +impl PoseidonConfiguration for ark_blst::Scalar { + fn poseidon_params() -> PoseidonConfig { + let config = poseidon_params_bls12381(); + let arks = config + .ark + .iter() + .map(|v| { + v.iter() + .map(|&e| ark_blst::Scalar::from(e)) + .collect::>() + }) + .collect(); + let mdss = config + .mds + .iter() + .map(|v| { + v.iter() + .map(|&e| ark_blst::Scalar::from(e)) + .collect::>() + }) + .collect(); + PoseidonConfig { + full_rounds: config.full_rounds, + partial_rounds: config.partial_rounds, + alpha: config.alpha, + ark: arks, + mds: mdss, + rate: config.rate, + capacity: config.capacity, + } + } +} lazy_static! { - pub static ref POSEIDON_PARAMETERS_FR_377: PoseidonParameters = poseidon_params(); + pub static ref POSEIDON_PARAMETERS_FR_377: PoseidonConfig = poseidon_params(); } diff --git a/src/poseidon_transcript.rs b/src/poseidon_transcript.rs index 479e354..8a08ae7 100644 --- a/src/poseidon_transcript.rs +++ b/src/poseidon_transcript.rs @@ -1,82 +1,118 @@ -use crate::group::{CompressedGroup, Fr}; - -use super::scalar::Scalar; -use ark_bls12_377::Bls12_377 as I; -use ark_poly_commit::multilinear_pc::data_structures::Commitment; -use ark_serialize::CanonicalSerialize; -// use ark_r1cs_std::prelude::*; -use ark_sponge::{ - poseidon::{PoseidonParameters, PoseidonSponge}, - CryptographicSponge, +use crate::transcript::Transcript; +use ark_crypto_primitives::sponge::{ + poseidon::{PoseidonConfig, PoseidonSponge}, + Absorb, CryptographicSponge, }; - +use ark_ec::{pairing::Pairing, CurveGroup}; +use ark_ff::PrimeField; +use ark_serialize::CanonicalSerialize; +use ark_serialize::Compress; #[derive(Clone)] /// TODO -pub struct PoseidonTranscript { - sponge: PoseidonSponge, - params: PoseidonParameters, +pub struct PoseidonTranscript { + sponge: PoseidonSponge, + params: PoseidonConfig, } -impl PoseidonTranscript { - /// create a new transcript - pub fn new(params: &PoseidonParameters) -> Self { - let sponge = PoseidonSponge::new(params); - PoseidonTranscript { - sponge, - params: params.clone(), - } - } +impl Transcript for PoseidonTranscript { + fn domain_sep(&mut self) { + self.sponge.absorb(&b"testudo".to_vec()); + } - pub fn new_from_state(&mut self, challenge: &Scalar) { - self.sponge = PoseidonSponge::new(&self.params); - self.append_scalar(challenge); - } + fn append(&mut self, _label: &'static [u8], point: &S) { + let mut buf = Vec::new(); + point + .serialize_with_mode(&mut buf, Compress::Yes) + .expect("serialization failed"); + self.sponge.absorb(&buf); + } - pub fn append_u64(&mut self, x: u64) { - self.sponge.absorb(&x); - } + fn challenge_scalar(&mut self, _label: &'static [u8]) -> FF { + self.sponge.squeeze_field_elements(1).remove(0) + } +} - pub fn append_bytes(&mut self, x: &Vec) { - self.sponge.absorb(x); +impl PoseidonTranscript { + /// create a new transcript + pub fn new(params: &PoseidonConfig) -> Self { + let sponge = PoseidonSponge::new(params); + PoseidonTranscript { + sponge, + params: params.clone(), } + } +} - pub fn append_scalar(&mut self, scalar: &Scalar) { - self.sponge.absorb(&scalar); - } +impl PoseidonTranscript { + pub fn new_from_state(&mut self, challenge: &F) { + self.sponge = 
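+ // rebuilding the sponge from the stored parameters resets it to its
+ // initial state; absorbing the previous challenge just below then
+ // seeds the fresh transcript so it can be re-derived deterministically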
PoseidonSponge::new(&self.params.clone()); + self.append_scalar(b"", challenge); + } +} - pub fn append_point(&mut self, point: &CompressedGroup) { - self.sponge.absorb(&point.0); - } +impl PoseidonTranscript { + pub fn append_u64(&mut self, _label: &'static [u8], x: u64) { + self.sponge.absorb(&x); + } - pub fn append_scalar_vector(&mut self, scalars: &[Scalar]) { - for scalar in scalars.iter() { - self.append_scalar(scalar); - } - } + pub fn append_bytes(&mut self, _label: &'static [u8], x: &Vec) { + self.sponge.absorb(x); + } - pub fn challenge_scalar(&mut self) -> Scalar { - self.sponge.squeeze_field_elements(1).remove(0) - } + pub fn append_scalar(&mut self, _label: &'static [u8], scalar: &T) { + self.sponge.absorb(&scalar); + } + + pub fn append_point(&mut self, _label: &'static [u8], point: &G) + where + G: CurveGroup, + { + let mut point_encoding = Vec::new(); + point + .serialize_with_mode(&mut point_encoding, Compress::Yes) + .unwrap(); + self.sponge.absorb(&point_encoding); + } - pub fn challenge_vector(&mut self, len: usize) -> Vec { - self.sponge.squeeze_field_elements(len) + pub fn append_scalar_vector( + &mut self, + _label: &'static [u8], + scalars: &[T], + ) { + for scalar in scalars.iter() { + self.append_scalar(b"", scalar); } -} + } -pub trait AppendToPoseidon { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript); + pub fn append_gt(&mut self, _label: &'static [u8], g_t: &E::TargetField) + where + E: Pairing, + { + let mut bytes = Vec::new(); + g_t.serialize_with_mode(&mut bytes, Compress::Yes).unwrap(); + self.append_bytes(b"", &bytes); + } } -impl AppendToPoseidon for CompressedGroup { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) { - transcript.append_point(self); - } +pub trait TranscriptWriter { + fn write_to_transcript(&self, transcript: &mut PoseidonTranscript); } -impl AppendToPoseidon for Commitment { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) { - let mut bytes = Vec::new(); - self.serialize(&mut bytes).unwrap(); - transcript.append_bytes(&bytes); - } +#[cfg(test)] +mod test { + use ark_bls12_381::Fr; + use ark_ff::PrimeField; + use poseidon_paramgen; + #[test] + fn poseidon_parameters_generation() { + print_modulus::(); + println!( + "{}", + poseidon_paramgen::poseidon_build::compile::(128, vec![2], Fr::MODULUS, true) + ); + } + + fn print_modulus() { + println!("modulus: {:?}", F::MODULUS); + } } diff --git a/src/product_tree.rs b/src/product_tree.rs index 7d76faf..32af26f 100644 --- a/src/product_tree.rs +++ b/src/product_tree.rs @@ -1,491 +1,477 @@ #![allow(dead_code)] -use crate::poseidon_transcript::PoseidonTranscript; - use super::dense_mlpoly::DensePolynomial; use super::dense_mlpoly::EqPolynomial; use super::math::Math; -use super::scalar::Scalar; use super::sumcheck::SumcheckInstanceProof; +use crate::poseidon_transcript::PoseidonTranscript; +use crate::transcript::Transcript; +use ark_crypto_primitives::sponge::Absorb; +use ark_ff::PrimeField; use ark_serialize::*; -use ark_std::One; #[derive(Debug)] -pub struct ProductCircuit { - left_vec: Vec, - right_vec: Vec, +pub struct ProductCircuit { + left_vec: Vec>, + right_vec: Vec>, } -impl ProductCircuit { - fn compute_layer( - inp_left: &DensePolynomial, - inp_right: &DensePolynomial, - ) -> (DensePolynomial, DensePolynomial) { - let len = inp_left.len() + inp_right.len(); - let outp_left = (0..len / 4) - .map(|i| inp_left[i] * inp_right[i]) - .collect::>(); - let outp_right = (len / 4..len / 2) - .map(|i| inp_left[i] * inp_right[i]) - 
.collect::>(); - - ( - DensePolynomial::new(outp_left), - DensePolynomial::new(outp_right), - ) - } - - pub fn new(poly: &DensePolynomial) -> Self { - let mut left_vec: Vec = Vec::new(); - let mut right_vec: Vec = Vec::new(); - - let num_layers = poly.len().log_2(); - let (outp_left, outp_right) = poly.split(poly.len() / 2); - - left_vec.push(outp_left); - right_vec.push(outp_right); - - for i in 0..num_layers - 1 { - let (outp_left, outp_right) = - ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]); - left_vec.push(outp_left); - right_vec.push(outp_right); - } - - ProductCircuit { - left_vec, - right_vec, - } +impl ProductCircuit { + fn compute_layer( + inp_left: &DensePolynomial, + inp_right: &DensePolynomial, + ) -> (DensePolynomial, DensePolynomial) { + let len = inp_left.len() + inp_right.len(); + let outp_left = (0..len / 4) + .map(|i| inp_left[i] * inp_right[i]) + .collect::>(); + let outp_right = (len / 4..len / 2) + .map(|i| inp_left[i] * inp_right[i]) + .collect::>(); + ( + DensePolynomial::new(outp_left), + DensePolynomial::new(outp_right), + ) + } + + pub fn new(poly: &DensePolynomial) -> Self { + let mut left_vec: Vec> = Vec::new(); + let mut right_vec: Vec> = Vec::new(); + + let num_layers = poly.len().log_2(); + let (outp_left, outp_right) = poly.split(poly.len() / 2); + + left_vec.push(outp_left); + right_vec.push(outp_right); + + for i in 0..num_layers - 1 { + let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]); + left_vec.push(outp_left); + right_vec.push(outp_right); } - pub fn evaluate(&self) -> Scalar { - let len = self.left_vec.len(); - assert_eq!(self.left_vec[len - 1].get_num_vars(), 0); - assert_eq!(self.right_vec[len - 1].get_num_vars(), 0); - self.left_vec[len - 1][0] * self.right_vec[len - 1][0] + ProductCircuit { + left_vec, + right_vec, } + } + + pub fn evaluate(&self) -> F { + let len = self.left_vec.len(); + assert_eq!(self.left_vec[len - 1].get_num_vars(), 0); + assert_eq!(self.right_vec[len - 1].get_num_vars(), 0); + self.left_vec[len - 1][0] * self.right_vec[len - 1][0] + } } -pub struct DotProductCircuit { - left: DensePolynomial, - right: DensePolynomial, - weight: DensePolynomial, +pub struct DotProductCircuit { + left: DensePolynomial, + right: DensePolynomial, + weight: DensePolynomial, } -impl DotProductCircuit { - pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self { - assert_eq!(left.len(), right.len()); - assert_eq!(left.len(), weight.len()); - DotProductCircuit { - left, - right, - weight, - } - } - - pub fn evaluate(&self) -> Scalar { - (0..self.left.len()) - .map(|i| self.left[i] * self.right[i] * self.weight[i]) - .sum() - } - - pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) { - let idx = self.left.len() / 2; - assert_eq!(idx * 2, self.left.len()); - let (l1, l2) = self.left.split(idx); - let (r1, r2) = self.right.split(idx); - let (w1, w2) = self.weight.split(idx); - ( - DotProductCircuit { - left: l1, - right: r1, - weight: w1, - }, - DotProductCircuit { - left: l2, - right: r2, - weight: w2, - }, - ) +impl DotProductCircuit { + pub fn new( + left: DensePolynomial, + right: DensePolynomial, + weight: DensePolynomial, + ) -> Self { + assert_eq!(left.len(), right.len()); + assert_eq!(left.len(), weight.len()); + DotProductCircuit { + left, + right, + weight, } + } + + pub fn evaluate(&self) -> F { + (0..self.left.len()) + .map(|i| self.left[i] * self.right[i] * self.weight[i]) + .sum() + } + + pub fn split(&mut self) -> (Self, Self) { + let 
idx = self.left.len() / 2; + assert_eq!(idx * 2, self.left.len()); + let (l1, l2) = self.left.split(idx); + let (r1, r2) = self.right.split(idx); + let (w1, w2) = self.weight.split(idx); + ( + DotProductCircuit { + left: l1, + right: r1, + weight: w1, + }, + DotProductCircuit { + left: l2, + right: r2, + weight: w2, + }, + ) + } } #[allow(dead_code)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct LayerProof { - pub proof: SumcheckInstanceProof, - pub claims: Vec, +pub struct LayerProof { + pub proof: SumcheckInstanceProof, + pub claims: Vec, } #[allow(dead_code)] -impl LayerProof { - pub fn verify( - &self, - claim: Scalar, - num_rounds: usize, - degree_bound: usize, - transcript: &mut PoseidonTranscript, - ) -> (Scalar, Vec) { - self.proof - .verify(claim, num_rounds, degree_bound, transcript) - .unwrap() - } +impl LayerProof { + pub fn verify( + &self, + claim: F, + num_rounds: usize, + degree_bound: usize, + transcript: &mut PoseidonTranscript, + ) -> (F, Vec) { + self + .proof + .verify(claim, num_rounds, degree_bound, transcript) + .unwrap() + } } #[allow(dead_code)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct LayerProofBatched { - pub proof: SumcheckInstanceProof, - pub claims_prod_left: Vec, - pub claims_prod_right: Vec, +pub struct LayerProofBatched { + pub proof: SumcheckInstanceProof, + pub claims_prod_left: Vec, + pub claims_prod_right: Vec, } #[allow(dead_code)] -impl LayerProofBatched { - pub fn verify( - &self, - claim: Scalar, - num_rounds: usize, - degree_bound: usize, - transcript: &mut PoseidonTranscript, - ) -> (Scalar, Vec) { - self.proof - .verify(claim, num_rounds, degree_bound, transcript) - .unwrap() - } +impl LayerProofBatched { + pub fn verify( + &self, + claim: F, + num_rounds: usize, + degree_bound: usize, + transcript: &mut PoseidonTranscript, + ) -> (F, Vec) { + self + .proof + .verify(claim, num_rounds, degree_bound, transcript) + .unwrap() + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct ProductCircuitEvalProof { - proof: Vec, +pub struct ProductCircuitEvalProof { + proof: Vec>, } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct ProductCircuitEvalProofBatched { - proof: Vec, - claims_dotp: (Vec, Vec, Vec), +pub struct ProductCircuitEvalProofBatched { + proof: Vec>, + claims_dotp: (Vec, Vec, Vec), } -impl ProductCircuitEvalProof { - #![allow(dead_code)] - pub fn prove( - circuit: &mut ProductCircuit, - transcript: &mut PoseidonTranscript, - ) -> (Self, Scalar, Vec) { - let mut proof: Vec = Vec::new(); - let num_layers = circuit.left_vec.len(); - - let mut claim = circuit.evaluate(); - let mut rand = Vec::new(); - for layer_id in (0..num_layers).rev() { - let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len(); - - let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals()); - assert_eq!(poly_C.len(), len / 2); - - let num_rounds_prod = poly_C.len().log_2(); - let comb_func_prod = - |poly_A_comp: &Scalar, poly_B_comp: &Scalar, poly_C_comp: &Scalar| -> Scalar { - (*poly_A_comp) * poly_B_comp * poly_C_comp - }; - let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic( - &claim, - num_rounds_prod, - &mut circuit.left_vec[layer_id], - &mut circuit.right_vec[layer_id], - &mut poly_C, - comb_func_prod, - transcript, - ); - - transcript.append_scalar(&claims_prod[0]); - transcript.append_scalar(&claims_prod[1]); - - // produce a random challenge - let r_layer = transcript.challenge_scalar(); - claim = 
claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]); - - let mut ext = vec![r_layer]; - ext.extend(rand_prod); - rand = ext; - - proof.push(LayerProof { - proof: proof_prod, - claims: claims_prod[0..claims_prod.len() - 1].to_vec(), - }); - } +impl ProductCircuitEvalProof { + #![allow(dead_code)] + pub fn prove( + circuit: &mut ProductCircuit, + transcript: &mut PoseidonTranscript, + ) -> (Self, F, Vec) { + let mut proof: Vec> = Vec::new(); + let num_layers = circuit.left_vec.len(); + + let mut claim = circuit.evaluate(); + let mut rand = Vec::new(); + for layer_id in (0..num_layers).rev() { + let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len(); + + let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals()); + assert_eq!(poly_C.len(), len / 2); + + let num_rounds_prod = poly_C.len().log_2(); + let comb_func_prod = |poly_A_comp: &F, poly_B_comp: &F, poly_C_comp: &F| -> F { + (*poly_A_comp) * poly_B_comp * poly_C_comp + }; + let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic( + &claim, + num_rounds_prod, + &mut circuit.left_vec[layer_id], + &mut circuit.right_vec[layer_id], + &mut poly_C, + comb_func_prod, + transcript, + ); + + transcript.append_scalar(b"", &claims_prod[0]); + transcript.append_scalar(b"", &claims_prod[1]); + + // produce a random challenge + let r_layer = transcript.challenge_scalar(b""); + claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]); + + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; + + proof.push(LayerProof { + proof: proof_prod, + claims: claims_prod[0..claims_prod.len() - 1].to_vec(), + }); + } - (ProductCircuitEvalProof { proof }, claim, rand) + (ProductCircuitEvalProof { proof }, claim, rand) + } + + pub fn verify(&self, eval: F, len: usize, transcript: &mut PoseidonTranscript) -> (F, Vec) { + let num_layers = len.log_2(); + let mut claim = eval; + let mut rand: Vec = Vec::new(); + //let mut num_rounds = 0; + assert_eq!(self.proof.len(), num_layers); + for (num_rounds, i) in (0..num_layers).enumerate() { + let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript); + + let claims_prod = &self.proof[i].claims; + transcript.append_scalar(b"", &claims_prod[0]); + transcript.append_scalar(b"", &claims_prod[1]); + + assert_eq!(rand.len(), rand_prod.len()); + let eq: F = (0..rand.len()) + .map(|i| rand[i] * rand_prod[i] + (F::one() - rand[i]) * (F::one() - rand_prod[i])) + .product(); + assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last); + + // produce a random challenge + let r_layer = transcript.challenge_scalar(b""); + claim = (F::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1]; + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; } - pub fn verify( - &self, - eval: Scalar, - len: usize, - transcript: &mut PoseidonTranscript, - ) -> (Scalar, Vec) { - let num_layers = len.log_2(); - let mut claim = eval; - let mut rand: Vec = Vec::new(); - //let mut num_rounds = 0; - assert_eq!(self.proof.len(), num_layers); - for (num_rounds, i) in (0..num_layers).enumerate() { - let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript); - - let claims_prod = &self.proof[i].claims; - transcript.append_scalar(&claims_prod[0]); - transcript.append_scalar(&claims_prod[1]); - - assert_eq!(rand.len(), rand_prod.len()); - let eq: Scalar = (0..rand.len()) - .map(|i| { - rand[i] * rand_prod[i] - + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i]) - }) - .product(); - 
assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last); - - // produce a random challenge - let r_layer = transcript.challenge_scalar(); - claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1]; - let mut ext = vec![r_layer]; - ext.extend(rand_prod); - rand = ext; + (claim, rand) + } +} + +impl ProductCircuitEvalProofBatched { + pub fn prove( + prod_circuit_vec: &mut Vec<&mut ProductCircuit>, + dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>, + transcript: &mut PoseidonTranscript, + ) -> (Self, Vec) { + assert!(!prod_circuit_vec.is_empty()); + + let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new()); + + let mut proof_layers: Vec> = Vec::new(); + let num_layers = prod_circuit_vec[0].left_vec.len(); + let mut claims_to_verify = (0..prod_circuit_vec.len()) + .map(|i| prod_circuit_vec[i].evaluate()) + .collect::>(); + let mut rand = Vec::new(); + for layer_id in (0..num_layers).rev() { + // prepare paralell instance that share poly_C first + let len = prod_circuit_vec[0].left_vec[layer_id].len() + + prod_circuit_vec[0].right_vec[layer_id].len(); + + let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals()); + assert_eq!(poly_C_par.len(), len / 2); + + let num_rounds_prod = poly_C_par.len().log_2(); + let comb_func_prod = |poly_A_comp: &F, poly_B_comp: &F, poly_C_comp: &F| -> F { + (*poly_A_comp) * poly_B_comp * poly_C_comp + }; + + let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new(); + for prod_circuit in prod_circuit_vec.iter_mut() { + poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]); + poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id]) + } + let poly_vec_par = ( + &mut poly_A_batched_par, + &mut poly_B_batched_par, + &mut poly_C_par, + ); + + // prepare sequential instances that don't share poly_C + let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); + if layer_id == 0 && !dotp_circuit_vec.is_empty() { + // add additional claims + for item in dotp_circuit_vec.iter() { + claims_to_verify.push(item.evaluate()); + assert_eq!(len / 2, item.left.len()); + assert_eq!(len / 2, item.right.len()); + assert_eq!(len / 2, item.weight.len()); } - (claim, rand) + for dotp_circuit in dotp_circuit_vec.iter_mut() { + poly_A_batched_seq.push(&mut dotp_circuit.left); + poly_B_batched_seq.push(&mut dotp_circuit.right); + poly_C_batched_seq.push(&mut dotp_circuit.weight); + } + } + let poly_vec_seq = ( + &mut poly_A_batched_seq, + &mut poly_B_batched_seq, + &mut poly_C_batched_seq, + ); + + // produce a fresh set of coeffs and a joint claim + let coeff_vec = transcript.challenge_scalar_vec(b"", claims_to_verify.len()); + let claim = (0..claims_to_verify.len()) + .map(|i| claims_to_verify[i] * coeff_vec[i]) + .sum(); + + let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched( + &claim, + num_rounds_prod, + poly_vec_par, + poly_vec_seq, + &coeff_vec, + comb_func_prod, + transcript, + ); + + let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod; + for i in 0..prod_circuit_vec.len() { + transcript.append_scalar(b"", &claims_prod_left[i]); + transcript.append_scalar(b"", &claims_prod_right[i]); + } + + if layer_id == 0 && !dotp_circuit_vec.is_empty() { + let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp; + for i 
in 0..dotp_circuit_vec.len() { + transcript.append_scalar(b"", &claims_dotp_left[i]); + transcript.append_scalar(b"", &claims_dotp_right[i]); + transcript.append_scalar(b"", &claims_dotp_weight[i]); + } + claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight); + } + + // produce a random challenge to condense two claims into a single claim + let r_layer = transcript.challenge_scalar(b""); + + claims_to_verify = (0..prod_circuit_vec.len()) + .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) + .collect::>(); + + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; + + proof_layers.push(LayerProofBatched { + proof, + claims_prod_left, + claims_prod_right, + }); } -} -impl ProductCircuitEvalProofBatched { - pub fn prove( - prod_circuit_vec: &mut Vec<&mut ProductCircuit>, - dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>, - transcript: &mut PoseidonTranscript, - ) -> (Self, Vec) { - assert!(!prod_circuit_vec.is_empty()); - - let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new()); - - let mut proof_layers: Vec = Vec::new(); - let num_layers = prod_circuit_vec[0].left_vec.len(); - let mut claims_to_verify = (0..prod_circuit_vec.len()) - .map(|i| prod_circuit_vec[i].evaluate()) - .collect::>(); - let mut rand = Vec::new(); - for layer_id in (0..num_layers).rev() { - // prepare paralell instance that share poly_C first - let len = prod_circuit_vec[0].left_vec[layer_id].len() - + prod_circuit_vec[0].right_vec[layer_id].len(); - - let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals()); - assert_eq!(poly_C_par.len(), len / 2); - - let num_rounds_prod = poly_C_par.len().log_2(); - let comb_func_prod = - |poly_A_comp: &Scalar, poly_B_comp: &Scalar, poly_C_comp: &Scalar| -> Scalar { - (*poly_A_comp) * poly_B_comp * poly_C_comp - }; - - let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new(); - let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new(); - for prod_circuit in prod_circuit_vec.iter_mut() { - poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]); - poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id]) - } - let poly_vec_par = ( - &mut poly_A_batched_par, - &mut poly_B_batched_par, - &mut poly_C_par, - ); - - // prepare sequential instances that don't share poly_C - let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); - let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); - let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); - if layer_id == 0 && !dotp_circuit_vec.is_empty() { - // add additional claims - for item in dotp_circuit_vec.iter() { - claims_to_verify.push(item.evaluate()); - assert_eq!(len / 2, item.left.len()); - assert_eq!(len / 2, item.right.len()); - assert_eq!(len / 2, item.weight.len()); - } - - for dotp_circuit in dotp_circuit_vec.iter_mut() { - poly_A_batched_seq.push(&mut dotp_circuit.left); - poly_B_batched_seq.push(&mut dotp_circuit.right); - poly_C_batched_seq.push(&mut dotp_circuit.weight); - } - } - let poly_vec_seq = ( - &mut poly_A_batched_seq, - &mut poly_B_batched_seq, - &mut poly_C_batched_seq, - ); - - // produce a fresh set of coeffs and a joint claim - let coeff_vec = transcript.challenge_vector(claims_to_verify.len()); - let claim = (0..claims_to_verify.len()) - .map(|i| claims_to_verify[i] * coeff_vec[i]) - .sum(); - - let (proof, rand_prod, claims_prod, claims_dotp) = - SumcheckInstanceProof::prove_cubic_batched( - &claim, - num_rounds_prod, - poly_vec_par, - 
poly_vec_seq, - &coeff_vec, - comb_func_prod, - transcript, - ); - - let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod; - for i in 0..prod_circuit_vec.len() { - transcript.append_scalar(&claims_prod_left[i]); - transcript.append_scalar(&claims_prod_right[i]); - } - - if layer_id == 0 && !dotp_circuit_vec.is_empty() { - let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp; - for i in 0..dotp_circuit_vec.len() { - transcript.append_scalar(&claims_dotp_left[i]); - transcript.append_scalar(&claims_dotp_right[i]); - transcript.append_scalar(&claims_dotp_weight[i]); - } - claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight); - } - - // produce a random challenge to condense two claims into a single claim - let r_layer = transcript.challenge_scalar(); - - claims_to_verify = (0..prod_circuit_vec.len()) - .map(|i| { - claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]) - }) - .collect::>(); - - let mut ext = vec![r_layer]; - ext.extend(rand_prod); - rand = ext; - - proof_layers.push(LayerProofBatched { - proof, - claims_prod_left, - claims_prod_right, - }); + ( + ProductCircuitEvalProofBatched { + proof: proof_layers, + claims_dotp: claims_dotp_final, + }, + rand, + ) + } + + pub fn verify( + &self, + claims_prod_vec: &[F], + claims_dotp_vec: &[F], + len: usize, + transcript: &mut PoseidonTranscript, + ) -> (Vec, Vec, Vec) { + let num_layers = len.log_2(); + let mut rand: Vec = Vec::new(); + //let mut num_rounds = 0; + assert_eq!(self.proof.len(), num_layers); + + let mut claims_to_verify = claims_prod_vec.to_owned(); + let mut claims_to_verify_dotp: Vec = Vec::new(); + for (num_rounds, i) in (0..num_layers).enumerate() { + if i == num_layers - 1 { + claims_to_verify.extend(claims_dotp_vec); + } + + // produce random coefficients, one for each instance + let coeff_vec: Vec = transcript.challenge_scalar_vec(b"", claims_to_verify.len()); + + // produce a joint claim + let claim = (0..claims_to_verify.len()) + .map(|i| claims_to_verify[i] * coeff_vec[i]) + .sum(); + + let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript); + + let claims_prod_left = &self.proof[i].claims_prod_left; + let claims_prod_right = &self.proof[i].claims_prod_right; + assert_eq!(claims_prod_left.len(), claims_prod_vec.len()); + assert_eq!(claims_prod_right.len(), claims_prod_vec.len()); + + for i in 0..claims_prod_vec.len() { + transcript.append_scalar(b"", &claims_prod_left[i]); + transcript.append_scalar(b"", &claims_prod_right[i]); + } + + assert_eq!(rand.len(), rand_prod.len()); + let eq: F = (0..rand.len()) + .map(|i| rand[i] * rand_prod[i] + (F::one() - rand[i]) * (F::one() - rand_prod[i])) + .product(); + let mut claim_expected: F = (0..claims_prod_vec.len()) + .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq)) + .sum(); + + // add claims from the dotp instances + if i == num_layers - 1 { + let num_prod_instances = claims_prod_vec.len(); + let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; + for i in 0..claims_dotp_left.len() { + transcript.append_scalar(b"", &claims_dotp_left[i]); + transcript.append_scalar(b"", &claims_dotp_right[i]); + transcript.append_scalar(b"", &claims_dotp_weight[i]); + + claim_expected += coeff_vec[i + num_prod_instances] + * claims_dotp_left[i] + * claims_dotp_right[i] + * claims_dotp_weight[i]; } + } - ( - ProductCircuitEvalProofBatched { - proof: proof_layers, - claims_dotp: claims_dotp_final, - }, - rand, - ) - } + 
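// A reading of the check below: `eq` is the multilinear equality polynomial
//   eq(rand, rand_prod) = prod_j (rand[j]*rand_prod[j] + (1 - rand[j])*(1 - rand_prod[j])),
// i.e. the eq table that the prover bound as poly_C in the cubic sumcheck for
// this layer. Each product instance i therefore contributes
//   coeff_vec[i] * claims_prod_left[i] * claims_prod_right[i] * eq
// to the expected final claim, and after the assert the two claims per
// instance are folded into one by evaluating the line through (0, left) and
// (1, right) at the random point r_layer.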
assert_eq!(claim_expected, claim_last); + + // produce a random challenge + let r_layer = transcript.challenge_scalar(b""); + + claims_to_verify = (0..claims_prod_left.len()) + .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) + .collect(); - pub fn verify( - &self, - claims_prod_vec: &[Scalar], - claims_dotp_vec: &[Scalar], - len: usize, - transcript: &mut PoseidonTranscript, - ) -> (Vec, Vec, Vec) { - let num_layers = len.log_2(); - let mut rand: Vec = Vec::new(); - //let mut num_rounds = 0; - assert_eq!(self.proof.len(), num_layers); - - let mut claims_to_verify = claims_prod_vec.to_owned(); - let mut claims_to_verify_dotp: Vec = Vec::new(); - for (num_rounds, i) in (0..num_layers).enumerate() { - if i == num_layers - 1 { - claims_to_verify.extend(claims_dotp_vec); - } - - // produce random coefficients, one for each instance - let coeff_vec = transcript.challenge_vector(claims_to_verify.len()); - - // produce a joint claim - let claim = (0..claims_to_verify.len()) - .map(|i| claims_to_verify[i] * coeff_vec[i]) - .sum(); - - let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript); - - let claims_prod_left = &self.proof[i].claims_prod_left; - let claims_prod_right = &self.proof[i].claims_prod_right; - assert_eq!(claims_prod_left.len(), claims_prod_vec.len()); - assert_eq!(claims_prod_right.len(), claims_prod_vec.len()); - - for i in 0..claims_prod_vec.len() { - transcript.append_scalar(&claims_prod_left[i]); - transcript.append_scalar(&claims_prod_right[i]); - } - - assert_eq!(rand.len(), rand_prod.len()); - let eq: Scalar = (0..rand.len()) - .map(|i| { - rand[i] * rand_prod[i] - + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i]) - }) - .product(); - let mut claim_expected: Scalar = (0..claims_prod_vec.len()) - .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq)) - .sum(); - - // add claims from the dotp instances - if i == num_layers - 1 { - let num_prod_instances = claims_prod_vec.len(); - let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; - for i in 0..claims_dotp_left.len() { - transcript.append_scalar(&claims_dotp_left[i]); - transcript.append_scalar(&claims_dotp_right[i]); - transcript.append_scalar(&claims_dotp_weight[i]); - - claim_expected += coeff_vec[i + num_prod_instances] - * claims_dotp_left[i] - * claims_dotp_right[i] - * claims_dotp_weight[i]; - } - } - - assert_eq!(claim_expected, claim_last); - - // produce a random challenge - let r_layer = transcript.challenge_scalar(); - - claims_to_verify = (0..claims_prod_left.len()) - .map(|i| { - claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]) - }) - .collect(); - - // add claims to verify for dotp circuit - if i == num_layers - 1 { - let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; - - for i in 0..claims_dotp_vec.len() / 2 { - // combine left claims - let claim_left = claims_dotp_left[2 * i] - + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]); - - let claim_right = claims_dotp_right[2 * i] - + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]); - - let claim_weight = claims_dotp_weight[2 * i] - + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]); - claims_to_verify_dotp.push(claim_left); - claims_to_verify_dotp.push(claim_right); - claims_to_verify_dotp.push(claim_weight); - } - } - - let mut ext = vec![r_layer]; - ext.extend(rand_prod); - rand = ext; + // add claims to verify for dotp 
circuit + if i == num_layers - 1 { + let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; + + for i in 0..claims_dotp_vec.len() / 2 { + // combine left claims + let claim_left = claims_dotp_left[2 * i] + + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]); + + let claim_right = claims_dotp_right[2 * i] + + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]); + + let claim_weight = claims_dotp_weight[2 * i] + + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]); + claims_to_verify_dotp.push(claim_left); + claims_to_verify_dotp.push(claim_right); + claims_to_verify_dotp.push(claim_weight); } - (claims_to_verify, claims_to_verify_dotp, rand) + } + + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; } + (claims_to_verify, claims_to_verify_dotp, rand) + } } diff --git a/src/r1csinstance.rs b/src/r1csinstance.rs index f9e89cb..c961b43 100644 --- a/src/r1csinstance.rs +++ b/src/r1csinstance.rs @@ -1,391 +1,386 @@ -use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; -use crate::transcript::AppendToTranscript; - use super::dense_mlpoly::DensePolynomial; use super::errors::ProofVerifyError; use super::math::Math; -use super::random::RandomTape; -use super::scalar::Scalar; use super::sparse_mlpoly::{ - MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment, - SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial, + MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment, + SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial, }; use super::timer::Timer; -use ark_ff::Field; +use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter}; + +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; +use ark_ec::CurveGroup; +use ark_ff::PrimeField; use ark_serialize::*; -use ark_std::{One, UniformRand, Zero}; use digest::{ExtendableOutput, Input}; - -use merlin::Transcript; use sha3::Shake256; #[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)] -pub struct R1CSInstance { - num_cons: usize, - num_vars: usize, - num_inputs: usize, - A: SparseMatPolynomial, - B: SparseMatPolynomial, - C: SparseMatPolynomial, +pub struct R1CSInstance { + num_cons: usize, + num_vars: usize, + num_inputs: usize, + A: SparseMatPolynomial, + B: SparseMatPolynomial, + C: SparseMatPolynomial, } -pub struct R1CSCommitmentGens { - gens: SparseMatPolyCommitmentGens, +pub struct R1CSCommitmentGens { + gens: SparseMatPolyCommitmentGens, } -impl R1CSCommitmentGens { - pub fn new( - label: &'static [u8], - num_cons: usize, - num_vars: usize, - num_inputs: usize, - num_nz_entries: usize, - ) -> R1CSCommitmentGens { - assert!(num_inputs < num_vars); - let num_poly_vars_x = num_cons.log_2(); - let num_poly_vars_y = (2 * num_vars).log_2(); - let gens = SparseMatPolyCommitmentGens::new( - label, - num_poly_vars_x, - num_poly_vars_y, - num_nz_entries, - 3, - ); - R1CSCommitmentGens { gens } - } -} - -#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct R1CSCommitment { +impl R1CSCommitmentGens { + pub fn setup( + label: &'static [u8], num_cons: usize, num_vars: usize, num_inputs: usize, - comm: SparseMatPolyCommitment, + num_nz_entries: usize, + ) -> Self { + assert!(num_inputs < num_vars); + let num_poly_vars_x = num_cons.log_2(); + let num_poly_vars_y = (2 * num_vars).log_2(); + let gens = SparseMatPolyCommitmentGens::setup( + label, + num_poly_vars_x, + num_poly_vars_y, + num_nz_entries, + 3, + 
); + R1CSCommitmentGens { gens } + } } -impl AppendToTranscript for R1CSCommitment { - fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) { - transcript.append_u64(b"num_cons", self.num_cons as u64); - transcript.append_u64(b"num_vars", self.num_vars as u64); - transcript.append_u64(b"num_inputs", self.num_inputs as u64); - self.comm.append_to_transcript(b"comm", transcript); - } +#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct R1CSCommitment { + num_cons: usize, + num_vars: usize, + num_inputs: usize, + comm: SparseMatPolyCommitment, } -impl AppendToPoseidon for R1CSCommitment { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) { - transcript.append_u64(self.num_cons as u64); - transcript.append_u64(self.num_vars as u64); - transcript.append_u64(self.num_inputs as u64); - self.comm.append_to_poseidon(transcript); - } +impl TranscriptWriter for R1CSCommitment { + fn write_to_transcript(&self, transcript: &mut PoseidonTranscript) { + transcript.append_u64(b"", self.num_cons as u64); + transcript.append_u64(b"", self.num_vars as u64); + transcript.append_u64(b"", self.num_inputs as u64); + self.comm.write_to_transcript(transcript); + } } -pub struct R1CSDecommitment { - dense: MultiSparseMatPolynomialAsDense, +pub struct R1CSDecommitment { + dense: MultiSparseMatPolynomialAsDense, } -impl R1CSCommitment { - pub fn get_num_cons(&self) -> usize { - self.num_cons - } +impl R1CSCommitment { + pub fn get_num_cons(&self) -> usize { + self.num_cons + } - pub fn get_num_vars(&self) -> usize { - self.num_vars - } + pub fn get_num_vars(&self) -> usize { + self.num_vars + } - pub fn get_num_inputs(&self) -> usize { - self.num_inputs - } + pub fn get_num_inputs(&self) -> usize { + self.num_inputs + } } -impl R1CSInstance { - pub fn new( - num_cons: usize, - num_vars: usize, - num_inputs: usize, - A: &[(usize, usize, Scalar)], - B: &[(usize, usize, Scalar)], - C: &[(usize, usize, Scalar)], - ) -> R1CSInstance { - Timer::print(&format!("number_of_constraints {}", num_cons)); - Timer::print(&format!("number_of_variables {}", num_vars)); - Timer::print(&format!("number_of_inputs {}", num_inputs)); - Timer::print(&format!("number_non-zero_entries_A {}", A.len())); - Timer::print(&format!("number_non-zero_entries_B {}", B.len())); - Timer::print(&format!("number_non-zero_entries_C {}", C.len())); - - // check that num_cons is a power of 2 - assert_eq!(num_cons.next_power_of_two(), num_cons); - - // check that num_vars is a power of 2 - assert_eq!(num_vars.next_power_of_two(), num_vars); - - // check that number_inputs + 1 <= num_vars - assert!(num_inputs < num_vars); - - // no errors, so create polynomials - let num_poly_vars_x = num_cons.log_2(); - let num_poly_vars_y = (2 * num_vars).log_2(); - - let mat_A = (0..A.len()) - .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2)) - .collect::>(); - let mat_B = (0..B.len()) - .map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2)) - .collect::>(); - let mat_C = (0..C.len()) - .map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2)) - .collect::>(); - - let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A); - let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B); - let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C); - - R1CSInstance { - num_cons, - num_vars, - num_inputs, - A: poly_A, - B: poly_B, - C: poly_C, - } - } - - pub fn get_num_vars(&self) -> usize { - self.num_vars - } - - pub fn get_num_cons(&self) -> usize { 
- self.num_cons - } - - pub fn get_num_inputs(&self) -> usize { - self.num_inputs - } - - pub fn get_digest(&self) -> Vec { - let mut bytes = Vec::new(); - self.serialize(&mut bytes).unwrap(); - let mut shake = Shake256::default(); - shake.input(bytes); - let mut reader = shake.xof_result(); - let mut buf = [0u8; 256]; - reader.read_exact(&mut buf).unwrap(); - buf.to_vec() - } - - pub fn produce_synthetic_r1cs( - num_cons: usize, - num_vars: usize, - num_inputs: usize, - ) -> (R1CSInstance, Vec, Vec) { - Timer::print(&format!("number_of_constraints {}", num_cons)); - Timer::print(&format!("number_of_variables {}", num_vars)); - Timer::print(&format!("number_of_inputs {}", num_inputs)); - - let mut rng = ark_std::rand::thread_rng(); - - // assert num_cons and num_vars are power of 2 - assert_eq!((num_cons.log_2()).pow2(), num_cons); - assert_eq!((num_vars.log_2()).pow2(), num_vars); - - // num_inputs + 1 <= num_vars - assert!(num_inputs < num_vars); - - // z is organized as [vars,1,io] - let size_z = num_vars + num_inputs + 1; - - // produce a random satisfying assignment - let Z = { - let mut Z: Vec = (0..size_z) - .map(|_i| Scalar::rand(&mut rng)) - .collect::>(); - Z[num_vars] = Scalar::one(); // set the constant term to 1 - Z - }; - - // three sparse matrices - let mut A: Vec = Vec::new(); - let mut B: Vec = Vec::new(); - let mut C: Vec = Vec::new(); - let one = Scalar::one(); - for i in 0..num_cons { - let A_idx = i % size_z; - let B_idx = (i + 2) % size_z; - A.push(SparseMatEntry::new(i, A_idx, one)); - B.push(SparseMatEntry::new(i, B_idx, one)); - let AB_val = Z[A_idx] * Z[B_idx]; - - let C_idx = (i + 3) % size_z; - let C_val = Z[C_idx]; - - if C_val == Scalar::zero() { - C.push(SparseMatEntry::new(i, num_vars, AB_val)); - } else { - C.push(SparseMatEntry::new( - i, - C_idx, - AB_val * C_val.inverse().unwrap(), - )); - } - } - - Timer::print(&format!("number_non-zero_entries_A {}", A.len())); - Timer::print(&format!("number_non-zero_entries_B {}", B.len())); - Timer::print(&format!("number_non-zero_entries_C {}", C.len())); - - let num_poly_vars_x = num_cons.log_2(); - let num_poly_vars_y = (2 * num_vars).log_2(); - let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A); - let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B); - let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C); - - let inst = R1CSInstance { - num_cons, - num_vars, - num_inputs, - A: poly_A, - B: poly_B, - C: poly_C, - }; - - assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..])); - - (inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec()) - } - - pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool { - assert_eq!(vars.len(), self.num_vars); - assert_eq!(input.len(), self.num_inputs); - - let z = { - let mut z = vars.to_vec(); - z.extend(&vec![Scalar::one()]); - z.extend(input); - z - }; - - // verify if Az * Bz - Cz = [0...] 
- let Az = self - .A - .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); - let Bz = self - .B - .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); - let Cz = self - .C - .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); - - assert_eq!(Az.len(), self.num_cons); - assert_eq!(Bz.len(), self.num_cons); - assert_eq!(Cz.len(), self.num_cons); - let res: usize = (0..self.num_cons) - .map(|i| usize::from(Az[i] * Bz[i] != Cz[i])) - .sum(); - - res == 0 - } - - pub fn multiply_vec( - &self, - num_rows: usize, - num_cols: usize, - z: &[Scalar], - ) -> (DensePolynomial, DensePolynomial, DensePolynomial) { - assert_eq!(num_rows, self.num_cons); - assert_eq!(z.len(), num_cols); - assert!(num_cols > self.num_vars); - ( - DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)), - DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)), - DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)), - ) - } - - pub fn compute_eval_table_sparse( - &self, - num_rows: usize, - num_cols: usize, - evals: &[Scalar], - ) -> (Vec, Vec, Vec) { - assert_eq!(num_rows, self.num_cons); - assert!(num_cols > self.num_vars); - - let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols); - let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols); - let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols); - - (evals_A, evals_B, evals_C) +impl R1CSInstance { + pub fn new( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + A: &[(usize, usize, F)], + B: &[(usize, usize, F)], + C: &[(usize, usize, F)], + ) -> Self { + Timer::print(&format!("number_of_constraints {}", num_cons)); + Timer::print(&format!("number_of_variables {}", num_vars)); + Timer::print(&format!("number_of_inputs {}", num_inputs)); + Timer::print(&format!("number_non-zero_entries_A {}", A.len())); + Timer::print(&format!("number_non-zero_entries_B {}", B.len())); + Timer::print(&format!("number_non-zero_entries_C {}", C.len())); + + // check that num_cons is a power of 2 + assert_eq!(num_cons.next_power_of_two(), num_cons); + + // check that num_vars is a power of 2 + assert_eq!(num_vars.next_power_of_two(), num_vars); + + // check that number_inputs + 1 <= num_vars + assert!(num_inputs < num_vars); + + // no errors, so create polynomials + let num_poly_vars_x = num_cons.log_2(); + let num_poly_vars_y = (2 * num_vars).log_2(); + + let mat_A = (0..A.len()) + .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2)) + .collect::>(); + let mat_B = (0..B.len()) + .map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2)) + .collect::>(); + let mat_C = (0..C.len()) + .map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2)) + .collect::>(); + + let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A); + let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B); + let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C); + + R1CSInstance { + num_cons, + num_vars, + num_inputs, + A: poly_A, + B: poly_B, + C: poly_C, } - - pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) { - let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry); - (evals[0], evals[1], evals[2]) + } + + pub fn get_num_vars(&self) -> usize { + self.num_vars + } + + pub fn get_num_cons(&self) -> usize { + self.num_cons + } + + pub fn get_num_inputs(&self) -> usize { + self.num_inputs + } + + pub fn get_digest(&self) -> Vec { + let mut bytes = 
Vec::new(); + self.serialize_with_mode(&mut bytes, Compress::Yes).unwrap(); + let mut shake = Shake256::default(); + shake.input(bytes); + let mut reader = shake.xof_result(); + let mut buf = [0u8; 256]; + reader.read_exact(&mut buf).unwrap(); + buf.to_vec() + } + + pub fn produce_synthetic_r1cs( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + ) -> (Self, Vec, Vec) { + Timer::print(&format!("number_of_constraints {}", num_cons)); + Timer::print(&format!("number_of_variables {}", num_vars)); + Timer::print(&format!("number_of_inputs {}", num_inputs)); + + let mut rng = ark_std::rand::thread_rng(); + + // assert num_cons and num_vars are power of 2 + assert_eq!((num_cons.log_2()).pow2(), num_cons); + assert_eq!((num_vars.log_2()).pow2(), num_vars); + + // num_inputs + 1 <= num_vars + assert!(num_inputs < num_vars); + + // z is organized as [vars,1,io] + let size_z = num_vars + num_inputs + 1; + + // produce a random satisfying assignment + let Z = { + let mut Z: Vec = (0..size_z).map(|_i| F::rand(&mut rng)).collect::>(); + Z[num_vars] = F::one(); // set the constant term to 1 + Z + }; + + // three sparse matrices + let mut A: Vec> = Vec::new(); + let mut B: Vec> = Vec::new(); + let mut C: Vec> = Vec::new(); + let one = F::one(); + for i in 0..num_cons { + let A_idx = i % size_z; + let B_idx = (i + 2) % size_z; + A.push(SparseMatEntry::new(i, A_idx, one)); + B.push(SparseMatEntry::new(i, B_idx, one)); + let AB_val = Z[A_idx] * Z[B_idx]; + + let C_idx = (i + 3) % size_z; + let C_val = Z[C_idx]; + + if C_val == F::zero() { + C.push(SparseMatEntry::new(i, num_vars, AB_val)); + } else { + C.push(SparseMatEntry::new( + i, + C_idx, + AB_val * C_val.inverse().unwrap(), + )); + } } - pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) { - let (comm, dense) = - SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens); - let r1cs_comm = R1CSCommitment { - num_cons: self.num_cons, - num_vars: self.num_vars, - num_inputs: self.num_inputs, - comm, - }; - - let r1cs_decomm = R1CSDecommitment { dense }; - - (r1cs_comm, r1cs_decomm) - } + Timer::print(&format!("number_non-zero_entries_A {}", A.len())); + Timer::print(&format!("number_non-zero_entries_B {}", B.len())); + Timer::print(&format!("number_non-zero_entries_C {}", C.len())); + + let num_poly_vars_x = num_cons.log_2(); + let num_poly_vars_y = (2 * num_vars).log_2(); + let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A); + let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B); + let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C); + + let inst = R1CSInstance { + num_cons, + num_vars, + num_inputs, + A: poly_A, + B: poly_B, + C: poly_C, + }; + + assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..])); + + (inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec()) + } + + pub fn is_sat(&self, vars: &[F], input: &[F]) -> bool { + assert_eq!(vars.len(), self.num_vars); + assert_eq!(input.len(), self.num_inputs); + + let z = { + let mut z = vars.to_vec(); + z.extend(&vec![F::one()]); + z.extend(input); + z + }; + + // verify if Az * Bz - Cz = [0...] 
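// Concretely, with z laid out as [vars, 1, io], satisfiability means
//   Az[i] * Bz[i] == Cz[i] for every row i in 0..num_cons.
// For instance, the constraint (Z1 + Z2) * I0 - Z3 = 0 from the tests further
// below puts ones in row 0 of A at the columns of Z1 and Z2, a one in B at
// the column of I0, and a one in C at the column of Z3.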
+ let Az = self + .A + .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); + let Bz = self + .B + .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); + let Cz = self + .C + .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z); + + assert_eq!(Az.len(), self.num_cons); + assert_eq!(Bz.len(), self.num_cons); + assert_eq!(Cz.len(), self.num_cons); + let res: usize = (0..self.num_cons) + .map(|i| usize::from(Az[i] * Bz[i] != Cz[i])) + .sum(); + + res == 0 + } + + pub fn multiply_vec( + &self, + num_rows: usize, + num_cols: usize, + z: &[F], + ) -> (DensePolynomial, DensePolynomial, DensePolynomial) { + assert_eq!(num_rows, self.num_cons); + assert_eq!(z.len(), num_cols); + assert!(num_cols > self.num_vars); + ( + DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)), + DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)), + DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)), + ) + } + + pub fn compute_eval_table_sparse( + &self, + num_rows: usize, + num_cols: usize, + evals: &[F], + ) -> (Vec, Vec, Vec) { + assert_eq!(num_rows, self.num_cons); + assert!(num_cols > self.num_vars); + + let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols); + let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols); + let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols); + + (evals_A, evals_B, evals_C) + } + + pub fn evaluate(&self, rx: &[F], ry: &[F]) -> (F, F, F) { + let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry); + (evals[0], evals[1], evals[2]) + } + + pub fn commit>( + &self, + gens: &R1CSCommitmentGens, + ) -> (R1CSCommitment, R1CSDecommitment) { + // Since the matrices A, B and C are sparse, this produces a combined dense + // polynomial from their non-zero entries, which is what we commit to. This + // represents the computational commitment. + let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens); + let r1cs_comm = R1CSCommitment { + num_cons: self.num_cons, + num_vars: self.num_vars, + num_inputs: self.num_inputs, + comm, + }; + + // The decommitment is used by the prover to convince the verifier + // that the received openings of A, B and C are correct.
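// A typical flow for this pair (a sketch in terms of the APIs in this file):
//   let (r1cs_comm, r1cs_decomm) = inst.commit(&gens);  // one-time preprocessing
//   // ... the SNARK reduces A, B and C to evaluations at a point (rx, ry) ...
//   let eval_proof = R1CSEvalProof::prove(&r1cs_decomm, &rx, &ry, &evals, &gens, &mut transcript);
//   eval_proof.verify(&r1cs_comm, &rx, &ry, &evals, &gens, &mut transcript)?;
// Only the commitment is sent to the verifier; the decommitment stays with
// the prover.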
+ let r1cs_decomm = R1CSDecommitment { dense }; + + (r1cs_comm, r1cs_decomm) + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct R1CSEvalProof { - proof: SparseMatPolyEvalProof, +pub struct R1CSEvalProof { + proof: SparseMatPolyEvalProof, } -impl R1CSEvalProof { - pub fn prove( - decomm: &R1CSDecommitment, - rx: &[Scalar], // point at which the polynomial is evaluated - ry: &[Scalar], - evals: &(Scalar, Scalar, Scalar), - gens: &R1CSCommitmentGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - ) -> R1CSEvalProof { - let timer = Timer::new("R1CSEvalProof::prove"); - let proof = SparseMatPolyEvalProof::prove( - &decomm.dense, - rx, - ry, - &[evals.0, evals.1, evals.2], - &gens.gens, - transcript, - random_tape, - ); - timer.stop(); - - R1CSEvalProof { proof } - } - - pub fn verify( - &self, - comm: &R1CSCommitment, - rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated - ry: &[Scalar], - evals: &(Scalar, Scalar, Scalar), - gens: &R1CSCommitmentGens, - transcript: &mut PoseidonTranscript, - ) -> Result<(), ProofVerifyError> { - self.proof.verify( - &comm.comm, - rx, - ry, - &[evals.0, evals.1, evals.2], - &gens.gens, - transcript, - ) - } +impl R1CSEvalProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + pub fn prove( + decomm: &R1CSDecommitment, + rx: &[E::ScalarField], // point at which the polynomial is evaluated + ry: &[E::ScalarField], + evals: &(E::ScalarField, E::ScalarField, E::ScalarField), + gens: &R1CSCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Self { + let timer = Timer::new("R1CSEvalProof::prove"); + let proof = SparseMatPolyEvalProof::prove( + &decomm.dense, + rx, + ry, + &[evals.0, evals.1, evals.2], + &gens.gens, + transcript, + ); + timer.stop(); + + R1CSEvalProof { proof } + } + + pub fn verify( + &self, + comm: &R1CSCommitment, + rx: &[E::ScalarField], // point at which the R1CS matrix polynomials are evaluated + ry: &[E::ScalarField], + evals: &(E::ScalarField, E::ScalarField, E::ScalarField), + gens: &R1CSCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Result<(), ProofVerifyError> { + self.proof.verify( + &comm.comm, + rx, + ry, + &[evals.0, evals.1, evals.2], + &gens.gens, + transcript, + ) + } } diff --git a/src/r1csproof.rs b/src/r1csproof.rs index 2fc8b8e..9253852 100644 --- a/src/r1csproof.rs +++ b/src/r1csproof.rs @@ -1,542 +1,633 @@ #![allow(clippy::too_many_arguments)] -use crate::constraints::{VerifierCircuit, VerifierConfig}; -use crate::group::{Fq, Fr}; +use super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyCommitmentGens}; +use super::errors::ProofVerifyError; +use crate::constraints::{R1CSVerificationCircuit, SumcheckVerificationCircuit, VerifierConfig}; use crate::math::Math; -use crate::parameters::poseidon_params; -use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; +use crate::mipp::MippProof; +use crate::poseidon_transcript::PoseidonTranscript; +use crate::sqrt_pst::Polynomial; use crate::sumcheck::SumcheckInstanceProof; -use ark_bls12_377::Bls12_377 as I; -use ark_bw6_761::BW6_761 as P; -use ark_poly::MultilinearExtension; -use ark_poly_commit::multilinear_pc::data_structures::{Commitment, Proof}; -use ark_poly_commit::multilinear_pc::MultilinearPC; +use crate::transcript::Transcript; +use crate::unipoly::UniPoly; +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; -use super::commitments::MultiCommitGens; -use 
super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyCommitmentGens}; -use super::errors::ProofVerifyError; +use ark_poly_commit::multilinear_pc::data_structures::{Commitment, Proof}; +use itertools::Itertools; use super::r1csinstance::R1CSInstance; -use super::scalar::Scalar; use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial}; use super::timer::Timer; -use ark_crypto_primitives::{CircuitSpecificSetupSNARK, SNARK}; +use ark_snark::{CircuitSpecificSetupSNARK, SNARK}; + +use crate::ark_std::UniformRand; +use ark_groth16::{Groth16, ProvingKey, VerifyingKey}; -use ark_groth16::Groth16; -use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem}; use ark_serialize::*; use ark_std::{One, Zero}; -use std::time::Instant; - #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct R1CSProof { - // The PST commitment to the multilinear extension of the witness. - comm: Commitment, - sc_proof_phase1: SumcheckInstanceProof, - claims_phase2: (Scalar, Scalar, Scalar, Scalar), - sc_proof_phase2: SumcheckInstanceProof, - eval_vars_at_ry: Scalar, - proof_eval_vars_at_ry: Proof, - rx: Vec, - ry: Vec, - // The transcript state after the satisfiability proof was computed. - pub transcript_sat_state: Scalar, -} -#[derive(Clone)] -pub struct R1CSSumcheckGens { - gens_1: MultiCommitGens, - gens_3: MultiCommitGens, - gens_4: MultiCommitGens, +pub struct R1CSProof { + // The PST commitment to the multilinear extension of the witness. + pub comm: Commitment, + sc_proof_phase1: SumcheckInstanceProof, + claims_phase2: ( + E::ScalarField, + E::ScalarField, + E::ScalarField, + E::ScalarField, + ), + sc_proof_phase2: SumcheckInstanceProof, + pub eval_vars_at_ry: E::ScalarField, + pub proof_eval_vars_at_ry: Proof, + rx: Vec, + ry: Vec, + // The transcript state after the satisfiability proof was computed. + pub transcript_sat_state: E::ScalarField, + pub initial_state: E::ScalarField, + pub t: E::TargetField, + pub mipp_proof: MippProof, } -// TODO: fix passing gens_1_ref -impl R1CSSumcheckGens { - pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self { - let gens_1 = gens_1_ref.clone(); - let gens_3 = MultiCommitGens::new(3, label); - let gens_4 = MultiCommitGens::new(4, label); - - R1CSSumcheckGens { - gens_1, - gens_3, - gens_4, - } - } +#[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)] +pub struct R1CSVerifierProof { + comm: Commitment, + circuit_proof: ark_groth16::Proof, + initial_state: E::ScalarField, + transcript_sat_state: E::ScalarField, + eval_vars_at_ry: E::ScalarField, + proof_eval_vars_at_ry: Proof, + t: E::TargetField, + mipp_proof: MippProof, } #[derive(Clone)] -pub struct R1CSGens { - gens_sc: R1CSSumcheckGens, - gens_pc: PolyCommitmentGens, +pub struct CircuitGens { + pk: ProvingKey, + vk: VerifyingKey, } -impl R1CSGens { - pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self { - let num_poly_vars = num_vars.log_2(); - let gens_pc = PolyCommitmentGens::new(num_poly_vars, label); - let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1); - R1CSGens { gens_sc, gens_pc } - } +impl CircuitGens +where + E: Pairing, +{ + // Performs the circuit-specific setup required by Groth16 for the sumcheck + // circuit. This is done by filling the struct with dummy elements, ensuring + // the sizes are correct so the setup matches the circuit that will be proved. 
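// Groth16 key generation depends only on the shape of the constraint system,
// never on concrete witness values, so random placeholders of the correct
// dimensions produce the same keys as a real instance would. The dimensions
// mirror the protocol: the first sumcheck runs num_cons.log_2() rounds with
// degree-3 round polynomials (four evaluations each), and the second runs
// num_vars.log_2() + 1 rounds with degree-2 round polynomials (three
// evaluations each).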
+ pub fn setup( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + poseidon: PoseidonConfig, + ) -> Self { + let mut rng = rand::thread_rng(); + + let uni_polys_round1 = (0..num_cons.log_2()) + .map(|_i| { + UniPoly::::from_evals(&[ + E::ScalarField::rand(&mut rng), + E::ScalarField::rand(&mut rng), + E::ScalarField::rand(&mut rng), + E::ScalarField::rand(&mut rng), + ]) + }) + .collect::>>(); + + let uni_polys_round2 = (0..num_vars.log_2() + 1) + .map(|_i| { + UniPoly::::from_evals(&[ + E::ScalarField::rand(&mut rng), + E::ScalarField::rand(&mut rng), + E::ScalarField::rand(&mut rng), + ]) + }) + .collect::>>(); + + let circuit = R1CSVerificationCircuit { + num_vars: num_vars, + num_cons: num_cons, + input: (0..num_inputs) + .map(|_i| E::ScalarField::rand(&mut rng)) + .collect_vec(), + input_as_sparse_poly: SparsePolynomial::new( + num_vars.log_2(), + (0..num_inputs + 1) + .map(|i| SparsePolyEntry::new(i, E::ScalarField::rand(&mut rng))) + .collect::>>(), + ), + evals: ( + E::ScalarField::zero(), + E::ScalarField::zero(), + E::ScalarField::zero(), + ), + params: poseidon, + prev_challenge: E::ScalarField::zero(), + claims_phase2: ( + E::ScalarField::zero(), + E::ScalarField::zero(), + E::ScalarField::zero(), + E::ScalarField::zero(), + ), + eval_vars_at_ry: E::ScalarField::zero(), + sc_phase1: SumcheckVerificationCircuit { + polys: uni_polys_round1, + }, + sc_phase2: SumcheckVerificationCircuit { + polys: uni_polys_round2, + }, + claimed_rx: (0..num_cons.log_2()) + .map(|_i| E::ScalarField::rand(&mut rng)) + .collect_vec(), + claimed_ry: (0..num_vars.log_2() + 1) + .map(|_i| E::ScalarField::rand(&mut rng)) + .collect_vec(), + claimed_transcript_sat_state: E::ScalarField::zero(), + }; + let (pk, vk) = Groth16::::setup(circuit.clone(), &mut rng).unwrap(); + CircuitGens { pk, vk } + } } -impl R1CSProof { - fn prove_phase_one( - num_rounds: usize, - evals_tau: &mut DensePolynomial, - evals_Az: &mut DensePolynomial, - evals_Bz: &mut DensePolynomial, - evals_Cz: &mut DensePolynomial, - transcript: &mut PoseidonTranscript, - ) -> (SumcheckInstanceProof, Vec, Vec) { - let comb_func = |poly_tau_comp: &Scalar, - poly_A_comp: &Scalar, - poly_B_comp: &Scalar, - poly_C_comp: &Scalar| - -> Scalar { - (*poly_tau_comp) * ((*poly_A_comp) * poly_B_comp - poly_C_comp) - }; - - let (sc_proof_phase_one, r, claims) = SumcheckInstanceProof::prove_cubic_with_additive_term( - &Scalar::zero(), // claim is zero - num_rounds, - evals_tau, - evals_Az, - evals_Bz, - evals_Cz, - comb_func, - transcript, - ); - - (sc_proof_phase_one, r, claims) - } - - fn prove_phase_two( - num_rounds: usize, - claim: &Scalar, - evals_z: &mut DensePolynomial, - evals_ABC: &mut DensePolynomial, - transcript: &mut PoseidonTranscript, - ) -> (SumcheckInstanceProof, Vec, Vec) { - let comb_func = - |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { (*poly_A_comp) * poly_B_comp }; - let (sc_proof_phase_two, r, claims) = SumcheckInstanceProof::prove_quad( - claim, num_rounds, evals_z, evals_ABC, comb_func, transcript, - ); - - (sc_proof_phase_two, r, claims) - } - - fn protocol_name() -> &'static [u8] { - b"R1CS proof" - } - - pub fn prove( - inst: &R1CSInstance, - vars: Vec, - input: &[Scalar], - gens: &R1CSGens, - transcript: &mut PoseidonTranscript, - ) -> (R1CSProof, Vec, Vec) { - let timer_prove = Timer::new("R1CSProof::prove"); - // we currently require the number of |inputs| + 1 to be at most number of vars - assert!(input.len() < vars.len()); - - // create the multilinear witness polynomial from the satisfying assiment 
- let poly_vars = DensePolynomial::new(vars.clone()); - - let timer_commit = Timer::new("polycommit"); - // commitment to the satisfying witness polynomial - let comm = MultilinearPC::::commit(&gens.gens_pc.ck, &poly_vars); - comm.append_to_poseidon(transcript); - timer_commit.stop(); - - let c = transcript.challenge_scalar(); - transcript.new_from_state(&c); - - transcript.append_scalar_vector(input); - - let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one"); - - // append input to variables to create a single vector z - let z = { - let num_inputs = input.len(); - let num_vars = vars.len(); - let mut z = vars; - z.extend(&vec![Scalar::one()]); // add constant term in z - z.extend(input); - z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros - z - }; - - // derive the verifier's challenge tau - let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2()); - let tau = transcript.challenge_vector(num_rounds_x); - // compute the initial evaluation table for R(\tau, x) - let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals()); - let (mut poly_Az, mut poly_Bz, mut poly_Cz) = - inst.multiply_vec(inst.get_num_cons(), z.len(), &z); - - let (sc_proof_phase1, rx, _claims_phase1) = R1CSProof::prove_phase_one( - num_rounds_x, - &mut poly_tau, - &mut poly_Az, - &mut poly_Bz, - &mut poly_Cz, - transcript, - ); - assert_eq!(poly_tau.len(), 1); - assert_eq!(poly_Az.len(), 1); - assert_eq!(poly_Bz.len(), 1); - assert_eq!(poly_Cz.len(), 1); - timer_sc_proof_phase1.stop(); - - let (tau_claim, Az_claim, Bz_claim, Cz_claim) = - (&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]); - let prod_Az_Bz_claims = (*Az_claim) * Bz_claim; - - // prove the final step of sum-check #1 - let taus_bound_rx = tau_claim; - let _claim_post_phase1 = ((*Az_claim) * Bz_claim - Cz_claim) * taus_bound_rx; - - let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two"); - // combine the three claims into a single claim - let r_A = transcript.challenge_scalar(); - let r_B = transcript.challenge_scalar(); - let r_C = transcript.challenge_scalar(); - let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim; - - let evals_ABC = { - // compute the initial evaluation table for R(\tau, x) - let evals_rx = EqPolynomial::new(rx.clone()).evals(); - let (evals_A, evals_B, evals_C) = - inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx); - - assert_eq!(evals_A.len(), evals_B.len()); - assert_eq!(evals_A.len(), evals_C.len()); - (0..evals_A.len()) - .map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i]) - .collect::>() - }; - - // another instance of the sum-check protocol - let (sc_proof_phase2, ry, _claims_phase2) = R1CSProof::prove_phase_two( - num_rounds_y, - &claim_phase2, - &mut DensePolynomial::new(z), - &mut DensePolynomial::new(evals_ABC), - transcript, - ); - timer_sc_proof_phase2.stop(); - - // TODO: modify the polynomial evaluation in Spartan to be consistent - // with the evaluation in ark-poly-commit so that reversing is not needed - // anymore - let timmer_opening = Timer::new("polyopening"); - let mut dummy = ry[1..].to_vec().clone(); - dummy.reverse(); - let proof_eval_vars_at_ry = MultilinearPC::::open(&gens.gens_pc.ck, &poly_vars, &dummy); - println!( - "proof size (no of quotients): {:?}", - proof_eval_vars_at_ry.proofs.len() - ); - timmer_opening.stop(); - - let timer_polyeval = Timer::new("polyeval"); - let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]); - timer_polyeval.stop(); - - timer_prove.stop(); - 
- let c = transcript.challenge_scalar(); - - ( - R1CSProof { - comm, - sc_proof_phase1, - claims_phase2: (*Az_claim, *Bz_claim, *Cz_claim, prod_Az_Bz_claims), - sc_proof_phase2, - eval_vars_at_ry, - proof_eval_vars_at_ry, - rx: rx.clone(), - ry: ry.clone(), - transcript_sat_state: c, - }, - rx, - ry, - ) - } - - pub fn verify_groth16( - &self, - num_vars: usize, - num_cons: usize, - input: &[Scalar], - evals: &(Scalar, Scalar, Scalar), - transcript: &mut PoseidonTranscript, - gens: &R1CSGens, - ) -> Result<(u128, u128, u128), ProofVerifyError> { - self.comm.append_to_poseidon(transcript); - - let c = transcript.challenge_scalar(); - - let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())]; - //remaining inputs - input_as_sparse_poly_entries.extend( - (0..input.len()) - .map(|i| SparsePolyEntry::new(i + 1, input[i])) - .collect::>(), - ); - - let n = num_vars; - let input_as_sparse_poly = - SparsePolynomial::new(n.log_2() as usize, input_as_sparse_poly_entries); - - let config = VerifierConfig { - num_vars, - num_cons, - input: input.to_vec(), - evals: *evals, - params: poseidon_params(), - prev_challenge: c, - claims_phase2: self.claims_phase2, - polys_sc1: self.sc_proof_phase1.polys.clone(), - polys_sc2: self.sc_proof_phase2.polys.clone(), - eval_vars_at_ry: self.eval_vars_at_ry, - input_as_sparse_poly, - // rx: self.rx.clone(), - ry: self.ry.clone(), - transcript_sat_state: self.transcript_sat_state, - }; - - let mut rng = ark_std::test_rng(); - - let prove_inner = Timer::new("proveinnercircuit"); - let start = Instant::now(); - let circuit = VerifierCircuit::new(&config, &mut rng).unwrap(); - let dp1 = start.elapsed().as_millis(); - prove_inner.stop(); - - let start = Instant::now(); - let (pk, vk) = Groth16::
<P>
::setup(circuit.clone(), &mut rng).unwrap(); - let ds = start.elapsed().as_millis(); - - let prove_outer = Timer::new("proveoutercircuit"); - let start = Instant::now(); - let proof = Groth16::
<P>
::prove(&pk, circuit, &mut rng).unwrap(); - let dp2 = start.elapsed().as_millis(); - prove_outer.stop(); - - let start = Instant::now(); - let is_verified = Groth16::
<P>
::verify(&vk, &[], &proof).unwrap(); - assert!(is_verified); - - let timer_verification = Timer::new("commitverification"); - let mut dummy = self.ry[1..].to_vec(); - // TODO: ensure ark-poly-commit and Spartan produce consistent results - // when evaluating a polynomial at a given point so this reverse is not - // needed. - dummy.reverse(); - - // Verifies the proof of opening against the result of evaluating the - // witness polynomial at point ry. - let res = MultilinearPC::::check( - &gens.gens_pc.vk, - &self.comm, - &dummy, - self.eval_vars_at_ry, - &self.proof_eval_vars_at_ry, - ); - - timer_verification.stop(); - assert!(res == true); - let dv = start.elapsed().as_millis(); - - Ok((ds, dp1 + dp2, dv)) - } - - // Helper function to find the number of constraint in the circuit which - // requires executing it. - pub fn circuit_size( - &self, - num_vars: usize, - num_cons: usize, - input: &[Scalar], - evals: &(Scalar, Scalar, Scalar), - transcript: &mut PoseidonTranscript, - _gens: &R1CSGens, - ) -> Result { - self.comm.append_to_poseidon(transcript); - - let c = transcript.challenge_scalar(); - - let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())]; - //remaining inputs - input_as_sparse_poly_entries.extend( - (0..input.len()) - .map(|i| SparsePolyEntry::new(i + 1, input[i])) - .collect::>(), - ); - - let n = num_vars; - let input_as_sparse_poly = - SparsePolynomial::new(n.log_2() as usize, input_as_sparse_poly_entries); - - let config = VerifierConfig { - num_vars, - num_cons, - input: input.to_vec(), - evals: *evals, - params: poseidon_params(), - prev_challenge: c, - claims_phase2: self.claims_phase2, - polys_sc1: self.sc_proof_phase1.polys.clone(), - polys_sc2: self.sc_proof_phase2.polys.clone(), - eval_vars_at_ry: self.eval_vars_at_ry, - input_as_sparse_poly, - // rx: self.rx.clone(), - ry: self.ry.clone(), - transcript_sat_state: self.transcript_sat_state, - }; - - let mut rng = ark_std::test_rng(); - let circuit = VerifierCircuit::new(&config, &mut rng).unwrap(); - - let nc_inner = verify_constraints_inner(circuit.clone(), &num_cons); - - let nc_outer = verify_constraints_outer(circuit, &num_cons); - Ok(nc_inner + nc_outer) - } +#[derive(Clone)] +pub struct R1CSGens { + gens_pc: PolyCommitmentGens, + gens_gc: CircuitGens, } -fn verify_constraints_outer(circuit: VerifierCircuit, _num_cons: &usize) -> usize { - let cs = ConstraintSystem::::new_ref(); - circuit.generate_constraints(cs.clone()).unwrap(); - assert!(cs.is_satisfied().unwrap()); - cs.num_constraints() +impl R1CSGens { + // Performs the setup for the polynomial commitment PST and for Groth16. 
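// Example (a sketch; the curve and sizes are illustrative):
//   use crate::parameters::PoseidonConfiguration;
//   let params = ark_bls12_377::Fr::poseidon_params();
//   let gens = R1CSGens::<ark_bls12_377::Bls12_377>::setup(
//     b"gens", num_cons, num_vars, num_inputs, params,
//   );
// A single setup serves both halves of the proof: gens_pc for the sqrt-PST
// commitment to the witness and gens_gc for the Groth16 proof of the
// sumcheck-verifier circuit.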
+ pub fn setup( + label: &'static [u8], + num_cons: usize, + num_vars: usize, + num_inputs: usize, + poseidon: PoseidonConfig, + ) -> Self { + let num_poly_vars = num_vars.log_2(); + let gens_pc = PolyCommitmentGens::setup(num_poly_vars, label); + let gens_gc = CircuitGens::setup(num_cons, num_vars, num_inputs, poseidon); + R1CSGens { gens_pc, gens_gc } + } } -fn verify_constraints_inner(circuit: VerifierCircuit, _num_cons: &usize) -> usize { - let cs = ConstraintSystem::::new_ref(); - circuit - .inner_circuit - .generate_constraints(cs.clone()) - .unwrap(); - assert!(cs.is_satisfied().unwrap()); - cs.num_constraints() +impl R1CSProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + fn prove_phase_one( + num_rounds: usize, + evals_tau: &mut DensePolynomial, + evals_Az: &mut DensePolynomial, + evals_Bz: &mut DensePolynomial, + evals_Cz: &mut DensePolynomial, + transcript: &mut PoseidonTranscript, + ) -> ( + SumcheckInstanceProof, + Vec, + Vec, + ) { + let comb_func = + |poly_tau_comp: &E::ScalarField, + poly_A_comp: &E::ScalarField, + poly_B_comp: &E::ScalarField, + poly_C_comp: &E::ScalarField| + -> E::ScalarField { (*poly_tau_comp) * ((*poly_A_comp) * poly_B_comp - poly_C_comp) }; + + let (sc_proof_phase_one, r, claims) = SumcheckInstanceProof::prove_cubic_with_additive_term( + &E::ScalarField::zero(), // claim is zero + num_rounds, + evals_tau, + evals_Az, + evals_Bz, + evals_Cz, + comb_func, + transcript, + ); + + (sc_proof_phase_one, r, claims) + } + + fn prove_phase_two( + num_rounds: usize, + claim: &E::ScalarField, + evals_z: &mut DensePolynomial, + evals_ABC: &mut DensePolynomial, + transcript: &mut PoseidonTranscript, + ) -> ( + SumcheckInstanceProof, + Vec, + Vec, + ) { + let comb_func = |poly_A_comp: &E::ScalarField, + poly_B_comp: &E::ScalarField| + -> E::ScalarField { (*poly_A_comp) * poly_B_comp }; + let (sc_proof_phase_two, r, claims) = SumcheckInstanceProof::prove_quad( + claim, num_rounds, evals_z, evals_ABC, comb_func, transcript, + ); + + (sc_proof_phase_two, r, claims) + } + + // Proves the R1CS instance inst is satisfiable given the assignment + // vars. 
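// In outline: phase 1 proves, for a verifier-chosen tau,
//   0 = sum_x eq(tau, x) * (Az(x) * Bz(x) - Cz(x)),
// reducing satisfiability to claims about Az, Bz and Cz at a random point rx;
// phase 2 batches those claims with random r_A, r_B, r_C and proves
//   r_A*Az(rx) + r_B*Bz(rx) + r_C*Cz(rx)
//     = sum_y (r_A*A(rx, y) + r_B*B(rx, y) + r_C*C(rx, y)) * Z(y),
// leaving a single evaluation of the witness polynomial at ry, which is then
// opened against the sqrt-PST commitment together with the MIPP proof.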
+ pub fn prove( + inst: &R1CSInstance, + vars: Vec, + input: &[E::ScalarField], + gens: &R1CSGens, + transcript: &mut PoseidonTranscript, + ) -> (Self, Vec, Vec) { + let timer_prove = Timer::new("R1CSProof::prove"); + // we currently require the number of |inputs| + 1 to be at most number of vars + assert!(input.len() < vars.len()); + + // create the multilinear witness polynomial from the satisfying assiment + // expressed as the list of sqrt-sized polynomials + let mut pl = Polynomial::from_evaluations(&vars.clone()); + + let timer_commit = Timer::new("polycommit"); + + // commitment list to the satisfying witness polynomial list + let (comm_list, t) = pl.commit(&gens.gens_pc.ck); + + transcript.append_gt::(b"", &t); + + timer_commit.stop(); + + let initial_state = transcript.challenge_scalar(b""); + transcript.new_from_state(&initial_state); + + transcript.append_scalar_vector(b"", &input); + + let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one"); + + // append input to variables to create a single vector z + let z = { + let num_inputs = input.len(); + let num_vars = vars.len(); + let mut z = vars; + z.extend(&vec![E::ScalarField::one()]); // add constant term in z + z.extend(input); + z.extend(&vec![E::ScalarField::zero(); num_vars - num_inputs - 1]); // we will pad with zeros + z + }; + + // derive the verifier's challenge tau + let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2()); + let tau = transcript.challenge_scalar_vec(b"", num_rounds_x); + // compute the initial evaluation table for R(\tau, x) + let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals()); + let (mut poly_Az, mut poly_Bz, mut poly_Cz) = + inst.multiply_vec(inst.get_num_cons(), z.len(), &z); + + let (sc_proof_phase1, rx, _claims_phase1) = R1CSProof::::prove_phase_one( + num_rounds_x, + &mut poly_tau, + &mut poly_Az, + &mut poly_Bz, + &mut poly_Cz, + transcript, + ); + assert_eq!(poly_tau.len(), 1); + assert_eq!(poly_Az.len(), 1); + assert_eq!(poly_Bz.len(), 1); + assert_eq!(poly_Cz.len(), 1); + timer_sc_proof_phase1.stop(); + + let (tau_claim, Az_claim, Bz_claim, Cz_claim) = + (&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]); + let prod_Az_Bz_claims = (*Az_claim) * Bz_claim; + + // prove the final step of sum-check #1 + let taus_bound_rx = tau_claim; + let _claim_post_phase1 = ((*Az_claim) * Bz_claim - Cz_claim) * taus_bound_rx; + + let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two"); + // combine the three claims into a single claim + let r_A: E::ScalarField = transcript.challenge_scalar(b""); + let r_B: E::ScalarField = transcript.challenge_scalar(b""); + let r_C: E::ScalarField = transcript.challenge_scalar(b""); + let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim; + + let evals_ABC = { + // compute the initial evaluation table for R(\tau, x) + let evals_rx = EqPolynomial::new(rx.clone()).evals(); + let (evals_A, evals_B, evals_C) = + inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx); + + assert_eq!(evals_A.len(), evals_B.len()); + assert_eq!(evals_A.len(), evals_C.len()); + (0..evals_A.len()) + .map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i]) + .collect::>() + }; + + // another instance of the sum-check protocol + let (sc_proof_phase2, ry, _claims_phase2) = R1CSProof::::prove_phase_two( + num_rounds_y, + &claim_phase2, + &mut DensePolynomial::new(z), + &mut DensePolynomial::new(evals_ABC), + transcript, + ); + timer_sc_proof_phase2.stop(); + let transcript_sat_state = 
transcript.challenge_scalar(b""); + transcript.new_from_state(&transcript_sat_state); + + let timmer_opening = Timer::new("polyopening"); + + let (comm, proof_eval_vars_at_ry, mipp_proof) = + pl.open(transcript, comm_list, &gens.gens_pc.ck, &ry[1..], &t); + + timmer_opening.stop(); + + let timer_polyeval = Timer::new("polyeval"); + let eval_vars_at_ry = pl.eval(&ry[1..]); + timer_polyeval.stop(); + timer_prove.stop(); + ( + R1CSProof { + comm, + initial_state, + sc_proof_phase1, + claims_phase2: (*Az_claim, *Bz_claim, *Cz_claim, prod_Az_Bz_claims), + sc_proof_phase2, + eval_vars_at_ry, + proof_eval_vars_at_ry, + rx: rx.clone(), + ry: ry.clone(), + transcript_sat_state, + t, + mipp_proof, + }, + rx, + ry, + ) + } + + // Creates a Groth16 proof for the verification of sumcheck, expressed + // as a circuit. + pub fn prove_verifier( + &self, + num_vars: usize, + num_cons: usize, + input: &[E::ScalarField], + evals: &(E::ScalarField, E::ScalarField, E::ScalarField), + transcript: &mut PoseidonTranscript, + gens: &R1CSGens, + poseidon: PoseidonConfig, + ) -> Result, ProofVerifyError> { + // serialise and add the IPP commitment to the transcript + transcript.append_gt::(b"", &self.t); + + let initial_state = transcript.challenge_scalar(b""); + transcript.new_from_state(&initial_state); + + let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, E::ScalarField::one())]; + //remaining inputs + input_as_sparse_poly_entries.extend( + (0..input.len()) + .map(|i| SparsePolyEntry::new(i + 1, input[i])) + .collect::>>(), + ); + let input_as_sparse_poly = + SparsePolynomial::new(num_vars.log_2() as usize, input_as_sparse_poly_entries); + + let config = VerifierConfig { + num_vars, + num_cons, + input: input.to_vec(), + evals: *evals, + params: poseidon, + prev_challenge: initial_state, + claims_phase2: self.claims_phase2, + polys_sc1: self.sc_proof_phase1.polys.clone(), + polys_sc2: self.sc_proof_phase2.polys.clone(), + eval_vars_at_ry: self.eval_vars_at_ry, + input_as_sparse_poly, + comm: self.comm.clone(), + rx: self.rx.clone(), + ry: self.ry.clone(), + transcript_sat_state: self.transcript_sat_state, + }; + + let circuit = R1CSVerificationCircuit::new(&config); + + let circuit_prover_timer = Timer::new("provecircuit"); + let proof = Groth16::::prove(&gens.gens_gc.pk, circuit, &mut rand::thread_rng()).unwrap(); + circuit_prover_timer.stop(); + + Ok(R1CSVerifierProof { + comm: self.comm.clone(), + circuit_proof: proof, + initial_state: self.initial_state, + transcript_sat_state: self.transcript_sat_state, + eval_vars_at_ry: self.eval_vars_at_ry, + proof_eval_vars_at_ry: self.proof_eval_vars_at_ry.clone(), + t: self.t, + mipp_proof: self.mipp_proof.clone(), + }) + } +} + +impl R1CSVerifierProof +where + ::ScalarField: Absorb, +{ + // Verifier the Groth16 proof for the sumcheck circuit and the PST polynomial + // commitment opening. + pub fn verify( + &self, + r: (Vec, Vec), + input: &[E::ScalarField], + evals: &(E::ScalarField, E::ScalarField, E::ScalarField), + transcript: &mut PoseidonTranscript, + gens: &R1CSGens, + ) -> Result { + let (rx, ry) = &r; + let (Ar, Br, Cr) = evals; + let mut pubs = vec![self.initial_state]; + pubs.extend(input.clone()); + pubs.extend(rx.clone()); + pubs.extend(ry.clone()); + pubs.extend(vec![ + self.eval_vars_at_ry, + *Ar, + *Br, + *Cr, + self.transcript_sat_state, + ]); + transcript.new_from_state(&self.transcript_sat_state); + par! 
{
+    // verifies the Groth16 proof for the spartan verifier
+    let is_verified = Groth16::<E>::verify(&gens.gens_gc.vk, &pubs, &self.circuit_proof).unwrap();
+
+    // verifies the proof of opening against the result of evaluating the
+    // witness polynomial at point ry
+    let res = Polynomial::verify(
+      transcript,
+      &gens.gens_pc.vk,
+      &self.comm,
+      &ry[1..],
+      self.eval_vars_at_ry,
+      &self.proof_eval_vars_at_ry,
+      &self.mipp_proof,
+      &self.t,
+    );
+    assert!(is_verified);
+    assert!(res);
+    Ok(is_verified && res)
+  }
 }
 #[cfg(test)]
 mod tests {
-  use crate::parameters::poseidon_params;
-
-  use super::*;
-
-  use ark_std::UniformRand;
-
-  fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
-    // three constraints over five variables Z1, Z2, Z3, Z4, and Z5
-    // rounded to the nearest power of two
-    let num_cons = 128;
-    let num_vars = 256;
-    let num_inputs = 2;
-
-    // encode the above constraints into three matrices
-    let mut A: Vec<(usize, usize, Scalar)> = Vec::new();
-    let mut B: Vec<(usize, usize, Scalar)> = Vec::new();
-    let mut C: Vec<(usize, usize, Scalar)> = Vec::new();
-
-    let one = Scalar::one();
-    // constraint 0 entries
-    // (Z1 + Z2) * I0 - Z3 = 0;
-    A.push((0, 0, one));
-    A.push((0, 1, one));
-    B.push((0, num_vars + 1, one));
-    C.push((0, 2, one));
-
-    // constraint 1 entries
-    // (Z1 + I1) * (Z3) - Z4 = 0
-    A.push((1, 0, one));
-    A.push((1, num_vars + 2, one));
-    B.push((1, 2, one));
-    C.push((1, 3, one));
-    // constraint 3 entries
-    // Z5 * 1 - 0 = 0
-    A.push((2, 4, one));
-    B.push((2, num_vars, one));
-
-    let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
-
-    // compute a satisfying assignment
-    let mut rng = ark_std::rand::thread_rng();
-    let i0 = Scalar::rand(&mut rng);
-    let i1 = Scalar::rand(&mut rng);
-    let z1 = Scalar::rand(&mut rng);
-    let z2 = Scalar::rand(&mut rng);
-    let z3 = (z1 + z2) * i0; // constraint 1: (Z1 + Z2) * I0 - Z3 = 0;
-    let z4 = (z1 + i1) * z3; // constraint 2: (Z1 + I1) * (Z3) - Z4 = 0
-    let z5 = Scalar::zero(); //constraint 3
-
-    let mut vars = vec![Scalar::zero(); num_vars];
-    vars[0] = z1;
-    vars[1] = z2;
-    vars[2] = z3;
-    vars[3] = z4;
-    vars[4] = z5;
-
-    let mut input = vec![Scalar::zero(); num_inputs];
-    input[0] = i0;
-    input[1] = i1;
-
-    (inst, vars, input)
-  }
-
-  #[test]
-  fn test_tiny_r1cs() {
-    let (inst, vars, input) = tests::produce_tiny_r1cs();
-    let is_sat = inst.is_sat(&vars, &input);
-    assert!(is_sat);
-  }
-
-  #[test]
-  fn test_synthetic_r1cs() {
-    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
-    let is_sat = inst.is_sat(&vars, &input);
-    assert!(is_sat);
-  }
-
-  #[test]
-  pub fn check_r1cs_proof() {
-    let num_vars = 1024;
-    let num_cons = num_vars;
-    let num_inputs = 10;
-    let (inst, vars, input) =
-      R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
-
-    let gens = R1CSGens::new(b"test-m", num_cons, num_vars);
-
-    let params = poseidon_params();
-    // let mut random_tape = RandomTape::new(b"proof");
-
-    let mut prover_transcript = PoseidonTranscript::new(&params);
-    let (proof, rx, ry) = R1CSProof::prove(&inst, vars, &input, &gens, &mut prover_transcript);
-
-    let inst_evals = inst.evaluate(&rx, &ry);
-
-    let mut verifier_transcript = PoseidonTranscript::new(&params);
-
-    // if you want to check the test fails
-    // input[0] = Scalar::zero();
-
-    assert!(proof
-      .verify_groth16(
-        inst.get_num_vars(),
-        inst.get_num_cons(),
-        &input,
-        &inst_evals,
-        &mut verifier_transcript,
-        &gens,
-      )
-      .is_ok());
-  }
+
+  use super::*;
+
+  use ark_ff::PrimeField;
+  use ark_std::UniformRand;
+  type F = ark_bls12_377::Fr;
+
+  fn produce_tiny_r1cs() -> (R1CSInstance<F>, Vec<F>, Vec<F>) {
+    // three constraints over five variables Z1, Z2, Z3, Z4, and Z5
+    // rounded to the nearest power of two
+    let num_cons = 128;
+    let num_vars = 256;
+    let num_inputs = 2;
+
+    // encode the above constraints into three matrices
+    let mut A: Vec<(usize, usize, F)> = Vec::new();
+    let mut B: Vec<(usize, usize, F)> = Vec::new();
+    let mut C: Vec<(usize, usize, F)> = Vec::new();
+
+    let one = F::one();
+    // constraint 0 entries
+    // (Z1 + Z2) * I0 - Z3 = 0;
+    A.push((0, 0, one));
+    A.push((0, 1, one));
+    B.push((0, num_vars + 1, one));
+    C.push((0, 2, one));
+
+    // constraint 1 entries
+    // (Z1 + I1) * (Z3) - Z4 = 0
+    A.push((1, 0, one));
+    A.push((1, num_vars + 2, one));
+    B.push((1, 2, one));
+    C.push((1, 3, one));
+    // constraint 2 entries
+    // Z5 * 1 - 0 = 0
+    A.push((2, 4, one));
+    B.push((2, num_vars, one));
+
+    let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
+
+    // compute a satisfying assignment
+    let mut rng = ark_std::rand::thread_rng();
+    let i0 = F::rand(&mut rng);
+    let i1 = F::rand(&mut rng);
+    let z1 = F::rand(&mut rng);
+    let z2 = F::rand(&mut rng);
+    let z3 = (z1 + z2) * i0; // constraint 0: (Z1 + Z2) * I0 - Z3 = 0;
+    let z4 = (z1 + i1) * z3; // constraint 1: (Z1 + I1) * (Z3) - Z4 = 0
+    let z5 = F::zero(); // constraint 2: Z5 * 1 - 0 = 0
+
+    let mut vars = vec![F::zero(); num_vars];
+    vars[0] = z1;
+    vars[1] = z2;
+    vars[2] = z3;
+    vars[3] = z4;
+    vars[4] = z5;
+
+    let mut input = vec![F::zero(); num_inputs];
+    input[0] = i0;
+    input[1] = i1;
+
+    (inst, vars, input)
+  }
+
+  #[test]
+  fn test_tiny_r1cs() {
+    let (inst, vars, input) = tests::produce_tiny_r1cs();
+    let is_sat = inst.is_sat(&vars, &input);
+    assert!(is_sat);
+  }
+
+  #[test]
+  fn test_synthetic_r1cs() {
+    type F = ark_bls12_377::Fr;
+    let (inst, vars, input) = R1CSInstance::<F>::produce_synthetic_r1cs(1024, 1024, 10);
+    let is_sat = inst.is_sat(&vars, &input);
+    assert!(is_sat);
+  }
+
+  use crate::parameters::PoseidonConfiguration;
+  #[test]
+  fn check_r1cs_proof_ark_blst() {
+    let params = ark_blst::Scalar::poseidon_params();
+    check_r1cs_proof::<ark_blst::Bls12>(params);
+  }
+  #[test]
+  fn check_r1cs_proof_bls12_377() {
+    let params = ark_bls12_377::Fr::poseidon_params();
+    check_r1cs_proof::<ark_bls12_377::Bls12_377>(params);
+  }
+
+  #[test]
+  fn check_r1cs_proof_bls12_381() {
+    let params = ark_bls12_381::Fr::poseidon_params();
+    check_r1cs_proof::<ark_bls12_381::Bls12_381>(params);
+  }
+  fn check_r1cs_proof<P>(params: PoseidonConfig<P::ScalarField>)
+  where
+    P: Pairing,
+    P::ScalarField: PrimeField,
+    P::ScalarField: Absorb,
+  {
+    let num_vars = 1024;
+    let num_cons = num_vars;
+    let num_inputs = 3;
+    let (inst, vars, input) =
+      R1CSInstance::<P::ScalarField>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
+
+    let gens = R1CSGens::<P>::setup(b"test-m", num_cons, num_vars, num_inputs, params.clone());
+
+    //let params = poseidon_params();
+    // let mut random_tape = RandomTape::new(b"proof");
+
+    let mut prover_transcript = PoseidonTranscript::new(&params.clone());
+    let c = prover_transcript.challenge_scalar::<P::ScalarField>(b"");
+    prover_transcript.new_from_state(&c);
+    let (proof, rx, ry) = R1CSProof::prove(&inst, vars, &input, &gens, &mut prover_transcript);
+
+    let inst_evals = inst.evaluate(&rx, &ry);
+
+    prover_transcript.new_from_state(&c);
+    let verifier_proof = proof
+      .prove_verifier(
+        num_vars,
+        num_cons,
+        &input,
+        &inst_evals,
+        &mut prover_transcript,
+        &gens,
+        params.clone(),
+      )
+      .unwrap();
+
+    let mut verifier_transcript = PoseidonTranscript::new(&params.clone());
+    assert!(verifier_proof
+      .verify(
+        (rx, ry),
+        &input,
+        &inst_evals,
+        &mut verifier_transcript,
+        &gens
+      )
+      .is_ok());
+  }
 }
diff --git a/src/random.rs b/src/random.rs
deleted file mode 100644
index 6d4e8e5..0000000
--- a/src/random.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use super::scalar::Scalar;
-use super::transcript::ProofTranscript;
-use ark_std::UniformRand;
-use merlin::Transcript;
-
-pub struct RandomTape {
-  tape: Transcript,
-}
-
-impl RandomTape {
-  pub fn new(name: &'static [u8]) -> Self {
-    let tape = {
-      let mut rng = ark_std::rand::thread_rng();
-      let mut tape = Transcript::new(name);
-      tape.append_scalar(b"init_randomness", &Scalar::rand(&mut rng));
-      tape
-    };
-    Self { tape }
-  }
-
-  pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
-    self.tape.challenge_scalar(label)
-  }
-
-  pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
-    self.tape.challenge_vector(label, len)
-  }
-}
diff --git a/src/scalar/mod.rs b/src/scalar/mod.rs
deleted file mode 100644
index b6182ee..0000000
--- a/src/scalar/mod.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-pub use ark_bls12_377::Fr as Scalar;
-// mod ristretto255;
-
-// pub type Scalar = ristretto255::Scalar;
-// pub type ScalarBytes = curve25519_dalek::scalar::Scalar;
-
-// pub trait ScalarFromPrimitives {
-//   fn to_scalar(self) -> Scalar;
-// }
-
-// impl ScalarFromPrimitives for usize {
-//   #[inline]
-//   fn to_scalar(self) -> Scalar {
-//     (0..self).map(|_i| Scalar::one()).sum()
-//   }
-// }
-
-// impl ScalarFromPrimitives for bool {
-//   #[inline]
-//   fn to_scalar(self) -> Scalar {
-//     if self {
-//       Scalar::one()
-//     } else {
-//       Scalar::zero()
-//     }
-//   }
-// }
-
-// pub trait ScalarBytesFromScalar {
-//   fn decompress_scalar(s: &Scalar) -> ScalarBytes;
-//   fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
-// }
-
-// impl ScalarBytesFromScalar for Scalar {
-//   fn decompress_scalar(s: &Scalar) -> ScalarBytes {
-//     ScalarBytes::from_bytes_mod_order(s.to_bytes())
-//   }
-
-//   fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
-//     (0..s.len())
-//       .map(|i| Scalar::decompress_scalar(&s[i]))
-//       .collect::<Vec<ScalarBytes>>()
-//   }
-// }
diff --git a/src/sparse_mlpoly.rs b/src/sparse_mlpoly.rs
index 5d24b7e..17f6aa0 100644
--- a/src/sparse_mlpoly.rs
+++ b/src/sparse_mlpoly.rs
@@ -1,1728 +1,1689 @@
 #![allow(clippy::type_complexity)]
 #![allow(clippy::too_many_arguments)]
 #![allow(clippy::needless_range_loop)]
-use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
-
 use super::dense_mlpoly::DensePolynomial;
 use super::dense_mlpoly::{
-  EqPolynomial, IdentityPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
+  EqPolynomial, IdentityPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
 };
 use super::errors::ProofVerifyError;
 use super::math::Math;
 use
super::product_tree::{DotProductCircuit, ProductCircuit, ProductCircuitEvalProofBatched}; -use super::random::RandomTape; -use super::scalar::Scalar; use super::timer::Timer; -use super::transcript::AppendToTranscript; +use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter}; +use crate::transcript::Transcript; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; +use ark_ec::CurveGroup; +use ark_ff::PrimeField; use ark_ff::{Field, One, Zero}; use ark_serialize::*; use core::cmp::Ordering; -use merlin::Transcript; #[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)] -pub struct SparseMatEntry { - row: usize, - col: usize, - val: Scalar, +// Each SparseMatEntry is a tuple (row, col, val) representing a non-zero value +// in an R1CS matrix. +pub struct SparseMatEntry { + row: usize, + col: usize, + val: F, } -impl SparseMatEntry { - pub fn new(row: usize, col: usize, val: Scalar) -> Self { - SparseMatEntry { row, col, val } - } +impl SparseMatEntry { + pub fn new(row: usize, col: usize, val: F) -> Self { + SparseMatEntry { row, col, val } + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)] -pub struct SparseMatPolynomial { - num_vars_x: usize, - num_vars_y: usize, - M: Vec, +// The sparse multilinearrepresentation of an R1CS matrix of size x*y +pub struct SparseMatPolynomial { + num_vars_x: usize, + num_vars_y: usize, + // The non-zero entries in the matrix, represented by the tuple (row, col,val) + M: Vec>, } -pub struct Derefs { - row_ops_val: Vec, - col_ops_val: Vec, - comb: DensePolynomial, +pub struct Derefs { + row_ops_val: Vec>, + col_ops_val: Vec>, + comb: DensePolynomial, } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct DerefsCommitment { - comm_ops_val: PolyCommitment, +pub struct DerefsCommitment { + comm_ops_val: PolyCommitment, } -impl Derefs { - pub fn new(row_ops_val: Vec, col_ops_val: Vec) -> Self { - assert_eq!(row_ops_val.len(), col_ops_val.len()); - - let derefs = { - // combine all polynomials into a single polynomial (used below to produce a single commitment) - let comb = DensePolynomial::merge(row_ops_val.iter().chain(col_ops_val.iter())); - - Derefs { - row_ops_val, - col_ops_val, - comb, - } - }; - - derefs - } - - pub fn commit(&self, gens: &PolyCommitmentGens) -> DerefsCommitment { - let (comm_ops_val, _blinds) = self.comb.commit(gens, None); - DerefsCommitment { comm_ops_val } - } +impl Derefs { + pub fn new(row_ops_val: Vec>, col_ops_val: Vec>) -> Self { + assert_eq!(row_ops_val.len(), col_ops_val.len()); + + let derefs = { + // combine all polynomials into a single polynomial (used below to produce a single commitment) + let comb = DensePolynomial::merge(row_ops_val.iter().chain(col_ops_val.iter())); + + Derefs { + row_ops_val, + col_ops_val, + comb, + } + }; + + derefs + } + + pub fn commit(&self, gens: &PolyCommitmentGens) -> DerefsCommitment + where + E: Pairing, + { + let (comm_ops_val, _blinds) = self.comb.commit(gens, false); + DerefsCommitment { comm_ops_val } + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct DerefsEvalProof { - proof_derefs: PolyEvalProof, -} - -impl DerefsEvalProof { - fn protocol_name() -> &'static [u8] { - b"Derefs evaluation proof" - } - - fn prove_single( - joint_poly: &DensePolynomial, - r: &[Scalar], - evals: Vec, - gens: &PolyCommitmentGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - ) -> PolyEvalProof { - assert_eq!(joint_poly.get_num_vars(), r.len() + evals.len().log_2()); - - // 
append the claimed evaluations to transcript - // evals.append_to_transcript(b"evals_ops_val", transcript); - transcript.append_scalar_vector(&evals); - - // n-to-1 reduction - let (r_joint, eval_joint) = { - let challenges = transcript.challenge_vector(evals.len().log_2()); - let mut poly_evals = DensePolynomial::new(evals); - for i in (0..challenges.len()).rev() { - poly_evals.bound_poly_var_bot(&challenges[i]); - } - assert_eq!(poly_evals.len(), 1); - let joint_claim_eval = poly_evals[0]; - let mut r_joint = challenges; - r_joint.extend(r); - - debug_assert_eq!(joint_poly.evaluate(&r_joint), joint_claim_eval); - (r_joint, joint_claim_eval) - }; - // decommit the joint polynomial at r_joint - transcript.append_scalar(&eval_joint); - let (proof_derefs, _comm_derefs_eval) = PolyEvalProof::prove( - joint_poly, - None, - &r_joint, - &eval_joint, - None, - gens, - transcript, - random_tape, - ); - - proof_derefs - } - - // evalues both polynomials at r and produces a joint proof of opening - pub fn prove( - derefs: &Derefs, - eval_row_ops_val_vec: &[Scalar], - eval_col_ops_val_vec: &[Scalar], - r: &[Scalar], - gens: &PolyCommitmentGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - ) -> Self { - // transcript.append_protocol_name(DerefsEvalProof::protocol_name()); - - let evals = { - let mut evals = eval_row_ops_val_vec.to_owned(); - evals.extend(eval_col_ops_val_vec); - evals.resize(evals.len().next_power_of_two(), Scalar::zero()); - evals - }; - let proof_derefs = - DerefsEvalProof::prove_single(&derefs.comb, r, evals, gens, transcript, random_tape); - - DerefsEvalProof { proof_derefs } - } - - fn verify_single( - proof: &PolyEvalProof, - comm: &PolyCommitment, - r: &[Scalar], - evals: Vec, - gens: &PolyCommitmentGens, - transcript: &mut PoseidonTranscript, - ) -> Result<(), ProofVerifyError> { - // append the claimed evaluations to transcript - // evals.append_to_transcript(b"evals_ops_val", transcript); - transcript.append_scalar_vector(&evals); - - // n-to-1 reduction - let challenges = transcript.challenge_vector(evals.len().log_2()); - let mut poly_evals = DensePolynomial::new(evals); - for i in (0..challenges.len()).rev() { - poly_evals.bound_poly_var_bot(&challenges[i]); - } - assert_eq!(poly_evals.len(), 1); - let joint_claim_eval = poly_evals[0]; - let mut r_joint = challenges; - r_joint.extend(r); - - // decommit the joint polynomial at r_joint - // joint_claim_eval.append_to_transcript(b"joint_claim_eval", transcript); - transcript.append_scalar(&joint_claim_eval); - - proof.verify_plain(gens, transcript, &r_joint, &joint_claim_eval, comm) - } - - // verify evaluations of both polynomials at r - pub fn verify( - &self, - r: &[Scalar], - eval_row_ops_val_vec: &[Scalar], - eval_col_ops_val_vec: &[Scalar], - gens: &PolyCommitmentGens, - comm: &DerefsCommitment, - transcript: &mut PoseidonTranscript, - ) -> Result<(), ProofVerifyError> { - // transcript.append_protocol_name(DerefsEvalProof::protocol_name()); - let mut evals = eval_row_ops_val_vec.to_owned(); - evals.extend(eval_col_ops_val_vec); - evals.resize(evals.len().next_power_of_two(), Scalar::zero()); - - DerefsEvalProof::verify_single( - &self.proof_derefs, - &comm.comm_ops_val, - r, - evals, - gens, - transcript, - ) - } +pub struct DerefsEvalProof { + proof_derefs: PolyEvalProof, } -impl AppendToTranscript for DerefsCommitment { - fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { - transcript.append_message(b"derefs_commitment", b"begin_derefs_commitment"); - 
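// --- Illustrative sketch, not part of this patch: the n-to-1 reduction that
// DerefsEvalProof::prove_single / verify_single perform above. The verifier's
// log2(n) challenges fold the n claimed evaluations into one joint claim,
// exactly as the repeated bound_poly_var_bot calls do. `fold_claimed_evals`
// is a hypothetical standalone helper written against ark_ff::Field.
fn fold_claimed_evals<F: ark_ff::Field>(evals: &[F], challenges: &[F]) -> F {
  assert_eq!(evals.len(), 1 << challenges.len());
  let mut layer = evals.to_vec();
  for &r in challenges.iter().rev() {
    // one folding step: layer[i] <- layer[2i] + r * (layer[2i+1] - layer[2i])
    layer = layer.chunks(2).map(|p| p[0] + (p[1] - p[0]) * r).collect();
  }
  // the single claim that is then checked against the merged `comb`
  // polynomial at r_joint = (challenges, r)
  layer[0]
}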
self.comm_ops_val.append_to_transcript(label, transcript); - transcript.append_message(b"derefs_commitment", b"end_derefs_commitment"); +impl DerefsEvalProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + fn prove_single( + joint_poly: &DensePolynomial, + r: &[E::ScalarField], + evals: Vec, + gens: &PolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> PolyEvalProof { + assert_eq!(joint_poly.get_num_vars(), r.len() + evals.len().log_2()); + + // append the claimed evaluations to transcript + // evals.append_to_transcript(b"evals_ops_val", transcript); + transcript.append_scalar_vector(b"", &evals); + + // n-to-1 reduction + let (r_joint, eval_joint) = { + let challenges = transcript.challenge_scalar_vec(b"", evals.len().log_2()); + let mut poly_evals = DensePolynomial::new(evals); + for i in (0..challenges.len()).rev() { + poly_evals.bound_poly_var_bot(&challenges[i]); + } + assert_eq!(poly_evals.len(), 1); + let joint_claim_eval = poly_evals[0]; + let mut r_joint = challenges; + r_joint.extend(r); + + debug_assert_eq!(joint_poly.evaluate(&r_joint), joint_claim_eval); + (r_joint, joint_claim_eval) + }; + // decommit the joint polynomial at r_joint + transcript.append_scalar(b"", &eval_joint); + let (proof_derefs, _comm_derefs_eval) = PolyEvalProof::prove( + joint_poly, + None, + &r_joint, + &eval_joint, + None, + gens, + transcript, + ); + + proof_derefs + } + + // evalues both polynomials at r and produces a joint proof of opening + pub fn prove( + derefs: &Derefs, + eval_row_ops_val_vec: &[E::ScalarField], + eval_col_ops_val_vec: &[E::ScalarField], + r: &[E::ScalarField], + gens: &PolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Self { + // transcript.append_protocol_name(DerefsEvalProof::protocol_name()); + + let evals = { + let mut evals = eval_row_ops_val_vec.to_owned(); + evals.extend(eval_col_ops_val_vec); + evals.resize(evals.len().next_power_of_two(), E::ScalarField::zero()); + evals + }; + let proof_derefs = DerefsEvalProof::prove_single(&derefs.comb, r, evals, gens, transcript); + + DerefsEvalProof { proof_derefs } + } + + fn verify_single( + proof: &PolyEvalProof, + comm: &PolyCommitment, + r: &[E::ScalarField], + evals: Vec, + gens: &PolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Result<(), ProofVerifyError> { + // append the claimed evaluations to transcript + // evals.append_to_transcript(b"evals_ops_val", transcript); + transcript.append_scalar_vector(b"", &evals); + + // n-to-1 reduction + let challenges = transcript.challenge_scalar_vec(b"", evals.len().log_2()); + let mut poly_evals = DensePolynomial::new(evals); + for i in (0..challenges.len()).rev() { + poly_evals.bound_poly_var_bot(&challenges[i]); } + assert_eq!(poly_evals.len(), 1); + let joint_claim_eval = poly_evals[0]; + let mut r_joint = challenges; + r_joint.extend(r); + + // decommit the joint polynomial at r_joint + // joint_claim_eval.append_to_transcript(b"joint_claim_eval", transcript); + transcript.append_scalar(b"", &joint_claim_eval); + + proof.verify_plain(gens, transcript, &r_joint, &joint_claim_eval, comm) + } + + // verify evaluations of both polynomials at r + pub fn verify( + &self, + r: &[E::ScalarField], + eval_row_ops_val_vec: &[E::ScalarField], + eval_col_ops_val_vec: &[E::ScalarField], + gens: &PolyCommitmentGens, + comm: &DerefsCommitment, + transcript: &mut PoseidonTranscript, + ) -> Result<(), ProofVerifyError> { + // transcript.append_protocol_name(DerefsEvalProof::protocol_name()); + let mut evals = eval_row_ops_val_vec.to_owned(); + 
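// --- Illustrative hypothetical test, not part of this patch: before the
// reduction above, the row/col evaluation claims are concatenated and
// zero-padded (just below) to the next power of two, so the folded claim
// lives on a hypercube with an integral number of variables.
#[test]
fn pad_claims_to_power_of_two() {
  let mut claims = vec![7u64; 6]; // stand-in for 3 row + 3 col field elements
  claims.resize(claims.len().next_power_of_two(), 0);
  assert_eq!(claims.len(), 8);
  assert_eq!(claims.len().trailing_zeros(), 3); // log_2 = number of challenges
}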
evals.extend(eval_col_ops_val_vec); + evals.resize(evals.len().next_power_of_two(), E::ScalarField::zero()); + + DerefsEvalProof::verify_single( + &self.proof_derefs, + &comm.comm_ops_val, + r, + evals, + gens, + transcript, + ) + } } -impl AppendToPoseidon for DerefsCommitment { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) { - self.comm_ops_val.append_to_poseidon(transcript); - } +impl TranscriptWriter for DerefsCommitment { + fn write_to_transcript(&self, transcript: &mut PoseidonTranscript) { + self.comm_ops_val.write_to_transcript(transcript); + } } -struct AddrTimestamps { - ops_addr_usize: Vec>, - ops_addr: Vec, - read_ts: Vec, - audit_ts: DensePolynomial, +struct AddrTimestamps { + ops_addr_usize: Vec>, + ops_addr: Vec>, + read_ts: Vec>, + audit_ts: DensePolynomial, } -impl AddrTimestamps { - pub fn new(num_cells: usize, num_ops: usize, ops_addr: Vec>) -> Self { - for item in ops_addr.iter() { - assert_eq!(item.len(), num_ops); - } - - let mut audit_ts = vec![0usize; num_cells]; - let mut ops_addr_vec: Vec = Vec::new(); - let mut read_ts_vec: Vec = Vec::new(); - for ops_addr_inst in ops_addr.iter() { - let mut read_ts = vec![0usize; num_ops]; - - // since read timestamps are trustworthy, we can simply increment the r-ts to obtain a w-ts - // this is sufficient to ensure that the write-set, consisting of (addr, val, ts) tuples, is a set - for i in 0..num_ops { - let addr = ops_addr_inst[i]; - assert!(addr < num_cells); - let r_ts = audit_ts[addr]; - read_ts[i] = r_ts; - - let w_ts = r_ts + 1; - audit_ts[addr] = w_ts; - } - - ops_addr_vec.push(DensePolynomial::from_usize(ops_addr_inst)); - read_ts_vec.push(DensePolynomial::from_usize(&read_ts)); - } - - AddrTimestamps { - ops_addr: ops_addr_vec, - ops_addr_usize: ops_addr, - read_ts: read_ts_vec, - audit_ts: DensePolynomial::from_usize(&audit_ts), - } +impl AddrTimestamps { + pub fn new(num_cells: usize, num_ops: usize, ops_addr: Vec>) -> Self { + for item in ops_addr.iter() { + assert_eq!(item.len(), num_ops); } - fn deref_mem(addr: &[usize], mem_val: &[Scalar]) -> DensePolynomial { - DensePolynomial::new( - (0..addr.len()) - .map(|i| { - let a = addr[i]; - mem_val[a] - }) - .collect::>(), - ) + let mut audit_ts = vec![0usize; num_cells]; + let mut ops_addr_vec: Vec> = Vec::new(); + let mut read_ts_vec: Vec> = Vec::new(); + for ops_addr_inst in ops_addr.iter() { + let mut read_ts = vec![0usize; num_ops]; + + // since read timestamps are trustworthy, we can simply increment the r-ts to obtain a w-ts + // this is sufficient to ensure that the write-set, consisting of (addr, val, ts) tuples, is a set + for i in 0..num_ops { + let addr = ops_addr_inst[i]; + assert!(addr < num_cells); + let r_ts = audit_ts[addr]; + read_ts[i] = r_ts; + + let w_ts = r_ts + 1; + audit_ts[addr] = w_ts; + } + + ops_addr_vec.push(DensePolynomial::from_usize(ops_addr_inst)); + read_ts_vec.push(DensePolynomial::from_usize(&read_ts)); } - pub fn deref(&self, mem_val: &[Scalar]) -> Vec { - (0..self.ops_addr.len()) - .map(|i| AddrTimestamps::deref_mem(&self.ops_addr_usize[i], mem_val)) - .collect::>() + AddrTimestamps { + ops_addr: ops_addr_vec, + ops_addr_usize: ops_addr, + read_ts: read_ts_vec, + audit_ts: DensePolynomial::from_usize(&audit_ts), } + } + + fn deref_mem(addr: &[usize], mem_val: &[F]) -> DensePolynomial { + DensePolynomial::new( + (0..addr.len()) + .map(|i| { + let a = addr[i]; + mem_val[a] + }) + .collect::>(), + ) + } + + pub fn deref(&self, mem_val: &[F]) -> Vec> { + (0..self.ops_addr.len()) + .map(|i| 
AddrTimestamps::deref_mem(&self.ops_addr_usize[i], mem_val)) + .collect::>>() + } } -pub struct MultiSparseMatPolynomialAsDense { - batch_size: usize, - val: Vec, - row: AddrTimestamps, - col: AddrTimestamps, - comb_ops: DensePolynomial, - comb_mem: DensePolynomial, +pub struct MultiSparseMatPolynomialAsDense { + batch_size: usize, + val: Vec>, + row: AddrTimestamps, + col: AddrTimestamps, + comb_ops: DensePolynomial, + comb_mem: DensePolynomial, } -pub struct SparseMatPolyCommitmentGens { - gens_ops: PolyCommitmentGens, - gens_mem: PolyCommitmentGens, - gens_derefs: PolyCommitmentGens, +pub struct SparseMatPolyCommitmentGens { + gens_ops: PolyCommitmentGens, + gens_mem: PolyCommitmentGens, + gens_derefs: PolyCommitmentGens, } -impl SparseMatPolyCommitmentGens { - pub fn new( - label: &'static [u8], - num_vars_x: usize, - num_vars_y: usize, - num_nz_entries: usize, - batch_size: usize, - ) -> SparseMatPolyCommitmentGens { - let num_vars_ops = num_nz_entries.next_power_of_two().log_2() - + (batch_size * 5).next_power_of_two().log_2(); - let num_vars_mem = if num_vars_x > num_vars_y { - num_vars_x - } else { - num_vars_y - } + 1; - let num_vars_derefs = num_nz_entries.next_power_of_two().log_2() - + (batch_size * 2).next_power_of_two().log_2(); - - let gens_ops = PolyCommitmentGens::new(num_vars_ops, label); - let gens_mem = PolyCommitmentGens::new(num_vars_mem, label); - let gens_derefs = PolyCommitmentGens::new(num_vars_derefs, label); - SparseMatPolyCommitmentGens { - gens_ops, - gens_mem, - gens_derefs, - } +impl SparseMatPolyCommitmentGens { + pub fn setup( + label: &'static [u8], + num_vars_x: usize, + num_vars_y: usize, + num_nz_entries: usize, + batch_size: usize, + ) -> Self { + let num_vars_ops = + num_nz_entries.next_power_of_two().log_2() + (batch_size * 5).next_power_of_two().log_2(); + let num_vars_mem = if num_vars_x > num_vars_y { + num_vars_x + } else { + num_vars_y + } + 1; + let num_vars_derefs = + num_nz_entries.next_power_of_two().log_2() + (batch_size * 2).next_power_of_two().log_2(); + + let gens_ops = PolyCommitmentGens::setup(num_vars_ops, label); + let gens_mem = PolyCommitmentGens::setup(num_vars_mem, label); + let gens_derefs = PolyCommitmentGens::setup(num_vars_derefs, label); + SparseMatPolyCommitmentGens { + gens_ops, + gens_mem, + gens_derefs, } + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct SparseMatPolyCommitment { - batch_size: usize, - num_ops: usize, - num_mem_cells: usize, - comm_comb_ops: PolyCommitment, - comm_comb_mem: PolyCommitment, +pub struct SparseMatPolyCommitment { + batch_size: usize, + num_ops: usize, + num_mem_cells: usize, + comm_comb_ops: PolyCommitment, + comm_comb_mem: PolyCommitment, } -impl AppendToTranscript for SparseMatPolyCommitment { - fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) { - transcript.append_u64(b"batch_size", self.batch_size as u64); - transcript.append_u64(b"num_ops", self.num_ops as u64); - transcript.append_u64(b"num_mem_cells", self.num_mem_cells as u64); - self.comm_comb_ops - .append_to_transcript(b"comm_comb_ops", transcript); - self.comm_comb_mem - .append_to_transcript(b"comm_comb_mem", transcript); - } -} - -impl AppendToPoseidon for SparseMatPolyCommitment { - fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) { - transcript.append_u64(self.batch_size as u64); - transcript.append_u64(self.num_ops as u64); - transcript.append_u64(self.num_mem_cells as u64); - self.comm_comb_ops.append_to_poseidon(transcript); - 
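// --- Illustrative sketch (hypothetical helper, not part of this patch) of
// the counter discipline implemented by AddrTimestamps::new above: each read
// of `addr` is stamped with the current counter, the matching write is
// stamped with counter + 1, and the final counters become the audit
// timestamps.
fn timestamps(num_cells: usize, ops_addr: &[usize]) -> (Vec<usize>, Vec<usize>) {
  let mut audit_ts = vec![0usize; num_cells];
  let read_ts = ops_addr
    .iter()
    .map(|&addr| {
      let r_ts = audit_ts[addr];
      audit_ts[addr] = r_ts + 1; // write-timestamp = read-timestamp + 1
      r_ts
    })
    .collect();
  (read_ts, audit_ts)
}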
self.comm_comb_mem.append_to_poseidon(transcript); - } +impl TranscriptWriter for SparseMatPolyCommitment { + fn write_to_transcript(&self, transcript: &mut PoseidonTranscript) { + transcript.append_u64(b"", self.batch_size as u64); + transcript.append_u64(b"", self.num_ops as u64); + transcript.append_u64(b"", self.num_mem_cells as u64); + self.comm_comb_ops.write_to_transcript(transcript); + self.comm_comb_mem.write_to_transcript(transcript); + } } -impl SparseMatPolynomial { - pub fn new(num_vars_x: usize, num_vars_y: usize, M: Vec) -> Self { - SparseMatPolynomial { - num_vars_x, - num_vars_y, - M, - } +impl SparseMatPolynomial { + pub fn new(num_vars_x: usize, num_vars_y: usize, M: Vec>) -> Self { + SparseMatPolynomial { + num_vars_x, + num_vars_y, + M, } - - pub fn get_num_nz_entries(&self) -> usize { - self.M.len().next_power_of_two() - } - - fn sparse_to_dense_vecs(&self, N: usize) -> (Vec, Vec, Vec) { - assert!(N >= self.get_num_nz_entries()); - let mut ops_row: Vec = vec![0; N]; - let mut ops_col: Vec = vec![0; N]; - let mut val: Vec = vec![Scalar::zero(); N]; - - for i in 0..self.M.len() { - ops_row[i] = self.M[i].row; - ops_col[i] = self.M[i].col; - val[i] = self.M[i].val; - } - (ops_row, ops_col, val) - } - - fn multi_sparse_to_dense_rep( - sparse_polys: &[&SparseMatPolynomial], - ) -> MultiSparseMatPolynomialAsDense { - assert!(!sparse_polys.is_empty()); - for i in 1..sparse_polys.len() { - assert_eq!(sparse_polys[i].num_vars_x, sparse_polys[0].num_vars_x); - assert_eq!(sparse_polys[i].num_vars_y, sparse_polys[0].num_vars_y); - } - - let N = (0..sparse_polys.len()) - .map(|i| sparse_polys[i].get_num_nz_entries()) - .max() - .unwrap() - .next_power_of_two(); - - let mut ops_row_vec: Vec> = Vec::new(); - let mut ops_col_vec: Vec> = Vec::new(); - let mut val_vec: Vec = Vec::new(); - for poly in sparse_polys { - let (ops_row, ops_col, val) = poly.sparse_to_dense_vecs(N); - ops_row_vec.push(ops_row); - ops_col_vec.push(ops_col); - val_vec.push(DensePolynomial::new(val)); - } - - let any_poly = &sparse_polys[0]; - - let num_mem_cells = if any_poly.num_vars_x > any_poly.num_vars_y { - any_poly.num_vars_x.pow2() - } else { - any_poly.num_vars_y.pow2() - }; - - let row = AddrTimestamps::new(num_mem_cells, N, ops_row_vec); - let col = AddrTimestamps::new(num_mem_cells, N, ops_col_vec); - - // combine polynomials into a single polynomial for commitment purposes - let comb_ops = DensePolynomial::merge( - row.ops_addr - .iter() - .chain(row.read_ts.iter()) - .chain(col.ops_addr.iter()) - .chain(col.read_ts.iter()) - .chain(val_vec.iter()), - ); - let mut comb_mem = row.audit_ts.clone(); - comb_mem.extend(&col.audit_ts); - - MultiSparseMatPolynomialAsDense { - batch_size: sparse_polys.len(), - row, - col, - val: val_vec, - comb_ops, - comb_mem, - } - } - - fn evaluate_with_tables(&self, eval_table_rx: &[Scalar], eval_table_ry: &[Scalar]) -> Scalar { - assert_eq!(self.num_vars_x.pow2(), eval_table_rx.len()); - assert_eq!(self.num_vars_y.pow2(), eval_table_ry.len()); - - (0..self.M.len()) - .map(|i| { - let row = self.M[i].row; - let col = self.M[i].col; - let val = &self.M[i].val; - eval_table_rx[row] * eval_table_ry[col] * val - }) - .sum() + } + + // get the number of non_zero entries in a sparse R1CS matrix + pub fn get_num_nz_entries(&self) -> usize { + self.M.len().next_power_of_two() + } + + fn sparse_to_dense_vecs(&self, N: usize) -> (Vec, Vec, Vec) { + assert!(N >= self.get_num_nz_entries()); + let mut ops_row: Vec = vec![0; N]; + let mut ops_col: Vec = vec![0; N]; + let mut val: Vec 
= vec![F::zero(); N]; + + for i in 0..self.M.len() { + ops_row[i] = self.M[i].row; + ops_col[i] = self.M[i].col; + val[i] = self.M[i].val; } - - pub fn multi_evaluate( - polys: &[&SparseMatPolynomial], - rx: &[Scalar], - ry: &[Scalar], - ) -> Vec { - let eval_table_rx = EqPolynomial::new(rx.to_vec()).evals(); - let eval_table_ry = EqPolynomial::new(ry.to_vec()).evals(); - - (0..polys.len()) - .map(|i| polys[i].evaluate_with_tables(&eval_table_rx, &eval_table_ry)) - .collect::>() + (ops_row, ops_col, val) + } + + // Produce the dense representation of sparse matrices A, B and C. + fn multi_sparse_to_dense_rep( + sparse_polys: &[&SparseMatPolynomial], + ) -> MultiSparseMatPolynomialAsDense { + assert!(!sparse_polys.is_empty()); + for i in 1..sparse_polys.len() { + assert_eq!(sparse_polys[i].num_vars_x, sparse_polys[0].num_vars_x); + assert_eq!(sparse_polys[i].num_vars_y, sparse_polys[0].num_vars_y); } - pub fn multiply_vec(&self, num_rows: usize, num_cols: usize, z: &[Scalar]) -> Vec { - assert_eq!(z.len(), num_cols); - - (0..self.M.len()) - .map(|i| { - let row = self.M[i].row; - let col = self.M[i].col; - let val = &self.M[i].val; - (row, z[col] * val) - }) - .fold(vec![Scalar::zero(); num_rows], |mut Mz, (r, v)| { - Mz[r] += v; - Mz - }) + let N = (0..sparse_polys.len()) + .map(|i| sparse_polys[i].get_num_nz_entries()) + .max() + .unwrap() + .next_power_of_two(); + + let mut ops_row_vec: Vec> = Vec::new(); + let mut ops_col_vec: Vec> = Vec::new(); + let mut val_vec: Vec> = Vec::new(); + for poly in sparse_polys { + let (ops_row, ops_col, val) = poly.sparse_to_dense_vecs(N); + // aggregate all the row and columns that contain non-zero values in the + // three matrices + ops_row_vec.push(ops_row); + ops_col_vec.push(ops_col); + // create dense polynomials, in Lagrange representation, for the non-zero + // values of each matrix + val_vec.push(DensePolynomial::new(val)); } - pub fn compute_eval_table_sparse( - &self, - rx: &[Scalar], - num_rows: usize, - num_cols: usize, - ) -> Vec { - assert_eq!(rx.len(), num_rows); - - let mut M_evals: Vec = vec![Scalar::zero(); num_cols]; - - for i in 0..self.M.len() { - let entry = &self.M[i]; - M_evals[entry.col] += rx[entry.row] * entry.val; - } - M_evals + // Note: everything else from + + let any_poly = &sparse_polys[0]; + + let num_mem_cells = if any_poly.num_vars_x > any_poly.num_vars_y { + any_poly.num_vars_x.pow2() + } else { + any_poly.num_vars_y.pow2() + }; + + let row = AddrTimestamps::new(num_mem_cells, N, ops_row_vec); + let col = AddrTimestamps::new(num_mem_cells, N, ops_col_vec); + + // combine polynomials into a single polynomial for commitment purposes + // this is done because the commitment used has a public setup + let comb_ops = DensePolynomial::merge( + row + .ops_addr + .iter() + .chain(row.read_ts.iter()) + .chain(col.ops_addr.iter()) + .chain(col.read_ts.iter()) + .chain(val_vec.iter()), + ); + let mut comb_mem = row.audit_ts.clone(); + comb_mem.extend(&col.audit_ts); + + MultiSparseMatPolynomialAsDense { + batch_size: sparse_polys.len(), + row, + col, + val: val_vec, + comb_ops, + comb_mem, } - - pub fn multi_commit( - sparse_polys: &[&SparseMatPolynomial], - gens: &SparseMatPolyCommitmentGens, - ) -> (SparseMatPolyCommitment, MultiSparseMatPolynomialAsDense) { - let batch_size = sparse_polys.len(); - let dense = SparseMatPolynomial::multi_sparse_to_dense_rep(sparse_polys); - - let (comm_comb_ops, _blinds_comb_ops) = dense.comb_ops.commit(&gens.gens_ops, None); - let (comm_comb_mem, _blinds_comb_mem) = 
dense.comb_mem.commit(&gens.gens_mem, None); - - ( - SparseMatPolyCommitment { - batch_size, - num_mem_cells: dense.row.audit_ts.len(), - num_ops: dense.row.read_ts[0].len(), - comm_comb_ops, - comm_comb_mem, - }, - dense, - ) + } + + fn evaluate_with_tables(&self, eval_table_rx: &[F], eval_table_ry: &[F]) -> F { + assert_eq!(self.num_vars_x.pow2(), eval_table_rx.len()); + assert_eq!(self.num_vars_y.pow2(), eval_table_ry.len()); + + (0..self.M.len()) + .map(|i| { + let row = self.M[i].row; + let col = self.M[i].col; + let val = &self.M[i].val; + eval_table_rx[row] * eval_table_ry[col] * val + }) + .sum() + } + + pub fn multi_evaluate(polys: &[&SparseMatPolynomial], rx: &[F], ry: &[F]) -> Vec { + let eval_table_rx = EqPolynomial::new(rx.to_vec()).evals(); + let eval_table_ry = EqPolynomial::new(ry.to_vec()).evals(); + + (0..polys.len()) + .map(|i| polys[i].evaluate_with_tables(&eval_table_rx, &eval_table_ry)) + .collect::>() + } + + pub fn multiply_vec(&self, num_rows: usize, num_cols: usize, z: &[F]) -> Vec { + assert_eq!(z.len(), num_cols); + + (0..self.M.len()) + .map(|i| { + let row = self.M[i].row; + let col = self.M[i].col; + let val = &self.M[i].val; + (row, z[col] * val) + }) + .fold(vec![F::zero(); num_rows], |mut Mz, (r, v)| { + Mz[r] += v; + Mz + }) + } + + pub fn compute_eval_table_sparse(&self, rx: &[F], num_rows: usize, num_cols: usize) -> Vec { + assert_eq!(rx.len(), num_rows); + + let mut M_evals: Vec = vec![F::zero(); num_cols]; + + for i in 0..self.M.len() { + let entry = &self.M[i]; + M_evals[entry.col] += rx[entry.row] * entry.val; } + M_evals + } + + pub fn multi_commit( + sparse_polys: &[&SparseMatPolynomial], + gens: &SparseMatPolyCommitmentGens, + ) -> ( + SparseMatPolyCommitment, + MultiSparseMatPolynomialAsDense, + ) + where + E: Pairing, + { + let batch_size = sparse_polys.len(); + let dense = SparseMatPolynomial::multi_sparse_to_dense_rep(sparse_polys); + + let (comm_comb_ops, _blinds_comb_ops) = dense.comb_ops.commit(&gens.gens_ops, false); + let (comm_comb_mem, _blinds_comb_mem) = dense.comb_mem.commit(&gens.gens_mem, false); + + ( + SparseMatPolyCommitment { + batch_size, + num_mem_cells: dense.row.audit_ts.len(), + num_ops: dense.row.read_ts[0].len(), + comm_comb_ops, + comm_comb_mem, + }, + dense, + ) + } } -impl MultiSparseMatPolynomialAsDense { - pub fn deref(&self, row_mem_val: &[Scalar], col_mem_val: &[Scalar]) -> Derefs { - let row_ops_val = self.row.deref(row_mem_val); - let col_ops_val = self.col.deref(col_mem_val); +impl MultiSparseMatPolynomialAsDense { + pub fn deref(&self, row_mem_val: &[F], col_mem_val: &[F]) -> Derefs { + let row_ops_val = self.row.deref(row_mem_val); + let col_ops_val = self.col.deref(col_mem_val); - Derefs::new(row_ops_val, col_ops_val) - } + Derefs::new(row_ops_val, col_ops_val) + } } #[derive(Debug)] -struct ProductLayer { - init: ProductCircuit, - read_vec: Vec, - write_vec: Vec, - audit: ProductCircuit, +struct ProductLayer { + init: ProductCircuit, + read_vec: Vec>, + write_vec: Vec>, + audit: ProductCircuit, } #[derive(Debug)] -struct Layers { - prod_layer: ProductLayer, +struct Layers { + prod_layer: ProductLayer, } -impl Layers { - fn build_hash_layer( - eval_table: &[Scalar], - addrs_vec: &[DensePolynomial], - derefs_vec: &[DensePolynomial], - read_ts_vec: &[DensePolynomial], - audit_ts: &DensePolynomial, - r_mem_check: &(Scalar, Scalar), - ) -> ( - DensePolynomial, - Vec, - Vec, - DensePolynomial, - ) { - let (r_hash, r_multiset_check) = r_mem_check; - - //hash(addr, val, ts) = ts * r_hash_sqr + val * r_hash + addr - 
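// --- Illustrative sketch (hypothetical helper, not part of this patch) of
// the tuple hash used by build_hash_layer here. Each (addr, val, ts) entry is
// compressed with the verifier's randomness r_hash and shifted by
// r_multiset_check, so multiset equality of the read/write sets reduces to
// comparing grand products of these hashed values.
fn hash_tuple<F: ark_ff::Field>(r_hash: F, r_multiset_check: F, addr: F, val: F, ts: F) -> F {
  // hash(addr, val, ts) = ts * r_hash^2 + val * r_hash + addr,
  // then shift by r_multiset_check before entering the product circuit
  ts * r_hash.square() + val * r_hash + addr - r_multiset_check
}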
let r_hash_sqr = r_hash.square(); - let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar { - r_hash_sqr * ts + (*val) * r_hash + addr - }; - - // hash init and audit that does not depend on #instances - let num_mem_cells = eval_table.len(); - let poly_init_hashed = DensePolynomial::new( - (0..num_mem_cells) - .map(|i| { - // at init time, addr is given by i, init value is given by eval_table, and ts = 0 - hash_func(&Scalar::from(i as u64), &eval_table[i], &Scalar::zero()) - - r_multiset_check - }) - .collect::>(), - ); - let poly_audit_hashed = DensePolynomial::new( - (0..num_mem_cells) - .map(|i| { - // at audit time, addr is given by i, value is given by eval_table, and ts is given by audit_ts - hash_func(&Scalar::from(i as u64), &eval_table[i], &audit_ts[i]) - - r_multiset_check - }) - .collect::>(), - ); - - // hash read and write that depends on #instances - let mut poly_read_hashed_vec: Vec = Vec::new(); - let mut poly_write_hashed_vec: Vec = Vec::new(); - for i in 0..addrs_vec.len() { - let (addrs, derefs, read_ts) = (&addrs_vec[i], &derefs_vec[i], &read_ts_vec[i]); - assert_eq!(addrs.len(), derefs.len()); - assert_eq!(addrs.len(), read_ts.len()); - let num_ops = addrs.len(); - let poly_read_hashed = DensePolynomial::new( - (0..num_ops) - .map(|i| { - // at read time, addr is given by addrs, value is given by derefs, and ts is given by read_ts - hash_func(&addrs[i], &derefs[i], &read_ts[i]) - r_multiset_check - }) - .collect::>(), - ); - poly_read_hashed_vec.push(poly_read_hashed); - - let poly_write_hashed = DensePolynomial::new( - (0..num_ops) - .map(|i| { - // at write time, addr is given by addrs, value is given by derefs, and ts is given by write_ts = read_ts + 1 - hash_func(&addrs[i], &derefs[i], &(read_ts[i] + Scalar::one())) - - r_multiset_check - }) - .collect::>(), - ); - poly_write_hashed_vec.push(poly_write_hashed); - } - - ( - poly_init_hashed, - poly_read_hashed_vec, - poly_write_hashed_vec, - poly_audit_hashed, - ) +impl Layers { + fn build_hash_layer( + eval_table: &[F], + addrs_vec: &[DensePolynomial], + derefs_vec: &[DensePolynomial], + read_ts_vec: &[DensePolynomial], + audit_ts: &DensePolynomial, + r_mem_check: &(F, F), + ) -> ( + DensePolynomial, + Vec>, + Vec>, + DensePolynomial, + ) { + let (r_hash, r_multiset_check) = r_mem_check; + + //hash(addr, val, ts) = ts * r_hash_sqr + val * r_hash + addr + let r_hash_sqr = r_hash.square(); + let hash_func = |addr: &F, val: &F, ts: &F| -> F { r_hash_sqr * ts + (*val) * r_hash + addr }; + + // hash init and audit that does not depend on #instances + let num_mem_cells = eval_table.len(); + let poly_init_hashed = DensePolynomial::new( + (0..num_mem_cells) + .map(|i| { + // at init time, addr is given by i, init value is given by eval_table, and ts = 0 + hash_func(&F::from(i as u64), &eval_table[i], &F::zero()) - r_multiset_check + }) + .collect::>(), + ); + let poly_audit_hashed = DensePolynomial::new( + (0..num_mem_cells) + .map(|i| { + // at audit time, addr is given by i, value is given by eval_table, and ts is given by audit_ts + hash_func(&F::from(i as u64), &eval_table[i], &audit_ts[i]) - r_multiset_check + }) + .collect::>(), + ); + + // hash read and write that depends on #instances + let mut poly_read_hashed_vec: Vec> = Vec::new(); + let mut poly_write_hashed_vec: Vec> = Vec::new(); + for i in 0..addrs_vec.len() { + let (addrs, derefs, read_ts) = (&addrs_vec[i], &derefs_vec[i], &read_ts_vec[i]); + assert_eq!(addrs.len(), derefs.len()); + assert_eq!(addrs.len(), read_ts.len()); + let num_ops = 
addrs.len(); + let poly_read_hashed = DensePolynomial::new( + (0..num_ops) + .map(|i| { + // at read time, addr is given by addrs, value is given by derefs, and ts is given by read_ts + hash_func(&addrs[i], &derefs[i], &read_ts[i]) - r_multiset_check + }) + .collect::>(), + ); + poly_read_hashed_vec.push(poly_read_hashed); + + let poly_write_hashed = DensePolynomial::new( + (0..num_ops) + .map(|i| { + // at write time, addr is given by addrs, value is given by derefs, and ts is given by write_ts = read_ts + 1 + hash_func(&addrs[i], &derefs[i], &(read_ts[i] + F::one())) - r_multiset_check + }) + .collect::>(), + ); + poly_write_hashed_vec.push(poly_write_hashed); } - pub fn new( - eval_table: &[Scalar], - addr_timestamps: &AddrTimestamps, - poly_ops_val: &[DensePolynomial], - r_mem_check: &(Scalar, Scalar), - ) -> Self { - let (poly_init_hashed, poly_read_hashed_vec, poly_write_hashed_vec, poly_audit_hashed) = - Layers::build_hash_layer( - eval_table, - &addr_timestamps.ops_addr, - poly_ops_val, - &addr_timestamps.read_ts, - &addr_timestamps.audit_ts, - r_mem_check, - ); - - let prod_init = ProductCircuit::new(&poly_init_hashed); - let prod_read_vec = (0..poly_read_hashed_vec.len()) - .map(|i| ProductCircuit::new(&poly_read_hashed_vec[i])) - .collect::>(); - let prod_write_vec = (0..poly_write_hashed_vec.len()) - .map(|i| ProductCircuit::new(&poly_write_hashed_vec[i])) - .collect::>(); - let prod_audit = ProductCircuit::new(&poly_audit_hashed); - - // subset audit check - let hashed_writes: Scalar = (0..prod_write_vec.len()) - .map(|i| prod_write_vec[i].evaluate()) - .product(); - let hashed_write_set: Scalar = prod_init.evaluate() * hashed_writes; - - let hashed_reads: Scalar = (0..prod_read_vec.len()) - .map(|i| prod_read_vec[i].evaluate()) - .product(); - let hashed_read_set: Scalar = hashed_reads * prod_audit.evaluate(); - - //assert_eq!(hashed_read_set, hashed_write_set); - debug_assert_eq!(hashed_read_set, hashed_write_set); - - Layers { - prod_layer: ProductLayer { - init: prod_init, - read_vec: prod_read_vec, - write_vec: prod_write_vec, - audit: prod_audit, - }, - } + ( + poly_init_hashed, + poly_read_hashed_vec, + poly_write_hashed_vec, + poly_audit_hashed, + ) + } + + pub fn new( + eval_table: &[F], + addr_timestamps: &AddrTimestamps, + poly_ops_val: &[DensePolynomial], + r_mem_check: &(F, F), + ) -> Self { + let (poly_init_hashed, poly_read_hashed_vec, poly_write_hashed_vec, poly_audit_hashed) = + Layers::build_hash_layer( + eval_table, + &addr_timestamps.ops_addr, + poly_ops_val, + &addr_timestamps.read_ts, + &addr_timestamps.audit_ts, + r_mem_check, + ); + + let prod_init = ProductCircuit::new(&poly_init_hashed); + let prod_read_vec = (0..poly_read_hashed_vec.len()) + .map(|i| ProductCircuit::new(&poly_read_hashed_vec[i])) + .collect::>>(); + let prod_write_vec = (0..poly_write_hashed_vec.len()) + .map(|i| ProductCircuit::new(&poly_write_hashed_vec[i])) + .collect::>>(); + let prod_audit = ProductCircuit::new(&poly_audit_hashed); + + // subset audit check + let hashed_writes: F = (0..prod_write_vec.len()) + .map(|i| prod_write_vec[i].evaluate()) + .product(); + let hashed_write_set: F = prod_init.evaluate() * hashed_writes; + + let hashed_reads: F = (0..prod_read_vec.len()) + .map(|i| prod_read_vec[i].evaluate()) + .product(); + let hashed_read_set: F = hashed_reads * prod_audit.evaluate(); + + //assert_eq!(hashed_read_set, hashed_write_set); + debug_assert_eq!(hashed_read_set, hashed_write_set); + + Layers { + prod_layer: ProductLayer { + init: prod_init, + read_vec: 
prod_read_vec, + write_vec: prod_write_vec, + audit: prod_audit, + }, } + } } #[derive(Debug)] -struct PolyEvalNetwork { - row_layers: Layers, - col_layers: Layers, +struct PolyEvalNetwork { + row_layers: Layers, + col_layers: Layers, } -impl PolyEvalNetwork { - pub fn new( - dense: &MultiSparseMatPolynomialAsDense, - derefs: &Derefs, - mem_rx: &[Scalar], - mem_ry: &[Scalar], - r_mem_check: &(Scalar, Scalar), - ) -> Self { - let row_layers = Layers::new(mem_rx, &dense.row, &derefs.row_ops_val, r_mem_check); - let col_layers = Layers::new(mem_ry, &dense.col, &derefs.col_ops_val, r_mem_check); - - PolyEvalNetwork { - row_layers, - col_layers, - } +impl PolyEvalNetwork { + pub fn new( + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + mem_rx: &[F], + mem_ry: &[F], + r_mem_check: &(F, F), + ) -> Self { + let row_layers = Layers::new(mem_rx, &dense.row, &derefs.row_ops_val, r_mem_check); + let col_layers = Layers::new(mem_ry, &dense.col, &derefs.col_ops_val, r_mem_check); + + PolyEvalNetwork { + row_layers, + col_layers, } + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -struct HashLayerProof { - eval_row: (Vec, Vec, Scalar), - eval_col: (Vec, Vec, Scalar), - eval_val: Vec, - eval_derefs: (Vec, Vec), - proof_ops: PolyEvalProof, - proof_mem: PolyEvalProof, - proof_derefs: DerefsEvalProof, +struct HashLayerProof { + eval_row: (Vec, Vec, E::ScalarField), + eval_col: (Vec, Vec, E::ScalarField), + eval_val: Vec, + eval_derefs: (Vec, Vec), + proof_ops: PolyEvalProof, + proof_mem: PolyEvalProof, + proof_derefs: DerefsEvalProof, } -impl HashLayerProof { - fn protocol_name() -> &'static [u8] { - b"Sparse polynomial hash layer proof" +impl HashLayerProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + fn prove_helper( + rand: (&Vec, &Vec), + addr_timestamps: &AddrTimestamps, + ) -> (Vec, Vec, E::ScalarField) { + let (rand_mem, rand_ops) = rand; + + // decommit ops-addr at rand_ops + let mut eval_ops_addr_vec: Vec = Vec::new(); + for i in 0..addr_timestamps.ops_addr.len() { + let eval_ops_addr = addr_timestamps.ops_addr[i].evaluate(rand_ops); + eval_ops_addr_vec.push(eval_ops_addr); } - fn prove_helper( - rand: (&Vec, &Vec), - addr_timestamps: &AddrTimestamps, - ) -> (Vec, Vec, Scalar) { - let (rand_mem, rand_ops) = rand; - - // decommit ops-addr at rand_ops - let mut eval_ops_addr_vec: Vec = Vec::new(); - for i in 0..addr_timestamps.ops_addr.len() { - let eval_ops_addr = addr_timestamps.ops_addr[i].evaluate(rand_ops); - eval_ops_addr_vec.push(eval_ops_addr); - } - - // decommit read_ts at rand_ops - let mut eval_read_ts_vec: Vec = Vec::new(); - for i in 0..addr_timestamps.read_ts.len() { - let eval_read_ts = addr_timestamps.read_ts[i].evaluate(rand_ops); - eval_read_ts_vec.push(eval_read_ts); - } - - // decommit audit-ts at rand_mem - let eval_audit_ts = addr_timestamps.audit_ts.evaluate(rand_mem); - - (eval_ops_addr_vec, eval_read_ts_vec, eval_audit_ts) + // decommit read_ts at rand_ops + let mut eval_read_ts_vec: Vec = Vec::new(); + for i in 0..addr_timestamps.read_ts.len() { + let eval_read_ts = addr_timestamps.read_ts[i].evaluate(rand_ops); + eval_read_ts_vec.push(eval_read_ts); } - fn prove( - rand: (&Vec, &Vec), - dense: &MultiSparseMatPolynomialAsDense, - derefs: &Derefs, - gens: &SparseMatPolyCommitmentGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - ) -> Self { - // transcript.append_protocol_name(HashLayerProof::protocol_name()); - - let (rand_mem, rand_ops) = rand; - - // decommit derefs at rand_ops - let eval_row_ops_val = 
(0..derefs.row_ops_val.len()) - .map(|i| derefs.row_ops_val[i].evaluate(rand_ops)) - .collect::>(); - let eval_col_ops_val = (0..derefs.col_ops_val.len()) - .map(|i| derefs.col_ops_val[i].evaluate(rand_ops)) - .collect::>(); - let proof_derefs = DerefsEvalProof::prove( - derefs, - &eval_row_ops_val, - &eval_col_ops_val, - rand_ops, - &gens.gens_derefs, - transcript, - random_tape, - ); - let eval_derefs = (eval_row_ops_val, eval_col_ops_val); - - // evaluate row_addr, row_read-ts, col_addr, col_read-ts, val at rand_ops - // evaluate row_audit_ts and col_audit_ts at rand_mem - let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) = - HashLayerProof::prove_helper((rand_mem, rand_ops), &dense.row); - let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) = - HashLayerProof::prove_helper((rand_mem, rand_ops), &dense.col); - let eval_val_vec = (0..dense.val.len()) - .map(|i| dense.val[i].evaluate(rand_ops)) - .collect::>(); - - // form a single decommitment using comm_comb_ops - let mut evals_ops: Vec = Vec::new(); - evals_ops.extend(&eval_row_addr_vec); - evals_ops.extend(&eval_row_read_ts_vec); - evals_ops.extend(&eval_col_addr_vec); - evals_ops.extend(&eval_col_read_ts_vec); - evals_ops.extend(&eval_val_vec); - evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero()); - transcript.append_scalar_vector(&evals_ops); - let challenges_ops = transcript.challenge_vector(evals_ops.len().log_2()); - - let mut poly_evals_ops = DensePolynomial::new(evals_ops); - for i in (0..challenges_ops.len()).rev() { - poly_evals_ops.bound_poly_var_bot(&challenges_ops[i]); - } - assert_eq!(poly_evals_ops.len(), 1); - let joint_claim_eval_ops = poly_evals_ops[0]; - let mut r_joint_ops = challenges_ops; - r_joint_ops.extend(rand_ops); - debug_assert_eq!(dense.comb_ops.evaluate(&r_joint_ops), joint_claim_eval_ops); - transcript.append_scalar(&joint_claim_eval_ops); - let (proof_ops, _comm_ops_eval) = PolyEvalProof::prove( - &dense.comb_ops, - None, - &r_joint_ops, - &joint_claim_eval_ops, - None, - &gens.gens_ops, - transcript, - random_tape, - ); - - // form a single decommitment using comb_comb_mem at rand_mem - let evals_mem: Vec = vec![eval_row_audit_ts, eval_col_audit_ts]; - // evals_mem.append_to_transcript(b"claim_evals_mem", transcript); - transcript.append_scalar_vector(&evals_mem); - let challenges_mem = transcript.challenge_vector(evals_mem.len().log_2()); - - let mut poly_evals_mem = DensePolynomial::new(evals_mem); - for i in (0..challenges_mem.len()).rev() { - poly_evals_mem.bound_poly_var_bot(&challenges_mem[i]); - } - assert_eq!(poly_evals_mem.len(), 1); - let joint_claim_eval_mem = poly_evals_mem[0]; - let mut r_joint_mem = challenges_mem; - r_joint_mem.extend(rand_mem); - debug_assert_eq!(dense.comb_mem.evaluate(&r_joint_mem), joint_claim_eval_mem); - transcript.append_scalar(&joint_claim_eval_mem); - let (proof_mem, _comm_mem_eval) = PolyEvalProof::prove( - &dense.comb_mem, - None, - &r_joint_mem, - &joint_claim_eval_mem, - None, - &gens.gens_mem, - transcript, - random_tape, - ); - - HashLayerProof { - eval_row: (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts), - eval_col: (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts), - eval_val: eval_val_vec, - eval_derefs, - proof_ops, - proof_mem, - proof_derefs, - } + // decommit audit-ts at rand_mem + let eval_audit_ts = addr_timestamps.audit_ts.evaluate(rand_mem); + + (eval_ops_addr_vec, eval_read_ts_vec, eval_audit_ts) + } + + fn prove( + rand: (&Vec, &Vec), + dense: 
&MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + gens: &SparseMatPolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Self { + // transcript.append_protocol_name(HashLayerProof::protocol_name()); + + let (rand_mem, rand_ops) = rand; + + // decommit derefs at rand_ops + let eval_row_ops_val = (0..derefs.row_ops_val.len()) + .map(|i| derefs.row_ops_val[i].evaluate(rand_ops)) + .collect::>(); + let eval_col_ops_val = (0..derefs.col_ops_val.len()) + .map(|i| derefs.col_ops_val[i].evaluate(rand_ops)) + .collect::>(); + let proof_derefs = DerefsEvalProof::prove( + derefs, + &eval_row_ops_val, + &eval_col_ops_val, + rand_ops, + &gens.gens_derefs, + transcript, + ); + let eval_derefs = (eval_row_ops_val, eval_col_ops_val); + + // evaluate row_addr, row_read-ts, col_addr, col_read-ts, val at rand_ops + // evaluate row_audit_ts and col_audit_ts at rand_mem + let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) = + HashLayerProof::::prove_helper((rand_mem, rand_ops), &dense.row); + let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) = + HashLayerProof::::prove_helper((rand_mem, rand_ops), &dense.col); + let eval_val_vec = (0..dense.val.len()) + .map(|i| dense.val[i].evaluate(rand_ops)) + .collect::>(); + + // form a single decommitment using comm_comb_ops + let mut evals_ops: Vec = Vec::new(); + evals_ops.extend(&eval_row_addr_vec); + evals_ops.extend(&eval_row_read_ts_vec); + evals_ops.extend(&eval_col_addr_vec); + evals_ops.extend(&eval_col_read_ts_vec); + evals_ops.extend(&eval_val_vec); + evals_ops.resize(evals_ops.len().next_power_of_two(), E::ScalarField::zero()); + transcript.append_scalar_vector(b"", &evals_ops); + let challenges_ops = transcript.challenge_scalar_vec(b"", evals_ops.len().log_2()); + + let mut poly_evals_ops = DensePolynomial::new(evals_ops); + for i in (0..challenges_ops.len()).rev() { + poly_evals_ops.bound_poly_var_bot(&challenges_ops[i]); } - - fn verify_helper( - rand: &(&Vec, &Vec), - claims: &(Scalar, Vec, Vec, Scalar), - eval_ops_val: &[Scalar], - eval_ops_addr: &[Scalar], - eval_read_ts: &[Scalar], - eval_audit_ts: &Scalar, - r: &[Scalar], - r_hash: &Scalar, - r_multiset_check: &Scalar, - ) -> Result<(), ProofVerifyError> { - let r_hash_sqr = r_hash.square(); - let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar { - r_hash_sqr * ts + (*val) * r_hash + addr - }; - - let (rand_mem, _rand_ops) = rand; - let (claim_init, claim_read, claim_write, claim_audit) = claims; - - // init - let eval_init_addr = IdentityPolynomial::new(rand_mem.len()).evaluate(rand_mem); - let eval_init_val = EqPolynomial::new(r.to_vec()).evaluate(rand_mem); - let hash_init_at_rand_mem = - hash_func(&eval_init_addr, &eval_init_val, &Scalar::zero()) - r_multiset_check; // verify the claim_last of init chunk - assert_eq!(&hash_init_at_rand_mem, claim_init); - - // read - for i in 0..eval_ops_addr.len() { - let hash_read_at_rand_ops = - hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_read_ts[i]) - r_multiset_check; // verify the claim_last of init chunk - assert_eq!(&hash_read_at_rand_ops, &claim_read[i]); - } - - // write: shares addr, val component; only decommit write_ts - for i in 0..eval_ops_addr.len() { - let eval_write_ts = eval_read_ts[i] + Scalar::one(); - let hash_write_at_rand_ops = - hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_write_ts) - r_multiset_check; // verify the claim_last of init chunk - assert_eq!(&hash_write_at_rand_ops, &claim_write[i]); - } - - // audit: shares addr and val with init - let 
eval_audit_addr = eval_init_addr; - let eval_audit_val = eval_init_val; - let hash_audit_at_rand_mem = - hash_func(&eval_audit_addr, &eval_audit_val, eval_audit_ts) - r_multiset_check; - assert_eq!(&hash_audit_at_rand_mem, claim_audit); // verify the last step of the sum-check for audit - - Ok(()) + assert_eq!(poly_evals_ops.len(), 1); + let joint_claim_eval_ops = poly_evals_ops[0]; + let mut r_joint_ops = challenges_ops; + r_joint_ops.extend(rand_ops); + debug_assert_eq!(dense.comb_ops.evaluate(&r_joint_ops), joint_claim_eval_ops); + transcript.append_scalar(b"", &joint_claim_eval_ops); + let (proof_ops, _comm_ops_eval) = PolyEvalProof::prove( + &dense.comb_ops, + None, + &r_joint_ops, + &joint_claim_eval_ops, + None, + &gens.gens_ops, + transcript, + ); + + // form a single decommitment using comb_comb_mem at rand_mem + let evals_mem: Vec = vec![eval_row_audit_ts, eval_col_audit_ts]; + // evals_mem.append_to_transcript(b"claim_evals_mem", transcript); + transcript.append_scalar_vector(b"", &evals_mem); + let challenges_mem = transcript.challenge_scalar_vec(b"", evals_mem.len().log_2()); + + let mut poly_evals_mem = DensePolynomial::new(evals_mem); + for i in (0..challenges_mem.len()).rev() { + poly_evals_mem.bound_poly_var_bot(&challenges_mem[i]); } - - fn verify( - &self, - rand: (&Vec, &Vec), - claims_row: &(Scalar, Vec, Vec, Scalar), - claims_col: &(Scalar, Vec, Vec, Scalar), - claims_dotp: &[Scalar], - comm: &SparseMatPolyCommitment, - gens: &SparseMatPolyCommitmentGens, - comm_derefs: &DerefsCommitment, - rx: &[Scalar], - ry: &[Scalar], - r_hash: &Scalar, - r_multiset_check: &Scalar, - transcript: &mut PoseidonTranscript, - ) -> Result<(), ProofVerifyError> { - let timer = Timer::new("verify_hash_proof"); - // transcript.append_protocol_name(HashLayerProof::protocol_name()); - - let (rand_mem, rand_ops) = rand; - - // verify derefs at rand_ops - let (eval_row_ops_val, eval_col_ops_val) = &self.eval_derefs; - assert_eq!(eval_row_ops_val.len(), eval_col_ops_val.len()); - self.proof_derefs.verify( - rand_ops, - eval_row_ops_val, - eval_col_ops_val, - &gens.gens_derefs, - comm_derefs, - transcript, - )?; - - // verify the decommitments used in evaluation sum-check - let eval_val_vec = &self.eval_val; - assert_eq!(claims_dotp.len(), 3 * eval_row_ops_val.len()); - for i in 0..claims_dotp.len() / 3 { - let claim_row_ops_val = claims_dotp[3 * i]; - let claim_col_ops_val = claims_dotp[3 * i + 1]; - let claim_val = claims_dotp[3 * i + 2]; - - assert_eq!(claim_row_ops_val, eval_row_ops_val[i]); - assert_eq!(claim_col_ops_val, eval_col_ops_val[i]); - assert_eq!(claim_val, eval_val_vec[i]); - } - - // verify addr-timestamps using comm_comb_ops at rand_ops - let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) = &self.eval_row; - let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) = &self.eval_col; - - let mut evals_ops: Vec = Vec::new(); - evals_ops.extend(eval_row_addr_vec); - evals_ops.extend(eval_row_read_ts_vec); - evals_ops.extend(eval_col_addr_vec); - evals_ops.extend(eval_col_read_ts_vec); - evals_ops.extend(eval_val_vec); - evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero()); - transcript.append_scalar_vector(&evals_ops); - // evals_ops.append_to_transcript(b"claim_evals_ops", transcript); - let challenges_ops = transcript.challenge_vector(evals_ops.len().log_2()); - - let mut poly_evals_ops = DensePolynomial::new(evals_ops); - for i in (0..challenges_ops.len()).rev() { - poly_evals_ops.bound_poly_var_bot(&challenges_ops[i]); - } - 
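// --- Illustrative standalone check (hypothetical, not part of this patch):
// the grand-product identity that the product layer certifies for both rows
// and columns. The write set extends the init set and the read set extends
// the audit set, so the hashed products must satisfy
// init * prod(writes) == prod(reads) * audit, which is exactly the subset
// check asserted in Layers::new above and in ProductLayerProof::prove below.
fn multiset_check<F: ark_ff::Field>(init: F, writes: &[F], reads: &[F], audit: F) -> bool {
  let ws: F = writes.iter().copied().product();
  let rs: F = reads.iter().copied().product();
  init * ws == rs * audit
}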
assert_eq!(poly_evals_ops.len(), 1); - let joint_claim_eval_ops = poly_evals_ops[0]; - let mut r_joint_ops = challenges_ops; - r_joint_ops.extend(rand_ops); - transcript.append_scalar(&joint_claim_eval_ops); - assert!(self - .proof_ops - .verify_plain( - &gens.gens_ops, - transcript, - &r_joint_ops, - &joint_claim_eval_ops, - &comm.comm_comb_ops - ) - .is_ok()); - - // verify proof-mem using comm_comb_mem at rand_mem - // form a single decommitment using comb_comb_mem at rand_mem - let evals_mem: Vec = vec![*eval_row_audit_ts, *eval_col_audit_ts]; - // evals_mem.append_to_transcript(b"claim_evals_mem", transcript); - transcript.append_scalar_vector(&evals_mem); - let challenges_mem = transcript.challenge_vector(evals_mem.len().log_2()); - - let mut poly_evals_mem = DensePolynomial::new(evals_mem); - for i in (0..challenges_mem.len()).rev() { - poly_evals_mem.bound_poly_var_bot(&challenges_mem[i]); - } - assert_eq!(poly_evals_mem.len(), 1); - let joint_claim_eval_mem = poly_evals_mem[0]; - let mut r_joint_mem = challenges_mem; - r_joint_mem.extend(rand_mem); - // joint_claim_eval_mem.append_to_transcript(b"joint_claim_eval_mem", transcript); - transcript.append_scalar(&joint_claim_eval_mem); - self.proof_mem.verify_plain( - &gens.gens_mem, - transcript, - &r_joint_mem, - &joint_claim_eval_mem, - &comm.comm_comb_mem, - )?; - - // verify the claims from the product layer - let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_row; - HashLayerProof::verify_helper( - &(rand_mem, rand_ops), - claims_row, - eval_row_ops_val, - eval_ops_addr, - eval_read_ts, - eval_audit_ts, - rx, - r_hash, - r_multiset_check, - )?; - - let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_col; - HashLayerProof::verify_helper( - &(rand_mem, rand_ops), - claims_col, - eval_col_ops_val, - eval_ops_addr, - eval_read_ts, - eval_audit_ts, - ry, - r_hash, - r_multiset_check, - )?; - - timer.stop(); - Ok(()) + assert_eq!(poly_evals_mem.len(), 1); + let joint_claim_eval_mem = poly_evals_mem[0]; + let mut r_joint_mem = challenges_mem; + r_joint_mem.extend(rand_mem); + debug_assert_eq!(dense.comb_mem.evaluate(&r_joint_mem), joint_claim_eval_mem); + transcript.append_scalar(b"", &joint_claim_eval_mem); + let (proof_mem, _comm_mem_eval) = PolyEvalProof::prove( + &dense.comb_mem, + None, + &r_joint_mem, + &joint_claim_eval_mem, + None, + &gens.gens_mem, + transcript, + ); + + HashLayerProof { + eval_row: (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts), + eval_col: (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts), + eval_val: eval_val_vec, + eval_derefs, + proof_ops, + proof_mem, + proof_derefs, + } + } + + fn verify_helper( + rand: &(&Vec, &Vec), + claims: &( + E::ScalarField, + Vec, + Vec, + E::ScalarField, + ), + eval_ops_val: &[E::ScalarField], + eval_ops_addr: &[E::ScalarField], + eval_read_ts: &[E::ScalarField], + eval_audit_ts: &E::ScalarField, + r: &[E::ScalarField], + r_hash: &E::ScalarField, + r_multiset_check: &E::ScalarField, + ) -> Result<(), ProofVerifyError> { + let r_hash_sqr = r_hash.square(); + let hash_func = |addr: &E::ScalarField, + val: &E::ScalarField, + ts: &E::ScalarField| + -> E::ScalarField { r_hash_sqr * ts + (*val) * r_hash + addr }; + + let (rand_mem, _rand_ops) = rand; + let (claim_init, claim_read, claim_write, claim_audit) = claims; + + // init + let eval_init_addr = IdentityPolynomial::new(rand_mem.len()).evaluate(rand_mem); + let eval_init_val = EqPolynomial::new(r.to_vec()).evaluate(rand_mem); + let hash_init_at_rand_mem = + hash_func(&eval_init_addr, 
&eval_init_val, &E::ScalarField::zero()) - r_multiset_check; // verify the claim_last of init chunk + assert_eq!(&hash_init_at_rand_mem, claim_init); + + // read + for i in 0..eval_ops_addr.len() { + let hash_read_at_rand_ops = + hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_read_ts[i]) - r_multiset_check; // verify the claim_last of init chunk + assert_eq!(&hash_read_at_rand_ops, &claim_read[i]); } -} - -#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -struct ProductLayerProof { - eval_row: (Scalar, Vec, Vec, Scalar), - eval_col: (Scalar, Vec, Vec, Scalar), - eval_val: (Vec, Vec), - proof_mem: ProductCircuitEvalProofBatched, - proof_ops: ProductCircuitEvalProofBatched, -} -impl ProductLayerProof { - fn protocol_name() -> &'static [u8] { - b"Sparse polynomial product layer proof" + // write: shares addr, val component; only decommit write_ts + for i in 0..eval_ops_addr.len() { + let eval_write_ts = eval_read_ts[i] + E::ScalarField::one(); + let hash_write_at_rand_ops = + hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_write_ts) - r_multiset_check; // verify the claim_last of init chunk + assert_eq!(&hash_write_at_rand_ops, &claim_write[i]); } - pub fn prove( - row_prod_layer: &mut ProductLayer, - col_prod_layer: &mut ProductLayer, - dense: &MultiSparseMatPolynomialAsDense, - derefs: &Derefs, - eval: &[Scalar], - transcript: &mut PoseidonTranscript, - ) -> (Self, Vec, Vec) { - // transcript.append_protocol_name(ProductLayerProof::protocol_name()); - - let row_eval_init = row_prod_layer.init.evaluate(); - let row_eval_audit = row_prod_layer.audit.evaluate(); - let row_eval_read = (0..row_prod_layer.read_vec.len()) - .map(|i| row_prod_layer.read_vec[i].evaluate()) - .collect::>(); - let row_eval_write = (0..row_prod_layer.write_vec.len()) - .map(|i| row_prod_layer.write_vec[i].evaluate()) - .collect::>(); - - // subset check - let ws: Scalar = (0..row_eval_write.len()) - .map(|i| row_eval_write[i]) - .product(); - let rs: Scalar = (0..row_eval_read.len()).map(|i| row_eval_read[i]).product(); - assert_eq!(row_eval_init * ws, rs * row_eval_audit); - - transcript.append_scalar(&row_eval_init); - transcript.append_scalar_vector(&row_eval_read); - transcript.append_scalar_vector(&row_eval_write); - transcript.append_scalar(&row_eval_audit); - - let col_eval_init = col_prod_layer.init.evaluate(); - let col_eval_audit = col_prod_layer.audit.evaluate(); - let col_eval_read: Vec = (0..col_prod_layer.read_vec.len()) - .map(|i| col_prod_layer.read_vec[i].evaluate()) - .collect(); - let col_eval_write: Vec = (0..col_prod_layer.write_vec.len()) - .map(|i| col_prod_layer.write_vec[i].evaluate()) - .collect(); - - // subset check - let ws: Scalar = (0..col_eval_write.len()) - .map(|i| col_eval_write[i]) - .product(); - let rs: Scalar = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product(); - assert_eq!(col_eval_init * ws, rs * col_eval_audit); - - transcript.append_scalar(&col_eval_init); - transcript.append_scalar_vector(&col_eval_read); - transcript.append_scalar_vector(&col_eval_write); - transcript.append_scalar(&col_eval_audit); - - // prepare dotproduct circuit for batching then with ops-related product circuits - assert_eq!(eval.len(), derefs.row_ops_val.len()); - assert_eq!(eval.len(), derefs.col_ops_val.len()); - assert_eq!(eval.len(), dense.val.len()); - let mut dotp_circuit_left_vec: Vec = Vec::new(); - let mut dotp_circuit_right_vec: Vec = Vec::new(); - let mut eval_dotp_left_vec: Vec = Vec::new(); - let mut eval_dotp_right_vec: Vec = Vec::new(); - for i in 
0..derefs.row_ops_val.len() { - // evaluate sparse polynomial evaluation using two dotp checks - let left = derefs.row_ops_val[i].clone(); - let right = derefs.col_ops_val[i].clone(); - let weights = dense.val[i].clone(); - - // build two dot product circuits to prove evaluation of sparse polynomial - let mut dotp_circuit = DotProductCircuit::new(left, right, weights); - let (dotp_circuit_left, dotp_circuit_right) = dotp_circuit.split(); - - let (eval_dotp_left, eval_dotp_right) = - (dotp_circuit_left.evaluate(), dotp_circuit_right.evaluate()); - - // eval_dotp_left.append_to_transcript(b"claim_eval_dotp_left", transcript); - // eval_dotp_right.append_to_transcript(b"claim_eval_dotp_right", transcript); - transcript.append_scalar(&eval_dotp_left); - transcript.append_scalar(&eval_dotp_right); - assert_eq!(eval_dotp_left + eval_dotp_right, eval[i]); - eval_dotp_left_vec.push(eval_dotp_left); - eval_dotp_right_vec.push(eval_dotp_right); - - dotp_circuit_left_vec.push(dotp_circuit_left); - dotp_circuit_right_vec.push(dotp_circuit_right); - } - - // The number of operations into the memory encoded by rx and ry are always the same (by design) - // So we can produce a batched product proof for all of them at the same time. - // prove the correctness of claim_row_eval_read, claim_row_eval_write, claim_col_eval_read, and claim_col_eval_write - // TODO: we currently only produce proofs for 3 batched sparse polynomial evaluations - assert_eq!(row_prod_layer.read_vec.len(), 3); - let (row_read_A, row_read_B, row_read_C) = { - let (vec_A, vec_BC) = row_prod_layer.read_vec.split_at_mut(1); - let (vec_B, vec_C) = vec_BC.split_at_mut(1); - (vec_A, vec_B, vec_C) - }; - - let (row_write_A, row_write_B, row_write_C) = { - let (vec_A, vec_BC) = row_prod_layer.write_vec.split_at_mut(1); - let (vec_B, vec_C) = vec_BC.split_at_mut(1); - (vec_A, vec_B, vec_C) - }; - - let (col_read_A, col_read_B, col_read_C) = { - let (vec_A, vec_BC) = col_prod_layer.read_vec.split_at_mut(1); - let (vec_B, vec_C) = vec_BC.split_at_mut(1); - (vec_A, vec_B, vec_C) - }; - - let (col_write_A, col_write_B, col_write_C) = { - let (vec_A, vec_BC) = col_prod_layer.write_vec.split_at_mut(1); - let (vec_B, vec_C) = vec_BC.split_at_mut(1); - (vec_A, vec_B, vec_C) - }; - - let (dotp_left_A, dotp_left_B, dotp_left_C) = { - let (vec_A, vec_BC) = dotp_circuit_left_vec.split_at_mut(1); - let (vec_B, vec_C) = vec_BC.split_at_mut(1); - (vec_A, vec_B, vec_C) - }; - - let (dotp_right_A, dotp_right_B, dotp_right_C) = { - let (vec_A, vec_BC) = dotp_circuit_right_vec.split_at_mut(1); - let (vec_B, vec_C) = vec_BC.split_at_mut(1); - (vec_A, vec_B, vec_C) - }; - - let (proof_ops, rand_ops) = ProductCircuitEvalProofBatched::prove( - &mut vec![ - &mut row_read_A[0], - &mut row_read_B[0], - &mut row_read_C[0], - &mut row_write_A[0], - &mut row_write_B[0], - &mut row_write_C[0], - &mut col_read_A[0], - &mut col_read_B[0], - &mut col_read_C[0], - &mut col_write_A[0], - &mut col_write_B[0], - &mut col_write_C[0], - ], - &mut vec![ - &mut dotp_left_A[0], - &mut dotp_right_A[0], - &mut dotp_left_B[0], - &mut dotp_right_B[0], - &mut dotp_left_C[0], - &mut dotp_right_C[0], - ], - transcript, - ); - - // produce a batched proof of memory-related product circuits - let (proof_mem, rand_mem) = ProductCircuitEvalProofBatched::prove( - &mut vec![ - &mut row_prod_layer.init, - &mut row_prod_layer.audit, - &mut col_prod_layer.init, - &mut col_prod_layer.audit, - ], - &mut Vec::new(), - transcript, - ); - - let product_layer_proof = ProductLayerProof { - eval_row: 
(row_eval_init, row_eval_read, row_eval_write, row_eval_audit), - eval_col: (col_eval_init, col_eval_read, col_eval_write, col_eval_audit), - eval_val: (eval_dotp_left_vec, eval_dotp_right_vec), - proof_mem, - proof_ops, - }; - - let mut product_layer_proof_encoded: Vec = Vec::new(); - product_layer_proof - .serialize(&mut product_layer_proof_encoded) - .unwrap(); - let msg = format!( - "len_product_layer_proof {:?}", - product_layer_proof_encoded.len() - ); - Timer::print(&msg); - - (product_layer_proof, rand_mem, rand_ops) + // audit: shares addr and val with init + let eval_audit_addr = eval_init_addr; + let eval_audit_val = eval_init_val; + let hash_audit_at_rand_mem = + hash_func(&eval_audit_addr, &eval_audit_val, eval_audit_ts) - r_multiset_check; + assert_eq!(&hash_audit_at_rand_mem, claim_audit); // verify the last step of the sum-check for audit + + Ok(()) + } + + fn verify( + &self, + rand: (&Vec, &Vec), + claims_row: &( + E::ScalarField, + Vec, + Vec, + E::ScalarField, + ), + claims_col: &( + E::ScalarField, + Vec, + Vec, + E::ScalarField, + ), + claims_dotp: &[E::ScalarField], + comm: &SparseMatPolyCommitment, + gens: &SparseMatPolyCommitmentGens, + comm_derefs: &DerefsCommitment, + rx: &[E::ScalarField], + ry: &[E::ScalarField], + r_hash: &E::ScalarField, + r_multiset_check: &E::ScalarField, + transcript: &mut PoseidonTranscript, + ) -> Result<(), ProofVerifyError> { + let timer = Timer::new("verify_hash_proof"); + // transcript.append_protocol_name(HashLayerProof::protocol_name()); + + let (rand_mem, rand_ops) = rand; + + // verify derefs at rand_ops + let (eval_row_ops_val, eval_col_ops_val) = &self.eval_derefs; + assert_eq!(eval_row_ops_val.len(), eval_col_ops_val.len()); + self.proof_derefs.verify( + rand_ops, + eval_row_ops_val, + eval_col_ops_val, + &gens.gens_derefs, + comm_derefs, + transcript, + )?; + + // verify the decommitments used in evaluation sum-check + let eval_val_vec = &self.eval_val; + assert_eq!(claims_dotp.len(), 3 * eval_row_ops_val.len()); + for i in 0..claims_dotp.len() / 3 { + let claim_row_ops_val = claims_dotp[3 * i]; + let claim_col_ops_val = claims_dotp[3 * i + 1]; + let claim_val = claims_dotp[3 * i + 2]; + + assert_eq!(claim_row_ops_val, eval_row_ops_val[i]); + assert_eq!(claim_col_ops_val, eval_col_ops_val[i]); + assert_eq!(claim_val, eval_val_vec[i]); } - pub fn verify( - &self, - num_ops: usize, - num_cells: usize, - eval: &[Scalar], - transcript: &mut PoseidonTranscript, - ) -> Result< - ( - Vec, - Vec, - Vec, - Vec, - Vec, - ), - ProofVerifyError, - > { - // transcript.append_protocol_name(ProductLayerProof::protocol_name()); - - let timer = Timer::new("verify_prod_proof"); - let num_instances = eval.len(); - - // subset check - let (row_eval_init, row_eval_read, row_eval_write, row_eval_audit) = &self.eval_row; - assert_eq!(row_eval_write.len(), num_instances); - assert_eq!(row_eval_read.len(), num_instances); - let ws: Scalar = (0..row_eval_write.len()) - .map(|i| row_eval_write[i]) - .product(); - let rs: Scalar = (0..row_eval_read.len()).map(|i| row_eval_read[i]).product(); - assert_eq!(ws * row_eval_init, rs * row_eval_audit); - - // row_eval_init.append_to_transcript(b"claim_row_eval_init", transcript); - // row_eval_read.append_to_transcript(b"claim_row_eval_read", transcript); - // row_eval_write.append_to_transcript(b"claim_row_eval_write", transcript); - // row_eval_audit.append_to_transcript(b"claim_row_eval_audit", transcript); - - transcript.append_scalar(row_eval_init); - transcript.append_scalar_vector(row_eval_read); - 
transcript.append_scalar_vector(row_eval_write); - transcript.append_scalar(row_eval_audit); - - // subset check - let (col_eval_init, col_eval_read, col_eval_write, col_eval_audit) = &self.eval_col; - assert_eq!(col_eval_write.len(), num_instances); - assert_eq!(col_eval_read.len(), num_instances); - let ws: Scalar = (0..col_eval_write.len()) - .map(|i| col_eval_write[i]) - .product(); - let rs: Scalar = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product(); - assert_eq!(ws * col_eval_init, rs * col_eval_audit); - - // col_eval_init.append_to_transcript(b"claim_col_eval_init", transcript); - // col_eval_read.append_to_transcript(b"claim_col_eval_read", transcript); - // col_eval_write.append_to_transcript(b"claim_col_eval_write", transcript); - // col_eval_audit.append_to_transcript(b"claim_col_eval_audit", transcript); - - transcript.append_scalar(col_eval_init); - transcript.append_scalar_vector(col_eval_read); - transcript.append_scalar_vector(col_eval_write); - transcript.append_scalar(col_eval_audit); - - // verify the evaluation of the sparse polynomial - let (eval_dotp_left, eval_dotp_right) = &self.eval_val; - assert_eq!(eval_dotp_left.len(), eval_dotp_left.len()); - assert_eq!(eval_dotp_left.len(), num_instances); - let mut claims_dotp_circuit: Vec = Vec::new(); - for i in 0..num_instances { - assert_eq!(eval_dotp_left[i] + eval_dotp_right[i], eval[i]); - // eval_dotp_left[i].append_to_transcript(b"claim_eval_dotp_left", transcript); - // eval_dotp_right[i].append_to_transcript(b"claim_eval_dotp_right", transcript) - transcript.append_scalar(&eval_dotp_left[i]); - transcript.append_scalar(&eval_dotp_right[i]); - - claims_dotp_circuit.push(eval_dotp_left[i]); - claims_dotp_circuit.push(eval_dotp_right[i]); - } - - // verify the correctness of claim_row_eval_read, claim_row_eval_write, claim_col_eval_read, and claim_col_eval_write - let mut claims_prod_circuit: Vec = Vec::new(); - claims_prod_circuit.extend(row_eval_read); - claims_prod_circuit.extend(row_eval_write); - claims_prod_circuit.extend(col_eval_read); - claims_prod_circuit.extend(col_eval_write); - - let (claims_ops, claims_dotp, rand_ops) = self.proof_ops.verify( - &claims_prod_circuit, - &claims_dotp_circuit, - num_ops, - transcript, - ); - // verify the correctness of claim_row_eval_init and claim_row_eval_audit - let (claims_mem, _claims_mem_dotp, rand_mem) = self.proof_mem.verify( - &[ - *row_eval_init, - *row_eval_audit, - *col_eval_init, - *col_eval_audit, - ], - &Vec::new(), - num_cells, - transcript, - ); - timer.stop(); - - Ok((claims_mem, rand_mem, claims_ops, claims_dotp, rand_ops)) + // verify addr-timestamps using comm_comb_ops at rand_ops + let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) = &self.eval_row; + let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) = &self.eval_col; + + let mut evals_ops: Vec = Vec::new(); + evals_ops.extend(eval_row_addr_vec); + evals_ops.extend(eval_row_read_ts_vec); + evals_ops.extend(eval_col_addr_vec); + evals_ops.extend(eval_col_read_ts_vec); + evals_ops.extend(eval_val_vec); + evals_ops.resize(evals_ops.len().next_power_of_two(), E::ScalarField::zero()); + transcript.append_scalar_vector(b"", &evals_ops); + // evals_ops.append_to_transcript(b"claim_evals_ops", transcript); + let challenges_ops = transcript.challenge_scalar_vec(b"", evals_ops.len().log_2()); + + let mut poly_evals_ops = DensePolynomial::new(evals_ops); + for i in (0..challenges_ops.len()).rev() { + poly_evals_ops.bound_poly_var_bot(&challenges_ops[i]); + } + 
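// After all challenge variables are bound, poly_evals_ops collapses to a
+    // single entry: the multilinear extension of the claimed evaluations,
+    // evaluated at the random point challenges_ops. Verifying this one joint
+    // claim at (challenges_ops, rand_ops) below is, with high probability
+    // over the challenges, equivalent to verifying each claim separately.
+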
assert_eq!(poly_evals_ops.len(), 1); + let joint_claim_eval_ops = poly_evals_ops[0]; + let mut r_joint_ops = challenges_ops; + r_joint_ops.extend(rand_ops); + transcript.append_scalar(b"", &joint_claim_eval_ops); + assert!(self + .proof_ops + .verify_plain( + &gens.gens_ops, + transcript, + &r_joint_ops, + &joint_claim_eval_ops, + &comm.comm_comb_ops + ) + .is_ok()); + + // verify proof-mem using comm_comb_mem at rand_mem + // form a single decommitment using comb_comb_mem at rand_mem + let evals_mem: Vec = vec![*eval_row_audit_ts, *eval_col_audit_ts]; + // evals_mem.append_to_transcript(b"claim_evals_mem", transcript); + transcript.append_scalar_vector(b"", &evals_mem); + let challenges_mem = transcript.challenge_scalar_vec(b"", evals_mem.len().log_2()); + + let mut poly_evals_mem = DensePolynomial::new(evals_mem); + for i in (0..challenges_mem.len()).rev() { + poly_evals_mem.bound_poly_var_bot(&challenges_mem[i]); } + assert_eq!(poly_evals_mem.len(), 1); + let joint_claim_eval_mem = poly_evals_mem[0]; + let mut r_joint_mem = challenges_mem; + r_joint_mem.extend(rand_mem); + // joint_claim_eval_mem.append_to_transcript(b"joint_claim_eval_mem", transcript); + transcript.append_scalar(b"", &joint_claim_eval_mem); + self.proof_mem.verify_plain( + &gens.gens_mem, + transcript, + &r_joint_mem, + &joint_claim_eval_mem, + &comm.comm_comb_mem, + )?; + + // verify the claims from the product layer + let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_row; + HashLayerProof::::verify_helper( + &(rand_mem, rand_ops), + claims_row, + eval_row_ops_val, + eval_ops_addr, + eval_read_ts, + eval_audit_ts, + rx, + r_hash, + r_multiset_check, + )?; + + let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_col; + HashLayerProof::::verify_helper( + &(rand_mem, rand_ops), + claims_col, + eval_col_ops_val, + eval_ops_addr, + eval_read_ts, + eval_audit_ts, + ry, + r_hash, + r_multiset_check, + )?; + + timer.stop(); + Ok(()) + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -struct PolyEvalNetworkProof { - proof_prod_layer: ProductLayerProof, - proof_hash_layer: HashLayerProof, +struct ProductLayerProof { + eval_row: (F, Vec, Vec, F), + eval_col: (F, Vec, Vec, F), + eval_val: (Vec, Vec), + proof_mem: ProductCircuitEvalProofBatched, + proof_ops: ProductCircuitEvalProofBatched, } -impl PolyEvalNetworkProof { - fn protocol_name() -> &'static [u8] { - b"Sparse polynomial evaluation proof" +impl ProductLayerProof { + pub fn prove( + row_prod_layer: &mut ProductLayer, + col_prod_layer: &mut ProductLayer, + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + eval: &[F], + transcript: &mut PoseidonTranscript, + ) -> (Self, Vec, Vec) { + // transcript.append_protocol_name(ProductLayerProof::protocol_name()); + + let row_eval_init = row_prod_layer.init.evaluate(); + let row_eval_audit = row_prod_layer.audit.evaluate(); + let row_eval_read = (0..row_prod_layer.read_vec.len()) + .map(|i| row_prod_layer.read_vec[i].evaluate()) + .collect::>(); + let row_eval_write = (0..row_prod_layer.write_vec.len()) + .map(|i| row_prod_layer.write_vec[i].evaluate()) + .collect::>(); + + // subset check + let ws: F = (0..row_eval_write.len()) + .map(|i| row_eval_write[i]) + .product(); + let rs: F = (0..row_eval_read.len()).map(|i| row_eval_read[i]).product(); + assert_eq!(row_eval_init * ws, rs * row_eval_audit); + + transcript.append_scalar(b"", &row_eval_init); + transcript.append_scalar_vector(b"", &row_eval_read); + transcript.append_scalar_vector(b"", &row_eval_write); + 
transcript.append_scalar(b"", &row_eval_audit);
+
+    let col_eval_init = col_prod_layer.init.evaluate();
+    let col_eval_audit = col_prod_layer.audit.evaluate();
+    let col_eval_read: Vec<F> = (0..col_prod_layer.read_vec.len())
+      .map(|i| col_prod_layer.read_vec[i].evaluate())
+      .collect();
+    let col_eval_write: Vec<F> = (0..col_prod_layer.write_vec.len())
+      .map(|i| col_prod_layer.write_vec[i].evaluate())
+      .collect();
+
+    // subset check
+    let ws: F = (0..col_eval_write.len())
+      .map(|i| col_eval_write[i])
+      .product();
+    let rs: F = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product();
+    assert_eq!(col_eval_init * ws, rs * col_eval_audit);
+
+    transcript.append_scalar(b"", &col_eval_init);
+    transcript.append_scalar_vector(b"", &col_eval_read);
+    transcript.append_scalar_vector(b"", &col_eval_write);
+    transcript.append_scalar(b"", &col_eval_audit);
+
+    // prepare the dot product circuits for batching them with the ops-related product circuits
+    assert_eq!(eval.len(), derefs.row_ops_val.len());
+    assert_eq!(eval.len(), derefs.col_ops_val.len());
+    assert_eq!(eval.len(), dense.val.len());
+    let mut dotp_circuit_left_vec: Vec<DotProductCircuit<F>> = Vec::new();
+    let mut dotp_circuit_right_vec: Vec<DotProductCircuit<F>> = Vec::new();
+    let mut eval_dotp_left_vec: Vec<F> = Vec::new();
+    let mut eval_dotp_right_vec: Vec<F> = Vec::new();
+    for i in 0..derefs.row_ops_val.len() {
+      // reduce the sparse polynomial evaluation to two dotp checks
+      let left = derefs.row_ops_val[i].clone();
+      let right = derefs.col_ops_val[i].clone();
+      let weights = dense.val[i].clone();
+
+      // build two dot product circuits to prove evaluation of sparse polynomial
+      let mut dotp_circuit = DotProductCircuit::new(left, right, weights);
+      let (dotp_circuit_left, dotp_circuit_right) = dotp_circuit.split();
+
+      let (eval_dotp_left, eval_dotp_right) =
+        (dotp_circuit_left.evaluate(), dotp_circuit_right.evaluate());
+
+      // eval_dotp_left.append_to_transcript(b"claim_eval_dotp_left", transcript);
+      // eval_dotp_right.append_to_transcript(b"claim_eval_dotp_right", transcript);
+      transcript.append_scalar(b"", &eval_dotp_left);
+      transcript.append_scalar(b"", &eval_dotp_right);
+      assert_eq!(eval_dotp_left + eval_dotp_right, eval[i]);
+      eval_dotp_left_vec.push(eval_dotp_left);
+      eval_dotp_right_vec.push(eval_dotp_right);
+
+      dotp_circuit_left_vec.push(dotp_circuit_left);
+      dotp_circuit_right_vec.push(dotp_circuit_right);
    }
-  pub fn prove(
-    network: &mut PolyEvalNetwork,
-    dense: &MultiSparseMatPolynomialAsDense,
-    derefs: &Derefs,
-    evals: &[Scalar],
-    gens: &SparseMatPolyCommitmentGens,
-    transcript: &mut PoseidonTranscript,
-    random_tape: &mut RandomTape,
-  ) -> Self {
-    // transcript.append_protocol_name(PolyEvalNetworkProof::protocol_name());
-
-    let (proof_prod_layer, rand_mem, rand_ops) = ProductLayerProof::prove(
-      &mut network.row_layers.prod_layer,
-      &mut network.col_layers.prod_layer,
-      dense,
-      derefs,
-      evals,
-      transcript,
-    );
-
-    // proof of hash layer for row and col
-    let proof_hash_layer = HashLayerProof::prove(
-      (&rand_mem, &rand_ops),
-      dense,
-      derefs,
-      gens,
-      transcript,
-      random_tape,
-    );
-
-    PolyEvalNetworkProof {
-      proof_prod_layer,
-      proof_hash_layer,
-    }
+    // The number of operations into the memory encoded by rx and ry is always the same (by design),
+    // so we can produce a batched product proof for all of them at the same time.
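+    // The first batch below holds the 12 ops-related product circuits
+    // (a read and a write circuit for each of the three row and three
+    // column polynomials); the second holds the 6 dot-product circuits
+    // (a left and a right half for each of the 3 sparse polynomial
+    // evaluations).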
+ // prove the correctness of claim_row_eval_read, claim_row_eval_write, claim_col_eval_read, and claim_col_eval_write + // TODO: we currently only produce proofs for 3 batched sparse polynomial evaluations + assert_eq!(row_prod_layer.read_vec.len(), 3); + let (row_read_A, row_read_B, row_read_C) = { + let (vec_A, vec_BC) = row_prod_layer.read_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (row_write_A, row_write_B, row_write_C) = { + let (vec_A, vec_BC) = row_prod_layer.write_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (col_read_A, col_read_B, col_read_C) = { + let (vec_A, vec_BC) = col_prod_layer.read_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (col_write_A, col_write_B, col_write_C) = { + let (vec_A, vec_BC) = col_prod_layer.write_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (dotp_left_A, dotp_left_B, dotp_left_C) = { + let (vec_A, vec_BC) = dotp_circuit_left_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (dotp_right_A, dotp_right_B, dotp_right_C) = { + let (vec_A, vec_BC) = dotp_circuit_right_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (proof_ops, rand_ops) = ProductCircuitEvalProofBatched::prove( + &mut vec![ + &mut row_read_A[0], + &mut row_read_B[0], + &mut row_read_C[0], + &mut row_write_A[0], + &mut row_write_B[0], + &mut row_write_C[0], + &mut col_read_A[0], + &mut col_read_B[0], + &mut col_read_C[0], + &mut col_write_A[0], + &mut col_write_B[0], + &mut col_write_C[0], + ], + &mut vec![ + &mut dotp_left_A[0], + &mut dotp_right_A[0], + &mut dotp_left_B[0], + &mut dotp_right_B[0], + &mut dotp_left_C[0], + &mut dotp_right_C[0], + ], + transcript, + ); + + // produce a batched proof of memory-related product circuits + let (proof_mem, rand_mem) = ProductCircuitEvalProofBatched::prove( + &mut vec![ + &mut row_prod_layer.init, + &mut row_prod_layer.audit, + &mut col_prod_layer.init, + &mut col_prod_layer.audit, + ], + &mut Vec::new(), + transcript, + ); + + let product_layer_proof = ProductLayerProof { + eval_row: (row_eval_init, row_eval_read, row_eval_write, row_eval_audit), + eval_col: (col_eval_init, col_eval_read, col_eval_write, col_eval_audit), + eval_val: (eval_dotp_left_vec, eval_dotp_right_vec), + proof_mem, + proof_ops, + }; + + let mut product_layer_proof_encoded: Vec = Vec::new(); + product_layer_proof + .serialize_with_mode(&mut product_layer_proof_encoded, Compress::Yes) + .unwrap(); + let msg = format!( + "len_product_layer_proof {:?}", + product_layer_proof_encoded.len() + ); + Timer::print(&msg); + + (product_layer_proof, rand_mem, rand_ops) + } + + pub fn verify( + &self, + num_ops: usize, + num_cells: usize, + eval: &[F], + transcript: &mut PoseidonTranscript, + ) -> Result<(Vec, Vec, Vec, Vec, Vec), ProofVerifyError> { + // transcript.append_protocol_name(ProductLayerProof::protocol_name()); + + let timer = Timer::new("verify_prod_proof"); + let num_instances = eval.len(); + + // subset check + let (row_eval_init, row_eval_read, row_eval_write, row_eval_audit) = &self.eval_row; + assert_eq!(row_eval_write.len(), num_instances); + assert_eq!(row_eval_read.len(), num_instances); + let ws: F = (0..row_eval_write.len()) + .map(|i| row_eval_write[i]) + .product(); + let rs: F = (0..row_eval_read.len()).map(|i| 
row_eval_read[i]).product();
+    assert_eq!(ws * row_eval_init, rs * row_eval_audit);
+
+    // row_eval_init.append_to_transcript(b"claim_row_eval_init", transcript);
+    // row_eval_read.append_to_transcript(b"claim_row_eval_read", transcript);
+    // row_eval_write.append_to_transcript(b"claim_row_eval_write", transcript);
+    // row_eval_audit.append_to_transcript(b"claim_row_eval_audit", transcript);
+
+    transcript.append_scalar(b"", row_eval_init);
+    transcript.append_scalar_vector(b"", row_eval_read);
+    transcript.append_scalar_vector(b"", row_eval_write);
+    transcript.append_scalar(b"", row_eval_audit);
+
+    // subset check
+    let (col_eval_init, col_eval_read, col_eval_write, col_eval_audit) = &self.eval_col;
+    assert_eq!(col_eval_write.len(), num_instances);
+    assert_eq!(col_eval_read.len(), num_instances);
+    let ws: F = (0..col_eval_write.len())
+      .map(|i| col_eval_write[i])
+      .product();
+    let rs: F = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product();
+    assert_eq!(ws * col_eval_init, rs * col_eval_audit);
+
+    // col_eval_init.append_to_transcript(b"claim_col_eval_init", transcript);
+    // col_eval_read.append_to_transcript(b"claim_col_eval_read", transcript);
+    // col_eval_write.append_to_transcript(b"claim_col_eval_write", transcript);
+    // col_eval_audit.append_to_transcript(b"claim_col_eval_audit", transcript);
+
+    transcript.append_scalar(b"", col_eval_init);
+    transcript.append_scalar_vector(b"", col_eval_read);
+    transcript.append_scalar_vector(b"", col_eval_write);
+    transcript.append_scalar(b"", col_eval_audit);
+
+    // verify the evaluation of the sparse polynomial
+    let (eval_dotp_left, eval_dotp_right) = &self.eval_val;
+    assert_eq!(eval_dotp_left.len(), eval_dotp_right.len());
+    assert_eq!(eval_dotp_left.len(), num_instances);
+    let mut claims_dotp_circuit: Vec<F> = Vec::new();
+    for i in 0..num_instances {
+      assert_eq!(eval_dotp_left[i] + eval_dotp_right[i], eval[i]);
+      // eval_dotp_left[i].append_to_transcript(b"claim_eval_dotp_left", transcript);
+      // eval_dotp_right[i].append_to_transcript(b"claim_eval_dotp_right", transcript)
+      transcript.append_scalar(b"", &eval_dotp_left[i]);
+      transcript.append_scalar(b"", &eval_dotp_right[i]);
+
+      claims_dotp_circuit.push(eval_dotp_left[i]);
+      claims_dotp_circuit.push(eval_dotp_right[i]);
    }
-  pub fn verify(
-    &self,
-    comm: &SparseMatPolyCommitment,
-    comm_derefs: &DerefsCommitment,
-    evals: &[Scalar],
-    gens: &SparseMatPolyCommitmentGens,
-    rx: &[Scalar],
-    ry: &[Scalar],
-    r_mem_check: &(Scalar, Scalar),
-    nz: usize,
-    transcript: &mut PoseidonTranscript,
-  ) -> Result<(), ProofVerifyError> {
-    let timer = Timer::new("verify_polyeval_proof");
-    // transcript.append_protocol_name(PolyEvalNetworkProof::protocol_name());
-
-    let num_instances = evals.len();
-    let (r_hash, r_multiset_check) = r_mem_check;
-
-    let num_ops = nz.next_power_of_two();
-    let num_cells = rx.len().pow2();
-    assert_eq!(rx.len(), ry.len());
-
-    let (claims_mem, rand_mem, mut claims_ops, claims_dotp, rand_ops) =
-      self.proof_prod_layer
-        .verify(num_ops, num_cells, evals, transcript)?;
-    assert_eq!(claims_mem.len(), 4);
-    assert_eq!(claims_ops.len(), 4 * num_instances);
-    assert_eq!(claims_dotp.len(), 3 * num_instances);
-
-    let (claims_ops_row, claims_ops_col) = claims_ops.split_at_mut(2 * num_instances);
-    let (claims_ops_row_read, claims_ops_row_write) =
-      claims_ops_row.split_at_mut(num_instances);
-    let (claims_ops_col_read, claims_ops_col_write) =
-      claims_ops_col.split_at_mut(num_instances);
-
-    // verify the proof of hash layer
-
self.proof_hash_layer.verify( - (&rand_mem, &rand_ops), - &( - claims_mem[0], - claims_ops_row_read.to_vec(), - claims_ops_row_write.to_vec(), - claims_mem[1], - ), - &( - claims_mem[2], - claims_ops_col_read.to_vec(), - claims_ops_col_write.to_vec(), - claims_mem[3], - ), - &claims_dotp, - comm, - gens, - comm_derefs, - rx, - ry, - r_hash, - r_multiset_check, - transcript, - )?; - timer.stop(); - - Ok(()) - } + // verify the correctness of claim_row_eval_read, claim_row_eval_write, claim_col_eval_read, and claim_col_eval_write + let mut claims_prod_circuit: Vec = Vec::new(); + claims_prod_circuit.extend(row_eval_read); + claims_prod_circuit.extend(row_eval_write); + claims_prod_circuit.extend(col_eval_read); + claims_prod_circuit.extend(col_eval_write); + + let (claims_ops, claims_dotp, rand_ops) = self.proof_ops.verify( + &claims_prod_circuit, + &claims_dotp_circuit, + num_ops, + transcript, + ); + // verify the correctness of claim_row_eval_init and claim_row_eval_audit + let (claims_mem, _claims_mem_dotp, rand_mem) = self.proof_mem.verify( + &[ + *row_eval_init, + *row_eval_audit, + *col_eval_init, + *col_eval_audit, + ], + &Vec::new(), + num_cells, + transcript, + ); + timer.stop(); + + Ok((claims_mem, rand_mem, claims_ops, claims_dotp, rand_ops)) + } } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct SparseMatPolyEvalProof { - comm_derefs: DerefsCommitment, - poly_eval_network_proof: PolyEvalNetworkProof, +struct PolyEvalNetworkProof { + proof_prod_layer: ProductLayerProof, + proof_hash_layer: HashLayerProof, } -impl SparseMatPolyEvalProof { - fn protocol_name() -> &'static [u8] { - b"Sparse polynomial evaluation proof" +impl PolyEvalNetworkProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + pub fn prove( + network: &mut PolyEvalNetwork, + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + evals: &[E::ScalarField], + gens: &SparseMatPolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Self { + // transcript.append_protocol_name(PolyEvalNetworkProof::protocol_name()); + + let (proof_prod_layer, rand_mem, rand_ops) = ProductLayerProof::prove( + &mut network.row_layers.prod_layer, + &mut network.col_layers.prod_layer, + dense, + derefs, + evals, + transcript, + ); + + // proof of hash layer for row and col + let proof_hash_layer = + HashLayerProof::prove((&rand_mem, &rand_ops), dense, derefs, gens, transcript); + + PolyEvalNetworkProof { + proof_prod_layer, + proof_hash_layer, } + } + + pub fn verify( + &self, + comm: &SparseMatPolyCommitment, + comm_derefs: &DerefsCommitment, + evals: &[E::ScalarField], + gens: &SparseMatPolyCommitmentGens, + rx: &[E::ScalarField], + ry: &[E::ScalarField], + r_mem_check: &(E::ScalarField, E::ScalarField), + nz: usize, + transcript: &mut PoseidonTranscript, + ) -> Result<(), ProofVerifyError> { + let timer = Timer::new("verify_polyeval_proof"); + // transcript.append_protocol_name(PolyEvalNetworkProof::protocol_name()); + + let num_instances = evals.len(); + let (r_hash, r_multiset_check) = r_mem_check; + + let num_ops = nz.next_power_of_two(); + let num_cells = rx.len().pow2(); + assert_eq!(rx.len(), ry.len()); + + let (claims_mem, rand_mem, mut claims_ops, claims_dotp, rand_ops) = self + .proof_prod_layer + .verify(num_ops, num_cells, evals, transcript)?; + assert_eq!(claims_mem.len(), 4); + assert_eq!(claims_ops.len(), 4 * num_instances); + assert_eq!(claims_dotp.len(), 3 * num_instances); + + let (claims_ops_row, claims_ops_col) = claims_ops.split_at_mut(2 * num_instances); + let 
(claims_ops_row_read, claims_ops_row_write) = claims_ops_row.split_at_mut(num_instances); + let (claims_ops_col_read, claims_ops_col_write) = claims_ops_col.split_at_mut(num_instances); + + // verify the proof of hash layer + self.proof_hash_layer.verify( + (&rand_mem, &rand_ops), + &( + claims_mem[0], + claims_ops_row_read.to_vec(), + claims_ops_row_write.to_vec(), + claims_mem[1], + ), + &( + claims_mem[2], + claims_ops_col_read.to_vec(), + claims_ops_col_write.to_vec(), + claims_mem[3], + ), + &claims_dotp, + comm, + gens, + comm_derefs, + rx, + ry, + r_hash, + r_multiset_check, + transcript, + )?; + timer.stop(); + + Ok(()) + } +} - fn equalize(rx: &[Scalar], ry: &[Scalar]) -> (Vec, Vec) { - match rx.len().cmp(&ry.len()) { - Ordering::Less => { - let diff = ry.len() - rx.len(); - let mut rx_ext = vec![Scalar::zero(); diff]; - rx_ext.extend(rx); - (rx_ext, ry.to_vec()) - } - Ordering::Greater => { - let diff = rx.len() - ry.len(); - let mut ry_ext = vec![Scalar::zero(); diff]; - ry_ext.extend(ry); - (rx.to_vec(), ry_ext) - } - Ordering::Equal => (rx.to_vec(), ry.to_vec()), - } - } +#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct SparseMatPolyEvalProof { + comm_derefs: DerefsCommitment, + poly_eval_network_proof: PolyEvalNetworkProof, +} - pub fn prove( - dense: &MultiSparseMatPolynomialAsDense, - rx: &[Scalar], // point at which the polynomial is evaluated - ry: &[Scalar], - evals: &[Scalar], // a vector evaluation of \widetilde{M}(r = (rx,ry)) for each M - gens: &SparseMatPolyCommitmentGens, - transcript: &mut PoseidonTranscript, - random_tape: &mut RandomTape, - ) -> SparseMatPolyEvalProof { - // transcript.append_protocol_name(SparseMatPolyEvalProof::protocol_name()); - - // ensure there is one eval for each polynomial in dense - assert_eq!(evals.len(), dense.batch_size); - - let (mem_rx, mem_ry) = { - // equalize the lengths of rx and ry - let (rx_ext, ry_ext) = SparseMatPolyEvalProof::equalize(rx, ry); - let poly_rx = EqPolynomial::new(rx_ext).evals(); - let poly_ry = EqPolynomial::new(ry_ext).evals(); - (poly_rx, poly_ry) - }; - - let derefs = dense.deref(&mem_rx, &mem_ry); - - // commit to non-deterministic choices of the prover - let timer_commit = Timer::new("commit_nondet_witness"); - let comm_derefs = { - let comm = derefs.commit(&gens.gens_derefs); - comm.append_to_poseidon(transcript); - comm - }; - timer_commit.stop(); - - let poly_eval_network_proof = { - // produce a random element from the transcript for hash function - let r_mem_check = transcript.challenge_vector(2); - - // build a network to evaluate the sparse polynomial - let timer_build_network = Timer::new("build_layered_network"); - let mut net = PolyEvalNetwork::new( - dense, - &derefs, - &mem_rx, - &mem_ry, - &(r_mem_check[0], r_mem_check[1]), - ); - timer_build_network.stop(); - - let timer_eval_network = Timer::new("evalproof_layered_network"); - let poly_eval_network_proof = PolyEvalNetworkProof::prove( - &mut net, - dense, - &derefs, - evals, - gens, - transcript, - random_tape, - ); - timer_eval_network.stop(); - - poly_eval_network_proof - }; - - SparseMatPolyEvalProof { - comm_derefs, - poly_eval_network_proof, - } +impl SparseMatPolyEvalProof +where + E: Pairing, + E::ScalarField: Absorb, +{ + fn equalize( + rx: &[E::ScalarField], + ry: &[E::ScalarField], + ) -> (Vec, Vec) { + match rx.len().cmp(&ry.len()) { + Ordering::Less => { + let diff = ry.len() - rx.len(); + let mut rx_ext = vec![E::ScalarField::zero(); diff]; + rx_ext.extend(rx); + (rx_ext, ry.to_vec()) + } + 
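// Symmetric case: ry is shorter, so left-pad it with zeros instead.
+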
Ordering::Greater => { + let diff = rx.len() - ry.len(); + let mut ry_ext = vec![E::ScalarField::zero(); diff]; + ry_ext.extend(ry); + (rx.to_vec(), ry_ext) + } + Ordering::Equal => (rx.to_vec(), ry.to_vec()), } - - pub fn verify( - &self, - comm: &SparseMatPolyCommitment, - rx: &[Scalar], // point at which the polynomial is evaluated - ry: &[Scalar], - evals: &[Scalar], // evaluation of \widetilde{M}(r = (rx,ry)) - gens: &SparseMatPolyCommitmentGens, - transcript: &mut PoseidonTranscript, - ) -> Result<(), ProofVerifyError> { - // transcript.append_protocol_name(SparseMatPolyEvalProof::protocol_name()); - - // equalize the lengths of rx and ry - let (rx_ext, ry_ext) = SparseMatPolyEvalProof::equalize(rx, ry); - - let (nz, num_mem_cells) = (comm.num_ops, comm.num_mem_cells); - assert_eq!(rx_ext.len().pow2(), num_mem_cells); - - // add claims to transcript and obtain challenges for randomized mem-check circuit - self.comm_derefs.append_to_poseidon(transcript); - - // produce a random element from the transcript for hash function - let r_mem_check = transcript.challenge_vector(2); - - self.poly_eval_network_proof.verify( - comm, - &self.comm_derefs, - evals, - gens, - &rx_ext, - &ry_ext, - &(r_mem_check[0], r_mem_check[1]), - nz, - transcript, - ) + } + + pub fn prove( + dense: &MultiSparseMatPolynomialAsDense, + rx: &[E::ScalarField], // point at which the polynomial is evaluated + ry: &[E::ScalarField], + evals: &[E::ScalarField], // a vector evaluation of \widetilde{M}(r = (rx,ry)) for each M + gens: &SparseMatPolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> SparseMatPolyEvalProof { + // transcript.append_protocol_name(SparseMatPolyEvalProof::protocol_name()); + + // ensure there is one eval for each polynomial in dense + assert_eq!(evals.len(), dense.batch_size); + + let (mem_rx, mem_ry) = { + // equalize the lengths of rx and ry + let (rx_ext, ry_ext) = SparseMatPolyEvalProof::::equalize(rx, ry); + let poly_rx = EqPolynomial::new(rx_ext).evals(); + let poly_ry = EqPolynomial::new(ry_ext).evals(); + (poly_rx, poly_ry) + }; + + let derefs = dense.deref(&mem_rx, &mem_ry); + + // commit to non-deterministic choices of the prover + let timer_commit = Timer::new("commit_nondet_witness"); + let comm_derefs = { + let comm = derefs.commit(&gens.gens_derefs); + comm.write_to_transcript(transcript); + comm + }; + timer_commit.stop(); + + let poly_eval_network_proof = { + // produce a random element from the transcript for hash function + let r_mem_check = transcript.challenge_scalar_vec(b"", 2); + + // build a network to evaluate the sparse polynomial + let timer_build_network = Timer::new("build_layered_network"); + let mut net = PolyEvalNetwork::new( + dense, + &derefs, + &mem_rx, + &mem_ry, + &(r_mem_check[0], r_mem_check[1]), + ); + timer_build_network.stop(); + + let timer_eval_network = Timer::new("evalproof_layered_network"); + let poly_eval_network_proof = + PolyEvalNetworkProof::prove(&mut net, dense, &derefs, evals, gens, transcript); + timer_eval_network.stop(); + + poly_eval_network_proof + }; + + SparseMatPolyEvalProof { + comm_derefs, + poly_eval_network_proof, } + } + + pub fn verify( + &self, + comm: &SparseMatPolyCommitment, + rx: &[E::ScalarField], // point at which the polynomial is evaluated + ry: &[E::ScalarField], + evals: &[E::ScalarField], // evaluation of \widetilde{M}(r = (rx,ry)) + gens: &SparseMatPolyCommitmentGens, + transcript: &mut PoseidonTranscript, + ) -> Result<(), ProofVerifyError> { + // 
transcript.append_protocol_name(SparseMatPolyEvalProof::protocol_name()); + + // equalize the lengths of rx and ry + let (rx_ext, ry_ext) = SparseMatPolyEvalProof::::equalize(rx, ry); + + let (nz, num_mem_cells) = (comm.num_ops, comm.num_mem_cells); + assert_eq!(rx_ext.len().pow2(), num_mem_cells); + + // add claims to transcript and obtain challenges for randomized mem-check circuit + self.comm_derefs.write_to_transcript(transcript); + + // produce a random element from the transcript for hash function + let r_mem_check = transcript.challenge_scalar_vec(b"", 2); + + self.poly_eval_network_proof.verify( + comm, + &self.comm_derefs, + evals, + gens, + &rx_ext, + &ry_ext, + &(r_mem_check[0], r_mem_check[1]), + nz, + transcript, + ) + } } #[derive(Clone)] -pub struct SparsePolyEntry { - pub idx: usize, - pub val: Scalar, +pub struct SparsePolyEntry { + pub idx: usize, + pub val: F, } -impl SparsePolyEntry { - pub fn new(idx: usize, val: Scalar) -> Self { - SparsePolyEntry { idx, val } - } +impl SparsePolyEntry { + pub fn new(idx: usize, val: F) -> Self { + SparsePolyEntry { idx, val } + } } #[derive(Clone)] -pub struct SparsePolynomial { - pub num_vars: usize, - pub Z: Vec, +pub struct SparsePolynomial { + pub num_vars: usize, + pub Z: Vec>, } -impl SparsePolynomial { - pub fn new(num_vars: usize, Z: Vec) -> Self { - SparsePolynomial { num_vars, Z } - } - - // TF IS THIS?? - - fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar { - assert_eq!(a.len(), r.len()); - let mut chi_i = Scalar::one(); - for j in 0..r.len() { - if a[j] { - chi_i *= r[j]; - } else { - chi_i *= Scalar::one() - r[j]; - } - } - chi_i - } - - // Takes O(n log n). TODO: do this in O(n) where n is the number of entries in Z - pub fn evaluate(&self, r: &[Scalar]) -> Scalar { - assert_eq!(self.num_vars, r.len()); - - (0..self.Z.len()) - .map(|i| { - let bits = self.Z[i].idx.get_bits(r.len()); - println!("{:?}", bits); - SparsePolynomial::compute_chi(&bits, r) * self.Z[i].val - }) - .sum() - } +impl SparsePolynomial { + pub fn new(num_vars: usize, Z: Vec>) -> Self { + SparsePolynomial { num_vars, Z } + } + + // fn compute_chi(a: &[bool], r: &[F]) -> F { + // assert_eq!(a.len(), r.len()); + // let mut chi_i = F::one(); + // for j in 0..r.len() { + // if a[j] { + // chi_i *= r[j]; + // } else { + // chi_i *= F::one() - r[j]; + // } + // } + // chi_i + // } + + // // Takes O(n log n). 
TODO: do this in O(n) where n is the number of entries in Z + // pub fn evaluate(&self, r: &[F]) -> F { + // assert_eq!(self.num_vars, r.len()); + + // (0..self.Z.len()) + // .map(|i| { + // let bits = self.Z[i].idx.get_bits(r.len()); + // println!("{:?}", bits); + // SparsePolynomial::compute_chi(&bits, r) * self.Z[i].val + // }) + // .sum() + // } } #[cfg(test)] mod tests { - use crate::parameters::poseidon_params; - - use super::*; - use ark_std::UniformRand; - use rand::RngCore; - - #[test] - fn check_sparse_polyeval_proof() { - let mut rng = ark_std::rand::thread_rng(); - - let num_nz_entries: usize = 256; - let num_rows: usize = 256; - let num_cols: usize = 256; - let num_vars_x: usize = num_rows.log_2(); - let num_vars_y: usize = num_cols.log_2(); - - let mut M: Vec = Vec::new(); - - for _i in 0..num_nz_entries { - M.push(SparseMatEntry::new( - (rng.next_u64() % (num_rows as u64)) as usize, - (rng.next_u64() % (num_cols as u64)) as usize, - Scalar::rand(&mut rng), - )); - } - - let poly_M = SparseMatPolynomial::new(num_vars_x, num_vars_y, M); - let gens = SparseMatPolyCommitmentGens::new( - b"gens_sparse_poly", - num_vars_x, - num_vars_y, - num_nz_entries, - 3, - ); - - // commitment - let (poly_comm, dense) = - SparseMatPolynomial::multi_commit(&[&poly_M, &poly_M, &poly_M], &gens); - - // evaluation - let rx: Vec = (0..num_vars_x) - .map(|_i| Scalar::rand(&mut rng)) - .collect::>(); - let ry: Vec = (0..num_vars_y) - .map(|_i| Scalar::rand(&mut rng)) - .collect::>(); - let eval = SparseMatPolynomial::multi_evaluate(&[&poly_M], &rx, &ry); - let evals = vec![eval[0], eval[0], eval[0]]; - - let params = poseidon_params(); - let mut random_tape = RandomTape::new(b"proof"); - let mut prover_transcript = PoseidonTranscript::new(¶ms); - let proof = SparseMatPolyEvalProof::prove( - &dense, - &rx, - &ry, - &evals, - &gens, - &mut prover_transcript, - &mut random_tape, - ); - - let mut verifier_transcript = PoseidonTranscript::new(¶ms); - assert!(proof - .verify( - &poly_comm, - &rx, - &ry, - &evals, - &gens, - &mut verifier_transcript, - ) - .is_ok()); + use crate::parameters::poseidon_params; + + use super::*; + use ark_std::UniformRand; + use rand::RngCore; + + type F = ark_bls12_377::Fr; + type E = ark_bls12_377::Bls12_377; + #[test] + fn check_sparse_polyeval_proof() { + let mut rng = ark_std::rand::thread_rng(); + + let num_nz_entries: usize = 256; + let num_rows: usize = 256; + let num_cols: usize = 256; + let num_vars_x: usize = num_rows.log_2(); + let num_vars_y: usize = num_cols.log_2(); + + let mut M: Vec> = Vec::new(); + + for _i in 0..num_nz_entries { + M.push(SparseMatEntry::new( + (rng.next_u64() % (num_rows as u64)) as usize, + (rng.next_u64() % (num_cols as u64)) as usize, + F::rand(&mut rng), + )); } + + let poly_M = SparseMatPolynomial::new(num_vars_x, num_vars_y, M); + let gens = SparseMatPolyCommitmentGens::::setup( + b"gens_sparse_poly", + num_vars_x, + num_vars_y, + num_nz_entries, + 3, + ); + + // commitment + let (poly_comm, dense) = SparseMatPolynomial::multi_commit(&[&poly_M, &poly_M, &poly_M], &gens); + + // evaluation + let rx: Vec = (0..num_vars_x) + .map(|_i| F::rand(&mut rng)) + .collect::>(); + let ry: Vec = (0..num_vars_y) + .map(|_i| F::rand(&mut rng)) + .collect::>(); + let eval = SparseMatPolynomial::multi_evaluate(&[&poly_M], &rx, &ry); + let evals = vec![eval[0], eval[0], eval[0]]; + + let params = poseidon_params(); + let mut prover_transcript = PoseidonTranscript::new(¶ms); + let proof = + SparseMatPolyEvalProof::prove(&dense, &rx, &ry, &evals, &gens, 
&mut prover_transcript);
+
+    let mut verifier_transcript = PoseidonTranscript::new(&params);
+    assert!(proof
+      .verify(
+        &poly_comm,
+        &rx,
+        &ry,
+        &evals,
+        &gens,
+        &mut verifier_transcript,
+      )
+      .is_ok());
+  }
}
diff --git a/src/sqrt_pst.rs b/src/sqrt_pst.rs
new file mode 100644
index 0000000..e4aec37
--- /dev/null
+++ b/src/sqrt_pst.rs
@@ -0,0 +1,343 @@
+use crate::mipp::MippProof;
+use ark_ec::{pairing::Pairing, scalar_mul::variable_base::VariableBaseMSM, CurveGroup};
+use ark_ff::One;
+use ark_poly_commit::multilinear_pc::{
+  data_structures::{Commitment, CommitterKey, Proof, VerifierKey},
+  MultilinearPC,
+};
+use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator};
+
+use crate::{
+  dense_mlpoly::DensePolynomial, math::Math, poseidon_transcript::PoseidonTranscript, timer::Timer,
+};
+
+pub struct Polynomial<E: Pairing> {
+  m: usize,
+  odd: usize,
+  polys: Vec<DensePolynomial<E::ScalarField>>,
+  q: Option<DensePolynomial<E::ScalarField>>,
+  chis_b: Option<Vec<E::ScalarField>>,
+}
+
+impl<E: Pairing> Polynomial<E> {
+  // Given the evaluations over the boolean hypercube of a polynomial p in
+  // n variables, compute the 2^m sqrt-sized polynomials p_i as
+  //   p_i(X) = \sum_{j \in \{0,1\}^m} p(j, i) * \chi_j(X)
+  // where p(X,Y) = \sum_{i \in \{0,1\}^m}
+  //   (\sum_{j \in \{0,1\}^m} p(j, i) * \chi_j(X)) * \chi_i(Y)
+  // and m = n/2.
+  // To handle the case in which n is odd, each sqrt-sized polynomial gets one
+  // extra variable (i.e. it has 2^{m+1} evaluations) while the number of
+  // polynomials remains the same (i.e. 2^m).
+  pub fn from_evaluations(Z: &[E::ScalarField]) -> Self {
+    let pl_timer = Timer::new("poly_list_build");
+    // check the evaluation list is a power of 2
+    debug_assert!(Z.len() & (Z.len() - 1) == 0);
+
+    let num_vars = Z.len().log_2();
+    let m_col = num_vars / 2;
+    let m_row = if num_vars % 2 == 0 {
+      num_vars / 2
+    } else {
+      num_vars / 2 + 1
+    };
+
+    let pow_m_col = 2_usize.pow(m_col as u32);
+    let pow_m_row = 2_usize.pow(m_row as u32);
+
+    let polys: Vec<DensePolynomial<E::ScalarField>> = (0..pow_m_col)
+      .into_par_iter()
+      .map(|i| {
+        let z: Vec<E::ScalarField> = (0..pow_m_row)
+          .into_par_iter()
+          // viewing the list of evaluations as a square matrix,
+          // we select by row j and column i;
+          // to handle the odd case, we add another row to the matrix, i.e.
+          // we add an extra variable to the polynomials while keeping their
+          // number the same
+          .map(|j| Z[(j << m_col) | i])
+          .collect();
+        DensePolynomial::new(z)
+      })
+      .collect();
+
+    debug_assert!(polys.len() == pow_m_col);
+    debug_assert!(polys[0].len == pow_m_row);
+
+    pl_timer.stop();
+    Self {
+      m: m_col,
+      odd: if num_vars % 2 == 1 { 1 } else { 0 },
+      polys,
+      q: None,
+      chis_b: None,
+    }
+  }
+
+  // Given point = (\vec{a}, \vec{b}), compute the polynomial q as
+  // q(Y) =
+  //   \sum_{j \in \{0,1\}^m}(\sum_{i \in \{0,1\}^m} p(j,i) * \chi_i(b)) * \chi_j(Y)
+  // and p(a,b) = q(a) where p is the initial polynomial
+  fn get_q(&mut self, point: &[E::ScalarField]) {
+    let q_timer = Timer::new("build_q");
+
+    debug_assert!(point.len() == 2 * self.m + self.odd);
+    let b = &point[self.m + self.odd..];
+    let pow_m = 2_usize.pow(self.m as u32);
+
+    let chis: Vec<E::ScalarField> = (0..pow_m)
+      .into_par_iter()
+      .map(|i| Self::get_chi_i(b, i))
+      .collect();
+
+    let z_q: Vec<E::ScalarField> = (0..(pow_m * 2_usize.pow(self.odd as u32)))
+      .into_par_iter()
+      .map(|j| (0..pow_m).map(|i| self.polys[i].Z[j] * chis[i]).sum())
+      .collect();
+    q_timer.stop();
+
+    self.q = Some(DensePolynomial::new(z_q));
+    self.chis_b = Some(chis);
+  }
+
+  // Given point = (\vec{a}, \vec{b}) used to construct q,
+  // compute q(a) = p(a,b).
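+  // Since p(a,b) = q(a) = \sum_j q_j * \chi_j(a), eval only needs the
+  // evaluations q_j of q over the hypercube, computed lazily via get_q
+  // below if they are missing.
+  // For example, for a 2-variable point x = (x_0, x_1) the chi values are
+  //   \chi_0(x) = (1-x_0)(1-x_1), \chi_1(x) = (1-x_0)*x_1,
+  //   \chi_2(x) = x_0*(1-x_1),    \chi_3(x) = x_0*x_1,
+  // matching the msb-to-lsb bit order used by get_chi_i.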
+ pub fn eval(&mut self, point: &[E::ScalarField]) -> E::ScalarField { + let a = &point[0..point.len() / 2 + self.odd]; + if self.q.is_none() { + self.get_q(point); + } + let q = self.q.clone().unwrap(); + (0..q.Z.len()) + .into_par_iter() + .map(|j| q.Z[j] * Polynomial::::get_chi_i(&a, j)) + .sum() + } + + pub fn commit(&self, ck: &CommitterKey) -> (Vec>, E::TargetField) { + let timer_commit = Timer::new("sqrt_commit"); + let timer_list = Timer::new("comm_list"); + // commit to each of the sqrt sized p_i + let comm_list: Vec> = self + .polys + .par_iter() + .map(|p| MultilinearPC::::commit(&ck, p)) + .collect(); + timer_list.stop(); + + let h_vec = ck.powers_of_h[self.odd].clone(); + assert!(comm_list.len() == h_vec.len()); + + let ipp_timer = Timer::new("ipp"); + let left_pairs: Vec<_> = comm_list + .clone() + .into_par_iter() + .map(|c| E::G1Prepared::from(c.g_product)) + .collect(); + let right_pairs: Vec<_> = h_vec + .into_par_iter() + .map(|h| E::G2Prepared::from(h)) + .collect(); + + // compute the IPP commitment + let t = E::multi_pairing(left_pairs, right_pairs).0; + ipp_timer.stop(); + + timer_commit.stop(); + + (comm_list, t) + } + + // computes \chi_i(\vec{b}) = \prod_{i_j = 0}(1 - b_j)\prod_{i_j = 1}(b_j) + pub fn get_chi_i(b: &[E::ScalarField], i: usize) -> E::ScalarField { + let m = b.len(); + let mut prod = E::ScalarField::one(); + for j in 0..m { + let b_j = b[j]; + // iterate from first (msb) to last (lsb) bit of i + // to build chi_i using the formula above + if i >> (m - j - 1) & 1 == 1 { + prod = prod * b_j; + } else { + prod = prod * (E::ScalarField::one() - b_j) + }; + } + prod + } + + pub fn open( + &mut self, + transcript: &mut PoseidonTranscript, + comm_list: Vec>, + ck: &CommitterKey, + point: &[E::ScalarField], + t: &E::TargetField, + ) -> (Commitment, Proof, MippProof) { + let a = &point[0..self.m + self.odd]; + if self.q.is_none() { + self.get_q(point); + } + + let q = self.q.clone().unwrap(); + + let timer_open = Timer::new("sqrt_open"); + + // Compute the PST commitment to q obtained as the inner products of the + // commitments to the polynomials p_i and chi_i(\vec{b}) for i ranging over + // the boolean hypercube of size m. 
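+    // Since q = \sum_i \chi_i(b) * p_i, the homomorphism of the commitment
+    // scheme gives U = \sum_i \chi_i(b) * C_i, where C_i is the commitment
+    // to p_i; the MSM below computes exactly this sum, and the debug_assert
+    // checks it against a commitment to q computed from scratch.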
+ let timer_msm = Timer::new("msm"); + if self.chis_b.is_none() { + panic!("chis(b) should have been computed for q"); + } + // TODO remove that cloning - the whole option thing + let chis = self.chis_b.clone().unwrap(); + assert!(chis.len() == comm_list.len()); + + let comms: Vec<_> = comm_list.par_iter().map(|c| c.g_product).collect(); + + let c_u = ::msm_unchecked(&comms, &chis).into_affine(); + timer_msm.stop(); + + let U: Commitment = Commitment { + nv: q.num_vars, + g_product: c_u, + }; + let comm = MultilinearPC::::commit(ck, &q); + debug_assert!(c_u == comm.g_product); + let h_vec = ck.powers_of_h[self.odd].clone(); + + // construct MIPP proof that U is the inner product of the vector A + // and the vector y, where A is the opening vector to T + let timer_mipp_proof = Timer::new("mipp_prove"); + let mipp_proof = + MippProof::::prove(transcript, ck, comms, chis.to_vec(), h_vec, &c_u, t).unwrap(); + timer_mipp_proof.stop(); + + let timer_proof = Timer::new("pst_open"); + + // reversing a is necessary because the sumcheck code in spartan generates + // the point in reverse order compared to how the polynomial commitment + // expects it + let mut a_rev = a.to_vec().clone(); + a_rev.reverse(); + + // construct PST proof for opening q at a + let pst_proof = MultilinearPC::::open(ck, &q, &a_rev); + timer_proof.stop(); + + timer_open.stop(); + (U, pst_proof, mipp_proof) + } + + pub fn verify( + transcript: &mut PoseidonTranscript, + vk: &VerifierKey, + U: &Commitment, + point: &[E::ScalarField], + v: E::ScalarField, + pst_proof: &Proof, + mipp_proof: &MippProof, + T: &E::TargetField, + ) -> bool { + let len = point.len(); + let odd = if len % 2 == 1 { 1 } else { 0 }; + let a = &point[0..len / 2 + odd]; + let b = &point[len / 2 + odd..len]; + + let timer_mipp_verify = Timer::new("mipp_verify"); + // verify that U = A^y where A is the opening vector of T + let res_mipp = MippProof::::verify(vk, transcript, mipp_proof, b.to_vec(), &U.g_product, T); + assert!(res_mipp == true); + timer_mipp_verify.stop(); + + // reversing a is necessary because the sumcheck code in spartan generates + // the point in reverse order compared to how the polynomial commitment + // expects + let mut a_rev = a.to_vec().clone(); + a_rev.reverse(); + + let timer_pst_verify = Timer::new("pst_verify"); + // PST proof that q(a) is indeed equal to value claimed by the prover + let res = MultilinearPC::::check(vk, U, &a_rev, v, pst_proof); + timer_pst_verify.stop(); + res + } +} + +#[cfg(test)] +mod tests { + + use crate::parameters::poseidon_params; + + use super::*; + type F = ark_bls12_377::Fr; + type E = ark_bls12_377::Bls12_377; + + use ark_std::UniformRand; + #[test] + fn check_sqrt_poly_eval() { + let mut rng = ark_std::test_rng(); + let num_vars = 6; + let len = 2_usize.pow(num_vars); + let Z: Vec = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect(); + let r: Vec = (0..num_vars) + .into_iter() + .map(|_| F::rand(&mut rng)) + .collect(); + + let p = DensePolynomial::new(Z.clone()); + let res1 = p.evaluate(&r); + + let mut pl = Polynomial::::from_evaluations(&Z.clone()); + let res2 = pl.eval(&r); + + assert!(res1 == res2); + } + + #[test] + fn check_commit() { + // check odd case + check_sqrt_poly_commit(5); + + // check even case + check_sqrt_poly_commit(6); + } + + fn check_sqrt_poly_commit(num_vars: u32) { + let mut rng = ark_std::test_rng(); + let len = 2_usize.pow(num_vars); + let Z: Vec = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect(); + let r: Vec = (0..num_vars) + .into_iter() + .map(|_| 
F::rand(&mut rng)) + .collect(); + + let gens = MultilinearPC::::setup(3, &mut rng); + let (ck, vk) = MultilinearPC::::trim(&gens, 3); + + let mut pl = Polynomial::from_evaluations(&Z.clone()); + + let v = pl.eval(&r); + + let (comm_list, t) = pl.commit(&ck); + + let params = poseidon_params(); + let mut prover_transcript = PoseidonTranscript::new(¶ms); + + let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t); + + let mut verifier_transcript = PoseidonTranscript::new(¶ms); + + let res = Polynomial::verify( + &mut verifier_transcript, + &vk, + &u, + &r, + v, + &pst_proof, + &mipp_proof, + &t, + ); + assert!(res == true); + } +} diff --git a/src/sumcheck.rs b/src/sumcheck.rs index e657e2b..8c728e0 100644 --- a/src/sumcheck.rs +++ b/src/sumcheck.rs @@ -1,915 +1,432 @@ #![allow(clippy::too_many_arguments)] #![allow(clippy::type_complexity)] -use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; - use super::dense_mlpoly::DensePolynomial; use super::errors::ProofVerifyError; +use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter}; +use crate::transcript::Transcript; -use super::scalar::Scalar; use super::unipoly::UniPoly; +use ark_crypto_primitives::sponge::Absorb; +use ark_ff::PrimeField; -use ark_ff::Zero; use ark_serialize::*; use itertools::izip; #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct SumcheckInstanceProof { - pub polys: Vec, +pub struct SumcheckInstanceProof { + pub polys: Vec>, } -impl SumcheckInstanceProof { - pub fn new(polys: Vec) -> SumcheckInstanceProof { - SumcheckInstanceProof { polys } - } - - pub fn verify( - &self, - claim: Scalar, - num_rounds: usize, - degree_bound: usize, - transcript: &mut PoseidonTranscript, - ) -> Result<(Scalar, Vec), ProofVerifyError> { - let mut e = claim; - let mut r: Vec = Vec::new(); +impl SumcheckInstanceProof { + pub fn new(polys: Vec>) -> Self { + SumcheckInstanceProof { polys } + } - // verify that there is a univariate polynomial for each round - assert_eq!(self.polys.len(), num_rounds); - for i in 0..self.polys.len() { - let poly = self.polys[i].clone(); + pub fn verify( + &self, + claim: F, + num_rounds: usize, + degree_bound: usize, + transcript: &mut PoseidonTranscript, + ) -> Result<(F, Vec), ProofVerifyError> { + let mut e = claim; + let mut r: Vec = Vec::new(); - // verify degree bound - assert_eq!(poly.degree(), degree_bound); - // check if G_k(0) + G_k(1) = e + // verify that there is a univariate polynomial for each round + assert_eq!(self.polys.len(), num_rounds); + for i in 0..self.polys.len() { + let poly = self.polys[i].clone(); - assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); + // verify degree bound + assert_eq!(poly.degree(), degree_bound); + // check if G_k(0) + G_k(1) = e - // append the prover's message to the transcript - poly.append_to_poseidon(transcript); + assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); - //derive the verifier's challenge for the next round - let r_i = transcript.challenge_scalar(); + // append the prover's message to the transcript + poly.write_to_transcript(transcript); - r.push(r_i); + //derive the verifier's challenge for the next round + let r_i = transcript.challenge_scalar(b""); - // evaluate the claimed degree-ell polynomial at r_i - e = poly.evaluate(&r_i); - } + r.push(r_i); - Ok((e, r)) + // evaluate the claimed degree-ell polynomial at r_i + e = poly.evaluate(&r_i); } + + Ok((e, r)) + } } -// #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -// pub struct 
ZKSumcheckInstanceProof { -// comm_polys: Vec, -// comm_evals: Vec, -// proofs: Vec, -// } - -// impl ZKSumcheckInstanceProof { -// pub fn new( -// comm_polys: Vec, -// comm_evals: Vec, -// proofs: Vec, -// ) -> Self { -// ZKSumcheckInstanceProof { -// comm_polys, -// comm_evals, -// proofs, -// } -// } - -// pub fn verify( -// &self, -// comm_claim: &CompressedGroup, -// num_rounds: usize, -// degree_bound: usize, -// gens_1: &MultiCommitGens, -// gens_n: &MultiCommitGens, -// transcript: &mut Transcript, -// ) -> Result<(CompressedGroup, Vec), ProofVerifyError> { -// // verify degree bound -// assert_eq!(gens_n.n, degree_bound + 1); - -// // verify that there is a univariate polynomial for each round -// assert_eq!(self.comm_polys.len(), num_rounds); -// assert_eq!(self.comm_evals.len(), num_rounds); - -// let mut r: Vec = Vec::new(); -// for i in 0..self.comm_polys.len() { -// let comm_poly = &self.comm_polys[i]; - -// // append the prover's polynomial to the transcript -// comm_poly.append_to_transcript(b"comm_poly", transcript); - -// //derive the verifier's challenge for the next round -// let r_i = transcript.challenge_scalar(b"challenge_nextround"); - -// // verify the proof of sum-check and evals -// let res = { -// let comm_claim_per_round = if i == 0 { -// comm_claim -// } else { -// &self.comm_evals[i - 1] -// }; -// let mut comm_eval = &self.comm_evals[i]; - -// // add two claims to transcript -// comm_claim_per_round.append_to_transcript(transcript); -// comm_eval.append_to_transcript(transcript); - -// // produce two weights -// let w = transcript.challenge_vector(2); - -// // compute a weighted sum of the RHS -// let comm_target = GroupElement::vartime_multiscalar_mul( -// w.as_slice(), -// iter::once(&comm_claim_per_round) -// .chain(iter::once(&comm_eval)) -// .map(|pt| GroupElement::decompress(pt).unwrap()) -// .collect::>() -// .as_slice(), -// ) -// .compress(); - -// let a = { -// // the vector to use to decommit for sum-check test -// let a_sc = { -// let mut a = vec![Scalar::one(); degree_bound + 1]; -// a[0] += Scalar::one(); -// a -// }; - -// // the vector to use to decommit for evaluation -// let a_eval = { -// let mut a = vec![Scalar::one(); degree_bound + 1]; -// for j in 1..a.len() { -// a[j] = a[j - 1] * r_i; -// } -// a -// }; - -// // take weighted sum of the two vectors using w -// assert_eq!(a_sc.len(), a_eval.len()); -// (0..a_sc.len()) -// .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) -// .collect::>() -// }; - -// self.proofs[i] -// .verify( -// gens_1, -// gens_n, -// transcript, -// &a, -// &self.comm_polys[i], -// &comm_target, -// ) -// .is_ok() -// }; -// if !res { -// return Err(ProofVerifyError::InternalError); -// } - -// r.push(r_i); -// } - -// Ok((self.comm_evals[&self.comm_evals.len() - 1].clone(), r)) -// } -// } - -impl SumcheckInstanceProof { - pub fn prove_cubic_with_additive_term( - claim: &Scalar, - num_rounds: usize, - poly_tau: &mut DensePolynomial, - poly_A: &mut DensePolynomial, - poly_B: &mut DensePolynomial, - poly_C: &mut DensePolynomial, - comb_func: F, - transcript: &mut PoseidonTranscript, - ) -> (Self, Vec, Vec) - where - F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar, - { - let mut e = *claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec = Vec::new(); - for _j in 0..num_rounds { - let mut eval_point_0 = Scalar::zero(); - let mut eval_point_2 = Scalar::zero(); - let mut eval_point_3 = Scalar::zero(); - - let len = poly_tau.len() / 2; - for i in 0..len { - // eval 0: bound_func is A(low) - eval_point_0 += 
comb_func(&poly_tau[i], &poly_A[i], &poly_B[i], &poly_C[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_tau_bound_point = poly_tau[len + i] + poly_tau[len + i] - poly_tau[i]; - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; - - eval_point_2 += comb_func( - &poly_tau_bound_point, - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_tau_bound_point = poly_tau_bound_point + poly_tau[len + i] - poly_tau[i]; - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; - - eval_point_3 += comb_func( - &poly_tau_bound_point, - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - } - - let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - poly.append_to_poseidon(transcript); - //derive the verifier's challenge for the next round - let r_j = transcript.challenge_scalar(); - r.push(r_j); - - // bound all tables to the verifier's challenege - poly_tau.bound_poly_var_top(&r_j); - poly_A.bound_poly_var_top(&r_j); - poly_B.bound_poly_var_top(&r_j); - poly_C.bound_poly_var_top(&r_j); - - e = poly.evaluate(&r_j); - cubic_polys.push(poly); - } - ( - SumcheckInstanceProof::new(cubic_polys), - r, - vec![poly_tau[0], poly_A[0], poly_B[0], poly_C[0]], - ) +impl SumcheckInstanceProof { + pub fn prove_cubic_with_additive_term( + claim: &F, + num_rounds: usize, + poly_tau: &mut DensePolynomial, + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + comb_func: C, + transcript: &mut PoseidonTranscript, + ) -> (Self, Vec, Vec) + where + C: Fn(&F, &F, &F, &F) -> F, + { + let mut e = *claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + for _j in 0..num_rounds { + let mut eval_point_0 = F::zero(); + let mut eval_point_2 = F::zero(); + let mut eval_point_3 = F::zero(); + + let len = poly_tau.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_tau[i], &poly_A[i], &poly_B[i], &poly_C[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_tau_bound_point = poly_tau[len + i] + poly_tau[len + i] - poly_tau[i]; + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + + eval_point_2 += comb_func( + &poly_tau_bound_point, + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_tau_bound_point = poly_tau_bound_point + poly_tau[len + i] - poly_tau[i]; + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + + eval_point_3 += comb_func( + &poly_tau_bound_point, + &poly_A_bound_point, + 
&poly_B_bound_point, + &poly_C_bound_point, + ); + } + + let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + poly.write_to_transcript(transcript); + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b""); + r.push(r_j); + + // bound all tables to the verifier's challenge + poly_tau.bound_poly_var_top(&r_j); + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + poly_C.bound_poly_var_top(&r_j); + + e = poly.evaluate(&r_j); + cubic_polys.push(poly); } - pub fn prove_cubic( - claim: &Scalar, - num_rounds: usize, - poly_A: &mut DensePolynomial, - poly_B: &mut DensePolynomial, - poly_C: &mut DensePolynomial, - comb_func: F, - transcript: &mut PoseidonTranscript, - ) -> (Self, Vec, Vec) - where - F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar, - { - let mut e = *claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec = Vec::new(); - for _j in 0..num_rounds { - let mut eval_point_0 = Scalar::zero(); - let mut eval_point_2 = Scalar::zero(); - let mut eval_point_3 = Scalar::zero(); - - let len = poly_A.len() / 2; - for i in 0..len { - // eval 0: bound_func is A(low) - eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; - eval_point_2 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; - - eval_point_3 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - } - - let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - poly.append_to_poseidon(transcript); - - //derive the verifier's challenge for the next round - let r_j = transcript.challenge_scalar(); - r.push(r_j); - // bound all tables to the verifier's challenege - poly_A.bound_poly_var_top(&r_j); - poly_B.bound_poly_var_top(&r_j); - poly_C.bound_poly_var_top(&r_j); - e = poly.evaluate(&r_j); - cubic_polys.push(poly); - } - - ( - SumcheckInstanceProof::new(cubic_polys), - r, - vec![poly_A[0], poly_B[0], poly_C[0]], - ) + ( + SumcheckInstanceProof::new(cubic_polys), + r, + vec![poly_tau[0], poly_A[0], poly_B[0], poly_C[0]], + ) + } + pub fn prove_cubic( + claim: &F, + num_rounds: usize, + poly_A: &mut DensePolynomial<F>, + poly_B: &mut DensePolynomial<F>, + poly_C: &mut DensePolynomial<F>, + comb_func: C, + transcript: &mut PoseidonTranscript<F>, + ) -> (Self, Vec<F>, Vec<F>) + where + C: Fn(&F, &F, &F) -> F, + { + let mut e = *claim; + let mut r: Vec<F> = Vec::new(); + let mut cubic_polys: Vec<UniPoly<F>> = Vec::new(); + for _j in 0..num_rounds { + let mut eval_point_0 = F::zero(); + let mut eval_point_2 = F::zero(); + let mut eval_point_3 = F::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], 
&poly_C[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + } + + let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + poly.write_to_transcript(transcript); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b""); + r.push(r_j); + // bound all tables to the verifier's challenge + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + poly_C.bound_poly_var_top(&r_j); + e = poly.evaluate(&r_j); + cubic_polys.push(poly); } - pub fn prove_cubic_batched( - claim: &Scalar, - num_rounds: usize, - poly_vec_par: ( - &mut Vec<&mut DensePolynomial>, - &mut Vec<&mut DensePolynomial>, - &mut DensePolynomial, - ), - poly_vec_seq: ( - &mut Vec<&mut DensePolynomial>, - &mut Vec<&mut DensePolynomial>, - &mut Vec<&mut DensePolynomial>, - ), - coeffs: &[Scalar], - comb_func: F, - transcript: &mut PoseidonTranscript, - ) -> ( - Self, - Vec, - (Vec, Vec, Scalar), - (Vec, Vec, Vec), ) - where - F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar, - { - let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par; - let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; - - //let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; - let mut e = *claim; - let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec = Vec::new(); - - for _j in 0..num_rounds { - let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new(); - - for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) { - let mut eval_point_0 = Scalar::zero(); - let mut eval_point_2 = Scalar::zero(); - let mut eval_point_3 = Scalar::zero(); - - let len = poly_A.len() / 2; - for i in 0..len { - // eval 0: bound_func is A(low) - eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = - poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i]; - eval_point_2 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = - poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i]; - - eval_point_3 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, 
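Each table here is multilinear in the variable being bound, so its restriction satisfies A(t) = A_low + t * (A_high - A_low). The loops above exploit this: the evaluation at t = 2 is high + high - low, and each further point is reached by adding (A_high - A_low) once more. A standalone sketch of that step (illustrative helper, not code from this patch):

fn extrapolate_top_var<F: ark_ff::PrimeField>(low: F, high: F) -> (F, F) {
  let slope = high - low;
  let at_two = high + slope;     // A(2) = 2*high - low
  let at_three = at_two + slope; // A(3) = 3*high - 2*low
  (at_two, at_three)
}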
&poly_C_bound_point, - ); - } - - evals.push((eval_point_0, eval_point_2, eval_point_3)); - } - - for (poly_A, poly_B, poly_C) in izip!( - poly_A_vec_seq.iter(), - poly_B_vec_seq.iter(), - poly_C_vec_seq.iter() - ) { - let mut eval_point_0 = Scalar::zero(); - let mut eval_point_2 = Scalar::zero(); - let mut eval_point_3 = Scalar::zero(); - let len = poly_A.len() / 2; - for i in 0..len { - // eval 0: bound_func is A(low) - eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; - eval_point_2 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; - eval_point_3 += comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - ); - } - evals.push((eval_point_0, eval_point_2, eval_point_3)); - } - - let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); - let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); - let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum(); - - let evals = vec![ - evals_combined_0, - e - evals_combined_0, - evals_combined_2, - evals_combined_3, - ]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - poly.append_to_poseidon(transcript); - - //derive the verifier's challenge for the next round - let r_j = transcript.challenge_scalar(); - r.push(r_j); - - // bound all tables to the verifier's challenege - for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) { - poly_A.bound_poly_var_top(&r_j); - poly_B.bound_poly_var_top(&r_j); - } - poly_C_par.bound_poly_var_top(&r_j); - - for (poly_A, poly_B, poly_C) in izip!( - poly_A_vec_seq.iter_mut(), - poly_B_vec_seq.iter_mut(), - poly_C_vec_seq.iter_mut() - ) { - poly_A.bound_poly_var_top(&r_j); - poly_B.bound_poly_var_top(&r_j); - poly_C.bound_poly_var_top(&r_j); - } - - e = poly.evaluate(&r_j); - cubic_polys.push(poly); + } + + pub fn prove_cubic_batched( + claim: &F, + num_rounds: usize, + poly_vec_par: ( + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut DensePolynomial, + ), + poly_vec_seq: ( + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + ), + coeffs: &[F], + comb_func: C, + transcript: &mut PoseidonTranscript, + ) -> (Self, Vec, (Vec, Vec, F), (Vec, Vec, Vec)) + where + C: Fn(&F, &F, &F) -> F, + { + let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par; + let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; + + //let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; + let mut e = *claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); + + for _j in 0..num_rounds { + let mut evals: Vec<(F, F, F)> = Vec::new(); + + for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) { + let mut eval_point_0 = F::zero(); + let mut eval_point_2 = F::zero(); + let mut eval_point_3 = 
F::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i]; + + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); } - let poly_A_par_final = (0..poly_A_vec_par.len()) - .map(|i| poly_A_vec_par[i][0]) - .collect(); - let poly_B_par_final = (0..poly_B_vec_par.len()) - .map(|i| poly_B_vec_par[i][0]) - .collect(); - let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]); - - let poly_A_seq_final = (0..poly_A_vec_seq.len()) - .map(|i| poly_A_vec_seq[i][0]) - .collect(); - let poly_B_seq_final = (0..poly_B_vec_seq.len()) - .map(|i| poly_B_vec_seq[i][0]) - .collect(); - let poly_C_seq_final = (0..poly_C_vec_seq.len()) - .map(|i| poly_C_vec_seq[i][0]) - .collect(); - let claims_dotp = (poly_A_seq_final, poly_B_seq_final, poly_C_seq_final); - - ( - SumcheckInstanceProof::new(cubic_polys), - r, - claims_prod, - claims_dotp, - ) - } - - pub fn prove_quad( - claim: &Scalar, - num_rounds: usize, - poly_A: &mut DensePolynomial, - poly_B: &mut DensePolynomial, - comb_func: F, - transcript: &mut PoseidonTranscript, - ) -> (Self, Vec, Vec) - where - F: Fn(&Scalar, &Scalar) -> Scalar, - { - let mut e = *claim; - let mut r: Vec = Vec::new(); - let mut quad_polys: Vec = Vec::new(); - - for _j in 0..num_rounds { - let mut eval_point_0 = Scalar::zero(); - let mut eval_point_2 = Scalar::zero(); - - let len = poly_A.len() / 2; - for i in 0..len { - // eval 0: bound_func is A(low) - eval_point_0 += comb_func(&poly_A[i], &poly_B[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point); - } - - let evals = vec![eval_point_0, e - eval_point_0, eval_point_2]; - let poly = UniPoly::from_evals(&evals); - - // append the prover's message to the transcript - poly.append_to_poseidon(transcript); - - //derive the verifier's challenge for the next round - let r_j = transcript.challenge_scalar(); - r.push(r_j); - - // bound all tables to the verifier's challenege - poly_A.bound_poly_var_top(&r_j); - poly_B.bound_poly_var_top(&r_j); - e = poly.evaluate(&r_j); - quad_polys.push(poly); + evals.push((eval_point_0, eval_point_2, eval_point_3)); + } + + for (poly_A, poly_B, poly_C) in izip!( + poly_A_vec_seq.iter(), + poly_B_vec_seq.iter(), + poly_C_vec_seq.iter() + ) { + let mut eval_point_0 = F::zero(); + let mut eval_point_2 = F::zero(); + let mut eval_point_3 = F::zero(); + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + // eval 
2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); } + evals.push((eval_point_0, eval_point_2, eval_point_3)); + } + + let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); + let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum(); + + let evals = vec![ + evals_combined_0, + e - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + poly.write_to_transcript(transcript); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b""); + r.push(r_j); + + // bound all tables to the verifier's challenge + for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) { + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + } + poly_C_par.bound_poly_var_top(&r_j); + + for (poly_A, poly_B, poly_C) in izip!( + poly_A_vec_seq.iter_mut(), + poly_B_vec_seq.iter_mut(), + poly_C_vec_seq.iter_mut() + ) { + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + poly_C.bound_poly_var_top(&r_j); + } + + e = poly.evaluate(&r_j); + cubic_polys.push(poly); + } + + let poly_A_par_final = (0..poly_A_vec_par.len()) + .map(|i| poly_A_vec_par[i][0]) + .collect(); + let poly_B_par_final = (0..poly_B_vec_par.len()) + .map(|i| poly_B_vec_par[i][0]) + .collect(); + let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]); + + let poly_A_seq_final = (0..poly_A_vec_seq.len()) + .map(|i| poly_A_vec_seq[i][0]) + .collect(); + let poly_B_seq_final = (0..poly_B_vec_seq.len()) + .map(|i| poly_B_vec_seq[i][0]) + .collect(); + let poly_C_seq_final = (0..poly_C_vec_seq.len()) + .map(|i| poly_C_vec_seq[i][0]) + .collect(); + let claims_dotp = (poly_A_seq_final, poly_B_seq_final, poly_C_seq_final); + + ( + SumcheckInstanceProof::new(cubic_polys), + r, + claims_prod, + claims_dotp, + ) + } + + pub fn prove_quad( + claim: &F, + num_rounds: usize, + poly_A: &mut DensePolynomial<F>, + poly_B: &mut DensePolynomial<F>, + comb_func: C, + transcript: &mut PoseidonTranscript<F>, + ) -> (Self, Vec<F>, Vec<F>) + where + C: Fn(&F, &F) -> F, + { + let mut e = *claim; + let mut r: Vec<F> = Vec::new(); + let mut quad_polys: Vec<UniPoly<F>> = Vec::new(); + + for _j in 0..num_rounds { + let mut eval_point_0 = F::zero(); + let mut eval_point_2 = F::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let 
poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point); + } + + let evals = vec![eval_point_0, e - eval_point_0, eval_point_2]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + poly.write_to_transcript(transcript); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b""); + r.push(r_j); + + // bound all tables to the verifier's challenge + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + e = poly.evaluate(&r_j); + quad_polys.push(poly); } -} -// impl ZKSumcheckInstanceProof { -// pub fn prove_quad( -// claim: &Scalar, -// blind_claim: &Scalar, -// num_rounds: usize, -// poly_A: &mut DensePolynomial, -// poly_B: &mut DensePolynomial, -// comb_func: F, -// gens_1: &MultiCommitGens, -// gens_n: &MultiCommitGens, -// transcript: &mut Transcript, -// random_tape: &mut RandomTape, -// ) -> (Self, Vec, Vec, Scalar) -// where -// F: Fn(&Scalar, &Scalar) -> Scalar, -// { -// let (blinds_poly, blinds_evals) = ( -// random_tape.random_vector(b"blinds_poly", num_rounds), -// random_tape.random_vector(b"blinds_evals", num_rounds), -// ); -// let mut claim_per_round = *claim; -// let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); - -// let mut r: Vec = Vec::new(); -// let mut comm_polys: Vec = Vec::new(); -// let mut comm_evals: Vec = Vec::new(); -// let mut proofs: Vec = Vec::new(); - -// for j in 0..num_rounds { -// let (poly, comm_poly) = { -// let mut eval_point_0 = Scalar::zero(); -// let mut eval_point_2 = Scalar::zero(); - -// let len = poly_A.len() / 2; -// for i in 0..len { -// // eval 0: bound_func is A(low) -// eval_point_0 += comb_func(&poly_A[i], &poly_B[i]); - -// // eval 2: bound_func is -A(low) + 2*A(high) -// let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; -// let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; -// eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point); -// } - -// let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; -// let poly = UniPoly::from_evals(&evals); -// let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress(); -// (poly, comm_poly) -// }; - -// // append the prover's message to the transcript -// comm_poly.append_to_transcript(b"comm_poly", transcript); -// comm_polys.push(comm_poly); - -// //derive the verifier's challenge for the next round -// let r_j = transcript.challenge_scalar(b"challenge_nextround"); - -// // bound all tables to the verifier's challenege -// poly_A.bound_poly_var_top(&r_j); -// poly_B.bound_poly_var_top(&r_j); - -// // produce a proof of sum-check and of evaluation -// let (proof, claim_next_round, comm_claim_next_round) = { -// let eval = poly.evaluate(&r_j); -// let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress(); - -// // we need to prove the following under homomorphic commitments: -// // (1) poly(0) + poly(1) = claim_per_round -// // (2) poly(r_j) = eval - -// // Our technique is to leverage dot product proofs: -// // (1) we can prove: = claim_per_round -// // (2) we can prove: >() -// .as_slice(), -// ) -// .compress(); - -// let blind = { -// let blind_sc = if j == 0 { -// blind_claim -// } else { -// &blinds_evals[j - 1] -// }; - -// let blind_eval = &blinds_evals[j]; - -// w[0] * blind_sc + w[1] * blind_eval -// }; -// assert_eq!(target.commit(&blind, gens_1).compress(), comm_target); - -// let a 
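As a concrete use of prove_quad: an inner-product claim sum_x A(x) * B(x) = claim is proved by passing multiplication as the combining function. A hypothetical call site (variable names assumed for illustration, not taken from this patch):

let comb_func = |a: &F, b: &F| -> F { *a * *b };
let (proof, r, final_evals) = SumcheckInstanceProof::prove_quad(
  &claim,
  num_vars,        // one sumcheck round per variable
  &mut poly_a,     // DensePolynomial<F>, bound in place round by round
  &mut poly_b,
  comb_func,
  &mut transcript, // PoseidonTranscript<F>
);
// final_evals == vec![A(r), B(r)] lets the verifier finish the last round's check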
= { -// // the vector to use to decommit for sum-check test -// let a_sc = { -// let mut a = vec![Scalar::one(); poly.degree() + 1]; -// a[0] += Scalar::one(); -// a -// }; - -// // the vector to use to decommit for evaluation -// let a_eval = { -// let mut a = vec![Scalar::one(); poly.degree() + 1]; -// for j in 1..a.len() { -// a[j] = a[j - 1] * r_j; -// } -// a -// }; - -// // take weighted sum of the two vectors using w -// assert_eq!(a_sc.len(), a_eval.len()); -// (0..a_sc.len()) -// .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) -// .collect::>() -// }; - -// let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( -// gens_1, -// gens_n, -// transcript, -// random_tape, -// &poly.as_vec(), -// &blinds_poly[j], -// &a, -// &target, -// &blind, -// ); - -// (proof, eval, comm_eval) -// }; - -// claim_per_round = claim_next_round; -// comm_claim_per_round = comm_claim_next_round; - -// proofs.push(proof); -// r.push(r_j); -// comm_evals.push(comm_claim_per_round.clone()); -// } - -// ( -// ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), -// r, -// vec![poly_A[0], poly_B[0]], -// blinds_evals[num_rounds - 1], -// ) -// } - -// pub fn prove_cubic_with_additive_term( -// claim: &Scalar, -// blind_claim: &Scalar, -// num_rounds: usize, -// poly_A: &mut DensePolynomial, -// poly_B: &mut DensePolynomial, -// poly_C: &mut DensePolynomial, -// poly_D: &mut DensePolynomial, -// comb_func: F, -// gens_1: &MultiCommitGens, -// gens_n: &MultiCommitGens, -// transcript: &mut Transcript, -// random_tape: &mut RandomTape, -// ) -> (Self, Vec, Vec, Scalar) -// where -// F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar, -// { -// let (blinds_poly, blinds_evals) = ( -// random_tape.random_vector(b"blinds_poly", num_rounds), -// random_tape.random_vector(b"blinds_evals", num_rounds), -// ); - -// let mut claim_per_round = *claim; -// let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); - -// let mut r: Vec = Vec::new(); -// let mut comm_polys: Vec = Vec::new(); -// let mut comm_evals: Vec = Vec::new(); -// let mut proofs: Vec = Vec::new(); - -// for j in 0..num_rounds { -// let (poly, comm_poly) = { -// let mut eval_point_0 = Scalar::zero(); -// let mut eval_point_2 = Scalar::zero(); -// let mut eval_point_3 = Scalar::zero(); - -// let len = poly_A.len() / 2; -// for i in 0..len { -// // eval 0: bound_func is A(low) -// eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); - -// // eval 2: bound_func is -A(low) + 2*A(high) -// let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; -// let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; -// let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; -// let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; -// eval_point_2 += comb_func( -// &poly_A_bound_point, -// &poly_B_bound_point, -// &poly_C_bound_point, -// &poly_D_bound_point, -// ); - -// // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) -// let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; -// let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; -// let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; -// let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; -// eval_point_3 += comb_func( -// &poly_A_bound_point, -// &poly_B_bound_point, -// &poly_C_bound_point, -// &poly_D_bound_point, -// ); -// } - -// let 
evals = vec![ -// eval_point_0, -// claim_per_round - eval_point_0, -// eval_point_2, -// eval_point_3, -// ]; -// let poly = UniPoly::from_evals(&evals); -// let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress(); -// (poly, comm_poly) -// }; - -// // append the prover's message to the transcript -// comm_poly.append_to_transcript(b"comm_poly", transcript); -// comm_polys.push(comm_poly); - -// //derive the verifier's challenge for the next round -// let r_j = transcript.challenge_scalar(b"challenge_nextround"); - -// // bound all tables to the verifier's challenege -// poly_A.bound_poly_var_top(&r_j); -// poly_B.bound_poly_var_top(&r_j); -// poly_C.bound_poly_var_top(&r_j); -// poly_D.bound_poly_var_top(&r_j); - -// // produce a proof of sum-check and of evaluation -// let (proof, claim_next_round, comm_claim_next_round) = { -// let eval = poly.evaluate(&r_j); -// let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress(); - -// // we need to prove the following under homomorphic commitments: -// // (1) poly(0) + poly(1) = claim_per_round -// // (2) poly(r_j) = eval - -// // Our technique is to leverage dot product proofs: -// // (1) we can prove: = claim_per_round -// // (2) we can prove: >() -// .as_slice(), -// ) -// .compress(); - -// let blind = { -// let blind_sc = if j == 0 { -// blind_claim -// } else { -// &blinds_evals[j - 1] -// }; - -// let blind_eval = &blinds_evals[j]; - -// w[0] * blind_sc + w[1] * blind_eval -// }; - -// let res = target.commit(&blind, gens_1); - -// assert_eq!(res.compress(), comm_target); - -// let a = { -// // the vector to use to decommit for sum-check test -// let a_sc = { -// let mut a = vec![Scalar::one(); poly.degree() + 1]; -// a[0] += Scalar::one(); -// a -// }; - -// // the vector to use to decommit for evaluation -// let a_eval = { -// let mut a = vec![Scalar::one(); poly.degree() + 1]; -// for j in 1..a.len() { -// a[j] = a[j - 1] * r_j; -// } -// a -// }; - -// // take weighted sum of the two vectors using w -// assert_eq!(a_sc.len(), a_eval.len()); -// (0..a_sc.len()) -// .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) -// .collect::>() -// }; - -// let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( -// gens_1, -// gens_n, -// transcript, -// random_tape, -// &poly.as_vec(), -// &blinds_poly[j], -// &a, -// &target, -// &blind, -// ); - -// (proof, eval, comm_eval) -// }; - -// proofs.push(proof); -// claim_per_round = claim_next_round; -// comm_claim_per_round = comm_claim_next_round; -// r.push(r_j); -// comm_evals.push(comm_claim_per_round.clone()); -// } - -// ( -// ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), -// r, -// vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]], -// blinds_evals[num_rounds - 1], -// ) -// } -// } + ( + SumcheckInstanceProof::new(quad_polys), + r, + vec![poly_A[0], poly_B[0]], + ) + } +} diff --git a/src/testudo_nizk.rs b/src/testudo_nizk.rs new file mode 100644 index 0000000..a456a60 --- /dev/null +++ b/src/testudo_nizk.rs @@ -0,0 +1,202 @@ +use std::cmp::max; + +use crate::errors::ProofVerifyError; +use crate::r1csproof::R1CSVerifierProof; +use crate::{ + poseidon_transcript::PoseidonTranscript, + r1csproof::{R1CSGens, R1CSProof}, + transcript::Transcript, + InputsAssignment, Instance, VarsAssignment, +}; +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; + +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + +#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] + 
+// TestudoNizk is suitable for uniform circuits where the +// evaluation of R1CS matrices A, B and C is cheap and can +// be done by the verifier. For more complex circuits this +// operation has to be offloaded to the prover. +pub struct TestudoNizk<E: Pairing> { + pub r1cs_verifier_proof: R1CSVerifierProof<E>, + pub r: (Vec<E::ScalarField>, Vec<E::ScalarField>), +} + +pub struct TestudoNizkGens<E: Pairing> { + gens_r1cs_sat: R1CSGens<E>, +} + +impl<E: Pairing> TestudoNizkGens<E> { + /// Performs the setup required by the polynomial commitment PST and Groth16 + pub fn setup( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + poseidon: PoseidonConfig<E::ScalarField>, + ) -> Self { + // ensure num_vars is a power of 2 + let num_vars_padded = { + let mut num_vars_padded = max(num_vars, num_inputs + 1); + if num_vars_padded != num_vars_padded.next_power_of_two() { + num_vars_padded = num_vars_padded.next_power_of_two(); + } + num_vars_padded + }; + + let num_cons_padded = { + let mut num_cons_padded = num_cons; + + // ensure that num_cons_padded is at least 2 + if num_cons_padded == 0 || num_cons_padded == 1 { + num_cons_padded = 2; + } + + // ensure that num_cons_padded is a power of 2 + if num_cons_padded.next_power_of_two() != num_cons_padded { + num_cons_padded = num_cons_padded.next_power_of_two(); + } + num_cons_padded + }; + + let gens_r1cs_sat = R1CSGens::setup( + b"gens_r1cs_sat", + num_cons_padded, + num_vars_padded, + num_inputs, + poseidon, + ); + TestudoNizkGens { gens_r1cs_sat } + } +} + +impl<E: Pairing> TestudoNizk<E> +where + E::ScalarField: Absorb, +{ + // Returns a proof that the R1CS instance is satisfiable + pub fn prove( + inst: &Instance<E::ScalarField>, + vars: VarsAssignment<E::ScalarField>, + inputs: &InputsAssignment<E::ScalarField>, + gens: &TestudoNizkGens<E>, + transcript: &mut PoseidonTranscript<E::ScalarField>, + poseidon: PoseidonConfig<E::ScalarField>, + ) -> Result<TestudoNizk<E>, ProofVerifyError> { + transcript.append_bytes(b"", &inst.digest); + + let c: E::ScalarField = transcript.challenge_scalar(b""); + transcript.new_from_state(&c); + + // we might need to pad variables + let padded_vars = { + let num_padded_vars = inst.inst.get_num_vars(); + let num_vars = vars.assignment.len(); + if num_padded_vars > num_vars { + vars.pad(num_padded_vars) + } else { + vars + } + }; + + let (r1cs_sat_proof, rx, ry) = R1CSProof::prove( + &inst.inst, + padded_vars.assignment, + &inputs.assignment, + &gens.gens_r1cs_sat, + transcript, + ); + + let inst_evals = inst.inst.evaluate(&rx, &ry); + + transcript.new_from_state(&c); + let r1cs_verifier_proof = r1cs_sat_proof + .prove_verifier( + inst.inst.get_num_vars(), + inst.inst.get_num_cons(), + &inputs.assignment, + &inst_evals, + transcript, + &gens.gens_r1cs_sat, + poseidon, + ) + .unwrap(); + Ok(TestudoNizk { + r1cs_verifier_proof, + r: (rx, ry), + }) + } + + // Verifies the satisfiability proof for the R1CS instance. In NIZK mode, the + // verifier evaluates matrices A, B and C itself, which is a linear + // operation and hence this is not a SNARK. + // However, for highly structured circuits this operation is fast. 
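To illustrate the padding performed by setup above (a standalone sketch, not code from this patch): with num_vars = 500 and num_inputs = 10, the variable space must hold at least max(500, 10 + 1) entries and is then rounded up to a power of two.

let num_vars_padded = std::cmp::max(500usize, 10 + 1).next_power_of_two();
assert_eq!(num_vars_padded, 512);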
+ pub fn verify( + &self, + gens: &TestudoNizkGens<E>, + inst: &Instance<E::ScalarField>, + input: &InputsAssignment<E::ScalarField>, + transcript: &mut PoseidonTranscript<E::ScalarField>, + _poseidon: PoseidonConfig<E::ScalarField>, + ) -> Result<bool, ProofVerifyError> { + transcript.append_bytes(b"", &inst.digest); + let (claimed_rx, claimed_ry) = &self.r; + let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry); + + let sat_verified = self.r1cs_verifier_proof.verify( + (claimed_rx.clone(), claimed_ry.clone()), + &input.assignment, + &inst_evals, + transcript, + &gens.gens_r1cs_sat, + )?; + assert!(sat_verified); + Ok(sat_verified) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + parameters::poseidon_params, + poseidon_transcript::PoseidonTranscript, + testudo_nizk::{TestudoNizk, TestudoNizkGens}, + Instance, + }; + + #[test] + pub fn check_testudo_nizk() { + let num_vars = 256; + let num_cons = num_vars; + let num_inputs = 10; + + type E = ark_bls12_377::Bls12_377; + + // produce public generators + let gens = TestudoNizkGens::<E>::setup(num_cons, num_vars, num_inputs, poseidon_params()); + + // produce a synthetic R1CSInstance + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + let params = poseidon_params(); + + // produce a proof + let mut prover_transcript = PoseidonTranscript::new(&params); + let proof = + TestudoNizk::prove(&inst, vars, &inputs, &gens, &mut prover_transcript, params).unwrap(); + + // verify the proof + let mut verifier_transcript = PoseidonTranscript::new(&poseidon_params()); + assert!(proof + .verify( + &gens, + &inst, + &inputs, + &mut verifier_transcript, + poseidon_params() + ) + .is_ok()); + } +} diff --git a/src/testudo_snark.rs b/src/testudo_snark.rs new file mode 100644 index 0000000..e6cb430 --- /dev/null +++ b/src/testudo_snark.rs @@ -0,0 +1,377 @@ +use std::cmp::max; + +use crate::errors::ProofVerifyError; +use crate::r1csinstance::{R1CSCommitmentGens, R1CSEvalProof}; +use crate::r1csproof::R1CSVerifierProof; + +use crate::timer::Timer; +use crate::transcript::TranscriptWriter; +use crate::{ + poseidon_transcript::PoseidonTranscript, + r1csproof::{R1CSGens, R1CSProof}, + transcript::Transcript, + InputsAssignment, Instance, VarsAssignment, +}; +use crate::{ComputationCommitment, ComputationDecommitment}; +use ark_crypto_primitives::sponge::poseidon::PoseidonConfig; +use ark_crypto_primitives::sponge::Absorb; +use ark_ec::pairing::Pairing; + +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + +#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] + +pub struct TestudoSnark<E: Pairing> { + pub r1cs_verifier_proof: R1CSVerifierProof<E>, + pub r1cs_eval_proof: R1CSEvalProof<E>, + pub inst_evals: (E::ScalarField, E::ScalarField, E::ScalarField), + pub r: (Vec<E::ScalarField>, Vec<E::ScalarField>), +} + +pub struct TestudoSnarkGens<E: Pairing> { + gens_r1cs_sat: R1CSGens<E>, + gens_r1cs_eval: R1CSCommitmentGens<E>, +} + +impl<E: Pairing> TestudoSnarkGens<E> { + /// Performs the setups required by the polynomial commitment PST, Groth16 + /// and the computational commitment given the size of the R1CS statement; + /// `num_nz_entries` specifies the maximum number of non-zero entries in + /// any of the three R1CS matrices. 
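Since `num_nz_entries` bounds the non-zero entries of any single matrix, a caller holding the three sparse matrices as (row, col, coeff) triples could derive it as follows (hypothetical variable names, shown only to pin down the convention):

let num_nz_entries = a_entries.len().max(b_entries.len()).max(c_entries.len());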
+ pub fn setup( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + num_nz_entries: usize, + poseidon: PoseidonConfig<E::ScalarField>, + ) -> Self { + // ensure num_vars is a power of 2 + let num_vars_padded = { + let mut num_vars_padded = max(num_vars, num_inputs + 1); + if num_vars_padded != num_vars_padded.next_power_of_two() { + num_vars_padded = num_vars_padded.next_power_of_two(); + } + num_vars_padded + }; + + let num_cons_padded = { + let mut num_cons_padded = num_cons; + + // ensure that num_cons_padded is at least 2 + if num_cons_padded == 0 || num_cons_padded == 1 { + num_cons_padded = 2; + } + + // ensure that num_cons_padded is a power of 2 + if num_cons_padded.next_power_of_two() != num_cons_padded { + num_cons_padded = num_cons_padded.next_power_of_two(); + } + num_cons_padded + }; + + let gens_r1cs_sat = R1CSGens::setup( + b"gens_r1cs_sat", + num_cons_padded, + num_vars_padded, + num_inputs, + poseidon, + ); + let gens_r1cs_eval = R1CSCommitmentGens::setup( + b"gens_r1cs_eval", + num_cons_padded, + num_vars_padded, + num_inputs, + num_nz_entries, + ); + TestudoSnarkGens { + gens_r1cs_sat, + gens_r1cs_eval, + } + } +} + +impl<E: Pairing> TestudoSnark<E> +where + E::ScalarField: Absorb, +{ + // Constructs the computational commitment, used to prove that the + // evaluations of matrices A, B and C sent by the prover to the verifier + // are correct. + pub fn encode( + inst: &Instance<E::ScalarField>, + gens: &TestudoSnarkGens<E>, + ) -> ( + ComputationCommitment<E::G1>, + ComputationDecommitment<E::ScalarField>, + ) { + let timer_encode = Timer::new("SNARK::encode"); + let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval); + timer_encode.stop(); + ( + ComputationCommitment { comm }, + ComputationDecommitment { decomm }, + ) + } + + // Returns the Testudo SNARK proof which has two components: + // * proof that the R1CS instance is satisfiable + // * proof that the evaluations of matrices A, B and C at the point (x,y) + // resulting from the two rounds of sumcheck are correct + pub fn prove( + inst: &Instance<E::ScalarField>, + comm: &ComputationCommitment<E::G1>, + decomm: &ComputationDecommitment<E::ScalarField>, + vars: VarsAssignment<E::ScalarField>, + inputs: &InputsAssignment<E::ScalarField>, + gens: &TestudoSnarkGens<E>, + transcript: &mut PoseidonTranscript<E::ScalarField>, + poseidon: PoseidonConfig<E::ScalarField>, + ) -> Result<TestudoSnark<E>, ProofVerifyError> { + comm.comm.write_to_transcript(transcript); + let c: E::ScalarField = transcript.challenge_scalar(b""); + transcript.new_from_state(&c); + + // we might need to pad variables + let padded_vars = { + let num_padded_vars = inst.inst.get_num_vars(); + let num_vars = vars.assignment.len(); + if num_padded_vars > num_vars { + vars.pad(num_padded_vars) + } else { + vars + } + }; + + let (r1cs_sat_proof, rx, ry) = R1CSProof::prove( + &inst.inst, + padded_vars.assignment, + &inputs.assignment, + &gens.gens_r1cs_sat, + transcript, + ); + + // We send evaluations of A, B, C at r = (rx, ry) as claims + // to enable the verifier to complete the first sum-check + let timer_eval = Timer::new("eval_sparse_polys"); + let inst_evals = { + let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry); + transcript.append_scalar(b"", &Ar); + transcript.append_scalar(b"", &Br); + transcript.append_scalar(b"", &Cr); + (Ar, Br, Cr) + }; + timer_eval.stop(); + + let timer_eval_proof = Timer::new("r1cs_eval_proof"); + let r1cs_eval_proof = R1CSEvalProof::prove( + &decomm.decomm, + &rx, + &ry, + &inst_evals, + &gens.gens_r1cs_eval, + transcript, + ); + timer_eval_proof.stop(); + + transcript.new_from_state(&c); + let timer_sat_circuit_verification = Timer::new("r1cs_sat_circuit_verification"); + let r1cs_verifier_proof = r1cs_sat_proof + 
.prove_verifier( + inst.inst.get_num_vars(), + inst.inst.get_num_cons(), + &inputs.assignment, + &inst_evals, + transcript, + &gens.gens_r1cs_sat, + poseidon, + ) + .unwrap(); + timer_sat_circuit_verification.stop(); + Ok(TestudoSnark { + r1cs_verifier_proof, + r1cs_eval_proof, + inst_evals, + r: (rx, ry), + }) + } + + pub fn verify( + &self, + gens: &TestudoSnarkGens<E>, + comm: &ComputationCommitment<E::G1>, + input: &InputsAssignment<E::ScalarField>, + transcript: &mut PoseidonTranscript<E::ScalarField>, + _poseidon: PoseidonConfig<E::ScalarField>, + ) -> Result<bool, ProofVerifyError> { + let (rx, ry) = &self.r; + + let timer_sat_verification = Timer::new("r1cs_sat_verification"); + let sat_verified = self.r1cs_verifier_proof.verify( + (rx.clone(), ry.clone()), + &input.assignment, + &self.inst_evals, + transcript, + &gens.gens_r1cs_sat, + )?; + timer_sat_verification.stop(); + assert!(sat_verified); + + let (Ar, Br, Cr) = &self.inst_evals; + transcript.append_scalar(b"", Ar); + transcript.append_scalar(b"", Br); + transcript.append_scalar(b"", Cr); + + let timer_eval_verification = Timer::new("r1cs_eval_verification"); + let eval_verified = self.r1cs_eval_proof.verify( + &comm.comm, + rx, + ry, + &self.inst_evals, + &gens.gens_r1cs_eval, + transcript, + ); + timer_eval_verification.stop(); + Ok(sat_verified && eval_verified.is_ok()) + } +} + +#[cfg(test)] +mod tests { + + use crate::ark_std::Zero; + use crate::{ + parameters::poseidon_params, + poseidon_transcript::PoseidonTranscript, + testudo_snark::{TestudoSnark, TestudoSnarkGens}, + InputsAssignment, Instance, VarsAssignment, + }; + use ark_ff::{BigInteger, One, PrimeField}; + + #[test] + pub fn check_testudo_snark() { + let num_vars = 256; + let num_cons = num_vars; + let num_inputs = 10; + + type E = ark_bls12_377::Bls12_377; + + // produce public generators + let gens = + TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, poseidon_params()); + + // produce a synthetic R1CSInstance + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // create a commitment to R1CSInstance + let (comm, decomm) = TestudoSnark::encode(&inst, &gens); + + let params = poseidon_params(); + + // produce a proof + let mut prover_transcript = PoseidonTranscript::new(&params); + let proof = TestudoSnark::prove( + &inst, + &comm, + &decomm, + vars, + &inputs, + &gens, + &mut prover_transcript, + params, + ) + .unwrap(); + + // verify the proof + let mut verifier_transcript = PoseidonTranscript::new(&poseidon_params()); + assert!(proof + .verify( + &gens, + &comm, + &inputs, + &mut verifier_transcript, + poseidon_params() + ) + .is_ok()); + } + + #[test] + fn test_padded_constraints() { + type F = ark_bls12_377::Fr; + type E = ark_bls12_377::Bls12_377; + // parameters of the R1CS instance + let num_cons = 1; + let num_vars = 0; + let num_inputs = 3; + let num_non_zero_entries = 3; + + // We will encode the following constraint into three matrices, where + // the coefficients in the matrix are in the little-endian byte order + let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new(); + let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new(); + let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new(); + + // Create the constraint a * a = z - 13 - b, i.e. z = a^2 + b + 13 + A.push((0, num_vars + 2, (F::one().into_bigint().to_bytes_le()))); // 1*a + B.push((0, num_vars + 2, F::one().into_bigint().to_bytes_le())); // 1*a + C.push((0, num_vars + 1, F::one().into_bigint().to_bytes_le())); // 1*z + C.push((0, num_vars, (-F::from(13u64)).into_bigint().to_bytes_le())); // -13*1 + C.push((0, num_vars + 3, (-F::one()).into_bigint().to_bytes_le())); // 
-1*b + + // VarsAssignment is empty (num_vars = 0); the output z = 16 lives in the public inputs + let vars = vec![F::zero().into_bigint().to_bytes_le(); num_vars]; + + // create an InputsAssignment (z = 16, a = 1, b = 2) + let mut inputs = vec![F::zero().into_bigint().to_bytes_le(); num_inputs]; + inputs[0] = F::from(16u64).into_bigint().to_bytes_le(); + inputs[1] = F::from(1u64).into_bigint().to_bytes_le(); + inputs[2] = F::from(2u64).into_bigint().to_bytes_le(); + + let assignment_inputs = InputsAssignment::<F>::new(&inputs).unwrap(); + let assignment_vars = VarsAssignment::new(&vars).unwrap(); + + // Check if instance is satisfiable + let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); + let res = inst.is_sat(&assignment_vars, &assignment_inputs); + assert!(res.unwrap(), "should be satisfied"); + + // Testudo public params + let gens = TestudoSnarkGens::<E>::setup( + num_cons, + num_vars, + num_inputs, + num_non_zero_entries, + poseidon_params(), + ); + + // create a commitment to the R1CS instance + let (comm, decomm) = TestudoSnark::encode(&inst, &gens); + + let params = poseidon_params(); + + // produce a TestudoSnark + let mut prover_transcript = PoseidonTranscript::new(&params); + let proof = TestudoSnark::prove( + &inst, + &comm, + &decomm, + assignment_vars.clone(), + &assignment_inputs, + &gens, + &mut prover_transcript, + poseidon_params(), + ) + .unwrap(); + + // verify the TestudoSnark + let mut verifier_transcript = PoseidonTranscript::new(&params); + assert!(proof + .verify( + &gens, + &comm, + &assignment_inputs, + &mut verifier_transcript, + poseidon_params() + ) + .is_ok()); + } +} diff --git a/src/timer.rs b/src/timer.rs index 30d6684..fbc2d31 100644 --- a/src/timer.rs +++ b/src/timer.rs @@ -1,3 +1,4 @@ +/// Timer is a simple utility to profile the execution time of a block of code. 
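A usage sketch for Timer (illustrative only): timers nest through CALL_DEPTH, so with the `profile` feature enabled the calls below print an indented start/stop trace, while without the feature they compile to no-ops.

let outer = Timer::new("prove");
let inner = Timer::new("msm");
// ... code being profiled ...
inner.stop(); // prints the elapsed time for "msm", indented under "prove"
outer.stop();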
#[cfg(feature = "profile")] use colored::Colorize; #[cfg(feature = "profile")] @@ -12,77 +13,77 @@ pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0); #[cfg(feature = "profile")] pub struct Timer { - label: String, - timer: Instant, + label: String, + timer: Instant, } #[cfg(feature = "profile")] impl Timer { - #[inline(always)] - pub fn new(label: &str) -> Self { - let timer = Instant::now(); - CALL_DEPTH.fetch_add(1, Ordering::Relaxed); - let star = "* "; - println!( - "{:indent$}{}{}", - "", - star, - label.yellow().bold(), - indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed) - ); - Self { - label: label.to_string(), - timer, - } + #[inline(always)] + pub fn new(label: &str) -> Self { + let timer = Instant::now(); + CALL_DEPTH.fetch_add(1, Ordering::Relaxed); + let star = "* "; + println!( + "{:indent$}{}{}", + "", + star, + label.yellow().bold(), + indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed) + ); + Self { + label: label.to_string(), + timer, } + } - #[inline(always)] - pub fn stop(&self) { - let duration = self.timer.elapsed(); - let star = "* "; - println!( - "{:indent$}{}{} {:?}", - "", - star, - self.label.blue().bold(), - duration, - indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed) - ); - CALL_DEPTH.fetch_sub(1, Ordering::Relaxed); - } + #[inline(always)] + pub fn stop(&self) { + let duration = self.timer.elapsed(); + let star = "* "; + println!( + "{:indent$}{}{} {:?}", + "", + star, + self.label.blue().bold(), + duration, + indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed) + ); + CALL_DEPTH.fetch_sub(1, Ordering::Relaxed); + } - #[inline(always)] - pub fn print(msg: &str) { - CALL_DEPTH.fetch_add(1, Ordering::Relaxed); - let star = "* "; - println!( - "{:indent$}{}{}", - "", - star, - msg.to_string().green().bold(), - indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed) - ); - CALL_DEPTH.fetch_sub(1, Ordering::Relaxed); - } + #[inline(always)] + pub fn print(msg: &str) { + CALL_DEPTH.fetch_add(1, Ordering::Relaxed); + let star = "* "; + println!( + "{:indent$}{}{}", + "", + star, + msg.to_string().green().bold(), + indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed) + ); + CALL_DEPTH.fetch_sub(1, Ordering::Relaxed); + } } #[cfg(not(feature = "profile"))] pub struct Timer { - _label: String, + _label: String, } #[cfg(not(feature = "profile"))] impl Timer { - #[inline(always)] - pub fn new(label: &str) -> Self { - Self { - _label: label.to_string(), - } + #[inline(always)] + pub fn new(label: &str) -> Self { + Self { + _label: label.to_string(), } + } - #[inline(always)] - pub fn stop(&self) {} + #[inline(always)] + pub fn stop(&self) {} - #[inline(always)] - pub fn print(_msg: &str) {} + #[inline(always)] + pub fn print(_msg: &str) {} } diff --git a/src/transcript.rs b/src/transcript.rs index 810f730..aaec095 100644 --- a/src/transcript.rs +++ b/src/transcript.rs @@ -1,67 +1,16 @@ -use super::scalar::Scalar; -use crate::group::CompressedGroup; -use ark_ff::{BigInteger, PrimeField}; +use ark_ff::PrimeField; use ark_serialize::CanonicalSerialize; -use merlin::Transcript; - -pub trait ProofTranscript { - fn append_protocol_name(&mut self, protocol_name: &'static [u8]); - fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar); - fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup); - fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar; - fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec; -} - -impl ProofTranscript for Transcript { - fn append_protocol_name(&mut self, 
protocol_name: &'static [u8]) { - self.append_message(b"protocol-name", protocol_name); - } - - fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) { - self.append_message(label, scalar.into_repr().to_bytes_le().as_slice()); - } - - fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) { - let mut point_encoded = Vec::new(); - point.serialize(&mut point_encoded).unwrap(); - self.append_message(label, point_encoded.as_slice()); - } - - fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar { - let mut buf = [0u8; 64]; - self.challenge_bytes(label, &mut buf); - Scalar::from_le_bytes_mod_order(&buf) - } - - fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec { - (0..len) - .map(|_i| self.challenge_scalar(label)) - .collect::>() - } +/// Transcript is the application level transcript to derive the challenges +/// needed for Fiat Shamir during aggregation. It is given to the +/// prover/verifier so that the transcript can be fed with any other data first. +/// TODO: Make this trait the only Transcript trait +pub trait Transcript { + fn domain_sep(&mut self); + fn append(&mut self, label: &'static [u8], point: &S); + fn challenge_scalar(&mut self, label: &'static [u8]) -> F; + fn challenge_scalar_vec(&mut self, label: &'static [u8], n: usize) -> Vec { + (0..n).map(|_| self.challenge_scalar(label)).collect() + } } -pub trait AppendToTranscript { - fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript); -} - -impl AppendToTranscript for Scalar { - fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { - transcript.append_scalar(label, self); - } -} - -impl AppendToTranscript for [Scalar] { - fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { - transcript.append_message(label, b"begin_append_vector"); - for item in self { - transcript.append_scalar(label, item); - } - transcript.append_message(label, b"end_append_vector"); - } -} - -impl AppendToTranscript for CompressedGroup { - fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { - transcript.append_point(label, self); - } -} +pub use crate::poseidon_transcript::TranscriptWriter; diff --git a/src/unipoly.rs b/src/unipoly.rs index 6b28187..3355780 100644 --- a/src/unipoly.rs +++ b/src/unipoly.rs @@ -1,198 +1,175 @@ -use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript}; - -use super::commitments::{Commitments, MultiCommitGens}; -use super::group::GroupElement; -use super::scalar::Scalar; -use super::transcript::{AppendToTranscript, ProofTranscript}; -use ark_ff::Field; +use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter}; +use ark_crypto_primitives::sponge::Absorb; +use ark_ff::{Field, PrimeField}; use ark_serialize::*; -use merlin::Transcript; // ax^2 + bx + c stored as vec![c,b,a] // ax^3 + bx^2 + cx + d stored as vec![d,c,b,a] #[derive(Debug, CanonicalDeserialize, CanonicalSerialize, Clone)] -pub struct UniPoly { - pub coeffs: Vec, - // pub coeffs_fq: Vec, +pub struct UniPoly { + pub coeffs: Vec, + // pub coeffs_fq: Vec, } // ax^2 + bx + c stored as vec![c,a] // ax^3 + bx^2 + cx + d stored as vec![d,b,a] #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] -pub struct CompressedUniPoly { - pub coeffs_except_linear_term: Vec, -} - -impl UniPoly { - pub fn from_evals(evals: &[Scalar]) -> Self { - // we only support degree-2 or degree-3 univariate polynomials - assert!(evals.len() == 3 || evals.len() == 4); - let coeffs = if 
diff --git a/src/unipoly.rs b/src/unipoly.rs
index 6b28187..3355780 100644
--- a/src/unipoly.rs
+++ b/src/unipoly.rs
@@ -1,198 +1,175 @@
-use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
-
-use super::commitments::{Commitments, MultiCommitGens};
-use super::group::GroupElement;
-use super::scalar::Scalar;
-use super::transcript::{AppendToTranscript, ProofTranscript};
-use ark_ff::Field;
+use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
+use ark_crypto_primitives::sponge::Absorb;
+use ark_ff::{Field, PrimeField};
 use ark_serialize::*;
-use merlin::Transcript;
 
 // ax^2 + bx + c stored as vec![c,b,a]
 // ax^3 + bx^2 + cx + d stored as vec![d,c,b,a]
 #[derive(Debug, CanonicalDeserialize, CanonicalSerialize, Clone)]
-pub struct UniPoly {
-    pub coeffs: Vec<Scalar>,
-    // pub coeffs_fq: Vec<Fq>,
+pub struct UniPoly<F> {
+  pub coeffs: Vec<F>,
+  // pub coeffs_fq: Vec<Fq>,
 }
 
 // ax^2 + bx + c stored as vec![c,a]
 // ax^3 + bx^2 + cx + d stored as vec![d,b,a]
 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
-pub struct CompressedUniPoly {
-    pub coeffs_except_linear_term: Vec<Scalar>,
-}
-
-impl UniPoly {
-    pub fn from_evals(evals: &[Scalar]) -> Self {
-        // we only support degree-2 or degree-3 univariate polynomials
-        assert!(evals.len() == 3 || evals.len() == 4);
-        let coeffs = if evals.len() == 3 {
-            // ax^2 + bx + c
-            let two_inv = Scalar::from(2).inverse().unwrap();
-
-            let c = evals[0];
-            let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
-            let b = evals[1] - c - a;
-            vec![c, b, a]
-        } else {
-            // ax^3 + bx^2 + cx + d
-            let two_inv = Scalar::from(2).inverse().unwrap();
-            let six_inv = Scalar::from(6).inverse().unwrap();
-
-            let d = evals[0];
-            let a = six_inv
-                * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1]
-                    - evals[0]);
-            let b = two_inv
-                * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
-                    + evals[2]
-                    + evals[2]
-                    + evals[2]
-                    + evals[2]
-                    - evals[3]);
-            let c = evals[1] - d - a - b;
-            vec![d, c, b, a]
-        };
-
-        UniPoly { coeffs }
-    }
-
-    pub fn degree(&self) -> usize {
-        self.coeffs.len() - 1
-    }
-
-    pub fn as_vec(&self) -> Vec<Scalar> {
-        self.coeffs.clone()
-    }
-
-    pub fn eval_at_zero(&self) -> Scalar {
-        self.coeffs[0]
-    }
-
-    pub fn eval_at_one(&self) -> Scalar {
-        (0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
-    }
-
-    pub fn evaluate(&self, r: &Scalar) -> Scalar {
-        let mut eval = self.coeffs[0];
-        let mut power = *r;
-        for i in 1..self.coeffs.len() {
-            eval += power * self.coeffs[i];
-            power *= r;
-        }
-        eval
-    }
-
-    pub fn compress(&self) -> CompressedUniPoly {
-        let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
-        assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
-        CompressedUniPoly {
-            coeffs_except_linear_term,
-        }
-    }
-
-    pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
-        self.coeffs.commit(blind, gens)
-    }
+pub struct CompressedUniPoly<F: PrimeField> {
+  pub coeffs_except_linear_term: Vec<F>,
 }
 
-impl CompressedUniPoly {
-    // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
-    // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
-    pub fn decompress(&self, hint: &Scalar) -> UniPoly {
-        let mut linear_term =
-            (*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
-        for i in 1..self.coeffs_except_linear_term.len() {
-            linear_term -= self.coeffs_except_linear_term[i];
-        }
-
-        let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
-        coeffs.extend(&self.coeffs_except_linear_term[1..]);
-        assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
-        UniPoly { coeffs }
-    }
-}
+impl<F: PrimeField> UniPoly<F> {
+  pub fn from_evals(evals: &[F]) -> Self {
+    // we only support degree-2 or degree-3 univariate polynomials
+    assert!(evals.len() == 3 || evals.len() == 4);
+    let coeffs = if evals.len() == 3 {
+      // ax^2 + bx + c
+      let two_inv = F::from(2u8).inverse().unwrap();
+
+      let c = evals[0];
+      let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
+      let b = evals[1] - c - a;
+      vec![c, b, a]
+    } else {
+      // ax^3 + bx^2 + cx + d
+      let two_inv = F::from(2u8).inverse().unwrap();
+      let six_inv = F::from(6u8).inverse().unwrap();
+
+      let d = evals[0];
+      let a = six_inv
+        * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
+      let b = two_inv
+        * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+          + evals[2]
+          + evals[2]
+          + evals[2]
+          + evals[2]
+          - evals[3]);
+      let c = evals[1] - d - a - b;
+      vec![d, c, b, a]
+    };
+
+    UniPoly { coeffs }
+  }
+
+  pub fn degree(&self) -> usize {
+    self.coeffs.len() - 1
+  }
+
+  pub fn eval_at_zero(&self) -> F {
+    self.coeffs[0]
+  }
+
+  pub fn eval_at_one(&self) -> F {
+    (0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
+  }
+
+  pub fn evaluate(&self, r: &F) -> F {
+    let mut eval = self.coeffs[0];
+    let mut power = *r;
+    for i in 1..self.coeffs.len() {
+      eval += power * self.coeffs[i];
+      power *= r;
+    }
+    eval
+  }
+  // pub fn compress(&self) -> CompressedUniPoly<F> {
+  //   let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
+  //   assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
+  //   CompressedUniPoly {
+  //     coeffs_except_linear_term,
+  //   }
+  // }
+}
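The coefficient formulas in the new `from_evals` are finite differences of the evaluations at x = 0, 1, 2 (and 3). A short derivation of the quadratic branch, matching `a = two_inv * (evals[2] - evals[1] - evals[1] + c)` above:

% Interpolating p(x) = ax^2 + bx + c from p(0), p(1), p(2):
\begin{aligned}
c &= p(0), \\
p(2) - 2p(1) + p(0) &= (4a + 2b + c) - 2(a + b + c) + c = 2a
  \quad\Longrightarrow\quad a = \tfrac{1}{2}\bigl(p(2) - 2p(1) + p(0)\bigr), \\
b &= p(1) - c - a.
\end{aligned}

The cubic branch is the same idea one order up: the third finite difference p(3) - 3p(2) + 3p(1) - p(0) equals 6a, which is why `six_inv` appears there.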
 
-impl AppendToPoseidon for UniPoly {
-    fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
-        // transcript.append_message(label, b"UniPoly_begin");
-        for i in 0..self.coeffs.len() {
-            transcript.append_scalar(&self.coeffs[i]);
-        }
-        // transcript.append_message(label, b"UniPoly_end");
-    }
-}
-
-impl AppendToTranscript for UniPoly {
-    fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
-        transcript.append_message(label, b"UniPoly_begin");
-        for i in 0..self.coeffs.len() {
-            transcript.append_scalar(b"coeff", &self.coeffs[i]);
-        }
-        transcript.append_message(label, b"UniPoly_end");
-    }
-}
+// impl<F: PrimeField> CompressedUniPoly<F> {
+//   // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
+//   // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
+//   pub fn decompress(&self, hint: &F) -> UniPoly<F> {
+//     let mut linear_term =
+//       (*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
+//     for i in 1..self.coeffs_except_linear_term.len() {
+//       linear_term -= self.coeffs_except_linear_term[i];
+//     }
+
+//     let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
+//     coeffs.extend(&self.coeffs_except_linear_term[1..]);
+//     assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
+//     UniPoly { coeffs }
+//   }
+// }
+
+impl<F: PrimeField + Absorb> TranscriptWriter<F> for UniPoly<F> {
+  fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<F>) {
+    // transcript.append_message(label, b"UniPoly_begin");
+    for i in 0..self.coeffs.len() {
+      transcript.append_scalar(b"", &self.coeffs[i]);
+    }
+    // transcript.append_message(label, b"UniPoly_end");
+  }
+}
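For context, a sketch of the kind of sum-check round check a `UniPoly` feeds into, using only the methods kept above and the eval(0) + eval(1) = hint invariant documented in the commented-out decompress. The helper itself is hypothetical, not part of this patch:

// Hypothetical verifier-side round check: `claim` is the value the prover
// asserts for p(0) + p(1); on success, the next round's claim is p(r) at
// the verifier's random challenge r.
fn check_sumcheck_round<F: ark_ff::PrimeField>(p: &UniPoly<F>, claim: F, r: F) -> Option<F> {
  if p.eval_at_zero() + p.eval_at_one() != claim {
    return None; // prover's polynomial is inconsistent with the claimed sum
  }
  Some(p.evaluate(&r))
}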
 
 #[cfg(test)]
 mod tests {
-    use ark_ff::One;
-
-    use super::*;
-
-    #[test]
-    fn test_from_evals_quad() {
-        // polynomial is 2x^2 + 3x + 1
-        let e0 = Scalar::one();
-        let e1 = Scalar::from(6);
-        let e2 = Scalar::from(15);
-        let evals = vec![e0, e1, e2];
-        let poly = UniPoly::from_evals(&evals);
-
-        assert_eq!(poly.eval_at_zero(), e0);
-        assert_eq!(poly.eval_at_one(), e1);
-        assert_eq!(poly.coeffs.len(), 3);
-        assert_eq!(poly.coeffs[0], Scalar::one());
-        assert_eq!(poly.coeffs[1], Scalar::from(3));
-        assert_eq!(poly.coeffs[2], Scalar::from(2));
-
-        let hint = e0 + e1;
-        let compressed_poly = poly.compress();
-        let decompressed_poly = compressed_poly.decompress(&hint);
-        for i in 0..decompressed_poly.coeffs.len() {
-            assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
-        }
-
-        let e3 = Scalar::from(28);
-        assert_eq!(poly.evaluate(&Scalar::from(3)), e3);
-    }
-
-    #[test]
-    fn test_from_evals_cubic() {
-        // polynomial is x^3 + 2x^2 + 3x + 1
-        let e0 = Scalar::one();
-        let e1 = Scalar::from(7);
-        let e2 = Scalar::from(23);
-        let e3 = Scalar::from(55);
-        let evals = vec![e0, e1, e2, e3];
-        let poly = UniPoly::from_evals(&evals);
-
-        assert_eq!(poly.eval_at_zero(), e0);
-        assert_eq!(poly.eval_at_one(), e1);
-        assert_eq!(poly.coeffs.len(), 4);
-        assert_eq!(poly.coeffs[0], Scalar::one());
-        assert_eq!(poly.coeffs[1], Scalar::from(3));
-        assert_eq!(poly.coeffs[2], Scalar::from(2));
-        assert_eq!(poly.coeffs[3], Scalar::from(1));
-
-        let hint = e0 + e1;
-        let compressed_poly = poly.compress();
-        let decompressed_poly = compressed_poly.decompress(&hint);
-        for i in 0..decompressed_poly.coeffs.len() {
-            assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
-        }
-
-        let e4 = Scalar::from(109);
-        assert_eq!(poly.evaluate(&Scalar::from(4)), e4);
-    }
+  use ark_ff::One;
+
+  use super::*;
+
+  type F = ark_bls12_377::Fr;
+
+  #[test]
+  fn test_from_evals_quad() {
+    // polynomial is 2x^2 + 3x + 1
+    let e0 = F::one();
+    let e1 = F::from(6u8);
+    let e2 = F::from(15u8);
+    let evals = vec![e0, e1, e2];
+    let poly = UniPoly::from_evals(&evals);
+
+    assert_eq!(poly.eval_at_zero(), e0);
+    assert_eq!(poly.eval_at_one(), e1);
+    assert_eq!(poly.coeffs.len(), 3);
+    assert_eq!(poly.coeffs[0], F::one());
+    assert_eq!(poly.coeffs[1], F::from(3u8));
+    assert_eq!(poly.coeffs[2], F::from(2u8));
+
+    // let hint = e0 + e1;
+    // // let compressed_poly = poly.compress();
+    // // let decompressed_poly = compressed_poly.decompress(&hint);
+    // for i in 0..poly.coeffs.len() {
+    //   assert_eq!(poly.coeffs[i], poly.coeffs[i]);
+    // }
+
+    let e3 = F::from(28u8);
+    assert_eq!(poly.evaluate(&F::from(3u8)), e3);
+  }
+
+  #[test]
+  fn test_from_evals_cubic() {
+    // polynomial is x^3 + 2x^2 + 3x + 1
+    let e0 = F::one();
+    let e1 = F::from(7);
+    let e2 = F::from(23);
+    let e3 = F::from(55);
+    let evals = vec![e0, e1, e2, e3];
+    let poly = UniPoly::from_evals(&evals);
+
+    assert_eq!(poly.eval_at_zero(), e0);
+    assert_eq!(poly.eval_at_one(), e1);
+    assert_eq!(poly.coeffs.len(), 4);
+    assert_eq!(poly.coeffs[0], F::one());
+    assert_eq!(poly.coeffs[1], F::from(3));
+    assert_eq!(poly.coeffs[2], F::from(2));
+    assert_eq!(poly.coeffs[3], F::from(1));
+
+    // let hint = e0 + e1;
+    // let compressed_poly = poly.compress();
+    // let decompressed_poly = compressed_poly.decompress(&hint);
+    // for i in 0..decompressed_poly.coeffs.len() {
+    //   assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
+    // }
+
+    let e4 = F::from(109);
+    assert_eq!(poly.evaluate(&F::from(4)), e4);
+  }
 }
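As a sanity check of the cubic test vector, p(x) = x^3 + 2x^2 + 3x + 1 gives exactly the evaluations asserted above, and the finite-difference formulas recover its coefficients:

% Evaluations used by test_from_evals_cubic:
p(0) = 1, \quad p(1) = 7, \quad p(2) = 23, \quad p(3) = 55, \quad p(4) = 109,
% and the coefficient recovery:
a = \tfrac{55 - 3\cdot 23 + 3\cdot 7 - 1}{6} = 1, \qquad
b = \tfrac{2\cdot 1 - 5\cdot 7 + 4\cdot 23 - 55}{2} = 2, \qquad
d = p(0) = 1, \qquad
c = 7 - 1 - 1 - 2 = 3,

i.e. coeffs = vec![d, c, b, a] = [1, 3, 2, 1], matching the four coefficient assertions in the test.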