
PST/SQRT + Benches (#35)

* first version of the sqrt PST without the MIPP

* snarkpack integration

* snarkpack integration

* adding mipp as submodule directly

* snarkpack integration

* finalizing

* snarkpack integration

* update mipp with latest optimisations and add preliminary
documentation

* improve codebase documentation

* remove unused imports and apply cargo fix changes

* passing v0.4

* adding gh action

* correct workflow item

* correct working dir and msrv

* remove unnecessary stuff

* wip

* wip

* remove circuit in fq as it's not needed now

* done for tonight

* wip

* wip

* wip

* parallelise commitment and groth16 verification

* finalise comments for mipp

* wip

* finalise comments

* wip

* compiling but test failing

* putting back non random blinds

* using absorb when we can

* absorbing scalar

* with bls12-381

* stuff

* trying to bring ark-blst to testudo

* correcting random implementation

* with square in place

* works with blst

* works with blst

* fix: don't require nightly Rust

By removing the `test` feature, it can also be built with a stable
Rust release and doesn't require a nightly Rust version.

* using ark-blst main branch

* started cleanup and added testudo benchmark

* add testudo snark and nizk in separate files

* rename functions that perform setups and add comments

* prototyping

* explain testudo-nizk

* add support for odd case in sqrt_pst

* add missing constraints and correct proof size for benchmarks

* add support for odd case in sqrt_pst

* fix typo in comment

* Documentation #31

* fix typo in comment

* Fix Cargo.toml and add benchmark for sqrt pst (#34)

* add benchmark for sqrt pst

* fix typo in comment

* add README

* comment from readme not executing

---------

Co-authored-by: Mara Mihali <maramihali@google.com>
Co-authored-by: Mara Mihali <mihalimara22@gmail.com>
Co-authored-by: Volker Mische <volker.mische@gmail.com>
Commit 7db2d30972 by Nicolas Gailly, committed via GitHub (branch: master)
40 changed files with 9773 additions and 8339 deletions
  1. .cargo/config (+1, -4)
  2. .github/workflows/testudo.yml (+21, -31)
  3. Cargo.toml (+32, -31)
  4. README.md (+15, -409)
  5. benches/nizk.rs (+0, -151)
  6. benches/pst.rs (+98, -0)
  7. benches/r1cs.rs (+0, -72)
  8. benches/snark.rs (+0, -137)
  9. benches/testudo.rs (+127, -0)
  10. examples/cubic.rs (+150, -130)
  11. profiler/nizk.rs (+0, -52)
  12. profiler/snark.rs (+0, -63)
  13. profiler/testudo.rs (+92, -0)
  14. rustfmt.toml (+4, -0)
  15. src/commitments.rs (+70, -75)
  16. src/constraints.rs (+413, -422)
  17. src/dense_mlpoly.rs (+669, -689)
  18. src/errors.rs (+19, -19)
  19. src/group.rs (+0, -80)
  20. src/lib.rs (+247, -747)
  21. src/macros.rs (+56, -0)
  22. src/math.rs (+26, -26)
  23. src/mipp.rs (+410, -0)
  24. src/nizk/bullet.rs (+246, -244)
  25. src/nizk/mod.rs (+187, -730)
  26. src/parameters.rs (+2327, -27)
  27. src/poseidon_transcript.rs (+97, -61)
  28. src/product_tree.rs (+424, -438)
  29. src/r1csinstance.rs (+347, -352)
  30. src/r1csproof.rs (+601, -510)
  31. src/random.rs (+0, -28)
  32. src/scalar/mod.rs (+0, -44)
  33. src/sparse_mlpoly.rs (+1552, -1591)
  34. src/sqrt_pst.rs (+343, -0)
  35. src/sumcheck.rs (+397, -880)
  36. src/testudo_nizk.rs (+202, -0)
  37. src/testudo_snark.rs (+377, -0)
  38. src/timer.rs (+56, -55)
  39. src/transcript.rs (+13, -64)
  40. src/unipoly.rs (+154, -177)

+ 1
- 4
.cargo/config

@ -1,4 +1 @@
[build]
rustflags = [
"-C", "target-cpu=native",
]

+ 21
- 31
.github/workflows/testudo.yml

@ -1,37 +1,27 @@
name: Build and Test Testudo
on:
push:
branches: [master]
pull_request:
branches: [master]
# The crate ark-ff uses the macro llvm_asm! when emitting asm which returns an
# error because it was deprecated in favour of asm!. We temporarily overcome
# this problem by setting the environment variable below (until the crate
# is updated).
env:
RUSTFLAGS: "--emit asm -C llvm-args=-x86-asm-syntax=intel"
on: [push, pull_request]
jobs:
build_nightly:
cargo-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install
run: rustup default nightly
- name: Install rustfmt Components
run: rustup component add rustfmt
# - name: Install clippy
# run: rustup component add clippy
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose
- name: Build examples
run: cargo build --examples --verbose
- name: Check Rustfmt Code Style
run: cargo fmt --all -- --check
# cargo clippy uses cargo check which returns an error when asm is emitted
# we want to emit asm for ark-ff operations so we avoid using clippy for # now
# - name: Check clippy warnings
# run: cargo clippy --all-targets --all-features
- name: Checkout sources
uses: actions/checkout@v2
with:
submodules: recursive
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
- uses: Swatinem/rust-cache@v2
with:
shared-key: cache-${{ hashFiles('**/Cargo.lock') }}
cache-on-failure: true
- name: cargo test
run: RUST_LOG=info cargo test --all --all-features -- --nocapture

+ 32
- 31
Cargo.toml

@ -18,20 +18,26 @@ itertools = "0.10.0"
colored = "2.0.0" colored = "2.0.0"
thiserror = "1.0" thiserror = "1.0"
json = "0.12.4" json = "0.12.4"
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0"}
ark-bls12-377 = { version = "^0.3.0", features = ["r1cs","curve"] }
ark-serialize = { version = "^0.3.0", features = ["derive"] }
ark-sponge = { version = "^0.3.0" , features = ["r1cs"] }
ark-crypto-primitives = { version = "^0.3.0", default-features = true }
ark-r1cs-std = { version = "^0.3.0", default-features = false }
ark-nonnative-field = { version = "0.3.0", default-features = false }
ark-relations = { version = "^0.3.0", default-features = false, optional = true }
ark-groth16 = { version = "^0.3.0", features = ["r1cs"] }
ark-bw6-761 = { version = "^0.3.0" }
ark-poly-commit = { version = "^0.3.0" }
ark-poly = {version = "^0.3.0"}
ark-ff = { version = "0.4.0", default-features = false }
ark-ec = { version = "0.4.0", default-features = false }
ark-std = { version = "0.4.0"}
ark-bls12-377 = { version = "0.4.0", features = ["r1cs","curve"] }
ark-bls12-381 = { version = "0.4.0", features = ["curve"] }
ark-blst = { git = "https://github.com/nikkolasg/ark-blst" }
ark-serialize = { version = "0.4.0", features = ["derive"] }
ark-crypto-primitives = {version = "0.4.0", features = ["sponge","r1cs","snark"] }
ark-r1cs-std = { version = "0.4.0", default-features = false }
ark-relations = { version = "0.4.0", default-features = false, optional = true }
ark-snark = { version = "0.4.0", default-features = false }
ark-groth16 = { version = "0.3.0" }
ark-bw6-761 = { version = "0.4.0" }
ark-poly-commit = { version = "0.4.0" }
ark-poly = {version = "0.4.0"}
poseidon-paramgen = { git = "https://github.com/nikkolasg/poseidon377", branch = "feat/v0.4" }
poseidon-parameters = { git = "https://github.com/nikkolasg/poseidon377", branch = "feat/v0.4" }
# Needed for ark-blst
blstrs = { version = "^0.6.1", features = ["__private_bench"] }
lazy_static = "1.4.0" lazy_static = "1.4.0"
rand = { version = "0.8", features = [ "std", "std_rng" ] } rand = { version = "0.8", features = [ "std", "std_rng" ] }
@ -46,29 +52,20 @@ csv = "1.1.5"
criterion = "0.3.6" criterion = "0.3.6"
[lib] [lib]
name = "libspartan"
name = "libtestudo"
path = "src/lib.rs" path = "src/lib.rs"
[[bin]] [[bin]]
name = "snark"
path = "profiler/snark.rs"
[[bin]]
name = "nizk"
path = "profiler/nizk.rs"
[[bench]]
name = "snark"
harness = false
name = "testudo"
path = "profiler/testudo.rs"
[[bench]]
name = "nizk"
name = "testudo"
harness = false
[[bench]]
name = "r1cs"
name = "pst"
harness = false
debug = true
[features]
multicore = ["rayon"]
@ -79,6 +76,10 @@ parallel = [ "std", "ark-ff/parallel", "ark-std/parallel", "ark-ec/parallel", "a
std = ["ark-ff/std", "ark-ec/std", "ark-std/std", "ark-relations/std", "ark-serialize/std"] std = ["ark-ff/std", "ark-ec/std", "ark-std/std", "ark-relations/std", "ark-serialize/std"]
[patch.crates-io] [patch.crates-io]
ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std/", rev = "a2a5ac491ae005ba2afd03fd21b7d3160d794a83"}
ark-poly-commit = {git = "https://github.com/maramihali/poly-commit"}
ark-poly-commit = {git = "https://github.com/cryptonetlab/ark-polycommit", branch="feat/variable-crs"}
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16" }
blstrs = { git = "https://github.com/nikkolasg/blstrs", branch = "feat/arkwork" }
ark-ec = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" }
ark-ff = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" }
ark-poly = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" }
ark-serialize = { git = "https://github.com/vmx/algebra", branch="affine-repr-xy-owned" }

+ 15
- 409
README.md

@ -1,421 +1,27 @@
# Spartan: High-speed zkSNARKs without trusted setup
# Testudo
![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg)
[![](https://img.shields.io/crates/v/spartan.svg)](<(https://crates.io/crates/spartan)>)
[![Build and Test Testudo](https://github.com/cryptonetlab/testudo/actions/workflows/testudo.yml/badge.svg?branch=master)](https://github.com/cryptonetlab/testudo/actions/workflows/testudo.yml)
Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libspartan,` a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model.
Testudo is a linear-time prover SNARK with a small and universal trusted setup. For a deep dive, please refer to [this](https://www.notion.so/pl-strflt/Testudo-Blog-Post-Final-a18db71f8e634ebbb9f68383f7904c51) blog post.
A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details.
In the current stage, the repository contains:
Note that this library has _not_ received a security review or audit.
- a modified version of [Spartan](https://github.com/microsoft/Spartan) using [arkworks](https://github.com/arkworks-rs) with the sumchecks verified using Groth16
- a fast version of the [PST](https://eprint.iacr.org/2011/587.pdf) commitment scheme with a square-root trusted setup (a usage sketch follows after this list)
- support for an arkworks wrapper around the fast blst library with GPU integration [repo](https://github.com/nikkolasg/ark-blst)
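
As a quick orientation for the square-root PST item above, here is a minimal sketch of the commit/open/verify flow, assembled from the `benches/pst.rs` benchmark added in this change. The polynomial size is illustrative, and the curve (BLS12-377) simply matches the benchmark.

```rust
// Minimal sketch of the square-root PST flow (mirrors benches/pst.rs from this
// change); the number of variables below is illustrative.
use ark_poly_commit::multilinear_pc::MultilinearPC;
use ark_std::UniformRand;
use libtestudo::{
  parameters::PoseidonConfiguration, poseidon_transcript::PoseidonTranscript, sqrt_pst::Polynomial,
};

type F = ark_bls12_377::Fr;
type E = ark_bls12_377::Bls12_377;

fn main() {
  let params = ark_bls12_377::Fr::poseidon_params();
  let mut rng = ark_std::test_rng();
  let num_vars = 12; // illustrative size
  let len = 2_usize.pow(num_vars as u32);

  // A dense multilinear polynomial given by its evaluations, plus a random
  // evaluation point.
  let z: Vec<F> = (0..len).map(|_| F::rand(&mut rng)).collect();
  let r: Vec<F> = (0..num_vars).map(|_| F::rand(&mut rng)).collect();

  // The trusted setup only covers ceil(num_vars / 2) variables: this is the
  // square-root-sized CRS the scheme relies on.
  let setup_vars = (num_vars as f32 / 2.0).ceil() as usize;
  let gens = MultilinearPC::<E>::setup(setup_vars, &mut rng);
  let (ck, vk) = MultilinearPC::<E>::trim(&gens, setup_vars);

  // Commit, open at r, and verify (a PST proof plus a MIPP proof).
  let mut pl = Polynomial::from_evaluations(&z);
  let v = pl.eval(&r);
  let (comm_list, t) = pl.commit(&ck);

  let mut prover_transcript = PoseidonTranscript::new(&params);
  let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t);

  let mut verifier_transcript = PoseidonTranscript::new(&params);
  assert!(Polynomial::verify(
    &mut verifier_transcript,
    &vk,
    &u,
    &r,
    v,
    &pst_proof,
    &mipp_proof,
    &t,
  ));
}
```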
## Highlights
## Building `testudo`
We now highlight Spartan's distinctive features.
Testudo is available with stable Rust.
- **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
Run `cargo build` to build the repository and `cargo test` to run its tests.
- **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exists efficient transformations and compiler toolchains from high-level programs of interest.
To run the current benchmarks on BLS12-377:
- **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).
- **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.
- **State-of-the-art performance:**
Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.
### Implementation details
`libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library.
## Examples
To import `libspartan` into your Rust project, add the following dependency to `Cargo.toml`:
```text
spartan = "0.7.1"
```
The following example shows how to use `libspartan` to create and verify a SNARK proof.
Some of our public APIs' style is inspired by the underlying crates we use.
```rust
# extern crate libspartan;
# extern crate merlin;
# use libspartan::{Instance, SNARKGens, SNARK};
# use libspartan::poseidon_transcript::PoseidonTranscript;
# use libspartan::parameters::poseidon_params;
# fn main() {
// specify the size of an R1CS instance
let num_vars = 1024;
let num_cons = 1024;
let num_inputs = 10;
let num_non_zero_entries = 1024;
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// ask the library to produce a synthentic R1CS instance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
let params = poseidon_params();
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(&inst, &comm, &decomm, vars, &inputs, &gens, &mut prover_transcript);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!("proof verification successful!");
# }
```
Here is another example to use the NIZK variant of the Spartan proof system:
```rust
# extern crate libspartan;
# extern crate merlin;
# use libspartan::{Instance, NIZKGens, NIZK};
# use libspartan::poseidon_transcript::PoseidonTranscript;
# use libspartan::parameters::poseidon_params;
# fn main() {
// specify the size of an R1CS instance
let num_vars = 1024;
let num_cons = 1024;
let num_inputs = 10;
// produce public parameters
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// ask the library to produce a synthentic R1CS instance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let params = poseidon_params();
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&inst, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!("proof verification successful!");
# }
```
Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance
```rust
#![allow(non_snake_case)]
# extern crate ark_std;
# extern crate libspartan;
# extern crate merlin;
# mod scalar;
# use scalar::Scalar;
# use libspartan::parameters::poseidon_params;
# use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
# use libspartan::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
#
# use ark_ff::{PrimeField, Field, BigInteger};
# use ark_std::{One, Zero, UniformRand};
# fn main() {
// produce a tiny instance
let (
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
) = produce_tiny_r1cs();
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
let params = poseidon_params();
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
println!("proof verification successful!");
# }
# fn produce_tiny_r1cs() -> (
# usize,
# usize,
# usize,
# usize,
# Instance,
# VarsAssignment,
# InputsAssignment,
# ) {
// We will use the following example, but one could construct any R1CS instance.
// Our R1CS instance is three constraints over five variables and two public inputs
// (Z0 + Z1) * I0 - Z2 = 0
// (Z0 + I1) * Z2 - Z3 = 0
// Z4 * 1 - 0 = 0
// parameters of the R1CS instance rounded to the nearest power of two
let num_cons = 4;
let num_vars = 5;
let num_inputs = 2;
let num_non_zero_entries = 5;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
// The constraint system is defined over a finite field, which in our case is
// the scalar field of ristreeto255/curve25519 i.e., p = 2^{252}+27742317777372353535851937790883648493
// To construct these matrices, we will use `curve25519-dalek` but one can use any other method.
// a variable that holds a byte representation of 1
let one = Scalar::one().into_repr().to_bytes_le();
// R1CS is a set of three sparse matrices A B C, where is a row for every
// constraint and a column for every entry in z = (vars, 1, inputs)
// An R1CS instance is satisfiable iff:
// Az \circ Bz = Cz, where z = (vars, 1, inputs)
// constraint 0 entries in (A,B,C)
// constraint 0 is (Z0 + Z1) * I0 - Z2 = 0.
// We set 1 in matrix A for columns that correspond to Z0 and Z1
// We set 1 in matrix B for column that corresponds to I0
// We set 1 in matrix C for column that corresponds to Z2
A.push((0, 0, one.clone()));
A.push((0, 1, one.clone()));
B.push((0, num_vars + 1, one.clone()));
C.push((0, 2, one.clone()));
// constraint 1 entries in (A,B,C)
A.push((1, 0, one.clone()));
A.push((1, num_vars + 2, one.clone()));
B.push((1, 2, one.clone()));
C.push((1, 3, one.clone()));
// constraint 3 entries in (A,B,C)
A.push((2, 4, one.clone()));
B.push((2, num_vars, one.clone()));
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
// compute a satisfying assignment
let mut rng = ark_std::rand::thread_rng();
let i0 = Scalar::rand(&mut rng);
let i1 = Scalar::rand(&mut rng);
let z0 = Scalar::rand(&mut rng);
let z1 = Scalar::rand(&mut rng);
let z2 = (z0 + z1) * i0; // constraint 0
let z3 = (z0 + i1) * z2; // constraint 1
let z4 = Scalar::zero(); //constraint 2
// create a VarsAssignment
let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
vars[0] = z0.into_repr().to_bytes_le();
vars[1] = z1.into_repr().to_bytes_le();
vars[2] = z2.into_repr().to_bytes_le();
vars[3] = z3.into_repr().to_bytes_le();
vars[4] = z4.into_repr().to_bytes_le();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// create an InputsAssignment
let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
inputs[0] = i0.into_repr().to_bytes_le();
inputs[1] = i1.into_repr().to_bytes_le();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
// check if the instance we created is satisfiable
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert_eq!(res.unwrap(), true);
(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
)
# }
```
For more examples, see [`examples/`](examples) directory in this repo.
## Building `libspartan`
Install [`rustup`](https://rustup.rs/)
Switch to nightly Rust using `rustup`:
```text
rustup default nightly
```console
cargo bench --bench testudo --all-features release -- --nocapture
```
Clone the repository:
```text
git clone https://github.com/Microsoft/Spartan
cd Spartan
```
To build docs for public APIs of `libspartan`:
```text
cargo doc
```
To run tests:
```text
RUSTFLAGS="-C target_cpu=native" cargo test
```
To build `libspartan`:
```text
RUSTFLAGS="-C target_cpu=native" cargo build --release
```
> NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build remove the "simd_backend" feature argument in `Cargo.toml`.
### Supported features
- `profile`: enables fine-grained profiling information (see below for its use)
## Performance
### End-to-end benchmarks
`libspartan` includes two benches: `benches/nizk.rs` and `benches/snark.rs`. If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below).
To run end-to-end benchmarks:
```text
RUSTFLAGS="-C target_cpu=native" cargo bench
```
### Fine-grained profiling
Build `libspartan` with `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`.
These profilers report performance as depicted below (for varying R1CS instance sizes). The reported
performance is from running the profilers on a Microsoft Surface Laptop 3 on a single CPU core of Intel Core i7-1065G7 running Ubuntu 20.04 (atop WSL2 on Windows 10).
See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature.
```text
$ ./target/release/snark
Profiler:: SNARK
* number_of_constraints 1048576
* number_of_variables 1048576
* number_of_inputs 10
* number_non-zero_entries_A 1048576
* number_non-zero_entries_B 1048576
* number_non-zero_entries_C 1048576
* SNARK::encode
* SNARK::encode 14.2644201s
* SNARK::prove
* R1CSProof::prove
* polycommit
* polycommit 2.7175848s
* prove_sc_phase_one
* prove_sc_phase_one 683.7481ms
* prove_sc_phase_two
* prove_sc_phase_two 846.1056ms
* polyeval
* polyeval 193.4216ms
* R1CSProof::prove 4.4416193s
* len_r1cs_sat_proof 47024
* eval_sparse_polys
* eval_sparse_polys 377.357ms
* R1CSEvalProof::prove
* commit_nondet_witness
* commit_nondet_witness 14.4507331s
* build_layered_network
* build_layered_network 3.4360521s
* evalproof_layered_network
* len_product_layer_proof 64712
* evalproof_layered_network 15.5708066s
* R1CSEvalProof::prove 34.2930559s
* len_r1cs_eval_proof 133720
* SNARK::prove 39.1297568s
* SNARK::proof_compressed_len 141768
* SNARK::verify
* verify_sat_proof
* verify_sat_proof 20.0828ms
* verify_eval_proof
* verify_polyeval_proof
* verify_prod_proof
* verify_prod_proof 1.1847ms
* verify_hash_proof
* verify_hash_proof 81.06ms
* verify_polyeval_proof 82.3583ms
* verify_eval_proof 82.8937ms
* SNARK::verify 103.0536ms
```
```text
$ ./target/release/nizk
Profiler:: NIZK
* number_of_constraints 1048576
* number_of_variables 1048576
* number_of_inputs 10
* number_non-zero_entries_A 1048576
* number_non-zero_entries_B 1048576
* number_non-zero_entries_C 1048576
* NIZK::prove
* R1CSProof::prove
* polycommit
* polycommit 2.7220635s
* prove_sc_phase_one
* prove_sc_phase_one 722.5487ms
* prove_sc_phase_two
* prove_sc_phase_two 862.6796ms
* polyeval
* polyeval 190.2233ms
* R1CSProof::prove 4.4982305s
* len_r1cs_sat_proof 47024
* NIZK::prove 4.5139888s
* NIZK::proof_compressed_len 48134
* NIZK::verify
* eval_sparse_polys
* eval_sparse_polys 395.0847ms
* verify_sat_proof
* verify_sat_proof 19.286ms
* NIZK::verify 414.5102ms
```
## LICENSE
See [LICENSE](./LICENSE)
## Contributing
## Join us!
See [CONTRIBUTING](./CONTRIBUTING.md)
If you want to contribute, reach out to the Discord server of [cryptonet](https://discord.com/invite/CFnTSkVTCk).

+ 0
- 151
benches/nizk.rs

@ -1,151 +0,0 @@
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate sha3;
use std::time::{Duration, SystemTime};
use libspartan::{
parameters::POSEIDON_PARAMETERS_FR_377, poseidon_transcript::PoseidonTranscript, Instance,
NIZKGens, NIZK,
};
use criterion::*;
fn nizk_prove_benchmark(c: &mut Criterion) {
for &s in [24, 28, 30].iter() {
let mut group = c.benchmark_group("R1CS_prove_benchmark");
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let start = SystemTime::now();
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let end = SystemTime::now();
let duration = end.duration_since(start).unwrap();
println!(
"Generating r1cs instance with {} constraints took {} ms",
num_cons,
duration.as_millis()
);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let name = format!("R1CS_prove_{}", num_vars);
group
.measurement_time(Duration::from_secs(60))
.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript =
PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
NIZK::prove(
black_box(&inst),
black_box(vars.clone()),
black_box(&inputs),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
fn nizk_verify_benchmark(c: &mut Criterion) {
for &s in [4, 6, 8, 10, 12, 16, 20, 24, 28, 30].iter() {
let mut group = c.benchmark_group("R1CS_verify_benchmark");
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
// these are the public io
let num_inputs = 10;
let start = SystemTime::now();
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let end = SystemTime::now();
let duration = end.duration_since(start).unwrap();
println!(
"Generating r1cs instance with {} constraints took {} ms",
num_cons,
duration.as_millis()
);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let name = format!("R1CS_verify_{}", num_cons);
group
.measurement_time(Duration::from_secs(60))
.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript =
PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
assert!(proof
.verify(
black_box(&inst),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens),
)
.is_ok());
});
});
group.finish();
}
}
fn nizk_verify_groth16_benchmark(c: &mut Criterion) {
for &s in [4, 6, 8, 10, 12, 16, 20, 24, 28, 30].iter() {
let mut group = c.benchmark_group("R1CS_verify_groth16_benchmark");
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
// these are the public io
let num_inputs = 10;
let start = SystemTime::now();
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let end = SystemTime::now();
let duration = end.duration_since(start).unwrap();
println!(
"Generating r1cs instance with {} constraints took {} ms",
num_cons,
duration.as_millis()
);
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let name = format!("R1CS_verify_groth16_{}", num_cons);
group
.measurement_time(Duration::from_secs(60))
.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript =
PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
assert!(proof
.verify_groth16(
black_box(&inst),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
fn set_duration() -> Criterion {
Criterion::default().sample_size(2)
}
criterion_group! {
name = benches_nizk;
config = set_duration();
targets = nizk_prove_benchmark, nizk_verify_benchmark, nizk_verify_groth16_benchmark
}
criterion_main!(benches_nizk);

+ 98
- 0
benches/pst.rs

@ -0,0 +1,98 @@
use std::time::Instant;
use ark_poly_commit::multilinear_pc::MultilinearPC;
use ark_serialize::CanonicalSerialize;
use libtestudo::{
parameters::PoseidonConfiguration, poseidon_transcript::PoseidonTranscript, sqrt_pst::Polynomial,
};
use serde::Serialize;
type F = ark_bls12_377::Fr;
type E = ark_bls12_377::Bls12_377;
use ark_std::UniformRand;
#[derive(Default, Clone, Serialize)]
struct BenchmarkResults {
power: usize,
commit_time: u128,
opening_time: u128,
verification_time: u128,
proof_size: usize,
commiter_key_size: usize,
}
fn main() {
let params = ark_bls12_377::Fr::poseidon_params();
let mut writer = csv::Writer::from_path("sqrt_pst.csv").expect("unable to open csv writer");
for &s in [4, 5, 20, 27].iter() {
println!("Running for {} inputs", s);
let mut rng = ark_std::test_rng();
let mut br = BenchmarkResults::default();
br.power = s;
let num_vars = s;
let len = 2_usize.pow(num_vars as u32);
let z: Vec<F> = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect();
let r: Vec<F> = (0..num_vars)
.into_iter()
.map(|_| F::rand(&mut rng))
.collect();
let setup_vars = (num_vars as f32 / 2.0).ceil() as usize;
let gens = MultilinearPC::<E>::setup((num_vars as f32 / 2.0).ceil() as usize, &mut rng);
let (ck, vk) = MultilinearPC::<E>::trim(&gens, setup_vars);
let mut cks = Vec::<u8>::new();
ck.serialize_with_mode(&mut cks, ark_serialize::Compress::Yes)
.unwrap();
br.commiter_key_size = cks.len();
let mut pl = Polynomial::from_evaluations(&z.clone());
let v = pl.eval(&r);
let start = Instant::now();
let (comm_list, t) = pl.commit(&ck);
let duration = start.elapsed().as_millis();
br.commit_time = duration;
let mut prover_transcript = PoseidonTranscript::new(&params);
let start = Instant::now();
let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t);
let duration = start.elapsed().as_millis();
br.opening_time = duration;
let mut p1 = Vec::<u8>::new();
let mut p2 = Vec::<u8>::new();
pst_proof
.serialize_with_mode(&mut p1, ark_serialize::Compress::Yes)
.unwrap();
mipp_proof
.serialize_with_mode(&mut p2, ark_serialize::Compress::Yes)
.unwrap();
br.proof_size = p1.len() + p2.len();
let mut verifier_transcript = PoseidonTranscript::new(&params);
let start = Instant::now();
let res = Polynomial::verify(
&mut verifier_transcript,
&vk,
&u,
&r,
v,
&pst_proof,
&mipp_proof,
&t,
);
let duration = start.elapsed().as_millis();
br.verification_time = duration;
assert!(res == true);
writer
.serialize(br)
.expect("unable to write results to csv");
writer.flush().expect("wasn't able to flush");
}
}

+ 0
- 72
benches/r1cs.rs

@ -1,72 +0,0 @@
use std::time::Instant;
use libspartan::{
parameters::POSEIDON_PARAMETERS_FR_377, poseidon_transcript::PoseidonTranscript, Instance,
NIZKGens, NIZK,
};
use serde::Serialize;
#[derive(Default, Clone, Serialize)]
struct BenchmarkResults {
power: usize,
input_constraints: usize,
spartan_verifier_circuit_constraints: usize,
r1cs_instance_generation_time: u128,
spartan_proving_time: u128,
groth16_setup_time: u128,
groth16_proving_time: u128,
testudo_verification_time: u128,
testudo_proving_time: u128,
}
fn main() {
let mut writer = csv::Writer::from_path("testudo.csv").expect("unable to open csv writer");
// for &s in [
// 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
// ]
// .iter()
// For testing purposes we currently bench on very small instance to ensure
// correctness and then on biggest one for timings.
for &s in [4, 26].iter() {
println!("Running for {} inputs", s);
let mut br = BenchmarkResults::default();
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
br.power = s;
br.input_constraints = num_cons;
let num_inputs = 10;
let start = Instant::now();
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let duration = start.elapsed().as_millis();
br.r1cs_instance_generation_time = duration;
let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let start = Instant::now();
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let duration = start.elapsed().as_millis();
br.spartan_proving_time = duration;
let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
let res = proof.verify(&inst, &inputs, &mut verifier_transcript, &gens);
assert!(res.is_ok());
br.spartan_verifier_circuit_constraints = res.unwrap();
let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
let res = proof.verify_groth16(&inst, &inputs, &mut verifier_transcript, &gens);
assert!(res.is_ok());
let (ds, dp, dv) = res.unwrap();
br.groth16_setup_time = ds;
br.groth16_proving_time = dp;
br.testudo_proving_time = br.spartan_proving_time + br.groth16_proving_time;
br.testudo_verification_time = dv;
writer
.serialize(br)
.expect("unable to write results to csv");
writer.flush().expect("wasn't able to flush");
}
}

+ 0
- 137
benches/snark.rs

@ -1,137 +0,0 @@
extern crate libspartan;
extern crate merlin;
use libspartan::{
parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, Instance, SNARKGens,
SNARK,
};
use criterion::*;
fn snark_encode_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_encode_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, _vars, _inputs) =
Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let name = format!("SNARK_encode_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
SNARK::encode(black_box(&inst), black_box(&gens));
});
});
group.finish();
}
}
fn snark_prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let params = poseidon_params();
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof
let name = format!("SNARK_prove_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript = PoseidonTranscript::new(&params);
SNARK::prove(
black_box(&inst),
black_box(&comm),
black_box(&decomm),
black_box(vars.clone()),
black_box(&inputs),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
fn snark_verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_verify_benchmark");
group.plot_config(plot_config);
let params = poseidon_params();
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
// verify the proof
let name = format!("SNARK_verify_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(
black_box(&comm),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
fn set_duration() -> Criterion {
Criterion::default().sample_size(10)
}
criterion_group! {
name = benches_snark;
config = set_duration();
targets = snark_verify_benchmark
}
criterion_main!(benches_snark);

+ 127
- 0
benches/testudo.rs

@ -0,0 +1,127 @@
use std::time::Instant;
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_ff::PrimeField;
use ark_serialize::*;
use libtestudo::parameters::PoseidonConfiguration;
use libtestudo::{
poseidon_transcript::PoseidonTranscript,
testudo_snark::{TestudoSnark, TestudoSnarkGens},
Instance,
};
use serde::Serialize;
#[derive(Default, Clone, Serialize)]
struct BenchmarkResults {
power: usize,
input_constraints: usize,
testudo_proving_time: u128,
testudo_verification_time: u128,
sat_proof_size: usize,
eval_proof_size: usize,
total_proof_size: usize,
}
fn main() {
bench_with_bls12_377();
// bench_with_bls12_381();
// bench_with_ark_blst();
}
fn bench_with_ark_blst() {
let params = ark_blst::Scalar::poseidon_params();
testudo_snark_bench::<ark_blst::Bls12>(params, "testudo_blst");
}
fn bench_with_bls12_377() {
let params = ark_bls12_377::Fr::poseidon_params();
testudo_snark_bench::<ark_bls12_377::Bls12_377>(params, "testudo_bls12_377");
}
fn bench_with_bls12_381() {
let params = ark_bls12_381::Fr::poseidon_params();
testudo_snark_bench::<ark_bls12_381::Bls12_381>(params, "testudo_bls12_381");
}
fn testudo_snark_bench<E>(params: PoseidonConfig<E::ScalarField>, file_name: &str)
where
E: Pairing,
E::ScalarField: PrimeField,
E::ScalarField: Absorb,
{
let mut writer = csv::Writer::from_path(file_name).expect("unable to open csv writer");
for &s in [4, 5, 10, 12, 14, 16, 18, 20, 22, 24, 26].iter() {
println!("Running for {} inputs", s);
let mut br = BenchmarkResults::default();
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
br.power = s;
br.input_constraints = num_cons;
let num_inputs = 10;
let (inst, vars, inputs) =
Instance::<E::ScalarField>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let mut prover_transcript = PoseidonTranscript::new(&params.clone());
let gens =
TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, params.clone());
let (comm, decomm) = TestudoSnark::<E>::encode(&inst, &gens);
let start = Instant::now();
let proof = TestudoSnark::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
params.clone(),
)
.unwrap();
let duration = start.elapsed().as_millis();
br.testudo_proving_time = duration;
let mut sat_proof = Vec::<u8>::new();
proof
.r1cs_verifier_proof
.serialize_with_mode(&mut sat_proof, Compress::Yes)
.unwrap();
br.sat_proof_size = sat_proof.len();
let mut eval_proof = Vec::<u8>::new();
proof
.r1cs_eval_proof
.serialize_with_mode(&mut eval_proof, Compress::Yes)
.unwrap();
br.eval_proof_size = eval_proof.len();
let mut total_proof = Vec::<u8>::new();
proof
.serialize_with_mode(&mut total_proof, Compress::Yes)
.unwrap();
br.total_proof_size = total_proof.len();
let mut verifier_transcript = PoseidonTranscript::new(&params.clone());
let start = Instant::now();
let res = proof.verify(
&gens,
&comm,
&inputs,
&mut verifier_transcript,
params.clone(),
);
assert!(res.is_ok());
let duration = start.elapsed().as_millis();
br.testudo_verification_time = duration;
writer
.serialize(br)
.expect("unable to write results to csv");
writer.flush().expect("wasn't able to flush");
}
}

+ 150
- 130
examples/cubic.rs

@ -8,143 +8,163 @@
//! `(Z3 + 5) * 1 - I0 = 0`
//!
//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
use ark_bls12_377::Fr as Scalar;
use ark_ec::pairing::Pairing;
use ark_ff::{BigInteger, PrimeField};
use ark_std::{One, UniformRand, Zero};
use libspartan::{
parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, InputsAssignment,
Instance, SNARKGens, VarsAssignment, SNARK,
use libtestudo::testudo_snark::{TestudoSnark, TestudoSnarkGens};
use libtestudo::{
parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, InputsAssignment, Instance,
VarsAssignment,
};
#[allow(non_snake_case)]
fn produce_r1cs() -> (
usize,
usize,
usize,
usize,
Instance,
VarsAssignment,
InputsAssignment,
fn produce_r1cs<E: Pairing>() -> (
usize,
usize,
usize,
usize,
Instance<E::ScalarField>,
VarsAssignment<E::ScalarField>,
InputsAssignment<E::ScalarField>,
) {
// parameters of the R1CS instance
let num_cons = 4;
let num_vars = 4;
let num_inputs = 1;
let num_non_zero_entries = 8;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let one = Scalar::one().into_repr().to_bytes_le();
// R1CS is a set of three sparse matrices A B C, where is a row for every
// constraint and a column for every entry in z = (vars, 1, inputs)
// An R1CS instance is satisfiable iff:
// Az \circ Bz = Cz, where z = (vars, 1, inputs)
// constraint 0 entries in (A,B,C)
// constraint 0 is Z0 * Z0 - Z1 = 0.
A.push((0, 0, one.clone()));
B.push((0, 0, one.clone()));
C.push((0, 1, one.clone()));
// constraint 1 entries in (A,B,C)
// constraint 1 is Z1 * Z0 - Z2 = 0.
A.push((1, 1, one.clone()));
B.push((1, 0, one.clone()));
C.push((1, 2, one.clone()));
// constraint 2 entries in (A,B,C)
// constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
A.push((2, 2, one.clone()));
A.push((2, 0, one.clone()));
B.push((2, num_vars, one.clone()));
C.push((2, 3, one.clone()));
// constraint 3 entries in (A,B,C)
// constraint 3 is (Z3 + 5) * 1 - I0 = 0.
A.push((3, 3, one.clone()));
A.push((3, num_vars, Scalar::from(5u32).into_repr().to_bytes_le()));
B.push((3, num_vars, one.clone()));
C.push((3, num_vars + 1, one));
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
// compute a satisfying assignment
let mut rng = ark_std::rand::thread_rng();
let z0 = Scalar::rand(&mut rng);
let z1 = z0 * z0; // constraint 0
let z2 = z1 * z0; // constraint 1
let z3 = z2 + z0; // constraint 2
let i0 = z3 + Scalar::from(5u32); // constraint 3
// create a VarsAssignment
let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
vars[0] = z0.into_repr().to_bytes_le();
vars[1] = z1.into_repr().to_bytes_le();
vars[2] = z2.into_repr().to_bytes_le();
vars[3] = z3.into_repr().to_bytes_le();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// create an InputsAssignment
let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
inputs[0] = i0.into_repr().to_bytes_le();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
// check if the instance we created is satisfiable
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
)
// parameters of the R1CS instance
let num_cons = 4;
let num_vars = 4;
let num_inputs = 1;
let num_non_zero_entries = 8;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let one = E::ScalarField::one().into_bigint().to_bytes_le();
// R1CS is a set of three sparse matrices A, B, C, where there is a row for every
// constraint and a column for every entry in z = (vars, 1, inputs)
// An R1CS instance is satisfiable iff:
// Az \circ Bz = Cz, where z = (vars, 1, inputs)
// constraint 0 entries in (A,B,C)
// constraint 0 is Z0 * Z0 - Z1 = 0.
A.push((0, 0, one.clone()));
B.push((0, 0, one.clone()));
C.push((0, 1, one.clone()));
// constraint 1 entries in (A,B,C)
// constraint 1 is Z1 * Z0 - Z2 = 0.
A.push((1, 1, one.clone()));
B.push((1, 0, one.clone()));
C.push((1, 2, one.clone()));
// constraint 2 entries in (A,B,C)
// constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
A.push((2, 2, one.clone()));
A.push((2, 0, one.clone()));
B.push((2, num_vars, one.clone()));
C.push((2, 3, one.clone()));
// constraint 3 entries in (A,B,C)
// constraint 3 is (Z3 + 5) * 1 - I0 = 0.
A.push((3, 3, one.clone()));
A.push((
3,
num_vars,
E::ScalarField::from(5u32).into_bigint().to_bytes_le(),
));
B.push((3, num_vars, one.clone()));
C.push((3, num_vars + 1, one));
let inst = Instance::<E::ScalarField>::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
// compute a satisfying assignment
let mut rng = ark_std::rand::thread_rng();
let z0 = E::ScalarField::rand(&mut rng);
let z1 = z0 * z0; // constraint 0
let z2 = z1 * z0; // constraint 1
let z3 = z2 + z0; // constraint 2
let i0 = z3 + E::ScalarField::from(5u32); // constraint 3
// create a VarsAssignment
let mut vars = vec![E::ScalarField::zero().into_bigint().to_bytes_le(); num_vars];
vars[0] = z0.into_bigint().to_bytes_le();
vars[1] = z1.into_bigint().to_bytes_le();
vars[2] = z2.into_bigint().to_bytes_le();
vars[3] = z3.into_bigint().to_bytes_le();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// create an InputsAssignment
let mut inputs = vec![E::ScalarField::zero().into_bigint().to_bytes_le(); num_inputs];
inputs[0] = i0.into_bigint().to_bytes_le();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
// check if the instance we created is satisfiable
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
)
}
type E = ark_bls12_377::Bls12_377;
fn main() {
// produce an R1CS instance
let (
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
) = produce_r1cs();
let params = poseidon_params();
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
println!("proof verification successful!");
// produce an R1CS instance
let (
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
) = produce_r1cs::<E>();
let params = poseidon_params();
// produce public parameters
let gens = TestudoSnarkGens::<E>::setup(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
params.clone(),
);
// create a commitment to the R1CS instance
let (comm, decomm) = TestudoSnark::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = TestudoSnark::prove(
&inst,
&comm,
&decomm,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
params.clone(),
)
.unwrap();
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(
&gens,
&comm,
&assignment_inputs,
&mut verifier_transcript,
params
)
.is_ok());
println!("proof verification successful!");
}

+ 0
- 52
profiler/nizk.rs

@ -1,52 +0,0 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate libspartan;
extern crate merlin;
extern crate rand;
use ark_serialize::*;
use libspartan::parameters::poseidon_params;
use libspartan::poseidon_transcript::PoseidonTranscript;
use libspartan::{Instance, NIZKGens, NIZK};
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
pub fn main() {
// the list of number of variables (and constraints) in an R1CS instance
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: NIZK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let params = poseidon_params();
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let mut proof_encoded = Vec::new();
proof.serialize(&mut proof_encoded).unwrap();
let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len());
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&inst, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!();
}
}

+ 0
- 63
profiler/snark.rs

@ -1,63 +0,0 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate libspartan;
extern crate merlin;
use ark_serialize::*;
use libspartan::parameters::poseidon_params;
use libspartan::poseidon_transcript::PoseidonTranscript;
use libspartan::{Instance, SNARKGens, SNARK};
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
pub fn main() {
// the list of number of variables (and constraints) in an R1CS instance
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: SNARK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);
let params = poseidon_params();
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
let mut proof_encoded = Vec::new();
proof.serialize(&mut proof_encoded).unwrap();
let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len());
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!();
}
}

+ 92
- 0
profiler/testudo.rs

@ -0,0 +1,92 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate libtestudo;
extern crate merlin;
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_ff::PrimeField;
use ark_serialize::*;
use libtestudo::parameters::PoseidonConfiguration;
use libtestudo::poseidon_transcript::PoseidonTranscript;
use libtestudo::{
testudo_snark::{TestudoSnark, TestudoSnarkGens},
Instance,
};
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
fn main() {
let params = ark_bls12_377::Fr::poseidon_params();
profiler::<ark_bls12_377::Bls12_377>(params);
}
fn profiler<E>(params: PoseidonConfig<E::ScalarField>)
where
E: Pairing,
E::ScalarField: PrimeField,
E::ScalarField: Absorb,
{
// the list of number of variables (and constraints) in an R1CS instance
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: SNARK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) =
Instance::<E::ScalarField>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens =
TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, params.clone());
// create a commitment to R1CSInstance
let (comm, decomm) = TestudoSnark::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = PoseidonTranscript::new(&params.clone());
let proof = TestudoSnark::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
params.clone(),
)
.unwrap();
let mut proof_encoded = Vec::new();
proof
.serialize_with_mode(&mut proof_encoded, Compress::Yes)
.unwrap();
let msg_proof_len = format!(
"TestudoSnark::proof_compressed_len {:?}",
proof_encoded.len()
);
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = PoseidonTranscript::new(&params.clone());
assert!(proof
.verify(
&gens,
&comm,
&inputs,
&mut verifier_transcript,
params.clone()
)
.is_ok());
println!();
}
}

+ 4
- 0
rustfmt.toml

@ -0,0 +1,4 @@
edition = "2018"
tab_spaces = 2
newline_style = "Unix"
use_try_shorthand = true

+ 70
- 75
src/commitments.rs

@ -1,92 +1,87 @@
use super::group::{GroupElement, GroupElementAffine, VartimeMultiscalarMul, GROUP_BASEPOINT};
use super::scalar::Scalar;
use crate::group::CompressGroupElement;
use crate::ark_std::UniformRand;
use crate::parameters::*;
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::PrimeField;
use ark_sponge::poseidon::PoseidonSponge;
use ark_sponge::CryptographicSponge;
use ark_crypto_primitives::sponge::poseidon::PoseidonSponge;
use ark_crypto_primitives::sponge::CryptographicSponge;
use ark_ec::{CurveGroup, VariableBaseMSM};
use rand::SeedableRng;
use std::ops::Mul;
#[derive(Debug, Clone)]
pub struct MultiCommitGens {
pub n: usize,
pub G: Vec<GroupElement>,
pub h: GroupElement,
pub struct MultiCommitGens<G: CurveGroup> {
pub n: usize,
pub G: Vec<G::Affine>,
pub h: G::Affine,
} }
impl MultiCommitGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let params = poseidon_params();
let mut sponge = PoseidonSponge::new(&params);
sponge.absorb(&label);
sponge.absorb(&GROUP_BASEPOINT.compress().0);
impl<G: CurveGroup> MultiCommitGens<G> {
pub fn new(n: usize, label: &[u8]) -> Self {
let params = poseidon_params();
let mut sponge = PoseidonSponge::new(&params);
sponge.absorb(&label);
let mut b = Vec::new();
G::generator().serialize_compressed(&mut b).unwrap();
sponge.absorb(&b);
let mut gens: Vec<GroupElement> = Vec::new();
for _ in 0..n + 1 {
let mut el_aff: Option<GroupElementAffine> = None;
while el_aff.is_none() {
let uniform_bytes = sponge.squeeze_bytes(64);
el_aff = GroupElementAffine::from_random_bytes(&uniform_bytes);
}
let el = el_aff.unwrap().mul_by_cofactor_to_projective();
gens.push(el);
}
let gens = (0..=n)
.map(|_| {
let mut uniform_bytes = [0u8; 32];
uniform_bytes.copy_from_slice(&sponge.squeeze_bytes(32)[..]);
let mut prng = rand::rngs::StdRng::from_seed(uniform_bytes);
G::Affine::rand(&mut prng)
})
.collect::<Vec<_>>();
MultiCommitGens {
n,
G: gens[..n].to_vec(),
h: gens[n],
}
MultiCommitGens {
n,
G: gens[..n].to_vec(),
h: gens[n],
} }
}
pub fn clone(&self) -> MultiCommitGens {
MultiCommitGens {
n: self.n,
h: self.h,
G: self.G.clone(),
}
pub fn clone(&self) -> Self {
MultiCommitGens {
n: self.n,
h: self.h,
G: self.G.clone(),
} }
}
pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
let (G1, G2) = self.G.split_at(mid);
pub fn split_at(&self, mid: usize) -> (Self, Self) {
let (G1, G2) = self.G.split_at(mid);
(
MultiCommitGens {
n: G1.len(),
G: G1.to_vec(),
h: self.h,
},
MultiCommitGens {
n: G2.len(),
G: G2.to_vec(),
h: self.h,
},
)
}
(
MultiCommitGens {
n: G1.len(),
G: G1.to_vec(),
h: self.h,
},
MultiCommitGens {
n: G2.len(),
G: G2.to_vec(),
h: self.h,
},
)
}
} }
pub trait Commitments {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
}
pub struct PedersenCommit;
impl Commitments for Scalar {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, 1);
GroupElement::vartime_multiscalar_mul(&[*self, *blind], &[gens_n.G[0], gens_n.h])
}
}
impl PedersenCommit {
pub fn commit_scalar<G: CurveGroup>(
scalar: &G::ScalarField,
blind: &G::ScalarField,
gens_n: &MultiCommitGens<G>,
) -> G {
assert_eq!(gens_n.n, 1);
<G as VariableBaseMSM>::msm_unchecked(&[gens_n.G[0], gens_n.h], &[*scalar, *blind])
}
impl Commitments for Vec<Scalar> {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, self.len());
GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
}
}
impl Commitments for [Scalar] {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, self.len());
GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
}
pub fn commit_slice<G: CurveGroup>(
scalars: &[G::ScalarField],
blind: &G::ScalarField,
gens_n: &MultiCommitGens<G>,
) -> G {
assert_eq!(scalars.len(), gens_n.n);
<G as VariableBaseMSM>::msm_unchecked(&gens_n.G, scalars) + gens_n.h.mul(blind)
}
} }
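As a quick orientation for the new curve-generic commitment API above, here is a minimal usage sketch that is not part of the diff. The import paths, the labels and the `pedersen_sketch` function are illustrative; it assumes `MultiCommitGens` and `PedersenCommit` are publicly reachable and that the curve's affine points implement `UniformRand`, as `MultiCommitGens::new` requires.
use ark_bls12_381::{Fr, G1Projective};
use ark_std::UniformRand;
// Hypothetical sketch: commit to a vector of scalars and to a single scalar
// with the generic Pedersen helpers from the diff above.
fn pedersen_sketch() {
  let mut rng = ark_std::test_rng();
  // n = 4 vector generators plus one blinding generator h.
  let gens = MultiCommitGens::<G1Projective>::new(4, b"example-gens");
  let scalars: Vec<Fr> = (0..4).map(|_| Fr::rand(&mut rng)).collect();
  let blind = Fr::rand(&mut rng);
  // C = <scalars, G> + blind * h
  let c_vec: G1Projective = PedersenCommit::commit_slice(&scalars, &blind, &gens);
  // Single-scalar commitments use generators with n = 1.
  let gens_1 = MultiCommitGens::<G1Projective>::new(1, b"example-gens-single");
  let c_scalar: G1Projective = PedersenCommit::commit_scalar(&scalars[0], &blind, &gens_1);
  let _ = (c_vec, c_scalar);
}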

+ 413
- 422
src/constraints.rs

@ -1,488 +1,479 @@
use std::{borrow::Borrow, vec};
use ark_ec::pairing::Pairing;
use std::borrow::Borrow;
use super::scalar::Scalar;
use crate::{
group::Fq,
math::Math,
sparse_mlpoly::{SparsePolyEntry, SparsePolynomial},
unipoly::UniPoly,
};
use ark_bls12_377::{constraints::PairingVar as IV, Bls12_377 as I, Fr};
use ark_crypto_primitives::{
snark::BooleanInputVar, CircuitSpecificSetupSNARK, SNARKGadget, SNARK,
math::Math,
sparse_mlpoly::{SparsePolyEntry, SparsePolynomial},
unipoly::UniPoly,
};
use ark_ff::{BitIteratorLE, PrimeField, Zero};
use ark_groth16::{
constraints::{Groth16VerifierGadget, PreparedVerifyingKeyVar, ProofVar},
Groth16, PreparedVerifyingKey, Proof as GrothProof,
};
use ark_ff::PrimeField;
use ark_crypto_primitives::sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig},
};
use ark_poly_commit::multilinear_pc::data_structures::Commitment;
use ark_r1cs_std::{
alloc::{AllocVar, AllocationMode},
fields::fp::FpVar,
prelude::{Boolean, EqGadget, FieldVar},
alloc::{AllocVar, AllocationMode},
fields::fp::FpVar,
prelude::{EqGadget, FieldVar},
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_sponge::{
constraints::CryptographicSpongeVar,
poseidon::{constraints::PoseidonSpongeVar, PoseidonParameters},
};
use rand::{CryptoRng, Rng};
pub struct PoseidonTranscripVar {
pub cs: ConstraintSystemRef<Fr>,
pub sponge: PoseidonSpongeVar<Fr>,
pub params: PoseidonParameters<Fr>,
pub struct PoseidonTranscripVar<F>
where
F: PrimeField,
{
pub cs: ConstraintSystemRef<F>,
pub sponge: PoseidonSpongeVar<F>,
} }
impl PoseidonTranscripVar {
fn new(
cs: ConstraintSystemRef<Fr>,
params: &PoseidonParameters<Fr>,
challenge: Option<Fr>,
) -> Self {
let mut sponge = PoseidonSpongeVar::new(cs.clone(), params);
if let Some(c) = challenge {
let c_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(c)).unwrap();
sponge.absorb(&c_var).unwrap();
}
Self {
cs,
sponge,
params: params.clone(),
}
}
impl<F> PoseidonTranscripVar<F>
where
F: PrimeField,
{
fn new(cs: ConstraintSystemRef<F>, params: &PoseidonConfig<F>, c_var: FpVar<F>) -> Self {
let mut sponge = PoseidonSpongeVar::new(cs.clone(), params);
fn append(&mut self, input: &FpVar<Fr>) -> Result<(), SynthesisError> {
self.sponge.absorb(&input)
}
sponge.absorb(&c_var).unwrap();
fn append_vector(&mut self, input_vec: &[FpVar<Fr>]) -> Result<(), SynthesisError> {
for input in input_vec.iter() {
self.append(input)?;
}
Ok(())
}
Self { cs, sponge }
}
fn challenge(&mut self) -> Result<FpVar<Fr>, SynthesisError> {
let c_var = self.sponge.squeeze_field_elements(1).unwrap().remove(0);
fn append(&mut self, input: &FpVar<F>) -> Result<(), SynthesisError> {
self.sponge.absorb(&input)
}
Ok(c_var)
fn append_vector(&mut self, input_vec: &[FpVar<F>]) -> Result<(), SynthesisError> {
for input in input_vec.iter() {
self.append(input)?;
} }
Ok(())
}
fn challenge_vector(&mut self, len: usize) -> Result<Vec<FpVar<Fr>>, SynthesisError> {
let c_vars = self.sponge.squeeze_field_elements(len).unwrap();
fn challenge(&mut self) -> Result<FpVar<F>, SynthesisError> {
Ok(self.sponge.squeeze_field_elements(1).unwrap().remove(0))
}
Ok(c_vars)
}
fn challenge_scalar_vec(&mut self, len: usize) -> Result<Vec<FpVar<F>>, SynthesisError> {
let c_vars = self.sponge.squeeze_field_elements(len).unwrap();
Ok(c_vars)
}
} }
/// Univariate polynomial in constraint system
#[derive(Clone)]
pub struct UniPolyVar {
pub coeffs: Vec<FpVar<Fr>>,
pub struct UniPolyVar<F: PrimeField> {
pub coeffs: Vec<FpVar<F>>,
} }
impl AllocVar<UniPoly, Fr> for UniPolyVar {
fn new_variable<T: Borrow<UniPoly>>(
cs: impl Into<Namespace<Fr>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|c| {
let cs = cs.into();
let cp: &UniPoly = c.borrow();
let mut coeffs_var = Vec::new();
for coeff in cp.coeffs.iter() {
let coeff_var = FpVar::<Fr>::new_variable(cs.clone(), || Ok(coeff), mode)?;
coeffs_var.push(coeff_var);
}
Ok(Self { coeffs: coeffs_var })
})
}
impl<F: PrimeField> AllocVar<UniPoly<F>, F> for UniPolyVar<F> {
fn new_variable<T: Borrow<UniPoly<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|c| {
let cs = cs.into();
let cp: &UniPoly<F> = c.borrow();
let mut coeffs_var = Vec::new();
for coeff in cp.coeffs.iter() {
let coeff_var = FpVar::<F>::new_variable(cs.clone(), || Ok(coeff), mode)?;
coeffs_var.push(coeff_var);
}
Ok(Self { coeffs: coeffs_var })
})
}
} }
impl UniPolyVar {
pub fn eval_at_zero(&self) -> FpVar<Fr> {
self.coeffs[0].clone()
}
impl<F: PrimeField> UniPolyVar<F> {
pub fn eval_at_zero(&self) -> FpVar<F> {
self.coeffs[0].clone()
}
pub fn eval_at_one(&self) -> FpVar<Fr> {
let mut res = self.coeffs[0].clone();
for i in 1..self.coeffs.len() {
res = &res + &self.coeffs[i];
}
res
pub fn eval_at_one(&self) -> FpVar<F> {
let mut res = self.coeffs[0].clone();
for i in 1..self.coeffs.len() {
res = &res + &self.coeffs[i];
} }
res
}
// mul without reduce
pub fn evaluate(&self, r: &FpVar<Fr>) -> FpVar<Fr> {
let mut eval = self.coeffs[0].clone();
let mut power = r.clone();
// TODO check if mul without reduce can help
pub fn evaluate(&self, r: &FpVar<F>) -> FpVar<F> {
let mut eval = self.coeffs[0].clone();
let mut power = r.clone();
for i in 1..self.coeffs.len() {
eval += &power * &self.coeffs[i];
power *= r;
}
eval
for i in 1..self.coeffs.len() {
eval += &power * &self.coeffs[i];
power *= r;
} }
eval
}
} }
/// Circuit gadget that implements the sumcheck verifier
#[derive(Clone)]
pub struct SumcheckVerificationCircuit {
pub polys: Vec<UniPoly>,
pub struct SumcheckVerificationCircuit<F: PrimeField> {
pub polys: Vec<UniPoly<F>>,
} }
impl SumcheckVerificationCircuit {
fn verifiy_sumcheck(
&self,
poly_vars: &[UniPolyVar],
claim_var: &FpVar<Fr>,
transcript_var: &mut PoseidonTranscripVar,
) -> Result<(FpVar<Fr>, Vec<FpVar<Fr>>), SynthesisError> {
let mut e_var = claim_var.clone();
let mut r_vars: Vec<FpVar<Fr>> = Vec::new();
for (poly_var, _poly) in poly_vars.iter().zip(self.polys.iter()) {
let res = poly_var.eval_at_one() + poly_var.eval_at_zero();
res.enforce_equal(&e_var)?;
transcript_var.append_vector(&poly_var.coeffs)?;
let r_i_var = transcript_var.challenge()?;
r_vars.push(r_i_var.clone());
e_var = poly_var.evaluate(&r_i_var.clone());
}
Ok((e_var, r_vars))
impl<F: PrimeField> SumcheckVerificationCircuit<F> {
fn verifiy_sumcheck(
&self,
poly_vars: &[UniPolyVar<F>],
claim_var: &FpVar<F>,
transcript_var: &mut PoseidonTranscripVar<F>,
) -> Result<(FpVar<F>, Vec<FpVar<F>>), SynthesisError> {
let mut e_var = claim_var.clone();
let mut r_vars: Vec<FpVar<F>> = Vec::new();
for (poly_var, _poly) in poly_vars.iter().zip(self.polys.iter()) {
let res = poly_var.eval_at_one() + poly_var.eval_at_zero();
res.enforce_equal(&e_var)?;
transcript_var.append_vector(&poly_var.coeffs)?;
let r_i_var = transcript_var.challenge()?;
r_vars.push(r_i_var.clone());
e_var = poly_var.evaluate(&r_i_var.clone());
} }
Ok((e_var, r_vars))
}
} }
#[derive(Clone)]
pub struct SparsePolyEntryVar {
idx: usize,
val_var: FpVar<Fr>,
pub struct SparsePolyEntryVar<F: PrimeField> {
idx: usize,
val_var: FpVar<F>,
} }
impl AllocVar<SparsePolyEntry, Fr> for SparsePolyEntryVar {
fn new_variable<T: Borrow<SparsePolyEntry>>(
cs: impl Into<Namespace<Fr>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|s| {
let cs = cs.into();
let spe: &SparsePolyEntry = s.borrow();
let val_var = FpVar::<Fr>::new_witness(cs, || Ok(spe.val))?;
Ok(Self {
idx: spe.idx,
val_var,
})
})
}
impl<F: PrimeField> AllocVar<SparsePolyEntry<F>, F> for SparsePolyEntryVar<F> {
fn new_variable<T: Borrow<SparsePolyEntry<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
_mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|s| {
let cs = cs.into();
let spe: &SparsePolyEntry<F> = s.borrow();
let val_var = FpVar::<F>::new_witness(cs, || Ok(spe.val))?;
Ok(Self {
idx: spe.idx,
val_var,
})
})
}
} }
#[derive(Clone)]
pub struct SparsePolynomialVar {
num_vars: usize,
Z_var: Vec<SparsePolyEntryVar>,
pub struct SparsePolynomialVar<F: PrimeField> {
Z_var: Vec<SparsePolyEntryVar<F>>,
} }
impl AllocVar<SparsePolynomial, Fr> for SparsePolynomialVar {
fn new_variable<T: Borrow<SparsePolynomial>>(
cs: impl Into<Namespace<Fr>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|s| {
let cs = cs.into();
let sp: &SparsePolynomial = s.borrow();
let mut Z_var = Vec::new();
for spe in sp.Z.iter() {
let spe_var = SparsePolyEntryVar::new_variable(cs.clone(), || Ok(spe), mode)?;
Z_var.push(spe_var);
}
Ok(Self {
num_vars: sp.num_vars,
Z_var,
})
})
}
impl<F: PrimeField> AllocVar<SparsePolynomial<F>, F> for SparsePolynomialVar<F> {
fn new_variable<T: Borrow<SparsePolynomial<F>>>(
cs: impl Into<Namespace<F>>,
f: impl FnOnce() -> Result<T, SynthesisError>,
mode: AllocationMode,
) -> Result<Self, SynthesisError> {
f().and_then(|s| {
let cs = cs.into();
let sp: &SparsePolynomial<F> = s.borrow();
let mut Z_var = Vec::new();
for spe in sp.Z.iter() {
let spe_var = SparsePolyEntryVar::new_variable(cs.clone(), || Ok(spe), mode)?;
Z_var.push(spe_var);
}
Ok(Self { Z_var })
})
}
} }
impl SparsePolynomialVar {
fn compute_chi(a: &[bool], r_vars: &[FpVar<Fr>]) -> FpVar<Fr> {
let mut chi_i_var = FpVar::<Fr>::one();
let one = FpVar::<Fr>::one();
for (i, r_var) in r_vars.iter().enumerate() {
if a[i] {
chi_i_var *= r_var;
} else {
chi_i_var *= &one - r_var;
}
}
chi_i_var
impl<F: PrimeField> SparsePolynomialVar<F> {
fn compute_chi(a: &[bool], r_vars: &[FpVar<F>]) -> FpVar<F> {
let mut chi_i_var = FpVar::<F>::one();
let one = FpVar::<F>::one();
for (i, r_var) in r_vars.iter().enumerate() {
if a[i] {
chi_i_var *= r_var;
} else {
chi_i_var *= &one - r_var;
}
} }
pub fn evaluate(&self, r_var: &[FpVar<Fr>]) -> FpVar<Fr> {
let mut sum = FpVar::<Fr>::zero();
for spe_var in self.Z_var.iter() {
// potential problem
let bits = &spe_var.idx.get_bits(r_var.len());
sum += SparsePolynomialVar::compute_chi(bits, r_var) * &spe_var.val_var;
}
sum
chi_i_var
}
pub fn evaluate(&self, r_var: &[FpVar<F>]) -> FpVar<F> {
let mut sum = FpVar::<F>::zero();
for spe_var in self.Z_var.iter() {
// potential problem
let bits = &spe_var.idx.get_bits(r_var.len());
sum += SparsePolynomialVar::compute_chi(bits, r_var) * &spe_var.val_var;
} }
sum
}
} }
#[derive(Clone)]
pub struct R1CSVerificationCircuit {
pub num_vars: usize,
pub num_cons: usize,
pub input: Vec<Fr>,
pub input_as_sparse_poly: SparsePolynomial,
pub evals: (Fr, Fr, Fr),
pub params: PoseidonParameters<Fr>,
pub prev_challenge: Fr,
pub claims_phase2: (Scalar, Scalar, Scalar, Scalar),
pub eval_vars_at_ry: Fr,
pub sc_phase1: SumcheckVerificationCircuit,
pub sc_phase2: SumcheckVerificationCircuit,
// The point on which the polynomial was evaluated by the prover.
pub claimed_ry: Vec<Scalar>,
pub claimed_transcript_sat_state: Scalar,
pub struct R1CSVerificationCircuit<F: PrimeField> {
pub num_vars: usize,
pub num_cons: usize,
pub input: Vec<F>,
pub input_as_sparse_poly: SparsePolynomial<F>,
pub evals: (F, F, F),
pub params: PoseidonConfig<F>,
pub prev_challenge: F,
pub claims_phase2: (F, F, F, F),
pub eval_vars_at_ry: F,
pub sc_phase1: SumcheckVerificationCircuit<F>,
pub sc_phase2: SumcheckVerificationCircuit<F>,
// The point on which the polynomial was evaluated by the prover.
pub claimed_rx: Vec<F>,
pub claimed_ry: Vec<F>,
pub claimed_transcript_sat_state: F,
} }
impl R1CSVerificationCircuit {
fn new(config: &VerifierConfig) -> Self {
Self {
num_vars: config.num_vars,
num_cons: config.num_cons,
input: config.input.clone(),
input_as_sparse_poly: config.input_as_sparse_poly.clone(),
evals: config.evals,
params: config.params.clone(),
prev_challenge: config.prev_challenge,
claims_phase2: config.claims_phase2,
eval_vars_at_ry: config.eval_vars_at_ry,
sc_phase1: SumcheckVerificationCircuit {
polys: config.polys_sc1.clone(),
},
sc_phase2: SumcheckVerificationCircuit {
polys: config.polys_sc2.clone(),
},
claimed_ry: config.ry.clone(),
claimed_transcript_sat_state: config.transcript_sat_state,
}
impl<F: PrimeField> R1CSVerificationCircuit<F> {
pub fn new<E: Pairing<ScalarField = F>>(config: &VerifierConfig<E>) -> Self {
Self {
num_vars: config.num_vars,
num_cons: config.num_cons,
input: config.input.clone(),
input_as_sparse_poly: config.input_as_sparse_poly.clone(),
evals: config.evals,
params: config.params.clone(),
prev_challenge: config.prev_challenge,
claims_phase2: config.claims_phase2,
eval_vars_at_ry: config.eval_vars_at_ry,
sc_phase1: SumcheckVerificationCircuit {
polys: config.polys_sc1.clone(),
},
sc_phase2: SumcheckVerificationCircuit {
polys: config.polys_sc2.clone(),
},
claimed_rx: config.rx.clone(),
claimed_ry: config.ry.clone(),
claimed_transcript_sat_state: config.transcript_sat_state,
} }
}
} }
impl ConstraintSynthesizer<Fr> for R1CSVerificationCircuit {
fn generate_constraints(self, cs: ConstraintSystemRef<Fr>) -> ark_relations::r1cs::Result<()> {
let mut transcript_var =
PoseidonTranscripVar::new(cs.clone(), &self.params, Some(self.prev_challenge));
let poly_sc1_vars = self
.sc_phase1
.polys
.iter()
.map(|p| {
UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap()
})
.collect::<Vec<UniPolyVar>>();
let poly_sc2_vars = self
.sc_phase2
.polys
.iter()
.map(|p| {
UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap()
})
.collect::<Vec<UniPolyVar>>();
let input_vars = self
.input
.iter()
.map(|i| {
FpVar::<Fr>::new_variable(cs.clone(), || Ok(i), AllocationMode::Witness).unwrap()
})
.collect::<Vec<FpVar<Fr>>>();
let claimed_ry_vars = self
.claimed_ry
.iter()
.map(|r| {
FpVar::<Fr>::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap()
})
.collect::<Vec<FpVar<Fr>>>();
transcript_var.append_vector(&input_vars)?;
let num_rounds_x = self.num_cons.log_2();
let _num_rounds_y = (2 * self.num_vars).log_2();
let tau_vars = transcript_var.challenge_vector(num_rounds_x)?;
let claim_phase1_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::zero()))?;
let (claim_post_phase1_var, rx_var) = self.sc_phase1.verifiy_sumcheck(
&poly_sc1_vars,
&claim_phase1_var,
&mut transcript_var,
)?;
let (Az_claim, Bz_claim, Cz_claim, prod_Az_Bz_claims) = &self.claims_phase2;
let Az_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(Az_claim))?;
let Bz_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(Bz_claim))?;
let Cz_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(Cz_claim))?;
let prod_Az_Bz_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(prod_Az_Bz_claims))?;
let one = FpVar::<Fr>::one();
let prod_vars: Vec<FpVar<Fr>> = (0..rx_var.len())
.map(|i| (&rx_var[i] * &tau_vars[i]) + (&one - &rx_var[i]) * (&one - &tau_vars[i]))
.collect();
let mut taus_bound_rx_var = FpVar::<Fr>::one();
for p_var in prod_vars.iter() {
taus_bound_rx_var *= p_var;
}
let expected_claim_post_phase1_var =
(&prod_Az_Bz_claim_var - &Cz_claim_var) * &taus_bound_rx_var;
claim_post_phase1_var.enforce_equal(&expected_claim_post_phase1_var)?;
let r_A_var = transcript_var.challenge()?;
let r_B_var = transcript_var.challenge()?;
let r_C_var = transcript_var.challenge()?;
let claim_phase2_var =
&r_A_var * &Az_claim_var + &r_B_var * &Bz_claim_var + &r_C_var * &Cz_claim_var;
let (claim_post_phase2_var, ry_var) = self.sc_phase2.verifiy_sumcheck(
&poly_sc2_vars,
&claim_phase2_var,
&mut transcript_var,
)?;
// Because the verifier checks the commitment opening on point ry outside
// the circuit, the prover needs to send ry to the verifier (making the
// proof size O(log n)). As this point is normally obtained by the verifier
// from the second round of sumcheck, the circuit needs to ensure the
// claimed point, coming from the prover, is actually the point derived
// inside the circuit. These additional checks will be removed
// when the commitment verification is done inside the circuit.
for (i, r) in claimed_ry_vars.iter().enumerate() {
ry_var[i].enforce_equal(r)?;
}
let input_as_sparse_poly_var = SparsePolynomialVar::new_variable(
cs.clone(),
|| Ok(&self.input_as_sparse_poly),
AllocationMode::Witness,
)?;
let poly_input_eval_var = input_as_sparse_poly_var.evaluate(&ry_var[1..]);
let eval_vars_at_ry_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(&self.eval_vars_at_ry))?;
let eval_Z_at_ry_var = (FpVar::<Fr>::one() - &ry_var[0]) * &eval_vars_at_ry_var
+ &ry_var[0] * &poly_input_eval_var;
let (eval_A_r, eval_B_r, eval_C_r) = self.evals;
let eval_A_r_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(eval_A_r))?;
let eval_B_r_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(eval_B_r))?;
let eval_C_r_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(eval_C_r))?;
let scalar_var =
&r_A_var * &eval_A_r_var + &r_B_var * &eval_B_r_var + &r_C_var * &eval_C_r_var;
let expected_claim_post_phase2_var = eval_Z_at_ry_var * scalar_var;
claim_post_phase2_var.enforce_equal(&expected_claim_post_phase2_var)?;
let expected_transcript_state_var = transcript_var.challenge()?;
let claimed_transcript_state_var =
FpVar::<Fr>::new_input(cs, || Ok(self.claimed_transcript_sat_state))?;
/// This section implements the sumcheck verification part of Spartan
impl<F: PrimeField> ConstraintSynthesizer<F> for R1CSVerificationCircuit<F> {
fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> ark_relations::r1cs::Result<()> {
let initial_challenge_var = FpVar::<F>::new_input(cs.clone(), || Ok(self.prev_challenge))?;
let mut transcript_var =
PoseidonTranscripVar::new(cs.clone(), &self.params, initial_challenge_var);
let poly_sc1_vars = self
.sc_phase1
.polys
.iter()
.map(|p| UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap())
.collect::<Vec<UniPolyVar<_>>>();
let poly_sc2_vars = self
.sc_phase2
.polys
.iter()
.map(|p| UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap())
.collect::<Vec<UniPolyVar<_>>>();
let input_vars = self
.input
.iter()
.map(|i| FpVar::<F>::new_variable(cs.clone(), || Ok(i), AllocationMode::Input).unwrap())
.collect::<Vec<FpVar<F>>>();
let claimed_rx_vars = self
.claimed_rx
.iter()
.map(|r| FpVar::<F>::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap())
.collect::<Vec<FpVar<F>>>();
let claimed_ry_vars = self
.claimed_ry
.iter()
.map(|r| FpVar::<F>::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap())
.collect::<Vec<FpVar<F>>>();
transcript_var.append_vector(&input_vars)?;
let num_rounds_x = self.num_cons.log_2();
let _num_rounds_y = (2 * self.num_vars).log_2();
let tau_vars = transcript_var.challenge_scalar_vec(num_rounds_x)?;
let claim_phase1_var = FpVar::<F>::new_witness(cs.clone(), || Ok(F::zero()))?;
let (claim_post_phase1_var, rx_var) =
self
.sc_phase1
.verifiy_sumcheck(&poly_sc1_vars, &claim_phase1_var, &mut transcript_var)?;
// The prover sends (rx, ry) to the verifier for the evaluation proof so
// the constraints need to ensure it is indeed the result from the first
// round of sumcheck verification.
for (i, r) in claimed_rx_vars.iter().enumerate() {
rx_var[i].enforce_equal(r)?;
}
// Ensure that the prover and verifier transcript views are consistent at
// the end of the satisfiability proof.
expected_transcript_state_var.enforce_equal(&claimed_transcript_state_var)?;
let (Az_claim, Bz_claim, Cz_claim, prod_Az_Bz_claims) = &self.claims_phase2;
Ok(())
}
}
let Az_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(Az_claim))?;
let Bz_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(Bz_claim))?;
let Cz_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(Cz_claim))?;
let prod_Az_Bz_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(prod_Az_Bz_claims))?;
let one = FpVar::<F>::one();
let prod_vars: Vec<FpVar<F>> = (0..rx_var.len())
.map(|i| (&rx_var[i] * &tau_vars[i]) + (&one - &rx_var[i]) * (&one - &tau_vars[i]))
.collect();
let mut taus_bound_rx_var = FpVar::<F>::one();
#[derive(Clone)]
pub struct VerifierConfig {
pub num_vars: usize,
pub num_cons: usize,
pub input: Vec<Fr>,
pub input_as_sparse_poly: SparsePolynomial,
pub evals: (Fr, Fr, Fr),
pub params: PoseidonParameters<Fr>,
pub prev_challenge: Fr,
pub claims_phase2: (Fr, Fr, Fr, Fr),
pub eval_vars_at_ry: Fr,
pub polys_sc1: Vec<UniPoly>,
pub polys_sc2: Vec<UniPoly>,
pub ry: Vec<Scalar>,
pub transcript_sat_state: Scalar,
}
#[derive(Clone)]
pub struct VerifierCircuit {
pub inner_circuit: R1CSVerificationCircuit,
pub inner_proof: GrothProof<I>,
pub inner_vk: PreparedVerifyingKey<I>,
pub eval_vars_at_ry: Fr,
pub claims_phase2: (Fr, Fr, Fr, Fr),
pub ry: Vec<Fr>,
pub transcript_sat_state: Scalar,
}
for p_var in prod_vars.iter() {
taus_bound_rx_var *= p_var;
}
impl VerifierCircuit {
pub fn new<R: Rng + CryptoRng>(
config: &VerifierConfig,
mut rng: &mut R,
) -> Result<Self, SynthesisError> {
let inner_circuit = R1CSVerificationCircuit::new(config);
let (pk, vk) = Groth16::<I>::setup(inner_circuit.clone(), &mut rng).unwrap();
let proof = Groth16::<I>::prove(&pk, inner_circuit.clone(), &mut rng)?;
let pvk = Groth16::<I>::process_vk(&vk).unwrap();
Ok(Self {
inner_circuit,
inner_proof: proof,
inner_vk: pvk,
eval_vars_at_ry: config.eval_vars_at_ry,
claims_phase2: config.claims_phase2,
ry: config.ry.clone(),
transcript_sat_state: config.transcript_sat_state,
})
let expected_claim_post_phase1_var =
(&prod_Az_Bz_claim_var - &Cz_claim_var) * &taus_bound_rx_var;
claim_post_phase1_var.enforce_equal(&expected_claim_post_phase1_var)?;
let r_A_var = transcript_var.challenge()?;
let r_B_var = transcript_var.challenge()?;
let r_C_var = transcript_var.challenge()?;
let claim_phase2_var =
&r_A_var * &Az_claim_var + &r_B_var * &Bz_claim_var + &r_C_var * &Cz_claim_var;
let (claim_post_phase2_var, ry_var) =
self
.sc_phase2
.verifiy_sumcheck(&poly_sc2_vars, &claim_phase2_var, &mut transcript_var)?;
// Because the verifier checks the commitment opening on point ry outside
// the circuit, the prover needs to send ry to the verifier (making the
// proof size O(log n)). As this point is normally obtained by the verifier
// from the second round of sumcheck, the circuit needs to ensure the
// claimed point, coming from the prover, is actually the point derived
// inside the circuit. These additional checks will be removed
// when the commitment verification is done inside the circuit.
// Moreover, (rx, ry) will be used in the evaluation proof.
for (i, r) in claimed_ry_vars.iter().enumerate() {
ry_var[i].enforce_equal(r)?;
} }
let input_as_sparse_poly_var = SparsePolynomialVar::new_variable(
cs.clone(),
|| Ok(&self.input_as_sparse_poly),
AllocationMode::Witness,
)?;
let poly_input_eval_var = input_as_sparse_poly_var.evaluate(&ry_var[1..]);
let eval_vars_at_ry_var = FpVar::<F>::new_input(cs.clone(), || Ok(&self.eval_vars_at_ry))?;
let eval_Z_at_ry_var =
(FpVar::<F>::one() - &ry_var[0]) * &eval_vars_at_ry_var + &ry_var[0] * &poly_input_eval_var;
let (eval_A_r, eval_B_r, eval_C_r) = self.evals;
let eval_A_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_A_r))?;
let eval_B_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_B_r))?;
let eval_C_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_C_r))?;
let scalar_var = &r_A_var * &eval_A_r_var + &r_B_var * &eval_B_r_var + &r_C_var * &eval_C_r_var;
let expected_claim_post_phase2_var = eval_Z_at_ry_var * scalar_var;
claim_post_phase2_var.enforce_equal(&expected_claim_post_phase2_var)?;
let expected_transcript_state_var = transcript_var.challenge()?;
let claimed_transcript_state_var =
FpVar::<F>::new_input(cs, || Ok(self.claimed_transcript_sat_state))?;
// Ensure that the prover and verifier transcript views are consistent at
// the end of the satisfiability proof.
expected_transcript_state_var.enforce_equal(&claimed_transcript_state_var)?;
Ok(())
}
} }
impl ConstraintSynthesizer<Fq> for VerifierCircuit {
fn generate_constraints(self, cs: ConstraintSystemRef<Fq>) -> ark_relations::r1cs::Result<()> {
let proof_var =
ProofVar::<I, IV>::new_witness(cs.clone(), || Ok(self.inner_proof.clone()))?;
let (v_A, v_B, v_C, v_AB) = self.claims_phase2;
let mut pubs = vec![];
pubs.extend(self.ry);
pubs.extend(vec![v_A, v_B, v_C, v_AB]);
pubs.extend(vec![self.eval_vars_at_ry, self.transcript_sat_state]);
let bits = pubs
.iter()
.map(|c| {
let bits: Vec<bool> = BitIteratorLE::new(c.into_repr().as_ref().to_vec()).collect();
Vec::new_witness(cs.clone(), || Ok(bits))
})
.collect::<Result<Vec<_>, _>>()?;
let input_var = BooleanInputVar::<Fr, Fq>::new(bits);
let vk_var = PreparedVerifyingKeyVar::new_witness(cs, || Ok(self.inner_vk.clone()))?;
Groth16VerifierGadget::verify_with_processed_vk(&vk_var, &input_var, &proof_var)?
.enforce_equal(&Boolean::constant(true))?;
Ok(())
}
#[derive(Clone)]
pub struct VerifierConfig<E: Pairing> {
pub comm: Commitment<E>,
pub num_vars: usize,
pub num_cons: usize,
pub input: Vec<E::ScalarField>,
pub input_as_sparse_poly: SparsePolynomial<E::ScalarField>,
pub evals: (E::ScalarField, E::ScalarField, E::ScalarField),
pub params: PoseidonConfig<E::ScalarField>,
pub prev_challenge: E::ScalarField,
pub claims_phase2: (
E::ScalarField,
E::ScalarField,
E::ScalarField,
E::ScalarField,
),
pub eval_vars_at_ry: E::ScalarField,
pub polys_sc1: Vec<UniPoly<E::ScalarField>>,
pub polys_sc2: Vec<UniPoly<E::ScalarField>>,
pub rx: Vec<E::ScalarField>,
pub ry: Vec<E::ScalarField>,
pub transcript_sat_state: E::ScalarField,
} }
// Skeleton for the polynomial commitment verification circuit
// #[derive(Clone)]
// pub struct VerifierCircuit {
// pub inner_circuit: R1CSVerificationCircuit,
// pub inner_proof: GrothProof<I>,
// pub inner_vk: PreparedVerifyingKey<I>,
// pub eval_vars_at_ry: Fr,
// pub claims_phase2: (Fr, Fr, Fr, Fr),
// pub ry: Vec<Fr>,
// pub transcript_sat_state: Scalar,
// }
// impl VerifierCircuit {
// pub fn new<R: Rng + CryptoRng>(
// config: &VerifierConfig,
// mut rng: &mut R,
// ) -> Result<Self, SynthesisError> {
// let inner_circuit = R1CSVerificationCircuit::new(config);
// let (pk, vk) = Groth16::<I>::setup(inner_circuit.clone(), &mut rng).unwrap();
// let proof = Groth16::<I>::prove(&pk, inner_circuit.clone(), &mut rng)?;
// let pvk = Groth16::<I>::process_vk(&vk).unwrap();
// Ok(Self {
// inner_circuit,
// inner_proof: proof,
// inner_vk: pvk,
// eval_vars_at_ry: config.eval_vars_at_ry,
// claims_phase2: config.claims_phase2,
// ry: config.ry.clone(),
// transcript_sat_state: config.transcript_sat_state,
// })
// }
// }
// impl ConstraintSynthesizer<Fq> for VerifierCircuit {
// fn generate_constraints(self, cs: ConstraintSystemRef<Fq>) -> ark_relations::r1cs::Result<()> {
// let proof_var = ProofVar::<I, IV>::new_witness(cs.clone(), || Ok(self.inner_proof.clone()))?;
// let (v_A, v_B, v_C, v_AB) = self.claims_phase2;
// let mut pubs = vec![];
// pubs.extend(self.ry);
// pubs.extend(vec![v_A, v_B, v_C, v_AB]);
// pubs.extend(vec![self.eval_vars_at_ry, self.transcript_sat_state]);
// let bits = pubs
// .iter()
// .map(|c| {
// let bits: Vec<bool> = BitIteratorLE::new(c.into_bigint().as_ref().to_vec()).collect();
// Vec::new_witness(cs.clone(), || Ok(bits))
// })
// .collect::<Result<Vec<_>, _>>()?;
// let input_var = BooleanInputVar::<Fr, Fq>::new(bits);
// let vk_var = PreparedVerifyingKeyVar::new_witness(cs, || Ok(self.inner_vk.clone()))?;
// Groth16VerifierGadget::verify_with_processed_vk(&vk_var, &input_var, &proof_var)?
// .enforce_equal(&Boolean::constant(true))?;
// Ok(())
// }
// }
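A note for readers of `verifiy_sumcheck` and `generate_constraints` above: for each round $i$ the gadget takes the prover-supplied univariate $p_i$, enforces the standard sumcheck relation $p_i(0) + p_i(1) = e_i$, absorbs the coefficients of $p_i$ into the Poseidon transcript, squeezes the challenge $r_i$, and carries the next claim $e_{i+1} = p_i(r_i)$ into the following round. For phase 1 the initial claim $e_0$ is fixed to $0$ (see `claim_phase1_var`), and the final claims are tied to the R1CS evaluation checks later in `generate_constraints`.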

+ 669
- 689
src/dense_mlpoly.rs
File diff suppressed because it is too large


+ 19
- 19
src/errors.rs

@ -3,30 +3,30 @@ use thiserror::Error;
#[derive(Error, Debug)]
pub enum ProofVerifyError {
#[error("Proof verification failed")]
InternalError,
#[error("Compressed group element failed to decompress: {0:?}")]
DecompressionError(Vec<u8>),
#[error("Proof verification failed")]
InternalError,
#[error("Compressed group element failed to decompress: {0:?}")]
DecompressionError(Vec<u8>),
}
impl Default for ProofVerifyError {
fn default() -> Self {
ProofVerifyError::InternalError
}
fn default() -> Self {
ProofVerifyError::InternalError
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum R1CSError {
/// returned if the number of constraints is not a power of 2
NonPowerOfTwoCons,
/// returned if the number of variables is not a power of 2
NonPowerOfTwoVars,
/// returned if a wrong number of inputs in an assignment are supplied
InvalidNumberOfInputs,
/// returned if a wrong number of variables in an assignment are supplied
InvalidNumberOfVars,
/// returned if a [u8;32] does not parse into a valid Scalar in the field of ristretto255
InvalidScalar,
/// returned if the supplied row or col in (row,col,val) tuple is out of range
InvalidIndex,
/// returned if the number of constraints is not a power of 2
NonPowerOfTwoCons,
/// returned if the number of variables is not a power of 2
NonPowerOfTwoVars,
/// returned if a wrong number of inputs in an assignment are supplied
InvalidNumberOfInputs,
/// returned if a wrong number of variables in an assignment are supplied
InvalidNumberOfVars,
/// returned if a [u8;32] does not parse into a valid Scalar in the field of ristretto255
InvalidScalar,
/// returned if the supplied row or col in (row,col,val) tuple is out of range
InvalidIndex,
}

+ 0
- 80
src/group.rs

@ -1,80 +0,0 @@
use crate::errors::ProofVerifyError;
use ark_ec::msm::VariableBaseMSM;
use ark_ff::PrimeField;
use lazy_static::lazy_static;
use super::scalar::Scalar;
use ark_ec::ProjectiveCurve;
use ark_serialize::*;
use core::borrow::Borrow;
pub type GroupElement = ark_bls12_377::G1Projective;
pub type GroupElementAffine = ark_bls12_377::G1Affine;
pub type Fq = ark_bls12_377::Fq;
pub type Fr = ark_bls12_377::Fr;
#[derive(Clone, Eq, PartialEq, Hash, Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct CompressedGroup(pub Vec<u8>);
lazy_static! {
pub static ref GROUP_BASEPOINT: GroupElement = GroupElement::prime_subgroup_generator();
}
pub trait CompressGroupElement {
fn compress(&self) -> CompressedGroup;
}
pub trait DecompressGroupElement {
fn decompress(encoded: &CompressedGroup) -> Option<GroupElement>;
}
pub trait UnpackGroupElement {
fn unpack(&self) -> Result<GroupElement, ProofVerifyError>;
}
impl CompressGroupElement for GroupElement {
fn compress(&self) -> CompressedGroup {
let mut point_encoding = Vec::new();
self.serialize(&mut point_encoding).unwrap();
CompressedGroup(point_encoding)
}
}
impl DecompressGroupElement for GroupElement {
fn decompress(encoded: &CompressedGroup) -> Option<Self> {
let res = GroupElement::deserialize(&*encoded.0);
if let Ok(r) = res {
Some(r)
} else {
println!("{:?}", res);
None
}
}
}
impl UnpackGroupElement for CompressedGroup {
fn unpack(&self) -> Result<GroupElement, ProofVerifyError> {
let encoded = self.0.clone();
GroupElement::decompress(self).ok_or(ProofVerifyError::DecompressionError(encoded))
}
}
pub trait VartimeMultiscalarMul {
fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement;
}
impl VartimeMultiscalarMul for GroupElement {
fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement {
let repr_scalars = scalars
.iter()
.map(|S| S.borrow().into_repr())
.collect::<Vec<<Scalar as PrimeField>::BigInt>>();
let aff_points = points
.iter()
.map(|P| P.borrow().into_affine())
.collect::<Vec<GroupElementAffine>>();
VariableBaseMSM::multi_scalar_mul(aff_points.as_slice(), repr_scalars.as_slice())
}
}

+ 247
- 747
src/lib.rs
File diff suppressed because it is too large


+ 56
- 0
src/macros.rs

@ -0,0 +1,56 @@
macro_rules! try_par {
($(let $name:ident = $f:expr),+) => {
$(
let mut $name = None;
)+
rayon::scope(|s| {
$(
let $name = &mut $name;
s.spawn(move |_| {
*$name = Some($f);
});)+
});
$(
let $name = $name.unwrap()?;
)+
};
}
macro_rules! par {
($(let $name:ident = $f:expr),+) => {
$(
let mut $name = None;
)+
rayon::scope(|s| {
$(
let $name = &mut $name;
s.spawn(move |_| {
*$name = Some($f);
});)+
});
$(
let $name = $name.unwrap();
)+
};
($(let ($name1:ident, $name2:ident) = $f:block),+) => {
$(
let mut $name1 = None;
let mut $name2 = None;
)+
rayon::scope(|s| {
$(
let $name1 = &mut $name1;
let $name2 = &mut $name2;
s.spawn(move |_| {
let (a, b) = $f;
*$name1 = Some(a);
*$name2 = Some(b);
});)+
});
$(
let $name1 = $name1.unwrap();
let $name2 = $name2.unwrap();
)+
}
}
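A small usage sketch for the `par!` macro above; the function and the data are made up for illustration, and it only assumes `rayon` and that the macro is in scope. Each `let` binding is computed in its own task inside a `rayon::scope`, and the macro rebinds the unwrapped results afterwards.
// Hypothetical example: compute two independent sums in parallel.
fn parallel_sums(xs: &[u64], ys: &[u64]) -> (u64, u64) {
  par! {
    let sum_x = xs.iter().sum::<u64>(),
    let sum_y = ys.iter().sum::<u64>()
  };
  (sum_x, sum_y)
}
`try_par!` works the same way, except that every expression must return a `Result`; the `?` is applied after the scope, so the first error is propagated once all tasks have finished.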

+ 26
- 26
src/math.rs

@ -1,36 +1,36 @@
pub trait Math {
fn square_root(self) -> usize;
fn pow2(self) -> usize;
fn get_bits(self, num_bits: usize) -> Vec<bool>;
fn log_2(self) -> usize;
fn square_root(self) -> usize;
fn pow2(self) -> usize;
fn get_bits(self, num_bits: usize) -> Vec<bool>;
fn log_2(self) -> usize;
}
impl Math for usize {
#[inline]
fn square_root(self) -> usize {
(self as f64).sqrt() as usize
}
#[inline]
fn square_root(self) -> usize {
(self as f64).sqrt() as usize
}
#[inline]
fn pow2(self) -> usize {
let base: usize = 2;
base.pow(self as u32)
}
#[inline]
fn pow2(self) -> usize {
let base: usize = 2;
base.pow(self as u32)
}
/// Returns the num_bits from n in a canonical order
fn get_bits(self, num_bits: usize) -> Vec<bool> {
(0..num_bits)
.map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
.collect::<Vec<bool>>()
}
/// Returns the num_bits from n in a canonical order
fn get_bits(self, num_bits: usize) -> Vec<bool> {
(0..num_bits)
.map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
.collect::<Vec<bool>>()
}
fn log_2(self) -> usize {
assert_ne!(self, 0);
fn log_2(self) -> usize {
assert_ne!(self, 0);
if self.is_power_of_two() {
(1usize.leading_zeros() - self.leading_zeros()) as usize
} else {
(0usize.leading_zeros() - self.leading_zeros()) as usize
}
if self.is_power_of_two() {
(1usize.leading_zeros() - self.leading_zeros()) as usize
} else {
(0usize.leading_zeros() - self.leading_zeros()) as usize
} }
}
} }
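A few hand-checked values for the helpers above, assuming `use crate::math::Math;` is in scope; they follow directly from the definitions.
fn math_examples() {
  assert_eq!(16usize.log_2(), 4);       // exact for powers of two
  assert_eq!(5usize.pow2(), 32);        // 2^5
  assert_eq!(10usize.square_root(), 3); // floor of sqrt(10)
  // get_bits returns the bits most-significant first: 5 = 0b0101
  assert_eq!(5usize.get_bits(4), vec![false, true, false, true]);
}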

+ 410
- 0
src/mipp.rs

@ -0,0 +1,410 @@
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;
use ark_ec::scalar_mul::variable_base::VariableBaseMSM;
use ark_ec::CurveGroup;
use ark_ec::{pairing::Pairing, AffineRepr};
use ark_ff::{Field, PrimeField};
use ark_poly::DenseMultilinearExtension;
use ark_poly_commit::multilinear_pc::data_structures::{
CommitmentG2, CommitterKey, ProofG1, VerifierKey,
};
use ark_poly_commit::multilinear_pc::MultilinearPC;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
use ark_std::One;
use ark_std::Zero;
use rayon::iter::ParallelIterator;
use rayon::prelude::IntoParallelIterator;
use rayon::prelude::*;
use std::ops::{AddAssign, Mul, MulAssign};
use thiserror::Error;
#[derive(Debug, Clone, CanonicalDeserialize, CanonicalSerialize)]
pub struct MippProof<E: Pairing> {
pub comms_t: Vec<(<E as Pairing>::TargetField, <E as Pairing>::TargetField)>,
pub comms_u: Vec<(E::G1Affine, E::G1Affine)>,
pub final_a: E::G1Affine,
pub final_h: E::G2Affine,
pub pst_proof_h: ProofG1<E>,
}
impl<E: Pairing> MippProof<E> {
pub fn prove(
transcript: &mut PoseidonTranscript<E::ScalarField>,
ck: &CommitterKey<E>,
a: Vec<E::G1Affine>,
y: Vec<E::ScalarField>,
h: Vec<E::G2Affine>,
U: &E::G1Affine,
_T: &<E as Pairing>::TargetField,
) -> Result<MippProof<E>, Error> {
// the values of vectors A and y rescaled at each step of the loop
let (mut m_a, mut m_y) = (a.clone(), y.clone());
// the values of the commitment keys h for the vector A rescaled at
// each step of the loop
let mut m_h = h.clone();
// storing the cross commitments for including in the proofs
let mut comms_t = Vec::new();
let mut comms_u = Vec::new();
// the transcript challenges
let mut xs: Vec<E::ScalarField> = Vec::new();
let mut xs_inv: Vec<E::ScalarField> = Vec::new();
// only U is appended here because the aggregated commitment T has been
// appended already
transcript.append(b"U", U);
while m_a.len() > 1 {
// recursive step
// Recurse with problem of half size
let split = m_a.len() / 2;
// MIPP where n' = split
// a[:n'] a[n':]
let (a_l, a_r) = m_a.split_at_mut(split);
// y[:n'] y[n':]
let (y_l, y_r) = m_y.split_at_mut(split);
// h[:n'] h[n':]
let (h_l, h_r) = m_h.split_at_mut(split);
// since we do this in parallel we take references first so they can be
// moved into the macro's rayon scope.
let (_rh_l, _rh_r) = (&h_l, &h_r);
let (ra_l, ra_r) = (&a_l, &a_r);
let (ry_l, ry_r) = (&y_l, &y_r);
try_par! {
// MIPP part
// Compute cross commitments
// u_l = a[n':] ^ y[:n']
// TODO to replace by bitsf_multiexp
let comm_u_l = multiexponentiation(ra_l, &ry_r),
// u_r = a[:n'] ^ y[n':]
let comm_u_r = multiexponentiation(ra_r, &ry_l)
};
par! {
// Compute the cross pairing products over the distinct halfs of A
// t_l = a[n':] * h[:n']
let comm_t_l = pairings_product::<E>(&a_l, h_r),
// t_r = a[:n'] * h[n':]
let comm_t_r = pairings_product::<E>(&a_r, h_l)
};
// Fiat-Shamir challenge
transcript.append(b"comm_u_l", &comm_u_l);
transcript.append(b"comm_u_r", &comm_u_r);
transcript.append(b"comm_t_l", &comm_t_l);
transcript.append(b"comm_t_r", &comm_t_r);
let c_inv = transcript.challenge_scalar::<E::ScalarField>(b"challenge_i");
// Optimization for the multiexponentiation: rescale the G2 elements with
// the 128-bit challenge. Swap 'c' and 'c_inv' since we
// can't control the bit size of c_inv
let c = c_inv.inverse().unwrap();
// Set up values for next step of recursion by compressing as follows
// a[n':] + a[:n']^x
compress(&mut m_a, split, &c);
// y[n':] + y[:n']^x_inv
compress_field(&mut m_y, split, &c_inv);
// h[n':] + h[:n']^x_inv
compress(&mut m_h, split, &c_inv);
comms_t.push((comm_t_l, comm_t_r));
comms_u.push((comm_u_l.into_affine(), comm_u_r.into_affine()));
xs.push(c);
xs_inv.push(c_inv);
}
assert!(m_a.len() == 1 && m_y.len() == 1 && m_h.len() == 1);
let final_a = m_a[0];
let final_h = m_h[0];
// get the structured polynomial p_h for which final_h = h^{p_h(\vec{t})}
// is the PST commitment given generator h and toxic waste \vec{t}
let poly = DenseMultilinearExtension::<E::ScalarField>::from_evaluations_vec(
xs_inv.len(),
Self::polynomial_evaluations_from_transcript::<E::ScalarField>(&xs_inv),
);
let c = MultilinearPC::<E>::commit_g2(ck, &poly);
debug_assert!(c.h_product == final_h);
// generate a proof of opening final_h at the random point rs
// from the transcript
let rs: Vec<E::ScalarField> = (0..poly.num_vars)
.into_iter()
.map(|_| transcript.challenge_scalar::<E::ScalarField>(b"random_point"))
.collect();
let pst_proof_h = MultilinearPC::<E>::open_g1(ck, &poly, &rs);
Ok(MippProof {
comms_t,
comms_u,
final_a,
final_h,
pst_proof_h,
})
}
// builds the polynomial p_h in the Lagrange basis from the
// inverses of the transcript challenges; it is the structured
// polynomial $\prod_i(1 - z_i + cs_inv[m - i - 1] * z_i)$
// where m is the length of cs_inv and the z_i are the unknowns
fn polynomial_evaluations_from_transcript<F: Field>(cs_inv: &[F]) -> Vec<F> {
let m = cs_inv.len();
let pow_m = 2_usize.pow(m as u32);
// constructs the list of evaluations over the boolean hypercube \{0,1\}^m
let evals = (0..pow_m)
.into_par_iter()
.map(|i| {
let mut res = F::one();
for j in 0..m {
// we iterate from lsb to msb and, in case the bit is 1,
// we multiply by the corresponding challenge, i.e. the one whose
// index corresponds to the bit's position
if (i >> j) & 1 == 1 {
res *= cs_inv[m - j - 1];
}
}
res
})
.collect();
evals
}
pub fn verify(
vk: &VerifierKey<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
proof: &MippProof<E>,
point: Vec<E::ScalarField>,
U: &E::G1Affine,
T: &<E as Pairing>::TargetField,
) -> bool {
let comms_u = proof.comms_u.clone();
let comms_t = proof.comms_t.clone();
let mut xs = Vec::new();
let mut xs_inv = Vec::new();
let mut final_y = E::ScalarField::one();
let mut final_res = MippTU {
tc: T.clone(),
uc: U.into_group(),
};
transcript.append(b"U", U);
// Challenges need to be generated first in sequential order so the
// prover and the verifier have a consistent view of the transcript
for (i, (comm_u, comm_t)) in comms_u.iter().zip(comms_t.iter()).enumerate() {
let (comm_u_l, comm_u_r) = comm_u;
let (comm_t_l, comm_t_r) = comm_t;
// Fiat-Shamir challenge
transcript.append(b"comm_u_l", comm_u_l);
transcript.append(b"comm_u_r", comm_u_r);
transcript.append(b"comm_t_l", comm_t_l);
transcript.append(b"comm_t_r", comm_t_r);
let c_inv = transcript.challenge_scalar::<E::ScalarField>(b"challenge_i");
let c = c_inv.inverse().unwrap();
xs.push(c);
xs_inv.push(c_inv);
// the verifier computes final_y on their own because
// these are only field operations, so they are fast and parallelisation
// doesn't bring much improvement
final_y *= E::ScalarField::one() + c_inv.mul(point[i]) - point[i];
}
// First, each entry of T and U is multiplied independently by its
// respective challenge, which is done in parallel and, at the end,
// the results are merged together for each vector following their
// corresponding merge operation.
enum Op<'a, E: Pairing> {
TC(&'a E::TargetField, <E::ScalarField as PrimeField>::BigInt),
UC(&'a E::G1Affine, &'a E::ScalarField),
}
let res = comms_t
.par_iter()
.zip(comms_u.par_iter())
.zip(xs.par_iter().zip(xs_inv.par_iter()))
.flat_map(|((comm_t, comm_u), (c, c_inv))| {
let (comm_t_l, comm_t_r) = comm_t;
let (comm_u_l, comm_u_r) = comm_u;
// we multiply the left side by x^-1 and the right side by x
vec![
Op::TC::<E>(comm_t_l, c_inv.into_bigint()),
Op::TC(comm_t_r, c.into_bigint()),
Op::UC(comm_u_l, c_inv),
Op::UC(comm_u_r, c),
]
})
.fold(MippTU::<E>::default, |mut res, op: Op<E>| {
match op {
Op::TC(tx, c) => {
let tx: E::TargetField = tx.pow(c);
res.tc.mul_assign(&tx);
}
Op::UC(zx, c) => {
let uxp: E::G1 = zx.mul(c);
res.uc.add_assign(&uxp);
}
}
res
})
.reduce(MippTU::default, |mut acc_res, res| {
acc_res.merge(&res);
acc_res
});
// the initial values of T and U are also merged to get the final result
let ref_final_res = &mut final_res;
ref_final_res.merge(&res);
// get the point rs from the transcript, used by the prover to generate
// the PST proof
let mut rs: Vec<E::ScalarField> = Vec::new();
let m = xs_inv.len();
for _i in 0..m {
let r = transcript.challenge_scalar::<E::ScalarField>(b"random_point");
rs.push(r);
}
// Given p_h is structured as defined above, the verifier can compute
// p_h(rs) by themselves in O(m) time
let v = (0..m)
.into_par_iter()
.map(|i| E::ScalarField::one() + rs[i].mul(xs_inv[m - i - 1]) - rs[i])
.product();
let comm_h = CommitmentG2 {
nv: m,
h_product: proof.final_h,
};
// final_h is the commitment of p_h so the verifier can perform
// a PST verification at the random point rs, given the pst proof
// received from the prover
let check_h = MultilinearPC::<E>::check_2(vk, &comm_h, &rs, v, &proof.pst_proof_h);
assert!(check_h == true);
let final_u = proof.final_a.mul(final_y);
let final_t: <E as Pairing>::TargetField = E::pairing(proof.final_a, proof.final_h).0;
let check_t = ref_final_res.tc == final_t;
assert!(check_t == true);
let check_u = ref_final_res.uc == final_u;
assert!(check_u == true);
check_h & check_u
}
}
/// MippTU keeps track of the variables that have been sent by the prover and
/// must be multiplied together by the verifier.
struct MippTU<E: Pairing> {
pub tc: E::TargetField,
pub uc: E::G1,
}
impl<E> Default for MippTU<E>
where
E: Pairing,
{
fn default() -> Self {
Self {
tc: E::TargetField::one(),
uc: E::G1::zero(),
}
}
}
impl<E> MippTU<E>
where
E: Pairing,
{
fn merge(&mut self, other: &Self) {
self.tc.mul_assign(&other.tc);
self.uc.add_assign(&other.uc);
}
}
/// compress modifies `vec` in place by setting, for each index
/// $i \in [0, split)$, $vec[i] = vec[i] + scaler \cdot vec[i + split]$.
/// The `vec` vector is half its original size after this call.
pub fn compress<C: AffineRepr>(vec: &mut Vec<C>, split: usize, scaler: &C::ScalarField) {
let (left, right) = vec.split_at_mut(split);
left
.par_iter_mut()
.zip(right.par_iter())
.for_each(|(a_l, a_r)| {
// TODO remove that with master version
let mut x = a_r.mul(scaler);
x.add_assign(a_l.into_group());
*a_l = x.into_affine();
});
let len = left.len();
vec.resize(len, C::zero());
}
// TODO make that generic with points as well
pub fn compress_field<F: PrimeField>(vec: &mut Vec<F>, split: usize, scaler: &F) {
let (left, right) = vec.split_at_mut(split);
assert!(left.len() == right.len());
left
.par_iter_mut()
.zip(right.par_iter_mut())
.for_each(|(a_l, a_r)| {
// TODO remove copy
a_r.mul_assign(scaler);
a_l.add_assign(a_r.clone());
});
let len = left.len();
vec.resize(len, F::zero());
}
pub fn multiexponentiation<G: AffineRepr>(
left: &[G],
right: &[G::ScalarField],
) -> Result<G::Group, Error> {
if left.len() != right.len() {
return Err(Error::InvalidIPVectorLength);
}
Ok(<G::Group as VariableBaseMSM>::msm_unchecked(left, right))
}
pub fn pairings_product<E: Pairing>(gs: &[E::G1Affine], hs: &[E::G2Affine]) -> E::TargetField {
E::multi_pairing(gs, hs).0
}
#[derive(Debug, Error)]
pub enum Error {
#[error("Serialization error: {0}")]
Serialization(#[from] SerializationError),
#[error("Vectors length do not match for inner product (IP)")]
InvalidIPVectorLength,
// #[error("Commitment key length invalid")]
// InvalidKeyLength,
// #[error("Invalid pairing result")]
// InvalidPairing,
// #[error("Invalid SRS: {0}")]
// InvalidSRS(String),
// #[error("Invalid proof: {0}")]
// InvalidProof(String),
// #[error("Malformed Groth16 verifying key")]
// MalformedVerifyingKey,
}
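A small worked example for the structured polynomial $p_h$ used above, assuming arkworks' convention that index bit $j$ (least significant first) of the evaluation table corresponds to variable $z_j$: for $m = 2$ and challenge inverses $cs\_inv = [a, b]$, `polynomial_evaluations_from_transcript` produces the table $[1, b, a, ab]$ over $\{0,1\}^2$, which is exactly $p_h(z_0, z_1) = (1 - z_0 + b z_0)(1 - z_1 + a z_1)$ evaluated at the hypercube corners. This is why the verifier in `verify` can evaluate $p_h$ at the random point $rs$ with the $O(m)$ product $\prod_i (1 - rs[i] + xs\_inv[m - i - 1] \cdot rs[i])$ instead of touching all $2^m$ evaluations.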

+ 246
- 244
src/nizk/bullet.rs

@ -3,259 +3,261 @@
#![allow(non_snake_case)]
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
use super::super::errors::ProofVerifyError;
use crate::math::Math;
use crate::poseidon_transcript::PoseidonTranscript;
use super::super::errors::ProofVerifyError;
use super::super::group::{
CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement,
VartimeMultiscalarMul,
};
use super::super::scalar::Scalar;
use crate::transcript::Transcript;
use ark_ec::AffineRepr;
use ark_ec::CurveGroup;
use ark_ff::Field;
use ark_serialize::*;
use ark_std::{One, Zero};
use core::iter;
use std::ops::Mul;
use std::ops::MulAssign;
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct BulletReductionProof {
L_vec: Vec<CompressedGroup>,
R_vec: Vec<CompressedGroup>,
pub struct BulletReductionProof<G: CurveGroup> {
L_vec: Vec<G>,
R_vec: Vec<G>,
} }
impl BulletReductionProof {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\).
///
/// The `transcript` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
///
/// The lengths of the vectors must all be the same, and must all be
/// either 0 or a power of 2.
pub fn prove(
transcript: &mut PoseidonTranscript,
Q: &GroupElement,
G_vec: &[GroupElement],
H: &GroupElement,
a_vec: &[Scalar],
b_vec: &[Scalar],
blind: &Scalar,
blinds_vec: &[(Scalar, Scalar)],
) -> (
BulletReductionProof,
GroupElement,
Scalar,
Scalar,
GroupElement,
Scalar,
) {
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec.to_owned()[..];
let mut a = &mut a_vec.to_owned()[..];
let mut b = &mut b_vec.to_owned()[..];
// All of the input vectors must have a length that is a power of two.
let mut n = G.len();
assert!(n.is_power_of_two());
let lg_n = n.log_2();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
assert_eq!(blinds_vec.len(), 2 * lg_n);
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
let mut blinds_iter = blinds_vec.iter();
let mut blind_fin = *blind;
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let c_L = inner_product(a_L, b_R);
let c_R = inner_product(a_R, b_L);
let (blind_L, blind_R) = blinds_iter.next().unwrap();
let L = GroupElement::vartime_multiscalar_mul(
a_L.iter()
.chain(iter::once(&c_L))
.chain(iter::once(blind_L))
.copied()
.collect::<Vec<Scalar>>()
.as_slice(),
G_R.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.copied()
.collect::<Vec<GroupElement>>()
.as_slice(),
);
let R = GroupElement::vartime_multiscalar_mul(
a_R.iter()
.chain(iter::once(&c_R))
.chain(iter::once(blind_R))
.copied()
.collect::<Vec<Scalar>>()
.as_slice(),
G_L.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.copied()
.collect::<Vec<GroupElement>>()
.as_slice(),
);
transcript.append_point(&L.compress());
transcript.append_point(&R.compress());
let u = transcript.challenge_scalar();
let u_inv = u.inverse().unwrap();
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
}
blind_fin = blind_fin + u * u * blind_L + u_inv * u_inv * blind_R;
L_vec.push(L.compress());
R_vec.push(R.compress());
a = a_L;
b = b_L;
G = G_L;
}
let Gamma_hat =
GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_fin], &[G[0], *Q, *H]);
(
BulletReductionProof { L_vec, R_vec },
Gamma_hat,
a[0],
b[0],
G[0],
blind_fin,
)
impl<G: CurveGroup> BulletReductionProof<G> {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\).
///
/// The `transcript` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
///
/// The lengths of the vectors must all be the same, and must all be
/// either 0 or a power of 2.
pub fn prove(
transcript: &mut PoseidonTranscript<G::ScalarField>,
Q: &G::Affine,
G_vec: &[G::Affine],
H: &G::Affine,
a_vec: &[G::ScalarField],
b_vec: &[G::ScalarField],
blind: &G::ScalarField,
blinds_vec: &[(G::ScalarField, G::ScalarField)],
) -> (
BulletReductionProof<G>,
G,
G::ScalarField,
G::ScalarField,
G,
G::ScalarField,
) {
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec.to_owned()[..];
let mut a = &mut a_vec.to_owned()[..];
let mut b = &mut b_vec.to_owned()[..];
// All of the input vectors must have a length that is a power of two.
let mut n = G.len();
assert!(n.is_power_of_two());
let lg_n = n.log_2();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
assert_eq!(blinds_vec.len(), 2 * lg_n);
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
let mut blinds_iter = blinds_vec.iter();
let mut blind_fin = *blind;
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let c_L = inner_product(a_L, b_R);
let c_R = inner_product(a_R, b_L);
let (blind_L, blind_R) = blinds_iter.next().unwrap();
let gright_vec = G_R
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.cloned()
.collect::<Vec<G::Affine>>();
let L = G::msm_unchecked(
&gright_vec,
a_L
.iter()
.chain(iter::once(&c_L))
.chain(iter::once(blind_L))
.copied()
.collect::<Vec<G::ScalarField>>()
.as_slice(),
);
let gl_vec = G_L
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.cloned()
.collect::<Vec<G::Affine>>();
let R = G::msm_unchecked(
&gl_vec,
a_R
.iter()
.chain(iter::once(&c_R))
.chain(iter::once(blind_R))
.copied()
.collect::<Vec<G::ScalarField>>()
.as_slice(),
);
transcript.append_point(b"", &L);
transcript.append_point(b"", &R);
let u: G::ScalarField = transcript.challenge_scalar(b"");
let u_inv = u.inverse().unwrap();
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] = (G_L[i].mul(u_inv) + G_R[i].mul(u)).into_affine();
}
blind_fin = blind_fin + u * u * blind_L + u_inv * u_inv * blind_R;
L_vec.push(L);
R_vec.push(R);
a = a_L;
b = b_L;
G = G_L;
} }
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
/// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
fn verification_scalars(
&self,
n: usize,
transcript: &mut PoseidonTranscript,
) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
let lg_n = self.L_vec.len();
if lg_n >= 32 {
// 4 billion multiplications should be enough for anyone
// and this check prevents overflow in 1<<lg_n below.
return Err(ProofVerifyError::InternalError);
}
if n != (1 << lg_n) {
return Err(ProofVerifyError::InternalError);
}
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
transcript.append_point(L);
transcript.append_point(R);
challenges.push(transcript.challenge_scalar());
}
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv: Vec<Scalar> = challenges.clone();
ark_ff::fields::batch_inversion(&mut challenges_inv);
let mut allinv: Scalar = Scalar::one();
for c in challenges.iter().filter(|s| !s.is_zero()) {
allinv.mul_assign(c);
}
allinv = allinv.inverse().unwrap();
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
challenges[i] = challenges[i].square();
challenges_inv[i] = challenges_inv[i].square();
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} is indexed by (lg_n - 1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
Ok((challenges_sq, challenges_inv_sq, s))
let Gamma_hat = G::msm_unchecked(&[G[0], *Q, *H], &[a[0], a[0] * b[0], blind_fin]);
(
BulletReductionProof { L_vec, R_vec },
Gamma_hat,
a[0],
b[0],
G[0].into_group(),
blind_fin,
)
}
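For orientation, the folding loop above maintains the standard Bulletproofs-style invariant. Writing the running commitment as Γ = ⟨a, G⟩ + ⟨a, b⟩·Q + β·H (a reading of the code, not stated explicitly in this diff), each round with challenge u satisfies:

```latex
\[
a' = u\,a_L + u^{-1}a_R, \qquad
b' = u^{-1}b_L + u\,b_R, \qquad
G' = u^{-1}G_L + u\,G_R, \qquad
\beta' = \beta + u^{2}\beta_L + u^{-2}\beta_R
\]
\[
\langle a', G'\rangle + \langle a', b'\rangle\,Q + \beta' H
  \;=\; u^{2} L \;+\; \Gamma \;+\; u^{-2} R
\]
```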
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
/// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
fn verification_scalars(
&self,
n: usize,
transcript: &mut PoseidonTranscript<G::ScalarField>,
) -> Result<
(
Vec<G::ScalarField>,
Vec<G::ScalarField>,
Vec<G::ScalarField>,
),
ProofVerifyError,
> {
let lg_n = self.L_vec.len();
if lg_n >= 32 {
// 4 billion multiplications should be enough for anyone
// and this check prevents overflow in 1<<lg_n below.
return Err(ProofVerifyError::InternalError);
}
if n != (1 << lg_n) {
return Err(ProofVerifyError::InternalError);
} }
/// This method is for testing that proof generation works;
/// for efficiency, the actual protocols would use the `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
pub fn verify(
&self,
n: usize,
a: &[Scalar],
transcript: &mut PoseidonTranscript,
Gamma: &GroupElement,
G: &[GroupElement],
) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
let Ls = self
.L_vec
.iter()
.map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
let Rs = self
.R_vec
.iter()
.map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
transcript.append_point(b"", L);
transcript.append_point(b"", R);
challenges.push(transcript.challenge_scalar(b""));
}
let G_hat = GroupElement::vartime_multiscalar_mul(s.as_slice(), G);
let a_hat = inner_product(a, &s);
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv: Vec<G::ScalarField> = challenges.clone();
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
u_sq.iter()
.chain(u_inv_sq.iter())
.chain(iter::once(&Scalar::one()))
.copied()
.collect::<Vec<Scalar>>()
.as_slice(),
Ls.iter()
.chain(Rs.iter())
.chain(iter::once(Gamma))
.copied()
.collect::<Vec<GroupElement>>()
.as_slice(),
);
ark_ff::fields::batch_inversion(&mut challenges_inv);
let mut allinv = G::ScalarField::one();
for c in challenges.iter().filter(|s| !s.is_zero()) {
allinv.mul_assign(c);
}
allinv = allinv.inverse().unwrap();
Ok((G_hat, Gamma_hat, a_hat))
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
challenges[i] = challenges[i].square();
challenges_inv[i] = challenges_inv[i].square();
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} is indexed by (lg_n-1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
} }
Ok((challenges_sq, challenges_inv_sq, s))
}
/// This method is for testing that proof generation works;
/// for efficiency, the actual protocols would use the `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
pub fn verify(
&self,
n: usize,
a: &[G::ScalarField],
transcript: &mut PoseidonTranscript<G::ScalarField>,
Gamma: &G,
Gs: &[G::Affine],
) -> Result<(G, G, G::ScalarField), ProofVerifyError> {
let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
let Ls = &self.L_vec;
let Rs = &self.R_vec;
let G_hat = G::msm(Gs, s.as_slice()).map_err(|_| ProofVerifyError::InternalError)?;
let a_hat = inner_product(a, &s);
let Gamma_hat = G::msm(
&G::normalize_batch(
&Ls
.iter()
.chain(Rs.iter())
.chain(iter::once(Gamma))
.copied()
.collect::<Vec<G>>(),
),
u_sq
.iter()
.chain(u_inv_sq.iter())
.chain(iter::once(&G::ScalarField::one()))
.copied()
.collect::<Vec<G::ScalarField>>()
.as_slice(),
)
.map_err(|_| ProofVerifyError::InternalError)?;
Ok((G_hat, Gamma_hat, a_hat))
}
} }
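A sketch of how `verify` above combines the outputs of `verification_scalars` (notation follows the code; each s_i is a ±1-exponent product of the challenges u_j):

```latex
\[
\hat{G} = \sum_{i=0}^{n-1} s_i\,G_i, \qquad
\hat{a} = \langle \mathbf{a}, \mathbf{s} \rangle, \qquad
\hat{\Gamma} = \sum_{j=1}^{\lg n} u_j^{2} L_j \;+\; \Gamma \;+\; \sum_{j=1}^{\lg n} u_j^{-2} R_j
\]
```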
/// Computes an inner product of two vectors
@ -263,14 +265,14 @@ impl BulletReductionProof {
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert!(
a.len() == b.len(),
"inner_product(a,b): lengths of vectors do not match"
);
let mut out = Scalar::zero();
for i in 0..a.len() {
out += a[i] * b[i];
}
out
fn inner_product<F: Field>(a: &[F], b: &[F]) -> F {
assert!(
a.len() == b.len(),
"inner_product(a,b): lengths of vectors do not match"
);
let mut out = F::zero();
for i in 0..a.len() {
out += a[i] * b[i];
}
out
} }
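A minimal sanity check for the generic helper above. This is a hypothetical test: it would have to live inside this module since `inner_product` is private, and the curve choice is only an example.

```rust
#[test]
fn inner_product_matches_naive_sum() {
  use ark_bls12_377::Fr;
  use ark_ff::One;

  // <1, (1..=8)> = 1 + 2 + ... + 8 = 36
  let a = vec![Fr::one(); 8];
  let b: Vec<Fr> = (1..=8u64).map(|x| Fr::from(x)).collect();
  assert_eq!(inner_product(&a, &b), Fr::from(36u64));
}
```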

+ 187
- 730
src/nizk/mod.rs

@ -1,760 +1,217 @@
#![allow(clippy::too_many_arguments)] #![allow(clippy::too_many_arguments)]
use super::commitments::{MultiCommitGens, PedersenCommit};
use super::errors::ProofVerifyError;
use crate::ark_std::UniformRand;
use crate::math::Math; use crate::math::Math;
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::CurveGroup;
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{
CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement, UnpackGroupElement,
};
use super::random::RandomTape;
use super::scalar::Scalar;
use ark_ec::ProjectiveCurve;
use ark_ff::PrimeField;
use ark_serialize::*; use ark_serialize::*;
use std::ops::Mul;
mod bullet; mod bullet;
use bullet::BulletReductionProof; use bullet::BulletReductionProof;
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct KnowledgeProof {
alpha: CompressedGroup,
z1: Scalar,
z2: Scalar,
}
impl KnowledgeProof {
fn protocol_name() -> &'static [u8] {
b"knowledge proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
random_tape: &mut RandomTape,
x: &Scalar,
r: &Scalar,
) -> (KnowledgeProof, CompressedGroup) {
// transcript.append_protocol_name(KnowledgeProof::protocol_name());
// produce two random Scalars
let t1 = random_tape.random_scalar(b"t1");
let t2 = random_tape.random_scalar(b"t2");
let C = x.commit(r, gens_n).compress();
C.append_to_poseidon(transcript);
let alpha = t1.commit(&t2, gens_n).compress();
alpha.append_to_poseidon(transcript);
let c = transcript.challenge_scalar();
let z1 = c * x + t1;
let z2 = c * r + t2;
(KnowledgeProof { alpha, z1, z2 }, C)
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
C: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
// transcript.append_protocol_name(KnowledgeProof::protocol_name());
C.append_to_poseidon(transcript);
self.alpha.append_to_poseidon(transcript);
let c = transcript.challenge_scalar();
let lhs = self.z1.commit(&self.z2, gens_n).compress();
let rhs = (C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct EqualityProof {
alpha: CompressedGroup,
z: Scalar,
}
impl EqualityProof {
fn protocol_name() -> &'static [u8] {
b"equality proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
random_tape: &mut RandomTape,
v1: &Scalar,
s1: &Scalar,
v2: &Scalar,
s2: &Scalar,
) -> (EqualityProof, CompressedGroup, CompressedGroup) {
// transcript.append_protocol_name(EqualityProof::protocol_name());
// produce a random Scalar
let r = random_tape.random_scalar(b"r");
let C1 = v1.commit(s1, gens_n).compress();
transcript.append_point(&C1);
let C2 = v2.commit(s2, gens_n).compress();
transcript.append_point(&C2);
let alpha = gens_n.h.mul(r.into_repr()).compress();
transcript.append_point(&alpha);
let c = transcript.challenge_scalar();
let z = c * ((*s1) - s2) + r;
(EqualityProof { alpha, z }, C1, C2)
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
C1: &CompressedGroup,
C2: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
// transcript.append_protocol_name(EqualityProof::protocol_name());
transcript.append_point(C1);
transcript.append_point(C2);
transcript.append_point(&self.alpha);
let c = transcript.challenge_scalar();
let rhs = {
let C = C1.unpack()? - C2.unpack()?;
(C.mul(c.into_repr()) + self.alpha.unpack()?).compress()
};
println!("rhs {:?}", rhs);
let lhs = gens_n.h.mul(self.z.into_repr()).compress();
println!("lhs {:?}", lhs);
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct ProductProof {
alpha: CompressedGroup,
beta: CompressedGroup,
delta: CompressedGroup,
z: Vec<Scalar>,
}
impl ProductProof {
fn protocol_name() -> &'static [u8] {
b"product proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
random_tape: &mut RandomTape,
x: &Scalar,
rX: &Scalar,
y: &Scalar,
rY: &Scalar,
z: &Scalar,
rZ: &Scalar,
) -> (
ProductProof,
CompressedGroup,
CompressedGroup,
CompressedGroup,
) {
// transcript.append_protocol_name(ProductProof::protocol_name());
// produce five random Scalar
let b1 = random_tape.random_scalar(b"b1");
let b2 = random_tape.random_scalar(b"b2");
let b3 = random_tape.random_scalar(b"b3");
let b4 = random_tape.random_scalar(b"b4");
let b5 = random_tape.random_scalar(b"b5");
let X_unc = x.commit(rX, gens_n);
let X = X_unc.compress();
transcript.append_point(&X);
let X_new = GroupElement::decompress(&X);
assert_eq!(X_unc, X_new.unwrap());
let Y = y.commit(rY, gens_n).compress();
transcript.append_point(&Y);
let Z = z.commit(rZ, gens_n).compress();
transcript.append_point(&Z);
let alpha = b1.commit(&b2, gens_n).compress();
transcript.append_point(&alpha);
let beta = b3.commit(&b4, gens_n).compress();
transcript.append_point(&beta);
let delta = {
let gens_X = &MultiCommitGens {
n: 1,
G: vec![GroupElement::decompress(&X).unwrap()],
h: gens_n.h,
};
b3.commit(&b5, gens_X).compress()
};
transcript.append_point(&delta);
let c = transcript.challenge_scalar();
let z1 = b1 + c * x;
let z2 = b2 + c * rX;
let z3 = b3 + c * y;
let z4 = b4 + c * rY;
let z5 = b5 + c * ((*rZ) - (*rX) * y);
let z = [z1, z2, z3, z4, z5].to_vec();
(
ProductProof {
alpha,
beta,
delta,
z,
},
X,
Y,
Z,
)
}
fn check_equality(
P: &CompressedGroup,
X: &CompressedGroup,
c: &Scalar,
gens_n: &MultiCommitGens,
z1: &Scalar,
z2: &Scalar,
) -> bool {
println!("{:?}", X);
let lhs = (GroupElement::decompress(P).unwrap()
+ GroupElement::decompress(X).unwrap().mul(c.into_repr()))
.compress();
let rhs = z1.commit(z2, gens_n).compress();
lhs == rhs
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
X: &CompressedGroup,
Y: &CompressedGroup,
Z: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
// transcript.append_protocol_name(ProductProof::protocol_name());
X.append_to_poseidon(transcript);
Y.append_to_poseidon(transcript);
Z.append_to_poseidon(transcript);
self.alpha.append_to_poseidon(transcript);
self.beta.append_to_poseidon(transcript);
self.delta.append_to_poseidon(transcript);
let z1 = self.z[0];
let z2 = self.z[1];
let z3 = self.z[2];
let z4 = self.z[3];
let z5 = self.z[4];
let c = transcript.challenge_scalar();
if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
&& ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
&& ProductProof::check_equality(
&self.delta,
Z,
&c,
&MultiCommitGens {
n: 1,
G: vec![X.unpack()?],
h: gens_n.h,
},
&z3,
&z5,
)
{
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DotProductProof {
delta: CompressedGroup,
beta: CompressedGroup,
z: Vec<Scalar>,
z_delta: Scalar,
z_beta: Scalar,
}
impl DotProductProof {
fn protocol_name() -> &'static [u8] {
b"dot product proof"
}
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
pub fn prove(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProof, CompressedGroup, CompressedGroup) {
// transcript.append_protocol_name(DotProductProof::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens_n.n, a_vec.len());
assert_eq!(gens_1.n, 1);
// produce randomness for the proofs
let d_vec = random_tape.random_vector(b"d_vec", n);
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_beta");
let Cx = x_vec.commit(blind_x, gens_n).compress();
Cx.append_to_poseidon(transcript);
let Cy = y.commit(blind_y, gens_1).compress();
Cy.append_to_poseidon(transcript);
transcript.append_scalar_vector(a_vec);
let delta = d_vec.commit(&r_delta, gens_n).compress();
delta.append_to_poseidon(transcript);
let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);
let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
beta.append_to_poseidon(transcript);
let c = transcript.challenge_scalar();
let z = (0..d_vec.len())
.map(|i| c * x_vec[i] + d_vec[i])
.collect::<Vec<Scalar>>();
let z_delta = c * blind_x + r_delta;
let z_beta = c * blind_y + r_beta;
(
DotProductProof {
delta,
beta,
z,
z_delta,
z_beta,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut PoseidonTranscript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens_n.n, a.len());
assert_eq!(gens_1.n, 1);
// transcript.append_protocol_name(DotProductProof::protocol_name());
Cx.append_to_poseidon(transcript);
Cy.append_to_poseidon(transcript);
transcript.append_scalar_vector(a);
self.delta.append_to_poseidon(transcript);
self.beta.append_to_poseidon(transcript);
let c = transcript.challenge_scalar();
let mut result = Cx.unpack()?.mul(c.into_repr()) + self.delta.unpack()?
== self.z.commit(&self.z_delta, gens_n);
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
result &= Cy.unpack()?.mul(c.into_repr()) + self.beta.unpack()?
== dotproduct_z_a.commit(&self.z_beta, gens_1);
if result {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Clone)] #[derive(Clone)]
pub struct DotProductProofGens {
n: usize,
pub gens_n: MultiCommitGens,
pub gens_1: MultiCommitGens,
pub struct DotProductProofGens<G: CurveGroup> {
n: usize,
pub gens_n: MultiCommitGens<G>,
pub gens_1: MultiCommitGens<G>,
} }
impl DotProductProofGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
DotProductProofGens { n, gens_n, gens_1 }
}
impl<G: CurveGroup> DotProductProofGens<G> {
pub fn new(n: usize, label: &[u8]) -> Self {
let (gens_n, gens_1) = MultiCommitGens::<G>::new(n + 1, label).split_at(n);
DotProductProofGens { n, gens_n, gens_1 }
}
} }
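A short sketch of how these generators are typically set up (it mirrors the test at the bottom of this file; the curve and label are illustrative only):

```rust
fn setup_example() -> DotProductProofGens<ark_bls12_377::G1Projective> {
  // One seeded call derives n + 1 Pedersen generators and splits them:
  // `gens_n` commits to a length-n vector, `gens_1` to the claimed dot product.
  DotProductProofGens::new(1024, b"example-gens")
}
```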
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DotProductProofLog {
bullet_reduction_proof: BulletReductionProof,
delta: CompressedGroup,
beta: CompressedGroup,
z1: Scalar,
z2: Scalar,
pub struct DotProductProofLog<G: CurveGroup> {
bullet_reduction_proof: BulletReductionProof<G>,
delta: G,
beta: G,
z1: G::ScalarField,
z2: G::ScalarField,
} }
impl DotProductProofLog {
fn protocol_name() -> &'static [u8] {
b"dot product proof (log)"
}
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
pub fn prove(
gens: &DotProductProofGens,
transcript: &mut PoseidonTranscript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
// transcript.append_protocol_name(DotProductProofLog::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens.n, n);
// produce randomness for generating a proof
let d = random_tape.random_scalar(b"d");
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_delta");
let blinds_vec = {
let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
(0..v1.len())
.map(|i| (v1[i], v2[i]))
.collect::<Vec<(Scalar, Scalar)>>()
};
let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
transcript.append_point(&Cx);
let Cy = y.commit(blind_y, &gens.gens_1).compress();
transcript.append_point(&Cy);
transcript.append_scalar_vector(a_vec);
let blind_Gamma = (*blind_x) + blind_y;
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
BulletReductionProof::prove(
transcript,
&gens.gens_1.G[0],
&gens.gens_n.G,
&gens.gens_n.h,
x_vec,
a_vec,
&blind_Gamma,
&blinds_vec,
);
let y_hat = x_hat * a_hat;
let delta = {
let gens_hat = MultiCommitGens {
n: 1,
G: vec![g_hat],
h: gens.gens_1.h,
};
d.commit(&r_delta, &gens_hat).compress()
};
transcript.append_point(&delta);
let beta = d.commit(&r_beta, &gens.gens_1).compress();
transcript.append_point(&beta);
let c = transcript.challenge_scalar();
let z1 = d + c * y_hat;
let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
(
DotProductProofLog {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
n: usize,
gens: &DotProductProofGens,
transcript: &mut PoseidonTranscript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens.n, n);
assert_eq!(a.len(), n);
// transcript.append_protocol_name(DotProductProofLog::protocol_name());
// Cx.append_to_poseidon( transcript);
// Cy.append_to_poseidon( transcript);
// a.append_to_poseidon( transcript);
transcript.append_point(Cx);
transcript.append_point(Cy);
transcript.append_scalar_vector(a);
let Gamma = Cx.unpack()? + Cy.unpack()?;
let (g_hat, Gamma_hat, a_hat) =
self.bullet_reduction_proof
.verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
// self.delta.append_to_poseidon( transcript);
// self.beta.append_to_poseidon( transcript);
transcript.append_point(&self.delta);
transcript.append_point(&self.beta);
let c = transcript.challenge_scalar();
let c_s = &c;
let beta_s = self.beta.unpack()?;
let a_hat_s = &a_hat;
let delta_s = self.delta.unpack()?;
let z1_s = &self.z1;
let z2_s = &self.z2;
let lhs = ((Gamma_hat.mul(c_s.into_repr()) + beta_s).mul(a_hat_s.into_repr()) + delta_s)
.compress();
let rhs = ((g_hat + gens.gens_1.G[0].mul(a_hat_s.into_repr())).mul(z1_s.into_repr())
+ gens.gens_1.h.mul(z2_s.into_repr()))
.compress();
assert_eq!(lhs, rhs);
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
impl<G> DotProductProofLog<G>
where
G: CurveGroup,
G::ScalarField: Absorb,
{
pub fn prove(
gens: &DotProductProofGens<G>,
transcript: &mut PoseidonTranscript<G::ScalarField>,
x_vec: &[G::ScalarField],
blind_x: &G::ScalarField,
a_vec: &[G::ScalarField],
y: &G::ScalarField,
blind_y: &G::ScalarField,
) -> (Self, G, G) {
// transcript.append_protocol_name(DotProductProofLog::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens.n, n);
// produce randomness for generating a proof
let d = G::ScalarField::rand(&mut rand::thread_rng());
let r_delta = G::ScalarField::rand(&mut rand::thread_rng()).into();
let r_beta = G::ScalarField::rand(&mut rand::thread_rng()).into();
let blinds_vec = {
(0..2 * n.log_2())
.map(|_| {
(
G::ScalarField::rand(&mut rand::thread_rng()).into(),
G::ScalarField::rand(&mut rand::thread_rng()).into(),
)
})
.collect::<Vec<(G::ScalarField, G::ScalarField)>>()
};
let Cx = PedersenCommit::commit_slice(x_vec, blind_x, &gens.gens_n);
transcript.append_point(b"", &Cx);
let Cy = PedersenCommit::commit_scalar(y, blind_y, &gens.gens_1);
transcript.append_point(b"", &Cy);
transcript.append_scalar_vector(b"", &a_vec);
let blind_Gamma = (*blind_x) + blind_y;
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
BulletReductionProof::<G>::prove(
transcript,
&gens.gens_1.G[0],
&gens.gens_n.G,
&gens.gens_n.h,
x_vec,
a_vec,
&blind_Gamma,
&blinds_vec,
);
let y_hat = x_hat * a_hat;
let delta = {
let gens_hat = MultiCommitGens {
n: 1,
G: vec![g_hat.into_affine()],
h: gens.gens_1.h,
};
PedersenCommit::commit_scalar(&d, &r_delta, &gens_hat)
};
transcript.append_point(b"", &delta);
let beta = PedersenCommit::commit_scalar(&d, &r_beta, &gens.gens_1);
transcript.append_point(b"", &beta);
let c: G::ScalarField = transcript.challenge_scalar(b"");
let z1 = d + c * y_hat;
let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
(
Self {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
n: usize,
gens: &DotProductProofGens<G>,
transcript: &mut PoseidonTranscript<G::ScalarField>,
a: &[G::ScalarField],
Cx: &G,
Cy: &G,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens.n, n);
assert_eq!(a.len(), n);
// transcript.append_protocol_name(DotProductProofLog::protocol_name());
// Cx.write_to_transcript( transcript);
// Cy.write_to_transcript( transcript);
// a.write_to_transcript( transcript);
transcript.append_point(b"", Cx);
transcript.append_point(b"", Cy);
transcript.append_scalar_vector(b"", &a);
let Gamma = Cx.add(Cy);
let (g_hat, Gamma_hat, a_hat) =
self
.bullet_reduction_proof
.verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
// self.delta.write_to_transcript( transcript);
// self.beta.write_to_transcript( transcript);
transcript.append_point(b"", &self.delta);
transcript.append_point(b"", &self.beta);
let c = transcript.challenge_scalar(b"");
let c_s = &c;
let beta_s = self.beta;
let a_hat_s = &a_hat;
let delta_s = self.delta;
let z1_s = &self.z1;
let z2_s = &self.z2;
let lhs = (Gamma_hat.mul(c_s) + beta_s).mul(a_hat_s) + delta_s;
let rhs = (g_hat + gens.gens_1.G[0].mul(a_hat_s)).mul(z1_s) + gens.gens_1.h.mul(z2_s);
assert_eq!(lhs, rhs);
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
} }
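The final check in `verify` above can be written as follows, where ĝ, Γ̂ and â come out of the bullet reduction, g is `gens_1.G[0]` and h is `gens_1.h`:

```latex
\[
\big(c\,\hat{\Gamma} + \beta\big)\,\hat{a} + \delta
\;\stackrel{?}{=}\;
z_1\,\big(\hat{g} + \hat{a}\,g\big) + z_2\,h
\]
```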
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::parameters::poseidon_params;
use crate::parameters::poseidon_params;
use super::*;
use ark_std::UniformRand;
#[test]
fn check_knowledgeproof() {
let mut rng = ark_std::rand::thread_rng();
use super::*;
use ark_std::UniformRand;
type F = ark_bls12_377::Fr;
type G = ark_bls12_377::G1Projective;
let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
#[test]
fn check_dotproductproof_log() {
let mut rng = ark_std::rand::thread_rng();
let x = Scalar::rand(&mut rng);
let r = Scalar::rand(&mut rng);
let n = 1024;
let params = poseidon_params();
let gens = DotProductProofGens::<G>::new(n, b"test-1024");
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = PoseidonTranscript::new(&params);
let (proof, committed_value) =
KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);
let x: Vec<F> = (0..n).map(|_i| F::rand(&mut rng)).collect();
let a: Vec<F> = (0..n).map(|_i| F::rand(&mut rng)).collect();
let y = crate::dot_product(&x, &a);
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &committed_value)
.is_ok());
}
let r_x = F::rand(&mut rng);
let r_y = F::rand(&mut rng);
#[test]
fn check_equalityproof() {
let mut rng = ark_std::rand::thread_rng();
let params = poseidon_params();
let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
let v1 = Scalar::rand(&mut rng);
let v2 = v1;
let s1 = Scalar::rand(&mut rng);
let s2 = Scalar::rand(&mut rng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = PoseidonTranscript::new(&params);
let (proof, C1, C2) = EqualityProof::prove(
&gens_1,
&mut prover_transcript,
&mut random_tape,
&v1,
&s1,
&v2,
&s2,
);
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &C1, &C2)
.is_ok());
}
let params = poseidon_params();
let mut prover_transcript = PoseidonTranscript::new(&params);
let (proof, Cx, Cy) =
DotProductProofLog::<G>::prove(&gens, &mut prover_transcript, &x, &r_x, &a, &y, &r_y);
#[test]
fn check_productproof() {
let mut rng = ark_std::rand::thread_rng();
let pt = GroupElement::rand(&mut rng);
let pt_c = pt.compress();
let pt2 = GroupElement::decompress(&pt_c).unwrap();
assert_eq!(pt, pt2);
let params = poseidon_params();
let gens_1 = MultiCommitGens::new(1, b"test-productproof");
let x = Scalar::rand(&mut rng);
let rX = Scalar::rand(&mut rng);
let y = Scalar::rand(&mut rng);
let rY = Scalar::rand(&mut rng);
let z = x * y;
let rZ = Scalar::rand(&mut rng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = PoseidonTranscript::new(&params);
let (proof, X, Y, Z) = ProductProof::prove(
&gens_1,
&mut prover_transcript,
&mut random_tape,
&x,
&rX,
&y,
&rY,
&z,
&rZ,
);
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z)
.is_ok());
}
#[test]
fn check_dotproductproof() {
let mut rng = ark_std::rand::thread_rng();
let n = 1024;
let gens_1 = MultiCommitGens::new(1, b"test-two");
let gens_1024 = MultiCommitGens::new(n, b"test-1024");
let params = poseidon_params();
let mut x: Vec<Scalar> = Vec::new();
let mut a: Vec<Scalar> = Vec::new();
for _ in 0..n {
x.push(Scalar::rand(&mut rng));
a.push(Scalar::rand(&mut rng));
}
let y = DotProductProofLog::compute_dotproduct(&x, &a);
let r_x = Scalar::rand(&mut rng);
let r_y = Scalar::rand(&mut rng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = PoseidonTranscript::new(&params);
let (proof, Cx, Cy) = DotProductProof::prove(
&gens_1,
&gens_1024,
&mut prover_transcript,
&mut random_tape,
&x,
&r_x,
&a,
&y,
&r_y,
);
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
#[test]
fn check_dotproductproof_log() {
let mut rng = ark_std::rand::thread_rng();
let n = 1024;
let gens = DotProductProofGens::new(n, b"test-1024");
let x: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
let a: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
let y = DotProductProof::compute_dotproduct(&x, &a);
let r_x = Scalar::rand(&mut rng);
let r_y = Scalar::rand(&mut rng);
let params = poseidon_params();
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = PoseidonTranscript::new(&params);
let (proof, Cx, Cy) = DotProductProofLog::prove(
&gens,
&mut prover_transcript,
&mut random_tape,
&x,
&r_x,
&a,
&y,
&r_y,
);
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
} }

+ 2327
- 27
src/parameters.rs
File diff suppressed because it is too large


+ 97
- 61
src/poseidon_transcript.rs

@ -1,82 +1,118 @@
use crate::group::{CompressedGroup, Fr};
use super::scalar::Scalar;
use ark_bls12_377::Bls12_377 as I;
use ark_poly_commit::multilinear_pc::data_structures::Commitment;
use ark_serialize::CanonicalSerialize;
// use ark_r1cs_std::prelude::*;
use ark_sponge::{
poseidon::{PoseidonParameters, PoseidonSponge},
CryptographicSponge,
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::{
poseidon::{PoseidonConfig, PoseidonSponge},
Absorb, CryptographicSponge,
}; };
use ark_ec::{pairing::Pairing, CurveGroup};
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;
use ark_serialize::Compress;
#[derive(Clone)] #[derive(Clone)]
/// TODO /// TODO
pub struct PoseidonTranscript {
sponge: PoseidonSponge<Fr>,
params: PoseidonParameters<Fr>,
pub struct PoseidonTranscript<F: PrimeField> {
sponge: PoseidonSponge<F>,
params: PoseidonConfig<F>,
} }
impl PoseidonTranscript {
/// create a new transcript
pub fn new(params: &PoseidonParameters<Fr>) -> Self {
let sponge = PoseidonSponge::new(params);
PoseidonTranscript {
sponge,
params: params.clone(),
}
}
impl<F: PrimeField> Transcript for PoseidonTranscript<F> {
fn domain_sep(&mut self) {
self.sponge.absorb(&b"testudo".to_vec());
}
pub fn new_from_state(&mut self, challenge: &Scalar) {
self.sponge = PoseidonSponge::new(&self.params);
self.append_scalar(challenge);
}
fn append<S: CanonicalSerialize>(&mut self, _label: &'static [u8], point: &S) {
let mut buf = Vec::new();
point
.serialize_with_mode(&mut buf, Compress::Yes)
.expect("serialization failed");
self.sponge.absorb(&buf);
}
pub fn append_u64(&mut self, x: u64) {
self.sponge.absorb(&x);
}
fn challenge_scalar<FF: PrimeField>(&mut self, _label: &'static [u8]) -> FF {
self.sponge.squeeze_field_elements(1).remove(0)
}
}
pub fn append_bytes(&mut self, x: &Vec<u8>) {
self.sponge.absorb(x);
impl<F: PrimeField> PoseidonTranscript<F> {
/// create a new transcript
pub fn new(params: &PoseidonConfig<F>) -> Self {
let sponge = PoseidonSponge::new(params);
PoseidonTranscript {
sponge,
params: params.clone(),
} }
}
}
pub fn append_scalar(&mut self, scalar: &Scalar) {
self.sponge.absorb(&scalar);
}
impl<F: PrimeField + Absorb> PoseidonTranscript<F> {
pub fn new_from_state(&mut self, challenge: &F) {
self.sponge = PoseidonSponge::new(&self.params.clone());
self.append_scalar(b"", challenge);
}
}
pub fn append_point(&mut self, point: &CompressedGroup) {
self.sponge.absorb(&point.0);
}
impl<F: PrimeField> PoseidonTranscript<F> {
pub fn append_u64(&mut self, _label: &'static [u8], x: u64) {
self.sponge.absorb(&x);
}
pub fn append_scalar_vector(&mut self, scalars: &[Scalar]) {
for scalar in scalars.iter() {
self.append_scalar(scalar);
}
}
pub fn append_bytes(&mut self, _label: &'static [u8], x: &Vec<u8>) {
self.sponge.absorb(x);
}
pub fn challenge_scalar(&mut self) -> Scalar {
self.sponge.squeeze_field_elements(1).remove(0)
}
pub fn append_scalar<T: PrimeField + Absorb>(&mut self, _label: &'static [u8], scalar: &T) {
self.sponge.absorb(&scalar);
}
pub fn append_point<G>(&mut self, _label: &'static [u8], point: &G)
where
G: CurveGroup,
{
let mut point_encoding = Vec::new();
point
.serialize_with_mode(&mut point_encoding, Compress::Yes)
.unwrap();
self.sponge.absorb(&point_encoding);
}
pub fn challenge_vector(&mut self, len: usize) -> Vec<Scalar> {
self.sponge.squeeze_field_elements(len)
pub fn append_scalar_vector<T: PrimeField + Absorb>(
&mut self,
_label: &'static [u8],
scalars: &[T],
) {
for scalar in scalars.iter() {
self.append_scalar(b"", scalar);
} }
}
}
pub trait AppendToPoseidon {
fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript);
pub fn append_gt<E>(&mut self, _label: &'static [u8], g_t: &E::TargetField)
where
E: Pairing,
{
let mut bytes = Vec::new();
g_t.serialize_with_mode(&mut bytes, Compress::Yes).unwrap();
self.append_bytes(b"", &bytes);
}
} }
impl AppendToPoseidon for CompressedGroup {
fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
transcript.append_point(self);
}
pub trait TranscriptWriter<F: PrimeField> {
fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<F>);
} }
impl AppendToPoseidon for Commitment<I> {
fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
let mut bytes = Vec::new();
self.serialize(&mut bytes).unwrap();
transcript.append_bytes(&bytes);
}
#[cfg(test)]
mod test {
use ark_bls12_381::Fr;
use ark_ff::PrimeField;
use poseidon_paramgen;
#[test]
fn poseidon_parameters_generation() {
print_modulus::<Fr>();
println!(
"{}",
poseidon_paramgen::poseidon_build::compile::<Fr>(128, vec![2], Fr::MODULUS, true)
);
}
fn print_modulus<F: PrimeField>() {
println!("modulus: {:?}", F::MODULUS);
}
} }
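A sketch of how the transcript is driven on both sides of a protocol. The module paths, the `Transcript` trait import and `poseidon_params` are assumptions taken from this diff and its tests, not a fixed public API.

```rust
fn transcript_round_trip() {
  use crate::parameters::poseidon_params;
  use crate::poseidon_transcript::PoseidonTranscript;
  use crate::transcript::Transcript;
  type F = ark_bls12_377::Fr;

  let params = poseidon_params();
  let mut prover = PoseidonTranscript::<F>::new(&params);
  let mut verifier = PoseidonTranscript::<F>::new(&params);

  // Prover and verifier must absorb the same data in the same order...
  prover.append_scalar(b"", &F::from(42u64));
  verifier.append_scalar(b"", &F::from(42u64));

  // ...so that the squeezed Fiat-Shamir challenges agree.
  let c_p: F = prover.challenge_scalar(b"");
  let c_v: F = verifier.challenge_scalar(b"");
  assert_eq!(c_p, c_v);
}
```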

+ 424
- 438
src/product_tree.rs

@ -1,491 +1,477 @@
#![allow(dead_code)] #![allow(dead_code)]
use crate::poseidon_transcript::PoseidonTranscript;
use super::dense_mlpoly::DensePolynomial; use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial; use super::dense_mlpoly::EqPolynomial;
use super::math::Math; use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof; use super::sumcheck::SumcheckInstanceProof;
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::Absorb;
use ark_ff::PrimeField;
use ark_serialize::*; use ark_serialize::*;
use ark_std::One;
#[derive(Debug)] #[derive(Debug)]
pub struct ProductCircuit {
left_vec: Vec<DensePolynomial>,
right_vec: Vec<DensePolynomial>,
pub struct ProductCircuit<F: PrimeField> {
left_vec: Vec<DensePolynomial<F>>,
right_vec: Vec<DensePolynomial<F>>,
} }
impl ProductCircuit {
fn compute_layer(
inp_left: &DensePolynomial,
inp_right: &DensePolynomial,
) -> (DensePolynomial, DensePolynomial) {
let len = inp_left.len() + inp_right.len();
let outp_left = (0..len / 4)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<Scalar>>();
let outp_right = (len / 4..len / 2)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<Scalar>>();
(
DensePolynomial::new(outp_left),
DensePolynomial::new(outp_right),
)
}
pub fn new(poly: &DensePolynomial) -> Self {
let mut left_vec: Vec<DensePolynomial> = Vec::new();
let mut right_vec: Vec<DensePolynomial> = Vec::new();
let num_layers = poly.len().log_2();
let (outp_left, outp_right) = poly.split(poly.len() / 2);
left_vec.push(outp_left);
right_vec.push(outp_right);
for i in 0..num_layers - 1 {
let (outp_left, outp_right) =
ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
left_vec.push(outp_left);
right_vec.push(outp_right);
}
ProductCircuit {
left_vec,
right_vec,
}
impl<F: PrimeField> ProductCircuit<F> {
fn compute_layer(
inp_left: &DensePolynomial<F>,
inp_right: &DensePolynomial<F>,
) -> (DensePolynomial<F>, DensePolynomial<F>) {
let len = inp_left.len() + inp_right.len();
let outp_left = (0..len / 4)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<_>>();
let outp_right = (len / 4..len / 2)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<_>>();
(
DensePolynomial::new(outp_left),
DensePolynomial::new(outp_right),
)
}
pub fn new(poly: &DensePolynomial<F>) -> Self {
let mut left_vec: Vec<DensePolynomial<F>> = Vec::new();
let mut right_vec: Vec<DensePolynomial<F>> = Vec::new();
let num_layers = poly.len().log_2();
let (outp_left, outp_right) = poly.split(poly.len() / 2);
left_vec.push(outp_left);
right_vec.push(outp_right);
for i in 0..num_layers - 1 {
let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
left_vec.push(outp_left);
right_vec.push(outp_right);
} }
pub fn evaluate(&self) -> Scalar {
let len = self.left_vec.len();
assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
ProductCircuit {
left_vec,
right_vec,
} }
}
pub fn evaluate(&self) -> F {
let len = self.left_vec.len();
assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
}
} }
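A small sketch of the tree semantics: the circuit is a binary product tree over the polynomial's evaluations, so `evaluate()` returns the product of all entries. This is a hypothetical in-crate test and assumes `DensePolynomial` is in scope as in this file.

```rust
#[test]
fn product_circuit_multiplies_all_inputs() {
  use ark_bls12_377::Fr;

  // Four leaves -> two layers of pairwise products -> a single root.
  let leaves: Vec<Fr> = [2u64, 3, 5, 7].iter().map(|&x| Fr::from(x)).collect();
  let circuit = ProductCircuit::new(&DensePolynomial::new(leaves));
  assert_eq!(circuit.evaluate(), Fr::from(2u64 * 3 * 5 * 7));
}
```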
pub struct DotProductCircuit {
left: DensePolynomial,
right: DensePolynomial,
weight: DensePolynomial,
pub struct DotProductCircuit<F: PrimeField> {
left: DensePolynomial<F>,
right: DensePolynomial<F>,
weight: DensePolynomial<F>,
} }
impl DotProductCircuit {
pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
assert_eq!(left.len(), right.len());
assert_eq!(left.len(), weight.len());
DotProductCircuit {
left,
right,
weight,
}
}
pub fn evaluate(&self) -> Scalar {
(0..self.left.len())
.map(|i| self.left[i] * self.right[i] * self.weight[i])
.sum()
}
pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
let idx = self.left.len() / 2;
assert_eq!(idx * 2, self.left.len());
let (l1, l2) = self.left.split(idx);
let (r1, r2) = self.right.split(idx);
let (w1, w2) = self.weight.split(idx);
(
DotProductCircuit {
left: l1,
right: r1,
weight: w1,
},
DotProductCircuit {
left: l2,
right: r2,
weight: w2,
},
)
impl<F: PrimeField> DotProductCircuit<F> {
pub fn new(
left: DensePolynomial<F>,
right: DensePolynomial<F>,
weight: DensePolynomial<F>,
) -> Self {
assert_eq!(left.len(), right.len());
assert_eq!(left.len(), weight.len());
DotProductCircuit {
left,
right,
weight,
} }
}
pub fn evaluate(&self) -> F {
(0..self.left.len())
.map(|i| self.left[i] * self.right[i] * self.weight[i])
.sum()
}
pub fn split(&mut self) -> (Self, Self) {
let idx = self.left.len() / 2;
assert_eq!(idx * 2, self.left.len());
let (l1, l2) = self.left.split(idx);
let (r1, r2) = self.right.split(idx);
let (w1, w2) = self.weight.split(idx);
(
DotProductCircuit {
left: l1,
right: r1,
weight: w1,
},
DotProductCircuit {
left: l2,
right: r2,
weight: w2,
},
)
}
} }
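For reference, the two operations above satisfy the following, with ℓ, r, w the three input polynomials and the split taken at the midpoint:

```latex
\[
\mathrm{evaluate}() = \sum_{i} \ell_i\, r_i\, w_i,
\qquad
\mathrm{evaluate}() = \mathrm{evaluate}_{\text{lo}}() + \mathrm{evaluate}_{\text{hi}}()
\quad\text{after } \mathrm{split}().
\]
```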
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct LayerProof {
pub proof: SumcheckInstanceProof,
pub claims: Vec<Scalar>,
pub struct LayerProof<F: PrimeField> {
pub proof: SumcheckInstanceProof<F>,
pub claims: Vec<F>,
} }
#[allow(dead_code)] #[allow(dead_code)]
impl LayerProof {
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut PoseidonTranscript,
) -> (Scalar, Vec<Scalar>) {
self.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
impl<F: PrimeField + Absorb> LayerProof<F> {
pub fn verify(
&self,
claim: F,
num_rounds: usize,
degree_bound: usize,
transcript: &mut PoseidonTranscript<F>,
) -> (F, Vec<F>) {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
} }
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct LayerProofBatched {
pub proof: SumcheckInstanceProof,
pub claims_prod_left: Vec<Scalar>,
pub claims_prod_right: Vec<Scalar>,
pub struct LayerProofBatched<F: PrimeField> {
pub proof: SumcheckInstanceProof<F>,
pub claims_prod_left: Vec<F>,
pub claims_prod_right: Vec<F>,
} }
#[allow(dead_code)] #[allow(dead_code)]
impl LayerProofBatched {
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut PoseidonTranscript,
) -> (Scalar, Vec<Scalar>) {
self.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
impl<F: PrimeField + Absorb> LayerProofBatched<F> {
pub fn verify(
&self,
claim: F,
num_rounds: usize,
degree_bound: usize,
transcript: &mut PoseidonTranscript<F>,
) -> (F, Vec<F>) {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
} }
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProductCircuitEvalProof {
proof: Vec<LayerProof>,
pub struct ProductCircuitEvalProof<F: PrimeField> {
proof: Vec<LayerProof<F>>,
} }
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProductCircuitEvalProofBatched {
proof: Vec<LayerProofBatched>,
claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
pub struct ProductCircuitEvalProofBatched<F: PrimeField> {
proof: Vec<LayerProofBatched<F>>,
claims_dotp: (Vec<F>, Vec<F>, Vec<F>),
} }
impl ProductCircuitEvalProof {
#![allow(dead_code)]
pub fn prove(
circuit: &mut ProductCircuit,
transcript: &mut PoseidonTranscript,
) -> (Self, Scalar, Vec<Scalar>) {
let mut proof: Vec<LayerProof> = Vec::new();
let num_layers = circuit.left_vec.len();
let mut claim = circuit.evaluate();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C.len(), len / 2);
let num_rounds_prod = poly_C.len().log_2();
let comb_func_prod =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar, poly_C_comp: &Scalar| -> Scalar {
(*poly_A_comp) * poly_B_comp * poly_C_comp
};
let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
&claim,
num_rounds_prod,
&mut circuit.left_vec[layer_id],
&mut circuit.right_vec[layer_id],
&mut poly_C,
comb_func_prod,
transcript,
);
transcript.append_scalar(&claims_prod[0]);
transcript.append_scalar(&claims_prod[1]);
// produce a random challenge
let r_layer = transcript.challenge_scalar();
claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof.push(LayerProof {
proof: proof_prod,
claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
});
}
impl<F: PrimeField + Absorb> ProductCircuitEvalProof<F> {
#![allow(dead_code)]
pub fn prove(
circuit: &mut ProductCircuit<F>,
transcript: &mut PoseidonTranscript<F>,
) -> (Self, F, Vec<F>) {
let mut proof: Vec<LayerProof<F>> = Vec::new();
let num_layers = circuit.left_vec.len();
let mut claim = circuit.evaluate();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C.len(), len / 2);
let num_rounds_prod = poly_C.len().log_2();
let comb_func_prod = |poly_A_comp: &F, poly_B_comp: &F, poly_C_comp: &F| -> F {
(*poly_A_comp) * poly_B_comp * poly_C_comp
};
let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
&claim,
num_rounds_prod,
&mut circuit.left_vec[layer_id],
&mut circuit.right_vec[layer_id],
&mut poly_C,
comb_func_prod,
transcript,
);
transcript.append_scalar(b"", &claims_prod[0]);
transcript.append_scalar(b"", &claims_prod[1]);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"");
claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof.push(LayerProof {
proof: proof_prod,
claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
});
}
(ProductCircuitEvalProof { proof }, claim, rand)
(ProductCircuitEvalProof { proof }, claim, rand)
}
pub fn verify(&self, eval: F, len: usize, transcript: &mut PoseidonTranscript<F>) -> (F, Vec<F>) {
let num_layers = len.log_2();
let mut claim = eval;
let mut rand: Vec<F> = Vec::new();
//let mut num_rounds = 0;
assert_eq!(self.proof.len(), num_layers);
for (num_rounds, i) in (0..num_layers).enumerate() {
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod = &self.proof[i].claims;
transcript.append_scalar(b"", &claims_prod[0]);
transcript.append_scalar(b"", &claims_prod[1]);
assert_eq!(rand.len(), rand_prod.len());
let eq: F = (0..rand.len())
.map(|i| rand[i] * rand_prod[i] + (F::one() - rand[i]) * (F::one() - rand_prod[i]))
.product();
assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"");
claim = (F::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
} }
pub fn verify(
&self,
eval: Scalar,
len: usize,
transcript: &mut PoseidonTranscript,
) -> (Scalar, Vec<Scalar>) {
let num_layers = len.log_2();
let mut claim = eval;
let mut rand: Vec<Scalar> = Vec::new();
//let mut num_rounds = 0;
assert_eq!(self.proof.len(), num_layers);
for (num_rounds, i) in (0..num_layers).enumerate() {
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod = &self.proof[i].claims;
transcript.append_scalar(&claims_prod[0]);
transcript.append_scalar(&claims_prod[1]);
assert_eq!(rand.len(), rand_prod.len());
let eq: Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i]
+ (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
})
.product();
assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar();
claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
(claim, rand)
}
}
impl<F: PrimeField + Absorb> ProductCircuitEvalProofBatched<F> {
pub fn prove(
prod_circuit_vec: &mut Vec<&mut ProductCircuit<F>>,
dotp_circuit_vec: &mut Vec<&mut DotProductCircuit<F>>,
transcript: &mut PoseidonTranscript<F>,
) -> (Self, Vec<F>) {
assert!(!prod_circuit_vec.is_empty());
let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
let mut proof_layers: Vec<LayerProofBatched<F>> = Vec::new();
let num_layers = prod_circuit_vec[0].left_vec.len();
let mut claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| prod_circuit_vec[i].evaluate())
.collect::<Vec<F>>();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
// prepare the parallel instances that share poly_C first
let len = prod_circuit_vec[0].left_vec[layer_id].len()
+ prod_circuit_vec[0].right_vec[layer_id].len();
let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C_par.len(), len / 2);
let num_rounds_prod = poly_C_par.len().log_2();
let comb_func_prod = |poly_A_comp: &F, poly_B_comp: &F, poly_C_comp: &F| -> F {
(*poly_A_comp) * poly_B_comp * poly_C_comp
};
let mut poly_A_batched_par: Vec<&mut DensePolynomial<F>> = Vec::new();
let mut poly_B_batched_par: Vec<&mut DensePolynomial<F>> = Vec::new();
for prod_circuit in prod_circuit_vec.iter_mut() {
poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
}
let poly_vec_par = (
&mut poly_A_batched_par,
&mut poly_B_batched_par,
&mut poly_C_par,
);
// prepare sequential instances that don't share poly_C
let mut poly_A_batched_seq: Vec<&mut DensePolynomial<F>> = Vec::new();
let mut poly_B_batched_seq: Vec<&mut DensePolynomial<F>> = Vec::new();
let mut poly_C_batched_seq: Vec<&mut DensePolynomial<F>> = Vec::new();
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
// add additional claims
for item in dotp_circuit_vec.iter() {
claims_to_verify.push(item.evaluate());
assert_eq!(len / 2, item.left.len());
assert_eq!(len / 2, item.right.len());
assert_eq!(len / 2, item.weight.len());
} }
(claim, rand)
for dotp_circuit in dotp_circuit_vec.iter_mut() {
poly_A_batched_seq.push(&mut dotp_circuit.left);
poly_B_batched_seq.push(&mut dotp_circuit.right);
poly_C_batched_seq.push(&mut dotp_circuit.weight);
}
}
let poly_vec_seq = (
&mut poly_A_batched_seq,
&mut poly_B_batched_seq,
&mut poly_C_batched_seq,
);
// produce a fresh set of coeffs and a joint claim
let coeff_vec = transcript.challenge_scalar_vec(b"", claims_to_verify.len());
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.sum();
let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
&claim,
num_rounds_prod,
poly_vec_par,
poly_vec_seq,
&coeff_vec,
comb_func_prod,
transcript,
);
let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
for i in 0..prod_circuit_vec.len() {
transcript.append_scalar(b"", &claims_prod_left[i]);
transcript.append_scalar(b"", &claims_prod_right[i]);
}
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
for i in 0..dotp_circuit_vec.len() {
transcript.append_scalar(b"", &claims_dotp_left[i]);
transcript.append_scalar(b"", &claims_dotp_right[i]);
transcript.append_scalar(b"", &claims_dotp_weight[i]);
}
claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
}
// produce a random challenge to condense two claims into a single claim
let r_layer = transcript.challenge_scalar(b"");
claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<F>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof_layers.push(LayerProofBatched {
proof,
claims_prod_left,
claims_prod_right,
});
} }
}
impl ProductCircuitEvalProofBatched {
pub fn prove(
prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
transcript: &mut PoseidonTranscript,
) -> (Self, Vec<Scalar>) {
assert!(!prod_circuit_vec.is_empty());
let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
let num_layers = prod_circuit_vec[0].left_vec.len();
let mut claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| prod_circuit_vec[i].evaluate())
.collect::<Vec<Scalar>>();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
// prepare the parallel instances that share poly_C first
let len = prod_circuit_vec[0].left_vec[layer_id].len()
+ prod_circuit_vec[0].right_vec[layer_id].len();
let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C_par.len(), len / 2);
let num_rounds_prod = poly_C_par.len().log_2();
let comb_func_prod =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar, poly_C_comp: &Scalar| -> Scalar {
(*poly_A_comp) * poly_B_comp * poly_C_comp
};
let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
for prod_circuit in prod_circuit_vec.iter_mut() {
poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
}
let poly_vec_par = (
&mut poly_A_batched_par,
&mut poly_B_batched_par,
&mut poly_C_par,
);
// prepare sequential instances that don't share poly_C
let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
// add additional claims
for item in dotp_circuit_vec.iter() {
claims_to_verify.push(item.evaluate());
assert_eq!(len / 2, item.left.len());
assert_eq!(len / 2, item.right.len());
assert_eq!(len / 2, item.weight.len());
}
for dotp_circuit in dotp_circuit_vec.iter_mut() {
poly_A_batched_seq.push(&mut dotp_circuit.left);
poly_B_batched_seq.push(&mut dotp_circuit.right);
poly_C_batched_seq.push(&mut dotp_circuit.weight);
}
}
let poly_vec_seq = (
&mut poly_A_batched_seq,
&mut poly_B_batched_seq,
&mut poly_C_batched_seq,
);
// produce a fresh set of coeffs and a joint claim
let coeff_vec = transcript.challenge_vector(claims_to_verify.len());
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.sum();
let (proof, rand_prod, claims_prod, claims_dotp) =
SumcheckInstanceProof::prove_cubic_batched(
&claim,
num_rounds_prod,
poly_vec_par,
poly_vec_seq,
&coeff_vec,
comb_func_prod,
transcript,
);
let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
for i in 0..prod_circuit_vec.len() {
transcript.append_scalar(&claims_prod_left[i]);
transcript.append_scalar(&claims_prod_right[i]);
}
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
for i in 0..dotp_circuit_vec.len() {
transcript.append_scalar(&claims_dotp_left[i]);
transcript.append_scalar(&claims_dotp_right[i]);
transcript.append_scalar(&claims_dotp_weight[i]);
}
claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
}
// produce a random challenge to condense two claims into a single claim
let r_layer = transcript.challenge_scalar();
claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| {
claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])
})
.collect::<Vec<Scalar>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof_layers.push(LayerProofBatched {
proof,
claims_prod_left,
claims_prod_right,
});
(
ProductCircuitEvalProofBatched {
proof: proof_layers,
claims_dotp: claims_dotp_final,
},
rand,
)
}
pub fn verify(
&self,
claims_prod_vec: &[F],
claims_dotp_vec: &[F],
len: usize,
transcript: &mut PoseidonTranscript<F>,
) -> (Vec<F>, Vec<F>, Vec<F>) {
let num_layers = len.log_2();
let mut rand: Vec<F> = Vec::new();
//let mut num_rounds = 0;
assert_eq!(self.proof.len(), num_layers);
let mut claims_to_verify = claims_prod_vec.to_owned();
let mut claims_to_verify_dotp: Vec<F> = Vec::new();
for (num_rounds, i) in (0..num_layers).enumerate() {
if i == num_layers - 1 {
claims_to_verify.extend(claims_dotp_vec);
}
// produce random coefficients, one for each instance
let coeff_vec: Vec<F> = transcript.challenge_scalar_vec(b"", claims_to_verify.len());
// produce a joint claim
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
        .sum();

      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);

      let claims_prod_left = &self.proof[i].claims_prod_left;
      let claims_prod_right = &self.proof[i].claims_prod_right;
      assert_eq!(claims_prod_left.len(), claims_prod_vec.len());
      assert_eq!(claims_prod_right.len(), claims_prod_vec.len());

      for i in 0..claims_prod_vec.len() {
        transcript.append_scalar(b"", &claims_prod_left[i]);
        transcript.append_scalar(b"", &claims_prod_right[i]);
      }

      assert_eq!(rand.len(), rand_prod.len());
      let eq: F = (0..rand.len())
        .map(|i| rand[i] * rand_prod[i] + (F::one() - rand[i]) * (F::one() - rand_prod[i]))
        .product();
      let mut claim_expected: F = (0..claims_prod_vec.len())
        .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
        .sum();

      // add claims from the dotp instances
      if i == num_layers - 1 {
        let num_prod_instances = claims_prod_vec.len();
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
        for i in 0..claims_dotp_left.len() {
          transcript.append_scalar(b"", &claims_dotp_left[i]);
          transcript.append_scalar(b"", &claims_dotp_right[i]);
          transcript.append_scalar(b"", &claims_dotp_weight[i]);

          claim_expected += coeff_vec[i + num_prod_instances]
            * claims_dotp_left[i]
            * claims_dotp_right[i]
            * claims_dotp_weight[i];
        }
      }

      assert_eq!(claim_expected, claim_last);

      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"");

      claims_to_verify = (0..claims_prod_left.len())
        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
        .collect();

      // add claims to verify for dotp circuit
      if i == num_layers - 1 {
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;

        for i in 0..claims_dotp_vec.len() / 2 {
          // combine left claims
          let claim_left = claims_dotp_left[2 * i]
            + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]);

          let claim_right = claims_dotp_right[2 * i]
            + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]);

          let claim_weight = claims_dotp_weight[2 * i]
            + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]);

          claims_to_verify_dotp.push(claim_left);
          claims_to_verify_dotp.push(claim_right);
          claims_to_verify_dotp.push(claim_weight);
        }
      }

      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
    }
    (claims_to_verify, claims_to_verify_dotp, rand)
  }
}

+ 347
- 352
src/r1csinstance.rs

@ -1,391 +1,386 @@
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::math::Math;
use super::sparse_mlpoly::{
  MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment,
  SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
};
use super::timer::Timer;
use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_serialize::*;
use ark_std::{One, UniformRand, Zero};
use digest::{ExtendableOutput, Input};
use sha3::Shake256;

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)]
pub struct R1CSInstance<F: PrimeField> {
  num_cons: usize,
  num_vars: usize,
  num_inputs: usize,
  A: SparseMatPolynomial<F>,
  B: SparseMatPolynomial<F>,
  C: SparseMatPolynomial<F>,
}

pub struct R1CSCommitmentGens<E: Pairing> {
  gens: SparseMatPolyCommitmentGens<E>,
}

impl<E: Pairing> R1CSCommitmentGens<E> {
  pub fn setup(
    label: &'static [u8],
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    num_nz_entries: usize,
  ) -> Self {
    assert!(num_inputs < num_vars);
    let num_poly_vars_x = num_cons.log_2();
    let num_poly_vars_y = (2 * num_vars).log_2();
    let gens = SparseMatPolyCommitmentGens::setup(
      label,
      num_poly_vars_x,
      num_poly_vars_y,
      num_nz_entries,
      3,
    );
    R1CSCommitmentGens { gens }
  }
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CSCommitment<G: CurveGroup> {
  num_cons: usize,
  num_vars: usize,
  num_inputs: usize,
  comm: SparseMatPolyCommitment<G>,
}

impl<G: CurveGroup> TranscriptWriter<G::ScalarField> for R1CSCommitment<G> {
  fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<G::ScalarField>) {
    transcript.append_u64(b"", self.num_cons as u64);
    transcript.append_u64(b"", self.num_vars as u64);
    transcript.append_u64(b"", self.num_inputs as u64);
    self.comm.write_to_transcript(transcript);
  }
}

pub struct R1CSDecommitment<F: PrimeField> {
  dense: MultiSparseMatPolynomialAsDense<F>,
}

impl<G: CurveGroup> R1CSCommitment<G> {
  pub fn get_num_cons(&self) -> usize {
    self.num_cons
  }

  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  pub fn get_num_inputs(&self) -> usize {
    self.num_inputs
  }
}

impl<F: PrimeField> R1CSInstance<F> {
  pub fn new(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    A: &[(usize, usize, F)],
    B: &[(usize, usize, F)],
    C: &[(usize, usize, F)],
  ) -> Self {
    Timer::print(&format!("number_of_constraints {}", num_cons));
    Timer::print(&format!("number_of_variables {}", num_vars));
    Timer::print(&format!("number_of_inputs {}", num_inputs));
    Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
    Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
    Timer::print(&format!("number_non-zero_entries_C {}", C.len()));

    // check that num_cons is a power of 2
    assert_eq!(num_cons.next_power_of_two(), num_cons);

    // check that num_vars is a power of 2
    assert_eq!(num_vars.next_power_of_two(), num_vars);

    // check that number_inputs + 1 <= num_vars
    assert!(num_inputs < num_vars);

    // no errors, so create polynomials
    let num_poly_vars_x = num_cons.log_2();
    let num_poly_vars_y = (2 * num_vars).log_2();

    let mat_A = (0..A.len())
      .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
      .collect::<Vec<_>>();
    let mat_B = (0..B.len())
      .map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2))
      .collect::<Vec<_>>();
    let mat_C = (0..C.len())
      .map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2))
      .collect::<Vec<_>>();

    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A);
    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);
    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C);

    R1CSInstance {
      num_cons,
      num_vars,
      num_inputs,
      A: poly_A,
      B: poly_B,
      C: poly_C,
    }
  }

  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  pub fn get_num_cons(&self) -> usize {
    self.num_cons
  }

  pub fn get_num_inputs(&self) -> usize {
    self.num_inputs
  }

  pub fn get_digest(&self) -> Vec<u8> {
    let mut bytes = Vec::new();
    self.serialize_with_mode(&mut bytes, Compress::Yes).unwrap();
    let mut shake = Shake256::default();
    shake.input(bytes);
    let mut reader = shake.xof_result();
    let mut buf = [0u8; 256];
    reader.read_exact(&mut buf).unwrap();
    buf.to_vec()
  }

  pub fn produce_synthetic_r1cs(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
  ) -> (Self, Vec<F>, Vec<F>) {
    Timer::print(&format!("number_of_constraints {}", num_cons));
    Timer::print(&format!("number_of_variables {}", num_vars));
    Timer::print(&format!("number_of_inputs {}", num_inputs));

    let mut rng = ark_std::rand::thread_rng();

    // assert num_cons and num_vars are power of 2
    assert_eq!((num_cons.log_2()).pow2(), num_cons);
    assert_eq!((num_vars.log_2()).pow2(), num_vars);

    // num_inputs + 1 <= num_vars
    assert!(num_inputs < num_vars);

    // z is organized as [vars,1,io]
    let size_z = num_vars + num_inputs + 1;

    // produce a random satisfying assignment
    let Z = {
      let mut Z: Vec<F> = (0..size_z).map(|_i| F::rand(&mut rng)).collect::<Vec<F>>();
      Z[num_vars] = F::one(); // set the constant term to 1
      Z
    };

    // three sparse matrices
    let mut A: Vec<SparseMatEntry<F>> = Vec::new();
    let mut B: Vec<SparseMatEntry<F>> = Vec::new();
    let mut C: Vec<SparseMatEntry<F>> = Vec::new();
    let one = F::one();
    for i in 0..num_cons {
      let A_idx = i % size_z;
      let B_idx = (i + 2) % size_z;
      A.push(SparseMatEntry::new(i, A_idx, one));
      B.push(SparseMatEntry::new(i, B_idx, one));
      let AB_val = Z[A_idx] * Z[B_idx];

      let C_idx = (i + 3) % size_z;
      let C_val = Z[C_idx];

      if C_val == F::zero() {
        C.push(SparseMatEntry::new(i, num_vars, AB_val));
      } else {
        C.push(SparseMatEntry::new(
          i,
          C_idx,
          AB_val * C_val.inverse().unwrap(),
        ));
      }
    }

    Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
    Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
    Timer::print(&format!("number_non-zero_entries_C {}", C.len()));

    let num_poly_vars_x = num_cons.log_2();
    let num_poly_vars_y = (2 * num_vars).log_2();
    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);

    let inst = R1CSInstance {
      num_cons,
      num_vars,
      num_inputs,
      A: poly_A,
      B: poly_B,
      C: poly_C,
    };

    assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..]));

    (inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
  }

  pub fn is_sat(&self, vars: &[F], input: &[F]) -> bool {
    assert_eq!(vars.len(), self.num_vars);
    assert_eq!(input.len(), self.num_inputs);

    let z = {
      let mut z = vars.to_vec();
      z.extend(&vec![F::one()]);
      z.extend(input);
      z
    };

    // verify if Az * Bz - Cz = [0...]
    let Az = self
      .A
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    let Bz = self
      .B
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    let Cz = self
      .C
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);

    assert_eq!(Az.len(), self.num_cons);
    assert_eq!(Bz.len(), self.num_cons);
    assert_eq!(Cz.len(), self.num_cons);

    let res: usize = (0..self.num_cons)
      .map(|i| usize::from(Az[i] * Bz[i] != Cz[i]))
      .sum();

    res == 0
  }

  pub fn multiply_vec(
    &self,
    num_rows: usize,
    num_cols: usize,
    z: &[F],
  ) -> (DensePolynomial<F>, DensePolynomial<F>, DensePolynomial<F>) {
    assert_eq!(num_rows, self.num_cons);
    assert_eq!(z.len(), num_cols);
    assert!(num_cols > self.num_vars);
    (
      DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
      DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
      DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
    )
  }

  pub fn compute_eval_table_sparse(
    &self,
    num_rows: usize,
    num_cols: usize,
    evals: &[F],
  ) -> (Vec<F>, Vec<F>, Vec<F>) {
    assert_eq!(num_rows, self.num_cons);
    assert!(num_cols > self.num_vars);

    let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols);
    let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols);
    let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols);

    (evals_A, evals_B, evals_C)
  }

  pub fn evaluate(&self, rx: &[F], ry: &[F]) -> (F, F, F) {
    let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry);
    (evals[0], evals[1], evals[2])
  }

  pub fn commit<E: Pairing<ScalarField = F>>(
    &self,
    gens: &R1CSCommitmentGens<E>,
  ) -> (R1CSCommitment<E::G1>, R1CSDecommitment<F>) {
    // Since the matrices A, B and C are sparse, we commit to a single dense
    // polynomial built from their non-zero entries. This is the
    // computational commitment.
    let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
    let r1cs_comm = R1CSCommitment {
      num_cons: self.num_cons,
      num_vars: self.num_vars,
      num_inputs: self.num_inputs,
      comm,
    };

    // The decommitment is used by the prover to convince the verifier that
    // the openings of A, B and C it receives are correct.
    let r1cs_decomm = R1CSDecommitment { dense };

    (r1cs_comm, r1cs_decomm)
  }
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CSEvalProof<E: Pairing> {
  proof: SparseMatPolyEvalProof<E>,
}

impl<E> R1CSEvalProof<E>
where
  E: Pairing,
  E::ScalarField: Absorb,
{
  pub fn prove(
    decomm: &R1CSDecommitment<E::ScalarField>,
    rx: &[E::ScalarField], // point at which the polynomial is evaluated
    ry: &[E::ScalarField],
    evals: &(E::ScalarField, E::ScalarField, E::ScalarField),
    gens: &R1CSCommitmentGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
  ) -> Self {
    let timer = Timer::new("R1CSEvalProof::prove");
    let proof = SparseMatPolyEvalProof::prove(
      &decomm.dense,
      rx,
      ry,
      &[evals.0, evals.1, evals.2],
      &gens.gens,
      transcript,
    );
    timer.stop();

    R1CSEvalProof { proof }
  }

  pub fn verify(
    &self,
    comm: &R1CSCommitment<E::G1>,
    rx: &[E::ScalarField], // point at which the R1CS matrix polynomials are evaluated
    ry: &[E::ScalarField],
    evals: &(E::ScalarField, E::ScalarField, E::ScalarField),
    gens: &R1CSCommitmentGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
  ) -> Result<(), ProofVerifyError> {
    self.proof.verify(
      &comm.comm,
      rx,
      ry,
      &[evals.0, evals.1, evals.2],
      &gens.gens,
      transcript,
    )
  }
}

+ 601
- 510
src/r1csproof.rs
File diff suppressed because it is too large


+ 0
- 28
src/random.rs

@ -1,28 +0,0 @@
use super::scalar::Scalar;
use super::transcript::ProofTranscript;
use ark_std::UniformRand;
use merlin::Transcript;
pub struct RandomTape {
tape: Transcript,
}
impl RandomTape {
pub fn new(name: &'static [u8]) -> Self {
let tape = {
let mut rng = ark_std::rand::thread_rng();
let mut tape = Transcript::new(name);
tape.append_scalar(b"init_randomness", &Scalar::rand(&mut rng));
tape
};
Self { tape }
}
pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
self.tape.challenge_scalar(label)
}
pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
self.tape.challenge_vector(label, len)
}
}

+ 0
- 44
src/scalar/mod.rs

@ -1,44 +0,0 @@
pub use ark_bls12_377::Fr as Scalar;
// mod ristretto255;
// pub type Scalar = ristretto255::Scalar;
// pub type ScalarBytes = curve25519_dalek::scalar::Scalar;
// pub trait ScalarFromPrimitives {
// fn to_scalar(self) -> Scalar;
// }
// impl ScalarFromPrimitives for usize {
// #[inline]
// fn to_scalar(self) -> Scalar {
// (0..self).map(|_i| Scalar::one()).sum()
// }
// }
// impl ScalarFromPrimitives for bool {
// #[inline]
// fn to_scalar(self) -> Scalar {
// if self {
// Scalar::one()
// } else {
// Scalar::zero()
// }
// }
// }
// pub trait ScalarBytesFromScalar {
// fn decompress_scalar(s: &Scalar) -> ScalarBytes;
// fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
// }
// impl ScalarBytesFromScalar for Scalar {
// fn decompress_scalar(s: &Scalar) -> ScalarBytes {
// ScalarBytes::from_bytes_mod_order(s.to_bytes())
// }
// fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
// (0..s.len())
// .map(|i| Scalar::decompress_scalar(&s[i]))
// .collect::<Vec<ScalarBytes>>()
// }
// }

+ 1552
- 1591
src/sparse_mlpoly.rs
File diff suppressed because it is too large


+ 343
- 0
src/sqrt_pst.rs

@ -0,0 +1,343 @@
use crate::mipp::MippProof;
use ark_ec::{pairing::Pairing, scalar_mul::variable_base::VariableBaseMSM, CurveGroup};
use ark_ff::One;
use ark_poly_commit::multilinear_pc::{
data_structures::{Commitment, CommitterKey, Proof, VerifierKey},
MultilinearPC,
};
use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator};
use crate::{
dense_mlpoly::DensePolynomial, math::Math, poseidon_transcript::PoseidonTranscript, timer::Timer,
};
pub struct Polynomial<E: Pairing> {
m: usize,
odd: usize,
polys: Vec<DensePolynomial<E::ScalarField>>,
q: Option<DensePolynomial<E::ScalarField>>,
chis_b: Option<Vec<E::ScalarField>>,
}
impl<E: Pairing> Polynomial<E> {
  // Given the evaluations over the boolean hypercube of a polynomial p with
  // n variables, compute the sqrt-sized polynomials p_i as
  //   p_i(X) = \sum_{j \in \{0,1\}^m} p(j, i) * chi_j(X)
  // where p(X,Y) = \sum_{i \in \{0,1\}^m}
  //   (\sum_{j \in \{0,1\}^m} p(j, i) * chi_j(X)) * chi_i(Y)
  // and m = n/2.
  // To handle the case in which n is odd, each sqrt-sized polynomial gets one
  // extra variable (so its evaluation table has size 2^{m+1}) while the number
  // of polynomials remains the same (i.e. 2^m).
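  // Illustrative note (editorial, not part of the original sources): for
  // n = 4 variables, m = 2, the 16 evaluations form a 4 x 4 matrix whose
  // columns define the 2^m = 4 polynomials p_0, ..., p_3, each in m = 2
  // variables. The evaluation p(a, b) then equals
  //   \sum_i chi_i(b) * p_i(a),
  // i.e. the column polynomials are evaluated at the row-part a and
  // recombined with the Lagrange basis at the column-part b.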
pub fn from_evaluations(Z: &[E::ScalarField]) -> Self {
let pl_timer = Timer::new("poly_list_build");
// check the evaluation list is a power of 2
debug_assert!(Z.len() & (Z.len() - 1) == 0);
let num_vars = Z.len().log_2();
let m_col = num_vars / 2;
let m_row = if num_vars % 2 == 0 {
num_vars / 2
} else {
num_vars / 2 + 1
};
let pow_m_col = 2_usize.pow(m_col as u32);
let pow_m_row = 2_usize.pow(m_row as u32);
let polys: Vec<DensePolynomial<E::ScalarField>> = (0..pow_m_col)
.into_par_iter()
.map(|i| {
let z: Vec<E::ScalarField> = (0..pow_m_row)
.into_par_iter()
          // viewing the list of evaluations as a square matrix,
          // we select the entry at row j and column i;
          // to handle the odd case, we add another row to the matrix, i.e.
          // we add an extra variable to the polynomials while keeping their
          // number the same
.map(|j| Z[(j << m_col) | i])
.collect();
DensePolynomial::new(z)
})
.collect();
debug_assert!(polys.len() == pow_m_col);
debug_assert!(polys[0].len == pow_m_row);
pl_timer.stop();
Self {
m: m_col,
odd: if num_vars % 2 == 1 { 1 } else { 0 },
polys,
q: None,
chis_b: None,
}
}
// Given point = (\vec{a}, \vec{b}), compute the polynomial q as
// q(Y) =
// \sum_{j \in \{0,1\}^m}(\sum_{i \in \{0,1\}^m} p(j,i) * chi_i(b)) * chi_j(Y)
// and p(a,b) = q(a) where p is the initial polynomial
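  // Editorial note: building q takes a single pass over all
  // n = 2^{2m + odd} evaluations, i.e. O(n) field multiplications.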
fn get_q(&mut self, point: &[E::ScalarField]) {
let q_timer = Timer::new("build_q");
debug_assert!(point.len() == 2 * self.m + self.odd);
let b = &point[self.m + self.odd..];
let pow_m = 2_usize.pow(self.m as u32);
let chis: Vec<E::ScalarField> = (0..pow_m)
.into_par_iter()
.map(|i| Self::get_chi_i(b, i))
.collect();
let z_q: Vec<E::ScalarField> = (0..(pow_m * 2_usize.pow(self.odd as u32)))
.into_par_iter()
.map(|j| (0..pow_m).map(|i| self.polys[i].Z[j] * chis[i]).sum())
.collect();
q_timer.stop();
self.q = Some(DensePolynomial::new(z_q));
self.chis_b = Some(chis);
}
// Given point = (\vec{a}, \vec{b}) used to construct q
// compute q(a) = p(a,b).
pub fn eval(&mut self, point: &[E::ScalarField]) -> E::ScalarField {
let a = &point[0..point.len() / 2 + self.odd];
if self.q.is_none() {
self.get_q(point);
}
let q = self.q.clone().unwrap();
(0..q.Z.len())
.into_par_iter()
.map(|j| q.Z[j] * Polynomial::<E>::get_chi_i(&a, j))
.sum()
}
pub fn commit(&self, ck: &CommitterKey<E>) -> (Vec<Commitment<E>>, E::TargetField) {
let timer_commit = Timer::new("sqrt_commit");
let timer_list = Timer::new("comm_list");
// commit to each of the sqrt sized p_i
let comm_list: Vec<Commitment<E>> = self
.polys
.par_iter()
.map(|p| MultilinearPC::<E>::commit(&ck, p))
.collect();
timer_list.stop();
let h_vec = ck.powers_of_h[self.odd].clone();
assert!(comm_list.len() == h_vec.len());
let ipp_timer = Timer::new("ipp");
let left_pairs: Vec<_> = comm_list
.clone()
.into_par_iter()
.map(|c| E::G1Prepared::from(c.g_product))
.collect();
let right_pairs: Vec<_> = h_vec
.into_par_iter()
.map(|h| E::G2Prepared::from(h))
.collect();
// compute the IPP commitment
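    // (editorial note: concretely T = \prod_i e(C_i, h_i), a single
    // target-group element that binds all the sqrt-sized commitments C_i)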
let t = E::multi_pairing(left_pairs, right_pairs).0;
ipp_timer.stop();
timer_commit.stop();
(comm_list, t)
}
// computes \chi_i(\vec{b}) = \prod_{i_j = 0}(1 - b_j)\prod_{i_j = 1}(b_j)
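  // Illustrative example (editorial): for m = 2 and i = 2 = 0b10,
  // chi_2(b) = b_0 * (1 - b_1), since the most significant bit of i selects b_0.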
pub fn get_chi_i(b: &[E::ScalarField], i: usize) -> E::ScalarField {
let m = b.len();
let mut prod = E::ScalarField::one();
for j in 0..m {
let b_j = b[j];
// iterate from first (msb) to last (lsb) bit of i
// to build chi_i using the formula above
if i >> (m - j - 1) & 1 == 1 {
prod = prod * b_j;
} else {
prod = prod * (E::ScalarField::one() - b_j)
};
}
prod
}
pub fn open(
&mut self,
transcript: &mut PoseidonTranscript<E::ScalarField>,
comm_list: Vec<Commitment<E>>,
ck: &CommitterKey<E>,
point: &[E::ScalarField],
t: &E::TargetField,
) -> (Commitment<E>, Proof<E>, MippProof<E>) {
let a = &point[0..self.m + self.odd];
if self.q.is_none() {
self.get_q(point);
}
let q = self.q.clone().unwrap();
let timer_open = Timer::new("sqrt_open");
    // Compute the PST commitment to q, obtained as the inner product of the
    // commitments to the polynomials p_i and the evaluations chi_i(\vec{b}),
    // for i ranging over the boolean hypercube \{0,1\}^m.
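    // (editorial note: concretely U = \sum_i chi_i(b) * C_i, which the MSM
    // below computes from the commitments C_i in comm_list)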
let timer_msm = Timer::new("msm");
if self.chis_b.is_none() {
panic!("chis(b) should have been computed for q");
}
// TODO remove that cloning - the whole option thing
let chis = self.chis_b.clone().unwrap();
assert!(chis.len() == comm_list.len());
let comms: Vec<_> = comm_list.par_iter().map(|c| c.g_product).collect();
let c_u = <E::G1 as VariableBaseMSM>::msm_unchecked(&comms, &chis).into_affine();
timer_msm.stop();
let U: Commitment<E> = Commitment {
nv: q.num_vars,
g_product: c_u,
};
let comm = MultilinearPC::<E>::commit(ck, &q);
debug_assert!(c_u == comm.g_product);
let h_vec = ck.powers_of_h[self.odd].clone();
    // construct a MIPP proof that U is the inner product of the commitment
    // vector (C_i)_i opened in T and the vector (chi_i(b))_i
let timer_mipp_proof = Timer::new("mipp_prove");
let mipp_proof =
MippProof::<E>::prove(transcript, ck, comms, chis.to_vec(), h_vec, &c_u, t).unwrap();
timer_mipp_proof.stop();
let timer_proof = Timer::new("pst_open");
// reversing a is necessary because the sumcheck code in spartan generates
// the point in reverse order compared to how the polynomial commitment
// expects it
let mut a_rev = a.to_vec().clone();
a_rev.reverse();
// construct PST proof for opening q at a
let pst_proof = MultilinearPC::<E>::open(ck, &q, &a_rev);
timer_proof.stop();
timer_open.stop();
(U, pst_proof, mipp_proof)
}
pub fn verify(
transcript: &mut PoseidonTranscript<E::ScalarField>,
vk: &VerifierKey<E>,
U: &Commitment<E>,
point: &[E::ScalarField],
v: E::ScalarField,
pst_proof: &Proof<E>,
mipp_proof: &MippProof<E>,
T: &E::TargetField,
) -> bool {
let len = point.len();
let odd = if len % 2 == 1 { 1 } else { 0 };
let a = &point[0..len / 2 + odd];
let b = &point[len / 2 + odd..len];
let timer_mipp_verify = Timer::new("mipp_verify");
// verify that U = A^y where A is the opening vector of T
let res_mipp = MippProof::<E>::verify(vk, transcript, mipp_proof, b.to_vec(), &U.g_product, T);
assert!(res_mipp == true);
timer_mipp_verify.stop();
// reversing a is necessary because the sumcheck code in spartan generates
// the point in reverse order compared to how the polynomial commitment
    // expects it
let mut a_rev = a.to_vec().clone();
a_rev.reverse();
let timer_pst_verify = Timer::new("pst_verify");
// PST proof that q(a) is indeed equal to value claimed by the prover
let res = MultilinearPC::<E>::check(vk, U, &a_rev, v, pst_proof);
timer_pst_verify.stop();
res
}
}
#[cfg(test)]
mod tests {
use crate::parameters::poseidon_params;
use super::*;
type F = ark_bls12_377::Fr;
type E = ark_bls12_377::Bls12_377;
use ark_std::UniformRand;
#[test]
fn check_sqrt_poly_eval() {
let mut rng = ark_std::test_rng();
let num_vars = 6;
let len = 2_usize.pow(num_vars);
let Z: Vec<F> = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect();
let r: Vec<F> = (0..num_vars)
.into_iter()
.map(|_| F::rand(&mut rng))
.collect();
let p = DensePolynomial::new(Z.clone());
let res1 = p.evaluate(&r);
let mut pl = Polynomial::<E>::from_evaluations(&Z.clone());
let res2 = pl.eval(&r);
assert!(res1 == res2);
}
#[test]
fn check_commit() {
// check odd case
check_sqrt_poly_commit(5);
// check even case
check_sqrt_poly_commit(6);
}
fn check_sqrt_poly_commit(num_vars: u32) {
let mut rng = ark_std::test_rng();
let len = 2_usize.pow(num_vars);
let Z: Vec<F> = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect();
let r: Vec<F> = (0..num_vars)
.into_iter()
.map(|_| F::rand(&mut rng))
.collect();
let gens = MultilinearPC::<E>::setup(3, &mut rng);
let (ck, vk) = MultilinearPC::<E>::trim(&gens, 3);
let mut pl = Polynomial::from_evaluations(&Z.clone());
let v = pl.eval(&r);
let (comm_list, t) = pl.commit(&ck);
let params = poseidon_params();
let mut prover_transcript = PoseidonTranscript::new(&params);
let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t);
let mut verifier_transcript = PoseidonTranscript::new(&params);
let res = Polynomial::verify(
&mut verifier_transcript,
&vk,
&u,
&r,
v,
&pst_proof,
&mipp_proof,
&t,
);
assert!(res == true);
}
}

+ 397
- 880
src/sumcheck.rs
File diff suppressed because it is too large


+ 202
- 0
src/testudo_nizk.rs

@ -0,0 +1,202 @@
use std::cmp::max;
use crate::errors::ProofVerifyError;
use crate::r1csproof::R1CSVerifierProof;
use crate::{
poseidon_transcript::PoseidonTranscript,
r1csproof::{R1CSGens, R1CSProof},
transcript::Transcript,
InputsAssignment, Instance, VarsAssignment,
};
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
// TestudoNizk is suitable for uniform circuits where the
// evaluation of R1CS matrices A, B and C is cheap and can
// be done by the verifier. For more complex circuits this
// operation has to be offloaded to the prover.
pub struct TestudoNizk<E: Pairing> {
pub r1cs_verifier_proof: R1CSVerifierProof<E>,
pub r: (Vec<E::ScalarField>, Vec<E::ScalarField>),
}
pub struct TestudoNizkGens<E: Pairing> {
gens_r1cs_sat: R1CSGens<E>,
}
impl<E: Pairing> TestudoNizkGens<E> {
/// Performs the setup required by the polynomial commitment PST and Groth16
pub fn setup(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Self {
// ensure num_vars is a power of 2
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let num_cons_padded = {
let mut num_cons_padded = num_cons;
// ensure that num_cons_padded is at least 2
if num_cons_padded == 0 || num_cons_padded == 1 {
num_cons_padded = 2;
}
// ensure that num_cons_padded is a power of 2
if num_cons.next_power_of_two() != num_cons {
num_cons_padded = num_cons.next_power_of_two();
}
num_cons_padded
};
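    // For example (editorial note): num_vars = 500 with num_inputs = 10 is
    // padded to 512 variables, and num_cons = 1 is padded to 2 constraints.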
let gens_r1cs_sat = R1CSGens::setup(
b"gens_r1cs_sat",
num_cons_padded,
num_vars_padded,
num_inputs,
poseidon,
);
TestudoNizkGens { gens_r1cs_sat }
}
}
impl<E: Pairing> TestudoNizk<E>
where
E::ScalarField: Absorb,
{
// Returns a proof that the R1CS instance is satisfiable
pub fn prove(
inst: &Instance<E::ScalarField>,
vars: VarsAssignment<E::ScalarField>,
inputs: &InputsAssignment<E::ScalarField>,
gens: &TestudoNizkGens<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Result<TestudoNizk<E>, ProofVerifyError> {
transcript.append_bytes(b"", &inst.digest);
let c: E::ScalarField = transcript.challenge_scalar(b"");
transcript.new_from_state(&c);
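    // (editorial note: the squeezed challenge `c` binds the instance digest;
    // the transcript is re-seeded from `c` here and again below before
    // `prove_verifier`, so both phases start from the same small state)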
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
let (r1cs_sat_proof, rx, ry) = R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&inputs.assignment,
&gens.gens_r1cs_sat,
transcript,
);
let inst_evals = inst.inst.evaluate(&rx, &ry);
transcript.new_from_state(&c);
let r1cs_verifier_proof = r1cs_sat_proof
.prove_verifier(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&inputs.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
poseidon,
)
.unwrap();
Ok(TestudoNizk {
r1cs_verifier_proof,
r: (rx, ry),
})
}
// Verifies the satisfiability proof for the R1CS instance. In NIZK mode, the
// verifier evaluates matrices A, B and C themselves, which is a linear
// operation and hence this is not a SNARK.
// However, for highly structured circuits this operation is fast.
pub fn verify(
&self,
gens: &TestudoNizkGens<E>,
inst: &Instance<E::ScalarField>,
input: &InputsAssignment<E::ScalarField>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
_poseidon: PoseidonConfig<E::ScalarField>,
) -> Result<bool, ProofVerifyError> {
transcript.append_bytes(b"", &inst.digest);
let (claimed_rx, claimed_ry) = &self.r;
let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
let sat_verified = self.r1cs_verifier_proof.verify(
(claimed_rx.clone(), claimed_ry.clone()),
&input.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
assert!(sat_verified == true);
Ok(sat_verified)
}
}
#[cfg(test)]
mod tests {
use crate::{
parameters::poseidon_params,
poseidon_transcript::PoseidonTranscript,
testudo_nizk::{TestudoNizk, TestudoNizkGens},
Instance,
};
#[test]
pub fn check_testudo_nizk() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;
type E = ark_bls12_377::Bls12_377;
// produce public generators
let gens = TestudoNizkGens::<E>::setup(num_cons, num_vars, num_inputs, poseidon_params());
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let params = poseidon_params();
// produce a proof
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof =
TestudoNizk::prove(&inst, vars, &inputs, &gens, &mut prover_transcript, params).unwrap();
// verify the proof
let mut verifier_transcript = PoseidonTranscript::new(&poseidon_params());
assert!(proof
.verify(
&gens,
&inst,
&inputs,
&mut verifier_transcript,
poseidon_params()
)
.is_ok());
}
}

+ 377
- 0
src/testudo_snark.rs

@ -0,0 +1,377 @@
use std::cmp::max;
use crate::errors::ProofVerifyError;
use crate::r1csinstance::{R1CSCommitmentGens, R1CSEvalProof};
use crate::r1csproof::R1CSVerifierProof;
use crate::timer::Timer;
use crate::transcript::TranscriptWriter;
use crate::{
poseidon_transcript::PoseidonTranscript,
r1csproof::{R1CSGens, R1CSProof},
transcript::Transcript,
InputsAssignment, Instance, VarsAssignment,
};
use crate::{ComputationCommitment, ComputationDecommitment};
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct TestudoSnark<E: Pairing> {
pub r1cs_verifier_proof: R1CSVerifierProof<E>,
pub r1cs_eval_proof: R1CSEvalProof<E>,
pub inst_evals: (E::ScalarField, E::ScalarField, E::ScalarField),
pub r: (Vec<E::ScalarField>, Vec<E::ScalarField>),
}
pub struct TestudoSnarkGens<E: Pairing> {
gens_r1cs_sat: R1CSGens<E>,
gens_r1cs_eval: R1CSCommitmentGens<E>,
}
impl<E: Pairing> TestudoSnarkGens<E> {
/// Performs the setups required by the polynomial commitment PST, Groth16
/// and the computational commitment given the size of the R1CS statement,
/// `num_nz_entries` specifies the maximum number of non-zero entries in
/// any of the three R1CS matrices.
pub fn setup(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
num_nz_entries: usize,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Self {
// ensure num_vars is a power of 2
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let num_cons_padded = {
let mut num_cons_padded = num_cons;
// ensure that num_cons_padded is at least 2
if num_cons_padded == 0 || num_cons_padded == 1 {
num_cons_padded = 2;
}
// ensure that num_cons_padded is a power of 2
if num_cons.next_power_of_two() != num_cons {
num_cons_padded = num_cons.next_power_of_two();
}
num_cons_padded
};
let gens_r1cs_sat = R1CSGens::setup(
b"gens_r1cs_sat",
num_cons_padded,
num_vars_padded,
num_inputs,
poseidon,
);
let gens_r1cs_eval = R1CSCommitmentGens::setup(
b"gens_r1cs_eval",
num_cons_padded,
num_vars_padded,
num_inputs,
num_nz_entries,
);
TestudoSnarkGens {
gens_r1cs_sat,
gens_r1cs_eval,
}
}
}
impl<E: Pairing> TestudoSnark<E>
where
E::ScalarField: Absorb,
{
// Constructs the computational commitment, used to prove that the
// evaluations of matrices A, B and C sent by the prover to the verifier
// are correct.
pub fn encode(
inst: &Instance<E::ScalarField>,
gens: &TestudoSnarkGens<E>,
) -> (
ComputationCommitment<E::G1>,
ComputationDecommitment<E::ScalarField>,
) {
let timer_encode = Timer::new("SNARK::encode");
let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
timer_encode.stop();
(
ComputationCommitment { comm },
ComputationDecommitment { decomm },
)
}
  // Returns the Testudo SNARK proof which has two components:
  // * proof that the R1CS instance is satisfiable
  // * proof that the evaluation of matrices A, B and C at the point (x,y)
  //   resulting from the two rounds of sumcheck is correct
pub fn prove(
inst: &Instance<E::ScalarField>,
comm: &ComputationCommitment<E::G1>,
decomm: &ComputationDecommitment<E::ScalarField>,
vars: VarsAssignment<E::ScalarField>,
inputs: &InputsAssignment<E::ScalarField>,
gens: &TestudoSnarkGens<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Result<TestudoSnark<E>, ProofVerifyError> {
comm.comm.write_to_transcript(transcript);
let c: E::ScalarField = transcript.challenge_scalar(b"");
transcript.new_from_state(&c);
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
let (r1cs_sat_proof, rx, ry) = R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&inputs.assignment,
&gens.gens_r1cs_sat,
transcript,
);
    // We send evaluations of A, B, C at r = (rx, ry) as claims
    // to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let inst_evals = {
let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
transcript.append_scalar(b"", &Ar);
transcript.append_scalar(b"", &Br);
transcript.append_scalar(b"", &Cr);
(Ar, Br, Cr)
};
timer_eval.stop();
let timer_eval_proof = Timer::new("r1cs_eval_proof");
let r1cs_eval_proof = R1CSEvalProof::prove(
&decomm.decomm,
&rx,
&ry,
&inst_evals,
&gens.gens_r1cs_eval,
transcript,
);
timer_eval_proof.stop();
transcript.new_from_state(&c);
let timer_sat_circuit_verification = Timer::new("r1cs_sat_circuit_verification");
let r1cs_verifier_proof = r1cs_sat_proof
.prove_verifier(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&inputs.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
poseidon,
)
.unwrap();
timer_sat_circuit_verification.stop();
Ok(TestudoSnark {
r1cs_verifier_proof,
r1cs_eval_proof,
inst_evals,
r: (rx, ry),
})
}
pub fn verify(
&self,
gens: &TestudoSnarkGens<E>,
comm: &ComputationCommitment<E::G1>,
input: &InputsAssignment<E::ScalarField>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
_poseidon: PoseidonConfig<E::ScalarField>,
) -> Result<bool, ProofVerifyError> {
let (rx, ry) = &self.r;
let timer_sat_verification = Timer::new("r1cs_sat_verification");
let sat_verified = self.r1cs_verifier_proof.verify(
(rx.clone(), ry.clone()),
&input.assignment,
&self.inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
timer_sat_verification.stop();
assert!(sat_verified == true);
let (Ar, Br, Cr) = &self.inst_evals;
transcript.append_scalar(b"", Ar);
transcript.append_scalar(b"", Br);
transcript.append_scalar(b"", Cr);
let timer_eval_verification = Timer::new("r1cs_eval_verification");
let eval_verified = self.r1cs_eval_proof.verify(
&comm.comm,
rx,
ry,
&self.inst_evals,
&gens.gens_r1cs_eval,
transcript,
);
timer_eval_verification.stop();
Ok(sat_verified && eval_verified.is_ok())
}
}
#[cfg(test)]
mod tests {
use crate::ark_std::Zero;
use crate::{
parameters::poseidon_params,
poseidon_transcript::PoseidonTranscript,
testudo_snark::{TestudoSnark, TestudoSnarkGens},
InputsAssignment, Instance, VarsAssignment,
};
use ark_ff::{BigInteger, One, PrimeField};
#[test]
pub fn check_testudo_snark() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;
type E = ark_bls12_377::Bls12_377;
// produce public generators
let gens =
TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, poseidon_params());
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// create a commitment to R1CSInstance
let (comm, decomm) = TestudoSnark::encode(&inst, &gens);
let params = poseidon_params();
// produce a proof
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = TestudoSnark::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
params,
)
.unwrap();
// verify the proof
let mut verifier_transcript = PoseidonTranscript::new(&poseidon_params());
assert!(proof
.verify(
&gens,
&comm,
&inputs,
&mut verifier_transcript,
poseidon_params()
)
.is_ok());
}
#[test]
fn test_padded_constraints() {
type F = ark_bls12_377::Fr;
type E = ark_bls12_377::Bls12_377;
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
let num_non_zero_entries = 3;
    // We encode the single constraint z = a*a + b + 13 into three matrices,
    // where the coefficients in the matrices are in the little-endian byte order
let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();
    // constraint 0: a * a = z - 13 - b, i.e. z = a^2 + b + 13
A.push((0, num_vars + 2, (F::one().into_bigint().to_bytes_le()))); // 1*a
B.push((0, num_vars + 2, F::one().into_bigint().to_bytes_le())); // 1*a
C.push((0, num_vars + 1, F::one().into_bigint().to_bytes_le())); // 1*z
C.push((0, num_vars, (-F::from(13u64)).into_bigint().to_bytes_le())); // -13*1
C.push((0, num_vars + 3, (-F::one()).into_bigint().to_bytes_le())); // -1*b
    // VarsAssignment: this instance has no witness variables (num_vars = 0)
let vars = vec![F::zero().into_bigint().to_bytes_le(); num_vars];
    // create an InputsAssignment (z = 16, a = 1, b = 2)
let mut inputs = vec![F::zero().into_bigint().to_bytes_le(); num_inputs];
inputs[0] = F::from(16u64).into_bigint().to_bytes_le();
inputs[1] = F::from(1u64).into_bigint().to_bytes_le();
inputs[2] = F::from(2u64).into_bigint().to_bytes_le();
let assignment_inputs = InputsAssignment::<F>::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
// Testudo public params
let gens = TestudoSnarkGens::<E>::setup(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
poseidon_params(),
);
// create a commitment to the R1CS instance
let (comm, decomm) = TestudoSnark::encode(&inst, &gens);
let params = poseidon_params();
// produce a TestudoSnark
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = TestudoSnark::prove(
&inst,
&comm,
&decomm,
assignment_vars.clone(),
&assignment_inputs,
&gens,
&mut prover_transcript,
poseidon_params(),
)
.unwrap();
// verify the TestudoSnark
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(
&gens,
&comm,
&assignment_inputs,
&mut verifier_transcript,
poseidon_params()
)
.is_ok());
}
}

+ 56
- 55
src/timer.rs

@ -1,3 +1,4 @@
/// Timer is a simple utility to profile the execution time of a block of code.
#[cfg(feature = "profile")]
use colored::Colorize;
#[cfg(feature = "profile")]
@ -12,77 +13,77 @@ pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);
#[cfg(feature = "profile")]
pub struct Timer {
  label: String,
  timer: Instant,
}

#[cfg(feature = "profile")]
impl Timer {
  #[inline(always)]
  pub fn new(label: &str) -> Self {
    let timer = Instant::now();
    CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
    let star = "* ";
    println!(
      "{:indent$}{}{}",
      "",
      star,
      label.yellow().bold(),
      indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
    );
    Self {
      label: label.to_string(),
      timer,
    }
  }

  #[inline(always)]
  pub fn stop(&self) {
    let duration = self.timer.elapsed();
    let star = "* ";
    println!(
      "{:indent$}{}{} {:?}",
      "",
      star,
      self.label.blue().bold(),
      duration,
      indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
    );
    CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
  }

  #[inline(always)]
  pub fn print(msg: &str) {
    CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
    let star = "* ";
    println!(
      "{:indent$}{}{}",
      "",
      star,
      msg.to_string().green().bold(),
      indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
    );
    CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
  }
}

#[cfg(not(feature = "profile"))]
pub struct Timer {
  _label: String,
}

#[cfg(not(feature = "profile"))]
impl Timer {
  #[inline(always)]
  pub fn new(label: &str) -> Self {
    Self {
      _label: label.to_string(),
    }
  }

  #[inline(always)]
  pub fn stop(&self) {}

  #[inline(always)]
  pub fn print(_msg: &str) {}
}

+ 13
- 64
src/transcript.rs

@ -1,67 +1,16 @@
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;

/// Transcript is the application level transcript to derive the challenges
/// needed for Fiat Shamir during aggregation. It is given to the
/// prover/verifier so that the transcript can be fed with any other data first.
/// TODO: Make this trait the only Transcript trait
pub trait Transcript {
  fn domain_sep(&mut self);
  fn append<S: CanonicalSerialize>(&mut self, label: &'static [u8], point: &S);
  fn challenge_scalar<F: PrimeField>(&mut self, label: &'static [u8]) -> F;
  fn challenge_scalar_vec<F: PrimeField>(&mut self, label: &'static [u8], n: usize) -> Vec<F> {
    (0..n).map(|_| self.challenge_scalar(label)).collect()
  }
}

pub use crate::poseidon_transcript::TranscriptWriter;

+ 154
- 177
src/unipoly.rs

@ -1,198 +1,175 @@
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
use super::commitments::{Commitments, MultiCommitGens};
use super::group::GroupElement;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use ark_ff::Field;
use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
use ark_crypto_primitives::sponge::Absorb;
use ark_ff::{Field, PrimeField};
use ark_serialize::*; use ark_serialize::*;
use merlin::Transcript;
// ax^2 + bx + c stored as vec![c,b,a] // ax^2 + bx + c stored as vec![c,b,a]
// ax^3 + bx^2 + cx + d stored as vec![d,c,b,a] // ax^3 + bx^2 + cx + d stored as vec![d,c,b,a]
#[derive(Debug, CanonicalDeserialize, CanonicalSerialize, Clone)] #[derive(Debug, CanonicalDeserialize, CanonicalSerialize, Clone)]
pub struct UniPoly {
pub coeffs: Vec<Scalar>,
// pub coeffs_fq: Vec<Fq>,
pub struct UniPoly<F: Field> {
pub coeffs: Vec<F>,
// pub coeffs_fq: Vec<Fq>,
} }
// ax^2 + bx + c stored as vec![c,a] // ax^2 + bx + c stored as vec![c,a]
// ax^3 + bx^2 + cx + d stored as vec![d,b,a] // ax^3 + bx^2 + cx + d stored as vec![d,b,a]
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)] #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct CompressedUniPoly {
pub coeffs_except_linear_term: Vec<Scalar>,
}
impl UniPoly {
pub fn from_evals(evals: &[Scalar]) -> Self {
// we only support degree-2 or degree-3 univariate polynomials
assert!(evals.len() == 3 || evals.len() == 4);
let coeffs = if evals.len() == 3 {
// ax^2 + bx + c
let two_inv = Scalar::from(2).inverse().unwrap();
let c = evals[0];
let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
let b = evals[1] - c - a;
vec![c, b, a]
} else {
// ax^3 + bx^2 + cx + d
let two_inv = Scalar::from(2).inverse().unwrap();
let six_inv = Scalar::from(6).inverse().unwrap();
let d = evals[0];
let a = six_inv
* (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1]
- evals[0]);
let b = two_inv
* (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+ evals[2]
+ evals[2]
+ evals[2]
+ evals[2]
- evals[3]);
let c = evals[1] - d - a - b;
vec![d, c, b, a]
};
UniPoly { coeffs }
}
pub fn degree(&self) -> usize {
self.coeffs.len() - 1
}
pub fn as_vec(&self) -> Vec<Scalar> {
self.coeffs.clone()
}
pub fn eval_at_zero(&self) -> Scalar {
self.coeffs[0]
}
pub fn eval_at_one(&self) -> Scalar {
(0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
}
pub fn evaluate(&self, r: &Scalar) -> Scalar {
let mut eval = self.coeffs[0];
let mut power = *r;
for i in 1..self.coeffs.len() {
eval += power * self.coeffs[i];
power *= r;
}
eval
}
pub fn compress(&self) -> CompressedUniPoly {
let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
CompressedUniPoly {
coeffs_except_linear_term,
}
}
pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
self.coeffs.commit(blind, gens)
}
pub struct CompressedUniPoly<F: Field> {
pub coeffs_except_linear_term: Vec<F>,
}
impl CompressedUniPoly {
// we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
// linear_term = hint - 2 * constant_term - deg2 term - deg3 term
pub fn decompress(&self, hint: &Scalar) -> UniPoly {
let mut linear_term =
(*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
for i in 1..self.coeffs_except_linear_term.len() {
linear_term -= self.coeffs_except_linear_term[i];
}
let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
coeffs.extend(&self.coeffs_except_linear_term[1..]);
assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
UniPoly { coeffs }
impl<F: Field> UniPoly<F> {
pub fn from_evals(evals: &[F]) -> Self {
// we only support degree-2 or degree-3 univariate polynomials
assert!(evals.len() == 3 || evals.len() == 4);
let coeffs = if evals.len() == 3 {
// ax^2 + bx + c
let two_inv = F::from(2 as u8).inverse().unwrap();
let c = evals[0];
let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
let b = evals[1] - c - a;
vec![c, b, a]
} else {
// ax^3 + bx^2 + cx + d
let two_inv = F::from(2 as u8).inverse().unwrap();
let six_inv = F::from(6 as u8).inverse().unwrap();
let d = evals[0];
let a = six_inv
* (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
let b = two_inv
* (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+ evals[2]
+ evals[2]
+ evals[2]
+ evals[2]
- evals[3]);
let c = evals[1] - d - a - b;
vec![d, c, b, a]
};
UniPoly { coeffs }
}
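// Interpolation note for the quadratic branch above: the evaluations are taken
// at x = 0, 1, 2, so with p(x) = a*x^2 + b*x + c we have p(0) = c,
// p(1) = a + b + c and p(2) = 4a + 2b + c, which gives
// a = (p(2) - 2*p(1) + p(0)) / 2 and b = p(1) - c - a, exactly the `two_inv`
// branch. The `six_inv` branch solves the analogous 4x4 system for a cubic
// evaluated at x = 0, 1, 2, 3.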
pub fn degree(&self) -> usize {
self.coeffs.len() - 1
}
pub fn eval_at_zero(&self) -> F {
self.coeffs[0]
}
pub fn eval_at_one(&self) -> F {
(0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
}
pub fn evaluate(&self, r: &F) -> F {
let mut eval = self.coeffs[0];
let mut power = *r;
for i in 1..self.coeffs.len() {
eval += power * self.coeffs[i];
power *= r;
}
eval
}
// pub fn compress(&self) -> CompressedUniPoly<F> {
// let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
// assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
// CompressedUniPoly {
// coeffs_except_linear_term,
// }
// }
}
impl AppendToPoseidon for UniPoly {
fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
// transcript.append_message(label, b"UniPoly_begin");
for i in 0..self.coeffs.len() {
transcript.append_scalar(&self.coeffs[i]);
}
// transcript.append_message(label, b"UniPoly_end");
}
}
impl AppendToTranscript for UniPoly {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"UniPoly_begin");
for i in 0..self.coeffs.len() {
transcript.append_scalar(b"coeff", &self.coeffs[i]);
}
transcript.append_message(label, b"UniPoly_end");
// impl<F: PrimeField> CompressedUniPoly<F> {
// // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
// // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
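// // (eval(0) = c0 and eval(1) = c0 + c1 + c2 + c3, so their sum is
// // 2*c0 + c1 + c2 + c3 = hint, which determines the linear coefficient c1
// // once the constant and higher-degree coefficients are known)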
// pub fn decompress(&self, hint: &F) -> UniPoly<F> {
// let mut linear_term =
// (*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
// for i in 1..self.coeffs_except_linear_term.len() {
// linear_term -= self.coeffs_except_linear_term[i];
// }
// let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
// coeffs.extend(&self.coeffs_except_linear_term[1..]);
// assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
// UniPoly { coeffs }
// }
// }
impl<F: PrimeField + Absorb> TranscriptWriter<F> for UniPoly<F> {
fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<F>) {
// transcript.append_message(label, b"UniPoly_begin");
for i in 0..self.coeffs.len() {
transcript.append_scalar(b"", &self.coeffs[i]);
}
// transcript.append_message(label, b"UniPoly_end");
}
}
#[cfg(test)]
mod tests {
use ark_ff::One;
use super::*;
#[test]
fn test_from_evals_quad() {
// polynomial is 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = Scalar::from(6);
let e2 = Scalar::from(15);
let evals = vec![e0, e1, e2];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 3);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], Scalar::from(3));
assert_eq!(poly.coeffs[2], Scalar::from(2));
let hint = e0 + e1;
let compressed_poly = poly.compress();
let decompressed_poly = compressed_poly.decompress(&hint);
for i in 0..decompressed_poly.coeffs.len() {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e3 = Scalar::from(28);
assert_eq!(poly.evaluate(&Scalar::from(3)), e3);
}
#[test]
fn test_from_evals_cubic() {
// polynomial is x^3 + 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = Scalar::from(7);
let e2 = Scalar::from(23);
let e3 = Scalar::from(55);
let evals = vec![e0, e1, e2, e3];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 4);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], Scalar::from(3));
assert_eq!(poly.coeffs[2], Scalar::from(2));
assert_eq!(poly.coeffs[3], Scalar::from(1));
let hint = e0 + e1;
let compressed_poly = poly.compress();
let decompressed_poly = compressed_poly.decompress(&hint);
for i in 0..decompressed_poly.coeffs.len() {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e4 = Scalar::from(109);
assert_eq!(poly.evaluate(&Scalar::from(4)), e4);
}
use ark_ff::One;
use super::*;
type F = ark_bls12_377::Fr;
#[test]
fn test_from_evals_quad() {
// polynomial is 2x^2 + 3x + 1
let e0 = F::one();
let e1 = F::from(6 as u8);
let e2 = F::from(15 as u8);
let evals = vec![e0, e1, e2];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 3);
assert_eq!(poly.coeffs[0], F::one());
assert_eq!(poly.coeffs[1], F::from(3 as u8));
assert_eq!(poly.coeffs[2], F::from(2 as u8));
// let hint = e0 + e1;
// // let compressed_poly = poly.compress();
// // let decompressed_poly = compressed_poly.decompress(&hint);
// for i in 0..poly.coeffs.len() {
// assert_eq!(poly.coeffs[i], poly.coeffs[i]);
// }
let e3 = F::from(28 as u8);
assert_eq!(poly.evaluate(&F::from(3 as u8)), e3);
}
#[test]
fn test_from_evals_cubic() {
// polynomial is x^3 + 2x^2 + 3x + 1
let e0 = F::one();
let e1 = F::from(7);
let e2 = F::from(23);
let e3 = F::from(55);
let evals = vec![e0, e1, e2, e3];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 4);
assert_eq!(poly.coeffs[0], F::one());
assert_eq!(poly.coeffs[1], F::from(3));
assert_eq!(poly.coeffs[2], F::from(2));
assert_eq!(poly.coeffs[3], F::from(1));
// let hint = e0 + e1;
// let compressed_poly = poly.compress();
// let decompressed_poly = compressed_poly.decompress(&hint);
// for i in 0..decompressed_poly.coeffs.len() {
// assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
// }
let e4 = F::from(109);
assert_eq!(poly.evaluate(&F::from(4)), e4);
}
}
