Mirror of https://github.com/arnaucube/testudo.git, synced 2026-01-12 08:41:29 +01:00
PST/SQRT + Benches (#35)
* first version of the sqrt PST without the MIPP
* snarkpack integration
* snarkpack integration
* adding mipp as submodule directly
* snarkpack integration
* finalizing
* snarkpack integration
* update mipp with latest optimisations and add preliminary documentation
* improve codebase documentation
* remove unused imports and apply cargo fix changes
* passing v0.4
* adding gh action
* correct workflow item
* correct working dir and msrv
* remove unnecessary stuff
* wip
* wip
* remove circuit in fq as it's not needed now
* done for tonight
* wip
* wip
* wip
* parallelise commitment and groth16 verification
* finalise comments for mipp
* wip
* finalise comments
* wip
* compiling but test failing
* putting back non random blinds
* using absorb when we can
* absorbing scalar
* with bls12-381
* stuff
* trying to bring ark-blst to testudo
* correcting random implementation
* with square in place
* works with blst
* works with blst
* fix: don't require nightly Rust
  Removing the `test` feature means the crate can also be built with a stable Rust release and doesn't require a nightly Rust version.
* using ark-blst main branch
* started cleanup and added testudo benchmark
* add testudo snark and nizk in separate files
* rename functions that perform setups and add comments
* prototyping
* explain testudo-nizk
* add support for odd case in sqrt_pst
* add missing constraints and correct proof size for benchmarks
* add support for odd case in sqrt_pst
* fix typo in comment
* Documentation #31
* fix typo in comment
* Fix Cargo.toml and add benchmark for sqrt pst (#34)
* add benchmark for sqrt pst
* fix typo in comment
* add README
* comment from readme not executing

---------

Co-authored-by: Mara Mihali <maramihali@google.com>
Co-authored-by: Mara Mihali <mihalimara22@gmail.com>
Co-authored-by: Volker Mische <volker.mische@gmail.com>
@@ -1,4 +1 @@
[build]
rustflags = [
  "-C", "target-cpu=native",
]
52 .github/workflows/testudo.yml (vendored)
@@ -1,37 +1,27 @@
name: Build and Test Testudo

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
# The crate ark-ff uses the macro llvm_asm! when emitting asm which returns an
# error because it was deprecated in favour of asm!. We temporarily overcome
# this problem by setting the environment variable below (until the crate
# is updated).
env:
  RUSTFLAGS: "--emit asm -C llvm-args=-x86-asm-syntax=intel"
on: [push, pull_request]

jobs:
  build_nightly:
  cargo-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Install
        run: rustup default nightly
      - name: Install rustfmt Components
        run: rustup component add rustfmt
      # - name: Install clippy
      #   run: rustup component add clippy
      - name: Build
        run: cargo build --verbose
      - name: Run tests
        run: cargo test --verbose
      - name: Build examples
        run: cargo build --examples --verbose
      - name: Check Rustfmt Code Style
        run: cargo fmt --all -- --check
      # cargo clippy uses cargo check which returns an error when asm is emitted
      # we want to emit asm for ark-ff operations so we avoid using clippy for # now
      # - name: Check clippy warnings
      #   run: cargo clippy --all-targets --all-features
      - name: Checkout sources
        uses: actions/checkout@v2
        with:
          submodules: recursive

      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          override: true

      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: cache-${{ hashFiles('**/Cargo.lock') }}
          cache-on-failure: true

      - name: cargo test
        run: RUST_LOG=info cargo test --all --all-features -- --nocapture
63 Cargo.toml
@@ -18,20 +18,26 @@ itertools = "0.10.0"
colored = "2.0.0"
thiserror = "1.0"
json = "0.12.4"
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0" }
ark-bls12-377 = { version = "^0.3.0", features = ["r1cs","curve"] }
ark-serialize = { version = "^0.3.0", features = ["derive"] }
ark-sponge = { version = "^0.3.0", features = ["r1cs"] }
ark-crypto-primitives = { version = "^0.3.0", default-features = true }
ark-r1cs-std = { version = "^0.3.0", default-features = false }
ark-nonnative-field = { version = "0.3.0", default-features = false }
ark-relations = { version = "^0.3.0", default-features = false, optional = true }
ark-groth16 = { version = "^0.3.0", features = ["r1cs"] }
ark-bw6-761 = { version = "^0.3.0" }
ark-poly-commit = { version = "^0.3.0" }
ark-poly = { version = "^0.3.0" }
ark-ff = { version = "0.4.0", default-features = false }
ark-ec = { version = "0.4.0", default-features = false }
ark-std = { version = "0.4.0" }
ark-bls12-377 = { version = "0.4.0", features = ["r1cs","curve"] }
ark-bls12-381 = { version = "0.4.0", features = ["curve"] }
ark-blst = { git = "https://github.com/nikkolasg/ark-blst" }
ark-serialize = { version = "0.4.0", features = ["derive"] }
ark-crypto-primitives = { version = "0.4.0", features = ["sponge","r1cs","snark"] }
ark-r1cs-std = { version = "0.4.0", default-features = false }
ark-relations = { version = "0.4.0", default-features = false, optional = true }
ark-snark = { version = "0.4.0", default-features = false }
ark-groth16 = { version = "0.3.0" }
ark-bw6-761 = { version = "0.4.0" }
ark-poly-commit = { version = "0.4.0" }
ark-poly = { version = "0.4.0" }

poseidon-paramgen = { git = "https://github.com/nikkolasg/poseidon377", branch = "feat/v0.4" }
poseidon-parameters = { git = "https://github.com/nikkolasg/poseidon377", branch = "feat/v0.4" }
# Needed for ark-blst
blstrs = { version = "^0.6.1", features = ["__private_bench"] }

lazy_static = "1.4.0"
rand = { version = "0.8", features = [ "std", "std_rng" ] }
@@ -46,30 +52,21 @@ csv = "1.1.5"
criterion = "0.3.6"

[lib]
name = "libspartan"
name = "libtestudo"
path = "src/lib.rs"

[[bin]]
name = "snark"
path = "profiler/snark.rs"

[[bin]]
name = "nizk"
path = "profiler/nizk.rs"
name = "testudo"
path = "profiler/testudo.rs"

[[bench]]
name = "snark"
name = "testudo"
harness = false

[[bench]]
name = "nizk"
name = "pst"
harness = false

[[bench]]
name = "r1cs"
harness = false
debug = true

[features]
multicore = ["rayon"]
profile = []
@@ -79,6 +76,10 @@ parallel = [ "std", "ark-ff/parallel", "ark-std/parallel", "ark-ec/parallel", "a
std = ["ark-ff/std", "ark-ec/std", "ark-std/std", "ark-relations/std", "ark-serialize/std"]

[patch.crates-io]
ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std/", rev = "a2a5ac491ae005ba2afd03fd21b7d3160d794a83" }
ark-poly-commit = { git = "https://github.com/maramihali/poly-commit" }

ark-poly-commit = { git = "https://github.com/cryptonetlab/ark-polycommit", branch = "feat/variable-crs" }
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16" }
blstrs = { git = "https://github.com/nikkolasg/blstrs", branch = "feat/arkwork" }
ark-ec = { git = "https://github.com/vmx/algebra", branch = "affine-repr-xy-owned" }
ark-ff = { git = "https://github.com/vmx/algebra", branch = "affine-repr-xy-owned" }
ark-poly = { git = "https://github.com/vmx/algebra", branch = "affine-repr-xy-owned" }
ark-serialize = { git = "https://github.com/vmx/algebra", branch = "affine-repr-xy-owned" }
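The bump from arkworks 0.3 to 0.4 above is what drives the API renames in the source hunks further down, most visibly `into_repr()` becoming `into_bigint()`. A minimal sketch of that rename, assuming only the `ark-ff`, `ark-std`, and `ark-bls12-377` 0.4 crates declared above:

```rust
use ark_bls12_377::Fr;
use ark_ff::{BigInteger, PrimeField};
use ark_std::One;

fn main() {
  // arkworks 0.3 spelled this `Fr::one().into_repr().to_bytes_le()`;
  // under 0.4 the accessor is `into_bigint()`, exactly the substitution
  // the example and profiler hunks below apply.
  let one_bytes = Fr::one().into_bigint().to_bytes_le();
  assert_eq!(one_bytes[0], 1u8); // canonical little-endian encoding of 1
}
```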
424 README.md
@@ -1,421 +1,27 @@
# Spartan: High-speed zkSNARKs without trusted setup
# Testudo

![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg)
[![](https://img.shields.io/crates/v/spartan.svg)](<(https://crates.io/crates/spartan)>)
[![Build and Test Testudo](https://github.com/cryptonetlab/testudo/actions/workflows/testudo.yml/badge.svg)](https://github.com/cryptonetlab/testudo/actions/workflows/testudo.yml)

Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libspartan`, a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model.
Testudo is a linear-time prover SNARK with a small and universal trusted setup. For a deep dive, please refer to [this](https://www.notion.so/pl-strflt/Testudo-Blog-Post-Final-a18db71f8e634ebbb9f68383f7904c51) blog post.

A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details.
In the current stage, the repository contains:

Note that this library has _not_ received a security review or audit.
- a modified version of [Spartan](https://github.com/microsoft/Spartan) using [arkworks](https://github.com/arkworks-rs) with the sumchecks verified using Groth16
- a fast version of the [PST](https://eprint.iacr.org/2011/587.pdf) commitment scheme with a square-root trusted setup
- support for an arkworks wrapper around the fast blst library with GPU integration [repo](https://github.com/nikkolasg/ark-blst)

## Highlights
## Building `testudo`

We now highlight Spartan's distinctive features.
Testudo is available with stable Rust.

- **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
Run `cargo build` or `cargo test` to build or test the repository, respectively.

- **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libspartan` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exist efficient transformations and compiler toolchains from high-level programs of interest.
To run the current benchmarks on BLS12-377:

- **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).

- **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libspartan` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.

- **State-of-the-art performance:**
  Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.

### Implementation details

`libspartan` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library.

## Examples

To import `libspartan` into your Rust project, add the following dependency to `Cargo.toml`:

```text
spartan = "0.7.1"
```console
cargo bench --bench testudo --all-features release -- --nocapture
```

The following example shows how to use `libspartan` to create and verify a SNARK proof.
Some of our public APIs' style is inspired by the underlying crates we use.
## Join us!

```rust
# extern crate libspartan;
# extern crate merlin;
# use libspartan::{Instance, SNARKGens, SNARK};
# use libspartan::poseidon_transcript::PoseidonTranscript;
# use libspartan::parameters::poseidon_params;
# fn main() {
  // specify the size of an R1CS instance
  let num_vars = 1024;
  let num_cons = 1024;
  let num_inputs = 10;
  let num_non_zero_entries = 1024;

  // produce public parameters
  let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);

  // ask the library to produce a synthetic R1CS instance
  let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

  // create a commitment to the R1CS instance
  let (comm, decomm) = SNARK::encode(&inst, &gens);

  let params = poseidon_params();

  // produce a proof of satisfiability
  let mut prover_transcript = PoseidonTranscript::new(&params);
  let proof = SNARK::prove(&inst, &comm, &decomm, vars, &inputs, &gens, &mut prover_transcript);

  // verify the proof of satisfiability
  let mut verifier_transcript = PoseidonTranscript::new(&params);
  assert!(proof
    .verify(&comm, &inputs, &mut verifier_transcript, &gens)
    .is_ok());
  println!("proof verification successful!");
# }
```

Here is another example to use the NIZK variant of the Spartan proof system:

```rust
# extern crate libspartan;
# extern crate merlin;
# use libspartan::{Instance, NIZKGens, NIZK};
# use libspartan::poseidon_transcript::PoseidonTranscript;
# use libspartan::parameters::poseidon_params;
# fn main() {
  // specify the size of an R1CS instance
  let num_vars = 1024;
  let num_cons = 1024;
  let num_inputs = 10;

  // produce public parameters
  let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

  // ask the library to produce a synthetic R1CS instance
  let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

  let params = poseidon_params();

  // produce a proof of satisfiability
  let mut prover_transcript = PoseidonTranscript::new(&params);
  let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

  // verify the proof of satisfiability
  let mut verifier_transcript = PoseidonTranscript::new(&params);
  assert!(proof
    .verify(&inst, &inputs, &mut verifier_transcript, &gens)
    .is_ok());
  println!("proof verification successful!");
# }
```

Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance

```rust
#![allow(non_snake_case)]
# extern crate ark_std;
# extern crate libspartan;
# extern crate merlin;
# mod scalar;
# use scalar::Scalar;
# use libspartan::parameters::poseidon_params;
# use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
# use libspartan::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
#
# use ark_ff::{PrimeField, Field, BigInteger};
# use ark_std::{One, Zero, UniformRand};
# fn main() {
  // produce a tiny instance
  let (
    num_cons,
    num_vars,
    num_inputs,
    num_non_zero_entries,
    inst,
    assignment_vars,
    assignment_inputs,
  ) = produce_tiny_r1cs();

  // produce public parameters
  let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);

  // create a commitment to the R1CS instance
  let (comm, decomm) = SNARK::encode(&inst, &gens);
  let params = poseidon_params();

  // produce a proof of satisfiability
  let mut prover_transcript = PoseidonTranscript::new(&params);
  let proof = SNARK::prove(
    &inst,
    &comm,
    &decomm,
    assignment_vars,
    &assignment_inputs,
    &gens,
    &mut prover_transcript,
  );

  // verify the proof of satisfiability
  let mut verifier_transcript = PoseidonTranscript::new(&params);
  assert!(proof
    .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
    .is_ok());
  println!("proof verification successful!");
# }

# fn produce_tiny_r1cs() -> (
#   usize,
#   usize,
#   usize,
#   usize,
#   Instance,
#   VarsAssignment,
#   InputsAssignment,
# ) {
  // We will use the following example, but one could construct any R1CS instance.
  // Our R1CS instance is three constraints over five variables and two public inputs
  // (Z0 + Z1) * I0 - Z2 = 0
  // (Z0 + I1) * Z2 - Z3 = 0
  // Z4 * 1 - 0 = 0

  // parameters of the R1CS instance rounded to the nearest power of two
  let num_cons = 4;
  let num_vars = 5;
  let num_inputs = 2;
  let num_non_zero_entries = 5;

  // We will encode the above constraints into three matrices, where
  // the coefficients in the matrix are in the little-endian byte order
  let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
  let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
  let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();

  // The constraint system is defined over a finite field, which in our case is
  // the scalar field of ristretto255/curve25519 i.e., p = 2^{252}+27742317777372353535851937790883648493
  // To construct these matrices, we will use `curve25519-dalek` but one can use any other method.

  // a variable that holds a byte representation of 1
  let one = Scalar::one().into_repr().to_bytes_le();

  // R1CS is a set of three sparse matrices A B C, where there is a row for every
  // constraint and a column for every entry in z = (vars, 1, inputs)
  // An R1CS instance is satisfiable iff:
  // Az \circ Bz = Cz, where z = (vars, 1, inputs)

  // constraint 0 entries in (A,B,C)
  // constraint 0 is (Z0 + Z1) * I0 - Z2 = 0.
  // We set 1 in matrix A for columns that correspond to Z0 and Z1
  // We set 1 in matrix B for column that corresponds to I0
  // We set 1 in matrix C for column that corresponds to Z2
  A.push((0, 0, one.clone()));
  A.push((0, 1, one.clone()));
  B.push((0, num_vars + 1, one.clone()));
  C.push((0, 2, one.clone()));

  // constraint 1 entries in (A,B,C)
  A.push((1, 0, one.clone()));
  A.push((1, num_vars + 2, one.clone()));
  B.push((1, 2, one.clone()));
  C.push((1, 3, one.clone()));

  // constraint 2 entries in (A,B,C)
  A.push((2, 4, one.clone()));
  B.push((2, num_vars, one.clone()));

  let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();

  // compute a satisfying assignment
  let mut rng = ark_std::rand::thread_rng();
  let i0 = Scalar::rand(&mut rng);
  let i1 = Scalar::rand(&mut rng);
  let z0 = Scalar::rand(&mut rng);
  let z1 = Scalar::rand(&mut rng);
  let z2 = (z0 + z1) * i0; // constraint 0
  let z3 = (z0 + i1) * z2; // constraint 1
  let z4 = Scalar::zero(); // constraint 2

  // create a VarsAssignment
  let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
  vars[0] = z0.into_repr().to_bytes_le();
  vars[1] = z1.into_repr().to_bytes_le();
  vars[2] = z2.into_repr().to_bytes_le();
  vars[3] = z3.into_repr().to_bytes_le();
  vars[4] = z4.into_repr().to_bytes_le();
  let assignment_vars = VarsAssignment::new(&vars).unwrap();

  // create an InputsAssignment
  let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
  inputs[0] = i0.into_repr().to_bytes_le();
  inputs[1] = i1.into_repr().to_bytes_le();
  let assignment_inputs = InputsAssignment::new(&inputs).unwrap();

  // check if the instance we created is satisfiable
  let res = inst.is_sat(&assignment_vars, &assignment_inputs);
  assert_eq!(res.unwrap(), true);

  (
    num_cons,
    num_vars,
    num_inputs,
    num_non_zero_entries,
    inst,
    assignment_vars,
    assignment_inputs,
  )
# }
```

For more examples, see [`examples/`](examples) directory in this repo.

## Building `libspartan`

Install [`rustup`](https://rustup.rs/)

Switch to nightly Rust using `rustup`:

```text
rustup default nightly
```

Clone the repository:

```text
git clone https://github.com/Microsoft/Spartan
cd Spartan
```

To build docs for public APIs of `libspartan`:

```text
cargo doc
```

To run tests:

```text
RUSTFLAGS="-C target_cpu=native" cargo test
```

To build `libspartan`:

```text
RUSTFLAGS="-C target_cpu=native" cargo build --release
```

> NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build remove the "simd_backend" feature argument in `Cargo.toml`.

### Supported features

- `profile`: enables fine-grained profiling information (see below for its use)

## Performance

### End-to-end benchmarks

`libspartan` includes two benches: `benches/nizk.rs` and `benches/snark.rs`. If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below).

To run end-to-end benchmarks:

```text
RUSTFLAGS="-C target_cpu=native" cargo bench
```

### Fine-grained profiling

Build `libspartan` with `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`.

These profilers report performance as depicted below (for varying R1CS instance sizes). The reported
performance is from running the profilers on a Microsoft Surface Laptop 3 on a single CPU core of Intel Core i7-1065G7 running Ubuntu 20.04 (atop WSL2 on Windows 10).
See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature.

```text
$ ./target/release/snark
Profiler:: SNARK
* number_of_constraints 1048576
* number_of_variables 1048576
* number_of_inputs 10
* number_non-zero_entries_A 1048576
* number_non-zero_entries_B 1048576
* number_non-zero_entries_C 1048576
* SNARK::encode
* SNARK::encode 14.2644201s
* SNARK::prove
* R1CSProof::prove
* polycommit
* polycommit 2.7175848s
* prove_sc_phase_one
* prove_sc_phase_one 683.7481ms
* prove_sc_phase_two
* prove_sc_phase_two 846.1056ms
* polyeval
* polyeval 193.4216ms
* R1CSProof::prove 4.4416193s
* len_r1cs_sat_proof 47024
* eval_sparse_polys
* eval_sparse_polys 377.357ms
* R1CSEvalProof::prove
* commit_nondet_witness
* commit_nondet_witness 14.4507331s
* build_layered_network
* build_layered_network 3.4360521s
* evalproof_layered_network
* len_product_layer_proof 64712
* evalproof_layered_network 15.5708066s
* R1CSEvalProof::prove 34.2930559s
* len_r1cs_eval_proof 133720
* SNARK::prove 39.1297568s
* SNARK::proof_compressed_len 141768
* SNARK::verify
* verify_sat_proof
* verify_sat_proof 20.0828ms
* verify_eval_proof
* verify_polyeval_proof
* verify_prod_proof
* verify_prod_proof 1.1847ms
* verify_hash_proof
* verify_hash_proof 81.06ms
* verify_polyeval_proof 82.3583ms
* verify_eval_proof 82.8937ms
* SNARK::verify 103.0536ms
```

```text
$ ./target/release/nizk
Profiler:: NIZK
* number_of_constraints 1048576
* number_of_variables 1048576
* number_of_inputs 10
* number_non-zero_entries_A 1048576
* number_non-zero_entries_B 1048576
* number_non-zero_entries_C 1048576
* NIZK::prove
* R1CSProof::prove
* polycommit
* polycommit 2.7220635s
* prove_sc_phase_one
* prove_sc_phase_one 722.5487ms
* prove_sc_phase_two
* prove_sc_phase_two 862.6796ms
* polyeval
* polyeval 190.2233ms
* R1CSProof::prove 4.4982305s
* len_r1cs_sat_proof 47024
* NIZK::prove 4.5139888s
* NIZK::proof_compressed_len 48134
* NIZK::verify
* eval_sparse_polys
* eval_sparse_polys 395.0847ms
* verify_sat_proof
* verify_sat_proof 19.286ms
* NIZK::verify 414.5102ms
```

## LICENSE

See [LICENSE](./LICENSE)

## Contributing

See [CONTRIBUTING](./CONTRIBUTING.md)
If you want to contribute, reach out to the Discord server of [cryptonet](https://discord.com/invite/CFnTSkVTCk).
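The Spartan examples removed above have direct counterparts in the `TestudoSnark` API this commit introduces; the example and profiler diffs further down exercise it. As a condensed, non-authoritative sketch of the new flow, assembled from those files and assuming BLS12-377 with the crate's own Poseidon parameters:

```rust
use libtestudo::parameters::poseidon_params;
use libtestudo::poseidon_transcript::PoseidonTranscript;
use libtestudo::testudo_snark::{TestudoSnark, TestudoSnarkGens};
use libtestudo::Instance;

type E = ark_bls12_377::Bls12_377;
type F = ark_bls12_377::Fr;

fn main() {
  let (num_cons, num_vars, num_inputs) = (1024, 1024, 10);
  let params = poseidon_params();

  // synthetic R1CS instance, as in the removed README examples
  let (inst, vars, inputs) = Instance::<F>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

  // setup, encode, prove, and verify now all go through TestudoSnark
  let gens = TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, params.clone());
  let (comm, decomm) = TestudoSnark::encode(&inst, &gens);

  let mut prover_transcript = PoseidonTranscript::new(&params);
  let proof = TestudoSnark::prove(
    &inst, &comm, &decomm, vars, &inputs, &gens, &mut prover_transcript, params.clone(),
  )
  .unwrap();

  let mut verifier_transcript = PoseidonTranscript::new(&params);
  assert!(proof
    .verify(&gens, &comm, &inputs, &mut verifier_transcript, params)
    .is_ok());
}
```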
151 benches/nizk.rs
@@ -1,151 +0,0 @@
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate sha3;

use std::time::{Duration, SystemTime};

use libspartan::{
  parameters::POSEIDON_PARAMETERS_FR_377, poseidon_transcript::PoseidonTranscript, Instance,
  NIZKGens, NIZK,
};

use criterion::*;

fn nizk_prove_benchmark(c: &mut Criterion) {
  for &s in [24, 28, 30].iter() {
    let mut group = c.benchmark_group("R1CS_prove_benchmark");

    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;
    let start = SystemTime::now();
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let end = SystemTime::now();
    let duration = end.duration_since(start).unwrap();
    println!(
      "Generating r1cs instance with {} constraints took {} ms",
      num_cons,
      duration.as_millis()
    );
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

    let name = format!("R1CS_prove_{}", num_vars);
    group
      .measurement_time(Duration::from_secs(60))
      .bench_function(&name, move |b| {
        b.iter(|| {
          let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
          NIZK::prove(
            black_box(&inst),
            black_box(vars.clone()),
            black_box(&inputs),
            black_box(&gens),
            black_box(&mut prover_transcript),
          );
        });
      });
    group.finish();
  }
}

fn nizk_verify_benchmark(c: &mut Criterion) {
  for &s in [4, 6, 8, 10, 12, 16, 20, 24, 28, 30].iter() {
    let mut group = c.benchmark_group("R1CS_verify_benchmark");

    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    // these are the public io
    let num_inputs = 10;
    let start = SystemTime::now();
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let end = SystemTime::now();
    let duration = end.duration_since(start).unwrap();
    println!(
      "Generating r1cs instance with {} constraints took {} ms",
      num_cons,
      duration.as_millis()
    );
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
    // produce a proof of satisfiability
    let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
    let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

    let name = format!("R1CS_verify_{}", num_cons);
    group
      .measurement_time(Duration::from_secs(60))
      .bench_function(&name, move |b| {
        b.iter(|| {
          let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
          assert!(proof
            .verify(
              black_box(&inst),
              black_box(&inputs),
              black_box(&mut verifier_transcript),
              black_box(&gens),
            )
            .is_ok());
        });
      });
    group.finish();
  }
}

fn nizk_verify_groth16_benchmark(c: &mut Criterion) {
  for &s in [4, 6, 8, 10, 12, 16, 20, 24, 28, 30].iter() {
    let mut group = c.benchmark_group("R1CS_verify_groth16_benchmark");

    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    // these are the public io
    let num_inputs = 10;
    let start = SystemTime::now();
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let end = SystemTime::now();
    let duration = end.duration_since(start).unwrap();
    println!(
      "Generating r1cs instance with {} constraints took {} ms",
      num_cons,
      duration.as_millis()
    );
    // produce a proof of satisfiability
    let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
    let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

    let name = format!("R1CS_verify_groth16_{}", num_cons);
    group
      .measurement_time(Duration::from_secs(60))
      .bench_function(&name, move |b| {
        b.iter(|| {
          let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
          assert!(proof
            .verify_groth16(
              black_box(&inst),
              black_box(&inputs),
              black_box(&mut verifier_transcript),
              black_box(&gens)
            )
            .is_ok());
        });
      });
    group.finish();
  }
}

fn set_duration() -> Criterion {
  Criterion::default().sample_size(2)
}

criterion_group! {
  name = benches_nizk;
  config = set_duration();
  targets = nizk_prove_benchmark, nizk_verify_benchmark, nizk_verify_groth16_benchmark
}

criterion_main!(benches_nizk);
98 benches/pst.rs (new file)
@@ -0,0 +1,98 @@
use std::time::Instant;

use ark_poly_commit::multilinear_pc::MultilinearPC;
use ark_serialize::CanonicalSerialize;
use libtestudo::{
  parameters::PoseidonConfiguration, poseidon_transcript::PoseidonTranscript, sqrt_pst::Polynomial,
};
use serde::Serialize;
type F = ark_bls12_377::Fr;
type E = ark_bls12_377::Bls12_377;
use ark_std::UniformRand;

#[derive(Default, Clone, Serialize)]
struct BenchmarkResults {
  power: usize,
  commit_time: u128,
  opening_time: u128,
  verification_time: u128,
  proof_size: usize,
  commiter_key_size: usize,
}
fn main() {
  let params = ark_bls12_377::Fr::poseidon_params();

  let mut writer = csv::Writer::from_path("sqrt_pst.csv").expect("unable to open csv writer");
  for &s in [4, 5, 20, 27].iter() {
    println!("Running for {} inputs", s);
    let mut rng = ark_std::test_rng();
    let mut br = BenchmarkResults::default();
    br.power = s;
    let num_vars = s;
    let len = 2_usize.pow(num_vars as u32);
    let z: Vec<F> = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect();
    let r: Vec<F> = (0..num_vars)
      .into_iter()
      .map(|_| F::rand(&mut rng))
      .collect();

    let setup_vars = (num_vars as f32 / 2.0).ceil() as usize;
    let gens = MultilinearPC::<E>::setup((num_vars as f32 / 2.0).ceil() as usize, &mut rng);
    let (ck, vk) = MultilinearPC::<E>::trim(&gens, setup_vars);

    let mut cks = Vec::<u8>::new();
    ck.serialize_with_mode(&mut cks, ark_serialize::Compress::Yes)
      .unwrap();
    br.commiter_key_size = cks.len();

    let mut pl = Polynomial::from_evaluations(&z.clone());

    let v = pl.eval(&r);

    let start = Instant::now();
    let (comm_list, t) = pl.commit(&ck);
    let duration = start.elapsed().as_millis();
    br.commit_time = duration;

    let mut prover_transcript = PoseidonTranscript::new(&params);

    let start = Instant::now();
    let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t);
    let duration = start.elapsed().as_millis();
    br.opening_time = duration;

    let mut p1 = Vec::<u8>::new();
    let mut p2 = Vec::<u8>::new();
    pst_proof
      .serialize_with_mode(&mut p1, ark_serialize::Compress::Yes)
      .unwrap();

    mipp_proof
      .serialize_with_mode(&mut p2, ark_serialize::Compress::Yes)
      .unwrap();

    br.proof_size = p1.len() + p2.len();

    let mut verifier_transcript = PoseidonTranscript::new(&params);

    let start = Instant::now();
    let res = Polynomial::verify(
      &mut verifier_transcript,
      &vk,
      &u,
      &r,
      v,
      &pst_proof,
      &mipp_proof,
      &t,
    );
    let duration = start.elapsed().as_millis();
    br.verification_time = duration;
    assert!(res == true);

    writer
      .serialize(br)
      .expect("unable to write results to csv");
    writer.flush().expect("wasn't able to flush");
  }
}
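A note for orientation, ours rather than the diff's: `sqrt_pst` commits to a $\mu$-variate multilinear polynomial using a PST commitment key for only $\lceil \mu/2 \rceil$ variables, which is why `setup_vars` above is computed as `ceil(num_vars / 2)`. Writing $\mu_1 = \lfloor \mu/2 \rfloor$ and $\mu_2 = \lceil \mu/2 \rceil$, the evaluation vector `z` is viewed as $2^{\mu_1}$ row polynomials in $\mu_2$ variables:

```latex
% Our own notation, sketching the square-root split the benchmark exercises:
f(X_1,\dots,X_\mu)
  \;=\; \sum_{b \in \{0,1\}^{\mu_1}}
        \mathrm{eq}\bigl(b;\, X_1,\dots,X_{\mu_1}\bigr)\,
        f_b\bigl(X_{\mu_1+1},\dots,X_\mu\bigr)
```

Each $f_b$ is committed with the $\mu_2$-variable key (`comm_list`), so the trusted setup scales with $2^{\mu_2} \approx \sqrt{2^\mu}$ rather than $2^\mu$; `open` then uses the MIPP argument to aggregate the $2^{\mu_1}$ row commitments (combined into `t`) instead of the verifier checking each one individually.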
@@ -1,72 +0,0 @@
use std::time::Instant;

use libspartan::{
  parameters::POSEIDON_PARAMETERS_FR_377, poseidon_transcript::PoseidonTranscript, Instance,
  NIZKGens, NIZK,
};
use serde::Serialize;

#[derive(Default, Clone, Serialize)]
struct BenchmarkResults {
  power: usize,
  input_constraints: usize,
  spartan_verifier_circuit_constraints: usize,
  r1cs_instance_generation_time: u128,
  spartan_proving_time: u128,
  groth16_setup_time: u128,
  groth16_proving_time: u128,
  testudo_verification_time: u128,
  testudo_proving_time: u128,
}

fn main() {
  let mut writer = csv::Writer::from_path("testudo.csv").expect("unable to open csv writer");
  // for &s in [
  //   10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
  // ]
  // .iter()
  // For testing purposes we currently bench on very small instance to ensure
  // correctness and then on biggest one for timings.
  for &s in [4, 26].iter() {
    println!("Running for {} inputs", s);
    let mut br = BenchmarkResults::default();
    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    br.power = s;
    br.input_constraints = num_cons;
    let num_inputs = 10;

    let start = Instant::now();
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let duration = start.elapsed().as_millis();
    br.r1cs_instance_generation_time = duration;
    let mut prover_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);

    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

    let start = Instant::now();
    let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
    let duration = start.elapsed().as_millis();
    br.spartan_proving_time = duration;

    let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
    let res = proof.verify(&inst, &inputs, &mut verifier_transcript, &gens);
    assert!(res.is_ok());
    br.spartan_verifier_circuit_constraints = res.unwrap();

    let mut verifier_transcript = PoseidonTranscript::new(&POSEIDON_PARAMETERS_FR_377);
    let res = proof.verify_groth16(&inst, &inputs, &mut verifier_transcript, &gens);
    assert!(res.is_ok());

    let (ds, dp, dv) = res.unwrap();
    br.groth16_setup_time = ds;
    br.groth16_proving_time = dp;

    br.testudo_proving_time = br.spartan_proving_time + br.groth16_proving_time;
    br.testudo_verification_time = dv;
    writer
      .serialize(br)
      .expect("unable to write results to csv");
    writer.flush().expect("wasn't able to flush");
  }
}
137 benches/snark.rs
@@ -1,137 +0,0 @@
extern crate libspartan;
extern crate merlin;

use libspartan::{
  parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, Instance, SNARKGens,
  SNARK,
};

use criterion::*;

fn snark_encode_benchmark(c: &mut Criterion) {
  for &s in [10, 12, 16].iter() {
    let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("SNARK_encode_benchmark");
    group.plot_config(plot_config);

    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public parameters
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    // produce a commitment to R1CS instance
    let name = format!("SNARK_encode_{}", num_cons);
    group.bench_function(&name, move |b| {
      b.iter(|| {
        SNARK::encode(black_box(&inst), black_box(&gens));
      });
    });
    group.finish();
  }
}

fn snark_prove_benchmark(c: &mut Criterion) {
  for &s in [10, 12, 16].iter() {
    let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("SNARK_prove_benchmark");
    group.plot_config(plot_config);

    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;

    let params = poseidon_params();

    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public parameters
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    // produce a commitment to R1CS instance
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    // produce a proof
    let name = format!("SNARK_prove_{}", num_cons);
    group.bench_function(&name, move |b| {
      b.iter(|| {
        let mut prover_transcript = PoseidonTranscript::new(&params);
        SNARK::prove(
          black_box(&inst),
          black_box(&comm),
          black_box(&decomm),
          black_box(vars.clone()),
          black_box(&inputs),
          black_box(&gens),
          black_box(&mut prover_transcript),
        );
      });
    });
    group.finish();
  }
}

fn snark_verify_benchmark(c: &mut Criterion) {
  for &s in [10, 12, 16].iter() {
    let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("SNARK_verify_benchmark");
    group.plot_config(plot_config);

    let params = poseidon_params();

    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public parameters
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    // produce a commitment to R1CS instance
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    // produce a proof of satisfiability
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let proof = SNARK::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
    );

    // verify the proof
    let name = format!("SNARK_verify_{}", num_cons);
    group.bench_function(&name, move |b| {
      b.iter(|| {
        let mut verifier_transcript = PoseidonTranscript::new(&params);
        assert!(proof
          .verify(
            black_box(&comm),
            black_box(&inputs),
            black_box(&mut verifier_transcript),
            black_box(&gens)
          )
          .is_ok());
      });
    });
    group.finish();
  }
}

fn set_duration() -> Criterion {
  Criterion::default().sample_size(10)
}

criterion_group! {
  name = benches_snark;
  config = set_duration();
  targets = snark_verify_benchmark
}

criterion_main!(benches_snark);
127 benches/testudo.rs (new file)
@@ -0,0 +1,127 @@
use std::time::Instant;

use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_ff::PrimeField;
use ark_serialize::*;
use libtestudo::parameters::PoseidonConfiguration;
use libtestudo::{
  poseidon_transcript::PoseidonTranscript,
  testudo_snark::{TestudoSnark, TestudoSnarkGens},
  Instance,
};
use serde::Serialize;

#[derive(Default, Clone, Serialize)]
struct BenchmarkResults {
  power: usize,
  input_constraints: usize,
  testudo_proving_time: u128,
  testudo_verification_time: u128,
  sat_proof_size: usize,
  eval_proof_size: usize,
  total_proof_size: usize,
}

fn main() {
  bench_with_bls12_377();
  // bench_with_bls12_381();
  // bench_with_ark_blst();
}

fn bench_with_ark_blst() {
  let params = ark_blst::Scalar::poseidon_params();
  testudo_snark_bench::<ark_blst::Bls12>(params, "testudo_blst");
}

fn bench_with_bls12_377() {
  let params = ark_bls12_377::Fr::poseidon_params();
  testudo_snark_bench::<ark_bls12_377::Bls12_377>(params, "testudo_bls12_377");
}

fn bench_with_bls12_381() {
  let params = ark_bls12_381::Fr::poseidon_params();
  testudo_snark_bench::<ark_bls12_381::Bls12_381>(params, "testudo_bls12_381");
}

fn testudo_snark_bench<E>(params: PoseidonConfig<E::ScalarField>, file_name: &str)
where
  E: Pairing,
  E::ScalarField: PrimeField,
  E::ScalarField: Absorb,
{
  let mut writer = csv::Writer::from_path(file_name).expect("unable to open csv writer");
  for &s in [4, 5, 10, 12, 14, 16, 18, 20, 22, 24, 26].iter() {
    println!("Running for {} inputs", s);
    let mut br = BenchmarkResults::default();
    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    br.power = s;
    br.input_constraints = num_cons;
    let num_inputs = 10;

    let (inst, vars, inputs) =
      Instance::<E::ScalarField>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let mut prover_transcript = PoseidonTranscript::new(&params.clone());

    let gens =
      TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, params.clone());

    let (comm, decomm) = TestudoSnark::<E>::encode(&inst, &gens);

    let start = Instant::now();
    let proof = TestudoSnark::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
      params.clone(),
    )
    .unwrap();
    let duration = start.elapsed().as_millis();
    br.testudo_proving_time = duration;

    let mut sat_proof = Vec::<u8>::new();
    proof
      .r1cs_verifier_proof
      .serialize_with_mode(&mut sat_proof, Compress::Yes)
      .unwrap();
    br.sat_proof_size = sat_proof.len();

    let mut eval_proof = Vec::<u8>::new();
    proof
      .r1cs_eval_proof
      .serialize_with_mode(&mut eval_proof, Compress::Yes)
      .unwrap();
    br.eval_proof_size = eval_proof.len();

    let mut total_proof = Vec::<u8>::new();
    proof
      .serialize_with_mode(&mut total_proof, Compress::Yes)
      .unwrap();
    br.total_proof_size = total_proof.len();

    let mut verifier_transcript = PoseidonTranscript::new(&params.clone());
    let start = Instant::now();

    let res = proof.verify(
      &gens,
      &comm,
      &inputs,
      &mut verifier_transcript,
      params.clone(),
    );
    assert!(res.is_ok());
    let duration = start.elapsed().as_millis();
    br.testudo_verification_time = duration;

    writer
      .serialize(br)
      .expect("unable to write results to csv");
    writer.flush().expect("wasn't able to flush");
  }
}
@@ -8,23 +8,24 @@
//! `(Z3 + 5) * 1 - I0 = 0`
//!
//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
use ark_bls12_377::Fr as Scalar;
use ark_ec::pairing::Pairing;
use ark_ff::{BigInteger, PrimeField};
use ark_std::{One, UniformRand, Zero};
use libspartan::{
  parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, InputsAssignment,
  Instance, SNARKGens, VarsAssignment, SNARK,
use libtestudo::testudo_snark::{TestudoSnark, TestudoSnarkGens};
use libtestudo::{
  parameters::poseidon_params, poseidon_transcript::PoseidonTranscript, InputsAssignment, Instance,
  VarsAssignment,
};

#[allow(non_snake_case)]
fn produce_r1cs() -> (
fn produce_r1cs<E: Pairing>() -> (
  usize,
  usize,
  usize,
  usize,
  Instance,
  VarsAssignment,
  InputsAssignment,
  Instance<E::ScalarField>,
  VarsAssignment<E::ScalarField>,
  InputsAssignment<E::ScalarField>,
) {
  // parameters of the R1CS instance
  let num_cons = 4;
@@ -38,7 +39,7 @@ fn produce_r1cs() -> (
  let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
  let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();

  let one = Scalar::one().into_repr().to_bytes_le();
  let one = E::ScalarField::one().into_bigint().to_bytes_le();

  // R1CS is a set of three sparse matrices A B C, where there is a row for every
  // constraint and a column for every entry in z = (vars, 1, inputs)
@@ -67,31 +68,35 @@ fn produce_r1cs() -> (
  // constraint 3 entries in (A,B,C)
  // constraint 3 is (Z3 + 5) * 1 - I0 = 0.
  A.push((3, 3, one.clone()));
  A.push((3, num_vars, Scalar::from(5u32).into_repr().to_bytes_le()));
  A.push((
    3,
    num_vars,
    E::ScalarField::from(5u32).into_bigint().to_bytes_le(),
  ));
  B.push((3, num_vars, one.clone()));
  C.push((3, num_vars + 1, one));

  let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
  let inst = Instance::<E::ScalarField>::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();

  // compute a satisfying assignment
  let mut rng = ark_std::rand::thread_rng();
  let z0 = Scalar::rand(&mut rng);
  let z0 = E::ScalarField::rand(&mut rng);
  let z1 = z0 * z0; // constraint 0
  let z2 = z1 * z0; // constraint 1
  let z3 = z2 + z0; // constraint 2
  let i0 = z3 + Scalar::from(5u32); // constraint 3
  let i0 = z3 + E::ScalarField::from(5u32); // constraint 3

  // create a VarsAssignment
  let mut vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];
  vars[0] = z0.into_repr().to_bytes_le();
  vars[1] = z1.into_repr().to_bytes_le();
  vars[2] = z2.into_repr().to_bytes_le();
  vars[3] = z3.into_repr().to_bytes_le();
  let mut vars = vec![E::ScalarField::zero().into_bigint().to_bytes_le(); num_vars];
  vars[0] = z0.into_bigint().to_bytes_le();
  vars[1] = z1.into_bigint().to_bytes_le();
  vars[2] = z2.into_bigint().to_bytes_le();
  vars[3] = z3.into_bigint().to_bytes_le();
  let assignment_vars = VarsAssignment::new(&vars).unwrap();

  // create an InputsAssignment
  let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
  inputs[0] = i0.into_repr().to_bytes_le();
  let mut inputs = vec![E::ScalarField::zero().into_bigint().to_bytes_le(); num_inputs];
  inputs[0] = i0.into_bigint().to_bytes_le();
  let assignment_inputs = InputsAssignment::new(&inputs).unwrap();

  // check if the instance we created is satisfiable
@@ -109,6 +114,7 @@ fn produce_r1cs() -> (
  )
}

type E = ark_bls12_377::Bls12_377;
fn main() {
  // produce an R1CS instance
  let (
@@ -119,19 +125,25 @@ fn main() {
    inst,
    assignment_vars,
    assignment_inputs,
  ) = produce_r1cs();
  ) = produce_r1cs::<E>();

  let params = poseidon_params();

  // produce public parameters
  let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
  let gens = TestudoSnarkGens::<E>::setup(
    num_cons,
    num_vars,
    num_inputs,
    num_non_zero_entries,
    params.clone(),
  );

  // create a commitment to the R1CS instance
  let (comm, decomm) = SNARK::encode(&inst, &gens);
  let (comm, decomm) = TestudoSnark::encode(&inst, &gens);

  // produce a proof of satisfiability
  let mut prover_transcript = PoseidonTranscript::new(&params);
  let proof = SNARK::prove(
  let proof = TestudoSnark::prove(
    &inst,
    &comm,
    &decomm,
@@ -139,12 +151,20 @@ fn main() {
    &assignment_inputs,
    &gens,
    &mut prover_transcript,
  );
    params.clone(),
  )
  .unwrap();

  // verify the proof of satisfiability
  let mut verifier_transcript = PoseidonTranscript::new(&params);
  assert!(proof
    .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
    .verify(
      &gens,
      &comm,
      &assignment_inputs,
      &mut verifier_transcript,
      params
    )
    .is_ok());
  println!("proof verification successful!");
}
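For concreteness, here is our own worked instance of the constraint system in the example above, echoing the blog post its file header links to: with `Z0 = 3` the constraint chain gives `Z1 = 9`, `Z2 = 27`, `Z3 = 30`, and public input `I0 = 35`, i.e. the circuit encodes `x^3 + x + 5 = 35`. A minimal check over the integers:

```rust
fn main() {
  // Constraints from the example diff: Z0*Z0 = Z1, Z1*Z0 = Z2,
  // Z2 + Z0 = Z3, (Z3 + 5) * 1 = I0 (here over i64 instead of a field).
  let z0: i64 = 3;
  let z1 = z0 * z0; // constraint 0 -> 9
  let z2 = z1 * z0; // constraint 1 -> 27
  let z3 = z2 + z0; // constraint 2 -> 30
  let i0 = z3 + 5; // constraint 3 -> 35
  assert_eq!(i0, 35);
  assert_eq!(z0.pow(3) + z0 + 5, i0); // the cubic the circuit encodes
}
```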
@@ -1,52 +0,0 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]

extern crate libspartan;
extern crate merlin;
extern crate rand;

use ark_serialize::*;
use libspartan::parameters::poseidon_params;
use libspartan::poseidon_transcript::PoseidonTranscript;
use libspartan::{Instance, NIZKGens, NIZK};

fn print(msg: &str) {
  let star = "* ";
  println!("{:indent$}{}{}", "", star, msg, indent = 2);
}

pub fn main() {
  // the list of number of variables (and constraints) in an R1CS instance
  let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];

  println!("Profiler:: NIZK");
  for &s in inst_sizes.iter() {
    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;

    // produce a synthetic R1CSInstance
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public generators
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

    let params = poseidon_params();
    // produce a proof of satisfiability
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

    let mut proof_encoded = Vec::new();
    proof.serialize(&mut proof_encoded).unwrap();
    let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len());
    print(&msg_proof_len);

    // verify the proof of satisfiability
    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(&inst, &inputs, &mut verifier_transcript, &gens)
      .is_ok());

    println!();
  }
}
@@ -1,63 +0,0 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]

extern crate libspartan;
extern crate merlin;

use ark_serialize::*;
use libspartan::parameters::poseidon_params;
use libspartan::poseidon_transcript::PoseidonTranscript;
use libspartan::{Instance, SNARKGens, SNARK};

fn print(msg: &str) {
  let star = "* ";
  println!("{:indent$}{}{}", "", star, msg, indent = 2);
}

pub fn main() {
  // the list of number of variables (and constraints) in an R1CS instance
  let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];

  println!("Profiler:: SNARK");
  for &s in inst_sizes.iter() {
    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;

    // produce a synthetic R1CSInstance
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public generators
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    // create a commitment to R1CSInstance
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    let params = poseidon_params();

    // produce a proof of satisfiability
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let proof = SNARK::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
    );

    let mut proof_encoded = Vec::new();
    proof.serialize(&mut proof_encoded).unwrap();
    let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len());
    print(&msg_proof_len);

    // verify the proof of satisfiability
    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(&comm, &inputs, &mut verifier_transcript, &gens)
      .is_ok());

    println!();
  }
}
92 profiler/testudo.rs (new file)
@@ -0,0 +1,92 @@
|
||||
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]

extern crate libtestudo;
extern crate merlin;
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_ff::PrimeField;
use ark_serialize::*;
use libtestudo::parameters::PoseidonConfiguration;
use libtestudo::poseidon_transcript::PoseidonTranscript;
use libtestudo::{
  testudo_snark::{TestudoSnark, TestudoSnarkGens},
  Instance,
};

fn print(msg: &str) {
  let star = "* ";
  println!("{:indent$}{}{}", "", star, msg, indent = 2);
}

fn main() {
  let params = ark_bls12_377::Fr::poseidon_params();
  profiler::<ark_bls12_377::Bls12_377>(params);
}

fn profiler<E>(params: PoseidonConfig<E::ScalarField>)
where
  E: Pairing,
  E::ScalarField: PrimeField,
  E::ScalarField: Absorb,
{
  // the list of number of variables (and constraints) in an R1CS instance
  let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];

  println!("Profiler:: SNARK");
  for &s in inst_sizes.iter() {
    let num_vars = (2_usize).pow(s as u32);
    let num_cons = num_vars;
    let num_inputs = 10;

    // produce a synthetic R1CSInstance
    let (inst, vars, inputs) =
      Instance::<E::ScalarField>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public generators
    let gens =
      TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, params.clone());

    // create a commitment to R1CSInstance
    let (comm, decomm) = TestudoSnark::encode(&inst, &gens);

    // produce a proof of satisfiability
    let mut prover_transcript = PoseidonTranscript::new(&params.clone());
    let proof = TestudoSnark::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
      params.clone(),
    )
    .unwrap();

    let mut proof_encoded = Vec::new();
    proof
      .serialize_with_mode(&mut proof_encoded, Compress::Yes)
      .unwrap();
    let msg_proof_len = format!(
      "TestudoSnark::proof_compressed_len {:?}",
      proof_encoded.len()
    );
    print(&msg_proof_len);

    // verify the proof of satisfiability
    let mut verifier_transcript = PoseidonTranscript::new(&params.clone());
    assert!(proof
      .verify(
        &gens,
        &comm,
        &inputs,
        &mut verifier_transcript,
        params.clone()
      )
      .is_ok());

    println!();
  }
}
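Because profiler is generic over any Pairing whose scalar field implements Absorb (and, for the parameters, PoseidonConfiguration), the same benchmark can target another curve. A hedged sketch, assuming ark-bls12-381 is added as a dependency and its Fr implements PoseidonConfiguration (neither is shown in this diff):

fn main() {
  // Hypothetical: run the identical sweep over BLS12-381 instead of BLS12-377.
  let params = ark_bls12_381::Fr::poseidon_params();
  profiler::<ark_bls12_381::Bls12_381>(params);
}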
4  rustfmt.toml  Normal file
@@ -0,0 +1,4 @@
edition = "2018"
tab_spaces = 2
newline_style = "Unix"
use_try_shorthand = true
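For context on the last option: use_try_shorthand makes rustfmt rewrite the legacy try! macro into the ? operator, e.g.:

// before `cargo fmt`:
let f = try!(std::fs::File::open("params.bin"));
// after, with use_try_shorthand = true:
let f = std::fs::File::open("params.bin")?;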
@@ -1,37 +1,35 @@
use super::group::{GroupElement, GroupElementAffine, VartimeMultiscalarMul, GROUP_BASEPOINT};
use super::scalar::Scalar;
use crate::group::CompressGroupElement;
use crate::ark_std::UniformRand;
use crate::parameters::*;
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::PrimeField;

use ark_sponge::poseidon::PoseidonSponge;
use ark_sponge::CryptographicSponge;
use ark_crypto_primitives::sponge::poseidon::PoseidonSponge;
use ark_crypto_primitives::sponge::CryptographicSponge;
use ark_ec::{CurveGroup, VariableBaseMSM};
use rand::SeedableRng;
use std::ops::Mul;

#[derive(Debug, Clone)]
pub struct MultiCommitGens {
pub struct MultiCommitGens<G: CurveGroup> {
  pub n: usize,
  pub G: Vec<GroupElement>,
  pub h: GroupElement,
  pub G: Vec<G::Affine>,
  pub h: G::Affine,
}

impl MultiCommitGens {
impl<G: CurveGroup> MultiCommitGens<G> {
  pub fn new(n: usize, label: &[u8]) -> Self {
    let params = poseidon_params();
    let mut sponge = PoseidonSponge::new(&params);
    sponge.absorb(&label);
    sponge.absorb(&GROUP_BASEPOINT.compress().0);
    let mut b = Vec::new();
    G::generator().serialize_compressed(&mut b).unwrap();
    sponge.absorb(&b);

    let mut gens: Vec<GroupElement> = Vec::new();
    for _ in 0..n + 1 {
      let mut el_aff: Option<GroupElementAffine> = None;
      while el_aff.is_none() {
        let uniform_bytes = sponge.squeeze_bytes(64);
        el_aff = GroupElementAffine::from_random_bytes(&uniform_bytes);
      }
      let el = el_aff.unwrap().mul_by_cofactor_to_projective();
      gens.push(el);
    }
    let gens = (0..=n)
      .map(|_| {
        let mut uniform_bytes = [0u8; 32];
        uniform_bytes.copy_from_slice(&sponge.squeeze_bytes(32)[..]);
        let mut prng = rand::rngs::StdRng::from_seed(uniform_bytes);
        G::Affine::rand(&mut prng)
      })
      .collect::<Vec<_>>();

    MultiCommitGens {
      n,
@@ -40,7 +38,7 @@ impl MultiCommitGens {
    }
  }

  pub fn clone(&self) -> MultiCommitGens {
  pub fn clone(&self) -> Self {
    MultiCommitGens {
      n: self.n,
      h: self.h,
@@ -48,7 +46,7 @@ impl MultiCommitGens {
    }
  }

  pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
  pub fn split_at(&self, mid: usize) -> (Self, Self) {
    let (G1, G2) = self.G.split_at(mid);

    (
@@ -66,27 +64,24 @@ impl MultiCommitGens {
  }
}

pub trait Commitments {
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
}
pub struct PedersenCommit;

impl Commitments for Scalar {
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
impl PedersenCommit {
  pub fn commit_scalar<G: CurveGroup>(
    scalar: &G::ScalarField,
    blind: &G::ScalarField,
    gens_n: &MultiCommitGens<G>,
  ) -> G {
    assert_eq!(gens_n.n, 1);
    GroupElement::vartime_multiscalar_mul(&[*self, *blind], &[gens_n.G[0], gens_n.h])
    <G as VariableBaseMSM>::msm_unchecked(&[gens_n.G[0], gens_n.h], &[*scalar, *blind])
  }
}

impl Commitments for Vec<Scalar> {
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert_eq!(gens_n.n, self.len());
    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
  }
}

impl Commitments for [Scalar] {
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert_eq!(gens_n.n, self.len());
    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + gens_n.h.mul(blind.into_repr())
  pub fn commit_slice<G: CurveGroup>(
    scalars: &[G::ScalarField],
    blind: &G::ScalarField,
    gens_n: &MultiCommitGens<G>,
  ) -> G {
    assert_eq!(scalars.len(), gens_n.n);
    <G as VariableBaseMSM>::msm_unchecked(&gens_n.G, scalars) + gens_n.h.mul(blind)
  }
}
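A minimal usage sketch of the new generic Pedersen API (assuming ark-bls12-377's G1Projective; the label and sizes are illustrative):

use ark_bls12_377::{Fr, G1Projective as G1};
use ark_std::UniformRand;

fn pedersen_example() {
  let mut rng = ark_std::test_rng();
  // n = 4 vector generators plus the blinding generator h, derived from the label.
  let gens = MultiCommitGens::<G1>::new(4, b"example-gens");
  let scalars: Vec<Fr> = (0..4).map(|_| Fr::rand(&mut rng)).collect();
  let blind = Fr::rand(&mut rng);
  // C = <scalars, G> + blind * h
  let _commitment: G1 = PedersenCommit::commit_slice(&scalars, &blind, &gens);
}

Note the design change in generator derivation: rather than rejection-sampling curve points from sponge bytes (which needed curve-specific from_random_bytes and cofactor clearing), the sponge output now seeds a StdRng from which affine points are sampled, keeping the generators deterministic in the label while working for any CurveGroup.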
@@ -1,102 +1,85 @@
use std::{borrow::Borrow, vec};
use ark_ec::pairing::Pairing;
use std::borrow::Borrow;

use super::scalar::Scalar;
use crate::{
  group::Fq,
  math::Math,
  sparse_mlpoly::{SparsePolyEntry, SparsePolynomial},
  unipoly::UniPoly,
};
use ark_bls12_377::{constraints::PairingVar as IV, Bls12_377 as I, Fr};
use ark_crypto_primitives::{
  snark::BooleanInputVar, CircuitSpecificSetupSNARK, SNARKGadget, SNARK,
};

use ark_ff::{BitIteratorLE, PrimeField, Zero};
use ark_groth16::{
  constraints::{Groth16VerifierGadget, PreparedVerifyingKeyVar, ProofVar},
  Groth16, PreparedVerifyingKey, Proof as GrothProof,
};
use ark_ff::PrimeField;

use ark_crypto_primitives::sponge::{
  constraints::CryptographicSpongeVar,
  poseidon::{constraints::PoseidonSpongeVar, PoseidonConfig},
};
use ark_poly_commit::multilinear_pc::data_structures::Commitment;
use ark_r1cs_std::{
  alloc::{AllocVar, AllocationMode},
  fields::fp::FpVar,
  prelude::{Boolean, EqGadget, FieldVar},
  prelude::{EqGadget, FieldVar},
};
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, Namespace, SynthesisError};
use ark_sponge::{
  constraints::CryptographicSpongeVar,
  poseidon::{constraints::PoseidonSpongeVar, PoseidonParameters},
};
use rand::{CryptoRng, Rng};

pub struct PoseidonTranscripVar {
  pub cs: ConstraintSystemRef<Fr>,
  pub sponge: PoseidonSpongeVar<Fr>,
  pub params: PoseidonParameters<Fr>,
pub struct PoseidonTranscripVar<F>
where
  F: PrimeField,
{
  pub cs: ConstraintSystemRef<F>,
  pub sponge: PoseidonSpongeVar<F>,
}

impl PoseidonTranscripVar {
  fn new(
    cs: ConstraintSystemRef<Fr>,
    params: &PoseidonParameters<Fr>,
    challenge: Option<Fr>,
  ) -> Self {
impl<F> PoseidonTranscripVar<F>
where
  F: PrimeField,
{
  fn new(cs: ConstraintSystemRef<F>, params: &PoseidonConfig<F>, c_var: FpVar<F>) -> Self {
    let mut sponge = PoseidonSpongeVar::new(cs.clone(), params);

    if let Some(c) = challenge {
      let c_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(c)).unwrap();
      sponge.absorb(&c_var).unwrap();

    Self { cs, sponge }
    }

    Self {
      cs,
      sponge,
      params: params.clone(),
    }
  }

  fn append(&mut self, input: &FpVar<Fr>) -> Result<(), SynthesisError> {
  fn append(&mut self, input: &FpVar<F>) -> Result<(), SynthesisError> {
    self.sponge.absorb(&input)
  }

  fn append_vector(&mut self, input_vec: &[FpVar<Fr>]) -> Result<(), SynthesisError> {
  fn append_vector(&mut self, input_vec: &[FpVar<F>]) -> Result<(), SynthesisError> {
    for input in input_vec.iter() {
      self.append(input)?;
    }
    Ok(())
  }

  fn challenge(&mut self) -> Result<FpVar<Fr>, SynthesisError> {
    let c_var = self.sponge.squeeze_field_elements(1).unwrap().remove(0);

    Ok(c_var)
  fn challenge(&mut self) -> Result<FpVar<F>, SynthesisError> {
    Ok(self.sponge.squeeze_field_elements(1).unwrap().remove(0))
  }

  fn challenge_vector(&mut self, len: usize) -> Result<Vec<FpVar<Fr>>, SynthesisError> {
  fn challenge_scalar_vec(&mut self, len: usize) -> Result<Vec<FpVar<F>>, SynthesisError> {
    let c_vars = self.sponge.squeeze_field_elements(len).unwrap();

    Ok(c_vars)
  }
}

/// Univariate polynomial in constraint system
#[derive(Clone)]
pub struct UniPolyVar {
  pub coeffs: Vec<FpVar<Fr>>,
pub struct UniPolyVar<F: PrimeField> {
  pub coeffs: Vec<FpVar<F>>,
}

impl AllocVar<UniPoly, Fr> for UniPolyVar {
  fn new_variable<T: Borrow<UniPoly>>(
    cs: impl Into<Namespace<Fr>>,
impl<F: PrimeField> AllocVar<UniPoly<F>, F> for UniPolyVar<F> {
  fn new_variable<T: Borrow<UniPoly<F>>>(
    cs: impl Into<Namespace<F>>,
    f: impl FnOnce() -> Result<T, SynthesisError>,
    mode: AllocationMode,
  ) -> Result<Self, SynthesisError> {
    f().and_then(|c| {
      let cs = cs.into();
      let cp: &UniPoly = c.borrow();
      let cp: &UniPoly<F> = c.borrow();
      let mut coeffs_var = Vec::new();
      for coeff in cp.coeffs.iter() {
        let coeff_var = FpVar::<Fr>::new_variable(cs.clone(), || Ok(coeff), mode)?;
        let coeff_var = FpVar::<F>::new_variable(cs.clone(), || Ok(coeff), mode)?;
        coeffs_var.push(coeff_var);
      }
      Ok(Self { coeffs: coeffs_var })
@@ -104,12 +87,12 @@ impl AllocVar<UniPoly, Fr> for UniPolyVar {
  }
}

impl UniPolyVar {
  pub fn eval_at_zero(&self) -> FpVar<Fr> {
impl<F: PrimeField> UniPolyVar<F> {
  pub fn eval_at_zero(&self) -> FpVar<F> {
    self.coeffs[0].clone()
  }

  pub fn eval_at_one(&self) -> FpVar<Fr> {
  pub fn eval_at_one(&self) -> FpVar<F> {
    let mut res = self.coeffs[0].clone();
    for i in 1..self.coeffs.len() {
      res = &res + &self.coeffs[i];
@@ -117,8 +100,8 @@ impl UniPolyVar {
    res
  }

  // mul without reduce
  pub fn evaluate(&self, r: &FpVar<Fr>) -> FpVar<Fr> {
  // TODO check if mul without reduce can help
  pub fn evaluate(&self, r: &FpVar<F>) -> FpVar<F> {
    let mut eval = self.coeffs[0].clone();
    let mut power = r.clone();

@@ -130,20 +113,21 @@ impl UniPolyVar {
  }
}

/// Circuit gadget that implements the sumcheck verifier
#[derive(Clone)]
pub struct SumcheckVerificationCircuit {
  pub polys: Vec<UniPoly>,
pub struct SumcheckVerificationCircuit<F: PrimeField> {
  pub polys: Vec<UniPoly<F>>,
}

impl SumcheckVerificationCircuit {
impl<F: PrimeField> SumcheckVerificationCircuit<F> {
  fn verifiy_sumcheck(
    &self,
    poly_vars: &[UniPolyVar],
    claim_var: &FpVar<Fr>,
    transcript_var: &mut PoseidonTranscripVar,
  ) -> Result<(FpVar<Fr>, Vec<FpVar<Fr>>), SynthesisError> {
    poly_vars: &[UniPolyVar<F>],
    claim_var: &FpVar<F>,
    transcript_var: &mut PoseidonTranscripVar<F>,
  ) -> Result<(FpVar<F>, Vec<FpVar<F>>), SynthesisError> {
    let mut e_var = claim_var.clone();
    let mut r_vars: Vec<FpVar<Fr>> = Vec::new();
    let mut r_vars: Vec<FpVar<F>> = Vec::new();

    for (poly_var, _poly) in poly_vars.iter().zip(self.polys.iter()) {
      let res = poly_var.eval_at_one() + poly_var.eval_at_zero();
@@ -159,21 +143,21 @@ impl SumcheckVerificationCircuit {
  }
}
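The gadget above mirrors the native sumcheck verifier: each round checks p_i(0) + p_i(1) = e, squeezes the round challenge r_i from the transcript, and folds the claim into e = p_i(r_i). A minimal native-Rust sketch of that loop (generic field and Horner evaluation; placeholders, not this crate's API):

fn verify_sumcheck_rounds<F: ark_ff::PrimeField>(
  mut e: F,         // running claim
  polys: &[Vec<F>], // coefficient vectors of the round polynomials
  challenges: &[F], // transcript challenges r_1, ..., r_m
) -> bool {
  // Horner evaluation of a univariate polynomial at x.
  let eval = |coeffs: &[F], x: F| coeffs.iter().rev().fold(F::zero(), |acc, c| acc * x + c);
  for (p, &r) in polys.iter().zip(challenges) {
    // Round check: p(0) + p(1) must equal the running claim.
    if eval(p, F::zero()) + eval(p, F::one()) != e {
      return false;
    }
    // Fold the claim for the next round.
    e = eval(p, r);
  }
  true
}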
#[derive(Clone)]
pub struct SparsePolyEntryVar {
pub struct SparsePolyEntryVar<F: PrimeField> {
  idx: usize,
  val_var: FpVar<Fr>,
  val_var: FpVar<F>,
}

impl AllocVar<SparsePolyEntry, Fr> for SparsePolyEntryVar {
  fn new_variable<T: Borrow<SparsePolyEntry>>(
    cs: impl Into<Namespace<Fr>>,
impl<F: PrimeField> AllocVar<SparsePolyEntry<F>, F> for SparsePolyEntryVar<F> {
  fn new_variable<T: Borrow<SparsePolyEntry<F>>>(
    cs: impl Into<Namespace<F>>,
    f: impl FnOnce() -> Result<T, SynthesisError>,
    _mode: AllocationMode,
  ) -> Result<Self, SynthesisError> {
    f().and_then(|s| {
      let cs = cs.into();
      let spe: &SparsePolyEntry = s.borrow();
      let val_var = FpVar::<Fr>::new_witness(cs, || Ok(spe.val))?;
      let spe: &SparsePolyEntry<F> = s.borrow();
      let val_var = FpVar::<F>::new_witness(cs, || Ok(spe.val))?;
      Ok(Self {
        idx: spe.idx,
        val_var,
@@ -183,37 +167,33 @@ impl AllocVar<SparsePolyEntry, Fr> for SparsePolyEntryVar {
  }
}

#[derive(Clone)]
pub struct SparsePolynomialVar {
  num_vars: usize,
  Z_var: Vec<SparsePolyEntryVar>,
pub struct SparsePolynomialVar<F: PrimeField> {
  Z_var: Vec<SparsePolyEntryVar<F>>,
}

impl AllocVar<SparsePolynomial, Fr> for SparsePolynomialVar {
  fn new_variable<T: Borrow<SparsePolynomial>>(
    cs: impl Into<Namespace<Fr>>,
impl<F: PrimeField> AllocVar<SparsePolynomial<F>, F> for SparsePolynomialVar<F> {
  fn new_variable<T: Borrow<SparsePolynomial<F>>>(
    cs: impl Into<Namespace<F>>,
    f: impl FnOnce() -> Result<T, SynthesisError>,
    mode: AllocationMode,
  ) -> Result<Self, SynthesisError> {
    f().and_then(|s| {
      let cs = cs.into();
      let sp: &SparsePolynomial = s.borrow();
      let sp: &SparsePolynomial<F> = s.borrow();
      let mut Z_var = Vec::new();
      for spe in sp.Z.iter() {
        let spe_var = SparsePolyEntryVar::new_variable(cs.clone(), || Ok(spe), mode)?;
        Z_var.push(spe_var);
      }
      Ok(Self {
        num_vars: sp.num_vars,
        Z_var,
      })
      Ok(Self { Z_var })
    })
  }
}

impl SparsePolynomialVar {
  fn compute_chi(a: &[bool], r_vars: &[FpVar<Fr>]) -> FpVar<Fr> {
    let mut chi_i_var = FpVar::<Fr>::one();
    let one = FpVar::<Fr>::one();
impl<F: PrimeField> SparsePolynomialVar<F> {
  fn compute_chi(a: &[bool], r_vars: &[FpVar<F>]) -> FpVar<F> {
    let mut chi_i_var = FpVar::<F>::one();
    let one = FpVar::<F>::one();
    for (i, r_var) in r_vars.iter().enumerate() {
      if a[i] {
        chi_i_var *= r_var;
@@ -224,8 +204,8 @@ impl SparsePolynomialVar {
    chi_i_var
  }

  pub fn evaluate(&self, r_var: &[FpVar<Fr>]) -> FpVar<Fr> {
    let mut sum = FpVar::<Fr>::zero();
  pub fn evaluate(&self, r_var: &[FpVar<F>]) -> FpVar<F> {
    let mut sum = FpVar::<F>::zero();
    for spe_var in self.Z_var.iter() {
      // potential problem
      let bits = &spe_var.idx.get_bits(r_var.len());
@@ -236,25 +216,26 @@ impl SparsePolynomialVar {
}

#[derive(Clone)]
pub struct R1CSVerificationCircuit {
pub struct R1CSVerificationCircuit<F: PrimeField> {
  pub num_vars: usize,
  pub num_cons: usize,
  pub input: Vec<Fr>,
  pub input_as_sparse_poly: SparsePolynomial,
  pub evals: (Fr, Fr, Fr),
  pub params: PoseidonParameters<Fr>,
  pub prev_challenge: Fr,
  pub claims_phase2: (Scalar, Scalar, Scalar, Scalar),
  pub eval_vars_at_ry: Fr,
  pub sc_phase1: SumcheckVerificationCircuit,
  pub sc_phase2: SumcheckVerificationCircuit,
  pub input: Vec<F>,
  pub input_as_sparse_poly: SparsePolynomial<F>,
  pub evals: (F, F, F),
  pub params: PoseidonConfig<F>,
  pub prev_challenge: F,
  pub claims_phase2: (F, F, F, F),
  pub eval_vars_at_ry: F,
  pub sc_phase1: SumcheckVerificationCircuit<F>,
  pub sc_phase2: SumcheckVerificationCircuit<F>,
  // The point on which the polynomial was evaluated by the prover.
  pub claimed_ry: Vec<Scalar>,
  pub claimed_transcript_sat_state: Scalar,
  pub claimed_rx: Vec<F>,
  pub claimed_ry: Vec<F>,
  pub claimed_transcript_sat_state: F,
}

impl R1CSVerificationCircuit {
  fn new(config: &VerifierConfig) -> Self {
impl<F: PrimeField> R1CSVerificationCircuit<F> {
  pub fn new<E: Pairing<ScalarField = F>>(config: &VerifierConfig<E>) -> Self {
    Self {
      num_vars: config.num_vars,
      num_cons: config.num_cons,
@@ -271,77 +252,84 @@ impl R1CSVerificationCircuit {
      sc_phase2: SumcheckVerificationCircuit {
        polys: config.polys_sc2.clone(),
      },
      claimed_rx: config.rx.clone(),
      claimed_ry: config.ry.clone(),
      claimed_transcript_sat_state: config.transcript_sat_state,
    }
  }
}
impl ConstraintSynthesizer<Fr> for R1CSVerificationCircuit {
  fn generate_constraints(self, cs: ConstraintSystemRef<Fr>) -> ark_relations::r1cs::Result<()> {
/// This section implements the sumcheck verification part of Spartan
impl<F: PrimeField> ConstraintSynthesizer<F> for R1CSVerificationCircuit<F> {
  fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> ark_relations::r1cs::Result<()> {
    let initial_challenge_var = FpVar::<F>::new_input(cs.clone(), || Ok(self.prev_challenge))?;
    let mut transcript_var =
      PoseidonTranscripVar::new(cs.clone(), &self.params, Some(self.prev_challenge));
      PoseidonTranscripVar::new(cs.clone(), &self.params, initial_challenge_var);

    let poly_sc1_vars = self
      .sc_phase1
      .polys
      .iter()
      .map(|p| {
        UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap()
      })
      .collect::<Vec<UniPolyVar>>();
      .map(|p| UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap())
      .collect::<Vec<UniPolyVar<_>>>();

    let poly_sc2_vars = self
      .sc_phase2
      .polys
      .iter()
      .map(|p| {
        UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap()
      })
      .collect::<Vec<UniPolyVar>>();
      .map(|p| UniPolyVar::new_variable(cs.clone(), || Ok(p), AllocationMode::Witness).unwrap())
      .collect::<Vec<UniPolyVar<_>>>();

    let input_vars = self
      .input
      .iter()
      .map(|i| {
        FpVar::<Fr>::new_variable(cs.clone(), || Ok(i), AllocationMode::Witness).unwrap()
      })
      .collect::<Vec<FpVar<Fr>>>();
      .map(|i| FpVar::<F>::new_variable(cs.clone(), || Ok(i), AllocationMode::Input).unwrap())
      .collect::<Vec<FpVar<F>>>();

    let claimed_rx_vars = self
      .claimed_rx
      .iter()
      .map(|r| FpVar::<F>::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap())
      .collect::<Vec<FpVar<F>>>();

    let claimed_ry_vars = self
      .claimed_ry
      .iter()
      .map(|r| {
        FpVar::<Fr>::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap()
      })
      .collect::<Vec<FpVar<Fr>>>();
      .map(|r| FpVar::<F>::new_variable(cs.clone(), || Ok(r), AllocationMode::Input).unwrap())
      .collect::<Vec<FpVar<F>>>();

    transcript_var.append_vector(&input_vars)?;

    let num_rounds_x = self.num_cons.log_2();
    let _num_rounds_y = (2 * self.num_vars).log_2();

    let tau_vars = transcript_var.challenge_vector(num_rounds_x)?;
    let tau_vars = transcript_var.challenge_scalar_vec(num_rounds_x)?;

    let claim_phase1_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(Fr::zero()))?;
    let claim_phase1_var = FpVar::<F>::new_witness(cs.clone(), || Ok(F::zero()))?;

    let (claim_post_phase1_var, rx_var) = self.sc_phase1.verifiy_sumcheck(
      &poly_sc1_vars,
      &claim_phase1_var,
      &mut transcript_var,
    )?;
    let (claim_post_phase1_var, rx_var) =
      self
        .sc_phase1
        .verifiy_sumcheck(&poly_sc1_vars, &claim_phase1_var, &mut transcript_var)?;

    // The prover sends (rx, ry) to the verifier for the evaluation proof so
    // the constraints need to ensure it is indeed the result from the first
    // round of sumcheck verification.
    for (i, r) in claimed_rx_vars.iter().enumerate() {
      rx_var[i].enforce_equal(r)?;
    }

    let (Az_claim, Bz_claim, Cz_claim, prod_Az_Bz_claims) = &self.claims_phase2;

    let Az_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(Az_claim))?;
    let Bz_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(Bz_claim))?;
    let Cz_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(Cz_claim))?;
    let prod_Az_Bz_claim_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(prod_Az_Bz_claims))?;
    let one = FpVar::<Fr>::one();
    let prod_vars: Vec<FpVar<Fr>> = (0..rx_var.len())
    let Az_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(Az_claim))?;
    let Bz_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(Bz_claim))?;
    let Cz_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(Cz_claim))?;
    let prod_Az_Bz_claim_var = FpVar::<F>::new_witness(cs.clone(), || Ok(prod_Az_Bz_claims))?;
    let one = FpVar::<F>::one();
    let prod_vars: Vec<FpVar<F>> = (0..rx_var.len())
      .map(|i| (&rx_var[i] * &tau_vars[i]) + (&one - &rx_var[i]) * (&one - &tau_vars[i]))
      .collect();
    let mut taus_bound_rx_var = FpVar::<Fr>::one();
    let mut taus_bound_rx_var = FpVar::<F>::one();

    for p_var in prod_vars.iter() {
      taus_bound_rx_var *= p_var;
@@ -359,11 +347,10 @@ impl ConstraintSynthesizer<Fr> for R1CSVerificationCircuit {
    let claim_phase2_var =
      &r_A_var * &Az_claim_var + &r_B_var * &Bz_claim_var + &r_C_var * &Cz_claim_var;

    let (claim_post_phase2_var, ry_var) = self.sc_phase2.verifiy_sumcheck(
      &poly_sc2_vars,
      &claim_phase2_var,
      &mut transcript_var,
    )?;
    let (claim_post_phase2_var, ry_var) =
      self
        .sc_phase2
        .verifiy_sumcheck(&poly_sc2_vars, &claim_phase2_var, &mut transcript_var)?;

    // Because the verifier checks the commitment opening on point ry outside
    // the circuit, the prover needs to send ry to the verifier (making the
@@ -372,6 +359,7 @@ impl ConstraintSynthesizer<Fr> for R1CSVerificationCircuit {
    // claimed point, coming from the prover, is actually the point derived
    // inside the circuit. These additional checks will be removed
    // when the commitment verification is done inside the circuit.
    // Moreover, (rx, ry) will be used in the evaluation proof.
    for (i, r) in claimed_ry_vars.iter().enumerate() {
      ry_var[i].enforce_equal(r)?;
    }
@@ -384,105 +372,108 @@ impl ConstraintSynthesizer<Fr> for R1CSVerificationCircuit {

    let poly_input_eval_var = input_as_sparse_poly_var.evaluate(&ry_var[1..]);

    let eval_vars_at_ry_var = FpVar::<Fr>::new_input(cs.clone(), || Ok(&self.eval_vars_at_ry))?;
    let eval_vars_at_ry_var = FpVar::<F>::new_input(cs.clone(), || Ok(&self.eval_vars_at_ry))?;

    let eval_Z_at_ry_var = (FpVar::<Fr>::one() - &ry_var[0]) * &eval_vars_at_ry_var
      + &ry_var[0] * &poly_input_eval_var;
    let eval_Z_at_ry_var =
      (FpVar::<F>::one() - &ry_var[0]) * &eval_vars_at_ry_var + &ry_var[0] * &poly_input_eval_var;

    let (eval_A_r, eval_B_r, eval_C_r) = self.evals;

    let eval_A_r_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(eval_A_r))?;
    let eval_B_r_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(eval_B_r))?;
    let eval_C_r_var = FpVar::<Fr>::new_witness(cs.clone(), || Ok(eval_C_r))?;
    let eval_A_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_A_r))?;
    let eval_B_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_B_r))?;
    let eval_C_r_var = FpVar::<F>::new_input(cs.clone(), || Ok(eval_C_r))?;

    let scalar_var =
      &r_A_var * &eval_A_r_var + &r_B_var * &eval_B_r_var + &r_C_var * &eval_C_r_var;
    let scalar_var = &r_A_var * &eval_A_r_var + &r_B_var * &eval_B_r_var + &r_C_var * &eval_C_r_var;

    let expected_claim_post_phase2_var = eval_Z_at_ry_var * scalar_var;
    claim_post_phase2_var.enforce_equal(&expected_claim_post_phase2_var)?;

    let expected_transcript_state_var = transcript_var.challenge()?;
    let claimed_transcript_state_var =
      FpVar::<Fr>::new_input(cs, || Ok(self.claimed_transcript_sat_state))?;
      FpVar::<F>::new_input(cs, || Ok(self.claimed_transcript_sat_state))?;

    // Ensure that the prover and verifier transcript views are consistent at
    // the end of the satisfiability proof.
    expected_transcript_state_var.enforce_equal(&claimed_transcript_state_var)?;

    Ok(())
  }
}

#[derive(Clone)]
pub struct VerifierConfig {
pub struct VerifierConfig<E: Pairing> {
  pub comm: Commitment<E>,
  pub num_vars: usize,
  pub num_cons: usize,
  pub input: Vec<Fr>,
  pub input_as_sparse_poly: SparsePolynomial,
  pub evals: (Fr, Fr, Fr),
  pub params: PoseidonParameters<Fr>,
  pub prev_challenge: Fr,
  pub claims_phase2: (Fr, Fr, Fr, Fr),
  pub eval_vars_at_ry: Fr,
  pub polys_sc1: Vec<UniPoly>,
  pub polys_sc2: Vec<UniPoly>,
  pub ry: Vec<Scalar>,
  pub transcript_sat_state: Scalar,
}
#[derive(Clone)]
pub struct VerifierCircuit {
  pub inner_circuit: R1CSVerificationCircuit,
  pub inner_proof: GrothProof<I>,
  pub inner_vk: PreparedVerifyingKey<I>,
  pub eval_vars_at_ry: Fr,
  pub claims_phase2: (Fr, Fr, Fr, Fr),
  pub ry: Vec<Fr>,
  pub transcript_sat_state: Scalar,
  pub input: Vec<E::ScalarField>,
  pub input_as_sparse_poly: SparsePolynomial<E::ScalarField>,
  pub evals: (E::ScalarField, E::ScalarField, E::ScalarField),
  pub params: PoseidonConfig<E::ScalarField>,
  pub prev_challenge: E::ScalarField,
  pub claims_phase2: (
    E::ScalarField,
    E::ScalarField,
    E::ScalarField,
    E::ScalarField,
  ),
  pub eval_vars_at_ry: E::ScalarField,
  pub polys_sc1: Vec<UniPoly<E::ScalarField>>,
  pub polys_sc2: Vec<UniPoly<E::ScalarField>>,
  pub rx: Vec<E::ScalarField>,
  pub ry: Vec<E::ScalarField>,
  pub transcript_sat_state: E::ScalarField,
}

impl VerifierCircuit {
  pub fn new<R: Rng + CryptoRng>(
    config: &VerifierConfig,
    mut rng: &mut R,
  ) -> Result<Self, SynthesisError> {
    let inner_circuit = R1CSVerificationCircuit::new(config);
    let (pk, vk) = Groth16::<I>::setup(inner_circuit.clone(), &mut rng).unwrap();
    let proof = Groth16::<I>::prove(&pk, inner_circuit.clone(), &mut rng)?;
    let pvk = Groth16::<I>::process_vk(&vk).unwrap();
    Ok(Self {
      inner_circuit,
      inner_proof: proof,
      inner_vk: pvk,
      eval_vars_at_ry: config.eval_vars_at_ry,
      claims_phase2: config.claims_phase2,
      ry: config.ry.clone(),
      transcript_sat_state: config.transcript_sat_state,
    })
  }
}
// Skeleton for the polynomial commitment verification circuit
// #[derive(Clone)]
// pub struct VerifierCircuit {
//   pub inner_circuit: R1CSVerificationCircuit,
//   pub inner_proof: GrothProof<I>,
//   pub inner_vk: PreparedVerifyingKey<I>,
//   pub eval_vars_at_ry: Fr,
//   pub claims_phase2: (Fr, Fr, Fr, Fr),
//   pub ry: Vec<Fr>,
//   pub transcript_sat_state: Scalar,
// }

impl ConstraintSynthesizer<Fq> for VerifierCircuit {
  fn generate_constraints(self, cs: ConstraintSystemRef<Fq>) -> ark_relations::r1cs::Result<()> {
    let proof_var =
      ProofVar::<I, IV>::new_witness(cs.clone(), || Ok(self.inner_proof.clone()))?;
    let (v_A, v_B, v_C, v_AB) = self.claims_phase2;
    let mut pubs = vec![];
    pubs.extend(self.ry);
    pubs.extend(vec![v_A, v_B, v_C, v_AB]);
    pubs.extend(vec![self.eval_vars_at_ry, self.transcript_sat_state]);
// impl VerifierCircuit {
//   pub fn new<R: Rng + CryptoRng>(
//     config: &VerifierConfig,
//     mut rng: &mut R,
//   ) -> Result<Self, SynthesisError> {
//     let inner_circuit = R1CSVerificationCircuit::new(config);
//     let (pk, vk) = Groth16::<I>::setup(inner_circuit.clone(), &mut rng).unwrap();
//     let proof = Groth16::<I>::prove(&pk, inner_circuit.clone(), &mut rng)?;
//     let pvk = Groth16::<I>::process_vk(&vk).unwrap();
//     Ok(Self {
//       inner_circuit,
//       inner_proof: proof,
//       inner_vk: pvk,
//       eval_vars_at_ry: config.eval_vars_at_ry,
//       claims_phase2: config.claims_phase2,
//       ry: config.ry.clone(),
//       transcript_sat_state: config.transcript_sat_state,
//     })
//   }
// }

    let bits = pubs
      .iter()
      .map(|c| {
        let bits: Vec<bool> = BitIteratorLE::new(c.into_repr().as_ref().to_vec()).collect();
        Vec::new_witness(cs.clone(), || Ok(bits))
      })
      .collect::<Result<Vec<_>, _>>()?;
    let input_var = BooleanInputVar::<Fr, Fq>::new(bits);

    let vk_var = PreparedVerifyingKeyVar::new_witness(cs, || Ok(self.inner_vk.clone()))?;
    Groth16VerifierGadget::verify_with_processed_vk(&vk_var, &input_var, &proof_var)?
      .enforce_equal(&Boolean::constant(true))?;
    Ok(())
  }
}
// impl ConstraintSynthesizer<Fq> for VerifierCircuit {
//   fn generate_constraints(self, cs: ConstraintSystemRef<Fq>) -> ark_relations::r1cs::Result<()> {
//     let proof_var = ProofVar::<I, IV>::new_witness(cs.clone(), || Ok(self.inner_proof.clone()))?;
//     let (v_A, v_B, v_C, v_AB) = self.claims_phase2;
//     let mut pubs = vec![];
//     pubs.extend(self.ry);
//     pubs.extend(vec![v_A, v_B, v_C, v_AB]);
//     pubs.extend(vec![self.eval_vars_at_ry, self.transcript_sat_state]);
//     let bits = pubs
//       .iter()
//       .map(|c| {
//         let bits: Vec<bool> = BitIteratorLE::new(c.into_bigint().as_ref().to_vec()).collect();
//         Vec::new_witness(cs.clone(), || Ok(bits))
//       })
//       .collect::<Result<Vec<_>, _>>()?;
//     let input_var = BooleanInputVar::<Fr, Fq>::new(bits);
//     let vk_var = PreparedVerifyingKeyVar::new_witness(cs, || Ok(self.inner_vk.clone()))?;
//     Groth16VerifierGadget::verify_with_processed_vk(&vk_var, &input_var, &proof_var)?
//       .enforce_equal(&Boolean::constant(true))?;
//     Ok(())
//   }
// }
@@ -1,45 +1,36 @@
#![allow(clippy::too_many_arguments)]

use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};

use super::commitments::{Commitments, MultiCommitGens};
use super::commitments::{MultiCommitGens, PedersenCommit};
use super::errors::ProofVerifyError;
use super::group::{
  CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement,
  VartimeMultiscalarMul,
};
use super::math::Math;
use super::nizk::{DotProductProofGens, DotProductProofLog};
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use ark_bls12_377::Bls12_377 as I;
use ark_ff::{One, UniformRand, Zero};
use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::scalar_mul::variable_base::VariableBaseMSM;
use ark_ec::{pairing::Pairing, CurveGroup};
use ark_ff::{PrimeField, Zero};
use ark_poly::MultilinearExtension;
use ark_poly_commit::multilinear_pc::data_structures::{CommitterKey, VerifierKey};
use ark_poly_commit::multilinear_pc::MultilinearPC;
use ark_serialize::*;
use core::ops::Index;
use merlin::Transcript;
use std::ops::{Add, AddAssign, Neg, Sub, SubAssign};

#[cfg(feature = "multicore")]
use rayon::prelude::*;

use std::ops::{Add, AddAssign, Neg, Sub, SubAssign};
// TODO: integrate the DenseMultilinearExtension(and Sparse) https://github.com/arkworks-rs/algebra/tree/master/poly/src/evaluations/multivariate/multilinear from arkworks into Spartan. This requires moving the specific Spartan functionalities in separate traits.
#[derive(Debug, Clone, Eq, PartialEq, Hash, CanonicalDeserialize, CanonicalSerialize)]
pub struct DensePolynomial {
  num_vars: usize, // the number of variables in the multilinear polynomial
  len: usize,
  Z: Vec<Scalar>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
pub struct DensePolynomial<F: PrimeField> {
  pub num_vars: usize, // the number of variables in the multilinear polynomial
  pub len: usize,
  pub Z: Vec<F>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
}

impl MultilinearExtension<Scalar> for DensePolynomial {
impl<F: PrimeField> MultilinearExtension<F> for DensePolynomial<F> {
  fn num_vars(&self) -> usize {
    self.get_num_vars()
  }

  fn evaluate(&self, point: &[Scalar]) -> Option<Scalar> {
  fn evaluate(&self, point: &[F]) -> Option<F> {
    if point.len() == self.num_vars {
      Some(self.evaluate(&point))
    } else {
@@ -48,9 +39,9 @@ impl MultilinearExtension<Scalar> for DensePolynomial {
  }

  fn rand<R: rand::Rng>(num_vars: usize, rng: &mut R) -> Self {
    let evals = (0..(1 << num_vars)).map(|_| Scalar::rand(rng)).collect();
    let evals = (0..(1 << num_vars)).map(|_| F::rand(rng)).collect();
    Self {
      num_vars: num_vars,
      num_vars,
      len: 1 << num_vars,
      Z: evals,
    }
@@ -60,21 +51,21 @@ impl MultilinearExtension<Scalar> for DensePolynomial {
    unimplemented!()
  }

  fn fix_variables(&self, _partial_point: &[Scalar]) -> Self {
  fn fix_variables(&self, _partial_point: &[F]) -> Self {
    unimplemented!()
  }

  fn to_evaluations(&self) -> Vec<Scalar> {
  fn to_evaluations(&self) -> Vec<F> {
    self.Z.to_vec()
  }
}

impl Zero for DensePolynomial {
impl<F: PrimeField> Zero for DensePolynomial<F> {
  fn zero() -> Self {
    Self {
      num_vars: 0,
      len: 1,
      Z: vec![Scalar::zero()],
      Z: vec![F::zero()],
    }
  }

@@ -83,8 +74,8 @@ impl Zero for DensePolynomial {
  }
}

impl Add for DensePolynomial {
  type Output = DensePolynomial;
impl<F: PrimeField> Add for DensePolynomial<F> {
  type Output = DensePolynomial<F>;
  fn add(self, other: Self) -> Self {
    &self + &other
  }
@@ -92,10 +83,10 @@ impl Add for DensePolynomial {

// function needed because the result might have a different lifetime than the
// operands
impl<'a, 'b> Add<&'a DensePolynomial> for &'b DensePolynomial {
  type Output = DensePolynomial;
impl<'a, 'b, F: PrimeField> Add<&'a DensePolynomial<F>> for &'b DensePolynomial<F> {
  type Output = DensePolynomial<F>;

  fn add(self, other: &'a DensePolynomial) -> Self::Output {
  fn add(self, other: &'a DensePolynomial<F>) -> Self::Output {
    if other.is_zero() {
      return self.clone();
    }
@@ -104,7 +95,7 @@ impl<'a, 'b> Add<&'a DensePolynomial> for &'b DensePolynomial {
    }
    assert_eq!(self.num_vars, other.num_vars);

    let res: Vec<Scalar> = self
    let res = self
      .Z
      .iter()
      .zip(other.Z.iter())
@@ -118,20 +109,20 @@ impl<'a, 'b> Add<&'a DensePolynomial> for &'b DensePolynomial {
  }
}

impl AddAssign for DensePolynomial {
impl<F: PrimeField> AddAssign for DensePolynomial<F> {
  fn add_assign(&mut self, other: Self) {
    *self = &*self + &other;
  }
}

impl<'a, 'b> AddAssign<&'a DensePolynomial> for DensePolynomial {
  fn add_assign(&mut self, other: &'a DensePolynomial) {
impl<'a, 'b, F: PrimeField> AddAssign<&'a DensePolynomial<F>> for DensePolynomial<F> {
  fn add_assign(&mut self, other: &'a DensePolynomial<F>) {
    *self = &*self + other;
  }
}

impl<'a, 'b> AddAssign<(Scalar, &'a DensePolynomial)> for DensePolynomial {
  fn add_assign(&mut self, (scalar, other): (Scalar, &'a DensePolynomial)) {
impl<'a, 'b, F: PrimeField> AddAssign<(F, &'a DensePolynomial<F>)> for DensePolynomial<F> {
  fn add_assign(&mut self, (scalar, other): (F, &'a DensePolynomial<F>)) {
    let other = Self {
      num_vars: other.num_vars,
      len: 1 << other.num_vars,
@@ -141,8 +132,8 @@ impl<'a, 'b> AddAssign<(Scalar, &'a DensePolynomial)> for DensePolynomial {
  }
}

impl Neg for DensePolynomial {
  type Output = DensePolynomial;
impl<F: PrimeField> Neg for DensePolynomial<F> {
  type Output = DensePolynomial<F>;

  fn neg(self) -> Self::Output {
    Self::Output {
@@ -153,92 +144,94 @@ impl Neg for DensePolynomial {
  }
}

impl Sub for DensePolynomial {
  type Output = DensePolynomial;
impl<F: PrimeField> Sub for DensePolynomial<F> {
  type Output = DensePolynomial<F>;

  fn sub(self, other: Self) -> Self::Output {
    &self - &other
  }
}

impl<'a, 'b> Sub<&'a DensePolynomial> for &'b DensePolynomial {
  type Output = DensePolynomial;
impl<'a, 'b, F: PrimeField> Sub<&'a DensePolynomial<F>> for &'b DensePolynomial<F> {
  type Output = DensePolynomial<F>;

  fn sub(self, other: &'a DensePolynomial) -> Self::Output {
  fn sub(self, other: &'a DensePolynomial<F>) -> Self::Output {
    self + &other.clone().neg()
  }
}

impl SubAssign for DensePolynomial {
impl<F: PrimeField> SubAssign for DensePolynomial<F> {
  fn sub_assign(&mut self, other: Self) {
    *self = &*self - &other;
  }
}

impl<'a, 'b> SubAssign<&'a DensePolynomial> for DensePolynomial {
  fn sub_assign(&mut self, other: &'a DensePolynomial) {
impl<'a, 'b, F: PrimeField> SubAssign<&'a DensePolynomial<F>> for DensePolynomial<F> {
  fn sub_assign(&mut self, other: &'a DensePolynomial<F>) {
    *self = &*self - other;
  }
}

#[derive(Clone)]
pub struct PolyCommitmentGens {
  pub gens: DotProductProofGens,
  pub ck: CommitterKey<I>,
  pub vk: VerifierKey<I>,
pub struct PolyCommitmentGens<E: Pairing> {
  pub gens: DotProductProofGens<E::G1>,
  pub ck: CommitterKey<E>,
  pub vk: VerifierKey<E>,
}

impl PolyCommitmentGens {
impl<E: Pairing> PolyCommitmentGens<E> {
  // num vars is the number of variables in the multilinear polynomial
  // this gives the maximum degree bound
  pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens {
    let (_left, right) = EqPolynomial::compute_factored_lens(num_vars);
  pub fn setup(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens<E> {
    let (_left, right) = EqPolynomial::<E::ScalarField>::compute_factored_lens(num_vars);
    let gens = DotProductProofGens::new(right.pow2(), label);

    let odd = if num_vars % 2 == 1 { 1 } else { 0 };
    // Generates the SRS and trims it based on the number of variables in the
    // multilinear polynomial.
    // If num_vars is odd, a crs of size num_vars/2 + 1 will be needed for the
    // polynomial commitment.
    let mut rng = ark_std::test_rng();
    let pst_gens = MultilinearPC::<I>::setup(num_vars, &mut rng);
    let (ck, vk) = MultilinearPC::<I>::trim(&pst_gens, num_vars);
    let pst_gens = MultilinearPC::<E>::setup(num_vars / 2 + odd, &mut rng);
    let (ck, vk) = MultilinearPC::<E>::trim(&pst_gens, num_vars / 2 + odd);

    PolyCommitmentGens { gens, ck, vk }
  }
}
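A quick worked check of the odd case handled above: for num_vars = 5, the square-root split views the 2^5 evaluations as a 2^2 x 2^3 matrix, so the PST key must cover num_vars / 2 + 1 = 3 variables (the larger half); for even num_vars the two halves coincide. A standalone sketch of that arithmetic:

#[test]
fn pst_key_vars_matches_sqrt_split() {
  fn pst_key_vars(num_vars: usize) -> usize {
    let odd = if num_vars % 2 == 1 { 1 } else { 0 };
    num_vars / 2 + odd
  }
  assert_eq!(pst_key_vars(4), 2); // even: 2^2 x 2^2 evaluation matrix
  assert_eq!(pst_key_vars(5), 3); // odd: 2^2 x 2^3 matrix, key covers 3 vars
}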
pub struct PolyCommitmentBlinds {
  blinds: Vec<Scalar>,
pub struct PolyCommitmentBlinds<F: PrimeField> {
  blinds: Vec<F>,
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct PolyCommitment {
  C: Vec<CompressedGroup>,
pub struct PolyCommitment<G: CurveGroup> {
  C: Vec<G>,
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ConstPolyCommitment {
  C: CompressedGroup,
pub struct ConstPolyCommitment<G: CurveGroup> {
  C: G,
}

pub struct EqPolynomial {
  r: Vec<Scalar>,
pub struct EqPolynomial<S: PrimeField> {
  r: Vec<S>,
}

impl EqPolynomial {
  pub fn new(r: Vec<Scalar>) -> Self {
impl<F: PrimeField> EqPolynomial<F> {
  pub fn new(r: Vec<F>) -> Self {
    EqPolynomial { r }
  }

  pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
  pub fn evaluate(&self, rx: &[F]) -> F {
    assert_eq!(self.r.len(), rx.len());
    (0..rx.len())
      .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i]))
      .map(|i| self.r[i] * rx[i] + (F::one() - self.r[i]) * (F::one() - rx[i]))
      .product()
  }

  pub fn evals(&self) -> Vec<Scalar> {
  pub fn evals(&self) -> Vec<F> {
    let ell = self.r.len();

    let mut evals: Vec<Scalar> = vec![Scalar::one(); ell.pow2()];
    let mut evals: Vec<F> = vec![F::one(); ell.pow2()];
    let mut size = 1;
    for j in 0..ell {
      // in each iteration, we double the size of chis
@@ -260,9 +253,9 @@ impl EqPolynomial {
    (ell / 2, ell - ell / 2)
  }

  pub fn compute_factored_evals(&self) -> (Vec<Scalar>, Vec<Scalar>) {
  pub fn compute_factored_evals(&self) -> (Vec<F>, Vec<F>) {
    let ell = self.r.len();
    let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell);
    let (left_num_vars, _right_num_vars) = EqPolynomial::<F>::compute_factored_lens(ell);

    let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals();
    let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals();
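evals is the standard eq-table construction: starting from chi = [1], each round j replaces every entry c with the pair c * (1 - r_j), c * r_j, so after ell rounds chi[x] = eq(x, r) for all 2^ell Boolean points x. A minimal out-of-place sketch of the doubling (the method above performs the same doubling in place; the index order may differ):

fn eq_evals<F: ark_ff::PrimeField>(r: &[F]) -> Vec<F> {
  let mut chis = vec![F::one()];
  for &rj in r {
    let mut next = Vec::with_capacity(chis.len() * 2);
    for c in &chis {
      next.push(*c * (F::one() - rj)); // this variable of x is 0
      next.push(*c * rj);              // this variable of x is 1
    }
    chis = next;
  }
  chis // chis[x] = prod_j (x_j r_j + (1 - x_j)(1 - r_j))
}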
@@ -280,17 +273,17 @@ impl IdentityPolynomial {
    IdentityPolynomial { size_point }
  }

  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
  pub fn evaluate<F: PrimeField>(&self, r: &[F]) -> F {
    let len = r.len();
    assert_eq!(len, self.size_point);
    (0..len)
      .map(|i| Scalar::from((len - i - 1).pow2() as u64) * r[i])
      .map(|i| F::from((len - i - 1).pow2() as u64) * r[i])
      .sum()
  }
}

impl DensePolynomial {
  pub fn new(Z: Vec<Scalar>) -> Self {
impl<F: PrimeField> DensePolynomial<F> {
  pub fn new(Z: Vec<F>) -> Self {
    DensePolynomial {
      num_vars: Z.len().log_2(),
      len: Z.len(),
@@ -306,11 +299,11 @@ impl DensePolynomial {
    self.len
  }

  pub fn clone(&self) -> DensePolynomial {
  pub fn clone(&self) -> Self {
    DensePolynomial::new(self.Z[0..self.len].to_vec())
  }

  pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) {
  pub fn split(&self, idx: usize) -> (Self, Self) {
    assert!(idx < self.len());
    (
      DensePolynomial::new(self.Z[..idx].to_vec()),
@@ -319,23 +312,27 @@ impl DensePolynomial {
  }

  #[cfg(feature = "multicore")]
  fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
  fn commit_inner<G>(&self, blinds: &[F], gens: &MultiCommitGens<G>) -> PolyCommitment<G>
  where
    G: CurveGroup<ScalarField = F>,
  {
    let L_size = blinds.len();
    let R_size = self.Z.len() / L_size;
    assert_eq!(L_size * R_size, self.Z.len());
    let C = (0..L_size)
      .into_par_iter()
      .map(|i| {
        self.Z[R_size * i..R_size * (i + 1)]
          .commit(&blinds[i], gens)
          .compress()
        PedersenCommit::commit_slice(&self.Z[R_size * i..R_size * (i + 1)], &blinds[i], gens)
      })
      .collect();
    PolyCommitment { C }
  }

  #[cfg(not(feature = "multicore"))]
  fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
  // (signature corrected to match the multicore path; the diff had a stale
  // `&[Scalar]` / `CurveGroup<Affine = F>` bound here)
  fn commit_inner<G>(&self, blinds: &[F], gens: &MultiCommitGens<G>) -> PolyCommitment<G>
  where
    G: CurveGroup<ScalarField = F>,
  {
    let L_size = blinds.len();
    let R_size = self.Z.len() / L_size;
    assert_eq!(L_size * R_size, self.Z.len());
@@ -349,36 +346,39 @@ impl DensePolynomial {
    PolyCommitment { C }
  }

  pub fn commit(
  pub fn commit<E>(
    &self,
    gens: &PolyCommitmentGens,
    random_tape: Option<&mut RandomTape>,
  ) -> (PolyCommitment, PolyCommitmentBlinds) {
    gens: &PolyCommitmentGens<E>,
    random_blinds: bool,
  ) -> (PolyCommitment<E::G1>, PolyCommitmentBlinds<E::ScalarField>)
  where
    E: Pairing<ScalarField = F>,
  {
    let n = self.Z.len();
    let ell = self.get_num_vars();
    assert_eq!(n, ell.pow2());

    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell);
    let (left_num_vars, right_num_vars) =
      EqPolynomial::<E::ScalarField>::compute_factored_lens(ell);
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    assert_eq!(L_size * R_size, n);

    let blinds = if let Some(t) = random_tape {
      PolyCommitmentBlinds {
        blinds: t.random_vector(b"poly_blinds", L_size),
      }
    let blinds = PolyCommitmentBlinds {
      blinds: if random_blinds {
        (0..L_size)
          .map(|_| F::rand(&mut rand::thread_rng()))
          .collect::<Vec<_>>()
      } else {
      PolyCommitmentBlinds {
        blinds: vec![Scalar::zero(); L_size],
      }
        (0..L_size).map(|_| F::zero()).collect::<Vec<_>>()
      },
    };

    (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds)
  }

  pub fn bound(&self, L: &[Scalar]) -> Vec<Scalar> {
  pub fn bound(&self, L: &[F]) -> Vec<F> {
    let (left_num_vars, right_num_vars) =
      EqPolynomial::compute_factored_lens(self.get_num_vars());
      EqPolynomial::<F>::compute_factored_lens(self.get_num_vars());
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    (0..R_size)
@@ -386,7 +386,7 @@ impl DensePolynomial {
    .collect()
  }

  pub fn bound_poly_var_top(&mut self, r: &Scalar) {
  pub fn bound_poly_var_top(&mut self, r: &F) {
    let n = self.len() / 2;
    for i in 0..n {
      self.Z[i] = self.Z[i] + (self.Z[i + n] - self.Z[i]) * r;
@@ -395,7 +395,7 @@ impl DensePolynomial {
    self.len = n;
  }

  pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
  pub fn bound_poly_var_bot(&mut self, r: &F) {
    let n = self.len() / 2;
    for i in 0..n {
      self.Z[i] = self.Z[2 * i] + (self.Z[2 * i + 1] - self.Z[2 * i]) * r;
@@ -405,19 +405,19 @@ impl DensePolynomial {
  }

  // returns Z(r) in O(n) time
  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
  pub fn evaluate(&self, r: &[F]) -> F {
    // r must have a value for each variable
    assert_eq!(r.len(), self.get_num_vars());
    let chis = EqPolynomial::new(r.to_vec()).evals();
    assert_eq!(chis.len(), self.Z.len());
    DotProductProofLog::compute_dotproduct(&self.Z, &chis)
    crate::dot_product(&self.Z, &chis)
  }
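In formula form, evaluate computes the multilinear extension

  \tilde{Z}(r) = \sum_{x \in \{0,1\}^{\ell}} Z(x) \cdot \mathrm{eq}(x, r),
  \qquad
  \mathrm{eq}(x, r) = \prod_{j=1}^{\ell} \bigl( x_j r_j + (1 - x_j)(1 - r_j) \bigr),

so a single dot product of the evaluation table Z with the eq table realizes the O(n) bound claimed in the comment.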

  fn vec(&self) -> &Vec<Scalar> {
  fn vec(&self) -> &Vec<F> {
    &self.Z
  }

  pub fn extend(&mut self, other: &DensePolynomial) {
  pub fn extend(&mut self, other: &DensePolynomial<F>) {
    // TODO: allow extension even when some vars are bound
    assert_eq!(self.Z.len(), self.len);
    let other_vec = other.vec();
@@ -428,17 +428,17 @@ impl DensePolynomial {
    assert_eq!(self.Z.len(), self.len);
  }

  pub fn merge<'a, I>(polys: I) -> DensePolynomial
  pub fn merge<'a, I>(polys: I) -> DensePolynomial<F>
  where
    I: IntoIterator<Item = &'a DensePolynomial>,
    I: IntoIterator<Item = &'a DensePolynomial<F>>,
  {
    let mut Z: Vec<Scalar> = Vec::new();
    let mut Z: Vec<F> = Vec::new();
    for poly in polys.into_iter() {
      Z.extend(poly.vec());
    }

    // pad the polynomial with zero polynomial at the end
    Z.resize(Z.len().next_power_of_two(), Scalar::zero());
    Z.resize(Z.len().next_power_of_two(), F::zero());

    DensePolynomial::new(Z)
  }
@@ -446,76 +446,66 @@ impl DensePolynomial {
  pub fn from_usize(Z: &[usize]) -> Self {
    DensePolynomial::new(
      (0..Z.len())
        .map(|i| Scalar::from(Z[i] as u64))
        .collect::<Vec<Scalar>>(),
        .map(|i| F::from(Z[i] as u64))
        .collect::<Vec<F>>(),
    )
  }
}

impl Index<usize> for DensePolynomial {
  type Output = Scalar;
impl<F: PrimeField> Index<usize> for DensePolynomial<F> {
  type Output = F;

  #[inline(always)]
  fn index(&self, _index: usize) -> &Scalar {
  fn index(&self, _index: usize) -> &F {
    &(self.Z[_index])
  }
}

impl AppendToTranscript for PolyCommitment {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"poly_commitment_begin");
impl<G: CurveGroup> TranscriptWriter<G::ScalarField> for PolyCommitment<G> {
  fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<G::ScalarField>) {
    for i in 0..self.C.len() {
      transcript.append_point(b"poly_commitment_share", &self.C[i]);
    }
    transcript.append_message(label, b"poly_commitment_end");
  }
}

impl AppendToPoseidon for PolyCommitment {
  fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
    for i in 0..self.C.len() {
      transcript.append_point(&self.C[i]);
      transcript.append_point(b"", &self.C[i]);
    }
  }
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct PolyEvalProof {
  proof: DotProductProofLog,
pub struct PolyEvalProof<E: Pairing> {
  proof: DotProductProofLog<E::G1>,
}

impl PolyEvalProof {
  fn protocol_name() -> &'static [u8] {
    b"polynomial evaluation proof"
  }

impl<E> PolyEvalProof<E>
where
  E: Pairing,
  E::ScalarField: Absorb,
{
  pub fn prove(
    poly: &DensePolynomial,
    blinds_opt: Option<&PolyCommitmentBlinds>,
    r: &[Scalar], // point at which the polynomial is evaluated
    Zr: &Scalar, // evaluation of \widetilde{Z}(r)
    blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr
    gens: &PolyCommitmentGens,
    transcript: &mut PoseidonTranscript,
    random_tape: &mut RandomTape,
  ) -> (PolyEvalProof, CompressedGroup) {
    poly: &DensePolynomial<E::ScalarField>,
    blinds_opt: Option<&PolyCommitmentBlinds<E::ScalarField>>,
    r: &[E::ScalarField], // point at which the polynomial is evaluated
    Zr: &E::ScalarField, // evaluation of \widetilde{Z}(r)
    blind_Zr_opt: Option<&E::ScalarField>, // specifies a blind for Zr
    gens: &PolyCommitmentGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
  ) -> (PolyEvalProof<E>, E::G1) {
    // transcript.append_protocol_name(PolyEvalProof::protocol_name());

    // assert vectors are of the right size
    assert_eq!(poly.get_num_vars(), r.len());

    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len());
    let (left_num_vars, right_num_vars) =
      EqPolynomial::<E::ScalarField>::compute_factored_lens(r.len());
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();

    let default_blinds = PolyCommitmentBlinds {
      blinds: vec![Scalar::zero(); L_size],
      blinds: vec![E::ScalarField::zero(); L_size],
    };
    let blinds = blinds_opt.map_or(&default_blinds, |p| p);

    assert_eq!(blinds.blinds.len(), L_size);

    let zero = Scalar::zero();
    let zero = E::ScalarField::zero();
    let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p);

    // compute the L and R vectors
@@ -527,14 +517,13 @@ impl PolyEvalProof {
    // compute the vector underneath L*Z and the L*blinds
    // compute vector-matrix product between L and Z viewed as a matrix
    let LZ = poly.bound(&L);
    let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum();
    let LZ_blind: E::ScalarField = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum();

    // a dot product proof of size R_size
    let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove(
      &gens.gens,
      transcript,
      random_tape,
      &LZ,
      LZ.as_slice(),
      &LZ_blind,
      &R,
      Zr,
@@ -546,11 +535,11 @@ impl PolyEvalProof {

  pub fn verify(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut PoseidonTranscript,
    r: &[Scalar], // point at which the polynomial is evaluated
    C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
    comm: &PolyCommitment,
    gens: &PolyCommitmentGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
    r: &[E::ScalarField], // point at which the polynomial is evaluated
    C_Zr: &E::G1, // commitment to \widetilde{Z}(r)
    comm: &PolyCommitment<E::G1>,
  ) -> Result<(), ProofVerifyError> {
    // transcript.append_protocol_name(PolyEvalProof::protocol_name());

@@ -559,28 +548,27 @@ impl PolyEvalProof {
    let (L, R) = eq.compute_factored_evals();

    // compute a weighted sum of commitments and L
    let C_decompressed = comm
      .C
      .iter()
      .map(|pt| GroupElement::decompress(pt).unwrap())
      .collect::<Vec<GroupElement>>();
    let C_decompressed = &comm.C;

    let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed.as_slice()).compress();
    let C_LZ =
      <E::G1 as VariableBaseMSM>::msm(&<E::G1 as CurveGroup>::normalize_batch(C_decompressed), &L)
        .unwrap();

    self.proof
    self
      .proof
      .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr)
  }

  pub fn verify_plain(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut PoseidonTranscript,
    r: &[Scalar], // point at which the polynomial is evaluated
    Zr: &Scalar, // evaluation \widetilde{Z}(r)
    comm: &PolyCommitment,
    gens: &PolyCommitmentGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
    r: &[E::ScalarField], // point at which the polynomial is evaluated
    Zr: &E::ScalarField, // evaluation \widetilde{Z}(r)
    comm: &PolyCommitment<E::G1>,
  ) -> Result<(), ProofVerifyError> {
    // compute a commitment to Zr with a blind of zero
    let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress();
    let C_Zr = PedersenCommit::commit_scalar(Zr, &E::ScalarField::zero(), &gens.gens.gens_1);

    self.verify(gens, transcript, r, &C_Zr, comm)
  }
@@ -588,12 +576,16 @@ impl PolyEvalProof {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::ark_std::One;
|
||||
use crate::parameters::poseidon_params;
|
||||
|
||||
use super::*;
|
||||
use ark_std::UniformRand;
|
||||
|
||||
fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
|
||||
type F = ark_bls12_377::Fr;
|
||||
type E = ark_bls12_377::Bls12_377;
|
||||
|
||||
fn evaluate_with_LR(Z: &[F], r: &[F]) -> F {
|
||||
let eq = EqPolynomial::new(r.to_vec());
|
||||
let (L, R) = eq.compute_factored_evals();
|
||||
|
||||
@@ -608,36 +600,31 @@ mod tests {
|
||||
// compute vector-matrix product between L and Z viewed as a matrix
|
||||
let LZ = (0..m)
|
||||
.map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum())
|
||||
.collect::<Vec<Scalar>>();
|
||||
.collect::<Vec<F>>();
|
||||
|
||||
// compute dot product between LZ and R
|
||||
DotProductProofLog::compute_dotproduct(&LZ, &R)
|
||||
crate::dot_product(&LZ, &R)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_polynomial_evaluation() {
|
||||
// Z = [1, 2, 1, 4]
|
||||
let Z = vec![
|
||||
Scalar::one(),
|
||||
Scalar::from(2),
|
||||
Scalar::from(1),
|
||||
Scalar::from(4),
|
||||
];
|
||||
let Z = vec![F::one(), F::from(2), F::from(1), F::from(4)];
|
||||
|
||||
// r = [4,3]
|
||||
let r = vec![Scalar::from(4), Scalar::from(3)];
|
||||
let r = vec![F::from(4), F::from(3)];
|
||||
|
||||
let eval_with_LR = evaluate_with_LR(&Z, &r);
|
||||
let poly = DensePolynomial::new(Z);
|
||||
|
||||
let eval = poly.evaluate(&r);
|
||||
assert_eq!(eval, Scalar::from(28));
|
||||
assert_eq!(eval, F::from(28));
|
||||
assert_eq!(eval_with_LR, eval);
|
||||
}
|
||||
|
||||
pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
|
||||
let mut L: Vec<Scalar> = Vec::new();
|
||||
let mut R: Vec<Scalar> = Vec::new();
|
||||
pub fn compute_factored_chis_at_r(r: &[F]) -> (Vec<F>, Vec<F>) {
|
||||
let mut L: Vec<F> = Vec::new();
|
||||
let mut R: Vec<F> = Vec::new();
|
||||
|
||||
let ell = r.len();
|
||||
assert!(ell % 2 == 0); // ensure ell is even
|
||||
@@ -646,13 +633,13 @@ mod tests {
|
||||
|
||||
// compute row vector L
|
||||
for i in 0..m {
|
||||
let mut chi_i = Scalar::one();
|
||||
let mut chi_i = F::one();
|
||||
for j in 0..ell / 2 {
|
||||
let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0;
|
||||
if bit_j {
|
||||
chi_i *= r[j];
|
||||
} else {
|
||||
chi_i *= Scalar::one() - r[j];
|
||||
chi_i *= F::one() - r[j];
|
||||
}
|
||||
}
|
||||
L.push(chi_i);
|
||||
@@ -660,13 +647,13 @@ mod tests {
|
||||
|
||||
// compute column vector R
|
||||
for i in 0..m {
|
||||
let mut chi_i = Scalar::one();
|
||||
let mut chi_i = F::one();
|
||||
for j in ell / 2..ell {
|
||||
let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
|
||||
if bit_j {
|
||||
chi_i *= r[j];
|
||||
} else {
|
||||
chi_i *= Scalar::one() - r[j];
|
||||
chi_i *= F::one() - r[j];
|
||||
}
|
||||
}
|
||||
R.push(chi_i);
|
||||
@@ -674,18 +661,18 @@ mod tests {
|
||||
(L, R)
|
||||
}
|
||||
|
||||
pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
|
||||
pub fn compute_chis_at_r(r: &[F]) -> Vec<F> {
|
||||
let ell = r.len();
|
||||
let n = ell.pow2();
|
||||
let mut chis: Vec<Scalar> = Vec::new();
|
||||
let mut chis: Vec<F> = Vec::new();
|
||||
for i in 0..n {
|
||||
let mut chi_i = Scalar::one();
|
||||
let mut chi_i = F::one();
|
||||
for j in 0..r.len() {
|
||||
let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
|
||||
if bit_j {
|
||||
chi_i *= r[j];
|
||||
} else {
|
||||
chi_i *= Scalar::one() - r[j];
|
||||
chi_i *= F::one() - r[j];
|
||||
}
|
||||
}
|
||||
chis.push(chi_i);
|
||||
@@ -693,14 +680,14 @@ mod tests {
|
||||
chis
|
||||
}
|
||||
|
||||
pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
|
||||
pub fn compute_outerproduct(L: Vec<F>, R: Vec<F>) -> Vec<F> {
|
||||
assert_eq!(L.len(), R.len());
|
||||
(0..L.len())
|
||||
.map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<Scalar>>())
|
||||
.collect::<Vec<Vec<Scalar>>>()
|
||||
.map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<F>>())
|
||||
.collect::<Vec<Vec<F>>>()
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<Scalar>>()
|
||||
.collect::<Vec<F>>()
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -708,9 +695,9 @@ mod tests {
|
||||
let mut rng = ark_std::rand::thread_rng();
|
||||
|
||||
let s = 10;
|
||||
let mut r: Vec<Scalar> = Vec::new();
|
||||
let mut r: Vec<F> = Vec::new();
|
||||
for _i in 0..s {
|
||||
r.push(Scalar::rand(&mut rng));
|
||||
r.push(F::rand(&mut rng));
|
||||
}
|
||||
let chis = tests::compute_chis_at_r(&r);
|
||||
let chis_m = EqPolynomial::new(r).evals();
|
||||
@@ -722,9 +709,9 @@ mod tests {
|
||||
let mut rng = ark_std::rand::thread_rng();
|
||||
|
||||
let s = 10;
|
||||
let mut r: Vec<Scalar> = Vec::new();
|
||||
let mut r: Vec<F> = Vec::new();
|
||||
for _i in 0..s {
|
||||
r.push(Scalar::rand(&mut rng));
|
||||
r.push(F::rand(&mut rng));
|
||||
}
|
||||
let chis = EqPolynomial::new(r.clone()).evals();
|
||||
let (L, R) = EqPolynomial::new(r).compute_factored_evals();
|
||||
@@ -737,9 +724,9 @@ mod tests {
|
||||
let mut rng = ark_std::rand::thread_rng();
|
||||
|
||||
let s = 10;
|
||||
let mut r: Vec<Scalar> = Vec::new();
|
||||
let mut r: Vec<F> = Vec::new();
|
||||
for _i in 0..s {
|
||||
r.push(Scalar::rand(&mut rng));
|
||||
r.push(F::rand(&mut rng));
|
||||
}
|
||||
let (L, R) = tests::compute_factored_chis_at_r(&r);
|
||||
let eq = EqPolynomial::new(r);
|
||||
@@ -750,26 +737,20 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn check_polynomial_commit() {
|
||||
let Z = vec![
|
||||
Scalar::from(1),
|
||||
Scalar::from(2),
|
||||
Scalar::from(1),
|
||||
Scalar::from(4),
|
||||
];
|
||||
let Z = vec![F::from(1), F::from(2), F::from(1), F::from(4)];
|
||||
let poly = DensePolynomial::new(Z);
|
||||
|
||||
// r = [4,3]
|
||||
let r = vec![Scalar::from(4), Scalar::from(3)];
|
||||
let r = vec![F::from(4), F::from(3)];
|
||||
let eval = poly.evaluate(&r);
|
||||
assert_eq!(eval, Scalar::from(28));
|
||||
assert_eq!(eval, F::from(28));
|
||||
|
||||
let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
|
||||
let (poly_commitment, blinds) = poly.commit(&gens, None);
|
||||
let gens = PolyCommitmentGens::setup(poly.get_num_vars(), b"test-two");
|
||||
let (poly_commitment, blinds) = poly.commit(&gens, false);
|
||||
|
||||
let mut random_tape = RandomTape::new(b"proof");
|
||||
let params = poseidon_params();
|
||||
let mut prover_transcript = PoseidonTranscript::new(¶ms);
|
||||
let (proof, C_Zr) = PolyEvalProof::prove(
|
||||
let (proof, C_Zr) = PolyEvalProof::<E>::prove(
|
||||
&poly,
|
||||
Some(&blinds),
|
||||
&r,
|
||||
@@ -777,7 +758,6 @@ mod tests {
|
||||
None,
|
||||
&gens,
|
||||
&mut prover_transcript,
|
||||
&mut random_tape,
|
||||
);
|
||||
|
||||
let mut verifier_transcript = PoseidonTranscript::new(¶ms);
|
||||
|
||||
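Note: the new verifier computes C_LZ with arkworks 0.4's variable-base MSM over affine bases instead of the removed vartime_multiscalar_mul helper. A minimal standalone sketch of that pattern, assuming ark-bls12-377, ark-ec and ark-std 0.4 as dependencies (the names are illustrative, not from the commit):

use ark_bls12_377::{Fr, G1Projective};
use ark_ec::{CurveGroup, VariableBaseMSM};
use ark_std::UniformRand;

fn msm_sketch() {
let mut rng = ark_std::rand::thread_rng();
// stand-ins for the row commitments comm.C and the weight vector L
let points: Vec<G1Projective> = (0..4).map(|_| G1Projective::rand(&mut rng)).collect();
let scalars: Vec<Fr> = (0..4).map(|_| Fr::rand(&mut rng)).collect();
// msm wants affine bases, hence the batch normalization first
let bases = G1Projective::normalize_batch(&points);
let fast = <G1Projective as VariableBaseMSM>::msm(&bases, &scalars).unwrap();
// naive reference sum agrees with the MSM result
let naive: G1Projective = points.iter().zip(&scalars).map(|(p, s)| *p * s).sum();
assert_eq!(fast, naive);
}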
80 src/group.rs
@@ -1,80 +0,0 @@
use crate::errors::ProofVerifyError;
use ark_ec::msm::VariableBaseMSM;
use ark_ff::PrimeField;

use lazy_static::lazy_static;

use super::scalar::Scalar;

use ark_ec::ProjectiveCurve;
use ark_serialize::*;
use core::borrow::Borrow;

pub type GroupElement = ark_bls12_377::G1Projective;
pub type GroupElementAffine = ark_bls12_377::G1Affine;
pub type Fq = ark_bls12_377::Fq;
pub type Fr = ark_bls12_377::Fr;

#[derive(Clone, Eq, PartialEq, Hash, Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct CompressedGroup(pub Vec<u8>);

lazy_static! {
pub static ref GROUP_BASEPOINT: GroupElement = GroupElement::prime_subgroup_generator();
}

pub trait CompressGroupElement {
fn compress(&self) -> CompressedGroup;
}

pub trait DecompressGroupElement {
fn decompress(encoded: &CompressedGroup) -> Option<GroupElement>;
}

pub trait UnpackGroupElement {
fn unpack(&self) -> Result<GroupElement, ProofVerifyError>;
}

impl CompressGroupElement for GroupElement {
fn compress(&self) -> CompressedGroup {
let mut point_encoding = Vec::new();
self.serialize(&mut point_encoding).unwrap();
CompressedGroup(point_encoding)
}
}

impl DecompressGroupElement for GroupElement {
fn decompress(encoded: &CompressedGroup) -> Option<Self> {
let res = GroupElement::deserialize(&*encoded.0);
if let Ok(r) = res {
Some(r)
} else {
println!("{:?}", res);
None
}
}
}

impl UnpackGroupElement for CompressedGroup {
fn unpack(&self) -> Result<GroupElement, ProofVerifyError> {
let encoded = self.0.clone();
GroupElement::decompress(self).ok_or(ProofVerifyError::DecompressionError(encoded))
}
}

pub trait VartimeMultiscalarMul {
fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement;
}

impl VartimeMultiscalarMul for GroupElement {
fn vartime_multiscalar_mul(scalars: &[Scalar], points: &[GroupElement]) -> GroupElement {
let repr_scalars = scalars
.iter()
.map(|S| S.borrow().into_repr())
.collect::<Vec<<Scalar as PrimeField>::BigInt>>();
let aff_points = points
.iter()
.map(|P| P.borrow().into_affine())
.collect::<Vec<GroupElementAffine>>();
VariableBaseMSM::multi_scalar_mul(aff_points.as_slice(), repr_scalars.as_slice())
}
}
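With src/group.rs deleted wholesale, point compression elsewhere in the crate goes through ark-serialize's canonical encoding rather than the hand-rolled CompressedGroup wrapper. A hedged round-trip sketch of that replacement (not code from this commit), assuming ark-serialize 0.4:

use ark_bls12_377::G1Affine;
use ark_ec::AffineRepr;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

fn compressed_roundtrip_sketch() {
let g = G1Affine::generator();
let mut bytes = Vec::new();
// compressed canonical encoding, the role CompressedGroup(Vec<u8>) used to play
g.serialize_compressed(&mut bytes).unwrap();
let back = G1Affine::deserialize_compressed(&*bytes).unwrap();
assert_eq!(g, back);
}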
614 src/lib.rs
@@ -1,6 +1,5 @@
#![allow(non_snake_case)]
#![doc = include_str!("../README.md")]
#![feature(test)]
#![allow(clippy::assertions_on_result_states)]

extern crate ark_std;
@@ -10,7 +9,6 @@ extern crate lazy_static;
extern crate merlin;
extern crate rand;
extern crate sha3;
extern crate test;

#[macro_use]
extern crate json;
@@ -21,18 +19,21 @@ extern crate rayon;
mod commitments;
mod dense_mlpoly;
mod errors;
mod group;
#[macro_use]
pub(crate) mod macros;
mod math;
pub(crate) mod mipp;
mod nizk;
mod product_tree;
mod r1csinstance;
mod r1csproof;
mod random;
mod scalar;
mod sparse_mlpoly;
pub mod sqrt_pst;
mod sumcheck;
pub mod testudo_nizk;
pub mod testudo_snark;
mod timer;
mod transcript;
pub(crate) mod transcript;
mod unipoly;

pub mod parameters;
@@ -40,46 +41,37 @@ pub mod parameters;
mod constraints;
pub mod poseidon_transcript;

use ark_ff::Field;

use ark_serialize::*;
use ark_std::Zero;
use core::cmp::max;
use errors::{ProofVerifyError, R1CSError};
use errors::R1CSError;

use poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
use r1csinstance::{
R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance,
};
use r1csproof::{R1CSGens, R1CSProof};
use random::RandomTape;
use scalar::Scalar;
use r1csinstance::{R1CSCommitment, R1CSDecommitment, R1CSInstance};

use timer::Timer;
use ark_ec::CurveGroup;

/// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS)
pub struct ComputationCommitment {
comm: R1CSCommitment,
pub struct ComputationCommitment<G: CurveGroup> {
comm: R1CSCommitment<G>,
}

use ark_ff::PrimeField;
/// `ComputationDecommitment` holds information to decommit `ComputationCommitment`
pub struct ComputationDecommitment {
decomm: R1CSDecommitment,
pub struct ComputationDecommitment<F: PrimeField> {
decomm: R1CSDecommitment<F>,
}

/// `Assignment` holds an assignment of values to either the inputs or variables in an `Instance`
#[derive(Clone)]
pub struct Assignment {
assignment: Vec<Scalar>,
pub struct Assignment<F: PrimeField> {
assignment: Vec<F>,
}

impl Assignment {
impl<F: PrimeField> Assignment<F> {
/// Constructs a new `Assignment` from a vector
pub fn new(assignment: &Vec<Vec<u8>>) -> Result<Assignment, R1CSError> {
let bytes_to_scalar = |vec: &Vec<Vec<u8>>| -> Result<Vec<Scalar>, R1CSError> {
let mut vec_scalar: Vec<Scalar> = Vec::new();
pub fn new(assignment: &Vec<Vec<u8>>) -> Result<Self, R1CSError> {
let bytes_to_scalar = |vec: &Vec<Vec<u8>>| -> Result<Vec<F>, R1CSError> {
let mut vec_scalar: Vec<F> = Vec::new();
for v in vec {
let val = Scalar::from_random_bytes(v.as_slice());
let val = F::from_random_bytes(v.as_slice());
if let Some(v) = val {
vec_scalar.push(v);
} else {
@@ -102,13 +94,13 @@ impl Assignment {
}

/// pads Assignment to the specified length
fn pad(&self, len: usize) -> VarsAssignment {
fn pad(&self, len: usize) -> VarsAssignment<F> {
// check that the new length is higher than current length
assert!(len > self.assignment.len());

let padded_assignment = {
let mut padded_assignment = self.assignment.clone();
padded_assignment.extend(vec![Scalar::zero(); len - self.assignment.len()]);
padded_assignment.extend(vec![F::zero(); len - self.assignment.len()]);
padded_assignment
};

@@ -119,19 +111,18 @@ impl Assignment {
}

/// `VarsAssignment` holds an assignment of values to variables in an `Instance`
pub type VarsAssignment = Assignment;
pub type VarsAssignment<F> = Assignment<F>;

/// `InputsAssignment` holds an assignment of values to variables in an `Instance`
pub type InputsAssignment = Assignment;
pub type InputsAssignment<F> = Assignment<F>;

/// `Instance` holds the description of R1CS matrices and a hash of the matrices
#[derive(Debug)]
pub struct Instance {
inst: R1CSInstance,
pub struct Instance<F: PrimeField> {
inst: R1CSInstance<F>,
digest: Vec<u8>,
}

impl Instance {
impl<F: PrimeField> Instance<F> {
/// Constructs a new `Instance` and an associated satisfying assignment
pub fn new(
num_cons: usize,
@@ -140,7 +131,7 @@ impl Instance {
A: &[(usize, usize, Vec<u8>)],
B: &[(usize, usize, Vec<u8>)],
C: &[(usize, usize, Vec<u8>)],
) -> Result<Instance, R1CSError> {
) -> Result<Self, R1CSError> {
let (num_vars_padded, num_cons_padded) = {
let num_vars_padded = {
let mut num_vars_padded = num_vars;
@@ -174,8 +165,8 @@ impl Instance {
};

let bytes_to_scalar =
|tups: &[(usize, usize, Vec<u8>)]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
|tups: &[(usize, usize, Vec<u8>)]| -> Result<Vec<(usize, usize, F)>, R1CSError> {
let mut mat: Vec<(usize, usize, F)> = Vec::new();
for (row, col, val_bytes) in tups {
// row must be smaller than num_cons
if *row >= num_cons {
@@ -187,7 +178,7 @@ impl Instance {
return Err(R1CSError::InvalidIndex);
}

let val = Scalar::from_random_bytes(val_bytes.as_slice());
let val = F::from_random_bytes(val_bytes.as_slice());
if let Some(v) = val {
// if col >= num_vars, it means that it is referencing a 1 or input in the satisfying
// assignment
@@ -205,7 +196,7 @@ impl Instance {
// we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol
if num_cons == 0 || num_cons == 1 {
for i in tups.len()..num_cons_padded {
mat.push((i, num_vars, Scalar::zero()));
mat.push((i, num_vars, F::zero()));
}
}

@@ -244,8 +235,8 @@ impl Instance {
/// Checks if a given R1CSInstance is satisfiable with a given variables and inputs assignments
pub fn is_sat(
&self,
vars: &VarsAssignment,
inputs: &InputsAssignment,
vars: &VarsAssignment<F>,
inputs: &InputsAssignment<F>,
) -> Result<bool, R1CSError> {
if vars.assignment.len() > self.inst.get_num_vars() {
return Err(R1CSError::InvalidNumberOfInputs);
@@ -266,9 +257,11 @@ impl Instance {
}
};

Ok(self
Ok(
self
.inst
.is_sat(&padded_vars.assignment, &inputs.assignment))
.is_sat(&padded_vars.assignment, &inputs.assignment),
)
}

/// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment
@@ -276,9 +269,8 @@ impl Instance {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
) -> (Instance, VarsAssignment, InputsAssignment) {
let (inst, vars, inputs) =
R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
) -> (Instance<F>, VarsAssignment<F>, InputsAssignment<F>) {
let (inst, vars, inputs) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let digest = inst.get_digest();
(
Instance { inst, digest },
@@ -288,423 +280,21 @@ impl Instance {
}
}

/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK
pub struct SNARKGens {
gens_r1cs_sat: R1CSGens,
gens_r1cs_eval: R1CSCommitmentGens,
}

impl SNARKGens {
/// Constructs a new `SNARKGens` given the size of the R1CS statement
/// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices
pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self {
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};

let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
let gens_r1cs_eval = R1CSCommitmentGens::new(
b"gens_r1cs_eval",
num_cons,
num_vars_padded,
num_inputs,
num_nz_entries,
);
SNARKGens {
gens_r1cs_sat,
gens_r1cs_eval,
}
}
}

/// `SNARK` holds a proof produced by Spartan SNARK
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct SNARK {
r1cs_sat_proof: R1CSProof,
inst_evals: (Scalar, Scalar, Scalar),
r1cs_eval_proof: R1CSEvalProof,
rx: Vec<Scalar>,
ry: Vec<Scalar>,
}

impl SNARK {
fn protocol_name() -> &'static [u8] {
b"Spartan SNARK proof"
}

/// A public computation to create a commitment to an R1CS instance
pub fn encode(
inst: &Instance,
gens: &SNARKGens,
) -> (ComputationCommitment, ComputationDecommitment) {
let timer_encode = Timer::new("SNARK::encode");
let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
timer_encode.stop();
(
ComputationCommitment { comm },
ComputationDecommitment { decomm },
)
}

/// A method to produce a SNARK proof of the satisfiability of an R1CS instance
pub fn prove(
inst: &Instance,
comm: &ComputationCommitment,
decomm: &ComputationDecommitment,
vars: VarsAssignment,
inputs: &InputsAssignment,
gens: &SNARKGens,
transcript: &mut PoseidonTranscript,
) -> Self {
let timer_prove = Timer::new("SNARK::prove");

// we create a Transcript object seeded with a random Scalar
// to aid the prover in producing its randomness
let mut random_tape = RandomTape::new(b"proof");

// transcript.append_protocol_name(SNARK::protocol_name());
comm.comm.append_to_poseidon(transcript);

let (r1cs_sat_proof, rx, ry) = {
let (proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};

R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&inputs.assignment,
&gens.gens_r1cs_sat,
transcript,
// &mut random_tape,
)
};

let mut proof_encoded: Vec<u8> = Vec::new();
proof.serialize(&mut proof_encoded).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));

(proof, rx, ry)
};

// We need to reset the transcript state before starting the evaluation
// proof and share this state with the verifier because, on the verifier's
// side, all the previous updates are done on the transcript circuit
// variable and the transcript outside the circuit would otherwise be
// inconsistent with the prover's.
transcript.new_from_state(&r1cs_sat_proof.transcript_sat_state);

// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let inst_evals = {
let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
transcript.append_scalar(&Ar);
transcript.append_scalar(&Br);
transcript.append_scalar(&Cr);
(Ar, Br, Cr)
};
timer_eval.stop();

let r1cs_eval_proof = {
let proof = R1CSEvalProof::prove(
&decomm.decomm,
&rx,
&ry,
&inst_evals,
&gens.gens_r1cs_eval,
transcript,
&mut random_tape,
);

let mut proof_encoded: Vec<u8> = Vec::new();
proof.serialize(&mut proof_encoded).unwrap();
Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
proof
};

timer_prove.stop();
SNARK {
r1cs_sat_proof,
inst_evals,
r1cs_eval_proof,
rx,
ry,
}
}

/// A method to verify the SNARK proof of the satisfiability of an R1CS instance
pub fn verify(
&self,
comm: &ComputationCommitment,
input: &InputsAssignment,
transcript: &mut PoseidonTranscript,
gens: &SNARKGens,
) -> Result<(u128, u128, u128), ProofVerifyError> {
let timer_verify = Timer::new("SNARK::verify");
// transcript.append_protocol_name(SNARK::protocol_name());

// append a commitment to the computation to the transcript
comm.comm.append_to_poseidon(transcript);

let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), comm.comm.get_num_inputs());
// let (rx, ry) =
let res = self.r1cs_sat_proof.verify_groth16(
comm.comm.get_num_vars(),
comm.comm.get_num_cons(),
&input.assignment,
&self.inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
timer_sat_proof.stop();

let timer_eval_proof = Timer::new("verify_eval_proof");
// Reset the transcript using the state sent by the prover.
// TODO: find a way to retrieve this state from the circuit. Currently
// the API for generating constraints doesn't support returning values
// computed inside the circuit.
transcript.new_from_state(&self.r1cs_sat_proof.transcript_sat_state);

let (Ar, Br, Cr) = &self.inst_evals;
transcript.append_scalar(&Ar);
transcript.append_scalar(&Br);
transcript.append_scalar(&Cr);

self.r1cs_eval_proof.verify(
&comm.comm,
&self.rx,
&self.ry,
&self.inst_evals,
&gens.gens_r1cs_eval,
transcript,
)?;
timer_eval_proof.stop();
timer_verify.stop();
Ok(res)
}
}

#[derive(Clone)]
/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK
pub struct NIZKGens {
gens_r1cs_sat: R1CSGens,
}

impl NIZKGens {
/// Constructs a new `NIZKGens` given the size of the R1CS statement
pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self {
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};

let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
NIZKGens { gens_r1cs_sat }
}
}

/// `NIZK` holds a proof produced by Spartan NIZK
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct NIZK {
r1cs_sat_proof: R1CSProof,
r: (Vec<Scalar>, Vec<Scalar>),
}

impl NIZK {
fn protocol_name() -> &'static [u8] {
b"Spartan NIZK proof"
}

/// A method to produce a NIZK proof of the satisfiability of an R1CS instance
pub fn prove(
inst: &Instance,
vars: VarsAssignment,
input: &InputsAssignment,
gens: &NIZKGens,
transcript: &mut PoseidonTranscript,
) -> Self {
let timer_prove = Timer::new("NIZK::prove");
// we create a Transcript object seeded with a random Scalar
// to aid the prover in producing its randomness
let _random_tape = RandomTape::new(b"proof");

// transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_bytes(&inst.digest);

let (r1cs_sat_proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};

let (proof, rx, ry) = R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&input.assignment,
&gens.gens_r1cs_sat,
transcript,
// &mut random_tape,
);
let mut proof_encoded = Vec::new();
proof.serialize(&mut proof_encoded).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};

timer_prove.stop();
NIZK {
r1cs_sat_proof,
r: (rx, ry),
}
}

/// A method to verify a NIZK proof of the satisfiability of an R1CS instance
pub fn verify(
&self,
inst: &Instance,
input: &InputsAssignment,
transcript: &mut PoseidonTranscript,
gens: &NIZKGens,
) -> Result<usize, ProofVerifyError> {
let timer_verify = Timer::new("NIZK::verify");

transcript.append_bytes(&inst.digest);

// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let (claimed_rx, claimed_ry) = &self.r;
let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
timer_eval.stop();

let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), inst.inst.get_num_inputs());
// let (rx, ry) =
let nc = self.r1cs_sat_proof.circuit_size(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&input.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;

// verify if claimed rx and ry are correct
// assert_eq!(rx, *claimed_rx);
// assert_eq!(ry, *claimed_ry);
timer_sat_proof.stop();
timer_verify.stop();

Ok(nc)
}

/// A method to verify a NIZK proof of the satisfiability of an R1CS instance with Groth16
pub fn verify_groth16(
&self,
inst: &Instance,
input: &InputsAssignment,
transcript: &mut PoseidonTranscript,
gens: &NIZKGens,
) -> Result<(u128, u128, u128), ProofVerifyError> {
let timer_verify = Timer::new("NIZK::verify");

// transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_bytes(&inst.digest);

// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let (claimed_rx, claimed_ry) = &self.r;
let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
timer_eval.stop();

let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), inst.inst.get_num_inputs());
// let (rx, ry) =
let (ds, dp, dv) = self.r1cs_sat_proof.verify_groth16(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&input.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;

// verify if claimed rx and ry are correct
// assert_eq!(rx, *claimed_rx);
// assert_eq!(ry, *claimed_ry);
timer_sat_proof.stop();
timer_verify.stop();

Ok((ds, dp, dv))
}
}

#[inline]
pub(crate) fn dot_product<F: PrimeField>(a: &[F], b: &[F]) -> F {
let mut res = F::zero();
for i in 0..a.len() {
res += a[i] * &b[i];
}
res
}

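The crate-level dot_product above is just the plain sum of products Σ aᵢ·bᵢ; a hedged equivalence check against an iterator formulation (the field is chosen only for illustration):

use ark_bls12_377::Fr;

fn dot_product_sketch() {
let a = [Fr::from(2u64), Fr::from(3u64)];
let b = [Fr::from(5u64), Fr::from(7u64)];
// same sum computed the iterator way: zip, multiply pairwise, sum
let zipped: Fr = a.iter().zip(b.iter()).map(|(x, y)| *x * y).sum();
assert_eq!(zipped, Fr::from(31u64)); // 2*5 + 3*7
}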
#[cfg(test)]
mod tests {
use crate::parameters::poseidon_params;

use super::*;
use ark_ff::{BigInteger, One, PrimeField};

#[test]
pub fn check_snark() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;

// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);

let params = poseidon_params();

// produce a proof
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);

// verify the proof
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
}
type F = ark_bls12_377::Fr;

#[test]
pub fn check_r1cs_invalid_index() {
@@ -713,15 +303,15 @@ mod tests {
let num_inputs = 1;

let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];

let A = vec![(0, 0, zero.to_vec())];
let B = vec![(100, 1, zero.to_vec())];
let C = vec![(1, 1, zero.to_vec())];

let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
let inst = Instance::<F>::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidIndex));
}
@@ -733,111 +323,21 @@ mod tests {
let num_inputs = 1;

let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];

let larger_than_mod = [
3, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115,
3, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216,
57, 51, 72, 125, 157, 41, 83, 167, 237, 115,
];

let A = vec![(0, 0, zero.to_vec())];
let B = vec![(1, 1, larger_than_mod.to_vec())];
let C = vec![(1, 1, zero.to_vec())];

let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
let inst = Instance::<F>::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidScalar));
}

#[test]
fn test_padded_constraints() {
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
let num_non_zero_entries = 3;

// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();

// Create a^2 + b + 13
A.push((0, num_vars + 2, (Scalar::one().into_repr().to_bytes_le()))); // 1*a
B.push((0, num_vars + 2, Scalar::one().into_repr().to_bytes_le())); // 1*a
C.push((0, num_vars + 1, Scalar::one().into_repr().to_bytes_le())); // 1*z
C.push((
0,
num_vars,
(-Scalar::from(13u64)).into_repr().to_bytes_le(),
)); // -13*1
C.push((0, num_vars + 3, (-Scalar::one()).into_repr().to_bytes_le())); // -1*b

// Var Assignments (Z_0 = 16 is the only output)
let vars = vec![Scalar::zero().into_repr().to_bytes_le(); num_vars];

// create an InputsAssignment (a = 1, b = 2)
let mut inputs = vec![Scalar::zero().into_repr().to_bytes_le(); num_inputs];
inputs[0] = Scalar::from(16u64).into_repr().to_bytes_le();
inputs[1] = Scalar::from(1u64).into_repr().to_bytes_le();
inputs[2] = Scalar::from(2u64).into_repr().to_bytes_le();

let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();

// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");

// SNARK public params
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);

// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);

let params = poseidon_params();

// produce a SNARK
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars.clone(),
&assignment_inputs,
&gens,
&mut prover_transcript,
);

// verify the SNARK
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());

// NIZK public params
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

let params = poseidon_params();

// produce a NIZK
let mut prover_transcript = PoseidonTranscript::new(&params);
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);

// verify the NIZK
let mut verifier_transcript = PoseidonTranscript::new(&params);
assert!(proof
.verify_groth16(&inst, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
}
}
56 src/macros.rs (new file)
@@ -0,0 +1,56 @@
macro_rules! try_par {
($(let $name:ident = $f:expr),+) => {
$(
let mut $name = None;
)+
rayon::scope(|s| {
$(
let $name = &mut $name;
s.spawn(move |_| {
*$name = Some($f);
});)+
});
$(
let $name = $name.unwrap()?;
)+
};
}

macro_rules! par {
($(let $name:ident = $f:expr),+) => {
$(
let mut $name = None;
)+
rayon::scope(|s| {
$(
let $name = &mut $name;
s.spawn(move |_| {
*$name = Some($f);
});)+
});
$(
let $name = $name.unwrap();
)+
};

($(let ($name1:ident, $name2:ident) = $f:block),+) => {
$(
let mut $name1 = None;
let mut $name2 = None;
)+
rayon::scope(|s| {
$(
let $name1 = &mut $name1;
let $name2 = &mut $name2;
s.spawn(move |_| {
let (a, b) = $f;
*$name1 = Some(a);
*$name2 = Some(b);
});)+
});
$(
let $name1 = $name1.unwrap();
let $name2 = $name2.unwrap();
)+
}
}
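These macros carry no doc comments, so here is a hand-expanded sketch of what a call like par! { let a = ..., let b = ... } produces (self-contained, assuming only rayon; the expressions are illustrative):

fn par_expansion_sketch() {
let mut a = None;
let mut b = None;
rayon::scope(|s| {
let a = &mut a;
let b = &mut b;
// each binding runs on its own rayon task, exactly as the macro spawns them
s.spawn(move |_| *a = Some(40 + 2));
s.spawn(move |_| *b = Some(6 * 7));
});
// after the scope joins, the Options are unwrapped back into plain lets;
// try_par! is the same shape except each expression yields a Result and
// the trailing `?` propagates the first error
let (a, b) = (a.unwrap(), b.unwrap());
assert_eq!(a, b);
}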
410 src/mipp.rs (new file)
@@ -0,0 +1,410 @@
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;
use ark_ec::scalar_mul::variable_base::VariableBaseMSM;
use ark_ec::CurveGroup;
use ark_ec::{pairing::Pairing, AffineRepr};
use ark_ff::{Field, PrimeField};
use ark_poly::DenseMultilinearExtension;
use ark_poly_commit::multilinear_pc::data_structures::{
CommitmentG2, CommitterKey, ProofG1, VerifierKey,
};
use ark_poly_commit::multilinear_pc::MultilinearPC;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
use ark_std::One;
use ark_std::Zero;
use rayon::iter::ParallelIterator;
use rayon::prelude::IntoParallelIterator;
use rayon::prelude::*;
use std::ops::{AddAssign, Mul, MulAssign};
use thiserror::Error;

#[derive(Debug, Clone, CanonicalDeserialize, CanonicalSerialize)]
pub struct MippProof<E: Pairing> {
pub comms_t: Vec<(<E as Pairing>::TargetField, <E as Pairing>::TargetField)>,
pub comms_u: Vec<(E::G1Affine, E::G1Affine)>,
pub final_a: E::G1Affine,
pub final_h: E::G2Affine,
pub pst_proof_h: ProofG1<E>,
}

impl<E: Pairing> MippProof<E> {
pub fn prove(
transcript: &mut PoseidonTranscript<E::ScalarField>,
ck: &CommitterKey<E>,
a: Vec<E::G1Affine>,
y: Vec<E::ScalarField>,
h: Vec<E::G2Affine>,
U: &E::G1Affine,
_T: &<E as Pairing>::TargetField,
) -> Result<MippProof<E>, Error> {
// the values of vectors A and y rescaled at each step of the loop
let (mut m_a, mut m_y) = (a.clone(), y.clone());
// the values of the commitment keys h for the vector A rescaled at
// each step of the loop
let mut m_h = h.clone();

// storing the cross commitments for including in the proofs
let mut comms_t = Vec::new();
let mut comms_u = Vec::new();

// the transcript challenges
let mut xs: Vec<E::ScalarField> = Vec::new();
let mut xs_inv: Vec<E::ScalarField> = Vec::new();

// we append only the MIPP because the aggregated commitment T has been
// appended already
transcript.append(b"U", U);

while m_a.len() > 1 {
// recursive step
// Recurse with problem of half size
let split = m_a.len() / 2;

// MIPP where n' = split
// a[:n'] a[n':]
let (a_l, a_r) = m_a.split_at_mut(split);
// y[:n'] y[n':]
let (y_l, y_r) = m_y.split_at_mut(split);
// h[:n'] h[n':]
let (h_l, h_r) = m_h.split_at_mut(split);

// since we do this in parallel we take reference first so it can be
// moved within the macro's rayon scope.
let (_rh_l, _rh_r) = (&h_l, &h_r);
let (ra_l, ra_r) = (&a_l, &a_r);
let (ry_l, ry_r) = (&y_l, &y_r);

try_par! {
// MIPP part
// Compute cross commitments
// u_l = a[n':] ^ y[:n']
// TODO to replace by bitsf_multiexp
let comm_u_l = multiexponentiation(ra_l, &ry_r),
// u_r = a[:n'] ^ y[n':]
let comm_u_r = multiexponentiation(ra_r, &ry_l)
};

par! {
// Compute the cross pairing products over the distinct halfs of A
// t_l = a[n':] * h[:n']
let comm_t_l = pairings_product::<E>(&a_l, h_r),
// t_r = a[:n'] * h[n':]
let comm_t_r = pairings_product::<E>(&a_r, h_l)

};

// Fiat-Shamir challenge
transcript.append(b"comm_u_l", &comm_u_l);
transcript.append(b"comm_u_r", &comm_u_r);
transcript.append(b"comm_t_l", &comm_t_l);
transcript.append(b"comm_t_r", &comm_t_r);
let c_inv = transcript.challenge_scalar::<E::ScalarField>(b"challenge_i");

// Optimization for multiexponentiation to rescale G2 elements with
// a 128-bit challenge. Swap 'c' and 'c_inv' since we
// can't control the bit size of c_inv
let c = c_inv.inverse().unwrap();

// Set up values for next step of recursion by compressing as follows
// a[n':] + a[:n']^x
compress(&mut m_a, split, &c);
// y[n':] + y[:n']^x_inv
compress_field(&mut m_y, split, &c_inv);
// h[n':] + h[:n']^x_inv
compress(&mut m_h, split, &c_inv);

comms_t.push((comm_t_l, comm_t_r));
comms_u.push((comm_u_l.into_affine(), comm_u_r.into_affine()));
xs.push(c);
xs_inv.push(c_inv);
}
assert!(m_a.len() == 1 && m_y.len() == 1 && m_h.len() == 1);

let final_a = m_a[0];
let final_h = m_h[0];

// get the structured polynomial p_h for which final_h = h^p_h(vec{t})
// is the PST commitment given generator h and toxic waste \vec{t}
let poly = DenseMultilinearExtension::<E::ScalarField>::from_evaluations_vec(
xs_inv.len(),
Self::polynomial_evaluations_from_transcript::<E::ScalarField>(&xs_inv),
);
let c = MultilinearPC::<E>::commit_g2(ck, &poly);
debug_assert!(c.h_product == final_h);

// generate a proof of opening final_h at the random point rs
// from the transcript
let rs: Vec<E::ScalarField> = (0..poly.num_vars)
.into_iter()
.map(|_| transcript.challenge_scalar::<E::ScalarField>(b"random_point"))
.collect();

let pst_proof_h = MultilinearPC::<E>::open_g1(ck, &poly, &rs);

Ok(MippProof {
comms_t,
comms_u,
final_a,
final_h,
pst_proof_h,
})
}

// builds the polynomial p_h in Lagrange basis which uses the
// inverses of the transcript challenges; this is the following
// structured polynomial $\prod_i(1 - z_i + cs_inv[m - i - 1] * z_i)$
// where m is the length of cs_inv and z_i is the unknown
fn polynomial_evaluations_from_transcript<F: Field>(cs_inv: &[F]) -> Vec<F> {
let m = cs_inv.len();
let pow_m = 2_usize.pow(m as u32);

// constructs the list of evaluations over the boolean hypercube \{0,1\}^m
let evals = (0..pow_m)
.into_par_iter()
.map(|i| {
let mut res = F::one();
for j in 0..m {
// we iterate from lsb to msb and, in case the bit is 1,
// we multiply by the corresponding challenge, i.e. the one
// whose index corresponds to the bit's position
if (i >> j) & 1 == 1 {
res *= cs_inv[m - j - 1];
}
}
res
})
.collect();
evals
}

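A sanity check on the evaluation order above: assuming the lsb-first variable order that the loop uses (bit j of the index corresponds to variable z_j and to challenge cs_inv[m - j - 1]), the hypercube evaluations must agree with the product form of p_h. A hedged m = 2 sketch with arbitrary challenge values:

use ark_bls12_377::Fr;
use ark_ff::One;

fn structured_poly_sketch() {
let cs_inv = [Fr::from(3u64), Fr::from(5u64)];
// p_h(z0, z1) = (1 - z0 + cs_inv[1]*z0) * (1 - z1 + cs_inv[0]*z1)
let p = |z0: Fr, z1: Fr| (Fr::one() - z0 + cs_inv[1] * z0) * (Fr::one() - z1 + cs_inv[0] * z1);
// evaluations as the loop above produces them, index bits read lsb-first
let evals = [Fr::one(), cs_inv[1], cs_inv[0], cs_inv[0] * cs_inv[1]];
let (zero, one) = (Fr::from(0u64), Fr::one());
assert_eq!(evals[0], p(zero, zero)); // i = 0b00
assert_eq!(evals[1], p(one, zero)); // i = 0b01
assert_eq!(evals[2], p(zero, one)); // i = 0b10
assert_eq!(evals[3], p(one, one)); // i = 0b11
}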
pub fn verify(
vk: &VerifierKey<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
proof: &MippProof<E>,
point: Vec<E::ScalarField>,
U: &E::G1Affine,
T: &<E as Pairing>::TargetField,
) -> bool {
let comms_u = proof.comms_u.clone();
let comms_t = proof.comms_t.clone();

let mut xs = Vec::new();
let mut xs_inv = Vec::new();
let mut final_y = E::ScalarField::one();

let mut final_res = MippTU {
tc: T.clone(),
uc: U.into_group(),
};

transcript.append(b"U", U);

// Challenges need to be generated first in sequential order so the
// prover and the verifier have a consistent view of the transcript
for (i, (comm_u, comm_t)) in comms_u.iter().zip(comms_t.iter()).enumerate() {
let (comm_u_l, comm_u_r) = comm_u;
let (comm_t_l, comm_t_r) = comm_t;

// Fiat-Shamir challenge
transcript.append(b"comm_u_l", comm_u_l);
transcript.append(b"comm_u_r", comm_u_r);
transcript.append(b"comm_t_l", comm_t_l);
transcript.append(b"comm_t_r", comm_t_r);
let c_inv = transcript.challenge_scalar::<E::ScalarField>(b"challenge_i");
let c = c_inv.inverse().unwrap();

xs.push(c);
xs_inv.push(c_inv);

// the verifier computes final_y by themselves because these are
// field operations, so they are quite fast and parallelisation
// doesn't bring much improvement
final_y *= E::ScalarField::one() + c_inv.mul(point[i]) - point[i];
}

// First, each entry of T and U is multiplied independently by its
// respective challenge, which is done in parallel and, at the end,
// the results are merged together for each vector following their
// corresponding merge operation.
enum Op<'a, E: Pairing> {
TC(&'a E::TargetField, <E::ScalarField as PrimeField>::BigInt),
UC(&'a E::G1Affine, &'a E::ScalarField),
}

let res = comms_t
.par_iter()
.zip(comms_u.par_iter())
.zip(xs.par_iter().zip(xs_inv.par_iter()))
.flat_map(|((comm_t, comm_u), (c, c_inv))| {
let (comm_t_l, comm_t_r) = comm_t;
let (comm_u_l, comm_u_r) = comm_u;

// we multiply the left side by x^-1 and the right side by x
vec![
Op::TC::<E>(comm_t_l, c_inv.into_bigint()),
Op::TC(comm_t_r, c.into_bigint()),
Op::UC(comm_u_l, c_inv),
Op::UC(comm_u_r, c),
]
})
.fold(MippTU::<E>::default, |mut res, op: Op<E>| {
match op {
Op::TC(tx, c) => {
let tx: E::TargetField = tx.pow(c);
res.tc.mul_assign(&tx);
}
Op::UC(zx, c) => {
let uxp: E::G1 = zx.mul(c);
res.uc.add_assign(&uxp);
}
}
res
})
.reduce(MippTU::default, |mut acc_res, res| {
acc_res.merge(&res);
acc_res
});

// the initial values of T and U are also merged to get the final result
let ref_final_res = &mut final_res;
ref_final_res.merge(&res);

// get the point rs from the transcript, used by the prover to generate
// the PST proof
let mut rs: Vec<E::ScalarField> = Vec::new();
let m = xs_inv.len();
for _i in 0..m {
let r = transcript.challenge_scalar::<E::ScalarField>(b"random_point");
rs.push(r);
}

// Given p_h is structured as defined above, the verifier can compute
// p_h(rs) by themselves in O(m) time
let v = (0..m)
.into_par_iter()
.map(|i| E::ScalarField::one() + rs[i].mul(xs_inv[m - i - 1]) - rs[i])
.product();

let comm_h = CommitmentG2 {
nv: m,
h_product: proof.final_h,
};

// final_h is the commitment of p_h so the verifier can perform
// a PST verification at the random point rs, given the pst proof
// received from the prover
let check_h = MultilinearPC::<E>::check_2(vk, &comm_h, &rs, v, &proof.pst_proof_h);
assert!(check_h == true);

let final_u = proof.final_a.mul(final_y);
let final_t: <E as Pairing>::TargetField = E::pairing(proof.final_a, proof.final_h).0;

let check_t = ref_final_res.tc == final_t;
assert!(check_t == true);

let check_u = ref_final_res.uc == final_u;
assert!(check_u == true);
check_h & check_u
}
}

/// MippTU keeps track of the variables that have been sent by the prover and
/// must be multiplied together by the verifier.
struct MippTU<E: Pairing> {
pub tc: E::TargetField,
pub uc: E::G1,
}

impl<E> Default for MippTU<E>
where
E: Pairing,
{
fn default() -> Self {
Self {
tc: E::TargetField::one(),
uc: E::G1::zero(),
}
}
}

impl<E> MippTU<E>
where
E: Pairing,
{
fn merge(&mut self, other: &Self) {
self.tc.mul_assign(&other.tc);
self.uc.add_assign(&other.uc);
}
}

/// compress modifies the `vec` vector by setting the value at
/// index $i: 0 -> split$ to $vec[i] = vec[i] + vec[i+split]^{scaler}$.
/// The `vec` vector is half of its size after this call.
pub fn compress<C: AffineRepr>(vec: &mut Vec<C>, split: usize, scaler: &C::ScalarField) {
let (left, right) = vec.split_at_mut(split);
left
.par_iter_mut()
.zip(right.par_iter())
.for_each(|(a_l, a_r)| {
// TODO remove that with master version
let mut x = a_r.mul(scaler);
x.add_assign(a_l.into_group());
*a_l = x.into_affine();
});
let len = left.len();
vec.resize(len, C::zero());
}

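A hedged check of the fold compress performs; since the function is public, a test in this crate could call it directly (curve, sizes and scalar are arbitrary). After the call, vec[i] equals old[i] + old[i + split] * scaler and the vector halves:

use ark_bls12_377::{Fr, G1Affine, G1Projective};
use ark_ec::CurveGroup;
use ark_std::UniformRand;

fn compress_sketch() {
let mut rng = ark_std::rand::thread_rng();
let old: Vec<G1Affine> = (0..4)
.map(|_| G1Projective::rand(&mut rng).into_affine())
.collect();
let mut v = old.clone();
let c = Fr::rand(&mut rng);
compress(&mut v, 2, &c); // the function defined above
assert_eq!(v.len(), 2);
for i in 0..2 {
// the left half absorbs the scaled right half
assert_eq!(v[i], (old[i + 2] * c + old[i]).into_affine());
}
}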
// TODO make that generic with points as well
pub fn compress_field<F: PrimeField>(vec: &mut Vec<F>, split: usize, scaler: &F) {
let (left, right) = vec.split_at_mut(split);
assert!(left.len() == right.len());
left
.par_iter_mut()
.zip(right.par_iter_mut())
.for_each(|(a_l, a_r)| {
// TODO remove copy
a_r.mul_assign(scaler);
a_l.add_assign(a_r.clone());
});
let len = left.len();
vec.resize(len, F::zero());
}

pub fn multiexponentiation<G: AffineRepr>(
left: &[G],
right: &[G::ScalarField],
) -> Result<G::Group, Error> {
if left.len() != right.len() {
return Err(Error::InvalidIPVectorLength);
}

Ok(<G::Group as VariableBaseMSM>::msm_unchecked(left, right))
}

pub fn pairings_product<E: Pairing>(gs: &[E::G1Affine], hs: &[E::G2Affine]) -> E::TargetField {
E::multi_pairing(gs, hs).0
}

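pairings_product is a thin wrapper over E::multi_pairing; a hedged sanity check of the bilinearity it relies on, e(g^a, h) * e(g^b, h) = e(g^{a+b}, h), on BLS12-377 (standalone, not code from this commit):

use ark_bls12_377::{Bls12_377, Fr, G1Affine, G2Affine};
use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup};
use ark_std::UniformRand;

fn multi_pairing_sketch() {
let mut rng = ark_std::rand::thread_rng();
let (a, b) = (Fr::rand(&mut rng), Fr::rand(&mut rng));
let g = G1Affine::generator();
let h = G2Affine::generator();
// e(g^a, h) * e(g^b, h) computed as one multi-pairing
let lhs = Bls12_377::multi_pairing([(g * a).into_affine(), (g * b).into_affine()], [h, h]).0;
// equals e(g^{a+b}, h) by bilinearity
let rhs = Bls12_377::pairing((g * (a + b)).into_affine(), h).0;
assert_eq!(lhs, rhs);
}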
#[derive(Debug, Error)]
|
||||
pub enum Error {
|
||||
#[error("Serialization error: {0}")]
|
||||
Serialization(#[from] SerializationError),
|
||||
|
||||
#[error("Vectors length do not match for inner product (IP)")]
|
||||
InvalidIPVectorLength,
|
||||
// #[error("Commitment key length invalid")]
|
||||
// InvalidKeyLength,
|
||||
|
||||
// #[error("Invalid pairing result")]
|
||||
// InvalidPairing,
|
||||
|
||||
// #[error("Invalid SRS: {0}")]
|
||||
// InvalidSRS(String),
|
||||
|
||||
// #[error("Invalid proof: {0}")]
|
||||
// InvalidProof(String),
|
||||
|
||||
// #[error("Malformed Groth16 verifying key")]
|
||||
// MalformedVerifyingKey,
|
||||
}
|
||||
@@ -3,28 +3,26 @@
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
use super::super::errors::ProofVerifyError;
|
||||
use crate::math::Math;
|
||||
use crate::poseidon_transcript::PoseidonTranscript;
|
||||
|
||||
use super::super::errors::ProofVerifyError;
|
||||
use super::super::group::{
|
||||
CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement,
|
||||
VartimeMultiscalarMul,
|
||||
};
|
||||
use super::super::scalar::Scalar;
|
||||
use crate::transcript::Transcript;
|
||||
use ark_ec::AffineRepr;
|
||||
use ark_ec::CurveGroup;
|
||||
use ark_ff::Field;
|
||||
use ark_serialize::*;
|
||||
use ark_std::{One, Zero};
|
||||
use core::iter;
|
||||
use std::ops::Mul;
|
||||
use std::ops::MulAssign;
|
||||
|
||||
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
|
||||
pub struct BulletReductionProof {
|
||||
L_vec: Vec<CompressedGroup>,
|
||||
R_vec: Vec<CompressedGroup>,
|
||||
pub struct BulletReductionProof<G: CurveGroup> {
|
||||
L_vec: Vec<G>,
|
||||
R_vec: Vec<G>,
|
||||
}
|
||||
|
||||
impl BulletReductionProof {
|
||||
impl<G: CurveGroup> BulletReductionProof<G> {
|
||||
/// Create an inner-product proof.
|
||||
///
|
||||
/// The proof is created with respect to the bases \\(G\\).
|
||||
@@ -36,21 +34,21 @@ impl BulletReductionProof {
|
||||
/// The lengths of the vectors must all be the same, and must all be
|
||||
/// either 0 or a power of 2.
|
||||
pub fn prove(
|
||||
transcript: &mut PoseidonTranscript,
|
||||
Q: &GroupElement,
|
||||
G_vec: &[GroupElement],
|
||||
H: &GroupElement,
|
||||
a_vec: &[Scalar],
|
||||
b_vec: &[Scalar],
|
||||
blind: &Scalar,
|
||||
blinds_vec: &[(Scalar, Scalar)],
|
||||
transcript: &mut PoseidonTranscript<G::ScalarField>,
|
||||
Q: &G::Affine,
|
||||
G_vec: &[G::Affine],
|
||||
H: &G::Affine,
|
||||
a_vec: &[G::ScalarField],
|
||||
b_vec: &[G::ScalarField],
|
||||
blind: &G::ScalarField,
|
||||
blinds_vec: &[(G::ScalarField, G::ScalarField)],
|
||||
) -> (
|
||||
BulletReductionProof,
|
||||
GroupElement,
|
||||
Scalar,
|
||||
Scalar,
|
||||
GroupElement,
|
||||
Scalar,
|
||||
BulletReductionProof<G>,
|
||||
G,
|
||||
G::ScalarField,
|
||||
G::ScalarField,
|
||||
G,
|
||||
G::ScalarField,
|
||||
) {
|
||||
// Create slices G, H, a, b backed by their respective
|
||||
// vectors. This lets us reslice as we compress the lengths
|
||||
@@ -85,68 +83,70 @@ impl BulletReductionProof {
      let c_R = inner_product(a_R, b_L);

      let (blind_L, blind_R) = blinds_iter.next().unwrap();
      let gright_vec = G_R
        .iter()
        .chain(iter::once(Q))
        .chain(iter::once(H))
        .cloned()
        .collect::<Vec<G::Affine>>();

      let L = GroupElement::vartime_multiscalar_mul(
        a_L.iter()
      let L = G::msm_unchecked(
        &gright_vec,
        a_L
          .iter()
          .chain(iter::once(&c_L))
          .chain(iter::once(blind_L))
          .copied()
          .collect::<Vec<Scalar>>()
          .as_slice(),
        G_R.iter()
          .chain(iter::once(Q))
          .chain(iter::once(H))
          .copied()
          .collect::<Vec<GroupElement>>()
          .collect::<Vec<G::ScalarField>>()
          .as_slice(),
      );

      let R = GroupElement::vartime_multiscalar_mul(
        a_R.iter()
      let gl_vec = G_L
        .iter()
        .chain(iter::once(Q))
        .chain(iter::once(H))
        .cloned()
        .collect::<Vec<G::Affine>>();
      let R = G::msm_unchecked(
        &gl_vec,
        a_R
          .iter()
          .chain(iter::once(&c_R))
          .chain(iter::once(blind_R))
          .copied()
          .collect::<Vec<Scalar>>()
          .as_slice(),
        G_L.iter()
          .chain(iter::once(Q))
          .chain(iter::once(H))
          .copied()
          .collect::<Vec<GroupElement>>()
          .collect::<Vec<G::ScalarField>>()
          .as_slice(),
      );

      transcript.append_point(&L.compress());
      transcript.append_point(&R.compress());
      transcript.append_point(b"", &L);
      transcript.append_point(b"", &R);

      let u = transcript.challenge_scalar();
      let u: G::ScalarField = transcript.challenge_scalar(b"");
      let u_inv = u.inverse().unwrap();

      for i in 0..n {
        a_L[i] = a_L[i] * u + u_inv * a_R[i];
        b_L[i] = b_L[i] * u_inv + u * b_R[i];
        G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
        G_L[i] = (G_L[i].mul(u_inv) + G_R[i].mul(u)).into_affine();
      }

      blind_fin = blind_fin + u * u * blind_L + u_inv * u_inv * blind_R;

      L_vec.push(L.compress());
      R_vec.push(R.compress());
      L_vec.push(L);
      R_vec.push(R);

      a = a_L;
      b = b_L;
      G = G_L;
    }

    let Gamma_hat =
      GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_fin], &[G[0], *Q, *H]);
    let Gamma_hat = G::msm_unchecked(&[G[0], *Q, *H], &[a[0], a[0] * b[0], blind_fin]);

    (
      BulletReductionProof { L_vec, R_vec },
      Gamma_hat,
      a[0],
      b[0],
      G[0],
      G[0].into_group(),
      blind_fin,
    )
  }
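As a reading aid for the hunk above: each round of `prove` folds the instance in half with a challenge \(u\), in the standard Bulletproofs style that the assignments in the loop spell out:

\[
a' = u\,a_L + u^{-1}a_R, \qquad
b' = u^{-1}b_L + u\,b_R, \qquad
G' = u^{-1}G_L + u\,G_R,
\]
\[
\mathrm{blind}' = \mathrm{blind} + u^2\,\mathrm{blind}_L + u^{-2}\,\mathrm{blind}_R,
\]

so after \(\log_2 n\) rounds the vectors have length one and `Gamma_hat` is exactly the closing MSM \(a_0 G_0 + a_0 b_0 Q + \mathrm{blind}_{\mathrm{fin}} H\).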
@@ -157,8 +157,15 @@ impl BulletReductionProof {
  fn verification_scalars(
    &self,
    n: usize,
    transcript: &mut PoseidonTranscript,
  ) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
    transcript: &mut PoseidonTranscript<G::ScalarField>,
  ) -> Result<
    (
      Vec<G::ScalarField>,
      Vec<G::ScalarField>,
      Vec<G::ScalarField>,
    ),
    ProofVerifyError,
  > {
    let lg_n = self.L_vec.len();
    if lg_n >= 32 {
      // 4 billion multiplications should be enough for anyone
@@ -172,16 +179,16 @@ impl BulletReductionProof {
    // 1. Recompute x_k,...,x_1 based on the proof transcript
    let mut challenges = Vec::with_capacity(lg_n);
    for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
      transcript.append_point(L);
      transcript.append_point(R);
      challenges.push(transcript.challenge_scalar());
      transcript.append_point(b"", L);
      transcript.append_point(b"", R);
      challenges.push(transcript.challenge_scalar(b""));
    }

    // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
    let mut challenges_inv: Vec<Scalar> = challenges.clone();
    let mut challenges_inv: Vec<G::ScalarField> = challenges.clone();

    ark_ff::fields::batch_inversion(&mut challenges_inv);
    let mut allinv: Scalar = Scalar::one();
    let mut allinv = G::ScalarField::one();
    for c in challenges.iter().filter(|s| !s.is_zero()) {
      allinv.mul_assign(c);
    }
@@ -217,42 +224,37 @@ impl BulletReductionProof {
  pub fn verify(
    &self,
    n: usize,
    a: &[Scalar],
    transcript: &mut PoseidonTranscript,
    Gamma: &GroupElement,
    G: &[GroupElement],
  ) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
    a: &[G::ScalarField],
    transcript: &mut PoseidonTranscript<G::ScalarField>,
    Gamma: &G,
    Gs: &[G::Affine],
  ) -> Result<(G, G, G::ScalarField), ProofVerifyError> {
    let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;

    let Ls = self
      .L_vec
      .iter()
      .map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError))
      .collect::<Result<Vec<_>, _>>()?;
    let Ls = &self.L_vec;
    let Rs = &self.R_vec;

    let Rs = self
      .R_vec
      .iter()
      .map(|p| GroupElement::decompress(p).ok_or(ProofVerifyError::InternalError))
      .collect::<Result<Vec<_>, _>>()?;

    let G_hat = GroupElement::vartime_multiscalar_mul(s.as_slice(), G);
    let G_hat = G::msm(Gs, s.as_slice()).map_err(|_| ProofVerifyError::InternalError)?;
    let a_hat = inner_product(a, &s);

    let Gamma_hat = GroupElement::vartime_multiscalar_mul(
      u_sq.iter()
        .chain(u_inv_sq.iter())
        .chain(iter::once(&Scalar::one()))
        .copied()
        .collect::<Vec<Scalar>>()
        .as_slice(),
      Ls.iter()
    let Gamma_hat = G::msm(
      &G::normalize_batch(
        &Ls
          .iter()
          .chain(Rs.iter())
          .chain(iter::once(Gamma))
          .copied()
          .collect::<Vec<GroupElement>>()
          .collect::<Vec<G>>(),
      ),
      u_sq
        .iter()
        .chain(u_inv_sq.iter())
        .chain(iter::once(&G::ScalarField::one()))
        .copied()
        .collect::<Vec<G::ScalarField>>()
        .as_slice(),
    );
    )
    .map_err(|_| ProofVerifyError::InternalError)?;

    Ok((G_hat, Gamma_hat, a_hat))
  }
@@ -263,12 +265,12 @@ impl BulletReductionProof {
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
fn inner_product<F: Field>(a: &[F], b: &[F]) -> F {
  assert!(
    a.len() == b.len(),
    "inner_product(a,b): lengths of vectors do not match"
  );
  let mut out = Scalar::zero();
  let mut out = F::zero();
  for i in 0..a.len() {
    out += a[i] * b[i];
  }
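A quick sanity check of the now field-generic `inner_product`, as a standalone sketch; the helper is reproduced inline since the crate keeps it private:

use ark_bls12_377::Fr;
use ark_ff::Field;

fn inner_product<F: Field>(a: &[F], b: &[F]) -> F {
  assert_eq!(a.len(), b.len(), "lengths of vectors do not match");
  a.iter().zip(b).map(|(x, y)| *x * y).sum()
}

fn main() {
  let a: Vec<Fr> = (1u64..=3).map(Fr::from).collect();
  let b: Vec<Fr> = (4u64..=6).map(Fr::from).collect();
  // 1*4 + 2*5 + 3*6 = 32
  assert_eq!(inner_product(&a, &b), Fr::from(32u64));
}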
723 src/nizk/mod.rs
@@ -1,466 +1,56 @@
#![allow(clippy::too_many_arguments)]
use crate::math::Math;
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};

use super::commitments::{Commitments, MultiCommitGens};
use super::commitments::{MultiCommitGens, PedersenCommit};
use super::errors::ProofVerifyError;
use super::group::{
  CompressGroupElement, CompressedGroup, DecompressGroupElement, GroupElement, UnpackGroupElement,
};
use super::random::RandomTape;
use super::scalar::Scalar;
use ark_ec::ProjectiveCurve;
use ark_ff::PrimeField;
use crate::ark_std::UniformRand;
use crate::math::Math;
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::CurveGroup;

use ark_serialize::*;
use std::ops::Mul;

mod bullet;
use bullet::BulletReductionProof;

#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct KnowledgeProof {
  alpha: CompressedGroup,
  z1: Scalar,
  z2: Scalar,
}

impl KnowledgeProof {
  fn protocol_name() -> &'static [u8] {
    b"knowledge proof"
  }

  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    random_tape: &mut RandomTape,
    x: &Scalar,
    r: &Scalar,
  ) -> (KnowledgeProof, CompressedGroup) {
    // transcript.append_protocol_name(KnowledgeProof::protocol_name());

    // produce two random Scalars
    let t1 = random_tape.random_scalar(b"t1");
    let t2 = random_tape.random_scalar(b"t2");

    let C = x.commit(r, gens_n).compress();
    C.append_to_poseidon(transcript);

    let alpha = t1.commit(&t2, gens_n).compress();
    alpha.append_to_poseidon(transcript);

    let c = transcript.challenge_scalar();

    let z1 = c * x + t1;
    let z2 = c * r + t2;

    (KnowledgeProof { alpha, z1, z2 }, C)
  }

  pub fn verify(
    &self,
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    C: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    // transcript.append_protocol_name(KnowledgeProof::protocol_name());
    C.append_to_poseidon(transcript);
    self.alpha.append_to_poseidon(transcript);

    let c = transcript.challenge_scalar();

    let lhs = self.z1.commit(&self.z2, gens_n).compress();
    let rhs = (C.unpack()?.mul(c.into_repr()) + self.alpha.unpack()?).compress();

    if lhs == rhs {
      Ok(())
    } else {
      Err(ProofVerifyError::InternalError)
    }
  }
}
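Written out as a note, the check in `KnowledgeProof::verify` is the usual Schnorr-style equation over the Pedersen bases \(g, h\):

\[
z_1 g + z_2 h \stackrel{?}{=} \alpha + c\,C,
\quad\text{with}\quad
C = x g + r h,\; \alpha = t_1 g + t_2 h,\; z_1 = c x + t_1,\; z_2 = c r + t_2,
\]

and both sides expand to \((c x + t_1)\,g + (c r + t_2)\,h\), so an honest prover always passes.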

#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct EqualityProof {
  alpha: CompressedGroup,
  z: Scalar,
}

impl EqualityProof {
  fn protocol_name() -> &'static [u8] {
    b"equality proof"
  }

  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    random_tape: &mut RandomTape,
    v1: &Scalar,
    s1: &Scalar,
    v2: &Scalar,
    s2: &Scalar,
  ) -> (EqualityProof, CompressedGroup, CompressedGroup) {
    // transcript.append_protocol_name(EqualityProof::protocol_name());

    // produce a random Scalar
    let r = random_tape.random_scalar(b"r");

    let C1 = v1.commit(s1, gens_n).compress();
    transcript.append_point(&C1);

    let C2 = v2.commit(s2, gens_n).compress();
    transcript.append_point(&C2);

    let alpha = gens_n.h.mul(r.into_repr()).compress();
    transcript.append_point(&alpha);

    let c = transcript.challenge_scalar();

    let z = c * ((*s1) - s2) + r;

    (EqualityProof { alpha, z }, C1, C2)
  }

  pub fn verify(
    &self,
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    C1: &CompressedGroup,
    C2: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    // transcript.append_protocol_name(EqualityProof::protocol_name());

    transcript.append_point(C1);
    transcript.append_point(C2);
    transcript.append_point(&self.alpha);

    let c = transcript.challenge_scalar();
    let rhs = {
      let C = C1.unpack()? - C2.unpack()?;
      (C.mul(c.into_repr()) + self.alpha.unpack()?).compress()
    };
    println!("rhs {:?}", rhs);

    let lhs = gens_n.h.mul(self.z.into_repr()).compress();
    println!("lhs {:?}", lhs);
    if lhs == rhs {
      Ok(())
    } else {
      Err(ProofVerifyError::InternalError)
    }
  }
}

#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct ProductProof {
  alpha: CompressedGroup,
  beta: CompressedGroup,
  delta: CompressedGroup,
  z: Vec<Scalar>,
}

impl ProductProof {
  fn protocol_name() -> &'static [u8] {
    b"product proof"
  }

  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    random_tape: &mut RandomTape,
    x: &Scalar,
    rX: &Scalar,
    y: &Scalar,
    rY: &Scalar,
    z: &Scalar,
    rZ: &Scalar,
  ) -> (
    ProductProof,
    CompressedGroup,
    CompressedGroup,
    CompressedGroup,
  ) {
    // transcript.append_protocol_name(ProductProof::protocol_name());

    // produce five random Scalars
    let b1 = random_tape.random_scalar(b"b1");
    let b2 = random_tape.random_scalar(b"b2");
    let b3 = random_tape.random_scalar(b"b3");
    let b4 = random_tape.random_scalar(b"b4");
    let b5 = random_tape.random_scalar(b"b5");

    let X_unc = x.commit(rX, gens_n);

    let X = X_unc.compress();
    transcript.append_point(&X);
    let X_new = GroupElement::decompress(&X);

    assert_eq!(X_unc, X_new.unwrap());

    let Y = y.commit(rY, gens_n).compress();
    transcript.append_point(&Y);

    let Z = z.commit(rZ, gens_n).compress();
    transcript.append_point(&Z);

    let alpha = b1.commit(&b2, gens_n).compress();
    transcript.append_point(&alpha);

    let beta = b3.commit(&b4, gens_n).compress();
    transcript.append_point(&beta);

    let delta = {
      let gens_X = &MultiCommitGens {
        n: 1,
        G: vec![GroupElement::decompress(&X).unwrap()],
        h: gens_n.h,
      };
      b3.commit(&b5, gens_X).compress()
    };
    transcript.append_point(&delta);

    let c = transcript.challenge_scalar();

    let z1 = b1 + c * x;
    let z2 = b2 + c * rX;
    let z3 = b3 + c * y;
    let z4 = b4 + c * rY;
    let z5 = b5 + c * ((*rZ) - (*rX) * y);
    let z = [z1, z2, z3, z4, z5].to_vec();

    (
      ProductProof {
        alpha,
        beta,
        delta,
        z,
      },
      X,
      Y,
      Z,
    )
  }

  fn check_equality(
    P: &CompressedGroup,
    X: &CompressedGroup,
    c: &Scalar,
    gens_n: &MultiCommitGens,
    z1: &Scalar,
    z2: &Scalar,
  ) -> bool {
    println!("{:?}", X);
    let lhs = (GroupElement::decompress(P).unwrap()
      + GroupElement::decompress(X).unwrap().mul(c.into_repr()))
    .compress();
    let rhs = z1.commit(z2, gens_n).compress();

    lhs == rhs
  }

  pub fn verify(
    &self,
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    X: &CompressedGroup,
    Y: &CompressedGroup,
    Z: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    // transcript.append_protocol_name(ProductProof::protocol_name());

    X.append_to_poseidon(transcript);
    Y.append_to_poseidon(transcript);
    Z.append_to_poseidon(transcript);
    self.alpha.append_to_poseidon(transcript);
    self.beta.append_to_poseidon(transcript);
    self.delta.append_to_poseidon(transcript);

    let z1 = self.z[0];
    let z2 = self.z[1];
    let z3 = self.z[2];
    let z4 = self.z[3];
    let z5 = self.z[4];

    let c = transcript.challenge_scalar();

    if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
      && ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
      && ProductProof::check_equality(
        &self.delta,
        Z,
        &c,
        &MultiCommitGens {
          n: 1,
          G: vec![X.unpack()?],
          h: gens_n.h,
        },
        &z3,
        &z5,
      )
    {
      Ok(())
    } else {
      Err(ProofVerifyError::InternalError)
    }
  }
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DotProductProof {
  delta: CompressedGroup,
  beta: CompressedGroup,
  z: Vec<Scalar>,
  z_delta: Scalar,
  z_beta: Scalar,
}

impl DotProductProof {
  fn protocol_name() -> &'static [u8] {
    b"dot product proof"
  }

  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
    assert_eq!(a.len(), b.len());
    (0..a.len()).map(|i| a[i] * b[i]).sum()
  }

  pub fn prove(
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    random_tape: &mut RandomTape,
    x_vec: &[Scalar],
    blind_x: &Scalar,
    a_vec: &[Scalar],
    y: &Scalar,
    blind_y: &Scalar,
  ) -> (DotProductProof, CompressedGroup, CompressedGroup) {
    // transcript.append_protocol_name(DotProductProof::protocol_name());

    let n = x_vec.len();
    assert_eq!(x_vec.len(), a_vec.len());
    assert_eq!(gens_n.n, a_vec.len());
    assert_eq!(gens_1.n, 1);

    // produce randomness for the proofs
    let d_vec = random_tape.random_vector(b"d_vec", n);
    let r_delta = random_tape.random_scalar(b"r_delta");
    let r_beta = random_tape.random_scalar(b"r_beta");

    let Cx = x_vec.commit(blind_x, gens_n).compress();
    Cx.append_to_poseidon(transcript);

    let Cy = y.commit(blind_y, gens_1).compress();
    Cy.append_to_poseidon(transcript);

    transcript.append_scalar_vector(a_vec);

    let delta = d_vec.commit(&r_delta, gens_n).compress();
    delta.append_to_poseidon(transcript);

    let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);

    let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
    beta.append_to_poseidon(transcript);

    let c = transcript.challenge_scalar();

    let z = (0..d_vec.len())
      .map(|i| c * x_vec[i] + d_vec[i])
      .collect::<Vec<Scalar>>();

    let z_delta = c * blind_x + r_delta;
    let z_beta = c * blind_y + r_beta;

    (
      DotProductProof {
        delta,
        beta,
        z,
        z_delta,
        z_beta,
      },
      Cx,
      Cy,
    )
  }

  pub fn verify(
    &self,
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut PoseidonTranscript,
    a: &[Scalar],
    Cx: &CompressedGroup,
    Cy: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    assert_eq!(gens_n.n, a.len());
    assert_eq!(gens_1.n, 1);

    // transcript.append_protocol_name(DotProductProof::protocol_name());
    Cx.append_to_poseidon(transcript);
    Cy.append_to_poseidon(transcript);
    transcript.append_scalar_vector(a);
    self.delta.append_to_poseidon(transcript);
    self.beta.append_to_poseidon(transcript);

    let c = transcript.challenge_scalar();

    let mut result = Cx.unpack()?.mul(c.into_repr()) + self.delta.unpack()?
      == self.z.commit(&self.z_delta, gens_n);

    let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
    result &= Cy.unpack()?.mul(c.into_repr()) + self.beta.unpack()?
      == dotproduct_z_a.commit(&self.z_beta, gens_1);
    if result {
      Ok(())
    } else {
      Err(ProofVerifyError::InternalError)
    }
  }
}
#[derive(Clone)]
pub struct DotProductProofGens {
pub struct DotProductProofGens<G: CurveGroup> {
  n: usize,
  pub gens_n: MultiCommitGens,
  pub gens_1: MultiCommitGens,
  pub gens_n: MultiCommitGens<G>,
  pub gens_1: MultiCommitGens<G>,
}

impl DotProductProofGens {
impl<G: CurveGroup> DotProductProofGens<G> {
  pub fn new(n: usize, label: &[u8]) -> Self {
    let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
    let (gens_n, gens_1) = MultiCommitGens::<G>::new(n + 1, label).split_at(n);
    DotProductProofGens { n, gens_n, gens_1 }
  }
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct DotProductProofLog {
  bullet_reduction_proof: BulletReductionProof,
  delta: CompressedGroup,
  beta: CompressedGroup,
  z1: Scalar,
  z2: Scalar,
pub struct DotProductProofLog<G: CurveGroup> {
  bullet_reduction_proof: BulletReductionProof<G>,
  delta: G,
  beta: G,
  z1: G::ScalarField,
  z2: G::ScalarField,
}

impl DotProductProofLog {
  fn protocol_name() -> &'static [u8] {
    b"dot product proof (log)"
  }

  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
    assert_eq!(a.len(), b.len());
    (0..a.len()).map(|i| a[i] * b[i]).sum()
  }

impl<G> DotProductProofLog<G>
where
  G: CurveGroup,
  G::ScalarField: Absorb,
{
  pub fn prove(
    gens: &DotProductProofGens,
    transcript: &mut PoseidonTranscript,
    random_tape: &mut RandomTape,
    x_vec: &[Scalar],
    blind_x: &Scalar,
    a_vec: &[Scalar],
    y: &Scalar,
    blind_y: &Scalar,
  ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
    gens: &DotProductProofGens<G>,
    transcript: &mut PoseidonTranscript<G::ScalarField>,
    x_vec: &[G::ScalarField],
    blind_x: &G::ScalarField,
    a_vec: &[G::ScalarField],
    y: &G::ScalarField,
    blind_y: &G::ScalarField,
  ) -> (Self, G, G) {
    // transcript.append_protocol_name(DotProductProofLog::protocol_name());

    let n = x_vec.len();
@@ -468,27 +58,30 @@ impl DotProductProofLog {
    assert_eq!(gens.n, n);

    // produce randomness for generating a proof
    let d = random_tape.random_scalar(b"d");
    let r_delta = random_tape.random_scalar(b"r_delta");
    let r_beta = random_tape.random_scalar(b"r_delta");
    let d = G::ScalarField::rand(&mut rand::thread_rng());
    let r_delta = G::ScalarField::rand(&mut rand::thread_rng()).into();
    let r_beta = G::ScalarField::rand(&mut rand::thread_rng()).into();
    let blinds_vec = {
      let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
      let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
      (0..v1.len())
        .map(|i| (v1[i], v2[i]))
        .collect::<Vec<(Scalar, Scalar)>>()
      (0..2 * n.log_2())
        .map(|_| {
          (
            G::ScalarField::rand(&mut rand::thread_rng()).into(),
            G::ScalarField::rand(&mut rand::thread_rng()).into(),
          )
        })
        .collect::<Vec<(G::ScalarField, G::ScalarField)>>()
    };

    let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
    transcript.append_point(&Cx);
    let Cx = PedersenCommit::commit_slice(x_vec, blind_x, &gens.gens_n);
    transcript.append_point(b"", &Cx);

    let Cy = y.commit(blind_y, &gens.gens_1).compress();
    transcript.append_point(&Cy);
    transcript.append_scalar_vector(a_vec);
    let Cy = PedersenCommit::commit_scalar(y, blind_y, &gens.gens_1);
    transcript.append_point(b"", &Cy);
    transcript.append_scalar_vector(b"", &a_vec);

    let blind_Gamma = (*blind_x) + blind_y;
    let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
      BulletReductionProof::prove(
      BulletReductionProof::<G>::prove(
        transcript,
        &gens.gens_1.G[0],
        &gens.gens_n.G,
@@ -503,23 +96,23 @@ impl DotProductProofLog {
    let delta = {
      let gens_hat = MultiCommitGens {
        n: 1,
        G: vec![g_hat],
        G: vec![g_hat.into_affine()],
        h: gens.gens_1.h,
      };
      d.commit(&r_delta, &gens_hat).compress()
      PedersenCommit::commit_scalar(&d, &r_delta, &gens_hat)
    };
    transcript.append_point(&delta);
    transcript.append_point(b"", &delta);

    let beta = d.commit(&r_beta, &gens.gens_1).compress();
    transcript.append_point(&beta);
    let beta = PedersenCommit::commit_scalar(&d, &r_beta, &gens.gens_1);
    transcript.append_point(b"", &beta);

    let c = transcript.challenge_scalar();
    let c: G::ScalarField = transcript.challenge_scalar(b"");

    let z1 = d + c * y_hat;
    let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;

    (
      DotProductProofLog {
      Self {
        bullet_reduction_proof,
        delta,
        beta,
@@ -534,49 +127,47 @@ impl DotProductProofLog {
  pub fn verify(
    &self,
    n: usize,
    gens: &DotProductProofGens,
    transcript: &mut PoseidonTranscript,
    a: &[Scalar],
    Cx: &CompressedGroup,
    Cy: &CompressedGroup,
    gens: &DotProductProofGens<G>,
    transcript: &mut PoseidonTranscript<G::ScalarField>,
    a: &[G::ScalarField],
    Cx: &G,
    Cy: &G,
  ) -> Result<(), ProofVerifyError> {
    assert_eq!(gens.n, n);
    assert_eq!(a.len(), n);

    // transcript.append_protocol_name(DotProductProofLog::protocol_name());
    // Cx.append_to_poseidon( transcript);
    // Cy.append_to_poseidon( transcript);
    // a.append_to_poseidon( transcript);
    // Cx.write_to_transcript( transcript);
    // Cy.write_to_transcript( transcript);
    // a.write_to_transcript( transcript);

    transcript.append_point(Cx);
    transcript.append_point(Cy);
    transcript.append_scalar_vector(a);
    transcript.append_point(b"", Cx);
    transcript.append_point(b"", Cy);
    transcript.append_scalar_vector(b"", &a);

    let Gamma = Cx.unpack()? + Cy.unpack()?;
    let Gamma = Cx.add(Cy);

    let (g_hat, Gamma_hat, a_hat) =
      self.bullet_reduction_proof
      self
        .bullet_reduction_proof
        .verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
    // self.delta.append_to_poseidon( transcript);
    // self.beta.append_to_poseidon( transcript);
    // self.delta.write_to_transcript( transcript);
    // self.beta.write_to_transcript( transcript);

    transcript.append_point(&self.delta);
    transcript.append_point(&self.beta);
    transcript.append_point(b"", &self.delta);
    transcript.append_point(b"", &self.beta);

    let c = transcript.challenge_scalar();
    let c = transcript.challenge_scalar(b"");

    let c_s = &c;
    let beta_s = self.beta.unpack()?;
    let beta_s = self.beta;
    let a_hat_s = &a_hat;
    let delta_s = self.delta.unpack()?;
    let delta_s = self.delta;
    let z1_s = &self.z1;
    let z2_s = &self.z2;

    let lhs = ((Gamma_hat.mul(c_s.into_repr()) + beta_s).mul(a_hat_s.into_repr()) + delta_s)
      .compress();
    let rhs = ((g_hat + gens.gens_1.G[0].mul(a_hat_s.into_repr())).mul(z1_s.into_repr())
      + gens.gens_1.h.mul(z2_s.into_repr()))
    .compress();
    let lhs = (Gamma_hat.mul(c_s) + beta_s).mul(a_hat_s) + delta_s;
    let rhs = (g_hat + gens.gens_1.G[0].mul(a_hat_s)).mul(z1_s) + gens.gens_1.h.mul(z2_s);

    assert_eq!(lhs, rhs);
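For readability, a note on the final equation that `verify` asserts, in the code's notation:

\[
\hat a\,\bigl(c\,\hat\Gamma + \beta\bigr) + \delta
\stackrel{?}{=}
z_1\bigl(\hat g + \hat a\,g\bigr) + z_2\,h,
\]

where \(\hat g\), \(\hat\Gamma\), \(\hat a\) come out of the bullet reduction and \(g\), \(h\) stand for `gens.gens_1.G[0]` and `gens.gens_1.h`.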
@@ -595,133 +186,8 @@ mod tests {

  use super::*;
  use ark_std::UniformRand;
  #[test]
  fn check_knowledgeproof() {
    let mut rng = ark_std::rand::thread_rng();

    let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");

    let x = Scalar::rand(&mut rng);
    let r = Scalar::rand(&mut rng);

    let params = poseidon_params();

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let (proof, committed_value) =
      KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);

    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(&gens_1, &mut verifier_transcript, &committed_value)
      .is_ok());
  }

  #[test]
  fn check_equalityproof() {
    let mut rng = ark_std::rand::thread_rng();
    let params = poseidon_params();

    let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
    let v1 = Scalar::rand(&mut rng);
    let v2 = v1;
    let s1 = Scalar::rand(&mut rng);
    let s2 = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let (proof, C1, C2) = EqualityProof::prove(
      &gens_1,
      &mut prover_transcript,
      &mut random_tape,
      &v1,
      &s1,
      &v2,
      &s2,
    );

    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(&gens_1, &mut verifier_transcript, &C1, &C2)
      .is_ok());
  }

  #[test]
  fn check_productproof() {
    let mut rng = ark_std::rand::thread_rng();
    let pt = GroupElement::rand(&mut rng);
    let pt_c = pt.compress();
    let pt2 = GroupElement::decompress(&pt_c).unwrap();
    assert_eq!(pt, pt2);
    let params = poseidon_params();

    let gens_1 = MultiCommitGens::new(1, b"test-productproof");
    let x = Scalar::rand(&mut rng);
    let rX = Scalar::rand(&mut rng);
    let y = Scalar::rand(&mut rng);
    let rY = Scalar::rand(&mut rng);
    let z = x * y;
    let rZ = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let (proof, X, Y, Z) = ProductProof::prove(
      &gens_1,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &rX,
      &y,
      &rY,
      &z,
      &rZ,
    );

    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z)
      .is_ok());
  }

  #[test]
  fn check_dotproductproof() {
    let mut rng = ark_std::rand::thread_rng();

    let n = 1024;

    let gens_1 = MultiCommitGens::new(1, b"test-two");
    let gens_1024 = MultiCommitGens::new(n, b"test-1024");
    let params = poseidon_params();

    let mut x: Vec<Scalar> = Vec::new();
    let mut a: Vec<Scalar> = Vec::new();
    for _ in 0..n {
      x.push(Scalar::rand(&mut rng));
      a.push(Scalar::rand(&mut rng));
    }
    let y = DotProductProofLog::compute_dotproduct(&x, &a);
    let r_x = Scalar::rand(&mut rng);
    let r_y = Scalar::rand(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let (proof, Cx, Cy) = DotProductProof::prove(
      &gens_1,
      &gens_1024,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &a,
      &y,
      &r_y,
    );

    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy)
      .is_ok());
  }
  type F = ark_bls12_377::Fr;
  type G = ark_bls12_377::G1Projective;

  #[test]
  fn check_dotproductproof_log() {
@@ -729,28 +195,19 @@ mod tests {

    let n = 1024;

    let gens = DotProductProofGens::new(n, b"test-1024");
    let gens = DotProductProofGens::<G>::new(n, b"test-1024");

    let x: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
    let a: Vec<Scalar> = (0..n).map(|_i| Scalar::rand(&mut rng)).collect();
    let y = DotProductProof::compute_dotproduct(&x, &a);
    let x: Vec<F> = (0..n).map(|_i| F::rand(&mut rng)).collect();
    let a: Vec<F> = (0..n).map(|_i| F::rand(&mut rng)).collect();
    let y = crate::dot_product(&x, &a);

    let r_x = Scalar::rand(&mut rng);
    let r_y = Scalar::rand(&mut rng);
    let r_x = F::rand(&mut rng);
    let r_y = F::rand(&mut rng);

    let params = poseidon_params();
    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let (proof, Cx, Cy) = DotProductProofLog::prove(
      &gens,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &a,
      &y,
      &r_y,
    );
    let (proof, Cx, Cy) =
      DotProductProofLog::<G>::prove(&gens, &mut prover_transcript, &x, &r_x, &a, &y, &r_y);

    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof

2320 src/parameters.rs
File diff suppressed because it is too large
@@ -1,82 +1,118 @@
use crate::group::{CompressedGroup, Fr};

use super::scalar::Scalar;
use ark_bls12_377::Bls12_377 as I;
use ark_poly_commit::multilinear_pc::data_structures::Commitment;
use ark_serialize::CanonicalSerialize;
// use ark_r1cs_std::prelude::*;
use ark_sponge::{
  poseidon::{PoseidonParameters, PoseidonSponge},
  CryptographicSponge,
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::{
  poseidon::{PoseidonConfig, PoseidonSponge},
  Absorb, CryptographicSponge,
};

use ark_ec::{pairing::Pairing, CurveGroup};
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;
use ark_serialize::Compress;
#[derive(Clone)]
/// TODO
pub struct PoseidonTranscript {
  sponge: PoseidonSponge<Fr>,
  params: PoseidonParameters<Fr>,
pub struct PoseidonTranscript<F: PrimeField> {
  sponge: PoseidonSponge<F>,
  params: PoseidonConfig<F>,
}

impl PoseidonTranscript {
impl<F: PrimeField> Transcript for PoseidonTranscript<F> {
  fn domain_sep(&mut self) {
    self.sponge.absorb(&b"testudo".to_vec());
  }

  fn append<S: CanonicalSerialize>(&mut self, _label: &'static [u8], point: &S) {
    let mut buf = Vec::new();
    point
      .serialize_with_mode(&mut buf, Compress::Yes)
      .expect("serialization failed");
    self.sponge.absorb(&buf);
  }

  fn challenge_scalar<FF: PrimeField>(&mut self, _label: &'static [u8]) -> FF {
    self.sponge.squeeze_field_elements(1).remove(0)
  }
}

impl<F: PrimeField> PoseidonTranscript<F> {
  /// create a new transcript
  pub fn new(params: &PoseidonParameters<Fr>) -> Self {
  pub fn new(params: &PoseidonConfig<F>) -> Self {
    let sponge = PoseidonSponge::new(params);
    PoseidonTranscript {
      sponge,
      params: params.clone(),
    }
  }
}

  pub fn new_from_state(&mut self, challenge: &Scalar) {
    self.sponge = PoseidonSponge::new(&self.params);
    self.append_scalar(challenge);
impl<F: PrimeField + Absorb> PoseidonTranscript<F> {
  pub fn new_from_state(&mut self, challenge: &F) {
    self.sponge = PoseidonSponge::new(&self.params.clone());
    self.append_scalar(b"", challenge);
  }
}

  pub fn append_u64(&mut self, x: u64) {
impl<F: PrimeField> PoseidonTranscript<F> {
  pub fn append_u64(&mut self, _label: &'static [u8], x: u64) {
    self.sponge.absorb(&x);
  }

  pub fn append_bytes(&mut self, x: &Vec<u8>) {
  pub fn append_bytes(&mut self, _label: &'static [u8], x: &Vec<u8>) {
    self.sponge.absorb(x);
  }

  pub fn append_scalar(&mut self, scalar: &Scalar) {
  pub fn append_scalar<T: PrimeField + Absorb>(&mut self, _label: &'static [u8], scalar: &T) {
    self.sponge.absorb(&scalar);
  }

  pub fn append_point(&mut self, point: &CompressedGroup) {
    self.sponge.absorb(&point.0);
  pub fn append_point<G>(&mut self, _label: &'static [u8], point: &G)
  where
    G: CurveGroup,
  {
    let mut point_encoding = Vec::new();
    point
      .serialize_with_mode(&mut point_encoding, Compress::Yes)
      .unwrap();
    self.sponge.absorb(&point_encoding);
  }

  pub fn append_scalar_vector(&mut self, scalars: &[Scalar]) {
  pub fn append_scalar_vector<T: PrimeField + Absorb>(
    &mut self,
    _label: &'static [u8],
    scalars: &[T],
  ) {
    for scalar in scalars.iter() {
      self.append_scalar(scalar);
      self.append_scalar(b"", scalar);
    }
  }

  pub fn challenge_scalar(&mut self) -> Scalar {
    self.sponge.squeeze_field_elements(1).remove(0)
  }

  pub fn challenge_vector(&mut self, len: usize) -> Vec<Scalar> {
    self.sponge.squeeze_field_elements(len)
  }
}
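A minimal usage sketch of the new generic transcript, assuming the crate's `poseidon_params()` helper seen in the tests earlier (the module path is an assumption): prover and verifier drive independent transcripts, and absorbing the same data yields the same Fiat-Shamir challenge.

use crate::parameters::poseidon_params; // assumed helper path
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;

type F = ark_bls12_377::Fr;

fn transcripts_agree() {
  let params = poseidon_params();
  let mut prover = PoseidonTranscript::<F>::new(&params);
  let mut verifier = PoseidonTranscript::<F>::new(&params);
  // Both sides absorb the same scalar...
  prover.append_scalar(b"", &F::from(42u64));
  verifier.append_scalar(b"", &F::from(42u64));
  // ...so both squeeze the same challenge.
  let cp: F = prover.challenge_scalar(b"");
  let cv: F = verifier.challenge_scalar(b"");
  assert_eq!(cp, cv);
}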

pub trait AppendToPoseidon {
  fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript);
}

impl AppendToPoseidon for CompressedGroup {
  fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
    transcript.append_point(self);
  }
}

impl AppendToPoseidon for Commitment<I> {
  fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
  pub fn append_gt<E>(&mut self, _label: &'static [u8], g_t: &E::TargetField)
  where
    E: Pairing,
  {
    let mut bytes = Vec::new();
    self.serialize(&mut bytes).unwrap();
    transcript.append_bytes(&bytes);
    g_t.serialize_with_mode(&mut bytes, Compress::Yes).unwrap();
    self.append_bytes(b"", &bytes);
  }
}

pub trait TranscriptWriter<F: PrimeField> {
  fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<F>);
}

#[cfg(test)]
mod test {
  use ark_bls12_381::Fr;
  use ark_ff::PrimeField;
  use poseidon_paramgen;
  #[test]
  fn poseidon_parameters_generation() {
    print_modulus::<Fr>();
    println!(
      "{}",
      poseidon_paramgen::poseidon_build::compile::<Fr>(128, vec![2], Fr::MODULUS, true)
    );
  }

  fn print_modulus<F: PrimeField>() {
    println!("modulus: {:?}", F::MODULUS);
  }
}
@@ -1,42 +1,41 @@
#![allow(dead_code)]
use crate::poseidon_transcript::PoseidonTranscript;

use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial;
use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use crate::poseidon_transcript::PoseidonTranscript;
use crate::transcript::Transcript;
use ark_crypto_primitives::sponge::Absorb;
use ark_ff::PrimeField;
use ark_serialize::*;
use ark_std::One;

#[derive(Debug)]
pub struct ProductCircuit {
  left_vec: Vec<DensePolynomial>,
  right_vec: Vec<DensePolynomial>,
pub struct ProductCircuit<F: PrimeField> {
  left_vec: Vec<DensePolynomial<F>>,
  right_vec: Vec<DensePolynomial<F>>,
}

impl ProductCircuit {
impl<F: PrimeField> ProductCircuit<F> {
  fn compute_layer(
    inp_left: &DensePolynomial,
    inp_right: &DensePolynomial,
  ) -> (DensePolynomial, DensePolynomial) {
    inp_left: &DensePolynomial<F>,
    inp_right: &DensePolynomial<F>,
  ) -> (DensePolynomial<F>, DensePolynomial<F>) {
    let len = inp_left.len() + inp_right.len();
    let outp_left = (0..len / 4)
      .map(|i| inp_left[i] * inp_right[i])
      .collect::<Vec<Scalar>>();
      .collect::<Vec<_>>();
    let outp_right = (len / 4..len / 2)
      .map(|i| inp_left[i] * inp_right[i])
      .collect::<Vec<Scalar>>();

      .collect::<Vec<_>>();
    (
      DensePolynomial::new(outp_left),
      DensePolynomial::new(outp_right),
    )
  }

  pub fn new(poly: &DensePolynomial) -> Self {
    let mut left_vec: Vec<DensePolynomial> = Vec::new();
    let mut right_vec: Vec<DensePolynomial> = Vec::new();
  pub fn new(poly: &DensePolynomial<F>) -> Self {
    let mut left_vec: Vec<DensePolynomial<F>> = Vec::new();
    let mut right_vec: Vec<DensePolynomial<F>> = Vec::new();

    let num_layers = poly.len().log_2();
    let (outp_left, outp_right) = poly.split(poly.len() / 2);
@@ -45,8 +44,7 @@ impl ProductCircuit {
    right_vec.push(outp_right);

    for i in 0..num_layers - 1 {
      let (outp_left, outp_right) =
        ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
      let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
      left_vec.push(outp_left);
      right_vec.push(outp_right);
    }
@@ -57,7 +55,7 @@ impl ProductCircuit {
    }
  }

  pub fn evaluate(&self) -> Scalar {
  pub fn evaluate(&self) -> F {
    let len = self.left_vec.len();
    assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
    assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
@@ -65,14 +63,18 @@ impl ProductCircuit {
  }
}
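A toy model (plain Rust over integers, not the crate's API) of the layering that `compute_layer` performs: each layer multiplies the left half pointwise with the right half, halving the length, so the last layer holds the product of all inputs.

fn product_layers(mut layer: Vec<u64>) -> u64 {
  while layer.len() > 1 {
    let half = layer.len() / 2;
    // pair element i of the left half with element i of the right half
    layer = (0..half).map(|i| layer[i] * layer[i + half]).collect();
  }
  layer[0]
}

fn main() {
  // 2 * 3 * 4 * 5 = 120, computed in log2(4) = 2 layers
  assert_eq!(product_layers(vec![2, 3, 4, 5]), 120);
}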

pub struct DotProductCircuit {
  left: DensePolynomial,
  right: DensePolynomial,
  weight: DensePolynomial,
pub struct DotProductCircuit<F: PrimeField> {
  left: DensePolynomial<F>,
  right: DensePolynomial<F>,
  weight: DensePolynomial<F>,
}

impl DotProductCircuit {
  pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
impl<F: PrimeField> DotProductCircuit<F> {
  pub fn new(
    left: DensePolynomial<F>,
    right: DensePolynomial<F>,
    weight: DensePolynomial<F>,
  ) -> Self {
    assert_eq!(left.len(), right.len());
    assert_eq!(left.len(), weight.len());
    DotProductCircuit {
@@ -82,13 +84,13 @@ impl DotProductCircuit {
    }
  }

  pub fn evaluate(&self) -> Scalar {
  pub fn evaluate(&self) -> F {
    (0..self.left.len())
      .map(|i| self.left[i] * self.right[i] * self.weight[i])
      .sum()
  }

  pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
  pub fn split(&mut self) -> (Self, Self) {
    let idx = self.left.len() / 2;
    assert_eq!(idx * 2, self.left.len());
    let (l1, l2) = self.left.split(idx);
@@ -111,21 +113,22 @@ impl DotProductCircuit {

#[allow(dead_code)]
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct LayerProof {
  pub proof: SumcheckInstanceProof,
  pub claims: Vec<Scalar>,
pub struct LayerProof<F: PrimeField> {
  pub proof: SumcheckInstanceProof<F>,
  pub claims: Vec<F>,
}

#[allow(dead_code)]
impl LayerProof {
impl<F: PrimeField + Absorb> LayerProof<F> {
  pub fn verify(
    &self,
    claim: Scalar,
    claim: F,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut PoseidonTranscript,
  ) -> (Scalar, Vec<Scalar>) {
    self.proof
    transcript: &mut PoseidonTranscript<F>,
  ) -> (F, Vec<F>) {
    self
      .proof
      .verify(claim, num_rounds, degree_bound, transcript)
      .unwrap()
  }
@@ -133,45 +136,46 @@ impl LayerProof {

#[allow(dead_code)]
#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct LayerProofBatched {
  pub proof: SumcheckInstanceProof,
  pub claims_prod_left: Vec<Scalar>,
  pub claims_prod_right: Vec<Scalar>,
pub struct LayerProofBatched<F: PrimeField> {
  pub proof: SumcheckInstanceProof<F>,
  pub claims_prod_left: Vec<F>,
  pub claims_prod_right: Vec<F>,
}

#[allow(dead_code)]
impl LayerProofBatched {
impl<F: PrimeField + Absorb> LayerProofBatched<F> {
  pub fn verify(
    &self,
    claim: Scalar,
    claim: F,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut PoseidonTranscript,
  ) -> (Scalar, Vec<Scalar>) {
    self.proof
    transcript: &mut PoseidonTranscript<F>,
  ) -> (F, Vec<F>) {
    self
      .proof
      .verify(claim, num_rounds, degree_bound, transcript)
      .unwrap()
  }
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProductCircuitEvalProof {
  proof: Vec<LayerProof>,
pub struct ProductCircuitEvalProof<F: PrimeField> {
  proof: Vec<LayerProof<F>>,
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct ProductCircuitEvalProofBatched {
  proof: Vec<LayerProofBatched>,
  claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
pub struct ProductCircuitEvalProofBatched<F: PrimeField> {
  proof: Vec<LayerProofBatched<F>>,
  claims_dotp: (Vec<F>, Vec<F>, Vec<F>),
}

impl ProductCircuitEvalProof {
impl<F: PrimeField + Absorb> ProductCircuitEvalProof<F> {
  #![allow(dead_code)]
  pub fn prove(
    circuit: &mut ProductCircuit,
    transcript: &mut PoseidonTranscript,
  ) -> (Self, Scalar, Vec<Scalar>) {
    let mut proof: Vec<LayerProof> = Vec::new();
    circuit: &mut ProductCircuit<F>,
    transcript: &mut PoseidonTranscript<F>,
  ) -> (Self, F, Vec<F>) {
    let mut proof: Vec<LayerProof<F>> = Vec::new();
    let num_layers = circuit.left_vec.len();

    let mut claim = circuit.evaluate();
@@ -183,8 +187,7 @@ impl ProductCircuitEvalProof {
      assert_eq!(poly_C.len(), len / 2);

      let num_rounds_prod = poly_C.len().log_2();
      let comb_func_prod =
        |poly_A_comp: &Scalar, poly_B_comp: &Scalar, poly_C_comp: &Scalar| -> Scalar {
      let comb_func_prod = |poly_A_comp: &F, poly_B_comp: &F, poly_C_comp: &F| -> F {
        (*poly_A_comp) * poly_B_comp * poly_C_comp
      };
      let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
@@ -197,11 +200,11 @@ impl ProductCircuitEvalProof {
        transcript,
      );

      transcript.append_scalar(&claims_prod[0]);
      transcript.append_scalar(&claims_prod[1]);
      transcript.append_scalar(b"", &claims_prod[0]);
      transcript.append_scalar(b"", &claims_prod[1]);

      // produce a random challenge
      let r_layer = transcript.challenge_scalar();
      let r_layer = transcript.challenge_scalar(b"");
      claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);

      let mut ext = vec![r_layer];
@@ -217,36 +220,28 @@ impl ProductCircuitEvalProof {
    (ProductCircuitEvalProof { proof }, claim, rand)
  }

  pub fn verify(
    &self,
    eval: Scalar,
    len: usize,
    transcript: &mut PoseidonTranscript,
  ) -> (Scalar, Vec<Scalar>) {
  pub fn verify(&self, eval: F, len: usize, transcript: &mut PoseidonTranscript<F>) -> (F, Vec<F>) {
    let num_layers = len.log_2();
    let mut claim = eval;
    let mut rand: Vec<Scalar> = Vec::new();
    let mut rand: Vec<F> = Vec::new();
    //let mut num_rounds = 0;
    assert_eq!(self.proof.len(), num_layers);
    for (num_rounds, i) in (0..num_layers).enumerate() {
      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);

      let claims_prod = &self.proof[i].claims;
      transcript.append_scalar(&claims_prod[0]);
      transcript.append_scalar(&claims_prod[1]);
      transcript.append_scalar(b"", &claims_prod[0]);
      transcript.append_scalar(b"", &claims_prod[1]);

      assert_eq!(rand.len(), rand_prod.len());
      let eq: Scalar = (0..rand.len())
        .map(|i| {
          rand[i] * rand_prod[i]
            + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
        })
      let eq: F = (0..rand.len())
        .map(|i| rand[i] * rand_prod[i] + (F::one() - rand[i]) * (F::one() - rand_prod[i]))
        .product();
      assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);

      // produce a random challenge
      let r_layer = transcript.challenge_scalar();
      claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
      let r_layer = transcript.challenge_scalar(b"");
      claim = (F::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
@@ -256,21 +251,21 @@ impl ProductCircuitEvalProof {
  }
}
impl ProductCircuitEvalProofBatched {
|
||||
impl<F: PrimeField + Absorb> ProductCircuitEvalProofBatched<F> {
|
||||
pub fn prove(
|
||||
prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
|
||||
dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
|
||||
transcript: &mut PoseidonTranscript,
|
||||
) -> (Self, Vec<Scalar>) {
|
||||
prod_circuit_vec: &mut Vec<&mut ProductCircuit<F>>,
|
||||
dotp_circuit_vec: &mut Vec<&mut DotProductCircuit<F>>,
|
||||
transcript: &mut PoseidonTranscript<F>,
|
||||
) -> (Self, Vec<F>) {
|
||||
assert!(!prod_circuit_vec.is_empty());
|
||||
|
||||
let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
|
||||
|
||||
let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
|
||||
let mut proof_layers: Vec<LayerProofBatched<F>> = Vec::new();
|
||||
let num_layers = prod_circuit_vec[0].left_vec.len();
|
||||
let mut claims_to_verify = (0..prod_circuit_vec.len())
|
||||
.map(|i| prod_circuit_vec[i].evaluate())
|
||||
.collect::<Vec<Scalar>>();
|
||||
.collect::<Vec<F>>();
|
||||
let mut rand = Vec::new();
|
||||
for layer_id in (0..num_layers).rev() {
|
||||
// prepare paralell instance that share poly_C first
|
||||
@@ -281,13 +276,12 @@ impl ProductCircuitEvalProofBatched {
|
||||
assert_eq!(poly_C_par.len(), len / 2);
|
||||
|
||||
let num_rounds_prod = poly_C_par.len().log_2();
|
||||
let comb_func_prod =
|
||||
|poly_A_comp: &Scalar, poly_B_comp: &Scalar, poly_C_comp: &Scalar| -> Scalar {
|
||||
let comb_func_prod = |poly_A_comp: &F, poly_B_comp: &F, poly_C_comp: &F| -> F {
|
||||
(*poly_A_comp) * poly_B_comp * poly_C_comp
|
||||
};
|
||||
|
||||
let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
|
||||
let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
|
||||
let mut poly_A_batched_par: Vec<&mut DensePolynomial<F>> = Vec::new();
|
||||
let mut poly_B_batched_par: Vec<&mut DensePolynomial<F>> = Vec::new();
|
||||
for prod_circuit in prod_circuit_vec.iter_mut() {
|
||||
poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
|
||||
poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
|
||||
@@ -299,9 +293,9 @@ impl ProductCircuitEvalProofBatched {
|
||||
);
|
||||
|
||||
// prepare sequential instances that don't share poly_C
|
||||
let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
|
||||
let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
|
||||
let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
|
||||
let mut poly_A_batched_seq: Vec<&mut DensePolynomial<F>> = Vec::new();
|
||||
let mut poly_B_batched_seq: Vec<&mut DensePolynomial<F>> = Vec::new();
|
||||
let mut poly_C_batched_seq: Vec<&mut DensePolynomial<F>> = Vec::new();
|
||||
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
|
||||
// add additional claims
|
||||
for item in dotp_circuit_vec.iter() {
|
||||
@@ -324,13 +318,12 @@ impl ProductCircuitEvalProofBatched {
|
||||
);
|
||||
|
||||
// produce a fresh set of coeffs and a joint claim
|
||||
let coeff_vec = transcript.challenge_vector(claims_to_verify.len());
|
||||
let coeff_vec = transcript.challenge_scalar_vec(b"", claims_to_verify.len());
|
||||
let claim = (0..claims_to_verify.len())
|
||||
.map(|i| claims_to_verify[i] * coeff_vec[i])
|
||||
.sum();
|
||||
|
||||
let (proof, rand_prod, claims_prod, claims_dotp) =
|
||||
SumcheckInstanceProof::prove_cubic_batched(
|
||||
let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
|
||||
&claim,
|
||||
num_rounds_prod,
|
||||
poly_vec_par,
|
||||
@@ -342,28 +335,26 @@ impl ProductCircuitEvalProofBatched {
|
||||
|
||||
let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
|
||||
for i in 0..prod_circuit_vec.len() {
|
||||
transcript.append_scalar(&claims_prod_left[i]);
|
||||
transcript.append_scalar(&claims_prod_right[i]);
|
||||
transcript.append_scalar(b"", &claims_prod_left[i]);
|
||||
transcript.append_scalar(b"", &claims_prod_right[i]);
|
||||
}
|
||||
|
||||
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
|
||||
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
|
||||
for i in 0..dotp_circuit_vec.len() {
|
||||
transcript.append_scalar(&claims_dotp_left[i]);
|
||||
transcript.append_scalar(&claims_dotp_right[i]);
|
||||
transcript.append_scalar(&claims_dotp_weight[i]);
|
||||
transcript.append_scalar(b"", &claims_dotp_left[i]);
|
||||
transcript.append_scalar(b"", &claims_dotp_right[i]);
|
||||
transcript.append_scalar(b"", &claims_dotp_weight[i]);
|
||||
}
|
||||
claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
|
||||
}
|
||||
|
||||
// produce a random challenge to condense two claims into a single claim
|
||||
let r_layer = transcript.challenge_scalar();
|
||||
let r_layer = transcript.challenge_scalar(b"");
claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| {
claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])
})
.collect::<Vec<Scalar>>();
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<F>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);

@@ -387,25 +378,25 @@ impl ProductCircuitEvalProofBatched {
pub fn verify(
&self,
claims_prod_vec: &[Scalar],
claims_dotp_vec: &[Scalar],
claims_prod_vec: &[F],
claims_dotp_vec: &[F],
len: usize,
transcript: &mut PoseidonTranscript,
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
transcript: &mut PoseidonTranscript<F>,
) -> (Vec<F>, Vec<F>, Vec<F>) {
let num_layers = len.log_2();
let mut rand: Vec<Scalar> = Vec::new();
let mut rand: Vec<F> = Vec::new();
//let mut num_rounds = 0;
assert_eq!(self.proof.len(), num_layers);
let mut claims_to_verify = claims_prod_vec.to_owned();
let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
let mut claims_to_verify_dotp: Vec<F> = Vec::new();
for (num_rounds, i) in (0..num_layers).enumerate() {
if i == num_layers - 1 {
claims_to_verify.extend(claims_dotp_vec);
}
// produce random coefficients, one for each instance
let coeff_vec = transcript.challenge_vector(claims_to_verify.len());
let coeff_vec: Vec<F> = transcript.challenge_scalar_vec(b"", claims_to_verify.len());
// produce a joint claim
let claim = (0..claims_to_verify.len())

@@ -420,18 +411,15 @@ impl ProductCircuitEvalProofBatched {
assert_eq!(claims_prod_right.len(), claims_prod_vec.len());
for i in 0..claims_prod_vec.len() {
transcript.append_scalar(&claims_prod_left[i]);
transcript.append_scalar(&claims_prod_right[i]);
transcript.append_scalar(b"", &claims_prod_left[i]);
transcript.append_scalar(b"", &claims_prod_right[i]);
}
assert_eq!(rand.len(), rand_prod.len());
let eq: Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i]
+ (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
})
let eq: F = (0..rand.len())
.map(|i| rand[i] * rand_prod[i] + (F::one() - rand[i]) * (F::one() - rand_prod[i]))
.product();
let mut claim_expected: Scalar = (0..claims_prod_vec.len())
let mut claim_expected: F = (0..claims_prod_vec.len())
.map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
.sum();

@@ -440,9 +428,9 @@ impl ProductCircuitEvalProofBatched {
let num_prod_instances = claims_prod_vec.len();
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
for i in 0..claims_dotp_left.len() {
transcript.append_scalar(&claims_dotp_left[i]);
transcript.append_scalar(&claims_dotp_right[i]);
transcript.append_scalar(&claims_dotp_weight[i]);
transcript.append_scalar(b"", &claims_dotp_left[i]);
transcript.append_scalar(b"", &claims_dotp_right[i]);
transcript.append_scalar(b"", &claims_dotp_weight[i]);
claim_expected += coeff_vec[i + num_prod_instances]
* claims_dotp_left[i]

@@ -454,12 +442,10 @@ impl ProductCircuitEvalProofBatched {
assert_eq!(claim_expected, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar();
let r_layer = transcript.challenge_scalar(b"");
claims_to_verify = (0..claims_prod_left.len())
.map(|i| {
claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])
})
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect();
// add claims to verify for dotp circuit
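The eq factor computed in the verifier above is the multilinear equality polynomial; a self-contained sketch of the same fold, assuming only a generic ark_ff::PrimeField type (the helper name eq_eval is illustrative and not part of this commit):

use ark_ff::PrimeField;

// eq(x, y) = \prod_i (x_i * y_i + (1 - x_i) * (1 - y_i)): it is 1 when the
// boolean vectors x and y coincide, 0 on every other boolean point, and is
// extended multilinearly to arbitrary field vectors.
fn eq_eval<F: PrimeField>(x: &[F], y: &[F]) -> F {
  assert_eq!(x.len(), y.len());
  x.iter()
    .zip(y.iter())
    .map(|(&xi, &yi)| xi * yi + (F::one() - xi) * (F::one() - yi))
    .product()
}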
@@ -1,50 +1,47 @@
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
use crate::transcript::AppendToTranscript;
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{
MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment,
SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
};
use super::timer::Timer;
use ark_ff::Field;
use ark_serialize::*;
use ark_std::{One, UniformRand, Zero};
use digest::{ExtendableOutput, Input};
use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
use merlin::Transcript;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use ark_serialize::*;
use digest::{ExtendableOutput, Input};
use sha3::Shake256;

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)]
pub struct R1CSInstance {
pub struct R1CSInstance<F: PrimeField> {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: SparseMatPolynomial,
B: SparseMatPolynomial,
C: SparseMatPolynomial,
A: SparseMatPolynomial<F>,
B: SparseMatPolynomial<F>,
C: SparseMatPolynomial<F>,
}

pub struct R1CSCommitmentGens {
gens: SparseMatPolyCommitmentGens,
pub struct R1CSCommitmentGens<E: Pairing> {
gens: SparseMatPolyCommitmentGens<E>,
}

impl R1CSCommitmentGens {
pub fn new(
impl<E: Pairing> R1CSCommitmentGens<E> {
pub fn setup(
label: &'static [u8],
num_cons: usize,
num_vars: usize,
num_inputs: usize,
num_nz_entries: usize,
) -> R1CSCommitmentGens {
) -> Self {
assert!(num_inputs < num_vars);
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let gens = SparseMatPolyCommitmentGens::new(
let gens = SparseMatPolyCommitmentGens::setup(
label,
num_poly_vars_x,
num_poly_vars_y,

@@ -56,36 +53,27 @@ impl R1CSCommitmentGens {
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CSCommitment {
pub struct R1CSCommitment<G: CurveGroup> {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
comm: SparseMatPolyCommitment,
comm: SparseMatPolyCommitment<G>,
}

impl AppendToTranscript for R1CSCommitment {
fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
transcript.append_u64(b"num_cons", self.num_cons as u64);
transcript.append_u64(b"num_vars", self.num_vars as u64);
transcript.append_u64(b"num_inputs", self.num_inputs as u64);
self.comm.append_to_transcript(b"comm", transcript);
impl<G: CurveGroup> TranscriptWriter<G::ScalarField> for R1CSCommitment<G> {
fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<G::ScalarField>) {
transcript.append_u64(b"", self.num_cons as u64);
transcript.append_u64(b"", self.num_vars as u64);
transcript.append_u64(b"", self.num_inputs as u64);
self.comm.write_to_transcript(transcript);
}
}

impl AppendToPoseidon for R1CSCommitment {
fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
transcript.append_u64(self.num_cons as u64);
transcript.append_u64(self.num_vars as u64);
transcript.append_u64(self.num_inputs as u64);
self.comm.append_to_poseidon(transcript);
}
pub struct R1CSDecommitment<F: PrimeField> {
dense: MultiSparseMatPolynomialAsDense<F>,
}

pub struct R1CSDecommitment {
dense: MultiSparseMatPolynomialAsDense,
}

impl R1CSCommitment {
impl<G: CurveGroup> R1CSCommitment<G> {
pub fn get_num_cons(&self) -> usize {
self.num_cons
}

@@ -99,15 +87,15 @@ impl R1CSCommitment {
}
}

impl R1CSInstance {
impl<F: PrimeField> R1CSInstance<F> {
pub fn new(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &[(usize, usize, Scalar)],
B: &[(usize, usize, Scalar)],
C: &[(usize, usize, Scalar)],
) -> R1CSInstance {
A: &[(usize, usize, F)],
B: &[(usize, usize, F)],
C: &[(usize, usize, F)],
) -> Self {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs));

@@ -130,13 +118,13 @@ impl R1CSInstance {
let mat_A = (0..A.len())
.map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
.collect::<Vec<SparseMatEntry>>();
.collect::<Vec<_>>();
let mat_B = (0..B.len())
.map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2))
.collect::<Vec<SparseMatEntry>>();
.collect::<Vec<_>>();
let mat_C = (0..C.len())
.map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2))
.collect::<Vec<SparseMatEntry>>();
.collect::<Vec<_>>();
let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A);
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);

@@ -166,7 +154,7 @@ impl R1CSInstance {
pub fn get_digest(&self) -> Vec<u8> {
let mut bytes = Vec::new();
self.serialize(&mut bytes).unwrap();
self.serialize_with_mode(&mut bytes, Compress::Yes).unwrap();
let mut shake = Shake256::default();
shake.input(bytes);
let mut reader = shake.xof_result();

@@ -179,7 +167,7 @@ impl R1CSInstance {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
) -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
) -> (Self, Vec<F>, Vec<F>) {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs));

@@ -198,18 +186,16 @@ impl R1CSInstance {
// produce a random satisfying assignment
let Z = {
let mut Z: Vec<Scalar> = (0..size_z)
.map(|_i| Scalar::rand(&mut rng))
.collect::<Vec<Scalar>>();
Z[num_vars] = Scalar::one(); // set the constant term to 1
let mut Z: Vec<F> = (0..size_z).map(|_i| F::rand(&mut rng)).collect::<Vec<F>>();
Z[num_vars] = F::one(); // set the constant term to 1
Z
};
// three sparse matrices
let mut A: Vec<SparseMatEntry> = Vec::new();
let mut B: Vec<SparseMatEntry> = Vec::new();
let mut C: Vec<SparseMatEntry> = Vec::new();
let one = Scalar::one();
let mut A: Vec<SparseMatEntry<F>> = Vec::new();
let mut B: Vec<SparseMatEntry<F>> = Vec::new();
let mut C: Vec<SparseMatEntry<F>> = Vec::new();
let one = F::one();
for i in 0..num_cons {
let A_idx = i % size_z;
let B_idx = (i + 2) % size_z;

@@ -220,7 +206,7 @@ impl R1CSInstance {
let C_idx = (i + 3) % size_z;
let C_val = Z[C_idx];
if C_val == Scalar::zero() {
if C_val == F::zero() {
C.push(SparseMatEntry::new(i, num_vars, AB_val));
} else {
C.push(SparseMatEntry::new(

@@ -255,13 +241,13 @@ impl R1CSInstance {
(inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
}

pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
pub fn is_sat(&self, vars: &[F], input: &[F]) -> bool {
assert_eq!(vars.len(), self.num_vars);
assert_eq!(input.len(), self.num_inputs);
let z = {
let mut z = vars.to_vec();
z.extend(&vec![Scalar::one()]);
z.extend(&vec![F::one()]);
z.extend(input);
z
};

@@ -291,8 +277,8 @@ impl R1CSInstance {
&self,
num_rows: usize,
num_cols: usize,
z: &[Scalar],
) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
z: &[F],
) -> (DensePolynomial<F>, DensePolynomial<F>, DensePolynomial<F>) {
assert_eq!(num_rows, self.num_cons);
assert_eq!(z.len(), num_cols);
assert!(num_cols > self.num_vars);

@@ -307,8 +293,8 @@ impl R1CSInstance {
&self,
num_rows: usize,
num_cols: usize,
evals: &[Scalar],
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
evals: &[F],
) -> (Vec<F>, Vec<F>, Vec<F>) {
assert_eq!(num_rows, self.num_cons);
assert!(num_cols > self.num_vars);

@@ -319,14 +305,19 @@ impl R1CSInstance {
(evals_A, evals_B, evals_C)
}

pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) {
pub fn evaluate(&self, rx: &[F], ry: &[F]) -> (F, F, F) {
let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry);
(evals[0], evals[1], evals[2])
}

pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
let (comm, dense) =
SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
pub fn commit<E: Pairing<ScalarField = F>>(
&self,
gens: &R1CSCommitmentGens<E>,
) -> (R1CSCommitment<E::G1>, R1CSDecommitment<F>) {
// Noting that matrices A, B and C are sparse, this produces a combined dense
// polynomial from the non-zero entries that we commit to. This
// represents the computational commitment.
let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
let r1cs_comm = R1CSCommitment {
num_cons: self.num_cons,
num_vars: self.num_vars,

@@ -334,6 +325,8 @@ impl R1CSInstance {
comm,
};
// The decommitment is used by the prover to convince the verifier
// the received openings of A, B and C are correct.
let r1cs_decomm = R1CSDecommitment { dense };
(r1cs_comm, r1cs_decomm)

@@ -341,20 +334,23 @@ impl R1CSInstance {
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct R1CSEvalProof {
proof: SparseMatPolyEvalProof,
pub struct R1CSEvalProof<E: Pairing> {
proof: SparseMatPolyEvalProof<E>,
}

impl R1CSEvalProof {
impl<E> R1CSEvalProof<E>
where
E: Pairing,
E::ScalarField: Absorb,
{
pub fn prove(
decomm: &R1CSDecommitment,
rx: &[Scalar], // point at which the polynomial is evaluated
ry: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
gens: &R1CSCommitmentGens,
transcript: &mut PoseidonTranscript,
random_tape: &mut RandomTape,
) -> R1CSEvalProof {
decomm: &R1CSDecommitment<E::ScalarField>,
rx: &[E::ScalarField], // point at which the polynomial is evaluated
ry: &[E::ScalarField],
evals: &(E::ScalarField, E::ScalarField, E::ScalarField),
gens: &R1CSCommitmentGens<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
) -> Self {
let timer = Timer::new("R1CSEvalProof::prove");
let proof = SparseMatPolyEvalProof::prove(
&decomm.dense,

@@ -363,7 +359,6 @@ impl R1CSEvalProof {
&[evals.0, evals.1, evals.2],
&gens.gens,
transcript,
random_tape,
);
timer.stop();

@@ -372,12 +367,12 @@ impl R1CSEvalProof {
pub fn verify(
&self,
comm: &R1CSCommitment,
rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated
ry: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
gens: &R1CSCommitmentGens,
transcript: &mut PoseidonTranscript,
comm: &R1CSCommitment<E::G1>,
rx: &[E::ScalarField], // point at which the R1CS matrix polynomials are evaluated
ry: &[E::ScalarField],
evals: &(E::ScalarField, E::ScalarField, E::ScalarField),
gens: &R1CSCommitmentGens<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
) -> Result<(), ProofVerifyError> {
self.proof.verify(
&comm.comm,
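A hedged end-to-end sketch of the API above (the wrapper function is illustrative; the types and signatures are the ones introduced in this diff):

use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;

// Sketch: commit to an R1CS instance once, then prove and check the joint
// evaluation of A, B and C at the sum-check points (rx, ry).
fn commit_and_check_evals<E: Pairing>(
  inst: &R1CSInstance<E::ScalarField>,
  rx: &[E::ScalarField],
  ry: &[E::ScalarField],
  gens: &R1CSCommitmentGens<E>,
  prover_transcript: &mut PoseidonTranscript<E::ScalarField>,
  verifier_transcript: &mut PoseidonTranscript<E::ScalarField>,
) -> bool
where
  E::ScalarField: Absorb,
{
  let (comm, decomm) = inst.commit(gens);
  let evals = inst.evaluate(rx, ry);
  let proof = R1CSEvalProof::prove(&decomm, rx, ry, &evals, gens, prover_transcript);
  proof
    .verify(&comm, rx, ry, &evals, gens, verifier_transcript)
    .is_ok()
}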
623
src/r1csproof.rs
@@ -1,104 +1,203 @@
#![allow(clippy::too_many_arguments)]
use crate::constraints::{VerifierCircuit, VerifierConfig};
use crate::group::{Fq, Fr};
use crate::math::Math;
use crate::parameters::poseidon_params;
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};
use crate::sumcheck::SumcheckInstanceProof;
use ark_bls12_377::Bls12_377 as I;
use ark_bw6_761::BW6_761 as P;
use ark_poly::MultilinearExtension;
use ark_poly_commit::multilinear_pc::data_structures::{Commitment, Proof};
use ark_poly_commit::multilinear_pc::MultilinearPC;
use super::commitments::MultiCommitGens;
use super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyCommitmentGens};
use super::errors::ProofVerifyError;
use crate::constraints::{R1CSVerificationCircuit, SumcheckVerificationCircuit, VerifierConfig};
use crate::math::Math;
use crate::mipp::MippProof;
use crate::poseidon_transcript::PoseidonTranscript;
use crate::sqrt_pst::Polynomial;
use crate::sumcheck::SumcheckInstanceProof;
use crate::transcript::Transcript;
use crate::unipoly::UniPoly;
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;
use ark_poly_commit::multilinear_pc::data_structures::{Commitment, Proof};
use itertools::Itertools;
use super::r1csinstance::R1CSInstance;
use super::scalar::Scalar;
use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial};
use super::timer::Timer;
use ark_crypto_primitives::{CircuitSpecificSetupSNARK, SNARK};
use ark_snark::{CircuitSpecificSetupSNARK, SNARK};
use crate::ark_std::UniformRand;
use ark_groth16::{Groth16, ProvingKey, VerifyingKey};
use ark_groth16::Groth16;
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
use ark_serialize::*;
use ark_std::{One, Zero};
use std::time::Instant;

#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct R1CSProof {
pub struct R1CSProof<E: Pairing> {
// The PST commitment to the multilinear extension of the witness.
comm: Commitment<I>,
sc_proof_phase1: SumcheckInstanceProof,
claims_phase2: (Scalar, Scalar, Scalar, Scalar),
sc_proof_phase2: SumcheckInstanceProof,
eval_vars_at_ry: Scalar,
proof_eval_vars_at_ry: Proof<I>,
rx: Vec<Scalar>,
ry: Vec<Scalar>,
pub comm: Commitment<E>,
sc_proof_phase1: SumcheckInstanceProof<E::ScalarField>,
claims_phase2: (
E::ScalarField,
E::ScalarField,
E::ScalarField,
E::ScalarField,
),
sc_proof_phase2: SumcheckInstanceProof<E::ScalarField>,
pub eval_vars_at_ry: E::ScalarField,
pub proof_eval_vars_at_ry: Proof<E>,
rx: Vec<E::ScalarField>,
ry: Vec<E::ScalarField>,
// The transcript state after the satisfiability proof was computed.
pub transcript_sat_state: Scalar,
pub transcript_sat_state: E::ScalarField,
pub initial_state: E::ScalarField,
pub t: E::TargetField,
pub mipp_proof: MippProof<E>,
}

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize, Clone)]
pub struct R1CSVerifierProof<E: Pairing> {
comm: Commitment<E>,
circuit_proof: ark_groth16::Proof<E>,
initial_state: E::ScalarField,
transcript_sat_state: E::ScalarField,
eval_vars_at_ry: E::ScalarField,
proof_eval_vars_at_ry: Proof<E>,
t: E::TargetField,
mipp_proof: MippProof<E>,
}

#[derive(Clone)]
pub struct R1CSSumcheckGens {
gens_1: MultiCommitGens,
gens_3: MultiCommitGens,
gens_4: MultiCommitGens,
pub struct CircuitGens<E: Pairing> {
pk: ProvingKey<E>,
vk: VerifyingKey<E>,
}

// TODO: fix passing gens_1_ref
impl R1CSSumcheckGens {
pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self {
let gens_1 = gens_1_ref.clone();
let gens_3 = MultiCommitGens::new(3, label);
let gens_4 = MultiCommitGens::new(4, label);
impl<E> CircuitGens<E>
where
E: Pairing,
{
// Performs the circuit-specific setup required by Groth16 for the sumcheck
// circuit. This is done by filling the struct with dummy elements, ensuring
// the sizes are correct so the setup matches the circuit that will be proved.
pub fn setup(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Self {
let mut rng = rand::thread_rng();
R1CSSumcheckGens {
gens_1,
gens_3,
gens_4,
}
let uni_polys_round1 = (0..num_cons.log_2())
.map(|_i| {
UniPoly::<E::ScalarField>::from_evals(&[
E::ScalarField::rand(&mut rng),
E::ScalarField::rand(&mut rng),
E::ScalarField::rand(&mut rng),
E::ScalarField::rand(&mut rng),
])
})
.collect::<Vec<UniPoly<E::ScalarField>>>();
let uni_polys_round2 = (0..num_vars.log_2() + 1)
.map(|_i| {
UniPoly::<E::ScalarField>::from_evals(&[
E::ScalarField::rand(&mut rng),
E::ScalarField::rand(&mut rng),
E::ScalarField::rand(&mut rng),
])
})
.collect::<Vec<UniPoly<E::ScalarField>>>();
let circuit = R1CSVerificationCircuit {
num_vars: num_vars,
num_cons: num_cons,
input: (0..num_inputs)
.map(|_i| E::ScalarField::rand(&mut rng))
.collect_vec(),
input_as_sparse_poly: SparsePolynomial::new(
num_vars.log_2(),
(0..num_inputs + 1)
.map(|i| SparsePolyEntry::new(i, E::ScalarField::rand(&mut rng)))
.collect::<Vec<SparsePolyEntry<E::ScalarField>>>(),
),
evals: (
E::ScalarField::zero(),
E::ScalarField::zero(),
E::ScalarField::zero(),
),
params: poseidon,
prev_challenge: E::ScalarField::zero(),
claims_phase2: (
E::ScalarField::zero(),
E::ScalarField::zero(),
E::ScalarField::zero(),
E::ScalarField::zero(),
),
eval_vars_at_ry: E::ScalarField::zero(),
sc_phase1: SumcheckVerificationCircuit {
polys: uni_polys_round1,
},
sc_phase2: SumcheckVerificationCircuit {
polys: uni_polys_round2,
},
claimed_rx: (0..num_cons.log_2())
.map(|_i| E::ScalarField::rand(&mut rng))
.collect_vec(),
claimed_ry: (0..num_vars.log_2() + 1)
.map(|_i| E::ScalarField::rand(&mut rng))
.collect_vec(),
claimed_transcript_sat_state: E::ScalarField::zero(),
};
let (pk, vk) = Groth16::<E>::setup(circuit.clone(), &mut rng).unwrap();
CircuitGens { pk, vk }
}
}
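// Note (sketch, not in the commit): Groth16's circuit-specific setup depends
// only on the circuit's shape, so the random and zero placeholders above are
// sound as long as every vector has the size of the circuit proved later:
// num_cons.log_2() cubic round polynomials for phase one, num_vars.log_2() + 1
// quadratic ones for phase two, and num_inputs + 1 sparse input entries.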
#[derive(Clone)]
pub struct R1CSGens {
gens_sc: R1CSSumcheckGens,
gens_pc: PolyCommitmentGens,
pub struct R1CSGens<E: Pairing> {
gens_pc: PolyCommitmentGens<E>,
gens_gc: CircuitGens<E>,
}

impl R1CSGens {
pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
impl<E: Pairing> R1CSGens<E> {
// Performs the setup for the polynomial commitment PST and for Groth16.
pub fn setup(
label: &'static [u8],
num_cons: usize,
num_vars: usize,
num_inputs: usize,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Self {
let num_poly_vars = num_vars.log_2();
let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
R1CSGens { gens_sc, gens_pc }
let gens_pc = PolyCommitmentGens::setup(num_poly_vars, label);
let gens_gc = CircuitGens::setup(num_cons, num_vars, num_inputs, poseidon);
R1CSGens { gens_pc, gens_gc }
}
}

impl R1CSProof {
impl<E> R1CSProof<E>
where
E: Pairing,
E::ScalarField: Absorb,
{
fn prove_phase_one(
num_rounds: usize,
evals_tau: &mut DensePolynomial,
evals_Az: &mut DensePolynomial,
evals_Bz: &mut DensePolynomial,
evals_Cz: &mut DensePolynomial,
transcript: &mut PoseidonTranscript,
) -> (SumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>) {
let comb_func = |poly_tau_comp: &Scalar,
poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
-> Scalar {
(*poly_tau_comp) * ((*poly_A_comp) * poly_B_comp - poly_C_comp)
};
evals_tau: &mut DensePolynomial<E::ScalarField>,
evals_Az: &mut DensePolynomial<E::ScalarField>,
evals_Bz: &mut DensePolynomial<E::ScalarField>,
evals_Cz: &mut DensePolynomial<E::ScalarField>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
) -> (
SumcheckInstanceProof<E::ScalarField>,
Vec<E::ScalarField>,
Vec<E::ScalarField>,
) {
let comb_func =
|poly_tau_comp: &E::ScalarField,
poly_A_comp: &E::ScalarField,
poly_B_comp: &E::ScalarField,
poly_C_comp: &E::ScalarField|
-> E::ScalarField { (*poly_tau_comp) * ((*poly_A_comp) * poly_B_comp - poly_C_comp) };
let (sc_proof_phase_one, r, claims) = SumcheckInstanceProof::prove_cubic_with_additive_term(
&Scalar::zero(), // claim is zero
&E::ScalarField::zero(), // claim is zero
num_rounds,
evals_tau,
evals_Az,

@@ -113,13 +212,18 @@ impl R1CSProof {
fn prove_phase_two(
num_rounds: usize,
claim: &Scalar,
evals_z: &mut DensePolynomial,
evals_ABC: &mut DensePolynomial,
transcript: &mut PoseidonTranscript,
) -> (SumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>) {
let comb_func =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { (*poly_A_comp) * poly_B_comp };
claim: &E::ScalarField,
evals_z: &mut DensePolynomial<E::ScalarField>,
evals_ABC: &mut DensePolynomial<E::ScalarField>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
) -> (
SumcheckInstanceProof<E::ScalarField>,
Vec<E::ScalarField>,
Vec<E::ScalarField>,
) {
let comb_func = |poly_A_comp: &E::ScalarField,
poly_B_comp: &E::ScalarField|
-> E::ScalarField { (*poly_A_comp) * poly_B_comp };
let (sc_proof_phase_two, r, claims) = SumcheckInstanceProof::prove_quad(
claim, num_rounds, evals_z, evals_ABC, comb_func, transcript,
);

@@ -127,34 +231,36 @@ impl R1CSProof {
(sc_proof_phase_two, r, claims)
}

fn protocol_name() -> &'static [u8] {
b"R1CS proof"
}

// Proves the R1CS instance inst is satisfiable given the assignment
// vars.
pub fn prove(
inst: &R1CSInstance,
vars: Vec<Scalar>,
input: &[Scalar],
gens: &R1CSGens,
transcript: &mut PoseidonTranscript,
) -> (R1CSProof, Vec<Scalar>, Vec<Scalar>) {
inst: &R1CSInstance<E::ScalarField>,
vars: Vec<E::ScalarField>,
input: &[E::ScalarField],
gens: &R1CSGens<E>,
transcript: &mut PoseidonTranscript<E::ScalarField>,
) -> (Self, Vec<E::ScalarField>, Vec<E::ScalarField>) {
let timer_prove = Timer::new("R1CSProof::prove");
// we currently require the number of |inputs| + 1 to be at most number of vars
assert!(input.len() < vars.len());
// create the multilinear witness polynomial from the satisfying assignment
let poly_vars = DensePolynomial::new(vars.clone());
// expressed as the list of sqrt-sized polynomials
let mut pl = Polynomial::from_evaluations(&vars.clone());
let timer_commit = Timer::new("polycommit");
// commitment to the satisfying witness polynomial
let comm = MultilinearPC::<I>::commit(&gens.gens_pc.ck, &poly_vars);
comm.append_to_poseidon(transcript);
// commitment list to the satisfying witness polynomial list
let (comm_list, t) = pl.commit(&gens.gens_pc.ck);
transcript.append_gt::<E>(b"", &t);
timer_commit.stop();
let c = transcript.challenge_scalar();
transcript.new_from_state(&c);
let initial_state = transcript.challenge_scalar(b"");
transcript.new_from_state(&initial_state);
transcript.append_scalar_vector(input);
transcript.append_scalar_vector(b"", &input);
let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one");

@@ -163,21 +269,21 @@ impl R1CSProof {
let num_inputs = input.len();
let num_vars = vars.len();
let mut z = vars;
z.extend(&vec![Scalar::one()]); // add constant term in z
z.extend(&vec![E::ScalarField::one()]); // add constant term in z
z.extend(input);
z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
z.extend(&vec![E::ScalarField::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
z
};
// derive the verifier's challenge tau
let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2());
let tau = transcript.challenge_vector(num_rounds_x);
let tau = transcript.challenge_scalar_vec(b"", num_rounds_x);
// compute the initial evaluation table for R(\tau, x)
let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
inst.multiply_vec(inst.get_num_cons(), z.len(), &z);
let (sc_proof_phase1, rx, _claims_phase1) = R1CSProof::prove_phase_one(
let (sc_proof_phase1, rx, _claims_phase1) = R1CSProof::<E>::prove_phase_one(
num_rounds_x,
&mut poly_tau,
&mut poly_Az,

@@ -201,9 +307,9 @@ impl R1CSProof {
let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two");
// combine the three claims into a single claim
let r_A = transcript.challenge_scalar();
let r_B = transcript.challenge_scalar();
let r_C = transcript.challenge_scalar();
let r_A: E::ScalarField = transcript.challenge_scalar(b"");
let r_B: E::ScalarField = transcript.challenge_scalar(b"");
let r_C: E::ScalarField = transcript.challenge_scalar(b"");
let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
let evals_ABC = {

@@ -216,11 +322,11 @@ impl R1CSProof {
assert_eq!(evals_A.len(), evals_C.len());
(0..evals_A.len())
.map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
.collect::<Vec<Scalar>>()
.collect::<Vec<_>>()
};
// another instance of the sum-check protocol
let (sc_proof_phase2, ry, _claims_phase2) = R1CSProof::prove_phase_two(
let (sc_proof_phase2, ry, _claims_phase2) = R1CSProof::<E>::prove_phase_two(
num_rounds_y,
&claim_phase2,
&mut DensePolynomial::new(z),

@@ -228,31 +334,24 @@ impl R1CSProof {
transcript,
);
timer_sc_proof_phase2.stop();
let transcript_sat_state = transcript.challenge_scalar(b"");
transcript.new_from_state(&transcript_sat_state);
// TODO: modify the polynomial evaluation in Spartan to be consistent
// with the evaluation in ark-poly-commit so that reversing is not needed
// anymore
let timmer_opening = Timer::new("polyopening");
let mut dummy = ry[1..].to_vec().clone();
dummy.reverse();
let proof_eval_vars_at_ry = MultilinearPC::<I>::open(&gens.gens_pc.ck, &poly_vars, &dummy);
println!(
"proof size (no of quotients): {:?}",
proof_eval_vars_at_ry.proofs.len()
);
let (comm, proof_eval_vars_at_ry, mipp_proof) =
pl.open(transcript, comm_list, &gens.gens_pc.ck, &ry[1..], &t);
timmer_opening.stop();
let timer_polyeval = Timer::new("polyeval");
let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]);
let eval_vars_at_ry = pl.eval(&ry[1..]);
timer_polyeval.stop();
timer_prove.stop();
let c = transcript.challenge_scalar();
(
R1CSProof {
comm,
initial_state,
sc_proof_phase1,
claims_phase2: (*Az_claim, *Bz_claim, *Cz_claim, prod_Az_Bz_claims),
sc_proof_phase2,

@@ -260,181 +359,141 @@ impl R1CSProof {
proof_eval_vars_at_ry,
rx: rx.clone(),
ry: ry.clone(),
transcript_sat_state: c,
transcript_sat_state,
t,
mipp_proof,
},
rx,
ry,
)
}

pub fn verify_groth16(
// Creates a Groth16 proof for the verification of sumcheck, expressed
// as a circuit.
pub fn prove_verifier(
&self,
num_vars: usize,
num_cons: usize,
input: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
transcript: &mut PoseidonTranscript,
gens: &R1CSGens,
) -> Result<(u128, u128, u128), ProofVerifyError> {
self.comm.append_to_poseidon(transcript);
input: &[E::ScalarField],
evals: &(E::ScalarField, E::ScalarField, E::ScalarField),
transcript: &mut PoseidonTranscript<E::ScalarField>,
gens: &R1CSGens<E>,
poseidon: PoseidonConfig<E::ScalarField>,
) -> Result<R1CSVerifierProof<E>, ProofVerifyError> {
// serialise and add the IPP commitment to the transcript
transcript.append_gt::<E>(b"", &self.t);
let c = transcript.challenge_scalar();
let initial_state = transcript.challenge_scalar(b"");
transcript.new_from_state(&initial_state);
let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, E::ScalarField::one())];
//remaining inputs
input_as_sparse_poly_entries.extend(
(0..input.len())
.map(|i| SparsePolyEntry::new(i + 1, input[i]))
.collect::<Vec<SparsePolyEntry>>(),
.collect::<Vec<SparsePolyEntry<E::ScalarField>>>(),
);
let n = num_vars;
let input_as_sparse_poly =
SparsePolynomial::new(n.log_2() as usize, input_as_sparse_poly_entries);
SparsePolynomial::new(num_vars.log_2() as usize, input_as_sparse_poly_entries);
let config = VerifierConfig {
num_vars,
num_cons,
input: input.to_vec(),
evals: *evals,
params: poseidon_params(),
prev_challenge: c,
params: poseidon,
prev_challenge: initial_state,
claims_phase2: self.claims_phase2,
polys_sc1: self.sc_proof_phase1.polys.clone(),
polys_sc2: self.sc_proof_phase2.polys.clone(),
eval_vars_at_ry: self.eval_vars_at_ry,
input_as_sparse_poly,
// rx: self.rx.clone(),
comm: self.comm.clone(),
rx: self.rx.clone(),
ry: self.ry.clone(),
transcript_sat_state: self.transcript_sat_state,
};
let mut rng = ark_std::test_rng();
let circuit = R1CSVerificationCircuit::new(&config);
let prove_inner = Timer::new("proveinnercircuit");
let start = Instant::now();
let circuit = VerifierCircuit::new(&config, &mut rng).unwrap();
let dp1 = start.elapsed().as_millis();
prove_inner.stop();
let circuit_prover_timer = Timer::new("provecircuit");
let proof = Groth16::<E>::prove(&gens.gens_gc.pk, circuit, &mut rand::thread_rng()).unwrap();
circuit_prover_timer.stop();
let start = Instant::now();
let (pk, vk) = Groth16::<P>::setup(circuit.clone(), &mut rng).unwrap();
let ds = start.elapsed().as_millis();
Ok(R1CSVerifierProof {
comm: self.comm.clone(),
circuit_proof: proof,
initial_state: self.initial_state,
transcript_sat_state: self.transcript_sat_state,
eval_vars_at_ry: self.eval_vars_at_ry,
proof_eval_vars_at_ry: self.proof_eval_vars_at_ry.clone(),
t: self.t,
mipp_proof: self.mipp_proof.clone(),
})
}
}

let prove_outer = Timer::new("proveoutercircuit");
let start = Instant::now();
let proof = Groth16::<P>::prove(&pk, circuit, &mut rng).unwrap();
let dp2 = start.elapsed().as_millis();
prove_outer.stop();
impl<E: Pairing> R1CSVerifierProof<E>
where
<E as Pairing>::ScalarField: Absorb,
{
// Verifies the Groth16 proof for the sumcheck circuit and the PST polynomial
// commitment opening.
pub fn verify(
&self,
r: (Vec<E::ScalarField>, Vec<E::ScalarField>),
input: &[E::ScalarField],
evals: &(E::ScalarField, E::ScalarField, E::ScalarField),
transcript: &mut PoseidonTranscript<E::ScalarField>,
gens: &R1CSGens<E>,
) -> Result<bool, ProofVerifyError> {
let (rx, ry) = &r;
let (Ar, Br, Cr) = evals;
let mut pubs = vec![self.initial_state];
pubs.extend(input.clone());
pubs.extend(rx.clone());
pubs.extend(ry.clone());
pubs.extend(vec![
self.eval_vars_at_ry,
*Ar,
*Br,
*Cr,
self.transcript_sat_state,
]);
transcript.new_from_state(&self.transcript_sat_state);
par! {
// verifies the Groth16 proof for the spartan verifier
let is_verified = Groth16::<E>::verify(&gens.gens_gc.vk, &pubs, &self.circuit_proof).unwrap(),
let start = Instant::now();
let is_verified = Groth16::<P>::verify(&vk, &[], &proof).unwrap();
assert!(is_verified);
let timer_verification = Timer::new("commitverification");
let mut dummy = self.ry[1..].to_vec();
// TODO: ensure ark-poly-commit and Spartan produce consistent results
// when evaluating a polynomial at a given point so this reverse is not
// needed.
dummy.reverse();
// Verifies the proof of opening against the result of evaluating the
// witness polynomial at point ry.
let res = MultilinearPC::<I>::check(
// verifies the proof of opening against the result of evaluating the
// witness polynomial at point ry
let res = Polynomial::verify(
transcript,
&gens.gens_pc.vk,
&self.comm,
&dummy,
&ry[1..],
self.eval_vars_at_ry,
&self.proof_eval_vars_at_ry,
);
timer_verification.stop();
assert!(res == true);
let dv = start.elapsed().as_millis();
Ok((ds, dp1 + dp2, dv))
}
// Helper function to find the number of constraints in the circuit, which
// requires executing it.
pub fn circuit_size(
&self,
num_vars: usize,
num_cons: usize,
input: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
transcript: &mut PoseidonTranscript,
_gens: &R1CSGens,
) -> Result<usize, ProofVerifyError> {
self.comm.append_to_poseidon(transcript);
let c = transcript.challenge_scalar();
let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
//remaining inputs
input_as_sparse_poly_entries.extend(
(0..input.len())
.map(|i| SparsePolyEntry::new(i + 1, input[i]))
.collect::<Vec<SparsePolyEntry>>(),
);
let n = num_vars;
let input_as_sparse_poly =
SparsePolynomial::new(n.log_2() as usize, input_as_sparse_poly_entries);
let config = VerifierConfig {
num_vars,
num_cons,
input: input.to_vec(),
evals: *evals,
params: poseidon_params(),
prev_challenge: c,
claims_phase2: self.claims_phase2,
polys_sc1: self.sc_proof_phase1.polys.clone(),
polys_sc2: self.sc_proof_phase2.polys.clone(),
eval_vars_at_ry: self.eval_vars_at_ry,
input_as_sparse_poly,
// rx: self.rx.clone(),
ry: self.ry.clone(),
transcript_sat_state: self.transcript_sat_state,
&self.mipp_proof,
&self.t,
)
};
let mut rng = ark_std::test_rng();
let circuit = VerifierCircuit::new(&config, &mut rng).unwrap();
let nc_inner = verify_constraints_inner(circuit.clone(), &num_cons);
let nc_outer = verify_constraints_outer(circuit, &num_cons);
Ok(nc_inner + nc_outer)
assert!(is_verified == true);
assert!(res == true);
Ok(is_verified && res)
}
}

fn verify_constraints_outer(circuit: VerifierCircuit, _num_cons: &usize) -> usize {
let cs = ConstraintSystem::<Fq>::new_ref();
circuit.generate_constraints(cs.clone()).unwrap();
assert!(cs.is_satisfied().unwrap());
cs.num_constraints()
}

fn verify_constraints_inner(circuit: VerifierCircuit, _num_cons: &usize) -> usize {
let cs = ConstraintSystem::<Fr>::new_ref();
circuit
.inner_circuit
.generate_constraints(cs.clone())
.unwrap();
assert!(cs.is_satisfied().unwrap());
cs.num_constraints()
}

#[cfg(test)]
mod tests {
use crate::parameters::poseidon_params;
use super::*;
use ark_ff::PrimeField;
use ark_std::UniformRand;
type F = ark_bls12_377::Fr;

fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
fn produce_tiny_r1cs() -> (R1CSInstance<F>, Vec<F>, Vec<F>) {
// three constraints over five variables Z1, Z2, Z3, Z4, and Z5
// rounded to the nearest power of two
let num_cons = 128;

@@ -442,11 +501,11 @@ mod tests {
let num_inputs = 2;
// encode the above constraints into three matrices
let mut A: Vec<(usize, usize, Scalar)> = Vec::new();
let mut B: Vec<(usize, usize, Scalar)> = Vec::new();
let mut C: Vec<(usize, usize, Scalar)> = Vec::new();
let mut A: Vec<(usize, usize, F)> = Vec::new();
let mut B: Vec<(usize, usize, F)> = Vec::new();
let mut C: Vec<(usize, usize, F)> = Vec::new();
let one = Scalar::one();
let one = F::one();
// constraint 0 entries
// (Z1 + Z2) * I0 - Z3 = 0;
A.push((0, 0, one));

@@ -469,22 +528,22 @@ mod tests {
// compute a satisfying assignment
let mut rng = ark_std::rand::thread_rng();
let i0 = Scalar::rand(&mut rng);
let i1 = Scalar::rand(&mut rng);
let z1 = Scalar::rand(&mut rng);
let z2 = Scalar::rand(&mut rng);
let i0 = F::rand(&mut rng);
let i1 = F::rand(&mut rng);
let z1 = F::rand(&mut rng);
let z2 = F::rand(&mut rng);
let z3 = (z1 + z2) * i0; // constraint 1: (Z1 + Z2) * I0 - Z3 = 0;
let z4 = (z1 + i1) * z3; // constraint 2: (Z1 + I1) * (Z3) - Z4 = 0
let z5 = Scalar::zero(); //constraint 3
let z5 = F::zero(); //constraint 3
let mut vars = vec![Scalar::zero(); num_vars];
let mut vars = vec![F::zero(); num_vars];
vars[0] = z1;
vars[1] = z2;
vars[2] = z3;
vars[3] = z4;
vars[4] = z5;
let mut input = vec![Scalar::zero(); num_inputs];
let mut input = vec![F::zero(); num_inputs];
input[0] = i0;
input[1] = i1;

@@ -500,42 +559,74 @@ mod tests {
#[test]
fn test_synthetic_r1cs() {
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
type F = ark_bls12_377::Fr;
let (inst, vars, input) = R1CSInstance::<F>::produce_synthetic_r1cs(1024, 1024, 10);
let is_sat = inst.is_sat(&vars, &input);
assert!(is_sat);
}

use crate::parameters::PoseidonConfiguration;
#[test]
pub fn check_r1cs_proof() {
fn check_r1cs_proof_ark_blst() {
let params = ark_blst::Scalar::poseidon_params();
check_r1cs_proof::<ark_blst::Bls12>(params);
}
#[test]
fn check_r1cs_proof_bls12_377() {
let params = ark_bls12_377::Fr::poseidon_params();
check_r1cs_proof::<ark_bls12_377::Bls12_377>(params);
}

#[test]
fn check_r1cs_proof_bls12_381() {
let params = ark_bls12_381::Fr::poseidon_params();
check_r1cs_proof::<ark_bls12_381::Bls12_381>(params);
}
fn check_r1cs_proof<P>(params: PoseidonConfig<P::ScalarField>)
where
P: Pairing,
P::ScalarField: PrimeField,
P::ScalarField: Absorb,
{
let num_vars = 1024;
let num_cons = num_vars;
let num_inputs = 10;
let num_inputs = 3;
let (inst, vars, input) =
R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
R1CSInstance::<P::ScalarField>::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = R1CSGens::new(b"test-m", num_cons, num_vars);
let gens = R1CSGens::<P>::setup(b"test-m", num_cons, num_vars, num_inputs, params.clone());
let params = poseidon_params();
//let params = poseidon_params();
// let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = PoseidonTranscript::new(&params);
let mut prover_transcript = PoseidonTranscript::new(&params.clone());
let c = prover_transcript.challenge_scalar::<P::ScalarField>(b"");
prover_transcript.new_from_state(&c);
let (proof, rx, ry) = R1CSProof::prove(&inst, vars, &input, &gens, &mut prover_transcript);
let inst_evals = inst.evaluate(&rx, &ry);
let mut verifier_transcript = PoseidonTranscript::new(&params);
prover_transcript.new_from_state(&c);
let verifer_proof = proof
.prove_verifier(
num_vars,
num_cons,
&input,
&inst_evals,
&mut prover_transcript,
&gens,
params.clone(),
)
.unwrap();
// if you want to check the test fails
// input[0] = Scalar::zero();
assert!(proof
.verify_groth16(
inst.get_num_vars(),
inst.get_num_cons(),
let mut verifier_transcript = PoseidonTranscript::new(&params.clone());
assert!(verifer_proof
.verify(
(rx, ry),
&input,
&inst_evals,
&mut verifier_transcript,
&gens,
&gens
)
.is_ok());
}
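Groth16 verification is sensitive to the exact ordering of public inputs; the layout assembled in `pubs` above can be summarized by this illustrative helper (not part of the commit):

// Public-input layout consumed by Groth16::verify in R1CSVerifierProof::verify;
// any reordering makes verification fail even for honestly generated proofs.
fn groth16_public_inputs<F: Copy>(
  initial_state: F,
  input: &[F],
  rx: &[F],
  ry: &[F],
  eval_vars_at_ry: F,
  evals: (F, F, F),
  transcript_sat_state: F,
) -> Vec<F> {
  let mut pubs = vec![initial_state];
  pubs.extend_from_slice(input);
  pubs.extend_from_slice(rx);
  pubs.extend_from_slice(ry);
  pubs.extend([eval_vars_at_ry, evals.0, evals.1, evals.2, transcript_sat_state]);
  pubs
}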
@@ -1,28 +0,0 @@
use super::scalar::Scalar;
use super::transcript::ProofTranscript;
use ark_std::UniformRand;
use merlin::Transcript;

pub struct RandomTape {
tape: Transcript,
}

impl RandomTape {
pub fn new(name: &'static [u8]) -> Self {
let tape = {
let mut rng = ark_std::rand::thread_rng();
let mut tape = Transcript::new(name);
tape.append_scalar(b"init_randomness", &Scalar::rand(&mut rng));
tape
};
Self { tape }
}

pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
self.tape.challenge_scalar(label)
}

pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
self.tape.challenge_vector(label, len)
}
}
@@ -1,44 +0,0 @@
pub use ark_bls12_377::Fr as Scalar;
// mod ristretto255;

// pub type Scalar = ristretto255::Scalar;
// pub type ScalarBytes = curve25519_dalek::scalar::Scalar;

// pub trait ScalarFromPrimitives {
// fn to_scalar(self) -> Scalar;
// }

// impl ScalarFromPrimitives for usize {
// #[inline]
// fn to_scalar(self) -> Scalar {
// (0..self).map(|_i| Scalar::one()).sum()
// }
// }

// impl ScalarFromPrimitives for bool {
// #[inline]
// fn to_scalar(self) -> Scalar {
// if self {
// Scalar::one()
// } else {
// Scalar::zero()
// }
// }
// }

// pub trait ScalarBytesFromScalar {
// fn decompress_scalar(s: &Scalar) -> ScalarBytes;
// fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
// }

// impl ScalarBytesFromScalar for Scalar {
// fn decompress_scalar(s: &Scalar) -> ScalarBytes {
// ScalarBytes::from_bytes_mod_order(s.to_bytes())
// }

// fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
// (0..s.len())
// .map(|i| Scalar::decompress_scalar(&s[i]))
// .collect::<Vec<ScalarBytes>>()
// }
// }
File diff suppressed because it is too large
343
src/sqrt_pst.rs
Normal file
@@ -0,0 +1,343 @@
use crate::mipp::MippProof;
use ark_ec::{pairing::Pairing, scalar_mul::variable_base::VariableBaseMSM, CurveGroup};
use ark_ff::One;
use ark_poly_commit::multilinear_pc::{
data_structures::{Commitment, CommitterKey, Proof, VerifierKey},
MultilinearPC,
};
use rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator};
use crate::{
dense_mlpoly::DensePolynomial, math::Math, poseidon_transcript::PoseidonTranscript, timer::Timer,
};

pub struct Polynomial<E: Pairing> {
m: usize,
odd: usize,
polys: Vec<DensePolynomial<E::ScalarField>>,
q: Option<DensePolynomial<E::ScalarField>>,
chis_b: Option<Vec<E::ScalarField>>,
}

impl<E: Pairing> Polynomial<E> {
// Given the evaluations over the boolean hypercube of a polynomial p of size
// n compute the sqrt-sized polynomials p_i as
// p_i(X) = \sum_{j \in \{0,1\}^m} p(j, i) * chi_j(X)
// where p(X,Y) = \sum_{i \in \{0,1\}^m}
// (\sum_{j \in \{0, 1\}^{m}} p(j, i) * \chi_j(X)) * \chi_i(Y)
// and m is n/2.
// To handle the case in which n is odd, the number of variables in the
// sqrt-sized polynomials will be increased by a factor of 2 (i.e. 2^{m+1})
// while the number of polynomials remains the same (i.e. 2^m)
pub fn from_evaluations(Z: &[E::ScalarField]) -> Self {
let pl_timer = Timer::new("poly_list_build");
// check the evaluation list is a power of 2
debug_assert!(Z.len() & (Z.len() - 1) == 0);
let num_vars = Z.len().log_2();
let m_col = num_vars / 2;
let m_row = if num_vars % 2 == 0 {
num_vars / 2
} else {
num_vars / 2 + 1
};
let pow_m_col = 2_usize.pow(m_col as u32);
let pow_m_row = 2_usize.pow(m_row as u32);
let polys: Vec<DensePolynomial<E::ScalarField>> = (0..pow_m_col)
.into_par_iter()
.map(|i| {
let z: Vec<E::ScalarField> = (0..pow_m_row)
.into_par_iter()
// viewing the list of evaluation as a square matrix
// we select by row j and column i
// to handle the odd case, we add another row to the matrix i.e.
// we add an extra variable to the polynomials while keeping their
// number the same
.map(|j| Z[(j << m_col) | i])
.collect();
DensePolynomial::new(z)
})
.collect();
debug_assert!(polys.len() == pow_m_col);
debug_assert!(polys[0].len == pow_m_row);
pl_timer.stop();
Self {
m: m_col,
odd: if num_vars % 2 == 1 { 1 } else { 0 },
polys,
q: None,
chis_b: None,
}
}
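// Sketch of the identity behind the split, in the notation above: viewing Z
// as a 2^m_row x 2^m_col matrix whose i-th column is p_i,
// p(\vec{a}, \vec{b}) = \sum_{i \in \{0,1\}^m} \chi_i(\vec{b}) * p_i(\vec{a}),
// which is what get_q below exploits: q collects the \chi_i(\vec{b})-weighted
// combination of the columns so that p(\vec{a}, \vec{b}) = q(\vec{a}).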
// Given point = (\vec{a}, \vec{b}), compute the polynomial q as
// q(Y) =
// \sum_{j \in \{0,1\}^m}(\sum_{i \in \{0,1\}^m} p(j,i) * chi_i(b)) * chi_j(Y)
// and p(a,b) = q(a) where p is the initial polynomial
fn get_q(&mut self, point: &[E::ScalarField]) {
let q_timer = Timer::new("build_q");
debug_assert!(point.len() == 2 * self.m + self.odd);
let b = &point[self.m + self.odd..];
let pow_m = 2_usize.pow(self.m as u32);
let chis: Vec<E::ScalarField> = (0..pow_m)
.into_par_iter()
.map(|i| Self::get_chi_i(b, i))
.collect();
let z_q: Vec<E::ScalarField> = (0..(pow_m * 2_usize.pow(self.odd as u32)))
.into_par_iter()
.map(|j| (0..pow_m).map(|i| self.polys[i].Z[j] * chis[i]).sum())
.collect();
q_timer.stop();
self.q = Some(DensePolynomial::new(z_q));
self.chis_b = Some(chis);
}

// Given point = (\vec{a}, \vec{b}) used to construct q
// compute q(a) = p(a,b).
pub fn eval(&mut self, point: &[E::ScalarField]) -> E::ScalarField {
let a = &point[0..point.len() / 2 + self.odd];
if self.q.is_none() {
self.get_q(point);
}
let q = self.q.clone().unwrap();
(0..q.Z.len())
.into_par_iter()
.map(|j| q.Z[j] * Polynomial::<E>::get_chi_i(&a, j))
.sum()
}

pub fn commit(&self, ck: &CommitterKey<E>) -> (Vec<Commitment<E>>, E::TargetField) {
let timer_commit = Timer::new("sqrt_commit");
let timer_list = Timer::new("comm_list");
// commit to each of the sqrt sized p_i
let comm_list: Vec<Commitment<E>> = self
.polys
.par_iter()
.map(|p| MultilinearPC::<E>::commit(&ck, p))
.collect();
timer_list.stop();
let h_vec = ck.powers_of_h[self.odd].clone();
assert!(comm_list.len() == h_vec.len());
let ipp_timer = Timer::new("ipp");
let left_pairs: Vec<_> = comm_list
.clone()
.into_par_iter()
.map(|c| E::G1Prepared::from(c.g_product))
.collect();
let right_pairs: Vec<_> = h_vec
.into_par_iter()
.map(|h| E::G2Prepared::from(h))
.collect();
// compute the IPP commitment
let t = E::multi_pairing(left_pairs, right_pairs).0;
ipp_timer.stop();
timer_commit.stop();
(comm_list, t)
}
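// For reference (sketch in the notation above): the two-tiered commitment is
// T = \prod_{i \in \{0,1\}^m} e(C_i, h_i),
// where C_i is the PST commitment to p_i and the h_i are the committer key's
// G2 elements; T is the value the MIPP proof opens during `open` below.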
// computes \chi_i(\vec{b}) = \prod_{i_j = 0}(1 - b_j)\prod_{i_j = 1}(b_j)
pub fn get_chi_i(b: &[E::ScalarField], i: usize) -> E::ScalarField {
let m = b.len();
let mut prod = E::ScalarField::one();
for j in 0..m {
let b_j = b[j];
// iterate from first (msb) to last (lsb) bit of i
// to build chi_i using the formula above
if i >> (m - j - 1) & 1 == 1 {
prod = prod * b_j;
} else {
prod = prod * (E::ScalarField::one() - b_j)
};
}
prod
}
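// Worked example (sketch): for m = 2 and i = 2 = 0b10, the bits of i read
// msb-first give chi_2(b) = b_0 * (1 - b_1), so
// get_chi_i(&[b0, b1], 2) returns b0 * (1 - b1).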
pub fn open(
&mut self,
transcript: &mut PoseidonTranscript<E::ScalarField>,
comm_list: Vec<Commitment<E>>,
ck: &CommitterKey<E>,
point: &[E::ScalarField],
t: &E::TargetField,
) -> (Commitment<E>, Proof<E>, MippProof<E>) {
let a = &point[0..self.m + self.odd];
if self.q.is_none() {
self.get_q(point);
}
let q = self.q.clone().unwrap();
let timer_open = Timer::new("sqrt_open");
// Compute the PST commitment to q obtained as the inner products of the
// commitments to the polynomials p_i and chi_i(\vec{b}) for i ranging over
// the boolean hypercube of size m.
let timer_msm = Timer::new("msm");
if self.chis_b.is_none() {
panic!("chis(b) should have been computed for q");
}
// TODO remove that cloning - the whole option thing
let chis = self.chis_b.clone().unwrap();
assert!(chis.len() == comm_list.len());
let comms: Vec<_> = comm_list.par_iter().map(|c| c.g_product).collect();
let c_u = <E::G1 as VariableBaseMSM>::msm_unchecked(&comms, &chis).into_affine();
timer_msm.stop();
let U: Commitment<E> = Commitment {
nv: q.num_vars,
g_product: c_u,
};
let comm = MultilinearPC::<E>::commit(ck, &q);
debug_assert!(c_u == comm.g_product);
let h_vec = ck.powers_of_h[self.odd].clone();
// construct MIPP proof that U is the inner product of the vector A
// and the vector y, where A is the opening vector to T
let timer_mipp_proof = Timer::new("mipp_prove");
let mipp_proof =
MippProof::<E>::prove(transcript, ck, comms, chis.to_vec(), h_vec, &c_u, t).unwrap();
timer_mipp_proof.stop();
let timer_proof = Timer::new("pst_open");
// reversing a is necessary because the sumcheck code in spartan generates
// the point in reverse order compared to how the polynomial commitment
// expects it
let mut a_rev = a.to_vec().clone();
a_rev.reverse();
// construct PST proof for opening q at a
let pst_proof = MultilinearPC::<E>::open(ck, &q, &a_rev);
timer_proof.stop();
timer_open.stop();
(U, pst_proof, mipp_proof)
}

pub fn verify(
transcript: &mut PoseidonTranscript<E::ScalarField>,
vk: &VerifierKey<E>,
U: &Commitment<E>,
point: &[E::ScalarField],
v: E::ScalarField,
pst_proof: &Proof<E>,
mipp_proof: &MippProof<E>,
T: &E::TargetField,
) -> bool {
let len = point.len();
let odd = if len % 2 == 1 { 1 } else { 0 };
let a = &point[0..len / 2 + odd];
let b = &point[len / 2 + odd..len];
let timer_mipp_verify = Timer::new("mipp_verify");
// verify that U = A^y where A is the opening vector of T
let res_mipp = MippProof::<E>::verify(vk, transcript, mipp_proof, b.to_vec(), &U.g_product, T);
assert!(res_mipp == true);
timer_mipp_verify.stop();
// reversing a is necessary because the sumcheck code in spartan generates
// the point in reverse order compared to how the polynomial commitment
// expects
let mut a_rev = a.to_vec().clone();
a_rev.reverse();
let timer_pst_verify = Timer::new("pst_verify");
// PST proof that q(a) is indeed equal to value claimed by the prover
let res = MultilinearPC::<E>::check(vk, U, &a_rev, v, pst_proof);
timer_pst_verify.stop();
res
}
}

#[cfg(test)]
mod tests {
use crate::parameters::poseidon_params;
use super::*;
type F = ark_bls12_377::Fr;
type E = ark_bls12_377::Bls12_377;
use ark_std::UniformRand;

#[test]
fn check_sqrt_poly_eval() {
let mut rng = ark_std::test_rng();
let num_vars = 6;
let len = 2_usize.pow(num_vars);
let Z: Vec<F> = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect();
let r: Vec<F> = (0..num_vars)
.into_iter()
.map(|_| F::rand(&mut rng))
.collect();
let p = DensePolynomial::new(Z.clone());
let res1 = p.evaluate(&r);
let mut pl = Polynomial::<E>::from_evaluations(&Z.clone());
let res2 = pl.eval(&r);
assert!(res1 == res2);
}

#[test]
fn check_commit() {
// check odd case
check_sqrt_poly_commit(5);
// check even case
check_sqrt_poly_commit(6);
}

fn check_sqrt_poly_commit(num_vars: u32) {
let mut rng = ark_std::test_rng();
let len = 2_usize.pow(num_vars);
let Z: Vec<F> = (0..len).into_iter().map(|_| F::rand(&mut rng)).collect();
let r: Vec<F> = (0..num_vars)
.into_iter()
.map(|_| F::rand(&mut rng))
.collect();
let gens = MultilinearPC::<E>::setup(3, &mut rng);
let (ck, vk) = MultilinearPC::<E>::trim(&gens, 3);
let mut pl = Polynomial::from_evaluations(&Z.clone());
let v = pl.eval(&r);
let (comm_list, t) = pl.commit(&ck);
let params = poseidon_params();
let mut prover_transcript = PoseidonTranscript::new(&params);
let (u, pst_proof, mipp_proof) = pl.open(&mut prover_transcript, comm_list, &ck, &r, &t);
let mut verifier_transcript = PoseidonTranscript::new(&params);
let res = Polynomial::verify(
&mut verifier_transcript,
&vk,
&u,
&r,
v,
&pst_proof,
&mipp_proof,
&t,
);
assert!(res == true);
}
}
659 src/sumcheck.rs
@@ -1,37 +1,37 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};

use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
use crate::transcript::Transcript;

use super::scalar::Scalar;
use super::unipoly::UniPoly;
use ark_crypto_primitives::sponge::Absorb;
use ark_ff::PrimeField;

use ark_ff::Zero;
use ark_serialize::*;

use itertools::izip;

#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct SumcheckInstanceProof {
  pub polys: Vec<UniPoly>,
pub struct SumcheckInstanceProof<F: PrimeField> {
  pub polys: Vec<UniPoly<F>>,
}

impl SumcheckInstanceProof {
  pub fn new(polys: Vec<UniPoly>) -> SumcheckInstanceProof {
impl<F: PrimeField + Absorb> SumcheckInstanceProof<F> {
  pub fn new(polys: Vec<UniPoly<F>>) -> Self {
    SumcheckInstanceProof { polys }
  }

  pub fn verify(
    &self,
    claim: Scalar,
    claim: F,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut PoseidonTranscript,
  ) -> Result<(Scalar, Vec<Scalar>), ProofVerifyError> {
    transcript: &mut PoseidonTranscript<F>,
  ) -> Result<(F, Vec<F>), ProofVerifyError> {
    let mut e = claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut r: Vec<F> = Vec::new();

    // verify that there is a univariate polynomial for each round
    assert_eq!(self.polys.len(), num_rounds);
@@ -45,10 +45,10 @@ impl SumcheckInstanceProof {
      assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e);

      // append the prover's message to the transcript
      poly.append_to_poseidon(transcript);
      poly.write_to_transcript(transcript);

      // derive the verifier's challenge for the next round
      let r_i = transcript.challenge_scalar();
      let r_i = transcript.challenge_scalar(b"");

      r.push(r_i);

@@ -60,146 +60,27 @@ impl SumcheckInstanceProof {
  }
}
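Editorial note: the hunk above preserves the core sumcheck invariant: in each round the verifier checks that the prover's univariate polynomial g_j satisfies g_j(0) + g_j(1) = e and then reduces the claim to e = g_j(r_j). A hedged sketch of one round's check, independent of the crate's types:

  // One sumcheck round over a prime field F. `e` is the running claim;
  // it is replaced by g_j(r_j) when the round check passes.
  fn verify_round<F: ark_ff::PrimeField>(g_at_0: F, g_at_1: F, g_at_r: F, e: &mut F) -> bool {
    if g_at_0 + g_at_1 != *e {
      return false; // round polynomial is inconsistent with the claim
    }
    *e = g_at_r; // claim for the next round
    true
  }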

// #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
// pub struct ZKSumcheckInstanceProof {
//   comm_polys: Vec<CompressedGroup>,
//   comm_evals: Vec<CompressedGroup>,
//   proofs: Vec<DotProductProof>,
// }

// impl ZKSumcheckInstanceProof {
//   pub fn new(
//     comm_polys: Vec<CompressedGroup>,
//     comm_evals: Vec<CompressedGroup>,
//     proofs: Vec<DotProductProof>,
//   ) -> Self {
//     ZKSumcheckInstanceProof {
//       comm_polys,
//       comm_evals,
//       proofs,
//     }
//   }

//   pub fn verify(
//     &self,
//     comm_claim: &CompressedGroup,
//     num_rounds: usize,
//     degree_bound: usize,
//     gens_1: &MultiCommitGens,
//     gens_n: &MultiCommitGens,
//     transcript: &mut Transcript,
//   ) -> Result<(CompressedGroup, Vec<Scalar>), ProofVerifyError> {
//     // verify degree bound
//     assert_eq!(gens_n.n, degree_bound + 1);

//     // verify that there is a univariate polynomial for each round
//     assert_eq!(self.comm_polys.len(), num_rounds);
//     assert_eq!(self.comm_evals.len(), num_rounds);

//     let mut r: Vec<Scalar> = Vec::new();
//     for i in 0..self.comm_polys.len() {
//       let comm_poly = &self.comm_polys[i];

//       // append the prover's polynomial to the transcript
//       comm_poly.append_to_transcript(b"comm_poly", transcript);

//       // derive the verifier's challenge for the next round
//       let r_i = transcript.challenge_scalar(b"challenge_nextround");

//       // verify the proof of sum-check and evals
//       let res = {
//         let comm_claim_per_round = if i == 0 {
//           comm_claim
//         } else {
//           &self.comm_evals[i - 1]
//         };
//         let mut comm_eval = &self.comm_evals[i];

//         // add two claims to transcript
//         comm_claim_per_round.append_to_transcript(transcript);
//         comm_eval.append_to_transcript(transcript);

//         // produce two weights
//         let w = transcript.challenge_vector(2);

//         // compute a weighted sum of the RHS
//         let comm_target = GroupElement::vartime_multiscalar_mul(
//           w.as_slice(),
//           iter::once(&comm_claim_per_round)
//             .chain(iter::once(&comm_eval))
//             .map(|pt| GroupElement::decompress(pt).unwrap())
//             .collect::<Vec<GroupElement>>()
//             .as_slice(),
//         )
//         .compress();

//         let a = {
//           // the vector to use to decommit for sum-check test
//           let a_sc = {
//             let mut a = vec![Scalar::one(); degree_bound + 1];
//             a[0] += Scalar::one();
//             a
//           };

//           // the vector to use to decommit for evaluation
//           let a_eval = {
//             let mut a = vec![Scalar::one(); degree_bound + 1];
//             for j in 1..a.len() {
//               a[j] = a[j - 1] * r_i;
//             }
//             a
//           };

//           // take weighted sum of the two vectors using w
//           assert_eq!(a_sc.len(), a_eval.len());
//           (0..a_sc.len())
//             .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
//             .collect::<Vec<Scalar>>()
//         };

//         self.proofs[i]
//           .verify(
//             gens_1,
//             gens_n,
//             transcript,
//             &a,
//             &self.comm_polys[i],
//             &comm_target,
//           )
//           .is_ok()
//       };
//       if !res {
//         return Err(ProofVerifyError::InternalError);
//       }

//       r.push(r_i);
//     }

//     Ok((self.comm_evals[&self.comm_evals.len() - 1].clone(), r))
//   }
// }

impl SumcheckInstanceProof {
  pub fn prove_cubic_with_additive_term<F>(
    claim: &Scalar,
impl<F: PrimeField + Absorb> SumcheckInstanceProof<F> {
  pub fn prove_cubic_with_additive_term<C>(
    claim: &F,
    num_rounds: usize,
    poly_tau: &mut DensePolynomial,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    poly_C: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut PoseidonTranscript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
    poly_tau: &mut DensePolynomial<F>,
    poly_A: &mut DensePolynomial<F>,
    poly_B: &mut DensePolynomial<F>,
    poly_C: &mut DensePolynomial<F>,
    comb_func: C,
    transcript: &mut PoseidonTranscript<F>,
  ) -> (Self, Vec<F>, Vec<F>)
  where
    F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar,
    C: Fn(&F, &F, &F, &F) -> F,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<UniPoly> = Vec::new();
    let mut r: Vec<F> = Vec::new();
    let mut cubic_polys: Vec<UniPoly<F>> = Vec::new();
    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();
      let mut eval_point_3 = Scalar::zero();
      let mut eval_point_0 = F::zero();
      let mut eval_point_2 = F::zero();
      let mut eval_point_3 = F::zero();

      let len = poly_tau.len() / 2;
      for i in 0..len {
@@ -237,9 +118,9 @@ impl SumcheckInstanceProof {
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_poseidon(transcript);
      poly.write_to_transcript(transcript);
      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar();
      let r_j = transcript.challenge_scalar(b"");
      r.push(r_j);

      // bound all tables to the verifier's challenge
@@ -257,25 +138,25 @@ impl SumcheckInstanceProof {
      vec![poly_tau[0], poly_A[0], poly_B[0], poly_C[0]],
    )
  }
  pub fn prove_cubic<F>(
    claim: &Scalar,
  pub fn prove_cubic<C>(
    claim: &F,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    poly_C: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut PoseidonTranscript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
    poly_A: &mut DensePolynomial<F>,
    poly_B: &mut DensePolynomial<F>,
    poly_C: &mut DensePolynomial<F>,
    comb_func: C,
    transcript: &mut PoseidonTranscript<F>,
  ) -> (Self, Vec<F>, Vec<F>)
  where
    F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
    C: Fn(&F, &F, &F) -> F,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<UniPoly> = Vec::new();
    let mut r: Vec<F> = Vec::new();
    let mut cubic_polys: Vec<UniPoly<F>> = Vec::new();
    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();
      let mut eval_point_3 = Scalar::zero();
      let mut eval_point_0 = F::zero();
      let mut eval_point_2 = F::zero();
      let mut eval_point_3 = F::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
@@ -308,10 +189,10 @@ impl SumcheckInstanceProof {
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_poseidon(transcript);
      poly.write_to_transcript(transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar();
      let r_j = transcript.challenge_scalar(b"");
      r.push(r_j);
      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_j);
@@ -328,46 +209,41 @@ impl SumcheckInstanceProof {
    )
  }

  pub fn prove_cubic_batched<F>(
    claim: &Scalar,
  pub fn prove_cubic_batched<C>(
    claim: &F,
    num_rounds: usize,
    poly_vec_par: (
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial>,
      &mut DensePolynomial,
      &mut Vec<&mut DensePolynomial<F>>,
      &mut Vec<&mut DensePolynomial<F>>,
      &mut DensePolynomial<F>,
    ),
    poly_vec_seq: (
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial<F>>,
      &mut Vec<&mut DensePolynomial<F>>,
      &mut Vec<&mut DensePolynomial<F>>,
    ),
    coeffs: &[Scalar],
    comb_func: F,
    transcript: &mut PoseidonTranscript,
  ) -> (
    Self,
    Vec<Scalar>,
    (Vec<Scalar>, Vec<Scalar>, Scalar),
    (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
  )
    coeffs: &[F],
    comb_func: C,
    transcript: &mut PoseidonTranscript<F>,
  ) -> (Self, Vec<F>, (Vec<F>, Vec<F>, F), (Vec<F>, Vec<F>, Vec<F>))
  where
    F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
    C: Fn(&F, &F, &F) -> F,
  {
    let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par;
    let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq;

    // let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq;
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<UniPoly> = Vec::new();
    let mut r: Vec<F> = Vec::new();
    let mut cubic_polys: Vec<UniPoly<F>> = Vec::new();

    for _j in 0..num_rounds {
      let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new();
      let mut evals: Vec<(F, F, F)> = Vec::new();

      for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) {
        let mut eval_point_0 = Scalar::zero();
        let mut eval_point_2 = Scalar::zero();
        let mut eval_point_3 = Scalar::zero();
        let mut eval_point_0 = F::zero();
        let mut eval_point_2 = F::zero();
        let mut eval_point_3 = F::zero();

        let len = poly_A.len() / 2;
        for i in 0..len {
@@ -377,8 +253,7 @@ impl SumcheckInstanceProof {
          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
          let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
          let poly_C_bound_point =
            poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i];
          let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i];
          eval_point_2 += comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
@@ -388,8 +263,7 @@ impl SumcheckInstanceProof {
          // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
          let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
          let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
          let poly_C_bound_point =
            poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i];
          let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i];

          eval_point_3 += comb_func(
            &poly_A_bound_point,
@@ -406,9 +280,9 @@ impl SumcheckInstanceProof {
        poly_B_vec_seq.iter(),
        poly_C_vec_seq.iter()
      ) {
        let mut eval_point_0 = Scalar::zero();
        let mut eval_point_2 = Scalar::zero();
        let mut eval_point_3 = Scalar::zero();
        let mut eval_point_0 = F::zero();
        let mut eval_point_2 = F::zero();
        let mut eval_point_3 = F::zero();
        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
@@ -448,10 +322,10 @@ impl SumcheckInstanceProof {
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_poseidon(transcript);
      poly.write_to_transcript(transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar();
      let r_j = transcript.challenge_scalar(b"");
      r.push(r_j);

      // bound all tables to the verifier's challenge
@@ -502,24 +376,24 @@ impl SumcheckInstanceProof {
    )
  }

  pub fn prove_quad<F>(
    claim: &Scalar,
  pub fn prove_quad<C>(
    claim: &F,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut PoseidonTranscript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
    poly_A: &mut DensePolynomial<F>,
    poly_B: &mut DensePolynomial<F>,
    comb_func: C,
    transcript: &mut PoseidonTranscript<F>,
  ) -> (Self, Vec<F>, Vec<F>)
  where
    F: Fn(&Scalar, &Scalar) -> Scalar,
    C: Fn(&F, &F) -> F,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut quad_polys: Vec<UniPoly> = Vec::new();
    let mut r: Vec<F> = Vec::new();
    let mut quad_polys: Vec<UniPoly<F>> = Vec::new();

    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();
      let mut eval_point_0 = F::zero();
      let mut eval_point_2 = F::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
@@ -536,10 +410,10 @@ impl SumcheckInstanceProof {
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_poseidon(transcript);
      poly.write_to_transcript(transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar();
      let r_j = transcript.challenge_scalar(b"");
      r.push(r_j);

      // bound all tables to the verifier's challenge
@@ -556,360 +430,3 @@ impl SumcheckInstanceProof {
    )
  }
}
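Editorial note: all the provers above evaluate each round polynomial only at 0, 2 and 3 (the value at 1 is recovered from the running claim as e - eval_point_0). Because the tables are multilinear in the bound variable, the restriction of one entry is (1 - t) * low + t * high, so consecutive integer points differ by a constant step. A sketch of that extrapolation for a single (low, high) pair, under that assumption:

  // p(t) = (1 - t) * low + t * high, so p(2) = 2*high - low and
  // p(3) = p(2) + (high - low); the loops above apply this per entry.
  fn evals_at_0_2_3<F: ark_ff::PrimeField>(low: F, high: F) -> (F, F, F) {
    let at_0 = low;
    let at_2 = high + high - low;
    let at_3 = at_2 + high - low;
    (at_0, at_2, at_3)
  }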

// impl ZKSumcheckInstanceProof {
//   pub fn prove_quad<F>(
//     claim: &Scalar,
//     blind_claim: &Scalar,
//     num_rounds: usize,
//     poly_A: &mut DensePolynomial,
//     poly_B: &mut DensePolynomial,
//     comb_func: F,
//     gens_1: &MultiCommitGens,
//     gens_n: &MultiCommitGens,
//     transcript: &mut Transcript,
//     random_tape: &mut RandomTape,
//   ) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
//   where
//     F: Fn(&Scalar, &Scalar) -> Scalar,
//   {
//     let (blinds_poly, blinds_evals) = (
//       random_tape.random_vector(b"blinds_poly", num_rounds),
//       random_tape.random_vector(b"blinds_evals", num_rounds),
//     );
//     let mut claim_per_round = *claim;
//     let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();

//     let mut r: Vec<Scalar> = Vec::new();
//     let mut comm_polys: Vec<Group> = Vec::new();
//     let mut comm_evals: Vec<CompressedGroup> = Vec::new();
//     let mut proofs: Vec<DotProductProof> = Vec::new();

//     for j in 0..num_rounds {
//       let (poly, comm_poly) = {
//         let mut eval_point_0 = Scalar::zero();
//         let mut eval_point_2 = Scalar::zero();

//         let len = poly_A.len() / 2;
//         for i in 0..len {
//           // eval 0: bound_func is A(low)
//           eval_point_0 += comb_func(&poly_A[i], &poly_B[i]);

//           // eval 2: bound_func is -A(low) + 2*A(high)
//           let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
//           let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
//           eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point);
//         }

//         let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];
//         let poly = UniPoly::from_evals(&evals);
//         let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
//         (poly, comm_poly)
//       };

//       // append the prover's message to the transcript
//       comm_poly.append_to_transcript(b"comm_poly", transcript);
//       comm_polys.push(comm_poly);

//       // derive the verifier's challenge for the next round
//       let r_j = transcript.challenge_scalar(b"challenge_nextround");

//       // bound all tables to the verifier's challenge
//       poly_A.bound_poly_var_top(&r_j);
//       poly_B.bound_poly_var_top(&r_j);

//       // produce a proof of sum-check and of evaluation
//       let (proof, claim_next_round, comm_claim_next_round) = {
//         let eval = poly.evaluate(&r_j);
//         let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();

//         // we need to prove the following under homomorphic commitments:
//         // (1) poly(0) + poly(1) = claim_per_round
//         // (2) poly(r_j) = eval

//         // Our technique is to leverage dot product proofs:
//         // (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
//         // (2) we can prove: <poly_in_coeffs_form, (1, r_j, r^2_j, ..)> = eval
//         // for efficiency we batch them using random weights

//         // add two claims to transcript
//         comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
//         comm_eval.append_to_transcript(b"comm_eval", transcript);

//         // produce two weights
//         let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

//         // compute a weighted sum of the RHS
//         let target = w[0] * claim_per_round + w[1] * eval;
//         let comm_target = GroupElement::vartime_multiscalar_mul(
//           w.as_slice(),
//           iter::once(&comm_claim_per_round)
//             .chain(iter::once(&comm_eval))
//             .map(|pt| GroupElement::decompress(pt).unwrap())
//             .collect::<Vec<GroupElement>>()
//             .as_slice(),
//         )
//         .compress();

//         let blind = {
//           let blind_sc = if j == 0 {
//             blind_claim
//           } else {
//             &blinds_evals[j - 1]
//           };

//           let blind_eval = &blinds_evals[j];

//           w[0] * blind_sc + w[1] * blind_eval
//         };
//         assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);

//         let a = {
//           // the vector to use to decommit for sum-check test
//           let a_sc = {
//             let mut a = vec![Scalar::one(); poly.degree() + 1];
//             a[0] += Scalar::one();
//             a
//           };

//           // the vector to use to decommit for evaluation
//           let a_eval = {
//             let mut a = vec![Scalar::one(); poly.degree() + 1];
//             for j in 1..a.len() {
//               a[j] = a[j - 1] * r_j;
//             }
//             a
//           };

//           // take weighted sum of the two vectors using w
//           assert_eq!(a_sc.len(), a_eval.len());
//           (0..a_sc.len())
//             .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
//             .collect::<Vec<Scalar>>()
//         };

//         let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
//           gens_1,
//           gens_n,
//           transcript,
//           random_tape,
//           &poly.as_vec(),
//           &blinds_poly[j],
//           &a,
//           &target,
//           &blind,
//         );

//         (proof, eval, comm_eval)
//       };

//       claim_per_round = claim_next_round;
//       comm_claim_per_round = comm_claim_next_round;

//       proofs.push(proof);
//       r.push(r_j);
//       comm_evals.push(comm_claim_per_round.clone());
//     }

//     (
//       ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
//       r,
//       vec![poly_A[0], poly_B[0]],
//       blinds_evals[num_rounds - 1],
//     )
//   }

//   pub fn prove_cubic_with_additive_term<F>(
//     claim: &Scalar,
//     blind_claim: &Scalar,
//     num_rounds: usize,
//     poly_A: &mut DensePolynomial,
//     poly_B: &mut DensePolynomial,
//     poly_C: &mut DensePolynomial,
//     poly_D: &mut DensePolynomial,
//     comb_func: F,
//     gens_1: &MultiCommitGens,
//     gens_n: &MultiCommitGens,
//     transcript: &mut Transcript,
//     random_tape: &mut RandomTape,
//   ) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
//   where
//     F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar,
//   {
//     let (blinds_poly, blinds_evals) = (
//       random_tape.random_vector(b"blinds_poly", num_rounds),
//       random_tape.random_vector(b"blinds_evals", num_rounds),
//     );

//     let mut claim_per_round = *claim;
//     let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();

//     let mut r: Vec<Scalar> = Vec::new();
//     let mut comm_polys: Vec<CompressedGroup> = Vec::new();
//     let mut comm_evals: Vec<CompressedGroup> = Vec::new();
//     let mut proofs: Vec<DotProductProof> = Vec::new();

//     for j in 0..num_rounds {
//       let (poly, comm_poly) = {
//         let mut eval_point_0 = Scalar::zero();
//         let mut eval_point_2 = Scalar::zero();
//         let mut eval_point_3 = Scalar::zero();

//         let len = poly_A.len() / 2;
//         for i in 0..len {
//           // eval 0: bound_func is A(low)
//           eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);

//           // eval 2: bound_func is -A(low) + 2*A(high)
//           let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
//           let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
//           let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
//           let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
//           eval_point_2 += comb_func(
//             &poly_A_bound_point,
//             &poly_B_bound_point,
//             &poly_C_bound_point,
//             &poly_D_bound_point,
//           );

//           // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
//           let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
//           let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
//           let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
//           let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
//           eval_point_3 += comb_func(
//             &poly_A_bound_point,
//             &poly_B_bound_point,
//             &poly_C_bound_point,
//             &poly_D_bound_point,
//           );
//         }

//         let evals = vec![
//           eval_point_0,
//           claim_per_round - eval_point_0,
//           eval_point_2,
//           eval_point_3,
//         ];
//         let poly = UniPoly::from_evals(&evals);
//         let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
//         (poly, comm_poly)
//       };

//       // append the prover's message to the transcript
//       comm_poly.append_to_transcript(b"comm_poly", transcript);
//       comm_polys.push(comm_poly);

//       // derive the verifier's challenge for the next round
//       let r_j = transcript.challenge_scalar(b"challenge_nextround");

//       // bound all tables to the verifier's challenge
//       poly_A.bound_poly_var_top(&r_j);
//       poly_B.bound_poly_var_top(&r_j);
//       poly_C.bound_poly_var_top(&r_j);
//       poly_D.bound_poly_var_top(&r_j);

//       // produce a proof of sum-check and of evaluation
//       let (proof, claim_next_round, comm_claim_next_round) = {
//         let eval = poly.evaluate(&r_j);
//         let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();

//         // we need to prove the following under homomorphic commitments:
//         // (1) poly(0) + poly(1) = claim_per_round
//         // (2) poly(r_j) = eval

//         // Our technique is to leverage dot product proofs:
//         // (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
//         // (2) we can prove: <poly_in_coeffs_form, (1, r_j, r^2_j, ..)> = eval
//         // for efficiency we batch them using random weights

//         // add two claims to transcript
//         comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
//         comm_eval.append_to_transcript(b"comm_eval", transcript);

//         // produce two weights
//         let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

//         // compute a weighted sum of the RHS
//         let target = w[0] * claim_per_round + w[1] * eval;

//         let comm_target = GroupElement::vartime_multiscalar_mul(
//           w.as_slice(),
//           iter::once(&comm_claim_per_round)
//             .chain(iter::once(&comm_eval))
//             .map(|pt| GroupElement::decompress(&pt).unwrap())
//             .collect::<Vec<GroupElement>>()
//             .as_slice(),
//         )
//         .compress();

//         let blind = {
//           let blind_sc = if j == 0 {
//             blind_claim
//           } else {
//             &blinds_evals[j - 1]
//           };

//           let blind_eval = &blinds_evals[j];

//           w[0] * blind_sc + w[1] * blind_eval
//         };

//         let res = target.commit(&blind, gens_1);

//         assert_eq!(res.compress(), comm_target);

//         let a = {
//           // the vector to use to decommit for sum-check test
//           let a_sc = {
//             let mut a = vec![Scalar::one(); poly.degree() + 1];
//             a[0] += Scalar::one();
//             a
//           };

//           // the vector to use to decommit for evaluation
//           let a_eval = {
//             let mut a = vec![Scalar::one(); poly.degree() + 1];
//             for j in 1..a.len() {
//               a[j] = a[j - 1] * r_j;
//             }
//             a
//           };

//           // take weighted sum of the two vectors using w
//           assert_eq!(a_sc.len(), a_eval.len());
//           (0..a_sc.len())
//             .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
//             .collect::<Vec<Scalar>>()
//         };

//         let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
//           gens_1,
//           gens_n,
//           transcript,
//           random_tape,
//           &poly.as_vec(),
//           &blinds_poly[j],
//           &a,
//           &target,
//           &blind,
//         );

//         (proof, eval, comm_eval)
//       };

//       proofs.push(proof);
//       claim_per_round = claim_next_round;
//       comm_claim_per_round = comm_claim_next_round;
//       r.push(r_j);
//       comm_evals.push(comm_claim_per_round.clone());
//     }

//     (
//       ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
//       r,
//       vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]],
//       blinds_evals[num_rounds - 1],
//     )
//   }
// }

202 src/testudo_nizk.rs Normal file
@@ -0,0 +1,202 @@
use std::cmp::max;

use crate::errors::ProofVerifyError;
use crate::r1csproof::R1CSVerifierProof;
use crate::{
  poseidon_transcript::PoseidonTranscript,
  r1csproof::{R1CSGens, R1CSProof},
  transcript::Transcript,
  InputsAssignment, Instance, VarsAssignment,
};
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;

use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]

// TestudoNizk is suitable for uniform circuits where the
// evaluation of R1CS matrices A, B and C is cheap and can
// be done by the verifier. For more complex circuits this
// operation has to be offloaded to the prover.
pub struct TestudoNizk<E: Pairing> {
  pub r1cs_verifier_proof: R1CSVerifierProof<E>,
  pub r: (Vec<E::ScalarField>, Vec<E::ScalarField>),
}

pub struct TestudoNizkGens<E: Pairing> {
  gens_r1cs_sat: R1CSGens<E>,
}

impl<E: Pairing> TestudoNizkGens<E> {
  /// Performs the setup required by the polynomial commitment PST and Groth16
  pub fn setup(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    poseidon: PoseidonConfig<E::ScalarField>,
  ) -> Self {
    // ensure num_vars is a power of 2
    let num_vars_padded = {
      let mut num_vars_padded = max(num_vars, num_inputs + 1);
      if num_vars_padded != num_vars_padded.next_power_of_two() {
        num_vars_padded = num_vars_padded.next_power_of_two();
      }
      num_vars_padded
    };

    let num_cons_padded = {
      let mut num_cons_padded = num_cons;

      // ensure that num_cons_padded is at least 2
      if num_cons_padded == 0 || num_cons_padded == 1 {
        num_cons_padded = 2;
      }

      // ensure that num_cons_padded is a power of 2
      if num_cons.next_power_of_two() != num_cons {
        num_cons_padded = num_cons.next_power_of_two();
      }
      num_cons_padded
    };

    let gens_r1cs_sat = R1CSGens::setup(
      b"gens_r1cs_sat",
      num_cons_padded,
      num_vars_padded,
      num_inputs,
      poseidon,
    );
    TestudoNizkGens { gens_r1cs_sat }
  }
}
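Editorial note: the padding logic above can be summarised in two lines of arithmetic. A compact sketch (note it differs from the code above only for num_cons == 0, where the original branches on `num_cons` rather than the already-bumped `num_cons_padded` and therefore pads to 1 instead of 2):

  // Pad the variable count to a power of two, reserving room for the
  // public inputs plus the constant-one entry, and keep at least two
  // constraints so the matrices are never degenerate.
  fn padded_sizes(num_cons: usize, num_vars: usize, num_inputs: usize) -> (usize, usize) {
    let num_vars_padded = std::cmp::max(num_vars, num_inputs + 1).next_power_of_two();
    let num_cons_padded = std::cmp::max(num_cons, 2).next_power_of_two();
    (num_cons_padded, num_vars_padded)
  }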

impl<E: Pairing> TestudoNizk<E>
where
  E::ScalarField: Absorb,
{
  // Returns a proof that the R1CS instance is satisfiable
  pub fn prove(
    inst: &Instance<E::ScalarField>,
    vars: VarsAssignment<E::ScalarField>,
    inputs: &InputsAssignment<E::ScalarField>,
    gens: &TestudoNizkGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
    poseidon: PoseidonConfig<E::ScalarField>,
  ) -> Result<TestudoNizk<E>, ProofVerifyError> {
    transcript.append_bytes(b"", &inst.digest);

    let c: E::ScalarField = transcript.challenge_scalar(b"");
    transcript.new_from_state(&c);

    // we might need to pad variables
    let padded_vars = {
      let num_padded_vars = inst.inst.get_num_vars();
      let num_vars = vars.assignment.len();
      if num_padded_vars > num_vars {
        vars.pad(num_padded_vars)
      } else {
        vars
      }
    };

    let (r1cs_sat_proof, rx, ry) = R1CSProof::prove(
      &inst.inst,
      padded_vars.assignment,
      &inputs.assignment,
      &gens.gens_r1cs_sat,
      transcript,
    );

    let inst_evals = inst.inst.evaluate(&rx, &ry);

    transcript.new_from_state(&c);
    let r1cs_verifier_proof = r1cs_sat_proof
      .prove_verifier(
        inst.inst.get_num_vars(),
        inst.inst.get_num_cons(),
        &inputs.assignment,
        &inst_evals,
        transcript,
        &gens.gens_r1cs_sat,
        poseidon,
      )
      .unwrap();
    Ok(TestudoNizk {
      r1cs_verifier_proof,
      r: (rx, ry),
    })
  }

  // Verifies the satisfiability proof for the R1CS instance. In NIZK mode, the
  // verifier evaluates matrices A, B and C itself, which is a linear
  // operation and hence this is not a SNARK.
  // However, for highly structured circuits this operation is fast.
  pub fn verify(
    &self,
    gens: &TestudoNizkGens<E>,
    inst: &Instance<E::ScalarField>,
    input: &InputsAssignment<E::ScalarField>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
    _poseidon: PoseidonConfig<E::ScalarField>,
  ) -> Result<bool, ProofVerifyError> {
    transcript.append_bytes(b"", &inst.digest);
    let (claimed_rx, claimed_ry) = &self.r;
    let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);

    let sat_verified = self.r1cs_verifier_proof.verify(
      (claimed_rx.clone(), claimed_ry.clone()),
      &input.assignment,
      &inst_evals,
      transcript,
      &gens.gens_r1cs_sat,
    )?;
    assert!(sat_verified);
    Ok(sat_verified)
  }
}

#[cfg(test)]
mod tests {
  use crate::{
    parameters::poseidon_params,
    poseidon_transcript::PoseidonTranscript,
    testudo_nizk::{TestudoNizk, TestudoNizkGens},
    Instance,
  };

  #[test]
  pub fn check_testudo_nizk() {
    let num_vars = 256;
    let num_cons = num_vars;
    let num_inputs = 10;

    type E = ark_bls12_377::Bls12_377;

    // produce public generators
    let gens = TestudoNizkGens::<E>::setup(num_cons, num_vars, num_inputs, poseidon_params());

    // produce a synthetic R1CSInstance
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    let params = poseidon_params();

    // produce a proof
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let proof =
      TestudoNizk::prove(&inst, vars, &inputs, &gens, &mut prover_transcript, params).unwrap();

    // verify the proof
    let mut verifier_transcript = PoseidonTranscript::new(&poseidon_params());
    assert!(proof
      .verify(
        &gens,
        &inst,
        &inputs,
        &mut verifier_transcript,
        poseidon_params()
      )
      .is_ok());
  }
}
377 src/testudo_snark.rs Normal file
@@ -0,0 +1,377 @@
use std::cmp::max;

use crate::errors::ProofVerifyError;
use crate::r1csinstance::{R1CSCommitmentGens, R1CSEvalProof};
use crate::r1csproof::R1CSVerifierProof;

use crate::timer::Timer;
use crate::transcript::TranscriptWriter;
use crate::{
  poseidon_transcript::PoseidonTranscript,
  r1csproof::{R1CSGens, R1CSProof},
  transcript::Transcript,
  InputsAssignment, Instance, VarsAssignment,
};
use crate::{ComputationCommitment, ComputationDecommitment};
use ark_crypto_primitives::sponge::poseidon::PoseidonConfig;
use ark_crypto_primitives::sponge::Absorb;
use ark_ec::pairing::Pairing;

use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

#[derive(Debug, CanonicalSerialize, CanonicalDeserialize)]

pub struct TestudoSnark<E: Pairing> {
  pub r1cs_verifier_proof: R1CSVerifierProof<E>,
  pub r1cs_eval_proof: R1CSEvalProof<E>,
  pub inst_evals: (E::ScalarField, E::ScalarField, E::ScalarField),
  pub r: (Vec<E::ScalarField>, Vec<E::ScalarField>),
}

pub struct TestudoSnarkGens<E: Pairing> {
  gens_r1cs_sat: R1CSGens<E>,
  gens_r1cs_eval: R1CSCommitmentGens<E>,
}

impl<E: Pairing> TestudoSnarkGens<E> {
  /// Performs the setups required by the polynomial commitment PST, Groth16
  /// and the computational commitment given the size of the R1CS statement;
  /// `num_nz_entries` specifies the maximum number of non-zero entries in
  /// any of the three R1CS matrices.
  pub fn setup(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    num_nz_entries: usize,
    poseidon: PoseidonConfig<E::ScalarField>,
  ) -> Self {
    // ensure num_vars is a power of 2
    let num_vars_padded = {
      let mut num_vars_padded = max(num_vars, num_inputs + 1);
      if num_vars_padded != num_vars_padded.next_power_of_two() {
        num_vars_padded = num_vars_padded.next_power_of_two();
      }
      num_vars_padded
    };

    let num_cons_padded = {
      let mut num_cons_padded = num_cons;

      // ensure that num_cons_padded is at least 2
      if num_cons_padded == 0 || num_cons_padded == 1 {
        num_cons_padded = 2;
      }

      // ensure that num_cons_padded is a power of 2
      if num_cons.next_power_of_two() != num_cons {
        num_cons_padded = num_cons.next_power_of_two();
      }
      num_cons_padded
    };

    let gens_r1cs_sat = R1CSGens::setup(
      b"gens_r1cs_sat",
      num_cons_padded,
      num_vars_padded,
      num_inputs,
      poseidon,
    );
    let gens_r1cs_eval = R1CSCommitmentGens::setup(
      b"gens_r1cs_eval",
      num_cons_padded,
      num_vars_padded,
      num_inputs,
      num_nz_entries,
    );
    TestudoSnarkGens {
      gens_r1cs_sat,
      gens_r1cs_eval,
    }
  }
}

impl<E: Pairing> TestudoSnark<E>
where
  E::ScalarField: Absorb,
{
  // Constructs the computational commitment, used to prove that the
  // evaluations of matrices A, B and C sent by the prover to the verifier
  // are correct.
  pub fn encode(
    inst: &Instance<E::ScalarField>,
    gens: &TestudoSnarkGens<E>,
  ) -> (
    ComputationCommitment<E::G1>,
    ComputationDecommitment<E::ScalarField>,
  ) {
    let timer_encode = Timer::new("SNARK::encode");
    let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
    timer_encode.stop();
    (
      ComputationCommitment { comm },
      ComputationDecommitment { decomm },
    )
  }

  // Returns the Testudo SNARK proof, which has two components:
  // * a proof that the R1CS instance is satisfiable
  // * a proof that the evaluation of matrices A, B and C at the point (x, y)
  //   resulting from the two rounds of sumcheck is correct
  pub fn prove(
    inst: &Instance<E::ScalarField>,
    comm: &ComputationCommitment<E::G1>,
    decomm: &ComputationDecommitment<E::ScalarField>,
    vars: VarsAssignment<E::ScalarField>,
    inputs: &InputsAssignment<E::ScalarField>,
    gens: &TestudoSnarkGens<E>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
    poseidon: PoseidonConfig<E::ScalarField>,
  ) -> Result<TestudoSnark<E>, ProofVerifyError> {
    comm.comm.write_to_transcript(transcript);
    let c: E::ScalarField = transcript.challenge_scalar(b"");
    transcript.new_from_state(&c);

    // we might need to pad variables
    let padded_vars = {
      let num_padded_vars = inst.inst.get_num_vars();
      let num_vars = vars.assignment.len();
      if num_padded_vars > num_vars {
        vars.pad(num_padded_vars)
      } else {
        vars
      }
    };

    let (r1cs_sat_proof, rx, ry) = R1CSProof::prove(
      &inst.inst,
      padded_vars.assignment,
      &inputs.assignment,
      &gens.gens_r1cs_sat,
      transcript,
    );

    // We send evaluations of A, B, C at r = (rx, ry) as claims
    // to enable the verifier to complete the first sum-check
    let timer_eval = Timer::new("eval_sparse_polys");
    let inst_evals = {
      let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
      transcript.append_scalar(b"", &Ar);
      transcript.append_scalar(b"", &Br);
      transcript.append_scalar(b"", &Cr);
      (Ar, Br, Cr)
    };
    timer_eval.stop();

    let timer_eval_proof = Timer::new("r1cs_eval_proof");
    let r1cs_eval_proof = R1CSEvalProof::prove(
      &decomm.decomm,
      &rx,
      &ry,
      &inst_evals,
      &gens.gens_r1cs_eval,
      transcript,
    );
    timer_eval_proof.stop();

    transcript.new_from_state(&c);
    let timer_sat_circuit_verification = Timer::new("r1cs_sat_circuit_verification");
    let r1cs_verifier_proof = r1cs_sat_proof
      .prove_verifier(
        inst.inst.get_num_vars(),
        inst.inst.get_num_cons(),
        &inputs.assignment,
        &inst_evals,
        transcript,
        &gens.gens_r1cs_sat,
        poseidon,
      )
      .unwrap();
    timer_sat_circuit_verification.stop();
    Ok(TestudoSnark {
      r1cs_verifier_proof,
      r1cs_eval_proof,
      inst_evals,
      r: (rx, ry),
    })
  }

  pub fn verify(
    &self,
    gens: &TestudoSnarkGens<E>,
    comm: &ComputationCommitment<E::G1>,
    input: &InputsAssignment<E::ScalarField>,
    transcript: &mut PoseidonTranscript<E::ScalarField>,
    _poseidon: PoseidonConfig<E::ScalarField>,
  ) -> Result<bool, ProofVerifyError> {
    let (rx, ry) = &self.r;

    let timer_sat_verification = Timer::new("r1cs_sat_verification");
    let sat_verified = self.r1cs_verifier_proof.verify(
      (rx.clone(), ry.clone()),
      &input.assignment,
      &self.inst_evals,
      transcript,
      &gens.gens_r1cs_sat,
    )?;
    timer_sat_verification.stop();
    assert!(sat_verified);

    let (Ar, Br, Cr) = &self.inst_evals;
    transcript.append_scalar(b"", Ar);
    transcript.append_scalar(b"", Br);
    transcript.append_scalar(b"", Cr);

    let timer_eval_verification = Timer::new("r1cs_eval_verification");
    let eval_verified = self.r1cs_eval_proof.verify(
      &comm.comm,
      rx,
      ry,
      &self.inst_evals,
      &gens.gens_r1cs_eval,
      transcript,
    );
    timer_eval_verification.stop();
    Ok(sat_verified && eval_verified.is_ok())
  }
}

#[cfg(test)]
mod tests {

  use crate::ark_std::Zero;
  use crate::{
    parameters::poseidon_params,
    poseidon_transcript::PoseidonTranscript,
    testudo_snark::{TestudoSnark, TestudoSnarkGens},
    InputsAssignment, Instance, VarsAssignment,
  };
  use ark_ff::{BigInteger, One, PrimeField};

  #[test]
  pub fn check_testudo_snark() {
    let num_vars = 256;
    let num_cons = num_vars;
    let num_inputs = 10;

    type E = ark_bls12_377::Bls12_377;

    // produce public generators
    let gens =
      TestudoSnarkGens::<E>::setup(num_cons, num_vars, num_inputs, num_cons, poseidon_params());

    // produce a synthetic R1CSInstance
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // create a commitment to R1CSInstance
    let (comm, decomm) = TestudoSnark::encode(&inst, &gens);

    let params = poseidon_params();

    // produce a proof
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let proof = TestudoSnark::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
      params,
    )
    .unwrap();

    // verify the proof
    let mut verifier_transcript = PoseidonTranscript::new(&poseidon_params());
    assert!(proof
      .verify(
        &gens,
        &comm,
        &inputs,
        &mut verifier_transcript,
        poseidon_params()
      )
      .is_ok());
  }

  #[test]
  fn test_padded_constraints() {
    type F = ark_bls12_377::Fr;
    type E = ark_bls12_377::Bls12_377;
    // parameters of the R1CS instance
    let num_cons = 1;
    let num_vars = 0;
    let num_inputs = 3;
    let num_non_zero_entries = 3;

    // We will encode the constraint below into three matrices, where
    // the coefficients in the matrix are in the little-endian byte order
    let mut A: Vec<(usize, usize, Vec<u8>)> = Vec::new();
    let mut B: Vec<(usize, usize, Vec<u8>)> = Vec::new();
    let mut C: Vec<(usize, usize, Vec<u8>)> = Vec::new();

    // Create the single constraint a * a = z - 13 - b, i.e. z = a^2 + b + 13
    A.push((0, num_vars + 2, F::one().into_bigint().to_bytes_le())); // 1*a
    B.push((0, num_vars + 2, F::one().into_bigint().to_bytes_le())); // 1*a
    C.push((0, num_vars + 1, F::one().into_bigint().to_bytes_le())); // 1*z
    C.push((0, num_vars, (-F::from(13u64)).into_bigint().to_bytes_le())); // -13*1
    C.push((0, num_vars + 3, (-F::one()).into_bigint().to_bytes_le())); // -1*b

    // Var Assignments (Z_0 = 16 is the only output)
    let vars = vec![F::zero().into_bigint().to_bytes_le(); num_vars];

    // create an InputsAssignment (a = 1, b = 2)
    let mut inputs = vec![F::zero().into_bigint().to_bytes_le(); num_inputs];
    inputs[0] = F::from(16u64).into_bigint().to_bytes_le();
    inputs[1] = F::from(1u64).into_bigint().to_bytes_le();
    inputs[2] = F::from(2u64).into_bigint().to_bytes_le();

    let assignment_inputs = InputsAssignment::<F>::new(&inputs).unwrap();
    let assignment_vars = VarsAssignment::new(&vars).unwrap();

    // Check if the instance is satisfiable
    let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
    let res = inst.is_sat(&assignment_vars, &assignment_inputs);
    assert!(res.unwrap(), "should be satisfied");

    // Testudo public params
    let gens = TestudoSnarkGens::<E>::setup(
      num_cons,
      num_vars,
      num_inputs,
      num_non_zero_entries,
      poseidon_params(),
    );

    // create a commitment to the R1CS instance
    let (comm, decomm) = TestudoSnark::encode(&inst, &gens);

    let params = poseidon_params();

    // produce a TestudoSnark
    let mut prover_transcript = PoseidonTranscript::new(&params);
    let proof = TestudoSnark::prove(
      &inst,
      &comm,
      &decomm,
      assignment_vars.clone(),
      &assignment_inputs,
      &gens,
      &mut prover_transcript,
      poseidon_params(),
    )
    .unwrap();

    // verify the TestudoSnark
    let mut verifier_transcript = PoseidonTranscript::new(&params);
    assert!(proof
      .verify(
        &gens,
        &comm,
        &assignment_inputs,
        &mut verifier_transcript,
        poseidon_params()
      )
      .is_ok());
  }
}
src/timer.rs
@@ -1,3 +1,4 @@
/// Timer is a simple utility to profile the execution time of a block of code.
#[cfg(feature = "profile")]
use colored::Colorize;
#[cfg(feature = "profile")]

src/transcript.rs
@@ -1,67 +1,16 @@
use super::scalar::Scalar;
use crate::group::CompressedGroup;
use ark_ff::{BigInteger, PrimeField};
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;
use merlin::Transcript;

pub trait ProofTranscript {
  fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
  fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar);
  fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup);
  fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar;
  fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar>;
}

impl ProofTranscript for Transcript {
  fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
    self.append_message(b"protocol-name", protocol_name);
  }

  fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
    self.append_message(label, scalar.into_repr().to_bytes_le().as_slice());
  }

  fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) {
    let mut point_encoded = Vec::new();
    point.serialize(&mut point_encoded).unwrap();
    self.append_message(label, point_encoded.as_slice());
  }

  fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
    let mut buf = [0u8; 64];
    self.challenge_bytes(label, &mut buf);
    Scalar::from_le_bytes_mod_order(&buf)
  }

  fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
    (0..len)
      .map(|_i| self.challenge_scalar(label))
      .collect::<Vec<Scalar>>()
/// Transcript is the application-level transcript used to derive the challenges
/// needed for Fiat-Shamir during aggregation. It is given to the
/// prover/verifier so that the transcript can be fed with any other data first.
/// TODO: Make this trait the only Transcript trait
pub trait Transcript {
  fn domain_sep(&mut self);
  fn append<S: CanonicalSerialize>(&mut self, label: &'static [u8], point: &S);
  fn challenge_scalar<F: PrimeField>(&mut self, label: &'static [u8]) -> F;
  fn challenge_scalar_vec<F: PrimeField>(&mut self, label: &'static [u8], n: usize) -> Vec<F> {
    (0..n).map(|_| self.challenge_scalar(label)).collect()
  }
}

pub trait AppendToTranscript {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript);
}

impl AppendToTranscript for Scalar {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_scalar(label, self);
  }
}

impl AppendToTranscript for [Scalar] {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"begin_append_vector");
    for item in self {
      transcript.append_scalar(label, item);
    }
    transcript.append_message(label, b"end_append_vector");
  }
}

impl AppendToTranscript for CompressedGroup {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_point(label, self);
  }
}
pub use crate::poseidon_transcript::TranscriptWriter;

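Editorial note: the new `Transcript` trait above derives vector challenges from the single-challenge method via its default body. A toy, deliberately non-cryptographic implementation to illustrate the shape of the trait (assuming the trait as defined above; `CountingTranscript` is illustrative only):

  use ark_ff::PrimeField;
  use ark_serialize::CanonicalSerialize;

  // Counts challenges instead of hashing absorbed data; a real
  // instantiation (e.g. the Poseidon transcript) must derive challenges
  // from everything appended so far.
  struct CountingTranscript(u64);

  impl Transcript for CountingTranscript {
    fn domain_sep(&mut self) {}
    fn append<S: CanonicalSerialize>(&mut self, _label: &'static [u8], _point: &S) {}
    fn challenge_scalar<F: PrimeField>(&mut self, _label: &'static [u8]) -> F {
      self.0 += 1;
      F::from(self.0) // NOT sound as Fiat-Shamir; illustration only
    }
    // challenge_scalar_vec is inherited from the trait's default method.
  }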
169 src/unipoly.rs
@@ -1,34 +1,29 @@
use crate::poseidon_transcript::{AppendToPoseidon, PoseidonTranscript};

use super::commitments::{Commitments, MultiCommitGens};
use super::group::GroupElement;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use ark_ff::Field;
use crate::poseidon_transcript::{PoseidonTranscript, TranscriptWriter};
use ark_crypto_primitives::sponge::Absorb;
use ark_ff::{Field, PrimeField};
use ark_serialize::*;
use merlin::Transcript;
// ax^2 + bx + c stored as vec![c, b, a]
// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a]
#[derive(Debug, CanonicalDeserialize, CanonicalSerialize, Clone)]
pub struct UniPoly {
  pub coeffs: Vec<Scalar>,
pub struct UniPoly<F: Field> {
  pub coeffs: Vec<F>,
  // pub coeffs_fq: Vec<Fq>,
}

// ax^2 + bx + c stored as vec![c, a]
// ax^3 + bx^2 + cx + d stored as vec![d, b, a]
#[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
pub struct CompressedUniPoly {
  pub coeffs_except_linear_term: Vec<Scalar>,
pub struct CompressedUniPoly<F: Field> {
  pub coeffs_except_linear_term: Vec<F>,
}

impl UniPoly {
  pub fn from_evals(evals: &[Scalar]) -> Self {
impl<F: Field> UniPoly<F> {
  pub fn from_evals(evals: &[F]) -> Self {
    // we only support degree-2 or degree-3 univariate polynomials
    assert!(evals.len() == 3 || evals.len() == 4);
    let coeffs = if evals.len() == 3 {
      // ax^2 + bx + c
      let two_inv = Scalar::from(2).inverse().unwrap();
      let two_inv = F::from(2u8).inverse().unwrap();

      let c = evals[0];
      let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
@@ -36,13 +31,12 @@ impl UniPoly {
      vec![c, b, a]
    } else {
      // ax^3 + bx^2 + cx + d
      let two_inv = Scalar::from(2).inverse().unwrap();
      let six_inv = Scalar::from(6).inverse().unwrap();
      let two_inv = F::from(2u8).inverse().unwrap();
      let six_inv = F::from(6u8).inverse().unwrap();

      let d = evals[0];
      let a = six_inv
        * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1]
          - evals[0]);
        * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
      let b = two_inv
        * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
          + evals[2]
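Editorial note: the coefficient formulas above are Lagrange interpolation of a quadratic from its values at 0, 1 and 2. Writing p(x) = ax^2 + bx + c:

  c = p(0),    a = (p(2) - 2 p(1) + p(0)) / 2,    b = p(1) - a - c

which matches `a = two_inv * (evals[2] - evals[1] - evals[1] + c)` in the code. As a worked check against `test_from_evals_quad` further down, p = 2x^2 + 3x + 1 has evals (1, 6, 15), giving a = (15 - 12 + 1) / 2 = 2 and b = 6 - 2 - 1 = 3.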
@@ -61,19 +55,15 @@ impl UniPoly {
|
||||
self.coeffs.len() - 1
|
||||
}
|
||||
|
||||
pub fn as_vec(&self) -> Vec<Scalar> {
|
||||
self.coeffs.clone()
|
||||
}
|
||||
|
||||
pub fn eval_at_zero(&self) -> Scalar {
|
||||
pub fn eval_at_zero(&self) -> F {
|
||||
self.coeffs[0]
|
||||
}
|
||||
|
||||
pub fn eval_at_one(&self) -> Scalar {
|
||||
pub fn eval_at_one(&self) -> F {
|
||||
(0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
|
||||
}
|
||||
|
||||
pub fn evaluate(&self, r: &Scalar) -> Scalar {
|
||||
pub fn evaluate(&self, r: &F) -> F {
|
||||
let mut eval = self.coeffs[0];
|
||||
let mut power = *r;
|
||||
for i in 1..self.coeffs.len() {
|
||||
@@ -82,57 +72,42 @@ impl UniPoly {
|
||||
}
|
||||
eval
|
||||
}
|
||||

  pub fn compress(&self) -> CompressedUniPoly {
    let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
    assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
    CompressedUniPoly {
      coeffs_except_linear_term,
    }
  }

  pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
    self.coeffs.commit(blind, gens)
  }
  // pub fn compress(&self) -> CompressedUniPoly<F> {
  //   let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
  //   assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
  //   CompressedUniPoly {
  //     coeffs_except_linear_term,
  //   }
  // }
}

impl CompressedUniPoly {
  // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
  // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
  pub fn decompress(&self, hint: &Scalar) -> UniPoly {
    let mut linear_term =
      (*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
    for i in 1..self.coeffs_except_linear_term.len() {
      linear_term -= self.coeffs_except_linear_term[i];
    }
// impl<F: PrimeField> CompressedUniPoly<F> {
//   // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
//   // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
//   pub fn decompress(&self, hint: &F) -> UniPoly<F> {
//     let mut linear_term =
//       (*hint) - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
//     for i in 1..self.coeffs_except_linear_term.len() {
//       linear_term -= self.coeffs_except_linear_term[i];
//     }

    let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
    coeffs.extend(&self.coeffs_except_linear_term[1..]);
    assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
    UniPoly { coeffs }
  }
}
//     let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
//     coeffs.extend(&self.coeffs_except_linear_term[1..]);
//     assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
//     UniPoly { coeffs }
//   }
// }
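// Worked check (not part of this commit) of the identity used above: p(0) is
// the constant term and p(1) is the sum of all coefficients, so
//   hint = p(0) + p(1) = 2 * constant + linear + deg2 + deg3,
// and the loop solves for the one coefficient that was dropped by compress():
//   linear = hint - 2 * constant - deg2 - deg3.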

impl AppendToPoseidon for UniPoly {
  fn append_to_poseidon(&self, transcript: &mut PoseidonTranscript) {
impl<F: PrimeField + Absorb> TranscriptWriter<F> for UniPoly<F> {
  fn write_to_transcript(&self, transcript: &mut PoseidonTranscript<F>) {
    // transcript.append_message(label, b"UniPoly_begin");
    for i in 0..self.coeffs.len() {
      transcript.append_scalar(&self.coeffs[i]);
      transcript.append_scalar(b"", &self.coeffs[i]);
    }
    // transcript.append_message(label, b"UniPoly_end");
  }
}

impl AppendToTranscript for UniPoly {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"UniPoly_begin");
    for i in 0..self.coeffs.len() {
      transcript.append_scalar(b"coeff", &self.coeffs[i]);
    }
    transcript.append_message(label, b"UniPoly_end");
  }
}
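// Sketch (not part of this commit) of how TranscriptWriter is used by the
// sum-check verifier: each round polynomial is absorbed into the Poseidon
// sponge before the next challenge is squeezed. The challenge_scalar call
// below is an assumption about PoseidonTranscript's interface, mirroring how
// the sum-check code in this crate draws its round challenges:
//   poly.write_to_transcript(&mut transcript);
//   let r_i = transcript.challenge_scalar(b"challenge_nextround");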

#[cfg(test)]
mod tests {

@@ -140,59 +115,61 @@ mod tests {

  use super::*;

  type F = ark_bls12_377::Fr;

  #[test]
  fn test_from_evals_quad() {
    // polynomial is 2x^2 + 3x + 1
    let e0 = Scalar::one();
    let e1 = Scalar::from(6);
    let e2 = Scalar::from(15);
    let e0 = F::one();
    let e1 = F::from(6 as u8);
    let e2 = F::from(15 as u8);
    let evals = vec![e0, e1, e2];
    let poly = UniPoly::from_evals(&evals);

    assert_eq!(poly.eval_at_zero(), e0);
    assert_eq!(poly.eval_at_one(), e1);
    assert_eq!(poly.coeffs.len(), 3);
    assert_eq!(poly.coeffs[0], Scalar::one());
    assert_eq!(poly.coeffs[1], Scalar::from(3));
    assert_eq!(poly.coeffs[2], Scalar::from(2));
    assert_eq!(poly.coeffs[0], F::one());
    assert_eq!(poly.coeffs[1], F::from(3 as u8));
    assert_eq!(poly.coeffs[2], F::from(2 as u8));

    let hint = e0 + e1;
    let compressed_poly = poly.compress();
    let decompressed_poly = compressed_poly.decompress(&hint);
    for i in 0..decompressed_poly.coeffs.len() {
      assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
    }
    // let hint = e0 + e1;
    // // let compressed_poly = poly.compress();
    // // let decompressed_poly = compressed_poly.decompress(&hint);
    // for i in 0..poly.coeffs.len() {
    //   assert_eq!(poly.coeffs[i], poly.coeffs[i]);
    // }

    let e3 = Scalar::from(28);
    assert_eq!(poly.evaluate(&Scalar::from(3)), e3);
    let e3 = F::from(28 as u8);
    assert_eq!(poly.evaluate(&F::from(3 as u8)), e3);
  }
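  // Arithmetic check (not part of this commit): for p(x) = 2x^2 + 3x + 1 the
  // evaluations used above are p(0) = 1, p(1) = 6, p(2) = 15, and the final
  // assertion checks p(3) = 18 + 9 + 1 = 28.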

  #[test]
  fn test_from_evals_cubic() {
    // polynomial is x^3 + 2x^2 + 3x + 1
    let e0 = Scalar::one();
    let e1 = Scalar::from(7);
    let e2 = Scalar::from(23);
    let e3 = Scalar::from(55);
    let e0 = F::one();
    let e1 = F::from(7);
    let e2 = F::from(23);
    let e3 = F::from(55);
    let evals = vec![e0, e1, e2, e3];
    let poly = UniPoly::from_evals(&evals);

    assert_eq!(poly.eval_at_zero(), e0);
    assert_eq!(poly.eval_at_one(), e1);
    assert_eq!(poly.coeffs.len(), 4);
    assert_eq!(poly.coeffs[0], Scalar::one());
    assert_eq!(poly.coeffs[1], Scalar::from(3));
    assert_eq!(poly.coeffs[2], Scalar::from(2));
    assert_eq!(poly.coeffs[3], Scalar::from(1));
    assert_eq!(poly.coeffs[0], F::one());
    assert_eq!(poly.coeffs[1], F::from(3));
    assert_eq!(poly.coeffs[2], F::from(2));
    assert_eq!(poly.coeffs[3], F::from(1));

    let hint = e0 + e1;
    let compressed_poly = poly.compress();
    let decompressed_poly = compressed_poly.decompress(&hint);
    for i in 0..decompressed_poly.coeffs.len() {
      assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
    }
    // let hint = e0 + e1;
    // let compressed_poly = poly.compress();
    // let decompressed_poly = compressed_poly.decompress(&hint);
    // for i in 0..decompressed_poly.coeffs.len() {
    //   assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
    // }

    let e4 = Scalar::from(109);
    assert_eq!(poly.evaluate(&Scalar::from(4)), e4);
    let e4 = F::from(109);
    assert_eq!(poly.evaluate(&F::from(4)), e4);
  }
}
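// Arithmetic check (not part of this commit): for p(x) = x^3 + 2x^2 + 3x + 1
// the evaluations above are p(0) = 1, p(1) = 7, p(2) = 23, p(3) = 55, and the
// final assertion checks p(4) = 64 + 32 + 12 + 1 = 109.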