Browse Source

initial commit

master
Srinath Setty 4 years ago
parent
commit
95b9ad35a6
33 changed files with 8986 additions and 14 deletions
  1. +21
    -0
      .github/workflows/rust.yml
  2. +12
    -0
      .gitignore
  3. +12
    -0
      CONTRIBUTING.md
  4. +64
    -0
      Cargo.toml
  5. +121
    -0
      NOTICE.md
  6. +35
    -14
      README.md
  7. +48
    -0
      benches/commitments.rs
  8. +86
    -0
      benches/dotproduct.rs
  9. +200
    -0
      benches/polycommit.rs
  10. +123
    -0
      benches/r1csproof.rs
  11. +138
    -0
      benches/spartan.rs
  12. +162
    -0
      benches/sumcheck.rs
  13. +5
    -0
      rustfmt.toml
  14. +247
    -0
      src/bullet.rs
  15. +109
    -0
      src/commitments.rs
  16. +740
    -0
      src/dense_mlpoly.rs
  17. +15
    -0
      src/errors.rs
  18. +101
    -0
      src/group.rs
  19. +31
    -0
      src/lib.rs
  20. +31
    -0
      src/math.rs
  21. +750
    -0
      src/nizk.rs
  22. +489
    -0
      src/product_tree.rs
  23. +55
    -0
      src/profiler.rs
  24. +346
    -0
      src/r1csinstance.rs
  25. +632
    -0
      src/r1csproof.rs
  26. +48
    -0
      src/scalar.rs
  27. +1213
    -0
      src/scalar_25519.rs
  28. +1721
    -0
      src/sparse_mlpoly.rs
  29. +190
    -0
      src/spartan.rs
  30. +911
    -0
      src/sumcheck.rs
  31. +83
    -0
      src/timer.rs
  32. +63
    -0
      src/transcript.rs
  33. +184
    -0
      src/unipoly.rs

+ 21
- 0
.github/workflows/rust.yml

@ -0,0 +1,21 @@
name: Rust
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install
run: rustup default nightly
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose

+ 12
- 0
.gitignore

@ -0,0 +1,12 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
*.txt

+ 12
- 0
CONTRIBUTING.md

@ -0,0 +1,12 @@
This project welcomes contributions and suggestions. Most contributions require you to
agree to a Contributor License Agreement (CLA) declaring that you have the right to,
and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need
to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

+ 64
- 0
Cargo.toml

@ -0,0 +1,64 @@
[package]
name = "spartan"
version = "0.1.0"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2018"
[dependencies]
curve25519-dalek = { version = "2", features = ["serde"]}
merlin = "2.0.0"
rand = "0.7.3"
digest = "0.8.1"
sha3 = "0.8.2"
byteorder = "1.3.4"
rayon = "1.3.0"
serde = { version = "1.0.106", features = ["derive"] }
bincode = "1.2.1"
subtle = { version = "^2.2.2", default-features = false }
rand_core = { version = "0.5", default-features = false }
zeroize = { version = "1", default-features = false }
itertools = "0.9.0"
colored = "1.9.3"
flate2 = "1.0.14"
[dev-dependencies]
criterion = "0.3.1"
[lib]
name = "libspartan"
path = "src/lib.rs"
[[bin]]
name = "profiler"
path = "src/profiler.rs"
[[bench]]
name = "commitments"
harness = false
[[bench]]
name = "dotproduct"
harness = false
[[bench]]
name = "polycommit"
harness = false
[[bench]]
name = "r1csproof"
harness = false
[[bench]]
name = "spartan"
harness = false
[[bench]]
name = "sumcheck"
harness = false
[features]
simd_backend = ["curve25519-dalek/simd_backend"]
rayon_par = []
profile = []
default = ["simd_backend"]

+ 121
- 0
NOTICE.md

@ -0,0 +1,121 @@
This repository includes the following third-party open-source code.
* The code in scalar_25519.rs is derived from [bls12-381](https://github.com/zkcrypto/bls12_381).
Specifically, from [src/bls12_381/scalar.rs](https://github.com/zkcrypto/bls12_381/blob/master/src/scalar.rs) and [src/bls12_381/util.rs](https://github.com/zkcrypto/bls12_381/blob/master/src/util.rs), which has the following copyright and license.
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
* The invert and batch_invert methods in src/scalar_25519.rs are from [curve25519-dalek](https://github.com/dalek-cryptography/curve25519-dalek), which has the following copyright and license.
Copyright (c) 2016-2019 Isis Agora Lovecruft, Henry de Valence. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
========================================================================
Portions of curve25519-dalek were originally derived from Adam Langley's
Go ed25519 implementation, found at <https://github.com/agl/ed25519/>,
under the following licence:
========================================================================
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* The bullet.rs is derived from [bulletproofs](https://github.com/dalek-cryptography/bulletproofs/), which has the following license:
MIT License
Copyright (c) 2018 Chain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

+ 35
- 14
README.md

@ -1,14 +1,35 @@
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
# Spartan: High-speed zkSNARKs without trusted setup
![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg)
Spartan is a research project to design high-speed zero-knowledge proof systems, a cryptographic protocol that enables a prover to prove a mathematical statement (e.g., that a given program was executed correctly) without revealing anything besides the validity of the statement.
The current repository includes a library that implements
a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), a type of zero-knowledge proof system with short proofs and verification times. Unlike many other zkSNARKs, Spartan does not require a trusted setup and its security relies on the hardness of computing discrete logarithms (a well-studied assumption). The scheme is described in our [paper](https://eprint.iacr.org/2019/550).
## Building libspartan
cargo build
# On a machine that supports avx2 or ifma instructions:
export RUSTFLAGS="-C target-cpu=native"
cargo build --features "simd_backend" --release
## Performance
cargo build
# On a machine that supports avx2 or ifma instructions:
export RUSTFLAGS="-C target-cpu=native"
cargo build --features "simd_backend,profile" --release
./target/release/profiler
cargo bench
# On a machine that supports avx2 or ifma instructions:
export RUSTFLAGS="-C target-cpu=native"
cargo bench --features "simd_backend"
## LICENSE
See [LICENSE](./LICENSE)
## Contributing
See [CONTRIBUTING](./CONTRIBUTING.md)

+ 48
- 0
benches/commitments.rs

@ -0,0 +1,48 @@
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate curve25519_dalek;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::commitments::{Commitments, MultiCommitGens};
use libspartan::math::Math;
use libspartan::scalar::Scalar;
use rand::rngs::OsRng;
use criterion::*;
/// Benchmarks a Pedersen-style multi-commitment to an all-`true` boolean
/// vector of length 2^20, using generators from `MultiCommitGens`.
fn commitment_benchmark(c: &mut Criterion) {
  let mut rng = OsRng;
  for &num_bits in [20].iter() {
    let scale = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("commitment_bools");
    group.plot_config(scale);

    // Vector length is 2^num_bits.
    let len = (num_bits as usize).pow2();
    let gens = MultiCommitGens::new(len, b"test-m");
    let blind = Scalar::random(&mut rng);
    let bits = vec![true; len];

    let bench_name = format!("commitment_bools_{}", len);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| bits.commit(black_box(&blind), black_box(&gens)))
    });
    group.finish();
  }
}
/// Criterion configuration shared by this file's benchmarks; a reduced
/// sample count keeps the expensive cryptographic benchmarks short.
fn set_duration() -> Criterion {
  let config = Criterion::default();
  config.sample_size(10)
}
// Register the commitment benchmark under the shared Criterion config.
criterion_group! {
name = benches_commitment;
config = set_duration();
targets = commitment_benchmark
}
criterion_main!(benches_commitment);

+ 86
- 0
benches/dotproduct.rs

@ -0,0 +1,86 @@
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate curve25519_dalek;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::math::Math;
use libspartan::nizk::DotProductProof;
use libspartan::scalar::Scalar;
use libspartan::scalar::ScalarBytes;
use rand::rngs::OsRng;
use criterion::*;
/// Baseline benchmark: dot product of two random length-2^20 vectors of raw
/// dalek scalars, computed with the naive `compute_dotproduct` loop below.
fn dotproduct_benchmark_dalek(c: &mut Criterion) {
  let mut csprng: OsRng = OsRng;
  for &exp in [20].iter() {
    let scale = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("dotproduct_benchmark_dalek");
    group.plot_config(scale);

    let len = (exp as usize).pow2();
    let lhs: Vec<ScalarBytes> = (0..len).map(|_| ScalarBytes::random(&mut csprng)).collect();
    let rhs: Vec<ScalarBytes> = (0..len).map(|_| ScalarBytes::random(&mut csprng)).collect();

    let bench_name = format!("dotproduct_dalek_{}", len);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| compute_dotproduct(black_box(&lhs), black_box(&rhs)))
    });
    group.finish();
  }
}
/// Computes the dot product `<a, b>` over raw dalek scalars.
///
/// Improvements over the original: slice parameters instead of `&Vec<_>`
/// (callers still pass `&vec` via deref coercion), an explicit length check
/// (the index loop silently ignored trailing elements of a longer `b`),
/// and an iterator fold instead of manual indexing.
///
/// # Panics
/// Panics if `a` and `b` differ in length.
fn compute_dotproduct(a: &[ScalarBytes], b: &[ScalarBytes]) -> ScalarBytes {
  assert_eq!(a.len(), b.len());
  a.iter()
    .zip(b.iter())
    .fold(ScalarBytes::zero(), |acc, (x, y)| &acc + x * y)
}
/// Benchmarks the library's optimized dot product
/// (`DotProductProof::compute_dotproduct`) over libspartan scalars,
/// for comparison against the dalek baseline above.
fn dotproduct_benchmark_opt(c: &mut Criterion) {
  let mut csprng: OsRng = OsRng;
  for &exp in [20].iter() {
    let scale = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("dotproduct_benchmark_opt");
    group.plot_config(scale);

    let len = (exp as usize).pow2();
    let lhs: Vec<Scalar> = (0..len).map(|_| Scalar::random(&mut csprng)).collect();
    let rhs: Vec<Scalar> = (0..len).map(|_| Scalar::random(&mut csprng)).collect();

    let bench_name = format!("dotproduct_opt_{}", len);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| DotProductProof::compute_dotproduct(black_box(&lhs), black_box(&rhs)))
    });
    group.finish();
  }
}
/// Criterion configuration shared by this file's benchmarks; a reduced
/// sample count keeps the large-vector benchmarks short.
fn set_duration() -> Criterion {
  let config = Criterion::default();
  config.sample_size(10)
}
// Register both dot-product benchmarks under the shared Criterion config.
criterion_group! {
name = benches_dotproduct;
config = set_duration();
targets = dotproduct_benchmark_dalek, dotproduct_benchmark_opt
}
criterion_main!(benches_dotproduct);

+ 200
- 0
benches/polycommit.rs

@ -0,0 +1,200 @@
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use criterion::*;
use libspartan::dense_mlpoly::{DensePolynomial, PolyCommitmentGens, PolyEvalProof};
use libspartan::math::Math;
use libspartan::scalar::Scalar;
use libspartan::transcript::ProofTranscript;
use merlin::Transcript;
use rand::rngs::OsRng;
/// Benchmarks committing to a random dense multilinear polynomial with
/// 2^s coefficients, for several sizes s.
fn commit_benchmark(c: &mut Criterion) {
  let mut csprng: OsRng = OsRng;
  for &s in [4, 8, 12, 14, 16, 20].iter() {
    let scale = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("commit_benchmark");
    group.plot_config(scale);

    let n = (s as usize).pow2();
    let m = n.square_root();
    let z: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut csprng)).collect();
    // check that Z's size is a perfect square (m = sqrt(n))
    assert_eq!(m * m, z.len());

    let poly = DensePolynomial::new(z);
    let gens = PolyCommitmentGens::new(s, b"test-m");

    let bench_name = format!("polycommit_commit_{}", n);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| poly.commit(black_box(false), black_box(&gens), black_box(None)))
    });
    group.finish();
  }
}
// Benchmarks evaluating a random dense multilinear polynomial (2^s
// coefficients) at a random point r in Scalar^s.
//
// Changed from the original: the coefficient vector and evaluation point
// are built with iterator `collect`s instead of manual push loops, for
// consistency with `commit_benchmark` in this file; also fixes the
// "size if a perfect square" comment typo. Behavior is unchanged.
fn eval_benchmark(c: &mut Criterion) {
  let mut csprng: OsRng = OsRng;
  for &s in [4, 8, 12, 14, 16, 20].iter() {
    let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("eval_benchmark");
    group.plot_config(plot_config);

    let n = (s as usize).pow2();
    let m = n.square_root();
    let z: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut csprng)).collect();
    assert_eq!(m * m, z.len()); // check that Z's size is a perfect square

    let poly = DensePolynomial::new(z);
    // Random evaluation point with one coordinate per variable.
    let r: Vec<Scalar> = (0..s).map(|_| Scalar::random(&mut csprng)).collect();

    let name = format!("polycommit_eval_{}", n);
    group.bench_function(&name, move |b| {
      b.iter(|| poly.evaluate(black_box(&r)))
    });
    group.finish();
  }
}
// Benchmarks producing a PolyEvalProof (proof that poly(r) == eval) for a
// random dense multilinear polynomial with 2^s coefficients. The timed
// region includes building the prover transcript and random tape, since
// both are consumed per proof.
fn evalproof_benchmark(c: &mut Criterion) {
let mut csprng: OsRng = OsRng;
for &s in [4, 8, 12, 14, 16, 20].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("evalproof_benchmark");
group.plot_config(plot_config);
let n = (s as usize).pow2();
let m = n.square_root();
let mut z: Vec<Scalar> = Vec::new();
for _ in 0..n {
z.push(Scalar::random(&mut csprng));
}
assert_eq!(m * m, z.len()); // check that Z's size is a perfect square
let poly = DensePolynomial::new(z);
let gens = PolyCommitmentGens::new(s, b"test-m");
// Random evaluation point: one coordinate per variable.
let mut r: Vec<Scalar> = Vec::new();
for _ in 0..s {
r.push(Scalar::random(&mut csprng));
}
let eval = poly.evaluate(&r);
let name = format!("polycommit_evalproof_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
// Fresh tape each iteration: the prover draws blinding randomness
// from it, so it cannot be reused across proofs.
let mut random_tape = {
let mut csprng: OsRng = OsRng;
let mut tape = Transcript::new(b"proof");
tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
tape
};
let mut prover_transcript = Transcript::new(b"example");
// The two `None`s are the optional blinds for the polynomial and the
// evaluation; the prover generates its own here.
PolyEvalProof::prove(
black_box(&poly),
black_box(None),
black_box(&r),
black_box(&eval),
black_box(None),
black_box(&gens),
black_box(&mut prover_transcript),
black_box(&mut random_tape),
)
});
});
group.finish();
}
}
// Benchmarks verifying a PolyEvalProof: the proof is produced once outside
// the timed region; only `proof.verify` (with a fresh verifier transcript)
// is measured.
fn evalproofverify_benchmark(c: &mut Criterion) {
let mut csprng: OsRng = OsRng;
for &s in [4, 8, 12, 14, 16, 20].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("evalproofverify_benchmark");
group.plot_config(plot_config);
// NOTE(review): the sibling benchmarks write `(s as usize).pow2()` here;
// this relies on inference instead — confirm `s` is usize in both cases.
let n = s.pow2();
let m = n.square_root();
let mut z: Vec<Scalar> = Vec::new();
for _ in 0..n {
z.push(Scalar::random(&mut csprng));
}
assert_eq!(m * m, z.len()); // check that Z's size is a perfect square
let poly = DensePolynomial::new(z);
let gens = PolyCommitmentGens::new(s, b"test-m");
let mut r: Vec<Scalar> = Vec::new();
for _ in 0..s {
r.push(Scalar::random(&mut csprng));
}
// Commit with blinds so the verifier checks against a hiding commitment.
let (poly_commitment, blinds) = poly.commit(false, &gens, None);
let eval = poly.evaluate(&r);
let mut random_tape = {
let mut csprng: OsRng = OsRng;
let mut tape = Transcript::new(b"proof");
tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
tape
};
let mut prover_transcript = Transcript::new(b"example");
// Produce the proof (and the commitment to the claimed evaluation, c_zr)
// once; the benchmark loop below only verifies it.
let (proof, c_zr) = PolyEvalProof::prove(
black_box(&poly),
black_box(Some(&blinds)),
black_box(&r),
black_box(&eval),
black_box(None),
black_box(&gens),
black_box(&mut prover_transcript),
black_box(&mut random_tape),
);
let name = format!("polycommit_evalproofverify_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
// Verifier transcript must use the same label as the prover's.
let mut verifier_transcript = Transcript::new(b"example");
proof.verify(
black_box(&gens),
black_box(&mut verifier_transcript),
black_box(&r),
black_box(&c_zr),
black_box(&poly_commitment),
)
});
});
group.finish();
}
}
/// Criterion configuration shared by this file's benchmarks; a reduced
/// sample count keeps the proof-generation benchmarks short.
fn set_duration() -> Criterion {
  let config = Criterion::default();
  config.sample_size(10)
}
// Register commit, evaluate, prove, and verify benchmarks for the
// polynomial commitment scheme.
criterion_group! {
name = benches_polycommit;
config = set_duration();
targets = commit_benchmark, eval_benchmark, evalproof_benchmark, evalproofverify_benchmark
}
criterion_main!(benches_polycommit);

+ 123
- 0
benches/r1csproof.rs

@ -0,0 +1,123 @@
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::dense_mlpoly::EqPolynomial;
use libspartan::math::Math;
use libspartan::r1csinstance::R1CSInstance;
use libspartan::r1csproof::{R1CSGens, R1CSProof};
use libspartan::scalar::Scalar;
use libspartan::transcript::ProofTranscript;
use merlin::Transcript;
use rand::rngs::OsRng;
use criterion::*;
// Benchmarks R1CSProof::prove on a synthetic satisfiable R1CS instance with
// 2^s variables/constraints and 10 public inputs. Transcript and random
// tape are rebuilt inside the timed closure since proving consumes them.
fn prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("r1cs_prove_benchmark");
group.plot_config(plot_config);
let num_vars = s.pow2();
let num_cons = num_vars;
let num_inputs = 10;
// Synthetic instance comes with a satisfying assignment (vars, input).
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let n = inst.get_num_vars();
let gens = R1CSGens::new(num_cons, num_vars, b"test-m");
let name = format!("r1cs_prove_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut random_tape = {
let mut csprng: OsRng = OsRng;
let mut tape = Transcript::new(b"proof");
tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
tape
};
let mut prover_transcript = Transcript::new(b"example");
// vars is cloned per iteration because prove takes it by value.
R1CSProof::prove(
black_box(&inst),
black_box(vars.clone()),
black_box(&input),
black_box(&gens),
black_box(&mut prover_transcript),
black_box(&mut random_tape),
)
});
});
group.finish();
}
}
// Benchmarks R1CSProof verification: a proof is produced once, the matrix
// evaluations at (rx, ry) are precomputed via eq-polynomial tables, and only
// `proof.verify` is timed.
fn verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16, 20].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("r1cs_verify_benchmark");
group.plot_config(plot_config);
let num_vars = s.pow2();
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let n = inst.get_num_vars();
let gens = R1CSGens::new(num_cons, num_vars, b"test-m");
let mut random_tape = {
let mut csprng: OsRng = OsRng;
let mut tape = Transcript::new(b"proof");
tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
tape
};
let mut prover_transcript = Transcript::new(b"example");
// Prove once outside the timed region; rx/ry are the sumcheck challenge
// points returned by the prover.
let (proof, rx, ry) = R1CSProof::prove(
&inst,
vars,
&input,
&gens,
&mut prover_transcript,
&mut random_tape,
);
// Evaluate the R1CS matrices at (rx, ry) — the verifier consumes these
// claimed evaluations.
let eval_table_rx = EqPolynomial::new(rx.clone()).evals();
let eval_table_ry = EqPolynomial::new(ry.clone()).evals();
let inst_evals = inst.evaluate_with_tables(&eval_table_rx, &eval_table_ry);
let name = format!("r1cs_verify_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
// Fresh verifier transcript per iteration, matching the prover's label.
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(num_vars),
black_box(num_cons),
black_box(&input),
black_box(&inst_evals),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
/// Criterion configuration shared by this file's benchmarks; a reduced
/// sample count keeps the proving/verification benchmarks short.
fn set_duration() -> Criterion {
  let config = Criterion::default();
  config.sample_size(10)
}
// Register the R1CS prove/verify benchmarks under the shared config.
criterion_group! {
name = benches_r1cs;
config = set_duration();
targets = prove_benchmark, verify_benchmark
}
criterion_main!(benches_r1cs);

+ 138
- 0
benches/spartan.rs

@ -0,0 +1,138 @@
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::math::Math;
use libspartan::r1csinstance::{R1CSCommitmentGens, R1CSInstance};
use libspartan::r1csproof::R1CSGens;
use libspartan::spartan::{SpartanGens, SpartanProof};
use merlin::Transcript;
use criterion::*;
/// Benchmarks SpartanProof::encode — committing to a synthetic R1CS
/// instance with 2^s variables/constraints.
fn encode_benchmark(c: &mut Criterion) {
  for &exp in [10, 12, 16].iter() {
    let scale = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    let mut group = c.benchmark_group("spartan_encode_benchmark");
    group.plot_config(scale);

    let num_vars = exp.pow2();
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, _vars, _input) =
      R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    let n = inst.get_num_vars();
    let m = n.square_root();
    // The encoding requires a perfect-square number of variables.
    assert_eq!(n, m * m);

    let r1cs_size = inst.size();
    let gens_r1cs = R1CSCommitmentGens::new(&r1cs_size, b"gens_r1cs");

    let bench_name = format!("spartan_encode_{}", n);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| {
        SpartanProof::encode(black_box(&inst), black_box(&gens_r1cs));
      })
    });
    group.finish();
  }
}
// Benchmarks end-to-end SpartanProof::prove on a synthetic instance. The
// instance is encoded (committed) once outside the timed region; the proof
// itself — including a fresh transcript — is timed.
fn prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("spartan_prove_benchmark");
group.plot_config(plot_config);
let num_vars = s.pow2();
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let n = inst.get_num_vars();
let r1cs_size = inst.size();
// Separate generators: one set for instance-evaluation commitments, one
// for the satisfiability proof.
let gens_r1cs_eval = R1CSCommitmentGens::new(&r1cs_size, b"gens_r1cs_eval");
let gens_r1cs_sat = R1CSGens::new(num_cons, num_vars, b"gens_r1cs_sat");
// produce a proof of satisfiability
let (_comm, decomm) = SpartanProof::encode(&inst, &gens_r1cs_eval);
let gens = SpartanGens::new(gens_r1cs_sat, gens_r1cs_eval);
let name = format!("spartan_prove_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript = Transcript::new(b"example");
// vars is cloned per iteration because prove takes it by value.
SpartanProof::prove(
black_box(&inst),
black_box(&decomm),
black_box(vars.clone()),
black_box(&input),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
// Benchmarks SpartanProof verification: encode and prove happen once
// outside the timed region; only `proof.verify` against the instance
// commitment is timed.
fn verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("spartan_verify_benchmark");
group.plot_config(plot_config);
let num_vars = s.pow2();
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let n = inst.get_num_vars();
let r1cs_size = inst.size();
let gens_r1cs_eval = R1CSCommitmentGens::new(&r1cs_size, b"gens_r1cs_eval");
// create a commitment to R1CSInstance
let (comm, decomm) = SpartanProof::encode(&inst, &gens_r1cs_eval);
let gens_r1cs_sat = R1CSGens::new(num_cons, num_vars, b"gens_r1cs_sat");
let gens = SpartanGens::new(gens_r1cs_sat, gens_r1cs_eval);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"example");
let proof = SpartanProof::prove(&inst, &decomm, vars, &input, &gens, &mut prover_transcript);
let name = format!("spartan_verify_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
// Fresh verifier transcript per iteration, matching the prover's label.
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(&comm),
black_box(&input),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
/// Criterion configuration shared by this file's benchmarks; a reduced
/// sample count keeps the end-to-end Spartan benchmarks short.
fn set_duration() -> Criterion {
  let config = Criterion::default();
  config.sample_size(10)
}
// Register the encode/prove/verify benchmarks for end-to-end Spartan.
criterion_group! {
name = benches_spartan;
config = set_duration();
targets = encode_benchmark, prove_benchmark, verify_benchmark
}
criterion_main!(benches_spartan);

+ 162
- 0
benches/sumcheck.rs

@ -0,0 +1,162 @@
#![allow(non_snake_case)]
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::commitments::Commitments;
use libspartan::commitments::MultiCommitGens;
use libspartan::dense_mlpoly::DensePolynomial;
use libspartan::math::Math;
use libspartan::nizk::DotProductProof;
use libspartan::scalar::Scalar;
use libspartan::sumcheck::ZKSumcheckInstanceProof;
use libspartan::transcript::ProofTranscript;
use merlin::Transcript;
use rand::rngs::OsRng;
use criterion::*;
// Benchmarks the zero-knowledge sumcheck prover (prove_quad) on a claimed
// dot product of two random length-2^s vectors, viewed as the sum over the
// boolean hypercube of the product of two multilinear polynomials.
fn prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16, 20].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("zksumcheck_prove_benchmark");
group.plot_config(plot_config);
// produce tables
// gens_n commits to the 3 coefficients of each round's degree-2 univariate;
// gens_1 commits to single scalars (the claim).
let gens_n = MultiCommitGens::new(3, b"test-m");
let gens_1 = MultiCommitGens::new(1, b"test-1");
let num_rounds = s;
let n = s.pow2();
let mut csprng: OsRng = OsRng;
let vec_A = (0..n)
.map(|_i| Scalar::random(&mut csprng))
.collect::<Vec<Scalar>>();
let vec_B = (0..n)
.map(|_i| Scalar::random(&mut csprng))
.collect::<Vec<Scalar>>();
// The sumcheck claim is <A, B>.
let claim = DotProductProof::compute_dotproduct(&vec_A, &vec_B);
let mut poly_A = DensePolynomial::new(vec_A);
let mut poly_B = DensePolynomial::new(vec_B);
let blind_claim = Scalar::random(&mut csprng);
let comb_func =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
let name = format!("zksumcheck_prove_{}", n);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut random_tape = {
let mut csprng: OsRng = OsRng;
let mut tape = Transcript::new(b"proof");
tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
tape
};
let mut prover_transcript = Transcript::new(b"example");
// NOTE(review): prove_quad takes poly_A/poly_B by &mut, so iterations
// after the first may run on polynomials already reduced by earlier
// rounds — confirm this is the intended thing to measure.
ZKSumcheckInstanceProof::prove_quad(
black_box(&claim),
black_box(&blind_claim),
black_box(num_rounds),
black_box(&mut poly_A),
black_box(&mut poly_B),
black_box(comb_func),
black_box(&gens_1),
black_box(&gens_n),
black_box(&mut prover_transcript),
black_box(&mut random_tape),
)
});
});
group.finish();
}
}
// Benchmarks the zero-knowledge sumcheck verifier: the proof is produced
// once outside the timed region; only `proof.verify` against the committed
// claim is timed.
fn verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16, 20].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("zksumcheck_verify_benchmark");
group.plot_config(plot_config);
// produce tables
// gens_n commits to the 3 coefficients of each round's degree-2 univariate;
// gens_1 commits to single scalars (the claim).
let gens_n = MultiCommitGens::new(3, b"test-m");
let gens_1 = MultiCommitGens::new(1, b"test-1");
let num_rounds = s;
let n = s.pow2();
let mut csprng: OsRng = OsRng;
let vec_A = (0..n)
.map(|_i| Scalar::random(&mut csprng))
.collect::<Vec<Scalar>>();
let vec_B = (0..n)
.map(|_i| Scalar::random(&mut csprng))
.collect::<Vec<Scalar>>();
// The sumcheck claim is <A, B>.
let claim = DotProductProof::compute_dotproduct(&vec_A, &vec_B);
let mut poly_A = DensePolynomial::new(vec_A);
let mut poly_B = DensePolynomial::new(vec_B);
let blind_claim = Scalar::random(&mut csprng);
let comb_func =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
let mut random_tape = {
let mut csprng: OsRng = OsRng;
let mut tape = Transcript::new(b"proof");
tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
tape
};
let mut prover_transcript = Transcript::new(b"example");
// Prove once; only the proof (not r, the final evaluation, or the blind)
// is needed for verification below.
let (proof, _r, _v, _blind_post_claim) = ZKSumcheckInstanceProof::prove_quad(
&claim,
&blind_claim,
num_rounds,
&mut poly_A,
&mut poly_B,
comb_func,
&gens_1,
&gens_n,
&mut prover_transcript,
&mut random_tape,
);
let name = format!("zksumcheck_verify_{}", n);
// Each round's polynomial is quadratic.
let degree_bound = 2;
// The verifier receives a hiding commitment to the claim, not the claim.
let comm_claim = claim.commit(&blind_claim, &gens_1).compress();
group.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(&comm_claim),
black_box(num_rounds),
black_box(degree_bound),
black_box(&gens_1),
black_box(&gens_n),
black_box(&mut verifier_transcript)
)
.is_ok())
});
});
group.finish();
}
}
/// Criterion configuration: a reduced sample size keeps these
/// expensive proof benchmarks within a reasonable wall-clock budget.
fn set_duration() -> Criterion {
  Criterion::default().sample_size(10)
}
// Register the benchmark group under the reduced-sample-size
// configuration and generate the benchmark `main` entry point.
criterion_group! {
  name = benches_r1cs;
  config = set_duration();
  targets = verify_benchmark, prove_benchmark
}
criterion_main!(benches_r1cs);

+ 5
- 0
rustfmt.toml

@ -0,0 +1,5 @@
edition = "2018"
tab_spaces = 2
newline_style = "Unix"
report_fixme = "Always"
use_try_shorthand = true

+ 247
- 0
src/bullet.rs

@ -0,0 +1,247 @@
#![allow(non_snake_case)]
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::scalar::Scalar;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
use std::iter;
/// A Bulletproofs-style inner-product reduction argument: one (L, R)
/// commitment pair is recorded per halving round of the reduction.
#[derive(Debug, Serialize, Deserialize)]
pub struct BulletReductionProof {
  L_vec: Vec<CompressedGroup>, // left cross-term commitment per round
  R_vec: Vec<CompressedGroup>, // right cross-term commitment per round
}
impl BulletReductionProof {
  /// Create an inner-product proof.
  ///
  /// The proof is created with respect to the bases \\(G\\).
  ///
  /// The `transcript` is passed in as a parameter so that the
  /// challenges depend on the *entire* transcript (including parent
  /// protocols).
  ///
  /// The lengths of the vectors must all be the same, and must all be
  /// either 0 or a power of 2.
  ///
  /// Returns the proof plus the fully-reduced state:
  /// (proof, Gamma_hat, a_final, b_final, G_final, blind_final).
  pub fn prove(
    transcript: &mut Transcript,
    Q: &GroupElement,
    G_vec: &Vec<GroupElement>,
    H: &GroupElement,
    a_vec: &Vec<Scalar>,
    b_vec: &Vec<Scalar>,
    blind: &Scalar,
    blinds_vec: &Vec<(Scalar, Scalar)>,
  ) -> (
    BulletReductionProof,
    GroupElement,
    Scalar,
    Scalar,
    GroupElement,
    Scalar,
  ) {
    // Create slices G, H, a, b backed by their respective
    // vectors. This lets us reslice as we compress the lengths
    // of the vectors in the main loop below.
    let mut G = &mut G_vec.clone()[..];
    let mut a = &mut a_vec.clone()[..];
    let mut b = &mut b_vec.clone()[..];

    // All of the input vectors must have a length that is a power of two.
    let mut n = G.len();
    assert!(n.is_power_of_two());
    let lg_n = n.log2();
    let G_factors: Vec<Scalar> = iter::repeat(Scalar::one()).take(n).collect();

    // All of the input vectors must have the same length.
    assert_eq!(G.len(), n);
    assert_eq!(a.len(), n);
    assert_eq!(b.len(), n);
    assert_eq!(G_factors.len(), n);
    // NOTE(review): only lg_n (blind_L, blind_R) pairs are consumed by the
    // loop below, yet 2 * lg_n are required here — confirm intent.
    assert_eq!(blinds_vec.len(), 2 * lg_n);

    //transcript.innerproduct_domain_sep(n as u64);

    let mut L_vec = Vec::with_capacity(lg_n);
    let mut R_vec = Vec::with_capacity(lg_n);
    let mut blinds_iter = blinds_vec.iter();
    // running blind for the reduced commitment, updated each round
    let mut blind_fin = *blind;

    while n != 1 {
      n = n / 2;
      // split vectors into left/right halves for this round
      let (a_L, a_R) = a.split_at_mut(n);
      let (b_L, b_R) = b.split_at_mut(n);
      let (G_L, G_R) = G.split_at_mut(n);

      // cross inner products committed in L and R
      let c_L = inner_product(&a_L, &b_R);
      let c_R = inner_product(&a_R, &b_L);

      let (blind_L, blind_R) = blinds_iter.next().unwrap();

      // L = <a_L, G_R> + c_L * Q + blind_L * H
      let L = GroupElement::vartime_multiscalar_mul(
        a_L
          .iter()
          .chain(iter::once(&c_L))
          .chain(iter::once(blind_L)),
        G_R.iter().chain(iter::once(Q)).chain(iter::once(H)),
      );

      // R = <a_R, G_L> + c_R * Q + blind_R * H
      let R = GroupElement::vartime_multiscalar_mul(
        a_R
          .iter()
          .chain(iter::once(&c_R))
          .chain(iter::once(blind_R)),
        G_L.iter().chain(iter::once(Q)).chain(iter::once(H)),
      );

      // Fiat-Shamir: derive the round challenge from the transcript
      transcript.append_point(b"L", &L.compress());
      transcript.append_point(b"R", &R.compress());

      let u = transcript.challenge_scalar(b"u");
      let u_inv = u.invert().unwrap();

      // fold the halves: a' = u*a_L + u^{-1}*a_R, b' = u^{-1}*b_L + u*b_R,
      // G' = u^{-1}*G_L + u*G_R
      for i in 0..n {
        a_L[i] = a_L[i] * u + u_inv * a_R[i];
        b_L[i] = b_L[i] * u_inv + u * b_R[i];
        G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
      }

      // fold the blinds the same way the commitment folds
      blind_fin = blind_fin + blind_L * &u * &u + blind_R * &u_inv * &u_inv;

      L_vec.push(L.compress());
      R_vec.push(R.compress());

      a = a_L;
      b = b_L;
      G = G_L;
    }

    // final reduced commitment: a*G + (a*b)*Q + blind_fin*H
    let Gamma_hat =
      GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_fin], &[G[0], *Q, *H]);

    (
      BulletReductionProof {
        L_vec: L_vec,
        R_vec: R_vec,
      },
      Gamma_hat,
      a[0],
      b[0],
      G[0],
      blind_fin,
    )
  }

  /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
  /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
  /// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
  fn verification_scalars(
    &self,
    n: usize,
    transcript: &mut Transcript,
  ) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
    let lg_n = self.L_vec.len();
    if lg_n >= 32 {
      // 4 billion multiplications should be enough for anyone
      // and this check prevents overflow in 1<<lg_n below.
      return Err(ProofVerifyError);
    }
    if n != (1 << lg_n) {
      return Err(ProofVerifyError);
    }

    // 1. Recompute x_k,...,x_1 based on the proof transcript
    let mut challenges = Vec::with_capacity(lg_n);
    for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
      transcript.append_point(b"L", L);
      transcript.append_point(b"R", R);
      challenges.push(transcript.challenge_scalar(b"u"));
    }

    // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
    let mut challenges_inv = challenges.clone();
    let allinv = Scalar::batch_invert(&mut challenges_inv);

    // 3. Compute u_i^2 and (1/u_i)^2
    for i in 0..lg_n {
      challenges[i] = challenges[i] * challenges[i];
      challenges_inv[i] = challenges_inv[i] * challenges_inv[i];
    }
    let challenges_sq = challenges;
    let challenges_inv_sq = challenges_inv;

    // 4. Compute s values inductively.
    // s[0] = prod(u_i^{-1}); each s[i] reuses s[i - 2^lg(i)] times u^2.
    let mut s = Vec::with_capacity(n);
    s.push(allinv);
    for i in 1..n {
      let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
      let k = 1 << lg_i;
      // The challenges are stored in "creation order" as [u_k,...,u_1],
      // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i
      let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
      s.push(s[i - k] * u_lg_i_sq);
    }

    Ok((challenges_sq, challenges_inv_sq, s))
  }

  /// This method is for testing that proof generation work,
  /// but for efficiency the actual protocols would use `verification_scalars`
  /// method to combine inner product verification with other checks
  /// in a single multiscalar multiplication.
  ///
  /// Returns (G_hat, Gamma_hat, a_hat): the reduced generator, the
  /// reduced commitment, and the reduced public vector entry.
  pub fn verify(
    &self,
    n: usize,
    a: &Vec<Scalar>,
    transcript: &mut Transcript,
    Gamma: &GroupElement,
    G: &[GroupElement],
  ) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
    let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;

    // decompress all round commitments; any invalid point fails verification
    let Ls = self
      .L_vec
      .iter()
      .map(|p| p.decompress().ok_or(ProofVerifyError))
      .collect::<Result<Vec<_>, _>>()?;

    let Rs = self
      .R_vec
      .iter()
      .map(|p| p.decompress().ok_or(ProofVerifyError))
      .collect::<Result<Vec<_>, _>>()?;

    let G_hat = GroupElement::vartime_multiscalar_mul(s.iter(), G.iter());
    let a_hat = inner_product(a, &s);

    // Gamma_hat = sum(u_i^2 * L_i) + sum(u_i^{-2} * R_i) + Gamma
    let Gamma_hat = GroupElement::vartime_multiscalar_mul(
      u_sq
        .iter()
        .chain(u_inv_sq.iter())
        .chain(iter::once(&Scalar::one())),
      Ls.iter().chain(Rs.iter()).chain(iter::once(Gamma)),
    );

    Ok((G_hat, Gamma_hat, a_hat))
  }
}
/// Computes an inner product of two vectors
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
let mut out = Scalar::zero();
if a.len() != b.len() {
panic!("inner_product(a,b): lengths of vectors do not match");
}
for i in 0..a.len() {
out += a[i] * b[i];
}
out
}

+ 109
- 0
src/commitments.rs

@ -0,0 +1,109 @@
use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT_COMPRESSED};
use super::scalar::Scalar;
use digest::{ExtendableOutput, Input, XofReader};
use sha3::Shake256;
/// Pedersen vector-commitment generators: `n` bases `G` for the
/// committed values plus a separate base `h` for the blinding factor.
#[derive(Debug)]
pub struct MultiCommitGens {
  pub n: usize,
  pub G: Vec<GroupElement>,
  pub h: GroupElement,
}
impl MultiCommitGens {
  /// Derives `n + 1` generators deterministically from `label` and the
  /// group basepoint via the SHAKE256 XOF: `n` value bases plus one
  /// blinding base `h`.
  pub fn new(n: usize, label: &[u8]) -> Self {
    let mut shake = Shake256::default();
    shake.input(label);
    shake.input(GROUP_BASEPOINT_COMPRESSED.as_bytes());

    let mut reader = shake.xof_result();
    // preallocate: exactly n + 1 points are generated
    let mut gens: Vec<GroupElement> = Vec::with_capacity(n + 1);
    let mut uniform_bytes = [0u8; 64];
    for _ in 0..n + 1 {
      reader.read(&mut uniform_bytes);
      gens.push(GroupElement::from_uniform_bytes(&uniform_bytes));
    }

    MultiCommitGens {
      n,
      G: gens[0..n].to_vec(),
      h: gens[n],
    }
  }

  // NOTE(review): inherent method shadowing `Clone::clone`; kept for
  // interface compatibility, but deriving `Clone` would be idiomatic.
  pub fn clone(&self) -> MultiCommitGens {
    MultiCommitGens {
      n: self.n,
      h: self.h,
      G: self.G.clone(),
    }
  }

  /// Splits the generators into two sets of sizes `mid` and `n - mid`;
  /// both halves share the blinding base `h`. The halves are copies, so
  /// no exclusive borrow of `self.G` is needed internally (the mutable
  /// receiver is retained only for caller compatibility).
  pub fn split_at_mut(&mut self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
    let (G1, G2) = self.G.split_at(mid);

    (
      MultiCommitGens {
        n: G1.len(),
        G: G1.to_vec(),
        h: self.h,
      },
      MultiCommitGens {
        n: G2.len(),
        G: G2.to_vec(),
        h: self.h,
      },
    )
  }
}
/// Pedersen commitment: `<self, G> + blind * h` using `gens_n`.
pub trait Commitments {
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
}
impl Commitments for Scalar {
  /// Pedersen commitment to a single scalar: `self * G[0] + blind * h`.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert!(gens_n.n == 1);
    GroupElement::vartime_multiscalar_mul(&[*self, *blind], &[gens_n.G[0], gens_n.h])
  }
}
impl Commitments for Vec<Scalar> {
  /// Pedersen vector commitment; delegates to the `[Scalar]` impl
  /// (`<self, G> + blind * h`) to avoid duplicating the logic.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    self.as_slice().commit(blind, gens_n)
  }
}
impl Commitments for [Scalar] {
  /// Pedersen vector commitment: `<self, G> + blind * h`; requires one
  /// generator per committed scalar.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert_eq!(gens_n.n, self.len());
    GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * &gens_n.h
  }
}
impl Commitments for Vec<bool> {
  /// Commitment to a bit vector; delegates to the `[bool]` impl, which
  /// adds `G[i]` only for set bits (no scalar multiplications needed).
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    self.as_slice().commit(blind, gens_n)
  }
}
impl Commitments for [bool] {
  /// Pedersen commitment to a bit vector: starts from `blind * h` and
  /// adds the base `G[i]` for every set bit, so no scalar
  /// multiplications are needed for the data itself.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert!(gens_n.n == self.len());
    self
      .iter()
      .enumerate()
      .filter(|(_, &bit)| bit)
      .fold(blind * &gens_n.h, |comm, (i, _)| comm + gens_n.G[i])
  }
}

+ 740
- 0
src/dense_mlpoly.rs

@ -0,0 +1,740 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{DotProductProofGens, DotProductProofLog};
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use core::ops::Index;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[cfg(feature = "rayon_par")]
use rayon::prelude::*;
/// A multilinear polynomial represented densely by its evaluations over
/// the Boolean hypercube.
#[derive(Debug)]
pub struct DensePolynomial {
  num_vars: usize, //the number of variables in the multilinear polynomial
  len: usize, // number of evaluations currently in use (shrinks as variables are bound)
  Z: Vec<Scalar>, // a vector that holds the evaluations of the polynomial in all the 2^num_vars Boolean inputs
}
/// Generators for polynomial commitments; wraps the dot-product proof
/// generators sized for one row of the sqrt-shaped evaluation matrix.
pub struct PolyCommitmentGens {
  pub gens: DotProductProofGens,
}
impl PolyCommitmentGens {
  // the number of variables in the multilinear polynomial
  pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens {
    // generators are sized to 2^right, the number of columns of the
    // factored (matrix) view of the evaluation table
    let (_left, right) = EqPolynomial::compute_factored_lens(num_vars);
    let gens = DotProductProofGens::new(right.pow2(), label);
    PolyCommitmentGens { gens }
  }
}
/// Blinding factors for a polynomial commitment, one per committed row.
pub struct PolyCommitmentBlinds {
  blinds: Vec<Scalar>,
}
/// Commitment to a dense multilinear polynomial: one compressed group
/// element per row of the sqrt-shaped evaluation matrix.
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyCommitment {
  C: Vec<CompressedGroup>,
}
/// Commitment to a constant polynomial: a single share suffices since
/// every row commitment would be identical.
#[derive(Debug, Serialize, Deserialize)]
pub struct ConstPolyCommitment {
  C: CompressedGroup,
}
impl PolyCommitment {
  /// Homomorphically combines two commitments share-by-share,
  /// producing a commitment to `self + s * comm`.
  pub fn combine(&self, comm: &PolyCommitment, s: &Scalar) -> PolyCommitment {
    assert_eq!(comm.C.len(), self.C.len());
    let C = self
      .C
      .iter()
      .zip(comm.C.iter())
      .map(|(c1, c2)| (c1.decompress().unwrap() + s * c2.decompress().unwrap()).compress())
      .collect::<Vec<CompressedGroup>>();
    PolyCommitment { C }
  }

  /// Adds a constant-polynomial commitment to every share.
  pub fn combine_const(&self, comm: &ConstPolyCommitment) -> PolyCommitment {
    let C = self
      .C
      .iter()
      .map(|c| (c.decompress().unwrap() + comm.C.decompress().unwrap()).compress())
      .collect::<Vec<CompressedGroup>>();
    PolyCommitment { C }
  }
}
/// The multilinear extension of the equality predicate, fixed at the
/// point `r`: eq(r, x) = prod_i (r_i x_i + (1 - r_i)(1 - x_i)).
pub struct EqPolynomial {
  r: Vec<Scalar>,
}
impl EqPolynomial {
  pub fn new(r: Vec<Scalar>) -> Self {
    EqPolynomial { r }
  }

  /// Evaluates eq(r, rx) in O(|r|) time.
  pub fn evaluate(&self, rx: &Vec<Scalar>) -> Scalar {
    assert_eq!(self.r.len(), rx.len());
    (0..rx.len())
      .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i]))
      .product()
  }

  /// Returns the 2^ell evaluations of eq(r, x) over all Boolean x, built
  /// in place by doubling the table once per variable (O(2^ell) total).
  pub fn evals(&self) -> Vec<Scalar> {
    let ell = self.r.len();

    let mut evals: Vec<Scalar> = vec![Scalar::one(); ell.pow2()];
    let mut size = 1;
    for j in 0..ell {
      // in each iteration, we double the size of chis
      size = size * 2;
      for i in (0..size).rev().step_by(2) {
        // copy each element from the prior iteration twice
        let scalar = evals[i / 2];
        // evals[i - 1] = scalar * (Scalar::one() - tau[j]);
        // evals[i] = scalar * tau[j];
        evals[i] = scalar * self.r[j];
        evals[i - 1] = scalar - evals[i]; // equals scalar * (1 - r[j])
      }
    }
    evals
  }

  /// Splits ell variables into (ell/2, ell - ell/2) halves for the
  /// sqrt-shaped matrix view of the evaluation table.
  pub fn compute_factored_lens(ell: usize) -> (usize, usize) {
    (ell / 2, ell - ell / 2)
  }

  /// Computes eq-eval tables L and R for the two halves of `r`; their
  /// outer product equals `evals()`.
  pub fn compute_factored_evals(&self) -> (Vec<Scalar>, Vec<Scalar>) {
    let ell = self.r.len();
    let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell);

    let L = EqPolynomial::new(self.r[0..left_num_vars].to_vec()).evals();
    let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals();

    (L, R)
  }
}
/// A multilinear polynomial over `num_vars` variables that evaluates to
/// the constant `c` everywhere.
pub struct ConstPolynomial {
  num_vars: usize,
  c: Scalar,
}
impl ConstPolynomial {
  pub fn new(num_vars: usize, c: Scalar) -> Self {
    ConstPolynomial { num_vars, c }
  }

  /// A constant polynomial evaluates to `c` at every point.
  pub fn evaluate(&self, rx: &Vec<Scalar>) -> Scalar {
    assert_eq!(self.num_vars, rx.len());
    self.c
  }

  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  /// produces a binding commitment
  pub fn commit(&self, gens: &PolyCommitmentGens) -> PolyCommitment {
    let ell = self.get_num_vars();
    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell);
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    assert_eq!(L_size * R_size, ell.pow2());

    // every row of the L_size x R_size matrix is the same constant row,
    // so one row commitment (with a zero blind) is replicated L_size times
    let vec = vec![self.c; R_size];
    let c = vec.commit(&Scalar::zero(), &gens.gens.gens_n).compress();
    PolyCommitment { C: vec![c; L_size] }
  }
}
/// The multilinear polynomial whose evaluation at a Boolean point equals
/// the integer whose (big-endian) bit pattern is that point.
pub struct IdentityPolynomial {
  size_point: usize,
}
impl IdentityPolynomial {
  pub fn new(size_point: usize) -> Self {
    IdentityPolynomial { size_point }
  }

  /// Evaluates at `r`: sum_i 2^{len - i - 1} * r[i], i.e. the multilinear
  /// extension of "point as a big-endian integer".
  pub fn evaluate(&self, r: &Vec<Scalar>) -> Scalar {
    assert_eq!(r.len(), self.size_point);
    r.iter()
      .enumerate()
      .map(|(i, r_i)| Scalar::from((r.len() - i - 1).pow2() as u64) * r_i)
      .sum()
  }
}
impl DensePolynomial {
  /// Builds a polynomial from its evaluation table.
  /// Assumes `Z.len()` is a power of two — TODO confirm at call sites.
  pub fn new(Z: Vec<Scalar>) -> Self {
    let len = Z.len();
    let num_vars = len.log2();
    DensePolynomial { num_vars, Z, len }
  }

  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  pub fn len(&self) -> usize {
    self.len
  }

  // NOTE(review): inherent method shadowing `Clone::clone`; copies only
  // the first `len` entries (drops evaluations beyond bound variables).
  pub fn clone(&self) -> DensePolynomial {
    DensePolynomial::new(self.Z[0..self.len].to_vec())
  }

  /// Splits into ([0, idx), [idx, 2*idx)). Assumes idx is half the
  /// length — TODO confirm; entries beyond 2*idx are dropped.
  pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) {
    assert!(idx < self.len());
    (
      DensePolynomial::new(self.Z[0..idx].to_vec()),
      DensePolynomial::new(self.Z[idx..2 * idx].to_vec()),
    )
  }

  // Commits each R_size-sized row of the evaluation matrix with its own
  // blind; parallel (rayon) variant.
  #[cfg(feature = "rayon_par")]
  fn commit_inner(&self, blinds: &Vec<Scalar>, gens: &MultiCommitGens) -> PolyCommitment {
    let L_size = blinds.len();
    let R_size = self.Z.len() / L_size;
    assert_eq!(L_size * R_size, self.Z.len());
    let C = (0..L_size)
      .collect::<Vec<usize>>()
      .par_iter()
      .map(|&i| {
        self.Z[R_size * i..R_size * (i + 1)]
          .commit(&blinds[i], gens)
          .compress()
      })
      .collect();
    PolyCommitment { C }
  }

  // Sequential variant of the row-by-row commitment above.
  #[cfg(not(feature = "rayon_par"))]
  fn commit_inner(&self, blinds: &Vec<Scalar>, gens: &MultiCommitGens) -> PolyCommitment {
    let L_size = blinds.len();
    let R_size = self.Z.len() / L_size;
    assert_eq!(L_size * R_size, self.Z.len());
    let C = (0..L_size)
      .map(|i| {
        self.Z[R_size * i..R_size * (i + 1)]
          .commit(&blinds[i], gens)
          .compress()
      })
      .collect();
    PolyCommitment { C }
  }

  /// Commits to the polynomial by viewing its evaluations as an
  /// L_size x R_size matrix and committing to each row. When `hiding`,
  /// row blinds are drawn from `random_tape` (which must be `Some`);
  /// otherwise zero blinds are used.
  pub fn commit(
    &self,
    hiding: bool,
    gens: &PolyCommitmentGens,
    random_tape: Option<&mut Transcript>,
  ) -> (PolyCommitment, PolyCommitmentBlinds) {
    let n = self.Z.len();
    let ell = self.get_num_vars();
    assert_eq!(n, ell.pow2());

    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell);
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    assert_eq!(L_size * R_size, n);

    let blinds = match hiding {
      true => PolyCommitmentBlinds {
        blinds: random_tape
          .unwrap()
          .challenge_vector(b"poly_blinds", L_size),
      },
      false => PolyCommitmentBlinds {
        blinds: vec![Scalar::zero(); L_size],
      },
    };

    (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds)
  }

  /// Vector-matrix product L * Z with Z viewed as an L_size x R_size
  /// matrix; returns the R_size-sized result row.
  pub fn bound(&self, L: &Vec<Scalar>) -> Vec<Scalar> {
    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars());
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    (0..R_size)
      .map(|i| (0..L_size).map(|j| &L[j] * &self.Z[j * R_size + i]).sum())
      .collect::<Vec<Scalar>>()
  }

  /// Binds the most-significant variable to `r`, halving the table:
  /// Z'[i] = (1 - r) * Z[i] + r * Z[i + n/2].
  pub fn bound_poly_var_top(&mut self, r: &Scalar) {
    let n = self.len() / 2;
    for i in 0..n {
      self.Z[i] = &self.Z[i] + r * (&self.Z[i + n] - &self.Z[i]);
    }
    self.num_vars = self.num_vars - 1;
    self.len = n;
  }

  /// Binds the least-significant variable to `r`, halving the table:
  /// Z'[i] = (1 - r) * Z[2i] + r * Z[2i + 1].
  pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
    let n = self.len() / 2;
    for i in 0..n {
      self.Z[i] = &self.Z[2 * i] + r * (&self.Z[2 * i + 1] - &self.Z[2 * i]);
    }
    self.num_vars = self.num_vars - 1;
    self.len = n;
  }

  /// Dot product of the two evaluation tables.
  pub fn dotproduct(&self, other: &DensePolynomial) -> Scalar {
    assert_eq!(self.len(), other.len());
    let mut res = Scalar::zero();
    for i in 0..self.len() {
      res = &res + &self.Z[i] * &other[i];
    }
    res
  }

  // returns Z(r) in O(n) time
  pub fn evaluate(&self, r: &Vec<Scalar>) -> Scalar {
    // r must have a value for each variable
    assert_eq!(r.len(), self.get_num_vars());
    let chis = EqPolynomial::new(r.to_vec()).evals();
    assert_eq!(chis.len(), self.Z.len());
    DotProductProofLog::compute_dotproduct(&self.Z, &chis)
  }

  fn vec(&self) -> &Vec<Scalar> {
    &self.Z
  }

  /// Appends `other`'s evaluations, adding one variable; both
  /// polynomials must be unbound and of equal length.
  pub fn extend(&mut self, other: &DensePolynomial) {
    // TODO: allow extension even when some vars are bound
    assert_eq!(self.Z.len(), self.len);
    let other_vec = other.vec();
    assert_eq!(other_vec.len(), self.len);
    self.Z.extend(other_vec);
    self.num_vars = self.num_vars + 1;
    self.len = 2 * self.len;
    assert_eq!(self.Z.len(), self.len);
  }

  /// Concatenates the evaluation tables of several polynomials and
  /// zero-pads the result up to the next power of two.
  pub fn merge<'a, I>(polys: I) -> DensePolynomial
  where
    I: IntoIterator<Item = &'a DensePolynomial>,
  {
    //assert!(polys.len() > 0);
    //let num_vars = polys[0].num_vars();
    let mut Z: Vec<Scalar> = Vec::new();
    for poly in polys.into_iter() {
      //assert_eq!(poly.get_num_vars(), num_vars); // ensure each polynomial has the same number of variables
      //assert_eq!(poly.len, poly.vec().len()); // ensure no variable is already bound
      Z.extend(poly.vec());
    }

    // pad the polynomial with zero polynomial at the end
    Z.resize(Z.len().next_power_of_two(), Scalar::zero());

    DensePolynomial::new(Z)
  }

  /// Builds a polynomial from small integer evaluations.
  pub fn from_usize(Z: &Vec<usize>) -> Self {
    DensePolynomial::new(
      (0..Z.len())
        .map(|i| Scalar::from(Z[i] as u64))
        .collect::<Vec<Scalar>>(),
    )
  }
}
impl Index<usize> for DensePolynomial {
  type Output = Scalar;

  /// Read-only access into the evaluation table.
  #[inline(always)]
  fn index(&self, _index: usize) -> &Scalar {
    &self.Z[_index]
  }
}
impl AppendToTranscript for PolyCommitment {
  /// Appends every commitment share to the transcript, framed by
  /// begin/end markers so the encoding is unambiguous.
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"poly_commitment_begin");
    for share in &self.C {
      transcript.append_point(b"poly_commitment_share", share);
    }
    transcript.append_message(label, b"poly_commitment_end");
  }
}
/// Proof that a committed multilinear polynomial evaluates to a claimed
/// value at a point; reduces to a log-sized dot-product proof.
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyEvalProof {
  proof: DotProductProofLog,
}
impl PolyEvalProof {
  fn protocol_name() -> &'static [u8] {
    b"polynomial evaluation proof"
  }

  /// Proves that `poly(r) == Zr` relative to the commitment produced
  /// with `blinds_opt` (zero blinds when `None`). Returns the proof and
  /// a commitment to `Zr` under `blind_Zr_opt` (zero when `None`).
  pub fn prove(
    poly: &DensePolynomial,
    blinds_opt: Option<&PolyCommitmentBlinds>,
    r: &Vec<Scalar>, // point at which the polynomial is evaluated
    Zr: &Scalar,     // evaluation of \widetilde{Z}(r)
    blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> (PolyEvalProof, CompressedGroup) {
    transcript.append_protocol_name(PolyEvalProof::protocol_name());

    // assert vectors are of the right size
    assert_eq!(poly.get_num_vars(), r.len());

    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len());
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();

    let default_blinds = PolyCommitmentBlinds {
      blinds: vec![Scalar::zero(); L_size],
    };
    let blinds = match blinds_opt {
      Some(p) => p,
      None => &default_blinds,
    };

    assert_eq!(blinds.blinds.len(), L_size);

    let zero = Scalar::zero();
    let blind_Zr = match blind_Zr_opt {
      Some(p) => p,
      None => &zero,
    };

    // compute the L and R vectors
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();
    assert_eq!(L.len(), L_size);
    assert_eq!(R.len(), R_size);

    // compute the vector underneath L*Z and the L*blinds
    // compute vector-matrix product between L and Z viewed as a matrix
    let LZ = poly.bound(&L);
    // the blind of the L-combined row is the same L-combination of row blinds
    let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum();

    // a dot product proof of size R_size
    let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove(
      &gens.gens,
      transcript,
      random_tape,
      &LZ,
      &LZ_blind,
      &R,
      &Zr,
      blind_Zr,
    );

    (PolyEvalProof { proof }, C_Zr_prime)
  }

  /// Verifies the proof against a commitment `C_Zr` to the claimed
  /// evaluation and the polynomial commitment `comm`.
  pub fn verify(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    r: &Vec<Scalar>,        // point at which the polynomial is evaluated
    C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
    comm: &PolyCommitment,
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(PolyEvalProof::protocol_name());

    // compute L and R
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();

    // compute a weighted sum of commitments and L
    // NOTE(review): `unwrap` panics on a malformed commitment point
    // rather than returning Err — confirm this is acceptable.
    let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap());

    let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed).compress();

    self
      .proof
      .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr)
  }

  /// Verifies against a random linear combination (with `coeff`) of
  /// several polynomial commitments.
  pub fn verify_batched(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    r: &Vec<Scalar>,        // point at which the polynomial is evaluated
    C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
    comm: &[&PolyCommitment],
    coeff: &[&Scalar],
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(PolyEvalProof::protocol_name());

    // compute L and R
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();

    // compute a weighted sum of commitments and L
    let C_decompressed: Vec<Vec<GroupElement>> = (0..comm.len())
      .map(|i| {
        comm[i]
          .C
          .iter()
          .map(|pt| pt.decompress().unwrap())
          .collect()
      })
      .collect();

    // per-commitment L-combination, then a coeff-weighted sum across them
    let C_LZ: Vec<GroupElement> = (0..comm.len())
      .map(|i| GroupElement::vartime_multiscalar_mul(&L, &C_decompressed[i]))
      .collect();

    let C_LZ_combined: GroupElement = (0..C_LZ.len()).map(|i| C_LZ[i] * coeff[i]).sum();

    self.proof.verify(
      R.len(),
      &gens.gens,
      transcript,
      &R,
      &C_LZ_combined.compress(),
      C_Zr,
    )
  }

  /// Verifies a non-hiding claim: the evaluation `Zr` is public, so it
  /// is committed with a zero blind before verification.
  pub fn verify_plain(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    r: &Vec<Scalar>, // point at which the polynomial is evaluated
    Zr: &Scalar,     // evaluation \widetilde{Z}(r)
    comm: &PolyCommitment,
  ) -> Result<(), ProofVerifyError> {
    // compute a commitment to Zr with a blind of zero
    let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress();

    self.verify(gens, transcript, r, &C_Zr, comm)
  }

  /// Batched variant of `verify_plain`.
  pub fn verify_plain_batched(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    r: &Vec<Scalar>, // point at which the polynomial is evaluated
    Zr: &Scalar,     // evaluation \widetilde{Z}(r)
    comm: &[&PolyCommitment],
    coeff: &[&Scalar],
  ) -> Result<(), ProofVerifyError> {
    // compute a commitment to Zr with a blind of zero
    let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress();
    assert_eq!(comm.len(), coeff.len());
    self.verify_batched(gens, transcript, r, &C_Zr, comm, coeff)
  }
}
#[cfg(test)]
mod tests {
  use super::super::scalar::ScalarFromPrimitives;
  use super::*;
  use rand::rngs::OsRng;

  // Evaluates Z at r via the factored (L, R) tables; reference
  // implementation used to cross-check `DensePolynomial::evaluate`.
  fn evaluate_with_LR(Z: &Vec<Scalar>, r: &Vec<Scalar>) -> Scalar {
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();

    let ell = r.len();
    // ensure ell is even
    assert!(ell % 2 == 0);
    // compute n = 2^\ell
    let n = ell.pow2();
    // compute m = sqrt(n) = 2^{\ell/2}
    let m = n.square_root();

    // compute vector-matrix product between L and Z viewed as a matrix
    let LZ = (0..m)
      .map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum())
      .collect::<Vec<Scalar>>();

    // compute dot product between LZ and R
    DotProductProofLog::compute_dotproduct(&LZ, &R)
  }

  #[test]
  fn check_polynomial_evaluation() {
    let mut Z: Vec<Scalar> = Vec::new(); // Z = [1, 2, 1, 4]
    Z.push(Scalar::one());
    Z.push((2 as usize).to_scalar());
    Z.push((1 as usize).to_scalar());
    Z.push((4 as usize).to_scalar());

    // r = [4,3]
    let mut r: Vec<Scalar> = Vec::new();
    r.push((4 as usize).to_scalar());
    r.push((3 as usize).to_scalar());

    let eval_with_LR = evaluate_with_LR(&Z, &r);
    let poly = DensePolynomial::new(Z);

    let eval = poly.evaluate(&r);
    assert_eq!(eval, (28 as usize).to_scalar());
    assert_eq!(eval_with_LR, eval);
  }

  // Naive O(2^ell * ell) computation of the factored eq tables; used to
  // validate the memoized `compute_factored_evals`.
  pub fn compute_factored_chis_at_r(r: &Vec<Scalar>) -> (Vec<Scalar>, Vec<Scalar>) {
    let mut L: Vec<Scalar> = Vec::new();
    let mut R: Vec<Scalar> = Vec::new();

    let ell = r.len();
    assert!(ell % 2 == 0); // ensure ell is even
    let n = ell.pow2();
    let m = n.square_root();

    // compute row vector L
    for i in 0..m {
      let mut chi_i = Scalar::one();
      for j in 0..ell / 2 {
        let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      L.push(chi_i);
    }

    // compute column vector R
    for i in 0..m {
      let mut chi_i = Scalar::one();
      for j in ell / 2..ell {
        let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      R.push(chi_i);
    }
    (L, R)
  }

  // Naive O(2^ell * ell) computation of all eq evals; used to validate
  // the memoized `EqPolynomial::evals`.
  pub fn compute_chis_at_r(r: &Vec<Scalar>) -> Vec<Scalar> {
    let ell = r.len();
    let n = ell.pow2();
    let mut chis: Vec<Scalar> = Vec::new();
    for i in 0..n {
      let mut chi_i = Scalar::one();
      for j in 0..r.len() {
        let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      chis.push(chi_i);
    }
    chis
  }

  // Outer product of two equal-length vectors, flattened row-major.
  pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
    assert_eq!(L.len(), R.len());

    let mut O: Vec<Scalar> = Vec::new();
    let m = L.len();
    for i in 0..m {
      for j in 0..m {
        O.push(L[i] * R[j]);
      }
    }
    O
  }

  #[test]
  fn check_memoized_chis() {
    let mut csprng: OsRng = OsRng;

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let chis = tests::compute_chis_at_r(&r);
    let chis_m = EqPolynomial::new(r).evals();
    assert_eq!(chis, chis_m);
  }

  #[test]
  fn check_factored_chis() {
    let mut csprng: OsRng = OsRng;

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let chis = EqPolynomial::new(r.clone()).evals();
    let (L, R) = EqPolynomial::new(r).compute_factored_evals();
    let O = compute_outerproduct(L, R);
    assert_eq!(chis, O);
  }

  #[test]
  fn check_memoized_factored_chis() {
    let mut csprng: OsRng = OsRng;

    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let (L, R) = tests::compute_factored_chis_at_r(&r);
    let eq = EqPolynomial::new(r);
    let (L2, R2) = eq.compute_factored_evals();
    assert_eq!(L, L2);
    assert_eq!(R, R2);
  }

  // End-to-end: commit, prove an evaluation, and verify the proof.
  #[test]
  fn check_polynomial_commit() {
    let mut Z: Vec<Scalar> = Vec::new(); // Z = [1, 2, 1, 4]
    Z.push((1 as usize).to_scalar());
    Z.push((2 as usize).to_scalar());
    Z.push((1 as usize).to_scalar());
    Z.push((4 as usize).to_scalar());

    let poly = DensePolynomial::new(Z);

    // r = [4,3]
    let mut r: Vec<Scalar> = Vec::new();
    r.push((4 as usize).to_scalar());
    r.push((3 as usize).to_scalar());
    let eval = poly.evaluate(&r);
    assert_eq!(eval, (28 as usize).to_scalar());

    let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
    let (poly_commitment, blinds) = poly.commit(false, &gens, None);

    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, C_Zr) = PolyEvalProof::prove(
      &poly,
      Some(&blinds),
      &r,
      &eval,
      None,
      &gens,
      &mut prover_transcript,
      &mut random_tape,
    );

    let mut verifier_transcript = Transcript::new(b"example");

    assert!(proof
      .verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment)
      .is_ok());
  }
}

+ 15
- 0
src/errors.rs

@ -0,0 +1,15 @@
use std::fmt;
/// Unit error type returned when a proof fails verification; carries no
/// further detail about the failure.
pub struct ProofVerifyError;

impl fmt::Display for ProofVerifyError {
  /// User-facing message for a failed verification.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("Proof verification failed")
  }
}
impl fmt::Debug for ProofVerifyError {
  // NOTE(review): `file!()`/`line!()` expand where this impl is written
  // (this file), not where the error was constructed, so the printed
  // location is always the same — confirm this is intended.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    write!(f, "{{ file: {}, line: {} }}", file!(), line!())
  }
}

+ 101
- 0
src/group.rs

@ -0,0 +1,101 @@
use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
use core::borrow::Borrow;
use core::ops::{Mul, MulAssign};
// The group used throughout: Ristretto over curve25519.
pub type GroupElement = curve25519_dalek::ristretto::RistrettoPoint;
pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;

// Compressed canonical basepoint; used as a domain separator when
// deriving commitment generators.
pub const GROUP_BASEPOINT_COMPRESSED: CompressedGroup =
  curve25519_dalek::constants::RISTRETTO_BASEPOINT_COMPRESSED;
impl<'b> MulAssign<&'b Scalar> for GroupElement {
  /// In-place scalar multiplication; converts the local `Scalar` into the
  /// dalek scalar representation before multiplying.
  fn mul_assign(&mut self, scalar: &'b Scalar) {
    let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
    *self = result;
  }
}
impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
  type Output = GroupElement;
  /// Point-times-scalar multiplication via the dalek representation.
  fn mul(self, scalar: &'b Scalar) -> GroupElement {
    self * Scalar::decompress_scalar(scalar)
  }
}
impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
  type Output = GroupElement;
  /// Scalar-times-point multiplication via the dalek representation.
  fn mul(self, point: &'b GroupElement) -> GroupElement {
    Scalar::decompress_scalar(self) * point
  }
}
// Given the borrowed-by-borrowed `Mul` impl, derive the remaining
// owned/borrowed operand combinations by delegating to it.
macro_rules! define_mul_variants {
  (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
    impl<'b> Mul<&'b $rhs> for $lhs {
      type Output = $out;
      fn mul(self, rhs: &'b $rhs) -> $out {
        &self * rhs
      }
    }

    impl<'a> Mul<$rhs> for &'a $lhs {
      type Output = $out;
      fn mul(self, rhs: $rhs) -> $out {
        self * &rhs
      }
    }

    impl Mul<$rhs> for $lhs {
      type Output = $out;
      fn mul(self, rhs: $rhs) -> $out {
        &self * &rhs
      }
    }
  };
}
// Derive the by-value `MulAssign` from the by-reference impl.
macro_rules! define_mul_assign_variants {
  (LHS = $lhs:ty, RHS = $rhs:ty) => {
    impl MulAssign<$rhs> for $lhs {
      fn mul_assign(&mut self, rhs: $rhs) {
        *self *= &rhs;
      }
    }
  };
}
// Generate all Mul/MulAssign ownership variants for the pairs used here.
define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
/// Multiscalar multiplication `sum_i scalars[i] * points[i]`. Runs in
/// variable time, so it must only be applied to public data
/// (e.g., verifier-side computation).
pub trait VartimeMultiscalarMul {
  type Scalar;
  fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
  where
    I: IntoIterator,
    I::Item: Borrow<Self::Scalar>,
    J: IntoIterator,
    J::Item: Borrow<Self>,
    Self: Clone;
}
impl VartimeMultiscalarMul for GroupElement {
  type Scalar = super::scalar::Scalar;
  /// Converts each scalar to the dalek representation (collecting into a
  /// temporary Vec) and delegates to dalek's variable-time MSM.
  fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self
  where
    I: IntoIterator,
    I::Item: Borrow<Self::Scalar>,
    J: IntoIterator,
    J::Item: Borrow<Self>,
    Self: Clone,
  {
    use curve25519_dalek::traits::VartimeMultiscalarMul;
    <Self as VartimeMultiscalarMul>::vartime_multiscalar_mul(
      scalars
        .into_iter()
        .map(|s| Scalar::decompress_scalar(s.borrow()))
        .collect::<Vec<ScalarBytes>>(),
      points,
    )
  }
}

+ 31
- 0
src/lib.rs

@ -0,0 +1,31 @@
#![allow(non_snake_case)]
#![feature(test)]
extern crate byteorder;
extern crate core;
extern crate curve25519_dalek;
extern crate digest;
extern crate merlin;
extern crate rand;
extern crate rayon;
extern crate sha3;
extern crate test;
mod bullet;
pub mod commitments;
pub mod dense_mlpoly;
mod errors;
mod group;
pub mod math;
pub mod nizk;
mod product_tree;
pub mod r1csinstance;
pub mod r1csproof;
pub mod scalar;
mod scalar_25519;
pub mod sparse_mlpoly;
pub mod spartan;
pub mod sumcheck;
pub mod timer;
pub mod transcript;
mod unipoly;

+ 31
- 0
src/math.rs

@ -0,0 +1,31 @@
/// Integer helpers used for sizing multilinear-polynomial tables.
pub trait Math {
  fn square_root(self) -> usize;
  fn pow2(self) -> usize;
  fn log2(self) -> usize;
  fn get_bits(self, num_bits: usize) -> Vec<bool>;
}

impl Math for usize {
  /// Floor of the square root, computed exactly with integer Newton
  /// iteration. (The previous `f64`-based version loses precision for
  /// values above 2^53.)
  #[inline]
  fn square_root(self) -> usize {
    if self < 2 {
      return self;
    }
    // Newton's method on integers converges to floor(sqrt(self)).
    let mut x = self;
    let mut y = (x + 1) / 2;
    while y < x {
      x = y;
      y = (x + self / x) / 2;
    }
    x
  }

  #[inline]
  fn pow2(self) -> usize {
    let base: usize = 2;
    base.pow(self as u32)
  }

  /// Floor of log2, exact for every `usize` value (the previous
  /// `f64`-based version can round incorrectly above 2^53).
  /// Returns 0 for an input of 0, matching the old cast behavior.
  #[inline]
  fn log2(self) -> usize {
    if self == 0 {
      0
    } else {
      (usize::BITS - 1 - self.leading_zeros()) as usize
    }
  }

  /// Returns the num_bits from n in a canonical order
  fn get_bits(self, num_bits: usize) -> Vec<bool> {
    (0..num_bits)
      .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
      .collect::<Vec<bool>>()
  }
}

+ 750
- 0
src/nizk.rs

@ -0,0 +1,750 @@
use super::bullet::BulletReductionProof;
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::CompressedGroup;
use super::math::Math;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use serde::{Deserialize, Serialize};
/// Sigma-protocol proof of knowledge of the opening `(x, r)` of a
/// Pedersen-style commitment `C = Com(x; r)`, made non-interactive via
/// the Fiat-Shamir transcript.
#[derive(Serialize, Deserialize, Debug)]
pub struct KnowledgeProof {
  // Prover's first message: commitment to the random masks (t1, t2).
  alpha: CompressedGroup,
  // Responses z1 = x*c + t1 and z2 = r*c + t2 for challenge c.
  z1: Scalar,
  z2: Scalar,
}
impl KnowledgeProof {
  // Domain-separation label for this sub-protocol's transcript.
  fn protocol_name() -> &'static [u8] {
    b"knowledge proof"
  }
  /// Proves knowledge of the opening `(x, r)`; returns the proof and
  /// the commitment `C = Com(x; r)` it speaks about. `random_tape` is a
  /// transcript used as a deterministic source of prover randomness.
  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
    x: &Scalar,
    r: &Scalar,
  ) -> (KnowledgeProof, CompressedGroup) {
    transcript.append_protocol_name(KnowledgeProof::protocol_name());
    // produce two random Scalars
    let t1 = random_tape.challenge_scalar(b"t1");
    let t2 = random_tape.challenge_scalar(b"t2");
    let C = x.commit(&r, gens_n).compress();
    C.append_to_transcript(b"C", transcript);
    let alpha = t1.commit(&t2, gens_n).compress();
    alpha.append_to_transcript(b"alpha", transcript);
    // Fiat-Shamir challenge derived from (C, alpha).
    let c = transcript.challenge_scalar(b"c");
    let z1 = x * &c + &t1;
    let z2 = r * &c + &t2;
    (KnowledgeProof { alpha, z1, z2 }, C)
  }
  /// Verifies the proof against commitment `C`.
  /// Checks `Com(z1; z2) == c*C + alpha`; the transcript appends must
  /// mirror `prove` exactly so both sides derive the same challenge.
  pub fn verify(
    &self,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    C: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(KnowledgeProof::protocol_name());
    C.append_to_transcript(b"C", transcript);
    self.alpha.append_to_transcript(b"alpha", transcript);
    let c = transcript.challenge_scalar(b"c");
    let lhs = self.z1.commit(&self.z2, gens_n).compress();
    let rhs = (&c * C.decompress().expect("Could not decompress C")
      + self
        .alpha
        .decompress()
        .expect("Could not decompress self.alpha"))
    .compress();
    if lhs == rhs {
      Ok(())
    } else {
      Err(ProofVerifyError)
    }
  }
}
/// Proof that two commitments `C1 = Com(v1; s1)` and `C2 = Com(v2; s2)`
/// hide the same value (`v1 == v2`), without revealing it.
#[derive(Serialize, Deserialize, Debug)]
pub struct EqualityProof {
  // Prover's first message: r * h (blinding-only commitment).
  alpha: CompressedGroup,
  // Response z = c*(s1 - s2) + r for challenge c.
  z: Scalar,
}
impl EqualityProof {
  // Domain-separation label for this sub-protocol's transcript.
  fn protocol_name() -> &'static [u8] {
    b"equality proof"
  }
  /// Proves `v1 == v2` under blinds `s1`, `s2`; returns the proof and
  /// the two commitments it speaks about. If v1 == v2 then C1 - C2 is a
  /// commitment to zero, i.e. (s1 - s2) * h, so a Schnorr proof on base
  /// `h` alone suffices.
  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
    v1: &Scalar,
    s1: &Scalar,
    v2: &Scalar,
    s2: &Scalar,
  ) -> (EqualityProof, CompressedGroup, CompressedGroup) {
    transcript.append_protocol_name(EqualityProof::protocol_name());
    // produce a random Scalar
    let r = random_tape.challenge_scalar(b"r");
    let C1 = v1.commit(&s1, gens_n).compress();
    C1.append_to_transcript(b"C1", transcript);
    let C2 = v2.commit(&s2, gens_n).compress();
    C2.append_to_transcript(b"C2", transcript);
    let alpha = (&r * gens_n.h).compress();
    alpha.append_to_transcript(b"alpha", transcript);
    let c = transcript.challenge_scalar(b"c");
    let z = &c * (s1 - s2) + &r;
    (EqualityProof { alpha, z }, C1, C2)
  }
  /// Verifies `z * h == c*(C1 - C2) + alpha`; transcript appends must
  /// mirror `prove` exactly.
  pub fn verify(
    &self,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    C1: &CompressedGroup,
    C2: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(EqualityProof::protocol_name());
    C1.append_to_transcript(b"C1", transcript);
    C2.append_to_transcript(b"C2", transcript);
    self.alpha.append_to_transcript(b"alpha", transcript);
    let c = transcript.challenge_scalar(b"c");
    let rhs = {
      let C = &C1.decompress().unwrap() - &C2.decompress().unwrap();
      (&c * C + &self.alpha.decompress().unwrap()).compress()
    };
    let lhs = (&self.z * gens_n.h).compress();
    if lhs == rhs {
      Ok(())
    } else {
      Err(ProofVerifyError)
    }
  }
}
/// Proof that committed values satisfy `x * y = z`, for commitments
/// X, Y, Z produced with the same generators (a Chaum-Pedersen-style
/// product argument).
#[derive(Serialize, Deserialize, Debug)]
pub struct ProductProof {
  // First messages: masks for the X, Y, and product relations.
  alpha: CompressedGroup,
  beta: CompressedGroup,
  delta: CompressedGroup,
  // Responses z1..z5 for the three verification equations.
  z: [Scalar; 5],
}
impl ProductProof {
  // Domain-separation label for this sub-protocol's transcript.
  fn protocol_name() -> &'static [u8] {
    b"product proof"
  }
  /// Proves `x * y = z` given openings `(x, rX)`, `(y, rY)`, `(z, rZ)`.
  /// Returns the proof together with the three commitments X, Y, Z that
  /// it speaks about. Prover randomness comes from `random_tape`.
  pub fn prove(
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
    x: &Scalar,
    rX: &Scalar,
    y: &Scalar,
    rY: &Scalar,
    z: &Scalar,
    rZ: &Scalar,
  ) -> (
    ProductProof,
    CompressedGroup,
    CompressedGroup,
    CompressedGroup,
  ) {
    transcript.append_protocol_name(ProductProof::protocol_name());
    // produce five random Scalar
    let b1 = random_tape.challenge_scalar(b"b1");
    let b2 = random_tape.challenge_scalar(b"b2");
    let b3 = random_tape.challenge_scalar(b"b3");
    let b4 = random_tape.challenge_scalar(b"b4");
    let b5 = random_tape.challenge_scalar(b"b5");
    let X = x.commit(&rX, gens_n).compress();
    X.append_to_transcript(b"X", transcript);
    let Y = y.commit(&rY, gens_n).compress();
    Y.append_to_transcript(b"Y", transcript);
    let Z = z.commit(&rZ, gens_n).compress();
    Z.append_to_transcript(b"Z", transcript);
    let alpha = b1.commit(&b2, gens_n).compress();
    alpha.append_to_transcript(b"alpha", transcript);
    let beta = b3.commit(&b4, gens_n).compress();
    beta.append_to_transcript(b"beta", transcript);
    let delta = {
      // The third relation is proven in "base X": using the commitment X
      // itself as the generator ties Z to the product x * y.
      let gens_X = &MultiCommitGens {
        n: 1,
        G: vec![X.decompress().unwrap()],
        h: gens_n.h,
      };
      b3.commit(&b5, gens_X).compress()
    };
    delta.append_to_transcript(b"delta", transcript);
    let c = transcript.challenge_scalar(b"c");
    let z1 = &b1 + &c * x;
    let z2 = &b2 + &c * rX;
    let z3 = &b3 + &c * y;
    let z4 = &b4 + &c * rY;
    // rZ - rX*y is the blind of Z when re-expressed in base X.
    let z5 = &b5 + &c * (rZ - rX * y);
    let z = [z1, z2, z3, z4, z5];
    (
      ProductProof {
        alpha,
        beta,
        delta,
        z,
      },
      X,
      Y,
      Z,
    )
  }
  /// Checks one Schnorr-style equation: `Com(z1; z2) == P + c*X`.
  fn check_equality(
    P: &CompressedGroup,
    X: &CompressedGroup,
    c: &Scalar,
    gens_n: &MultiCommitGens,
    z1: &Scalar,
    z2: &Scalar,
  ) -> bool {
    let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
    let rhs = z1.commit(&z2, gens_n).compress();
    // Return the comparison directly (was `if lhs == rhs { true } else { false }`).
    lhs == rhs
  }
  /// Verifies the three equations tying X, Y, Z to `x * y = z`; the
  /// transcript appends must mirror `prove` exactly.
  pub fn verify(
    &self,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    X: &CompressedGroup,
    Y: &CompressedGroup,
    Z: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(ProductProof::protocol_name());
    X.append_to_transcript(b"X", transcript);
    Y.append_to_transcript(b"Y", transcript);
    Z.append_to_transcript(b"Z", transcript);
    self.alpha.append_to_transcript(b"alpha", transcript);
    self.beta.append_to_transcript(b"beta", transcript);
    self.delta.append_to_transcript(b"delta", transcript);
    let z1 = self.z[0];
    let z2 = self.z[1];
    let z3 = self.z[2];
    let z4 = self.z[3];
    let z5 = self.z[4];
    let c = transcript.challenge_scalar(b"c");
    if ProductProof::check_equality(&self.alpha, &X, &c, &gens_n, &z1, &z2)
      && ProductProof::check_equality(&self.beta, &Y, &c, &gens_n, &z3, &z4)
      // Third check runs in base X (see `prove`), so it reuses z3.
      && ProductProof::check_equality(
        &self.delta,
        &Z,
        &c,
        &MultiCommitGens {
          n: 1,
          G: vec![X.decompress().unwrap()],
          h: gens_n.h,
        },
        &z3,
        &z5,
      )
    {
      Ok(())
    } else {
      Err(ProofVerifyError)
    }
  }
}
/// Linear-size proof that `<x, a> = y` for a committed vector `x`,
/// public vector `a`, and committed scalar `y`. Proof size is O(n);
/// see `DotProductProofLog` for the O(log n) variant.
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProof {
  // First messages: commitments to the random mask vector d and to <a, d>.
  delta: CompressedGroup,
  beta: CompressedGroup,
  // Responses: z = c*x + d (component-wise) and blinds for delta/beta.
  z: Vec<Scalar>,
  z_delta: Scalar,
  z_beta: Scalar,
}
impl DotProductProof {
  // Domain-separation label for this sub-protocol's transcript.
  fn protocol_name() -> &'static [u8] {
    b"dot product proof"
  }
  /// Computes `<a, b>`; panics if the vectors differ in length.
  pub fn compute_dotproduct(a: &Vec<Scalar>, b: &Vec<Scalar>) -> Scalar {
    assert_eq!(a.len(), b.len());
    (0..a.len()).map(|i| &a[i] * &b[i]).sum()
  }
  /// Proves `<x, a> = y`; returns the proof together with
  /// `Cx = Com(x; r_x)` and `Cy = Com(y; r_y)`. Prover randomness is
  /// drawn from `random_tape`.
  pub fn prove(
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
    x: &Vec<Scalar>,
    r_x: &Scalar,
    a: &Vec<Scalar>,
    y: &Scalar,
    r_y: &Scalar,
  ) -> (DotProductProof, CompressedGroup, CompressedGroup) {
    transcript.append_protocol_name(DotProductProof::protocol_name());
    let n = x.len();
    assert_eq!(x.len(), a.len());
    assert_eq!(gens_n.n, a.len());
    assert_eq!(gens_1.n, 1);
    // produce randomness for the proofs
    let d = random_tape.challenge_vector(b"d", n);
    let r_delta = random_tape.challenge_scalar(b"r_delta");
    let r_beta = random_tape.challenge_scalar(b"r_beta");
    let Cx = x.commit(&r_x, gens_n).compress();
    Cx.append_to_transcript(b"Cx", transcript);
    let Cy = y.commit(&r_y, gens_1).compress();
    Cy.append_to_transcript(b"Cy", transcript);
    let delta = d.commit(&r_delta, gens_n).compress();
    delta.append_to_transcript(b"delta", transcript);
    let dotproduct_a_d = DotProductProof::compute_dotproduct(&a, &d);
    let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
    beta.append_to_transcript(b"beta", transcript);
    let c = transcript.challenge_scalar(b"c");
    // Mask the witness: z = c*x + d, with matching blinds below.
    let z = (0..d.len())
      .map(|i| c * x[i] + d[i])
      .collect::<Vec<Scalar>>();
    let z_delta = c * r_x + r_delta;
    let z_beta = c * r_y + r_beta;
    (
      DotProductProof {
        delta,
        beta,
        z,
        z_delta,
        z_beta,
      },
      Cx,
      Cy,
    )
  }
  /// Verifies the two equations
  /// `c*Cx + delta == Com(z; z_delta)` and
  /// `c*Cy + beta == Com(<z, a>; z_beta)`;
  /// transcript appends must mirror `prove` exactly.
  pub fn verify(
    &self,
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    a: &Vec<Scalar>,
    Cx: &CompressedGroup,
    Cy: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    assert_eq!(gens_n.n, a.len());
    assert_eq!(gens_1.n, 1);
    transcript.append_protocol_name(DotProductProof::protocol_name());
    Cx.append_to_transcript(b"Cx", transcript);
    Cy.append_to_transcript(b"Cy", transcript);
    self.delta.append_to_transcript(b"delta", transcript);
    self.beta.append_to_transcript(b"beta", transcript);
    let c = transcript.challenge_scalar(b"c");
    let mut result = &c * Cx.decompress().unwrap() + self.delta.decompress().unwrap()
      == self.z.commit(&self.z_delta, gens_n);
    let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, &a);
    result &= &c * Cy.decompress().unwrap() + self.beta.decompress().unwrap()
      == dotproduct_z_a.commit(&self.z_beta, gens_1);
    if result {
      Ok(())
    } else {
      Err(ProofVerifyError)
    }
  }
}
/// Generators for the dot-product proofs: `gens_n` commits to
/// length-`n` vectors and `gens_1` to single scalars.
pub struct DotProductProofGens {
  n: usize,
  pub gens_n: MultiCommitGens,
  pub gens_1: MultiCommitGens,
}
impl DotProductProofGens {
  pub fn new(n: usize, label: &[u8]) -> Self {
    // Derive n+1 generators from the label and split them into a
    // vector-sized set (first n) and a single-element set (the last one),
    // so both sets come from one domain-separated derivation.
    let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at_mut(n);
    DotProductProofGens { n, gens_n, gens_1 }
  }
}
/// Logarithmic-size proof that `<x, a> = y`, built from a
/// Bulletproofs-style inner-product reduction followed by a
/// constant-size Schnorr-style argument on the reduced instance.
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProofLog {
  // log(n) rounds of the inner-product reduction.
  bullet_reduction_proof: BulletReductionProof,
  // Final-round first messages and responses.
  delta: CompressedGroup,
  beta: CompressedGroup,
  z1: Scalar,
  z2: Scalar,
}
impl DotProductProofLog {
fn protocol_name() -> &'static [u8] {
b"dot product proof (log)"
}
pub fn compute_dotproduct(a: &Vec<Scalar>, b: &Vec<Scalar>) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| &a[i] * &b[i]).sum()
}
pub fn prove(
gens: &DotProductProofGens,
transcript: &mut Transcript,
random_tape: &mut Transcript,
x: &Vec<Scalar>,
r_x: &Scalar,
a: &Vec<Scalar>,
y: &Scalar,
r_y: &Scalar,
) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(DotProductProofLog::protocol_name());
let n = x.len();
assert_eq!(x.len(), a.len());
assert_eq!(gens.n, n);
// produce randomness for generating a proof
let d = random_tape.challenge_scalar(b"d");
let r_delta = random_tape.challenge_scalar(b"r_delta");
let r_beta = random_tape.challenge_scalar(b"r_delta");
let blinds_vec = {
let v1 = random_tape.challenge_vector(b"blinds_vec_1", 2 * n.log2());
let v2 = random_tape.challenge_vector(b"blinds_vec_2", 2 * n.log2());
(0..v1.len())
.map(|i| (v1[i], v2[i]))
.collect::<Vec<(Scalar, Scalar)>>()
};
let Cx = x.commit(&r_x, &gens.gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(&r_y, &gens.gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
let r_Gamma = r_x + r_y;
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
BulletReductionProof::prove(
transcript,
&gens.gens_1.G[0],
&gens.gens_n.G,
&gens.gens_n.h,
x,
a,
&r_Gamma,
&blinds_vec,
);
let y_hat = x_hat * a_hat;
let delta = {
let gens_hat = MultiCommitGens {
n: 1,
G: vec![g_hat],
h: gens.gens_1.h,
};
d.commit(&r_delta, &gens_hat).compress()
};
delta.append_to_transcript(b"delta", transcript);
let beta = d.commit(&r_beta, &gens.gens_1).compress();
beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = d + c * y_hat;
let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
(
DotProductProofLog {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
n: usize,
gens: &DotProductProofGens,
transcript: &mut Transcript,
a: &Vec<Scalar>,
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens.n, n);
assert_eq!(a.len(), n);
transcript.append_protocol_name(DotProductProofLog::protocol_name());
Cx.append_to_transcript(b"Cx", transcript);
Cy.append_to_transcript(b"Cy", transcript);
let Gamma = Cx.decompress().unwrap() + Cy.decompress().unwrap();
let (g_hat, Gamma_hat, a_hat) = self
.bullet_reduction_proof
.verify(n, a, transcript, &Gamma, &gens.gens_n.G)
.unwrap();
self.delta.append_to_transcript(b"delta", transcript);
self.beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let c_s = &c;
let beta_s = self.beta.decompress().unwrap();
let a_hat_s = &a_hat;
let delta_s = self.delta.decompress().unwrap();
let z1_s = &self.z1;
let z2_s = &self.z2;
let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
let rhs = ((g_hat + &gens.gens_1.G[0] * a_hat_s) * z1_s + gens.gens_1.h * z2_s).compress();
assert_eq!(lhs, rhs);
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError)
}
}
}
#[cfg(test)]
mod tests {
  use super::*;
  use rand::rngs::OsRng;
  // Each test runs a full prove/verify round trip. The "random tape" is
  // a transcript seeded with fresh entropy, matching how callers supply
  // prover randomness elsewhere in the crate.
  #[test]
  fn check_knowledgeproof() {
    let mut csprng: OsRng = OsRng;
    let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
    let x = Scalar::random(&mut csprng);
    let r = Scalar::random(&mut csprng);
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, committed_value) =
      KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&gens_1, &mut verifier_transcript, &committed_value)
      .is_ok());
  }
  // Equality holds by construction: v2 is a copy of v1 under a
  // different blind s2.
  #[test]
  fn check_equalityproof() {
    let mut csprng: OsRng = OsRng;
    let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
    let v1 = Scalar::random(&mut csprng);
    let v2 = v1;
    let s1 = Scalar::random(&mut csprng);
    let s2 = Scalar::random(&mut csprng);
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, C1, C2) = EqualityProof::prove(
      &gens_1,
      &mut prover_transcript,
      &mut random_tape,
      &v1,
      &s1,
      &v2,
      &s2,
    );
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&gens_1, &mut verifier_transcript, &C1, &C2)
      .is_ok());
  }
  // z = x * y by construction, so the product relation holds.
  #[test]
  fn check_productproof() {
    let mut csprng: OsRng = OsRng;
    let gens_1 = MultiCommitGens::new(1, b"test-productproof");
    let x = Scalar::random(&mut csprng);
    let rX = Scalar::random(&mut csprng);
    let y = Scalar::random(&mut csprng);
    let rY = Scalar::random(&mut csprng);
    let z = x * y;
    let rZ = Scalar::random(&mut csprng);
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, X, Y, Z) = ProductProof::prove(
      &gens_1,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &rX,
      &y,
      &rY,
      &z,
      &rZ,
    );
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z)
      .is_ok());
  }
  // Linear-size dot-product proof over vectors of length 1024.
  #[test]
  fn check_dotproductproof() {
    let mut csprng: OsRng = OsRng;
    let n = 1024;
    let gens_1 = MultiCommitGens::new(1, b"test-two");
    let gens_1024 = MultiCommitGens::new(n, b"test-1024");
    let mut x: Vec<Scalar> = Vec::new();
    let mut a: Vec<Scalar> = Vec::new();
    for _ in 0..n {
      x.push(Scalar::random(&mut csprng));
      a.push(Scalar::random(&mut csprng));
    }
    let y = DotProductProofLog::compute_dotproduct(&x, &a);
    let r_x = Scalar::random(&mut csprng);
    let r_y = Scalar::random(&mut csprng);
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, Cx, Cy) = DotProductProof::prove(
      &gens_1,
      &gens_1024,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &a,
      &y,
      &r_y,
    );
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy)
      .is_ok());
  }
  // Logarithmic-size dot-product proof over vectors of length 1024.
  #[test]
  fn check_dotproductproof_log() {
    let mut csprng: OsRng = OsRng;
    let n = 1024;
    let gens = DotProductProofGens::new(n, b"test-1024");
    let x: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
    let a: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
    let y = DotProductProof::compute_dotproduct(&x, &a);
    let r_x = Scalar::random(&mut csprng);
    let r_y = Scalar::random(&mut csprng);
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, Cx, Cy) = DotProductProofLog::prove(
      &gens,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &a,
      &y,
      &r_y,
    );
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy)
      .is_ok());
  }
}

+ 489
- 0
src/product_tree.rs

@ -0,0 +1,489 @@
#[allow(dead_code)]
use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial;
use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
/// Binary product tree over a dense multilinear polynomial: layer 0
/// holds the input split into two halves, and each subsequent layer
/// stores pairwise products, halving in size until a single value.
#[derive(Debug)]
pub struct ProductCircuit {
  // left_vec[i] / right_vec[i] are the two halves of layer i.
  left_vec: Vec<DensePolynomial>,
  right_vec: Vec<DensePolynomial>,
}
impl ProductCircuit {
  /// Computes the next layer: pairwise products `left[i] * right[i]` of
  /// the current layer, returned re-split into new left/right halves.
  /// `len` counts both input halves, so the output layer has `len / 2`
  /// entries in total.
  fn compute_layer(
    inp_left: &DensePolynomial,
    inp_right: &DensePolynomial,
  ) -> (DensePolynomial, DensePolynomial) {
    let len = inp_left.len() + inp_right.len();
    let outp_left = (0..len / 4)
      .map(|i| &inp_left[i] * &inp_right[i])
      .collect::<Vec<Scalar>>();
    let outp_right = (len / 4..len / 2)
      .map(|i| &inp_left[i] * &inp_right[i])
      .collect::<Vec<Scalar>>();
    (
      DensePolynomial::new(outp_left),
      DensePolynomial::new(outp_right),
    )
  }
  /// Builds the full tree from `poly`. Assumes `poly.len()` is a power
  /// of two and >= 2 (with len 1, `num_layers - 1` would underflow).
  pub fn new(poly: &DensePolynomial) -> Self {
    let mut left_vec: Vec<DensePolynomial> = Vec::new();
    let mut right_vec: Vec<DensePolynomial> = Vec::new();
    let num_layers = poly.len().log2();
    let (outp_left, outp_right) = poly.split(poly.len() / 2);
    left_vec.push(outp_left);
    right_vec.push(outp_right);
    for i in 0..num_layers - 1 {
      let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
      left_vec.push(outp_left);
      right_vec.push(outp_right);
    }
    ProductCircuit {
      left_vec,
      right_vec,
    }
  }
  /// Returns the circuit output: the product of all input entries,
  /// read from the final (size-1) layer.
  pub fn evaluate(&self) -> Scalar {
    let len = self.left_vec.len();
    assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
    assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
    self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
  }
}
/// A weighted dot-product instance: evaluates to
/// `sum_i left[i] * right[i] * weight[i]`.
pub struct DotProductCircuit {
  left: DensePolynomial,
  right: DensePolynomial,
  weight: DensePolynomial,
}
impl DotProductCircuit {
  /// Builds an instance from three equal-length polynomials; panics if
  /// the lengths disagree.
  pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
    assert_eq!(left.len(), right.len());
    assert_eq!(left.len(), weight.len());
    DotProductCircuit {
      left,
      right,
      weight,
    }
  }
  /// Evaluates the circuit: `sum_i left[i] * right[i] * weight[i]`.
  pub fn evaluate(&self) -> Scalar {
    let n = self.left.len();
    (0..n)
      .map(|idx| &self.left[idx] * &self.right[idx] * &self.weight[idx])
      .sum()
  }
  /// Splits the instance in half (requires an even length), returning
  /// the low-half and high-half sub-instances.
  pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
    let mid = self.left.len() / 2;
    assert_eq!(mid * 2, self.left.len());
    let (left_lo, left_hi) = self.left.split(mid);
    let (right_lo, right_hi) = self.right.split(mid);
    let (weight_lo, weight_hi) = self.weight.split(mid);
    let lo = DotProductCircuit {
      left: left_lo,
      right: right_lo,
      weight: weight_lo,
    };
    let hi = DotProductCircuit {
      left: left_hi,
      right: right_hi,
      weight: weight_hi,
    };
    (lo, hi)
  }
}
#[allow(dead_code)]
/// One layer of a product-circuit evaluation proof: a sumcheck proof
/// plus the prover's claimed evaluations at the sumcheck's endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProof {
  pub proof: SumcheckInstanceProof,
  pub claims: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProof {
  /// Verifies this layer's sumcheck; returns the reduced claim and the
  /// sumcheck's random point. Panics (via unwrap) if the inner sumcheck
  /// verification fails.
  pub fn verify(
    &self,
    claim: Scalar,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut Transcript,
  ) -> (Scalar, Vec<Scalar>) {
    self
      .proof
      .verify(claim, num_rounds, degree_bound, transcript)
      .unwrap()
  }
}
#[allow(dead_code)]
/// One layer of a *batched* product-circuit proof: a single sumcheck
/// covering several circuits, with per-circuit left/right claims.
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProofBatched {
  pub proof: SumcheckInstanceProof,
  pub claims_prod_left: Vec<Scalar>,
  pub claims_prod_right: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProofBatched {
  /// Verifies this layer's sumcheck; returns the reduced claim and the
  /// sumcheck's random point. Panics (via unwrap) if the inner sumcheck
  /// verification fails.
  pub fn verify(
    &self,
    claim: Scalar,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut Transcript,
  ) -> (Scalar, Vec<Scalar>) {
    self
      .proof
      .verify(claim, num_rounds, degree_bound, transcript)
      .unwrap()
  }
}
/// GKR-style proof of a single product circuit's output: one
/// `LayerProof` per tree layer, output layer first.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProof {
  proof: Vec<LayerProof>,
}
/// Batched variant covering several product circuits (and, at the input
/// layer, optional dot-product circuits) with one sumcheck per layer.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProofBatched {
  proof: Vec<LayerProofBatched>,
  // (left, right, weight) claims for the dot-product circuits,
  // produced at the input layer only.
  claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
}
impl ProductCircuitEvalProof {
  #![allow(dead_code)]
  /// Proves the circuit's output layer by layer (output first), running
  /// one cubic sumcheck per layer. Returns the proof, the final claim
  /// about the input layer, and the random point where it must hold.
  pub fn prove(
    circuit: &mut ProductCircuit,
    transcript: &mut Transcript,
  ) -> (Self, Scalar, Vec<Scalar>) {
    let mut proof: Vec<LayerProof> = Vec::new();
    let num_layers = circuit.left_vec.len();
    let mut claim = circuit.evaluate();
    let mut rand = Vec::new();
    for layer_id in (0..num_layers).rev() {
      let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
      // eq(rand, .) ties this layer's sumcheck to the point produced by
      // the previous (outer) layer.
      let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
      assert_eq!(poly_C.len(), len / 2);
      let num_rounds_prod = poly_C.len().log2();
      let comb_func_prod = |poly_A_comp: &Scalar,
                            poly_B_comp: &Scalar,
                            poly_C_comp: &Scalar|
       -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
      let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
        &claim,
        num_rounds_prod,
        &mut circuit.left_vec[layer_id],
        &mut circuit.right_vec[layer_id],
        &mut poly_C,
        comb_func_prod,
        transcript,
      );
      transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
      transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      // Fold the (left, right) claims into one claim via a random line.
      claim = &claims_prod[0] + &r_layer * (&claims_prod[1] - &claims_prod[0]);
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
      proof.push(LayerProof {
        proof: proof_prod,
        claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
      });
    }
    (ProductCircuitEvalProof { proof }, claim, rand)
  }
  /// Verifies an evaluation claim `eval` for a circuit over `len`
  /// inputs; panics (asserts) on an invalid proof. Returns the final
  /// claim about the input layer and the point at which it must hold.
  pub fn verify(
    &self,
    eval: Scalar,
    len: usize,
    transcript: &mut Transcript,
  ) -> (Scalar, Vec<Scalar>) {
    let num_layers = len.log2();
    let mut claim = eval;
    let mut rand: Vec<Scalar> = Vec::new();
    // Layer i of the proof has i sumcheck rounds (sizes double inward).
    let mut num_rounds = 0;
    assert_eq!(self.proof.len(), num_layers);
    for i in 0..num_layers {
      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
      let claims_prod = &self.proof[i].claims;
      transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
      transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
      assert_eq!(rand.len(), rand_prod.len());
      // eq(rand, rand_prod) — the factor contributed by poly_C.
      let eq: Scalar = (0..rand.len())
        .map(|i| {
          rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
        })
        .product();
      assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
      num_rounds = num_rounds + 1;
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
    }
    (claim, rand)
  }
}
impl ProductCircuitEvalProofBatched {
  /// Proves the outputs of several product circuits at once (one cubic
  /// sumcheck per layer, batching all circuits with random coefficients).
  /// At the input layer, the given dot-product circuits are folded into
  /// the same sumcheck. All product circuits must have the same shape.
  /// Returns the proof and the final random point.
  pub fn prove(
    prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
    dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
    transcript: &mut Transcript,
  ) -> (Self, Vec<Scalar>) {
    assert!(prod_circuit_vec.len() > 0);
    let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
    let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
    let num_layers = prod_circuit_vec[0].left_vec.len();
    let mut claims_to_verify = (0..prod_circuit_vec.len())
      .map(|i| prod_circuit_vec[i].evaluate())
      .collect::<Vec<Scalar>>();
    let mut rand = Vec::new();
    for layer_id in (0..num_layers).rev() {
      // prepare paralell instance that share poly_C first
      let len = prod_circuit_vec[0].left_vec[layer_id].len()
        + prod_circuit_vec[0].right_vec[layer_id].len();
      // eq(rand, .) ties this layer to the previous layer's point.
      let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
      assert_eq!(poly_C_par.len(), len / 2);
      let num_rounds_prod = poly_C_par.len().log2();
      let comb_func_prod = |poly_A_comp: &Scalar,
                            poly_B_comp: &Scalar,
                            poly_C_comp: &Scalar|
       -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
      let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
      let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
      for prod_circuit in prod_circuit_vec.iter_mut() {
        poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
        poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
      }
      let poly_vec_par = (
        &mut poly_A_batched_par,
        &mut poly_B_batched_par,
        &mut poly_C_par,
      );
      // prepare sequential instances that don't share poly_C
      let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
      let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
      let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
      // Dot-product circuits enter only at the input layer (layer 0 of
      // this reversed iteration, i.e. the last loop iteration).
      if layer_id == 0 && dotp_circuit_vec.len() > 0 {
        // add additional claims
        for i in 0..dotp_circuit_vec.len() {
          claims_to_verify.push(dotp_circuit_vec[i].evaluate());
          assert_eq!(len / 2, dotp_circuit_vec[i].left.len());
          assert_eq!(len / 2, dotp_circuit_vec[i].right.len());
          assert_eq!(len / 2, dotp_circuit_vec[i].weight.len());
        }
        for dotp_circuit in dotp_circuit_vec.iter_mut() {
          poly_A_batched_seq.push(&mut dotp_circuit.left);
          poly_B_batched_seq.push(&mut dotp_circuit.right);
          poly_C_batched_seq.push(&mut dotp_circuit.weight);
        }
      }
      let poly_vec_seq = (
        &mut poly_A_batched_seq,
        &mut poly_B_batched_seq,
        &mut poly_C_batched_seq,
      );
      // produce a fresh set of coeffs and a joint claim
      let coeff_vec =
        transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
      let claim = (0..claims_to_verify.len())
        .map(|i| claims_to_verify[i] * coeff_vec[i])
        .sum();
      let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
        &claim,
        num_rounds_prod,
        poly_vec_par,
        poly_vec_seq,
        &coeff_vec,
        comb_func_prod,
        transcript,
      );
      let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
      for i in 0..prod_circuit_vec.len() {
        transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
        transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
      }
      if layer_id == 0 && dotp_circuit_vec.len() > 0 {
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
        for i in 0..dotp_circuit_vec.len() {
          transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
          transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
          transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
        }
        claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
      }
      // produce a random challenge to condense two claims into a single claim
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      claims_to_verify = (0..prod_circuit_vec.len())
        .map(|i| &claims_prod_left[i] + &r_layer * (&claims_prod_right[i] - &claims_prod_left[i]))
        .collect::<Vec<Scalar>>();
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
      proof_layers.push(LayerProofBatched {
        proof,
        claims_prod_left,
        claims_prod_right,
      });
    }
    (
      ProductCircuitEvalProofBatched {
        proof: proof_layers,
        claims_dotp: claims_dotp_final,
      },
      rand,
    )
  }
  /// Verifies the batched proof against the claimed product-circuit
  /// outputs and dot-product outputs; panics (asserts) on an invalid
  /// proof. Returns the final per-circuit claims (product circuits,
  /// then dot-product left/right/weight triples) and the final point.
  pub fn verify(
    &self,
    claims_prod_vec: &Vec<Scalar>,
    claims_dotp_vec: &Vec<Scalar>,
    len: usize,
    transcript: &mut Transcript,
  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
    let num_layers = len.log2();
    let mut rand: Vec<Scalar> = Vec::new();
    let mut num_rounds = 0;
    assert_eq!(self.proof.len(), num_layers);
    let mut claims_to_verify = claims_prod_vec.clone();
    let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
    for i in 0..num_layers {
      // The input layer (last iteration here) also carries the
      // dot-product claims.
      if i == num_layers - 1 {
        claims_to_verify.extend(claims_dotp_vec);
      }
      // produce random coefficients, one for each instance
      let coeff_vec =
        transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
      // produce a joint claim
      let claim = (0..claims_to_verify.len())
        .map(|i| claims_to_verify[i] * coeff_vec[i])
        .sum();
      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
      let claims_prod_left = &self.proof[i].claims_prod_left;
      let claims_prod_right = &self.proof[i].claims_prod_right;
      assert_eq!(claims_prod_left.len(), claims_prod_vec.len());
      assert_eq!(claims_prod_right.len(), claims_prod_vec.len());
      // NOTE: the inner loops below reuse `i`, shadowing the layer
      // index; inside them `i` indexes instances, not layers.
      for i in 0..claims_prod_vec.len() {
        transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
        transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
      }
      assert_eq!(rand.len(), rand_prod.len());
      // eq(rand, rand_prod) — the factor contributed by poly_C.
      let eq: Scalar = (0..rand.len())
        .map(|i| {
          rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
        })
        .product();
      let mut claim_expected: Scalar = (0..claims_prod_vec.len())
        .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
        .sum();
      // add claims from the dotp instances
      if i == num_layers - 1 {
        let num_prod_instances = claims_prod_vec.len();
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
        for i in 0..claims_dotp_left.len() {
          transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
          transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
          transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
          claim_expected = &claim_expected
            + &coeff_vec[i + num_prod_instances]
              * &claims_dotp_left[i]
              * &claims_dotp_right[i]
              * &claims_dotp_weight[i];
        }
      }
      assert_eq!(claim_expected, claim_last);
      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      claims_to_verify = (0..claims_prod_left.len())
        .map(|i| &claims_prod_left[i] + &r_layer * (&claims_prod_right[i] - &claims_prod_left[i]))
        .collect::<Vec<Scalar>>();
      // add claims to verify for dotp circuit
      if i == num_layers - 1 {
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
        // Pairs of dotp claims are folded with the same r_layer.
        for i in 0..claims_dotp_vec.len() / 2 {
          // combine left claims
          let claim_left = &claims_dotp_left[2 * i]
            + &r_layer * (&claims_dotp_left[2 * i + 1] - &claims_dotp_left[2 * i]);
          let claim_right = &claims_dotp_right[2 * i]
            + &r_layer * (&claims_dotp_right[2 * i + 1] - &claims_dotp_right[2 * i]);
          let claim_weight = &claims_dotp_weight[2 * i]
            + &r_layer * (&claims_dotp_weight[2 * i + 1] - &claims_dotp_weight[2 * i]);
          claims_to_verify_dotp.push(claim_left);
          claims_to_verify_dotp.push(claim_right);
          claims_to_verify_dotp.push(claim_weight);
        }
      }
      num_rounds = num_rounds + 1;
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
    }
    (claims_to_verify, claims_to_verify_dotp, rand)
  }
}

+ 55
- 0
src/profiler.rs

@ -0,0 +1,55 @@
#![allow(non_snake_case)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
extern crate rand;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::math::Math;
use libspartan::r1csinstance::{R1CSCommitmentGens, R1CSInstance};
use libspartan::r1csproof::R1CSGens;
use libspartan::spartan::{SpartanGens, SpartanProof};
use libspartan::timer::Timer;
use merlin::Transcript;
/// End-to-end profiler: for several instance sizes, synthesizes an
/// R1CS instance, then times encode (commitment), prove, and verify,
/// and reports the compressed proof size.
pub fn main() {
  // s is log2 of the number of variables: 4K, 64K, and 1M variables.
  for &s in [12, 16, 20].iter() {
    let num_vars = (s as usize).pow2();
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let r1cs_size = inst.size();
    let gens_r1cs_eval = R1CSCommitmentGens::new(&r1cs_size, b"gens_r1cs_eval");
    Timer::print(&format!("number_of_constraints {}", num_cons));
    // create a commitment to R1CSInstance
    let timer_encode = Timer::new("SpartanProof::encode");
    let (comm, decomm) = SpartanProof::encode(&inst, &gens_r1cs_eval);
    timer_encode.stop();
    let gens_r1cs_sat = R1CSGens::new(num_cons, num_vars, b"gens_r1cs_sat");
    let gens = SpartanGens::new(gens_r1cs_sat, gens_r1cs_eval);
    // produce a proof of satisfiability
    let timer_prove = Timer::new("SpartanProof::prove");
    let mut prover_transcript = Transcript::new(b"example");
    let proof = SpartanProof::prove(&inst, &decomm, vars, &input, &gens, &mut prover_transcript);
    timer_prove.stop();
    // Measure the proof's size after zlib compression, as reported in
    // the paper's benchmarks.
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    bincode::serialize_into(&mut encoder, &proof).unwrap();
    let proof_encoded = encoder.finish().unwrap();
    let msg_proof_len = format!("proof_compressed_len {:?}", proof_encoded.len());
    Timer::print(&msg_proof_len);
    let timer_verify = Timer::new("SpartanProof::verify");
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&comm, &input, &mut verifier_transcript, &gens)
      .is_ok());
    timer_verify.stop();
    println!();
  }
}

+ 346
- 0
src/r1csinstance.rs

@ -0,0 +1,346 @@
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::math::Math;
use super::scalar::Scalar;
use super::sparse_mlpoly::{
MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment,
SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
SparseMatPolynomialSize,
};
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
/// An R1CS instance: constraints of the form Az ∘ Bz = Cz over
/// z = [vars, 1, inputs], with the three matrices stored as sparse
/// multilinear polynomials.
#[derive(Debug)]
pub struct R1CSInstance {
  num_cons: usize,   // number of constraints (rows of A, B, C)
  num_vars: usize,   // number of witness variables
  num_inputs: usize, // number of public inputs
  A: SparseMatPolynomial, // left operand matrix
  B: SparseMatPolynomial, // right operand matrix
  C: SparseMatPolynomial, // output matrix
}

/// Sizes of the three matrix polynomials; used to size commitment generators.
pub struct R1CSInstanceSize {
  size_A: SparseMatPolynomialSize,
  size_B: SparseMatPolynomialSize,
  size_C: SparseMatPolynomialSize,
}
/// Generators used to commit to an R1CS instance (its three sparse matrices).
pub struct R1CSCommitmentGens {
  gens: SparseMatPolyCommitmentGens,
}

impl R1CSCommitmentGens {
  /// Creates generators sized for `size`. All three matrices must report the
  /// same size, since they are committed to jointly (batch size 3).
  pub fn new(size: &R1CSInstanceSize, label: &'static [u8]) -> R1CSCommitmentGens {
    assert_eq!(size.size_A, size.size_B);
    assert_eq!(size.size_A, size.size_C);
    let gens = SparseMatPolyCommitmentGens::new(&size.size_A, 3, label);
    R1CSCommitmentGens { gens }
  }
}
/// A public commitment to an R1CS instance, carrying its dimensions so the
/// verifier can run without the instance itself.
pub struct R1CSCommitment {
  num_cons: usize,
  num_vars: usize,
  num_inputs: usize,
  comm: SparseMatPolyCommitment,
}

/// The prover's opening information for an `R1CSCommitment`: the dense
/// representation of the committed sparse matrices.
pub struct R1CSDecommitment {
  dense: MultiSparseMatPolynomialAsDense,
}

impl R1CSCommitment {
  /// Number of constraints in the committed instance.
  pub fn get_num_cons(&self) -> usize {
    self.num_cons
  }

  /// Number of witness variables in the committed instance.
  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  /// Number of public inputs in the committed instance.
  pub fn get_num_inputs(&self) -> usize {
    self.num_inputs
  }
}
/// Evaluations of the three R1CS matrix polynomials at a point r = (rx, ry),
/// sent by the prover so the verifier can complete the first sum-check.
#[derive(Serialize, Deserialize, Debug)]
pub struct R1CSInstanceEvals {
  eval_A_r: Scalar,
  eval_B_r: Scalar,
  eval_C_r: Scalar,
}

impl R1CSInstanceEvals {
  /// Returns the evaluations as the tuple (A(r), B(r), C(r)).
  pub fn get_evaluations(&self) -> (Scalar, Scalar, Scalar) {
    (self.eval_A_r, self.eval_B_r, self.eval_C_r)
  }
}
impl AppendToTranscript for R1CSInstanceEvals {
  /// Absorbs the three evaluations into the transcript, framed by
  /// begin/end messages for domain separation.
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"R1CSInstanceEvals_begin");
    transcript.append_scalar(b"Ar_eval", &self.eval_A_r);
    transcript.append_scalar(b"Br_eval", &self.eval_B_r);
    transcript.append_scalar(b"Cr_eval", &self.eval_C_r);
    transcript.append_message(label, b"R1CSInstanceEvals_end");
  }
}
impl R1CSInstance {
  /// Constructs an instance from its dimensions and the three sparse matrices.
  pub fn new(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    A: SparseMatPolynomial,
    B: SparseMatPolynomial,
    C: SparseMatPolynomial,
  ) -> Self {
    R1CSInstance {
      num_cons,
      num_vars,
      num_inputs,
      A,
      B,
      C,
    }
  }

  /// Number of witness variables.
  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  /// Number of constraints.
  pub fn get_num_cons(&self) -> usize {
    self.num_cons
  }

  /// Returns the sizes of the three matrix polynomials, used to size
  /// commitment generators.
  pub fn size(&self) -> R1CSInstanceSize {
    R1CSInstanceSize {
      size_A: self.A.size(),
      size_B: self.B.size(),
      size_C: self.C.size(),
    }
  }

  /// Produces a synthetic, satisfiable R1CS instance together with a
  /// satisfying variable assignment and public inputs.
  ///
  /// `num_cons` and `num_vars` must be powers of two, and
  /// `num_inputs + 1 <= num_vars`.
  pub fn produce_synthetic_r1cs(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
  ) -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
    let mut csprng: OsRng = OsRng;

    // assert num_cons and num_vars are power of 2
    assert_eq!(num_cons.log2().pow2(), num_cons);
    assert_eq!(num_vars.log2().pow2(), num_vars);

    // num_inputs + 1 <= num_vars
    assert!(num_inputs + 1 <= num_vars);

    // z is organized as [vars,1,io]
    let size_z = num_vars + num_inputs + 1;

    // produce a random satisfying assignment
    let Z = {
      let mut Z: Vec<Scalar> = (0..size_z)
        .map(|_i| Scalar::random(&mut csprng))
        .collect::<Vec<Scalar>>();
      Z[num_vars] = Scalar::one(); // set the constant term to 1
      Z
    };

    // three sparse matrices
    let mut A: Vec<SparseMatEntry> = Vec::new();
    let mut B: Vec<SparseMatEntry> = Vec::new();
    let mut C: Vec<SparseMatEntry> = Vec::new();
    let one = Scalar::one();
    for i in 0..num_cons {
      let A_idx = i % size_z;
      let B_idx = (i + 2) % size_z;
      A.push(SparseMatEntry::new(i, A_idx, one));
      B.push(SparseMatEntry::new(i, B_idx, one));
      let AB_val = Z[A_idx] * Z[B_idx];

      let C_idx = (i + 3) % size_z;
      let C_val = Z[C_idx];

      // scale the C entry so that constraint i holds under Z; if Z[C_idx] is
      // zero, place the product on the constant column (value 1) instead
      if C_val == Scalar::zero() {
        C.push(SparseMatEntry::new(i, num_vars, AB_val));
      } else {
        C.push(SparseMatEntry::new(
          i,
          C_idx,
          AB_val * C_val.invert().unwrap(),
        ));
      }
    }

    let num_poly_vars_x = num_cons.log2();
    let num_poly_vars_y = (2 * num_vars).log2();
    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);

    let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, poly_A, poly_B, poly_C);

    // sanity check: the produced assignment must satisfy the instance
    assert!(inst.is_sat(&Z[0..num_vars].to_vec(), &Z[num_vars + 1..].to_vec()));

    (inst, Z[0..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
  }

  /// Checks whether `(vars, input)` satisfies every constraint, i.e.,
  /// Az ∘ Bz = Cz for z = [vars, 1, input].
  pub fn is_sat(&self, vars: &Vec<Scalar>, input: &Vec<Scalar>) -> bool {
    assert_eq!(vars.len(), self.num_vars);
    assert_eq!(input.len(), self.num_inputs);

    // z = [vars, 1, input]
    let z = {
      let mut z = vars.clone();
      z.push(Scalar::one()); // constant term
      z.extend(input);
      z
    };

    // verify if Az * Bz - Cz = [0...]
    let Az = self
      .A
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    let Bz = self
      .B
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    let Cz = self
      .C
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);

    assert_eq!(Az.len(), self.num_cons);
    assert_eq!(Bz.len(), self.num_cons);
    assert_eq!(Cz.len(), self.num_cons);

    // every constraint must hold; short-circuits on the first violation
    (0..self.num_cons).all(|i| Az[i] * Bz[i] == Cz[i])
  }

  /// Computes (Az, Bz, Cz) as dense multilinear polynomials over the
  /// constraint index.
  pub fn multiply_vec(
    &self,
    num_rows: usize,
    num_cols: usize,
    z: &Vec<Scalar>,
  ) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
    assert_eq!(num_rows, self.num_cons);
    assert_eq!(z.len(), num_cols);
    assert!(num_cols > self.num_vars);
    (
      DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
      DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
      DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
    )
  }

  /// Contracts each matrix against `evals` (an evaluation table over the row
  /// variables), producing one column-indexed table per matrix.
  pub fn compute_eval_table_sparse(
    &self,
    num_rows: usize,
    num_cols: usize,
    evals: &Vec<Scalar>,
  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
    assert_eq!(num_rows, self.num_cons);
    assert!(num_cols > self.num_vars);

    let evals_A = self.A.compute_eval_table_sparse(&evals, num_rows, num_cols);
    let evals_B = self.B.compute_eval_table_sparse(&evals, num_rows, num_cols);
    let evals_C = self.C.compute_eval_table_sparse(&evals, num_rows, num_cols);

    (evals_A, evals_B, evals_C)
  }

  /// Evaluates A, B, and C at the point (rx, ry) using precomputed
  /// equality-polynomial tables for rx and ry.
  pub fn evaluate_with_tables(
    &self,
    evals_rx: &Vec<Scalar>,
    evals_ry: &Vec<Scalar>,
  ) -> R1CSInstanceEvals {
    R1CSInstanceEvals {
      eval_A_r: self.A.evaluate_with_tables(evals_rx, evals_ry),
      eval_B_r: self.B.evaluate_with_tables(evals_rx, evals_ry),
      eval_C_r: self.C.evaluate_with_tables(evals_rx, evals_ry),
    }
  }

  /// Commits to the three matrices jointly; returns the public commitment
  /// and the prover-side decommitment (the dense representation).
  pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
    assert_eq!(self.A.get_num_nz_entries(), self.B.get_num_nz_entries());
    assert_eq!(self.A.get_num_nz_entries(), self.C.get_num_nz_entries());
    let (comm, dense) =
      SparseMatPolynomial::multi_commit(&vec![&self.A, &self.B, &self.C], &gens.gens);

    let r1cs_comm = R1CSCommitment {
      num_cons: self.num_cons,
      num_vars: self.num_vars,
      num_inputs: self.num_inputs,
      comm,
    };

    let r1cs_decomm = R1CSDecommitment { dense };

    (r1cs_comm, r1cs_decomm)
  }
}
/// A proof of the claimed evaluations of the R1CS matrices A, B, C at a point
/// (rx, ry), checked against a commitment to the instance.
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSEvalProof {
  proof: SparseMatPolyEvalProof,
}

impl R1CSEvalProof {
  /// Proves that the committed matrices evaluate to `evals` at (rx, ry).
  /// `random_tape` is a transcript used as a source of prover randomness.
  pub fn prove(
    decomm: &R1CSDecommitment,
    rx: &Vec<Scalar>, // point at which the polynomial is evaluated
    ry: &Vec<Scalar>,
    evals: &R1CSInstanceEvals,
    gens: &R1CSCommitmentGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> R1CSEvalProof {
    let timer = Timer::new("R1CSEvalProof::prove");
    // the three evaluations are proven as a batch against the joint commitment
    let proof = SparseMatPolyEvalProof::prove(
      &decomm.dense,
      rx,
      ry,
      &vec![evals.eval_A_r, evals.eval_B_r, evals.eval_C_r],
      &gens.gens,
      transcript,
      random_tape,
    );
    timer.stop();
    R1CSEvalProof { proof }
  }

  /// Verifies the evaluation proof against `comm`.
  ///
  /// NOTE(review): a failed inner verification panics via `assert!` rather
  /// than returning `Err(ProofVerifyError)`; consider propagating the error.
  pub fn verify(
    &self,
    comm: &R1CSCommitment,
    rx: &Vec<Scalar>, // point at which the R1CS matrix polynomials are evaluated
    ry: &Vec<Scalar>,
    eval: &R1CSInstanceEvals,
    gens: &R1CSCommitmentGens,
    transcript: &mut Transcript,
  ) -> Result<(), ProofVerifyError> {
    assert!(self
      .proof
      .verify(
        &comm.comm,
        rx,
        ry,
        &vec![eval.eval_A_r, eval.eval_B_r, eval.eval_C_r],
        &gens.gens,
        transcript
      )
      .is_ok());
    Ok(())
  }
}

+ 632
- 0
src/r1csproof.rs

@ -0,0 +1,632 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::{
DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
use super::r1csinstance::{R1CSInstance, R1CSInstanceEvals};
use super::scalar::Scalar;
use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial};
use super::sumcheck::ZKSumcheckInstanceProof;
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use serde::{Deserialize, Serialize};
use std::iter;
#[cfg(test)]
use super::sparse_mlpoly::{SparseMatEntry, SparseMatPolynomial};
/// A zero-knowledge proof that an R1CS instance is satisfiable, built from
/// two sum-check instances plus commitments/openings tying them together.
#[derive(Serialize, Deserialize, Debug)]
pub struct R1CSProof {
  // commitment to the multilinear polynomial over the witness variables
  comm_vars: PolyCommitment,
  // ZK sum-check for sum_x eq(tau, x) * (Az(x)*Bz(x) - Cz(x)) = 0
  sc_proof_phase1: ZKSumcheckInstanceProof,
  // commitments to the claims (Az, Bz, Cz, Az*Bz) at rx, in that order
  claims_phase2: (
    CompressedGroup,
    CompressedGroup,
    CompressedGroup,
    CompressedGroup,
  ),
  // proof of knowledge of the Cz claim, and product proof for Az*Bz
  pok_claims_phase2: (KnowledgeProof, ProductProof),
  // equality proof for the final claim of sum-check #1
  proof_eq_sc_phase1: EqualityProof,
  // ZK sum-check for the combined claim r_A*Az + r_B*Bz + r_C*Cz
  sc_proof_phase2: ZKSumcheckInstanceProof,
  // commitment to the witness polynomial's evaluation at ry[1..]
  comm_vars_at_ry: CompressedGroup,
  // opening proof for comm_vars_at_ry against comm_vars
  proof_eval_vars_at_ry: PolyEvalProof,
  // equality proof for the final claim of sum-check #2
  proof_eq_sc_phase2: EqualityProof,
}
/// Pedersen commitment generators used inside the zero-knowledge sum-checks,
/// with generator vectors of sizes 1, 3, and 4.
pub struct R1CSSumcheckGens {
  gens_1: MultiCommitGens,
  gens_3: MultiCommitGens,
  gens_4: MultiCommitGens,
}

// TODO: fix passing gens_1_ref
impl R1CSSumcheckGens {
  /// Builds the sum-check generators; `gens_1` is cloned from the caller's
  /// reference (shared with the polynomial-commitment generators).
  pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self {
    let gens_1 = gens_1_ref.clone();
    let gens_3 = MultiCommitGens::new(3, label);
    let gens_4 = MultiCommitGens::new(4, label);
    R1CSSumcheckGens {
      gens_1,
      gens_3,
      gens_4,
    }
  }
}
/// Generators for the R1CS satisfiability proof: one set for the sum-checks
/// and one for the polynomial commitment to the witness.
pub struct R1CSGens {
  gens_sc: R1CSSumcheckGens,
  gens_pc: PolyCommitmentGens,
}

impl R1CSGens {
  /// Creates generators for an instance with `num_vars` variables;
  /// `_num_cons` is currently unused.
  pub fn new(_num_cons: usize, num_vars: usize, label: &'static [u8]) -> Self {
    let num_poly_vars = num_vars.log2();
    let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
    let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
    R1CSGens { gens_sc, gens_pc }
  }
}
impl R1CSProof {
  // Runs the first (zero-knowledge) sum-check, proving
  //   sum_x eq(tau, x) * (Az(x)*Bz(x) - Cz(x)) = 0.
  // The four evaluation tables are consumed (bound) in place. Returns the
  // sum-check proof, the challenge point rx, the final per-polynomial claims,
  // and the blind for the post-sum-check claim.
  fn prove_phase_one(
    num_rounds: usize,
    evals_tau: &mut DensePolynomial,
    evals_Az: &mut DensePolynomial,
    evals_Bz: &mut DensePolynomial,
    evals_Cz: &mut DensePolynomial,
    gens: &R1CSSumcheckGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
    // per-point combination: tau * (Az * Bz - Cz)
    let comb_func = |poly_A_comp: &Scalar,
                     poly_B_comp: &Scalar,
                     poly_C_comp: &Scalar,
                     poly_D_comp: &Scalar|
     -> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) };

    let (sc_proof_phase_one, r, claims, blind_claim_postsc) =
      ZKSumcheckInstanceProof::prove_cubic_with_additive_term(
        &Scalar::zero(), // claim is zero
        &Scalar::zero(), // blind for claim is also zero
        num_rounds,
        evals_tau,
        evals_Az,
        evals_Bz,
        evals_Cz,
        comb_func,
        &gens.gens_1,
        &gens.gens_4,
        transcript,
        random_tape,
      );

    (sc_proof_phase_one, r, claims, blind_claim_postsc)
  }

  // Runs the second (zero-knowledge) sum-check over the product of the
  // z-polynomial and the combined ABC evaluation table.
  fn prove_phase_two(
    num_rounds: usize,
    claim: &Scalar,
    blind_claim: &Scalar,
    evals_z: &mut DensePolynomial,
    evals_ABC: &mut DensePolynomial,
    gens: &R1CSSumcheckGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
    // per-point combination: Z * (r_A*A + r_B*B + r_C*C)
    let comb_func =
      |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
    let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad(
      claim,
      blind_claim,
      num_rounds,
      evals_z,
      evals_ABC,
      comb_func,
      &gens.gens_1,
      &gens.gens_3,
      transcript,
      random_tape,
    );

    (sc_proof_phase_two, r, claims, blind_claim_postsc)
  }

  // Domain-separation label appended to the transcript by both prover and
  // verifier; the two sides must use the same bytes.
  fn protocol_name() -> &'static [u8] {
    b"R1CS proof"
  }

  /// Produces a zero-knowledge proof that `inst` is satisfied by `(vars, input)`.
  /// Returns the proof together with the sum-check points (rx, ry), at which
  /// the caller must supply matrix evaluations to the verifier.
  pub fn prove(
    inst: &R1CSInstance,
    vars: Vec<Scalar>,
    input: &Vec<Scalar>,
    gens: &R1CSGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> (R1CSProof, Vec<Scalar>, Vec<Scalar>) {
    let timer_prove = Timer::new("R1CSProof::prove");
    transcript.append_protocol_name(R1CSProof::protocol_name());

    // we currently require the number of |inputs| + 1 to be at most number of vars
    assert!(input.len() + 1 <= vars.len());

    let timer_commit = Timer::new("polycommit");
    let (poly_vars, comm_vars, blinds_vars) = {
      // create a multilinear polynomial using the supplied assignment for variables
      let poly_vars = DensePolynomial::new(vars.clone());

      // produce a commitment to the satisfying assignment
      let (comm_vars, blinds_vars) = poly_vars.commit(true, &gens.gens_pc, Some(random_tape));

      // add the commitment to the prover's transcript
      comm_vars.append_to_transcript(b"poly_commitment", transcript);
      (poly_vars, comm_vars, blinds_vars)
    };
    timer_commit.stop();

    let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one");

    // append input to variables to create a single vector z
    // (z = [vars, 1, input, 0...]; the zero padding brings |z| to 2 * num_vars)
    let z = {
      let num_inputs = input.len();
      let num_vars = vars.len();
      let mut z = vars;
      z.extend(&vec![Scalar::one()]); // add constant term in z
      z.extend(input);
      z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
      z
    };

    // derive the verifier's challenge tau
    let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log2(), z.len().log2());
    let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);

    // compute the initial evaluation table for R(\tau, x)
    let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau.clone()).evals());
    let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
      inst.multiply_vec(inst.get_num_cons(), z.len(), &z);

    let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one(
      num_rounds_x,
      &mut poly_tau,
      &mut poly_Az,
      &mut poly_Bz,
      &mut poly_Cz,
      &gens.gens_sc,
      transcript,
      random_tape,
    );
    // after num_rounds_x bindings each table collapses to a single value
    assert_eq!(poly_tau.len(), 1);
    assert_eq!(poly_Az.len(), 1);
    assert_eq!(poly_Bz.len(), 1);
    assert_eq!(poly_Cz.len(), 1);
    timer_sc_proof_phase1.stop();

    let (tau_claim, Az_claim, Bz_claim, Cz_claim) =
      (&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]);

    // blinds for the claim commitments, drawn from the prover's random tape
    let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = (
      random_tape.challenge_scalar(b"Az_blind"),
      random_tape.challenge_scalar(b"Bz_blind"),
      random_tape.challenge_scalar(b"Cz_blind"),
      random_tape.challenge_scalar(b"prod_Az_Bz_blind"),
    );

    // proof of knowledge of the committed Cz claim
    let (pok_Cz_claim, comm_Cz_claim) = {
      KnowledgeProof::prove(
        &gens.gens_sc.gens_1,
        transcript,
        random_tape,
        &Cz_claim,
        &Cz_blind,
      )
    };

    // product proof tying comm_Az_claim * comm_Bz_claim to their product commitment
    let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = {
      let prod = Az_claim * Bz_claim;
      ProductProof::prove(
        &gens.gens_sc.gens_1,
        transcript,
        random_tape,
        &Az_claim,
        &Az_blind,
        &Bz_claim,
        &Bz_blind,
        &prod,
        &prod_Az_Bz_blind,
      )
    };

    // the verifier appends these in the same order; keep them in sync
    comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
    comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
    comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
    comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);

    // prove the final step of sum-check #1
    let taus_bound_rx = tau_claim;
    let blind_expected_claim_postsc1 = taus_bound_rx * (&prod_Az_Bz_blind - &Cz_blind);
    let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
    let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
      &gens.gens_sc.gens_1,
      transcript,
      random_tape,
      &claim_post_phase1,
      &blind_expected_claim_postsc1,
      &claim_post_phase1,
      &blind_claim_postsc1,
    );

    let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two");
    // combine the three claims into a single claim
    // NOTE(review): the label misspelling "challenege_*" is load-bearing; the
    // verifier uses the same bytes, so do not "fix" one side alone.
    let r_A = transcript.challenge_scalar(b"challenege_Az");
    let r_B = transcript.challenge_scalar(b"challenege_Bz");
    let r_C = transcript.challenge_scalar(b"challenege_Cz");
    let claim_phase2 = &r_A * Az_claim + &r_B * Bz_claim + &r_C * Cz_claim;
    let blind_claim_phase2 = &r_A * Az_blind + &r_B * Bz_blind + &r_C * Cz_blind;

    let evals_ABC = {
      // compute the initial evaluation table for R(\tau, x)
      let evals_rx = EqPolynomial::new(rx.clone()).evals();
      let (evals_A, evals_B, evals_C) =
        inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx);

      assert_eq!(evals_A.len(), evals_B.len());
      assert_eq!(evals_A.len(), evals_C.len());
      // random linear combination of the three tables
      (0..evals_A.len())
        .map(|i| &r_A * &evals_A[i] + &r_B * &evals_B[i] + &r_C * &evals_C[i])
        .collect::<Vec<Scalar>>()
    };

    // another instance of the sum-check protocol
    let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two(
      num_rounds_y,
      &claim_phase2,
      &blind_claim_phase2,
      &mut DensePolynomial::new(z),
      &mut DensePolynomial::new(evals_ABC),
      &gens.gens_sc,
      transcript,
      random_tape,
    );
    timer_sc_proof_phase2.stop();

    let timer_polyeval = Timer::new("polyeval");
    // ry[0] selects between witness and input halves of z; the witness
    // polynomial is evaluated at the remaining coordinates
    let eval_vars_at_ry = poly_vars.evaluate(&ry[1..].to_vec());
    let blind_eval = random_tape.challenge_scalar(b"blind_eval");
    let (proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove(
      &poly_vars,
      Some(&blinds_vars),
      &ry[1..].to_vec(),
      &eval_vars_at_ry,
      Some(&blind_eval),
      &gens.gens_pc,
      transcript,
      random_tape,
    );
    timer_polyeval.stop();

    // prove the final step of sum-check #2
    let blind_eval_Z_at_ry = (Scalar::one() - &ry[0]) * blind_eval;
    let blind_expected_claim_postsc2 = &claims_phase2[1] * &blind_eval_Z_at_ry;
    let claim_post_phase2 = &claims_phase2[0] * &claims_phase2[1];
    let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove(
      &gens.gens_pc.gens.gens_1,
      transcript,
      random_tape,
      &claim_post_phase2,
      &blind_expected_claim_postsc2,
      &claim_post_phase2,
      &blind_claim_postsc2,
    );

    timer_prove.stop();

    (
      R1CSProof {
        comm_vars,
        sc_proof_phase1,
        claims_phase2: (
          comm_Az_claim,
          comm_Bz_claim,
          comm_Cz_claim,
          comm_prod_Az_Bz_claims,
        ),
        pok_claims_phase2: (pok_Cz_claim, proof_prod),
        proof_eq_sc_phase1,
        sc_proof_phase2,
        comm_vars_at_ry,
        proof_eval_vars_at_ry,
        proof_eq_sc_phase2,
      },
      rx,
      ry,
    )
  }

  /// Verifies the proof against the instance dimensions, the public `input`,
  /// and the claimed matrix evaluations `evals` at (rx, ry). Returns (rx, ry)
  /// so the caller can check `evals` against the committed instance.
  ///
  /// NOTE(review): intermediate failures panic via `assert!`/`unwrap()`
  /// instead of returning `Err(ProofVerifyError)`.
  pub fn verify(
    &self,
    num_vars: usize,
    num_cons: usize,
    input: &Vec<Scalar>,
    evals: &R1CSInstanceEvals,
    transcript: &mut Transcript,
    gens: &R1CSGens,
  ) -> Result<(Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
    transcript.append_protocol_name(R1CSProof::protocol_name());

    let n = num_vars;
    // add the commitment to the verifier's transcript
    self
      .comm_vars
      .append_to_transcript(b"poly_commitment", transcript);

    let (num_rounds_x, num_rounds_y) = (num_cons.log2(), (2 * num_vars).log2());

    // derive the verifier's challenge tau
    let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);

    // verify the first sum-check instance
    // (the claim is a commitment to zero with a zero blind)
    let claim_phase1 = Scalar::zero()
      .commit(&Scalar::zero(), &gens.gens_sc.gens_1)
      .compress();
    let (comm_claim_post_phase1, rx) = self
      .sc_proof_phase1
      .verify(
        &claim_phase1,
        num_rounds_x,
        3,
        &gens.gens_sc.gens_1,
        &gens.gens_sc.gens_4,
        transcript,
      )
      .unwrap();

    // perform the intermediate sum-check test with claimed Az, Bz, and Cz
    let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2;
    let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;

    assert!(pok_Cz_claim
      .verify(&gens.gens_sc.gens_1, transcript, &comm_Cz_claim)
      .is_ok());
    assert!(proof_prod
      .verify(
        &gens.gens_sc.gens_1,
        transcript,
        &comm_Az_claim,
        &comm_Bz_claim,
        &comm_prod_Az_Bz_claims
      )
      .is_ok());

    // mirror the prover's transcript appends, in the same order
    comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
    comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
    comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
    comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);

    // eq(tau, rx), computed directly by the verifier
    let taus_bound_rx: Scalar = (0..rx.len())
      .map(|i| &rx[i] * &tau[i] + (&Scalar::one() - &rx[i]) * (&Scalar::one() - &tau[i]))
      .product();
    let expected_claim_post_phase1 = (&taus_bound_rx
      * (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
    .compress();

    // verify proof that expected_claim_post_phase1 == claim_post_phase1
    assert!(self
      .proof_eq_sc_phase1
      .verify(
        &gens.gens_sc.gens_1,
        transcript,
        &expected_claim_post_phase1,
        &comm_claim_post_phase1,
      )
      .is_ok());

    // derive three public challenges and then derive a joint claim
    // (labels intentionally match the prover's misspelled "challenege_*")
    let r_A = transcript.challenge_scalar(b"challenege_Az");
    let r_B = transcript.challenge_scalar(b"challenege_Bz");
    let r_C = transcript.challenge_scalar(b"challenege_Cz");

    // r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
    let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
      iter::once(&r_A)
        .chain(iter::once(&r_B))
        .chain(iter::once(&r_C)),
      iter::once(&comm_Az_claim)
        .chain(iter::once(&comm_Bz_claim))
        .chain(iter::once(&comm_Cz_claim))
        .map(|pt| pt.decompress().unwrap())
        .collect::<Vec<GroupElement>>(),
    )
    .compress();

    // verify the joint claim with a sum-check protocol
    let (comm_claim_post_phase2, ry) = self
      .sc_proof_phase2
      .verify(
        &comm_claim_phase2,
        num_rounds_y,
        2,
        &gens.gens_sc.gens_1,
        &gens.gens_sc.gens_3,
        transcript,
      )
      .unwrap();

    // verify Z(ry) proof against the initial commitment
    assert!(self
      .proof_eval_vars_at_ry
      .verify(
        &gens.gens_pc,
        transcript,
        &ry[1..].to_vec(),
        &self.comm_vars_at_ry,
        &self.comm_vars
      )
      .is_ok());

    // evaluate the input polynomial [1, input] at ry[1..] via a sparse poly
    let poly_input_eval = {
      // constant term
      let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
      //remaining inputs
      input_as_sparse_poly_entries.extend(
        (0..input.len())
          .map(|i| SparsePolyEntry::new(i + 1, input[i]))
          .collect::<Vec<SparsePolyEntry>>(),
      );
      SparsePolynomial::new(n.log2(), input_as_sparse_poly_entries).evaluate(&ry[1..].to_vec())
    };

    // compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
    let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
      iter::once(Scalar::one() - &ry[0]).chain(iter::once(ry[0])),
      iter::once(&self.comm_vars_at_ry.decompress().unwrap()).chain(iter::once(
        &poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
      )),
    );

    // perform the final check in the second sum-check protocol
    let (eval_A_r, eval_B_r, eval_C_r) = evals.get_evaluations();
    let expected_claim_post_phase2 =
      (&(&r_A * &eval_A_r + &r_B * &eval_B_r + &r_C * &eval_C_r) * comm_eval_Z_at_ry).compress();
    // verify proof that expected_claim_post_phase1 == claim_post_phase1
    assert!(self
      .proof_eq_sc_phase2
      .verify(
        &gens.gens_sc.gens_1,
        transcript,
        &expected_claim_post_phase2,
        &comm_claim_post_phase2,
      )
      .is_ok());

    Ok((rx, ry))
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use rand::rngs::OsRng;

  // Builds a tiny hand-encoded R1CS instance (dimensions padded to powers of
  // two) together with a satisfying assignment and inputs.
  fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
    // three constraints over five variables Z1, Z2, Z3, Z4, and Z5
    // rounded to the nearest power of two
    let num_cons = 128;
    let num_vars = 256;
    let num_inputs = 2;

    // encode the above constraints into three matrices
    let mut A: Vec<SparseMatEntry> = Vec::new();
    let mut B: Vec<SparseMatEntry> = Vec::new();
    let mut C: Vec<SparseMatEntry> = Vec::new();

    let one = Scalar::one();
    // constraint 0 entries
    // (Z1 + Z2) * I0 - Z3 = 0;
    // column num_vars is the constant 1; columns num_vars+1.. are the inputs
    A.push(SparseMatEntry::new(0, 0, one));
    A.push(SparseMatEntry::new(0, 1, one));
    B.push(SparseMatEntry::new(0, num_vars + 1, one));
    C.push(SparseMatEntry::new(0, 2, one));

    // constraint 1 entries
    // (Z1 + I1) * (Z3) - Z4 = 0
    A.push(SparseMatEntry::new(1, 0, one));
    A.push(SparseMatEntry::new(1, num_vars + 2, one));
    B.push(SparseMatEntry::new(1, 2, one));
    C.push(SparseMatEntry::new(1, 3, one));

    // constraint 3 entries
    // Z5 * 1 - 0 = 0
    A.push(SparseMatEntry::new(2, 4, one));
    B.push(SparseMatEntry::new(2, num_vars, one));

    let num_vars_x = num_cons.log2();
    let num_vars_y = (2 * num_vars).log2();
    let poly_A = SparseMatPolynomial::new(num_vars_x, num_vars_y, A);
    let poly_B = SparseMatPolynomial::new(num_vars_x, num_vars_y, B);
    let poly_C = SparseMatPolynomial::new(num_vars_x, num_vars_y, C);

    let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, poly_A, poly_B, poly_C);

    // compute a satisfying assignment
    let mut csprng: OsRng = OsRng;
    let i0 = Scalar::random(&mut csprng);
    let i1 = Scalar::random(&mut csprng);
    let z1 = Scalar::random(&mut csprng);
    let z2 = Scalar::random(&mut csprng);
    let z3 = (z1 + z2) * i0; // constraint 1: (Z1 + Z2) * I0 - Z3 = 0;
    let z4 = (z1 + i1) * z3; // constraint 2: (Z1 + I1) * (Z3) - Z4 = 0
    let z5 = Scalar::zero(); //constraint 3

    // pad the assignment out to the full (power-of-two) variable count
    let mut vars = vec![Scalar::zero(); num_vars];
    vars[0] = z1;
    vars[1] = z2;
    vars[2] = z3;
    vars[3] = z4;
    vars[4] = z5;

    let mut input = vec![Scalar::zero(); num_inputs];
    input[0] = i0;
    input[1] = i1;

    (inst, vars, input)
  }

  #[test]
  fn test_tiny_r1cs() {
    let (inst, vars, input) = tests::produce_tiny_r1cs();
    let is_sat = inst.is_sat(&vars, &input);
    assert_eq!(is_sat, true);
  }

  #[test]
  fn test_synthetic_r1cs() {
    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
    let is_sat = inst.is_sat(&vars, &input);
    assert_eq!(is_sat, true);
  }

  // End-to-end: prove a synthetic instance, then verify with the matrix
  // evaluations computed at the sum-check points returned by the prover.
  #[test]
  pub fn check_r1cs_proof() {
    let num_vars = 1024;
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    let gens = R1CSGens::new(num_cons, num_vars, b"test-m");

    // seed the prover's randomness tape with a fresh random scalar
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"proof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, rx, ry) = R1CSProof::prove(
      &inst,
      vars,
      &input,
      &gens,
      &mut prover_transcript,
      &mut random_tape,
    );

    let eval_table_rx = EqPolynomial::new(rx).evals();
    let eval_table_ry = EqPolynomial::new(ry).evals();
    let inst_evals = inst.evaluate_with_tables(&eval_table_rx, &eval_table_ry);

    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(
        inst.get_num_vars(),
        inst.get_num_cons(),
        &input,
        &inst_evals,
        &mut verifier_transcript,
        &gens,
      )
      .is_ok());
  }
}

+ 48
- 0
src/scalar.rs

@ -0,0 +1,48 @@
// The scalar type used throughout the library; implemented in `scalar_25519`.
pub type Scalar = super::scalar_25519::Scalar;
// curve25519-dalek's scalar type, used when interoperating with dalek's
// group operations.
pub type ScalarBytes = curve25519_dalek::scalar::Scalar;

/// Conversion of primitive values into `Scalar`.
pub trait ScalarFromPrimitives {
  fn to_scalar(self) -> Scalar;
}

impl ScalarFromPrimitives for usize {
  #[inline]
  // NOTE(review): this sums `self` ones, i.e., O(self) scalar additions.
  // Fine for small counts; consider a direct conversion if large values are
  // ever converted.
  fn to_scalar(self) -> Scalar {
    (0..self).map(|_i| Scalar::one()).sum()
  }
}

impl ScalarFromPrimitives for bool {
  #[inline]
  // true -> 1, false -> 0
  fn to_scalar(self) -> Scalar {
    if self {
      Scalar::one()
    } else {
      Scalar::zero()
    }
  }
}
/// Conversions from the local `Scalar` type to `ScalarBytes`
/// (curve25519-dalek scalars), element-wise for collections.
pub trait ScalarBytesFromScalar {
  fn decompress_scalar(s: &Scalar) -> ScalarBytes;
  fn decompress_vec(v: &Vec<Scalar>) -> Vec<ScalarBytes>;
  fn decompress_seq(s: &[Scalar]) -> Vec<ScalarBytes>;
}

impl ScalarBytesFromScalar for Scalar {
  /// Converts one scalar by reducing its canonical byte encoding modulo the
  /// dalek scalar-field order.
  fn decompress_scalar(s: &Scalar) -> ScalarBytes {
    ScalarBytes::from_bytes_mod_order(s.to_bytes())
  }

  /// Converts a vector of scalars; delegates to `decompress_seq` to avoid
  /// duplicating the element-wise logic.
  fn decompress_vec(v: &Vec<Scalar>) -> Vec<ScalarBytes> {
    Scalar::decompress_seq(v)
  }

  /// Converts a slice of scalars element-wise.
  fn decompress_seq(s: &[Scalar]) -> Vec<ScalarBytes> {
    s.iter().map(Scalar::decompress_scalar).collect()
  }
}

+ 1213
- 0
src/scalar_25519.rs
File diff suppressed because it is too large
View File


+ 1721
- 0
src/sparse_mlpoly.rs
File diff suppressed because it is too large
View File


+ 190
- 0
src/spartan.rs

@ -0,0 +1,190 @@
use super::dense_mlpoly::EqPolynomial;
use super::errors::ProofVerifyError;
use super::r1csinstance::{
R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance,
R1CSInstanceEvals,
};
use super::r1csproof::{R1CSGens, R1CSProof};
use super::scalar::Scalar;
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
/// Public generators for producing and verifying `SpartanProof`s: one set
/// for the satisfiability proof and one for the R1CS evaluation proof.
pub struct SpartanGens {
  gens_r1cs_sat: R1CSGens,
  gens_r1cs_eval: R1CSCommitmentGens,
}

impl SpartanGens {
  /// Packages the two generator sets into a single bundle.
  pub fn new(gens_r1cs_sat: R1CSGens, gens_r1cs_eval: R1CSCommitmentGens) -> SpartanGens {
    SpartanGens {
      gens_r1cs_sat,
      gens_r1cs_eval,
    }
  }
}
/// A Spartan proof: a satisfiability proof, the claimed matrix evaluations
/// at the sum-check point, and a proof that those evaluations are consistent
/// with the committed instance.
#[derive(Serialize, Deserialize, Debug)]
pub struct SpartanProof {
  r1cs_sat_proof: R1CSProof,
  inst_evals: R1CSInstanceEvals,
  r1cs_eval_proof: R1CSEvalProof,
}
impl SpartanProof {
  // Domain-separation label appended by both prover and verifier; the two
  // sides must use the same bytes.
  fn protocol_name() -> &'static [u8] {
    b"Spartan proof"
  }

  /// A public computation to create a commitment to an R1CS instance
  pub fn encode(
    inst: &R1CSInstance,
    gens: &R1CSCommitmentGens,
  ) -> (R1CSCommitment, R1CSDecommitment) {
    inst.commit(gens)
  }

  /// A method to produce a proof of the satisfiability of an R1CS instance
  pub fn prove(
    inst: &R1CSInstance,
    decomm: &R1CSDecommitment,
    vars: Vec<Scalar>,
    input: &Vec<Scalar>,
    gens: &SpartanGens,
    transcript: &mut Transcript,
  ) -> SpartanProof {
    // we create a Transcript object seeded with a random Scalar
    // to aid the prover produce its randomness
    let mut random_tape = {
      let mut csprng: OsRng = OsRng;
      let mut tape = Transcript::new(b"SpartanProof");
      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
      tape
    };

    transcript.append_protocol_name(SpartanProof::protocol_name());
    let (r1cs_sat_proof, rx, ry) = {
      let (proof, rx, ry) = R1CSProof::prove(
        inst,
        vars,
        input,
        &gens.gens_r1cs_sat,
        transcript,
        &mut random_tape,
      );
      // log the serialized size of the satisfiability proof
      let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
      Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
      (proof, rx, ry)
    };

    // We send evaluations of A, B, C at r = (rx, ry) as claims
    // to enable the verifier complete the first sum-check
    let timer_eval = Timer::new("eval_sparse_polys");
    let inst_evals = {
      let eval_table_rx = EqPolynomial::new(rx.clone()).evals();
      let eval_table_ry = EqPolynomial::new(ry.clone()).evals();
      inst.evaluate_with_tables(&eval_table_rx, &eval_table_ry)
    };
    // absorb the claimed evaluations before proving them; the verifier
    // mirrors this append in the same position
    inst_evals.append_to_transcript(b"r1cs_inst_evals", transcript);
    timer_eval.stop();

    let r1cs_eval_proof = {
      let proof = R1CSEvalProof::prove(
        decomm,
        &rx,
        &ry,
        &inst_evals,
        &gens.gens_r1cs_eval,
        transcript,
        &mut random_tape,
      );
      // log the serialized size of the evaluation proof
      let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
      Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
      proof
    };

    SpartanProof {
      r1cs_sat_proof,
      inst_evals,
      r1cs_eval_proof,
    }
  }

  /// A method to verify the proof of the satisfiability of an R1CS instance
  ///
  /// NOTE(review): intermediate failures panic via `assert!`/`unwrap()`
  /// instead of returning `Err(ProofVerifyError)`.
  pub fn verify(
    &self,
    comm: &R1CSCommitment,
    input: &Vec<Scalar>,
    transcript: &mut Transcript,
    gens: &SpartanGens,
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(SpartanProof::protocol_name());

    let timer_sat_proof = Timer::new("verify_sat_proof");
    assert_eq!(input.len(), comm.get_num_inputs());
    // verify the satisfiability proof; it yields the sum-check points (rx, ry)
    let (rx, ry) = self
      .r1cs_sat_proof
      .verify(
        comm.get_num_vars(),
        comm.get_num_cons(),
        input,
        &self.inst_evals,
        transcript,
        &gens.gens_r1cs_sat,
      )
      .unwrap();
    timer_sat_proof.stop();

    let timer_eval_proof = Timer::new("verify_eval_proof");
    // mirror the prover's transcript append of the claimed evaluations
    self
      .inst_evals
      .append_to_transcript(b"r1cs_inst_evals", transcript);
    // check that the claimed evaluations match the committed instance
    assert!(self
      .r1cs_eval_proof
      .verify(
        comm,
        &rx,
        &ry,
        &self.inst_evals,
        &gens.gens_r1cs_eval,
        transcript
      )
      .is_ok());
    timer_eval_proof.stop();
    Ok(())
  }
}
#[cfg(test)]
mod tests {
  use super::*;

  // End-to-end: synthesize an instance, commit to it, prove satisfiability,
  // and verify the proof against the commitment.
  #[test]
  pub fn check_spartan_proof() {
    let num_vars = 256;
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    let r1cs_size = inst.size();
    let gens_r1cs_eval = R1CSCommitmentGens::new(&r1cs_size, b"gens_r1cs_eval");

    // create a commitment to R1CSInstance
    let (comm, decomm) = SpartanProof::encode(&inst, &gens_r1cs_eval);

    let gens_r1cs_sat = R1CSGens::new(num_cons, num_vars, b"gens_r1cs_sat");
    let gens = SpartanGens::new(gens_r1cs_sat, gens_r1cs_eval);

    let mut prover_transcript = Transcript::new(b"example");
    let proof = SpartanProof::prove(&inst, &decomm, vars, &input, &gens, &mut prover_transcript);

    // prover and verifier transcripts must start from the same seed
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&comm, &input, &mut verifier_transcript, &gens)
      .is_ok());
  }
}

+ 911
- 0
src/sumcheck.rs

@ -0,0 +1,911 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::nizk::DotProductProof;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use super::unipoly::{CompressedUniPoly, UniPoly};
use itertools::izip;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
use std::iter;
/// A non-zero-knowledge sum-check proof: one compressed univariate polynomial
/// (the prover's message) per round of the protocol.
#[derive(Serialize, Deserialize, Debug)]
pub struct SumcheckInstanceProof {
  compressed_polys: Vec<CompressedUniPoly>,
}
impl SumcheckInstanceProof {
  /// Wraps the per-round compressed polynomials into a proof object.
  pub fn new(compressed_polys: Vec<CompressedUniPoly>) -> SumcheckInstanceProof {
    SumcheckInstanceProof { compressed_polys }
  }

  /// Verifies the sum-check proof for the given `claim` over `num_rounds`
  /// rounds, checking each round polynomial against `degree_bound`.
  /// On success returns the final evaluation and the vector of challenges.
  /// Malformed proofs trip the `assert`s (panic), matching existing behavior.
  pub fn verify(
    &self,
    claim: Scalar,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut Transcript,
  ) -> Result<(Scalar, Vec<Scalar>), ProofVerifyError> {
    let mut e = claim;
    let mut r: Vec<Scalar> = Vec::with_capacity(num_rounds);

    // verify that there is a univariate polynomial for each round
    assert_eq!(self.compressed_polys.len(), num_rounds);

    for compressed_poly in &self.compressed_polys {
      // decompression solves for the linear term using the running claim `e`
      let poly = compressed_poly.decompress(&e);

      // verify degree bound
      assert_eq!(poly.degree(), degree_bound);

      // sum-check identity: G_k(0) + G_k(1) must equal the running claim
      assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e);

      // append the prover's message to the transcript
      poly.append_to_transcript(b"poly", transcript);

      // derive the verifier's challenge for the next round
      let r_i = transcript.challenge_scalar(b"challenge_nextround");
      r.push(r_i);

      // the next round's claim is this polynomial evaluated at the challenge
      e = poly.evaluate(&r_i);
    }

    Ok((e, r))
  }
}
/// A zero-knowledge sum-check proof: per-round commitments to the prover's
/// round polynomials and claimed evaluations, plus dot-product proofs that
/// tie each polynomial commitment to the claims.
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKSumcheckInstanceProof {
  comm_polys: Vec<CompressedGroup>,
  comm_evals: Vec<CompressedGroup>,
  proofs: Vec<DotProductProof>,
}
impl ZKSumcheckInstanceProof {
  /// Packages the per-round commitments and dot-product proofs into a proof object.
  pub fn new(
    comm_polys: Vec<CompressedGroup>,
    comm_evals: Vec<CompressedGroup>,
    proofs: Vec<DotProductProof>,
  ) -> Self {
    ZKSumcheckInstanceProof {
      comm_polys,
      comm_evals,
      proofs,
    }
  }

  /// Verifies a zero-knowledge sum-check proof against `comm_claim`, a
  /// commitment to the claimed sum. On success returns the commitment to the
  /// final evaluation and the vector of round challenges.
  /// NOTE(review): malformed proofs trip `assert`s (panic) rather than Err.
  pub fn verify(
    &self,
    comm_claim: &CompressedGroup,
    num_rounds: usize,
    degree_bound: usize,
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
  ) -> Result<(CompressedGroup, Vec<Scalar>), ProofVerifyError> {
    // verify degree bound
    assert_eq!(gens_n.n, degree_bound + 1);

    // verify that there is a univariate polynomial for each round
    assert_eq!(self.comm_polys.len(), num_rounds);
    assert_eq!(self.comm_evals.len(), num_rounds);

    let mut r: Vec<Scalar> = Vec::new();
    for i in 0..self.comm_polys.len() {
      let comm_poly = &self.comm_polys[i];

      // append the prover's polynomial to the transcript
      comm_poly.append_to_transcript(b"comm_poly", transcript);

      // derive the verifier's challenge for the next round
      let r_i = transcript.challenge_scalar(b"challenge_nextround");

      // verify the proof of sum-check and evals
      let res = {
        // in round 0 the claim is the original commitment; afterwards it is
        // the previous round's evaluation commitment
        let comm_claim_per_round = if i == 0 {
          comm_claim
        } else {
          &self.comm_evals[i - 1]
        };
        let comm_eval = &self.comm_evals[i];

        // add two claims to transcript
        comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
        comm_eval.append_to_transcript(b"comm_eval", transcript);

        // produce two weights to batch the two claims into one
        let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

        // compute a weighted sum of the RHS (homomorphically, on the commitments)
        let comm_target = GroupElement::vartime_multiscalar_mul(
          w.iter(),
          iter::once(&comm_claim_per_round)
            .chain(iter::once(&comm_eval))
            .map(|pt| pt.decompress().unwrap())
            .collect::<Vec<GroupElement>>(),
        )
        .compress();

        let a = {
          // the vector to use to decommit for sum-check test:
          // <coeffs, (2, 1, 1, ...)> = poly(0) + poly(1)
          let a_sc = {
            let mut a = vec![Scalar::one(); degree_bound + 1];
            a[0] = a[0] + Scalar::one();
            a
          };

          // the vector to use to decommit for evaluation:
          // <coeffs, (1, r_i, r_i^2, ...)> = poly(r_i)
          let a_eval = {
            let mut a = vec![Scalar::one(); degree_bound + 1];
            for j in 1..a.len() {
              a[j] = &a[j - 1] * &r_i;
            }
            a
          };

          // take weighted sum of the two vectors using w
          assert_eq!(a_sc.len(), a_eval.len());
          (0..a_sc.len())
            .map(|i| &w[0] * &a_sc[i] + &w[1] * &a_eval[i])
            .collect::<Vec<Scalar>>()
        };

        self.proofs[i]
          .verify(
            gens_1,
            gens_n,
            transcript,
            &a,
            &self.comm_polys[i],
            &comm_target,
          )
          .is_ok()
      };

      assert!(res);

      r.push(r_i);
    }

    // the last evaluation commitment is the commitment to the final claim
    Ok((self.comm_evals[self.comm_evals.len() - 1], r))
  }
}
impl SumcheckInstanceProof {
  /// Runs the sum-check prover for a claim combining two multilinear tables
  /// via `comb_func` (degree 2 per round). Consumes the tables in place by
  /// binding one variable per round; returns the proof, the challenge vector,
  /// and the final (fully bound) entries [poly_A[0], poly_B[0]].
  pub fn prove_quad<F>(
    claim: &Scalar,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut Transcript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
  where
    F: Fn(&Scalar, &Scalar) -> Scalar,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut quad_polys: Vec<CompressedUniPoly> = Vec::new();
    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
        // eval 0: bound_func is A(low)
        eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i]);

        // eval 2: bound_func is -A(low) + 2*A(high)
        let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
        let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
        eval_point_2 = &eval_point_2 + comb_func(&poly_A_bound_point, &poly_B_bound_point);
      }

      // the eval at 1 is recovered from the running claim: G(1) = e - G(0)
      let evals = vec![eval_point_0, e - eval_point_0, eval_point_2];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_transcript(b"poly", transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar(b"challenge_nextround");
      r.push(r_j);
      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_j);
      poly_B.bound_poly_var_top(&r_j);
      e = poly.evaluate(&r_j);
      quad_polys.push(poly.compress());
    }

    (
      SumcheckInstanceProof::new(quad_polys),
      r,
      vec![poly_A[0], poly_B[0]],
    )
  }

  /// Same as `prove_quad` but for three tables combined by a degree-3
  /// `comb_func`; evaluates each round polynomial at 0, 2, and 3 and recovers
  /// the eval at 1 from the running claim.
  pub fn prove_cubic<F>(
    claim: &Scalar,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    poly_C: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut Transcript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
  where
    F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();
      let mut eval_point_3 = Scalar::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
        // eval 0: bound_func is A(low)
        eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);

        // eval 2: bound_func is -A(low) + 2*A(high)
        let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
        let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
        let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
        eval_point_2 = &eval_point_2
          + comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
          );

        // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
        let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
        let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
        let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
        eval_point_3 = &eval_point_3
          + comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
          );
      }

      let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_transcript(b"poly", transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar(b"challenge_nextround");
      r.push(r_j);
      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_j);
      poly_B.bound_poly_var_top(&r_j);
      poly_C.bound_poly_var_top(&r_j);
      e = poly.evaluate(&r_j);
      cubic_polys.push(poly.compress());
    }

    (
      SumcheckInstanceProof::new(cubic_polys),
      r,
      vec![poly_A[0], poly_B[0], poly_C[0]],
    )
  }

  /// Variant of `prove_cubic` with a fourth table (the additive term);
  /// `comb_func` is still degree 3 per round.
  pub fn prove_cubic_with_additive_term<F>(
    claim: &Scalar,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    poly_C: &mut DensePolynomial,
    poly_D: &mut DensePolynomial,
    comb_func: F,
    transcript: &mut Transcript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>)
  where
    F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar,
  {
    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
    for _j in 0..num_rounds {
      let mut eval_point_0 = Scalar::zero();
      let mut eval_point_2 = Scalar::zero();
      let mut eval_point_3 = Scalar::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
        // eval 0: bound_func is A(low)
        eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);

        // eval 2: bound_func is -A(low) + 2*A(high)
        let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
        let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
        let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
        let poly_D_bound_point = &poly_D[len + i] + &poly_D[len + i] - &poly_D[i];
        eval_point_2 = &eval_point_2
          + comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
            &poly_D_bound_point,
          );

        // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
        let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
        let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
        let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
        let poly_D_bound_point = &poly_D_bound_point + &poly_D[len + i] - &poly_D[i];
        eval_point_3 = &eval_point_3
          + comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
            &poly_D_bound_point,
          );
      }

      let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_transcript(b"poly", transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar(b"challenge_nextround");
      r.push(r_j);
      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_j);
      poly_B.bound_poly_var_top(&r_j);
      poly_C.bound_poly_var_top(&r_j);
      poly_D.bound_poly_var_top(&r_j);
      e = poly.evaluate(&r_j);
      cubic_polys.push(poly.compress());
    }

    (
      SumcheckInstanceProof::new(cubic_polys),
      r,
      vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]],
    )
  }

  /// Batched cubic sum-check over two families of instances:
  /// `poly_vec_par` shares a single C table across (A, B) pairs, while
  /// `poly_vec_seq` has an independent C per (A, B) pair. Per-instance round
  /// evaluations are combined with `coeffs` into one round polynomial.
  /// Returns the proof, the challenges, and the final bound values for both
  /// families.
  pub fn prove_cubic_batched<F>(
    claim: &Scalar,
    num_rounds: usize,
    poly_vec_par: (
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial>,
      &mut DensePolynomial,
    ),
    poly_vec_seq: (
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial>,
      &mut Vec<&mut DensePolynomial>,
    ),
    coeffs: &[Scalar],
    comb_func: F,
    transcript: &mut Transcript,
  ) -> (
    Self,
    Vec<Scalar>,
    (Vec<Scalar>, Vec<Scalar>, Scalar),
    (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
  )
  where
    F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
  {
    let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par;
    let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq;

    let mut e = *claim;
    let mut r: Vec<Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();

    for _j in 0..num_rounds {
      // per-instance evaluations at 0, 2, and 3 for this round
      let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new();

      // instances that share the single poly_C_par table
      for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) {
        let mut eval_point_0 = Scalar::zero();
        let mut eval_point_2 = Scalar::zero();
        let mut eval_point_3 = Scalar::zero();

        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
          eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]);

          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
          let poly_C_bound_point = &poly_C_par[len + i] + &poly_C_par[len + i] - &poly_C_par[i];

          eval_point_2 = &eval_point_2
            + comb_func(
              &poly_A_bound_point,
              &poly_B_bound_point,
              &poly_C_bound_point,
            );

          // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
          let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
          let poly_C_bound_point = &poly_C_bound_point + &poly_C_par[len + i] - &poly_C_par[i];

          eval_point_3 = &eval_point_3
            + comb_func(
              &poly_A_bound_point,
              &poly_B_bound_point,
              &poly_C_bound_point,
            );
        }

        evals.push((eval_point_0, eval_point_2, eval_point_3));
      }

      // instances with an independent poly_C each
      for (poly_A, poly_B, poly_C) in izip!(
        poly_A_vec_seq.iter(),
        poly_B_vec_seq.iter(),
        poly_C_vec_seq.iter()
      ) {
        let mut eval_point_0 = Scalar::zero();
        let mut eval_point_2 = Scalar::zero();
        let mut eval_point_3 = Scalar::zero();
        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
          eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
          let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
          eval_point_2 = &eval_point_2
            + comb_func(
              &poly_A_bound_point,
              &poly_B_bound_point,
              &poly_C_bound_point,
            );

          // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
          let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
          let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
          eval_point_3 = &eval_point_3
            + comb_func(
              &poly_A_bound_point,
              &poly_B_bound_point,
              &poly_C_bound_point,
            );
        }
        evals.push((eval_point_0, eval_point_2, eval_point_3));
      }

      // random-linear-combine the per-instance evaluations using coeffs
      let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum();
      let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum();
      let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum();

      let evals = vec![
        evals_combined_0,
        e - evals_combined_0,
        evals_combined_2,
        evals_combined_3,
      ];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      poly.append_to_transcript(b"poly", transcript);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar(b"challenge_nextround");
      r.push(r_j);

      // bound all tables to the verifier's challenge
      for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) {
        poly_A.bound_poly_var_top(&r_j);
        poly_B.bound_poly_var_top(&r_j);
      }
      poly_C_par.bound_poly_var_top(&r_j);

      for (poly_A, poly_B, poly_C) in izip!(
        poly_A_vec_seq.iter_mut(),
        poly_B_vec_seq.iter_mut(),
        poly_C_vec_seq.iter_mut()
      ) {
        poly_A.bound_poly_var_top(&r_j);
        poly_B.bound_poly_var_top(&r_j);
        poly_C.bound_poly_var_top(&r_j);
      }

      e = poly.evaluate(&r_j);
      cubic_polys.push(poly.compress());
    }

    // collect the fully bound values for both instance families
    let poly_A_par_final = (0..poly_A_vec_par.len())
      .map(|i| poly_A_vec_par[i][0])
      .collect();
    let poly_B_par_final = (0..poly_B_vec_par.len())
      .map(|i| poly_B_vec_par[i][0])
      .collect();
    let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]);

    let poly_A_seq_final = (0..poly_A_vec_seq.len())
      .map(|i| poly_A_vec_seq[i][0])
      .collect();
    let poly_B_seq_final = (0..poly_B_vec_seq.len())
      .map(|i| poly_B_vec_seq[i][0])
      .collect();
    let poly_C_seq_final = (0..poly_C_vec_seq.len())
      .map(|i| poly_C_vec_seq[i][0])
      .collect();
    let claims_dotp = (poly_A_seq_final, poly_B_seq_final, poly_C_seq_final);

    (
      SumcheckInstanceProof::new(cubic_polys),
      r,
      claims_prod,
      claims_dotp,
    )
  }
}
impl ZKSumcheckInstanceProof {
  /// Zero-knowledge sum-check prover for two tables combined by a degree-2
  /// `comb_func`. Round polynomials and claims are sent as Pedersen-style
  /// commitments; a dot-product proof per round ties the commitments to the
  /// sum-check relation. Blinds are drawn from `random_tape` (a transcript
  /// used as a private randomness source). Returns the proof, the challenges,
  /// the final bound values [poly_A[0], poly_B[0]], and the blind of the last
  /// evaluation commitment.
  pub fn prove_quad<F>(
    claim: &Scalar,
    blind_claim: &Scalar,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    comb_func: F,
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
  where
    F: Fn(&Scalar, &Scalar) -> Scalar,
  {
    // one blind per round for the polynomial commitment and one for the eval commitment
    let (blinds_poly, blinds_evals) = (
      random_tape.challenge_vector(b"blinds_poly", num_rounds),
      random_tape.challenge_vector(b"blinds_evals", num_rounds),
    );

    let mut claim_per_round = *claim;
    let mut comm_claim_per_round = claim_per_round.commit(&blind_claim, &gens_1).compress();

    let mut r: Vec<Scalar> = Vec::new();
    let mut comm_polys: Vec<CompressedGroup> = Vec::new();
    let mut comm_evals: Vec<CompressedGroup> = Vec::new();
    let mut proofs: Vec<DotProductProof> = Vec::new();

    for j in 0..num_rounds {
      // compute this round's polynomial from evals at 0 and 2 (eval at 1 is
      // recovered from the running claim) and commit to it
      let (poly, comm_poly) = {
        let mut eval_point_0 = Scalar::zero();
        let mut eval_point_2 = Scalar::zero();

        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
          eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i]);

          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
          eval_point_2 = &eval_point_2 + comb_func(&poly_A_bound_point, &poly_B_bound_point);
        }

        let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];
        let poly = UniPoly::from_evals(&evals);
        let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
        (poly, comm_poly)
      };

      // append the prover's message to the transcript
      comm_poly.append_to_transcript(b"comm_poly", transcript);
      comm_polys.push(comm_poly);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar(b"challenge_nextround");

      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_j);
      poly_B.bound_poly_var_top(&r_j);

      // produce a proof of sum-check and of evaluation
      let (proof, claim_next_round, comm_claim_next_round) = {
        let eval = poly.evaluate(&r_j);
        let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();

        // we need to prove the following under homomorphic commitments:
        // (1) poly(0) + poly(1) = claim_per_round
        // (2) poly(r_j) = eval

        // Our technique is to leverage dot product proofs:
        // (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
        // (2) we can prove: <poly_in_coeffs_form, (1, r_j, r^2_j, ..) = eval
        // for efficiency we batch them using random weights

        // add two claims to transcript
        comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
        comm_eval.append_to_transcript(b"comm_eval", transcript);

        // produce two weights
        let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

        // compute a weighted sum of the RHS
        let target = &w[0] * &claim_per_round + &w[1] * &eval;
        let comm_target = GroupElement::vartime_multiscalar_mul(
          w.iter(),
          iter::once(&comm_claim_per_round)
            .chain(iter::once(&comm_eval))
            .map(|pt| pt.decompress().unwrap())
            .collect::<Vec<GroupElement>>(),
        )
        .compress();

        // combine the blinds the same way so the combined commitment opens to `target`
        let blind = {
          let blind_sc = if j == 0 {
            blind_claim
          } else {
            &blinds_evals[j - 1]
          };
          let blind_eval = &blinds_evals[j];
          &w[0] * blind_sc + &w[1] * blind_eval
        };

        // sanity check: the homomorphic combination matches a direct commitment
        assert_eq!(target.commit(&blind, &gens_1).compress(), comm_target);

        let a = {
          // the vector to use to decommit for sum-check test
          let a_sc = {
            let mut a = vec![Scalar::one(); poly.degree() + 1];
            a[0] = a[0] + Scalar::one();
            a
          };

          // the vector to use to decommit for evaluation
          let a_eval = {
            let mut a = vec![Scalar::one(); poly.degree() + 1];
            for j in 1..a.len() {
              a[j] = &a[j - 1] * &r_j;
            }
            a
          };

          // take weighted sum of the two vectors using w
          assert_eq!(a_sc.len(), a_eval.len());
          (0..a_sc.len())
            .map(|i| &w[0] * &a_sc[i] + &w[1] * &a_eval[i])
            .collect::<Vec<Scalar>>()
        };

        let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
          gens_1,
          gens_n,
          transcript,
          random_tape,
          &poly.as_vec(),
          &blinds_poly[j],
          &a,
          &target,
          &blind,
        );

        (proof, eval, comm_eval)
      };

      claim_per_round = claim_next_round;
      comm_claim_per_round = comm_claim_next_round;

      proofs.push(proof);
      r.push(r_j);
      comm_evals.push(comm_claim_per_round);
    }

    (
      ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
      r,
      vec![poly_A[0], poly_B[0]],
      blinds_evals[num_rounds - 1],
    )
  }

  /// Zero-knowledge sum-check prover for four tables combined by a degree-3
  /// `comb_func`; otherwise identical in structure to `prove_quad` above.
  pub fn prove_cubic_with_additive_term<F>(
    claim: &Scalar,
    blind_claim: &Scalar,
    num_rounds: usize,
    poly_A: &mut DensePolynomial,
    poly_B: &mut DensePolynomial,
    poly_C: &mut DensePolynomial,
    poly_D: &mut DensePolynomial,
    comb_func: F,
    gens_1: &MultiCommitGens,
    gens_n: &MultiCommitGens,
    transcript: &mut Transcript,
    random_tape: &mut Transcript,
  ) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
  where
    F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar,
  {
    // one blind per round for the polynomial commitment and one for the eval commitment
    let (blinds_poly, blinds_evals) = (
      random_tape.challenge_vector(b"blinds_poly", num_rounds),
      random_tape.challenge_vector(b"blinds_evals", num_rounds),
    );

    let mut claim_per_round = *claim;
    let mut comm_claim_per_round = claim_per_round.commit(&blind_claim, &gens_1).compress();

    let mut r: Vec<Scalar> = Vec::new();
    let mut comm_polys: Vec<CompressedGroup> = Vec::new();
    let mut comm_evals: Vec<CompressedGroup> = Vec::new();
    let mut proofs: Vec<DotProductProof> = Vec::new();

    for j in 0..num_rounds {
      // compute this round's polynomial from evals at 0, 2, 3 and commit to it
      let (poly, comm_poly) = {
        let mut eval_point_0 = Scalar::zero();
        let mut eval_point_2 = Scalar::zero();
        let mut eval_point_3 = Scalar::zero();

        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
          eval_point_0 = &eval_point_0 + comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);

          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = &poly_A[len + i] + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B[len + i] + &poly_B[len + i] - &poly_B[i];
          let poly_C_bound_point = &poly_C[len + i] + &poly_C[len + i] - &poly_C[i];
          let poly_D_bound_point = &poly_D[len + i] + &poly_D[len + i] - &poly_D[i];
          eval_point_2 = &eval_point_2
            + comb_func(
              &poly_A_bound_point,
              &poly_B_bound_point,
              &poly_C_bound_point,
              &poly_D_bound_point,
            );

          // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
          let poly_A_bound_point = &poly_A_bound_point + &poly_A[len + i] - &poly_A[i];
          let poly_B_bound_point = &poly_B_bound_point + &poly_B[len + i] - &poly_B[i];
          let poly_C_bound_point = &poly_C_bound_point + &poly_C[len + i] - &poly_C[i];
          let poly_D_bound_point = &poly_D_bound_point + &poly_D[len + i] - &poly_D[i];
          eval_point_3 = &eval_point_3
            + comb_func(
              &poly_A_bound_point,
              &poly_B_bound_point,
              &poly_C_bound_point,
              &poly_D_bound_point,
            );
        }

        let evals = vec![
          eval_point_0,
          claim_per_round - eval_point_0,
          eval_point_2,
          eval_point_3,
        ];
        let poly = UniPoly::from_evals(&evals);
        let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
        (poly, comm_poly)
      };

      // append the prover's message to the transcript
      comm_poly.append_to_transcript(b"comm_poly", transcript);
      comm_polys.push(comm_poly);

      // derive the verifier's challenge for the next round
      let r_j = transcript.challenge_scalar(b"challenge_nextround");

      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_j);
      poly_B.bound_poly_var_top(&r_j);
      poly_C.bound_poly_var_top(&r_j);
      poly_D.bound_poly_var_top(&r_j);

      // produce a proof of sum-check and of evaluation
      let (proof, claim_next_round, comm_claim_next_round) = {
        let eval = poly.evaluate(&r_j);
        let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();

        // we need to prove the following under homomorphic commitments:
        // (1) poly(0) + poly(1) = claim_per_round
        // (2) poly(r_j) = eval

        // Our technique is to leverage dot product proofs:
        // (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
        // (2) we can prove: <poly_in_coeffs_form, (1, r_j, r^2_j, ..) = eval
        // for efficiency we batch them using random weights

        // add two claims to transcript
        comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
        comm_eval.append_to_transcript(b"comm_eval", transcript);

        // produce two weights
        let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);

        // compute a weighted sum of the RHS
        let target = &w[0] * &claim_per_round + &w[1] * &eval;
        let comm_target = GroupElement::vartime_multiscalar_mul(
          w.iter(),
          iter::once(&comm_claim_per_round)
            .chain(iter::once(&comm_eval))
            .map(|pt| pt.decompress().unwrap())
            .collect::<Vec<GroupElement>>(),
        )
        .compress();

        // combine the blinds the same way so the combined commitment opens to `target`
        let blind = {
          let blind_sc = if j == 0 {
            blind_claim
          } else {
            &blinds_evals[j - 1]
          };

          let blind_eval = &blinds_evals[j];

          &w[0] * blind_sc + &w[1] * blind_eval
        };

        // sanity check: the homomorphic combination matches a direct commitment
        assert_eq!(target.commit(&blind, &gens_1).compress(), comm_target);

        let a = {
          // the vector to use to decommit for sum-check test
          let a_sc = {
            let mut a = vec![Scalar::one(); poly.degree() + 1];
            a[0] = a[0] + Scalar::one();
            a
          };

          // the vector to use to decommit for evaluation
          let a_eval = {
            let mut a = vec![Scalar::one(); poly.degree() + 1];
            for j in 1..a.len() {
              a[j] = &a[j - 1] * &r_j;
            }
            a
          };

          // take weighted sum of the two vectors using w
          assert_eq!(a_sc.len(), a_eval.len());
          (0..a_sc.len())
            .map(|i| &w[0] * &a_sc[i] + &w[1] * &a_eval[i])
            .collect::<Vec<Scalar>>()
        };

        let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
          gens_1,
          gens_n,
          transcript,
          random_tape,
          &poly.as_vec(),
          &blinds_poly[j],
          &a,
          &target,
          &blind,
        );

        (proof, eval, comm_eval)
      };

      proofs.push(proof);
      claim_per_round = claim_next_round;
      comm_claim_per_round = comm_claim_next_round;
      r.push(r_j);
      comm_evals.push(comm_claim_per_round);
    }

    (
      ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
      r,
      vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]],
      blinds_evals[num_rounds - 1],
    )
  }
}

+ 83
- 0
src/timer.rs

@ -0,0 +1,83 @@
#[cfg(feature = "profile")]
use colored::Colorize;
#[cfg(feature = "profile")]
use std::sync::atomic::AtomicUsize;
#[cfg(feature = "profile")]
use std::{sync::atomic::Ordering, time::Instant};
// global nesting depth of active timers, used only to indent profile output
#[cfg(feature = "profile")]
pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);
/// A labeled wall-clock timer that prints nested, colorized start/stop lines
/// when the `profile` feature is enabled.
#[cfg(feature = "profile")]
pub struct Timer {
  label: String,
  timer: Instant,
}
#[cfg(feature = "profile")]
impl Timer {
  /// Starts a named timer: bumps the global depth and prints the label
  /// indented by the current nesting level.
  #[inline(always)]
  pub fn new(label: &str) -> Self {
    let timer = Instant::now();
    CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
    println!(
      "{:indent$}{}{}",
      "",
      "* ",
      label.yellow().bold(),
      // plain load instead of fetch_add(0): same value, clearer intent,
      // and a read-only atomic op
      indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
    );
    Self {
      label: label.to_string(),
      timer,
    }
  }

  /// Stops the timer: prints the elapsed time at the current indent and
  /// decrements the global depth.
  #[inline(always)]
  pub fn stop(&self) {
    let duration = self.timer.elapsed();
    println!(
      "{:indent$}{}{} {:?}",
      "",
      "* ",
      self.label.blue().bold(),
      duration,
      indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
    );
    CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
  }

  /// Prints a one-off message one level deeper than the current depth,
  /// leaving the depth unchanged on exit.
  #[inline(always)]
  pub fn print(msg: &str) {
    CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
    println!(
      "{:indent$}{}{}",
      "",
      "* ",
      msg.to_string().green().bold(),
      indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
    );
    CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
  }
}
/// No-op stand-in for `Timer` when the `profile` feature is disabled;
/// only the label is retained.
#[cfg(not(feature = "profile"))]
pub struct Timer {
  _label: String,
}
#[cfg(not(feature = "profile"))]
impl Timer {
#[inline(always)]
pub fn new(label: &str) -> Self {
Self {
_label: label.to_string(),
}
}
#[inline(always)]
pub fn stop(&self) {}
#[inline(always)]
pub fn print(_msg: &str) {}
}

+ 63
- 0
src/transcript.rs

@ -0,0 +1,63 @@
use super::group::CompressedGroup;
use super::scalar::Scalar;
use merlin::Transcript;
/// Extension trait over a Merlin transcript with Spartan-specific helpers for
/// appending labeled scalars/points and deriving scalar challenges.
pub trait ProofTranscript {
  fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
  fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar);
  fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup);
  fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar;
  fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar>;
}
impl ProofTranscript for Transcript {
  /// Domain-separates the transcript with the protocol's name.
  fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
    self.append_message(b"protocol-name", protocol_name);
  }

  /// Appends a scalar's canonical byte encoding under `label`.
  fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
    self.append_message(label, &scalar.to_bytes());
  }

  /// Appends a compressed group element's bytes under `label`.
  fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) {
    self.append_message(label, point.as_bytes());
  }

  /// Draws 64 uniform bytes from the transcript and reduces them to a scalar.
  fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
    let mut buf = [0u8; 64];
    self.challenge_bytes(label, &mut buf);
    Scalar::from_bytes_wide(&buf)
  }

  /// Draws `len` scalar challenges, all under the same label.
  fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
    let mut challenges = Vec::with_capacity(len);
    for _ in 0..len {
      challenges.push(self.challenge_scalar(label));
    }
    challenges
  }
}
/// Types that know how to serialize themselves into a proof transcript.
pub trait AppendToTranscript {
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript);
}
impl AppendToTranscript for Scalar {
  // a single scalar is appended directly under the given label
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_scalar(label, self);
  }
}
impl AppendToTranscript for Vec<Scalar> {
  /// Appends every scalar in order, framed by begin/end markers so the
  /// vector's extent is unambiguous in the transcript.
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"begin_append_vector");
    for scalar in self {
      transcript.append_scalar(label, scalar);
    }
    transcript.append_message(label, b"end_append_vector");
  }
}
impl AppendToTranscript for CompressedGroup {
  // a compressed point is appended directly under the given label
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_point(label, self);
  }
}

+ 184
- 0
src/unipoly.rs

@ -0,0 +1,184 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::group::GroupElement;
use super::scalar::{Scalar, ScalarFromPrimitives};
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use serde::{Deserialize, Serialize};
// A univariate polynomial in coefficient form, lowest degree first
// (eval_at_zero returns coeffs[0], i.e. the constant term):
// ax^2 + bx + c is stored as vec![c, b, a]
// ax^3 + bx^2 + cx + d is stored as vec![d, c, b, a]
#[derive(Debug)]
pub struct UniPoly {
  coeffs: Vec<Scalar>,
}
// A univariate polynomial with its linear coefficient omitted (it is
// recoverable from the sum-check identity eval(0) + eval(1) = claim),
// lowest degree first:
// ax^2 + bx + c is stored as vec![c, a]
// ax^3 + bx^2 + cx + d is stored as vec![d, b, a]
#[derive(Serialize, Deserialize, Debug)]
pub struct CompressedUniPoly {
  coeffs_except_linear_term: Vec<Scalar>,
}
impl UniPoly {
  /// Interpolates a polynomial from its evaluations at 0, 1, 2 (quadratic)
  /// or 0, 1, 2, 3 (cubic). Coefficients are stored lowest degree first.
  /// Accepts any slice (`&[Scalar]` instead of `&Vec<Scalar>`); existing
  /// callers passing `&Vec<Scalar>` continue to work via deref coercion.
  ///
  /// # Panics
  /// Panics unless `evals.len()` is 3 or 4.
  pub fn from_evals(evals: &[Scalar]) -> Self {
    // we only support degree-2 or degree-3 univariate polynomials
    assert!(evals.len() == 3 || evals.len() == 4);
    let coeffs = if evals.len() == 3 {
      // ax^2 + bx + c: finite-difference interpolation over points 0, 1, 2
      let two_inv = (2_usize).to_scalar().invert().unwrap();

      let c = evals[0];
      let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
      let b = evals[1] - c - a;
      vec![c, b, a]
    } else {
      // ax^3 + bx^2 + cx + d: finite-difference interpolation over 0, 1, 2, 3
      let two_inv = (2_usize).to_scalar().invert().unwrap();
      let six_inv = (6_usize).to_scalar().invert().unwrap();

      let d = evals[0];
      let a = six_inv
        * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
      let b = two_inv
        * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
          + evals[2]
          + evals[2]
          + evals[2]
          + evals[2]
          - evals[3]);
      let c = evals[1] - d - a - b;
      vec![d, c, b, a]
    };

    UniPoly { coeffs }
  }

  /// Degree of the polynomial (number of coefficients minus one).
  pub fn degree(&self) -> usize {
    self.coeffs.len() - 1
  }

  /// Returns a copy of the coefficient vector, lowest degree first.
  pub fn as_vec(&self) -> Vec<Scalar> {
    self.coeffs.clone()
  }

  /// Evaluation at 0 is the constant term.
  pub fn eval_at_zero(&self) -> Scalar {
    self.coeffs[0]
  }

  /// Evaluation at 1 is the sum of all coefficients.
  pub fn eval_at_one(&self) -> Scalar {
    (0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
  }

  /// Evaluates the polynomial at `r` by accumulating powers of `r`.
  pub fn evaluate(&self, r: &Scalar) -> Scalar {
    let mut eval = self.coeffs[0];
    let mut power = *r;
    for i in 1..self.coeffs.len() {
      eval = &eval + &power * &self.coeffs[i];
      power = &power * r;
    }
    eval
  }

  /// Drops the linear coefficient (index 1); the verifier recovers it from
  /// the sum-check identity.
  pub fn compress(&self) -> CompressedUniPoly {
    let coeffs_except_linear_term = [&self.coeffs[0..1], &self.coeffs[2..]].concat();
    assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
    CompressedUniPoly {
      coeffs_except_linear_term,
    }
  }

  /// Commits to the coefficient vector under `gens` with blind `blind`.
  pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
    self.coeffs.commit(blind, gens)
  }
}
impl CompressedUniPoly {
  /// Reconstructs the full polynomial from the stored coefficients.
  ///
  /// We require eval(0) + eval(1) = hint, which lets us solve for the
  /// omitted linear term:
  ///   linear_term = hint - 2 * constant_term - deg2 term - deg3 term
  pub fn decompress(&self, hint: &Scalar) -> UniPoly {
    let constant_term = self.coeffs_except_linear_term[0];
    let mut linear_term = hint - constant_term - constant_term;
    for i in 1..self.coeffs_except_linear_term.len() {
      linear_term = linear_term - self.coeffs_except_linear_term[i];
    }

    // rebuild [constant, linear, higher-degree...] in order
    let mut coeffs: Vec<Scalar> = vec![constant_term, linear_term];
    coeffs.extend_from_slice(&self.coeffs_except_linear_term[1..]);
    assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
    UniPoly { coeffs }
  }
}
impl AppendToTranscript for UniPoly {
  /// Absorbs this polynomial into `transcript`: a begin marker under `label`,
  /// each coefficient in order under the label `b"coeff"`, then an end marker.
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"UniPoly_begin");
    for coeff in self.coeffs.iter() {
      transcript.append_scalar(b"coeff", coeff);
    }
    transcript.append_message(label, b"UniPoly_end");
  }
}
#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn test_from_evals_quad() {
    // Interpolate p(x) = 2x^2 + 3x + 1 from its evaluations at x = 0, 1, 2.
    let evals = vec![
      Scalar::one(),             // p(0) = 1
      (6 as usize).to_scalar(),  // p(1) = 6
      (15 as usize).to_scalar(), // p(2) = 15
    ];
    let poly = UniPoly::from_evals(&evals);

    assert_eq!(poly.eval_at_zero(), evals[0]);
    assert_eq!(poly.eval_at_one(), evals[1]);
    assert_eq!(poly.coeffs.len(), 3);
    assert_eq!(poly.coeffs[0], Scalar::one());
    assert_eq!(poly.coeffs[1], (3 as usize).to_scalar());
    assert_eq!(poly.coeffs[2], (2 as usize).to_scalar());

    // Round-trip through compression using the hint p(0) + p(1).
    let hint = evals[0] + evals[1];
    let decompressed_poly = poly.compress().decompress(&hint);
    for (recovered, expected) in decompressed_poly.coeffs.iter().zip(poly.coeffs.iter()) {
      assert_eq!(recovered, expected);
    }

    // p(3) = 2*9 + 3*3 + 1 = 28
    assert_eq!(
      poly.evaluate(&(3 as usize).to_scalar()),
      (28 as usize).to_scalar()
    );
  }

  #[test]
  fn test_from_evals_cubic() {
    // Interpolate p(x) = x^3 + 2x^2 + 3x + 1 from its evaluations at x = 0..=3.
    let evals = vec![
      Scalar::one(),             // p(0) = 1
      (7 as usize).to_scalar(),  // p(1) = 7
      (23 as usize).to_scalar(), // p(2) = 23
      (55 as usize).to_scalar(), // p(3) = 55
    ];
    let poly = UniPoly::from_evals(&evals);

    assert_eq!(poly.eval_at_zero(), evals[0]);
    assert_eq!(poly.eval_at_one(), evals[1]);
    assert_eq!(poly.coeffs.len(), 4);
    assert_eq!(poly.coeffs[0], Scalar::one());
    assert_eq!(poly.coeffs[1], (3 as usize).to_scalar());
    assert_eq!(poly.coeffs[2], (2 as usize).to_scalar());
    assert_eq!(poly.coeffs[3], (1 as usize).to_scalar());

    // Round-trip through compression using the hint p(0) + p(1).
    let hint = evals[0] + evals[1];
    let decompressed_poly = poly.compress().decompress(&hint);
    for (recovered, expected) in decompressed_poly.coeffs.iter().zip(poly.coeffs.iter()) {
      assert_eq!(recovered, expected);
    }

    // p(4) = 64 + 32 + 12 + 1 = 109
    assert_eq!(
      poly.evaluate(&(4 as usize).to_scalar()),
      (109 as usize).to_scalar()
    );
  }
}

Loading…
Cancel
Save