Browse Source

Update to latest Rust and fix Clippy warnings (#37)

* Update to latest Rust and fix Clippy warnings

* cleanup
master
Srinath Setty 3 years ago
committed by GitHub
parent
commit
19d1d63703
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 227 additions and 249 deletions
  1. +4
    -0
      .cargo/config
  2. +11
    -4
      .github/workflows/rust.yml
  3. +3
    -3
      Cargo.toml
  4. +2
    -2
      benches/nizk.rs
  5. +3
    -3
      benches/snark.rs
  6. +1
    -1
      examples/cubic.rs
  7. +1
    -1
      profiler/nizk.rs
  8. +1
    -1
      profiler/snark.rs
  9. +36
    -38
      src/dense_mlpoly.rs
  10. +20
    -25
      src/lib.rs
  11. +0
    -6
      src/math.rs
  12. +7
    -12
      src/nizk/bullet.rs
  13. +18
    -19
      src/nizk/mod.rs
  14. +9
    -12
      src/product_tree.rs
  15. +17
    -22
      src/r1csinstance.rs
  16. +15
    -14
      src/r1csproof.rs
  17. +57
    -62
      src/sparse_mlpoly.rs
  18. +4
    -4
      src/sumcheck.rs
  19. +18
    -20
      src/unipoly.rs

+ 4
- 0
.cargo/config

@ -0,0 +1,4 @@
[build]
rustflags = [
"-C", "target-cpu=native",
]

+ 11
- 4
.github/workflows/rust.yml

@ -1,4 +1,4 @@
name: Rust
name: Build and Test Spartan
on:
push:
@ -8,16 +8,23 @@ on:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install
run: rustup default nightly-2021-01-31
run: rustup default nightly
- name: Install rustfmt Components
run: rustup component add rustfmt
- name: Install clippy
run: rustup component add clippy
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose
- name: Build examples
run: cargo build --examples --verbose
- name: Check Rustfmt Code Style
run: cargo fmt --all -- --check
- name: Check clippy warnings
run: cargo clippy --all-targets --all-features -- -D warnings

+ 3
- 3
Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "spartan"
version = "0.3.0"
version = "0.4.0"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2018"
description = "High-speed zkSNARKs without trusted setup"
@ -11,7 +11,7 @@ license-file = "LICENSE"
keywords = ["zkSNARKs", "cryptography", "proofs"]
[dependencies]
curve25519-dalek = {version = "3.0.0", features = ["serde", "simd_backend"]}
curve25519-dalek = {version = "3.2.0", features = ["serde", "simd_backend"]}
merlin = "3.0.0"
rand = "0.7.3"
digest = "0.8.1"
@ -20,7 +20,7 @@ byteorder = "1.3.4"
rayon = { version = "1.3.0", optional = true }
serde = { version = "1.0.106", features = ["derive"] }
bincode = "1.2.1"
subtle = { version = "^2.2.3", default-features = false }
subtle = { version = "2.4", default-features = false }
rand_core = { version = "0.5", default-features = false }
zeroize = { version = "1", default-features = false }
itertools = "0.10.0"

+ 2
- 2
benches/nizk.rs

@ -18,7 +18,7 @@ fn nizk_prove_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("NIZK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
@ -49,7 +49,7 @@ fn nizk_verify_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("NIZK_verify_benchmark");
group.plot_config(plot_config);
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

+ 3
- 3
benches/snark.rs

@ -12,7 +12,7 @@ fn snark_encode_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("SNARK_encode_benchmark");
group.plot_config(plot_config);
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
@ -37,7 +37,7 @@ fn snark_prove_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("SNARK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
@ -74,7 +74,7 @@ fn snark_verify_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("SNARK_verify_benchmark");
group.plot_config(plot_config);
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

+ 1
- 1
examples/cubic.rs

@ -93,7 +93,7 @@ fn produce_r1cs() -> (
// check if the instance we created is satisfiable
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert_eq!(res.unwrap(), true, "should be satisfied");
assert!(res.unwrap(), "should be satisfied");
(
num_cons,

+ 1
- 1
profiler/nizk.rs

@ -19,7 +19,7 @@ pub fn main() {
println!("Profiler:: NIZK");
for &s in inst_sizes.iter() {
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;

+ 1
- 1
profiler/snark.rs

@ -18,7 +18,7 @@ pub fn main() {
println!("Profiler:: SNARK");
for &s in inst_sizes.iter() {
let num_vars = (2 as usize).pow(s as u32);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;

+ 36
- 38
src/dense_mlpoly.rs

@ -117,9 +117,11 @@ impl IdentityPolynomial {
impl DensePolynomial {
pub fn new(Z: Vec<Scalar>) -> Self {
let len = Z.len();
let num_vars = len.log2();
DensePolynomial { num_vars, Z, len }
DensePolynomial {
num_vars: Z.len().log2() as usize,
len: Z.len(),
Z,
}
}
pub fn get_num_vars(&self) -> usize {
@ -143,7 +145,7 @@ impl DensePolynomial {
}
#[cfg(feature = "multicore")]
fn commit_inner(&self, blinds: &Vec<Scalar>, gens: &MultiCommitGens) -> PolyCommitment {
fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
let L_size = blinds.len();
let R_size = self.Z.len() / L_size;
assert_eq!(L_size * R_size, self.Z.len());
@ -187,9 +189,9 @@ impl DensePolynomial {
let R_size = right_num_vars.pow2();
assert_eq!(L_size * R_size, n);
let blinds = if random_tape.is_some() {
let blinds = if let Some(t) = random_tape {
PolyCommitmentBlinds {
blinds: random_tape.unwrap().random_vector(b"poly_blinds", L_size),
blinds: t.random_vector(b"poly_blinds", L_size),
}
} else {
PolyCommitmentBlinds {
@ -352,7 +354,7 @@ impl PolyEvalProof {
&LZ,
&LZ_blind,
&R,
&Zr,
Zr,
blind_Zr,
);
@ -404,7 +406,7 @@ mod tests {
use super::*;
use rand::rngs::OsRng;
fn evaluate_with_LR(Z: &Vec<Scalar>, r: &Vec<Scalar>) -> Scalar {
fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
@ -427,25 +429,26 @@ mod tests {
#[test]
fn check_polynomial_evaluation() {
let mut Z: Vec<Scalar> = Vec::new(); // Z = [1, 2, 1, 4]
Z.push(Scalar::one());
Z.push((2 as usize).to_scalar());
Z.push((1 as usize).to_scalar());
Z.push((4 as usize).to_scalar());
// Z = [1, 2, 1, 4]
let Z = vec![
Scalar::one(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
// r = [4,3]
let mut r: Vec<Scalar> = Vec::new();
r.push((4 as usize).to_scalar());
r.push((3 as usize).to_scalar());
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
let eval_with_LR = evaluate_with_LR(&Z, &r);
let poly = DensePolynomial::new(Z);
let eval = poly.evaluate(&r);
assert_eq!(eval, (28 as usize).to_scalar());
assert_eq!(eval, (28_usize).to_scalar());
assert_eq!(eval_with_LR, eval);
}
pub fn compute_factored_chis_at_r(r: &Vec<Scalar>) -> (Vec<Scalar>, Vec<Scalar>) {
pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
let mut L: Vec<Scalar> = Vec::new();
let mut R: Vec<Scalar> = Vec::new();
@ -484,7 +487,7 @@ mod tests {
(L, R)
}
pub fn compute_chis_at_r(r: &Vec<Scalar>) -> Vec<Scalar> {
pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
let ell = r.len();
let n = ell.pow2();
let mut chis: Vec<Scalar> = Vec::new();
@ -505,15 +508,12 @@ mod tests {
pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
assert_eq!(L.len(), R.len());
let mut O: Vec<Scalar> = Vec::new();
let m = L.len();
for i in 0..m {
for j in 0..m {
O.push(L[i] * R[j]);
}
}
O
(0..L.len())
.map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<Scalar>>())
.collect::<Vec<Vec<Scalar>>>()
.into_iter()
.flatten()
.collect::<Vec<Scalar>>()
}
#[test]
@ -563,20 +563,18 @@ mod tests {
#[test]
fn check_polynomial_commit() {
let mut Z: Vec<Scalar> = Vec::new(); // Z = [1, 2, 1, 4]
Z.push((1 as usize).to_scalar());
Z.push((2 as usize).to_scalar());
Z.push((1 as usize).to_scalar());
Z.push((4 as usize).to_scalar());
let Z = vec![
(1_usize).to_scalar(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
let poly = DensePolynomial::new(Z);
// r = [4,3]
let mut r: Vec<Scalar> = Vec::new();
r.push((4 as usize).to_scalar());
r.push((3 as usize).to_scalar());
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
let eval = poly.evaluate(&r);
assert_eq!(eval, (28 as usize).to_scalar());
assert_eq!(eval, (28_usize).to_scalar());
let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
let (poly_commitment, blinds) = poly.commit(&gens, None);

+ 20
- 25
src/lib.rs

@ -1,8 +1,8 @@
#![allow(non_snake_case)]
#![feature(test)]
#![feature(int_log)]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![feature(external_doc)]
#![doc(include = "../README.md")]
extern crate byteorder;
extern crate core;
@ -64,11 +64,11 @@ pub struct Assignment {
impl Assignment {
/// Constructs a new `Assignment` from a vector
pub fn new(assignment: &Vec<[u8; 32]>) -> Result<Assignment, R1CSError> {
let bytes_to_scalar = |vec: &Vec<[u8; 32]>| -> Result<Vec<Scalar>, R1CSError> {
pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment, R1CSError> {
let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result<Vec<Scalar>, R1CSError> {
let mut vec_scalar: Vec<Scalar> = Vec::new();
for i in 0..vec.len() {
let val = Scalar::from_bytes(&vec[i]);
for v in vec {
let val = Scalar::from_bytes(v);
if val.is_some().unwrap_u8() == 1 {
vec_scalar.push(val.unwrap());
} else {
@ -124,9 +124,9 @@ impl Instance {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &Vec<(usize, usize, [u8; 32])>,
B: &Vec<(usize, usize, [u8; 32])>,
C: &Vec<(usize, usize, [u8; 32])>,
A: &[(usize, usize, [u8; 32])],
B: &[(usize, usize, [u8; 32])],
C: &[(usize, usize, [u8; 32])],
) -> Result<Instance, R1CSError> {
let (num_vars_padded, num_cons_padded) = {
let num_vars_padded = {
@ -161,11 +161,9 @@ impl Instance {
};
let bytes_to_scalar =
|tups: &Vec<(usize, usize, [u8; 32])>| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
|tups: &[(usize, usize, [u8; 32])]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
for i in 0..tups.len() {
let (row, col, val_bytes) = tups[i];
for &(row, col, val_bytes) in tups {
// row must be smaller than num_cons
if row >= num_cons {
return Err(R1CSError::InvalidIndex);
@ -246,12 +244,11 @@ impl Instance {
let padded_vars = {
let num_padded_vars = self.inst.get_num_vars();
let num_vars = vars.assignment.len();
let padded_vars = if num_padded_vars > num_vars {
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars.clone()
};
padded_vars
}
};
Ok(
@ -357,12 +354,11 @@ impl SNARK {
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
let padded_vars = if num_padded_vars > num_vars {
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
};
padded_vars
}
};
R1CSProof::prove(
@ -513,12 +509,11 @@ impl NIZK {
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
let padded_vars = if num_padded_vars > num_vars {
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
};
padded_vars
}
};
let (proof, rx, ry) = R1CSProof::prove(
@ -627,7 +622,7 @@ mod tests {
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert_eq!(inst.is_err(), true);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidIndex));
}
@ -652,7 +647,7 @@ mod tests {
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert_eq!(inst.is_err(), true);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidScalar));
}
@ -692,7 +687,7 @@ mod tests {
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert_eq!(res.unwrap(), true, "should be satisfied");
assert!(res.unwrap(), "should be satisfied");
// SNARK public params
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);

+ 0
- 6
src/math.rs

@ -1,7 +1,6 @@
pub trait Math {
fn square_root(self) -> usize;
fn pow2(self) -> usize;
fn log2(self) -> usize;
fn get_bits(self, num_bits: usize) -> Vec<bool>;
}
@ -17,11 +16,6 @@ impl Math for usize {
base.pow(self as u32)
}
#[inline]
fn log2(self) -> usize {
(self as f64).log2() as usize
}
/// Returns the num_bits from n in a canonical order
fn get_bits(self, num_bits: usize) -> Vec<bool> {
(0..num_bits)

+ 7
- 12
src/nizk/bullet.rs

@ -5,7 +5,6 @@
#![allow(clippy::too_many_arguments)]
use super::super::errors::ProofVerifyError;
use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::super::math::Math;
use super::super::scalar::Scalar;
use super::super::transcript::ProofTranscript;
use core::iter;
@ -56,19 +55,14 @@ impl BulletReductionProof {
// All of the input vectors must have a length that is a power of two.
let mut n = G.len();
assert!(n.is_power_of_two());
let lg_n = n.log2();
let G_factors: Vec<Scalar> = iter::repeat(Scalar::one()).take(n).collect();
let lg_n = n.log2() as usize;
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
assert_eq!(G_factors.len(), n);
assert_eq!(blinds_vec.len(), 2 * lg_n);
//transcript.innerproduct_domain_sep(n as u64);
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
let mut blinds_iter = blinds_vec.iter();
@ -80,8 +74,8 @@ impl BulletReductionProof {
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let c_L = inner_product(&a_L, &b_R);
let c_R = inner_product(&a_R, &b_L);
let c_L = inner_product(a_L, b_R);
let c_R = inner_product(a_R, b_L);
let (blind_L, blind_R) = blinds_iter.next().unwrap();
@ -236,10 +230,11 @@ impl BulletReductionProof {
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert!(
!(a.len() != b.len()),
"inner_product(a,b): lengths of vectors do not match"
);
let mut out = Scalar::zero();
if a.len() != b.len() {
panic!("inner_product(a,b): lengths of vectors do not match");
}
for i in 0..a.len() {
out += a[i] * b[i];
}

+ 18
- 19
src/nizk/mod.rs

@ -2,7 +2,6 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, CompressedGroupExt};
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
@ -37,7 +36,7 @@ impl KnowledgeProof {
let t1 = random_tape.random_scalar(b"t1");
let t2 = random_tape.random_scalar(b"t2");
let C = x.commit(&r, gens_n).compress();
let C = x.commit(r, gens_n).compress();
C.append_to_transcript(b"C", transcript);
let alpha = t1.commit(&t2, gens_n).compress();
@ -99,10 +98,10 @@ impl EqualityProof {
// produce a random Scalar
let r = random_tape.random_scalar(b"r");
let C1 = v1.commit(&s1, gens_n).compress();
let C1 = v1.commit(s1, gens_n).compress();
C1.append_to_transcript(b"C1", transcript);
let C2 = v2.commit(&s2, gens_n).compress();
let C2 = v2.commit(s2, gens_n).compress();
C2.append_to_transcript(b"C2", transcript);
let alpha = (r * gens_n.h).compress();
@ -181,13 +180,13 @@ impl ProductProof {
let b4 = random_tape.random_scalar(b"b4");
let b5 = random_tape.random_scalar(b"b5");
let X = x.commit(&rX, gens_n).compress();
let X = x.commit(rX, gens_n).compress();
X.append_to_transcript(b"X", transcript);
let Y = y.commit(&rY, gens_n).compress();
let Y = y.commit(rY, gens_n).compress();
Y.append_to_transcript(b"Y", transcript);
let Z = z.commit(&rZ, gens_n).compress();
let Z = z.commit(rZ, gens_n).compress();
Z.append_to_transcript(b"Z", transcript);
let alpha = b1.commit(&b2, gens_n).compress();
@ -237,7 +236,7 @@ impl ProductProof {
z2: &Scalar,
) -> bool {
let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
let rhs = z1.commit(&z2, gens_n).compress();
let rhs = z1.commit(z2, gens_n).compress();
lhs == rhs
}
@ -267,11 +266,11 @@ impl ProductProof {
let c = transcript.challenge_scalar(b"c");
if ProductProof::check_equality(&self.alpha, &X, &c, &gens_n, &z1, &z2)
&& ProductProof::check_equality(&self.beta, &Y, &c, &gens_n, &z3, &z4)
if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
&& ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
&& ProductProof::check_equality(
&self.delta,
&Z,
Z,
&c,
&MultiCommitGens {
n: 1,
@ -331,16 +330,16 @@ impl DotProductProof {
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_beta");
let Cx = x_vec.commit(&blind_x, gens_n).compress();
let Cx = x_vec.commit(blind_x, gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(&blind_y, gens_1).compress();
let Cy = y.commit(blind_y, gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
let delta = d_vec.commit(&r_delta, gens_n).compress();
delta.append_to_transcript(b"delta", transcript);
let dotproduct_a_d = DotProductProof::compute_dotproduct(&a_vec, &d_vec);
let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);
let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
beta.append_to_transcript(b"beta", transcript);
@ -390,7 +389,7 @@ impl DotProductProof {
let mut result =
c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, &a);
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
result &= c * Cy.unpack()? + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
if result {
@ -454,17 +453,17 @@ impl DotProductProofLog {
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_delta");
let blinds_vec = {
let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log2());
let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log2());
let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log2() as usize);
let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log2() as usize);
(0..v1.len())
.map(|i| (v1[i], v2[i]))
.collect::<Vec<(Scalar, Scalar)>>()
};
let Cx = x_vec.commit(&blind_x, &gens.gens_n).compress();
let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(&blind_y, &gens.gens_1).compress();
let Cy = y.commit(blind_y, &gens.gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
let blind_Gamma = blind_x + blind_y;

+ 9
- 12
src/product_tree.rs

@ -1,7 +1,6 @@
#![allow(dead_code)]
use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial;
use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use super::transcript::ProofTranscript;
@ -37,7 +36,7 @@ impl ProductCircuit {
let mut left_vec: Vec<DensePolynomial> = Vec::new();
let mut right_vec: Vec<DensePolynomial> = Vec::new();
let num_layers = poly.len().log2();
let num_layers = poly.len().log2() as usize;
let (outp_left, outp_right) = poly.split(poly.len() / 2);
left_vec.push(outp_left);
@ -182,7 +181,7 @@ impl ProductCircuitEvalProof {
let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C.len(), len / 2);
let num_rounds_prod = poly_C.len().log2();
let num_rounds_prod = poly_C.len().log2() as usize;
let comb_func_prod = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
@ -223,12 +222,12 @@ impl ProductCircuitEvalProof {
len: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
let num_layers = len.log2();
let num_layers = len.log2() as usize;
let mut claim = eval;
let mut rand: Vec<Scalar> = Vec::new();
let mut num_rounds = 0;
//let mut num_rounds = 0;
assert_eq!(self.proof.len(), num_layers);
for i in 0..num_layers {
for (num_rounds, i) in (0..num_layers).enumerate() {
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod = &self.proof[i].claims;
@ -246,7 +245,6 @@ impl ProductCircuitEvalProof {
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
num_rounds += 1;
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
@ -280,7 +278,7 @@ impl ProductCircuitEvalProofBatched {
let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C_par.len(), len / 2);
let num_rounds_prod = poly_C_par.len().log2();
let num_rounds_prod = poly_C_par.len().log2() as usize;
let comb_func_prod = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
@ -390,14 +388,14 @@ impl ProductCircuitEvalProofBatched {
len: usize,
transcript: &mut Transcript,
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
let num_layers = len.log2();
let num_layers = len.log2() as usize;
let mut rand: Vec<Scalar> = Vec::new();
let mut num_rounds = 0;
//let mut num_rounds = 0;
assert_eq!(self.proof.len(), num_layers);
let mut claims_to_verify = claims_prod_vec.to_owned();
let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
for i in 0..num_layers {
for (num_rounds, i) in (0..num_layers).enumerate() {
if i == num_layers - 1 {
claims_to_verify.extend(claims_dotp_vec);
}
@ -478,7 +476,6 @@ impl ProductCircuitEvalProofBatched {
}
}
num_rounds += 1;
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;

+ 17
- 22
src/r1csinstance.rs

@ -35,8 +35,8 @@ impl R1CSCommitmentGens {
num_nz_entries: usize,
) -> R1CSCommitmentGens {
assert!(num_inputs < num_vars);
let num_poly_vars_x = num_cons.log2();
let num_poly_vars_y = (2 * num_vars).log2();
let num_poly_vars_x = num_cons.log2() as usize;
let num_poly_vars_y = (2 * num_vars).log2() as usize;
let gens =
SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3);
R1CSCommitmentGens { gens }
@ -73,9 +73,9 @@ impl R1CSInstance {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &Vec<(usize, usize, Scalar)>,
B: &Vec<(usize, usize, Scalar)>,
C: &Vec<(usize, usize, Scalar)>,
A: &[(usize, usize, Scalar)],
B: &[(usize, usize, Scalar)],
C: &[(usize, usize, Scalar)],
) -> R1CSInstance {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
@ -94,8 +94,8 @@ impl R1CSInstance {
assert!(num_inputs < num_vars);
// no errors, so create polynomials
let num_poly_vars_x = num_cons.log2();
let num_poly_vars_y = (2 * num_vars).log2();
let num_poly_vars_x = num_cons.log2() as usize;
let num_poly_vars_y = (2 * num_vars).log2() as usize;
let mat_A = (0..A.len())
.map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
@ -111,16 +111,14 @@ impl R1CSInstance {
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);
let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C);
let inst = R1CSInstance {
R1CSInstance {
num_cons,
num_vars,
num_inputs,
A: poly_A,
B: poly_B,
C: poly_C,
};
inst
}
}
pub fn get_num_vars(&self) -> usize {
@ -147,8 +145,8 @@ impl R1CSInstance {
let mut csprng: OsRng = OsRng;
// assert num_cons and num_vars are power of 2
assert_eq!(num_cons.log2().pow2(), num_cons);
assert_eq!(num_vars.log2().pow2(), num_vars);
assert_eq!((num_cons.log2() as usize).pow2(), num_cons);
assert_eq!((num_vars.log2() as usize).pow2(), num_vars);
// num_inputs + 1 <= num_vars
assert!(num_inputs < num_vars);
@ -195,8 +193,8 @@ impl R1CSInstance {
Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
let num_poly_vars_x = num_cons.log2();
let num_poly_vars_y = (2 * num_vars).log2();
let num_poly_vars_x = num_cons.log2() as usize;
let num_poly_vars_y = (2 * num_vars).log2() as usize;
let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);
@ -210,10 +208,7 @@ impl R1CSInstance {
C: poly_C,
};
assert_eq!(
inst.is_sat(&Z[..num_vars].to_vec(), &Z[num_vars + 1..].to_vec()),
true,
);
assert!(inst.is_sat(&Z[..num_vars].to_vec(), &Z[num_vars + 1..].to_vec()));
(inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
}
@ -275,9 +270,9 @@ impl R1CSInstance {
assert_eq!(num_rows, self.num_cons);
assert!(num_cols > self.num_vars);
let evals_A = self.A.compute_eval_table_sparse(&evals, num_rows, num_cols);
let evals_B = self.B.compute_eval_table_sparse(&evals, num_rows, num_cols);
let evals_C = self.C.compute_eval_table_sparse(&evals, num_rows, num_cols);
let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols);
let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols);
let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols);
(evals_A, evals_B, evals_C)
}

+ 15
- 14
src/r1csproof.rs

@ -5,7 +5,6 @@ use super::dense_mlpoly::{
};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
use super::r1csinstance::R1CSInstance;
use super::random::RandomTape;
@ -64,7 +63,7 @@ pub struct R1CSGens {
impl R1CSGens {
pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
let num_poly_vars = num_vars.log2();
let num_poly_vars = num_vars.log2() as usize;
let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
R1CSGens { gens_sc, gens_pc }
@ -181,7 +180,8 @@ impl R1CSProof {
};
// derive the verifier's challenge tau
let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log2(), z.len().log2());
let (num_rounds_x, num_rounds_y) =
(inst.get_num_cons().log2() as usize, z.len().log2() as usize);
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// compute the initial evaluation table for R(\tau, x)
let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
@ -218,7 +218,7 @@ impl R1CSProof {
&gens.gens_sc.gens_1,
transcript,
random_tape,
&Cz_claim,
Cz_claim,
&Cz_blind,
)
};
@ -229,9 +229,9 @@ impl R1CSProof {
&gens.gens_sc.gens_1,
transcript,
random_tape,
&Az_claim,
Az_claim,
&Az_blind,
&Bz_claim,
Bz_claim,
&Bz_blind,
&prod,
&prod_Az_Bz_blind,
@ -361,7 +361,7 @@ impl R1CSProof {
.comm_vars
.append_to_transcript(b"poly_commitment", transcript);
let (num_rounds_x, num_rounds_y) = (num_cons.log2(), (2 * num_vars).log2());
let (num_rounds_x, num_rounds_y) = (num_cons.log2() as usize, (2 * num_vars).log2() as usize);
// derive the verifier's challenge tau
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
@ -383,15 +383,15 @@ impl R1CSProof {
let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
assert!(pok_Cz_claim
.verify(&gens.gens_sc.gens_1, transcript, &comm_Cz_claim)
.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)
.is_ok());
assert!(proof_prod
.verify(
&gens.gens_sc.gens_1,
transcript,
&comm_Az_claim,
&comm_Bz_claim,
&comm_prod_Az_Bz_claims
comm_Az_claim,
comm_Bz_claim,
comm_prod_Az_Bz_claims
)
.is_ok());
@ -467,7 +467,8 @@ impl R1CSProof {
.map(|i| SparsePolyEntry::new(i + 1, input[i]))
.collect::<Vec<SparsePolyEntry>>(),
);
SparsePolynomial::new(n.log2(), input_as_sparse_poly_entries).evaluate(&ry[1..].to_vec())
SparsePolynomial::new(n.log2() as usize, input_as_sparse_poly_entries)
.evaluate(&ry[1..].to_vec())
};
// compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
@ -563,14 +564,14 @@ mod tests {
fn test_tiny_r1cs() {
let (inst, vars, input) = tests::produce_tiny_r1cs();
let is_sat = inst.is_sat(&vars, &input);
assert_eq!(is_sat, true);
assert!(is_sat);
}
#[test]
fn test_synthetic_r1cs() {
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
let is_sat = inst.is_sat(&vars, &input);
assert_eq!(is_sat, true);
assert!(is_sat);
}
#[test]

+ 57
- 62
src/sparse_mlpoly.rs

@ -89,7 +89,10 @@ impl DerefsEvalProof {
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> PolyEvalProof {
assert_eq!(joint_poly.get_num_vars(), r.len() + evals.len().log2());
assert_eq!(
joint_poly.get_num_vars(),
r.len() + evals.len().log2() as usize
);
// append the claimed evaluations to transcript
evals.append_to_transcript(b"evals_ops_val", transcript);
@ -97,7 +100,7 @@ impl DerefsEvalProof {
// n-to-1 reduction
let (r_joint, eval_joint) = {
let challenges =
transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log2());
transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log2() as usize);
let mut poly_evals = DensePolynomial::new(evals);
for i in (0..challenges.len()).rev() {
poly_evals.bound_poly_var_bot(&challenges[i]);
@ -162,7 +165,8 @@ impl DerefsEvalProof {
evals.append_to_transcript(b"evals_ops_val", transcript);
// n-to-1 reduction
let challenges = transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log2());
let challenges =
transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log2() as usize);
let mut poly_evals = DensePolynomial::new(evals);
for i in (0..challenges.len()).rev() {
poly_evals.bound_poly_var_bot(&challenges[i]);
@ -175,7 +179,7 @@ impl DerefsEvalProof {
// decommit the joint polynomial at r_joint
joint_claim_eval.append_to_transcript(b"joint_claim_eval", transcript);
assert!(proof
.verify_plain(gens, transcript, &r_joint, &joint_claim_eval, &comm)
.verify_plain(gens, transcript, &r_joint, &joint_claim_eval, comm)
.is_ok());
Ok(())
@ -249,7 +253,7 @@ impl AddrTimestamps {
audit_ts[addr] = w_ts;
}
ops_addr_vec.push(DensePolynomial::from_usize(&ops_addr_inst));
ops_addr_vec.push(DensePolynomial::from_usize(ops_addr_inst));
read_ts_vec.push(DensePolynomial::from_usize(&read_ts));
}
@ -302,15 +306,15 @@ impl SparseMatPolyCommitmentGens {
num_nz_entries: usize,
batch_size: usize,
) -> SparseMatPolyCommitmentGens {
let num_vars_ops =
num_nz_entries.next_power_of_two().log2() + (batch_size * 5).next_power_of_two().log2();
let num_vars_ops = num_nz_entries.next_power_of_two().log2() as usize
+ (batch_size * 5).next_power_of_two().log2() as usize;
let num_vars_mem = if num_vars_x > num_vars_y {
num_vars_x
} else {
num_vars_y
} + 1;
let num_vars_derefs =
num_nz_entries.next_power_of_two().log2() + (batch_size * 2).next_power_of_two().log2();
let num_vars_derefs = num_nz_entries.next_power_of_two().log2() as usize
+ (batch_size * 2).next_power_of_two().log2() as usize;
let gens_ops = PolyCommitmentGens::new(num_vars_ops, label);
let gens_mem = PolyCommitmentGens::new(num_vars_mem, label);
@ -510,14 +514,6 @@ impl MultiSparseMatPolynomialAsDense {
}
}
#[derive(Debug)]
struct HashLayer {
init: DensePolynomial,
read_vec: Vec<DensePolynomial>,
write_vec: Vec<DensePolynomial>,
audit: DensePolynomial,
}
#[derive(Debug)]
struct ProductLayer {
init: ProductCircuit,
@ -528,7 +524,6 @@ struct ProductLayer {
#[derive(Debug)]
struct Layers {
hash_layer: HashLayer,
prod_layer: ProductLayer,
}
@ -623,7 +618,7 @@ impl Layers {
poly_ops_val,
&addr_timestamps.read_ts,
&addr_timestamps.audit_ts,
&r_mem_check,
r_mem_check,
);
let prod_init = ProductCircuit::new(&poly_init_hashed);
@ -656,13 +651,6 @@ impl Layers {
write_vec: prod_write_vec,
audit: prod_audit,
},
hash_layer: HashLayer {
init: poly_init_hashed,
read_vec: poly_read_hashed_vec,
write_vec: poly_write_hashed_vec,
audit: poly_audit_hashed,
},
}
}
}
@ -747,16 +735,16 @@ impl HashLayerProof {
// decommit derefs at rand_ops
let eval_row_ops_val = (0..derefs.row_ops_val.len())
.map(|i| derefs.row_ops_val[i].evaluate(&rand_ops))
.map(|i| derefs.row_ops_val[i].evaluate(rand_ops))
.collect::<Vec<Scalar>>();
let eval_col_ops_val = (0..derefs.col_ops_val.len())
.map(|i| derefs.col_ops_val[i].evaluate(&rand_ops))
.map(|i| derefs.col_ops_val[i].evaluate(rand_ops))
.collect::<Vec<Scalar>>();
let proof_derefs = DerefsEvalProof::prove(
derefs,
&eval_row_ops_val,
&eval_col_ops_val,
&rand_ops,
rand_ops,
&gens.gens_derefs,
transcript,
random_tape,
@ -766,11 +754,11 @@ impl HashLayerProof {
// evaluate row_addr, row_read-ts, col_addr, col_read-ts, val at rand_ops
// evaluate row_audit_ts and col_audit_ts at rand_mem
let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) =
HashLayerProof::prove_helper((&rand_mem, &rand_ops), &dense.row);
HashLayerProof::prove_helper((rand_mem, rand_ops), &dense.row);
let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) =
HashLayerProof::prove_helper((&rand_mem, &rand_ops), &dense.col);
HashLayerProof::prove_helper((rand_mem, rand_ops), &dense.col);
let eval_val_vec = (0..dense.val.len())
.map(|i| dense.val[i].evaluate(&rand_ops))
.map(|i| dense.val[i].evaluate(rand_ops))
.collect::<Vec<Scalar>>();
// form a single decommitment using comm_comb_ops
@ -782,8 +770,10 @@ impl HashLayerProof {
evals_ops.extend(&eval_val_vec);
evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero());
evals_ops.append_to_transcript(b"claim_evals_ops", transcript);
let challenges_ops =
transcript.challenge_vector(b"challenge_combine_n_to_one", evals_ops.len().log2());
let challenges_ops = transcript.challenge_vector(
b"challenge_combine_n_to_one",
evals_ops.len().log2() as usize,
);
let mut poly_evals_ops = DensePolynomial::new(evals_ops);
for i in (0..challenges_ops.len()).rev() {
@ -809,8 +799,10 @@ impl HashLayerProof {
// form a single decommitment using comb_comb_mem at rand_mem
let evals_mem: Vec<Scalar> = vec![eval_row_audit_ts, eval_col_audit_ts];
evals_mem.append_to_transcript(b"claim_evals_mem", transcript);
let challenges_mem =
transcript.challenge_vector(b"challenge_combine_two_to_one", evals_mem.len().log2());
let challenges_mem = transcript.challenge_vector(
b"challenge_combine_two_to_one",
evals_mem.len().log2() as usize,
);
let mut poly_evals_mem = DensePolynomial::new(evals_mem);
for i in (0..challenges_mem.len()).rev() {
@ -889,7 +881,7 @@ impl HashLayerProof {
let eval_audit_addr = eval_init_addr;
let eval_audit_val = eval_init_val;
let hash_audit_at_rand_mem =
hash_func(&eval_audit_addr, &eval_audit_val, &eval_audit_ts) - r_multiset_check;
hash_func(&eval_audit_addr, &eval_audit_val, eval_audit_ts) - r_multiset_check;
assert_eq!(&hash_audit_at_rand_mem, claim_audit); // verify the last step of the sum-check for audit
Ok(())
@ -921,9 +913,9 @@ impl HashLayerProof {
assert!(self
.proof_derefs
.verify(
&rand_ops,
&eval_row_ops_val,
&eval_col_ops_val,
rand_ops,
eval_row_ops_val,
eval_col_ops_val,
&gens.gens_derefs,
comm_derefs,
transcript
@ -955,8 +947,10 @@ impl HashLayerProof {
evals_ops.extend(eval_val_vec);
evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero());
evals_ops.append_to_transcript(b"claim_evals_ops", transcript);
let challenges_ops =
transcript.challenge_vector(b"challenge_combine_n_to_one", evals_ops.len().log2());
let challenges_ops = transcript.challenge_vector(
b"challenge_combine_n_to_one",
evals_ops.len().log2() as usize,
);
let mut poly_evals_ops = DensePolynomial::new(evals_ops);
for i in (0..challenges_ops.len()).rev() {
@ -982,8 +976,10 @@ impl HashLayerProof {
// form a single decommitment using comb_comb_mem at rand_mem
let evals_mem: Vec<Scalar> = vec![*eval_row_audit_ts, *eval_col_audit_ts];
evals_mem.append_to_transcript(b"claim_evals_mem", transcript);
let challenges_mem =
transcript.challenge_vector(b"challenge_combine_two_to_one", evals_mem.len().log2());
let challenges_mem = transcript.challenge_vector(
b"challenge_combine_two_to_one",
evals_mem.len().log2() as usize,
);
let mut poly_evals_mem = DensePolynomial::new(evals_mem);
for i in (0..challenges_mem.len()).rev() {
@ -1009,11 +1005,11 @@ impl HashLayerProof {
let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_row;
assert!(HashLayerProof::verify_helper(
&(rand_mem, rand_ops),
&claims_row,
&eval_row_ops_val,
&eval_ops_addr,
&eval_read_ts,
&eval_audit_ts,
claims_row,
eval_row_ops_val,
eval_ops_addr,
eval_read_ts,
eval_audit_ts,
rx,
r_hash,
r_multiset_check,
@ -1023,11 +1019,11 @@ impl HashLayerProof {
let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_col;
assert!(HashLayerProof::verify_helper(
&(rand_mem, rand_ops),
&claims_col,
&eval_col_ops_val,
&eval_ops_addr,
&eval_read_ts,
&eval_audit_ts,
claims_col,
eval_col_ops_val,
eval_ops_addr,
eval_read_ts,
eval_audit_ts,
ry,
r_hash,
r_multiset_check,
@ -1523,7 +1519,7 @@ impl SparseMatPolyEvalProof {
let timer_eval_network = Timer::new("evalproof_layered_network");
let poly_eval_network_proof = PolyEvalNetworkProof::prove(
&mut net,
&dense,
dense,
&derefs,
evals,
gens,
@ -1641,11 +1637,11 @@ mod tests {
fn check_sparse_polyeval_proof() {
let mut csprng: OsRng = OsRng;
let num_nz_entries = 256;
let num_rows = 256;
let num_cols = 256;
let num_vars_x = num_rows.log2();
let num_vars_y = num_cols.log2();
let num_nz_entries: usize = 256;
let num_rows: usize = 256;
let num_cols: usize = 256;
let num_vars_x: usize = num_rows.log2() as usize;
let num_vars_y: usize = num_cols.log2() as usize;
let mut M: Vec<SparseMatEntry> = Vec::new();
@ -1667,8 +1663,7 @@ mod tests {
);
// commitment
let (poly_comm, dense) =
SparseMatPolynomial::multi_commit(&vec![&poly_M, &poly_M, &poly_M], &gens);
let (poly_comm, dense) = SparseMatPolynomial::multi_commit(&[&poly_M, &poly_M, &poly_M], &gens);
// evaluation
let rx: Vec<Scalar> = (0..num_vars_x)

+ 4
- 4
src/sumcheck.rs

@ -443,7 +443,7 @@ impl ZKSumcheckInstanceProof {
random_tape.random_vector(b"blinds_evals", num_rounds),
);
let mut claim_per_round = *claim;
let mut comm_claim_per_round = claim_per_round.commit(&blind_claim, &gens_1).compress();
let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();
let mut r: Vec<Scalar> = Vec::new();
let mut comm_polys: Vec<CompressedGroup> = Vec::new();
@ -526,7 +526,7 @@ impl ZKSumcheckInstanceProof {
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, &gens_1).compress(), comm_target);
assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
let a = {
// the vector to use to decommit for sum-check test
@ -606,7 +606,7 @@ impl ZKSumcheckInstanceProof {
);
let mut claim_per_round = *claim;
let mut comm_claim_per_round = claim_per_round.commit(&blind_claim, &gens_1).compress();
let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();
let mut r: Vec<Scalar> = Vec::new();
let mut comm_polys: Vec<CompressedGroup> = Vec::new();
@ -717,7 +717,7 @@ impl ZKSumcheckInstanceProof {
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, &gens_1).compress(), comm_target);
assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
let a = {
// the vector to use to decommit for sum-check test

+ 18
- 20
src/unipoly.rs

@ -25,7 +25,7 @@ impl UniPoly {
assert!(evals.len() == 3 || evals.len() == 4);
let coeffs = if evals.len() == 3 {
// ax^2 + bx + c
let two_inv = (2 as usize).to_scalar().invert().unwrap();
let two_inv = (2_usize).to_scalar().invert().unwrap();
let c = evals[0];
let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
@ -33,8 +33,8 @@ impl UniPoly {
vec![c, b, a]
} else {
// ax^3 + bx^2 + cx + d
let two_inv = (2 as usize).to_scalar().invert().unwrap();
let six_inv = (6 as usize).to_scalar().invert().unwrap();
let two_inv = (2_usize).to_scalar().invert().unwrap();
let six_inv = (6_usize).to_scalar().invert().unwrap();
let d = evals[0];
let a = six_inv
@ -102,9 +102,7 @@ impl CompressedUniPoly {
linear_term -= self.coeffs_except_linear_term[i];
}
let mut coeffs: Vec<Scalar> = Vec::new();
coeffs.push(self.coeffs_except_linear_term[0]);
coeffs.push(linear_term);
let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
coeffs.extend(&self.coeffs_except_linear_term[1..]);
assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
UniPoly { coeffs }
@ -130,8 +128,8 @@ mod tests {
fn test_from_evals_quad() {
// polynomial is 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = (6 as usize).to_scalar();
let e2 = (15 as usize).to_scalar();
let e1 = (6_usize).to_scalar();
let e2 = (15_usize).to_scalar();
let evals = vec![e0, e1, e2];
let poly = UniPoly::from_evals(&evals);
@ -139,8 +137,8 @@ mod tests {
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 3);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], (3 as usize).to_scalar());
assert_eq!(poly.coeffs[2], (2 as usize).to_scalar());
assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
let hint = e0 + e1;
let compressed_poly = poly.compress();
@ -149,17 +147,17 @@ mod tests {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e3 = (28 as usize).to_scalar();
assert_eq!(poly.evaluate(&(3 as usize).to_scalar()), e3);
let e3 = (28_usize).to_scalar();
assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3);
}
#[test]
fn test_from_evals_cubic() {
// polynomial is x^3 + 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = (7 as usize).to_scalar();
let e2 = (23 as usize).to_scalar();
let e3 = (55 as usize).to_scalar();
let e1 = (7_usize).to_scalar();
let e2 = (23_usize).to_scalar();
let e3 = (55_usize).to_scalar();
let evals = vec![e0, e1, e2, e3];
let poly = UniPoly::from_evals(&evals);
@ -167,9 +165,9 @@ mod tests {
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 4);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], (3 as usize).to_scalar());
assert_eq!(poly.coeffs[2], (2 as usize).to_scalar());
assert_eq!(poly.coeffs[3], (1 as usize).to_scalar());
assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
assert_eq!(poly.coeffs[3], (1_usize).to_scalar());
let hint = e0 + e1;
let compressed_poly = poly.compress();
@ -178,7 +176,7 @@ mod tests {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e4 = (109 as usize).to_scalar();
assert_eq!(poly.evaluate(&(4 as usize).to_scalar()), e4);
let e4 = (109_usize).to_scalar();
assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4);
}
}

Loading…
Cancel
Save