Browse Source

A more optimal preprocessing SNARK (#158)

* a more optimal preprocessing SNARK

* update version

* cleanup; address clippy
main
Srinath Setty 1 year ago
committed by GitHub
parent
commit
3b3ae70db3
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 2112 additions and 1595 deletions
  1. +1
    -1
      Cargo.toml
  2. +2
    -4
      benches/compressed-snark.rs
  3. +2
    -4
      examples/minroot.rs
  4. +3
    -0
      src/errors.rs
  5. +4
    -8
      src/lib.rs
  6. +2
    -2
      src/r1cs.rs
  7. +63
    -121
      src/spartan/mod.rs
  8. +1
    -9
      src/spartan/polynomial.rs
  9. +1317
    -0
      src/spartan/pp/mod.rs
  10. +629
    -0
      src/spartan/pp/product.rs
  11. +0
    -245
      src/spartan/spark/mod.rs
  12. +0
    -477
      src/spartan/spark/product.rs
  13. +0
    -724
      src/spartan/spark/sparse.rs
  14. +88
    -0
      src/spartan/sumcheck.rs

+ 1
- 1
Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "nova-snark"
version = "0.19.1"
version = "0.20.0"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2021"
description = "Recursive zkSNARKs without trusted setup"

+ 2
- 4
benches/compressed-snark.rs

@ -17,10 +17,8 @@ type G1 = pasta_curves::pallas::Point;
type G2 = pasta_curves::vesta::Point;
type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine<G2>;
type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine<G2, EE2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2>;
type C1 = NonTrivialTestCircuit<<G1 as Group>::Scalar>;
type C2 = TrivialTestCircuit<<G2 as Group>::Scalar>;

+ 2
- 4
examples/minroot.rs

@ -261,10 +261,8 @@ fn main() {
let start = Instant::now();
type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine<G2>;
type CC1 = nova_snark::spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = nova_snark::spartan::spark::TrivialCompComputationEngine<G2, EE2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;
type S1 = nova_snark::spartan::RelaxedR1CSSNARK<G1, EE1>;
type S2 = nova_snark::spartan::RelaxedR1CSSNARK<G2, EE2>;
let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark);
println!(

+ 3
- 0
src/errors.rs

@ -50,4 +50,7 @@ pub enum NovaError {
/// returned when the product proof check fails
#[error("InvalidProductProof")]
InvalidProductProof,
/// returned when the consistency with public IO and assignment used fails
#[error("IncorrectWitness")]
IncorrectWitness,
}

+ 4
- 8
src/lib.rs

@ -788,10 +788,8 @@ mod tests {
type G2 = pasta_curves::vesta::Point;
type EE1 = provider::ipa_pc::EvaluationEngine<G1>;
type EE2 = provider::ipa_pc::EvaluationEngine<G2>;
type CC1 = spartan::spark::TrivialCompComputationEngine<G1, EE1>;
type CC2 = spartan::spark::TrivialCompComputationEngine<G2, EE2>;
type S1 = spartan::RelaxedR1CSSNARK<G1, EE1, CC1>;
type S2 = spartan::RelaxedR1CSSNARK<G2, EE2, CC2>;
type S1 = spartan::RelaxedR1CSSNARK<G1, EE1>;
type S2 = spartan::RelaxedR1CSSNARK<G2, EE2>;
use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};
use core::marker::PhantomData;
use ff::PrimeField;
@ -1095,10 +1093,8 @@ mod tests {
assert_eq!(zn_secondary, vec![<G2 as Group>::Scalar::from(2460515u64)]);
// run the compressed snark with Spark compiler
type CC1Prime = spartan::spark::SparkEngine<G1>;
type CC2Prime = spartan::spark::SparkEngine<G2>;
type S1Prime = spartan::RelaxedR1CSSNARK<G1, EE1, CC1Prime>;
type S2Prime = spartan::RelaxedR1CSSNARK<G2, EE2, CC2Prime>;
type S1Prime = spartan::pp::RelaxedR1CSSNARK<G1, EE1>;
type S2Prime = spartan::pp::RelaxedR1CSSNARK<G2, EE2>;
// produce the prover and verifier keys for compressed snark
let (pk, vk) = CompressedSNARK::<_, _, _, _, S1Prime, S2Prime>::setup(&pp).unwrap();

+ 2
- 2
src/r1cs.rs

@ -75,8 +75,8 @@ impl R1CS {
pub fn commitment_key(S: &R1CSShape<G>) -> CommitmentKey<G> {
let num_cons = S.num_cons;
let num_vars = S.num_vars;
let num_nz = max(max(S.A.len(), S.B.len()), S.C.len());
G::CE::setup(b"ck", max(max(num_cons, num_vars), num_nz))
let total_nz = S.A.len() + S.B.len() + S.C.len();
G::CE::setup(b"ck", max(max(num_cons, num_vars), total_nz))
}
}

+ 63
- 121
src/spartan/mod.rs

@ -2,7 +2,7 @@
//! over the polynomial commitment and evaluation argument (i.e., a PCS)
mod math;
pub(crate) mod polynomial;
pub mod spark;
pub mod pp;
mod sumcheck;
use crate::{
@ -10,7 +10,6 @@ use crate::{
r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness},
traits::{
evaluation::EvaluationEngineTrait, snark::RelaxedR1CSSNARKTrait, Group, TranscriptEngineTrait,
TranscriptReprTrait,
},
Commitment, CommitmentKey,
};
@ -22,7 +21,6 @@ use serde::{Deserialize, Serialize};
use sumcheck::SumcheckProof;
/// A type that holds a witness to a polynomial evaluation instance
#[allow(dead_code)]
pub struct PolyEvalWitness<G: Group> {
p: Vec<G::Scalar>, // polynomial
}
@ -56,7 +54,6 @@ impl PolyEvalWitness {
}
/// A type that holds a polynomial evaluation instance
#[allow(dead_code)]
pub struct PolyEvalInstance<G: Group> {
c: Commitment<G>, // commitment to the polynomial
x: Vec<G::Scalar>, // evaluation point
@ -80,79 +77,20 @@ impl PolyEvalInstance {
}
}
/// A trait that defines the behavior of a computation commitment engine
pub trait CompCommitmentEngineTrait<G: Group> {
/// A type that holds opening hint
type Decommitment: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>;
/// A type that holds a commitment
type Commitment: Clone
+ Send
+ Sync
+ TranscriptReprTrait<G>
+ Serialize
+ for<'de> Deserialize<'de>;
/// A type that holds an evaluation argument
type EvaluationArgument: Send + Sync + Serialize + for<'de> Deserialize<'de>;
/// commits to R1CS matrices
fn commit(
ck: &CommitmentKey<G>,
S: &R1CSShape<G>,
) -> Result<(Self::Commitment, Self::Decommitment), NovaError>;
/// proves an evaluation of R1CS matrices viewed as polynomials
fn prove(
ck: &CommitmentKey<G>,
S: &R1CSShape<G>,
decomm: &Self::Decommitment,
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
transcript: &mut G::TE,
) -> Result<
(
Self::EvaluationArgument,
Vec<(PolyEvalWitness<G>, PolyEvalInstance<G>)>,
),
NovaError,
>;
/// verifies an evaluation of R1CS matrices viewed as polynomials and returns verified evaluations
fn verify(
comm: &Self::Commitment,
r: &(&[G::Scalar], &[G::Scalar]),
arg: &Self::EvaluationArgument,
transcript: &mut G::TE,
) -> Result<(G::Scalar, G::Scalar, G::Scalar, Vec<PolyEvalInstance<G>>), NovaError>;
}
/// A type that represents the prover's key
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct ProverKey<
G: Group,
EE: EvaluationEngineTrait<G, CE = G::CE>,
CC: CompCommitmentEngineTrait<G>,
> {
pub struct ProverKey<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
pk_ee: EE::ProverKey,
S: R1CSShape<G>,
decomm: CC::Decommitment,
comm: CC::Commitment,
}
/// A type that represents the verifier's key
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct VerifierKey<
G: Group,
EE: EvaluationEngineTrait<G, CE = G::CE>,
CC: CompCommitmentEngineTrait<G>,
> {
num_cons: usize,
num_vars: usize,
pub struct VerifierKey<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
vk_ee: EE::VerifierKey,
comm: CC::Commitment,
S: R1CSShape<G>,
}
/// A succinct proof of knowledge of a witness to a relaxed R1CS instance
@ -160,27 +98,22 @@ pub struct VerifierKey<
/// the commitment to a vector viewed as a polynomial commitment
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct RelaxedR1CSSNARK<
G: Group,
EE: EvaluationEngineTrait<G, CE = G::CE>,
CC: CompCommitmentEngineTrait<G>,
> {
pub struct RelaxedR1CSSNARK<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
sc_proof_outer: SumcheckProof<G>,
claims_outer: (G::Scalar, G::Scalar, G::Scalar),
eval_E: G::Scalar,
sc_proof_inner: SumcheckProof<G>,
eval_W: G::Scalar,
eval_arg_cc: CC::EvaluationArgument,
sc_proof_batch: SumcheckProof<G>,
evals_batch: Vec<G::Scalar>,
eval_arg: EE::EvaluationArgument,
}
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>, CC: CompCommitmentEngineTrait<G>>
RelaxedR1CSSNARKTrait<G> for RelaxedR1CSSNARK<G, EE, CC>
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> RelaxedR1CSSNARKTrait<G>
for RelaxedR1CSSNARK<G, EE>
{
type ProverKey = ProverKey<G, EE, CC>;
type VerifierKey = VerifierKey<G, EE, CC>;
type ProverKey = ProverKey<G, EE>;
type VerifierKey = VerifierKey<G, EE>;
fn setup(
ck: &CommitmentKey<G>,
@ -190,21 +123,12 @@ impl, CC: CompCommitmentEngin
let S = S.pad();
let (comm, decomm) = CC::commit(ck, &S)?;
let vk = VerifierKey {
num_cons: S.num_cons,
num_vars: S.num_vars,
vk_ee,
comm: comm.clone(),
S: S.clone(),
};
let pk = ProverKey {
pk_ee,
S,
comm,
decomm,
};
let pk = ProverKey { pk_ee, S };
Ok((pk, vk))
}
@ -225,8 +149,8 @@ impl, CC: CompCommitmentEngin
assert_eq!(pk.S.num_io.next_power_of_two(), pk.S.num_io);
assert!(pk.S.num_io < pk.S.num_vars);
// append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript
transcript.absorb(b"C", &pk.comm);
// append the digest of R1CS matrices and the RelaxedR1CSInstance to the transcript
transcript.absorb(b"S", &pk.S);
transcript.absorb(b"U", U);
// compute the full satisfying assignment by concatenating W.W, U.u, and U.X
@ -353,17 +277,8 @@ impl, CC: CompCommitmentEngin
&mut transcript,
)?;
// we now prove evaluations of R1CS matrices at (r_x, r_y)
let (eval_arg_cc, mut w_u_vec) = CC::prove(
ck,
&pk.S,
&pk.decomm,
&pk.comm,
&(&r_x, &r_y),
&mut transcript,
)?;
// add additional claims about W and E polynomials to the list from CC
let mut w_u_vec = Vec::new();
let eval_W = MultilinearPolynomial::evaluate_with(&W.W, &r_y[1..]);
w_u_vec.push((
PolyEvalWitness { p: W.W.clone() },
@ -475,7 +390,6 @@ impl, CC: CompCommitmentEngin
eval_E,
sc_proof_inner,
eval_W,
eval_arg_cc,
sc_proof_batch,
evals_batch: claims_batch_left,
eval_arg,
@ -486,13 +400,13 @@ impl, CC: CompCommitmentEngin
fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<G>) -> Result<(), NovaError> {
let mut transcript = G::TE::new(b"RelaxedR1CSSNARK");
// append the commitment to R1CS matrices and the RelaxedR1CSInstance to the transcript
transcript.absorb(b"C", &vk.comm);
// append the digest of R1CS matrices and the RelaxedR1CSInstance to the transcript
transcript.absorb(b"S", &vk.S);
transcript.absorb(b"U", U);
let (num_rounds_x, num_rounds_y) = (
(vk.num_cons as f64).log2() as usize,
((vk.num_vars as f64).log2() as usize + 1),
(vk.S.num_cons as f64).log2() as usize,
((vk.S.num_vars as f64).log2() as usize + 1),
);
// outer sum-check
@ -546,32 +460,60 @@ impl, CC: CompCommitmentEngin
.map(|i| (i + 1, U.X[i]))
.collect::<Vec<(usize, G::Scalar)>>(),
);
SparsePolynomial::new((vk.num_vars as f64).log2() as usize, poly_X).evaluate(&r_y[1..])
SparsePolynomial::new((vk.S.num_vars as f64).log2() as usize, poly_X).evaluate(&r_y[1..])
};
(G::Scalar::one() - r_y[0]) * self.eval_W + r_y[0] * eval_X
};
// verify evaluation argument to retrieve evaluations of R1CS matrices
let (eval_A, eval_B, eval_C, mut u_vec) =
CC::verify(&vk.comm, &(&r_x, &r_y), &self.eval_arg_cc, &mut transcript)?;
// compute evaluations of R1CS matrices
let multi_evaluate = |M_vec: &[&[(usize, usize, G::Scalar)]],
r_x: &[G::Scalar],
r_y: &[G::Scalar]|
-> Vec<G::Scalar> {
let evaluate_with_table =
|M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar {
(0..M.len())
.collect::<Vec<usize>>()
.par_iter()
.map(|&i| {
let (row, col, val) = M[i];
T_x[row] * T_y[col] * val
})
.reduce(G::Scalar::zero, |acc, x| acc + x)
};
let (T_x, T_y) = rayon::join(
|| EqPolynomial::new(r_x.to_vec()).evals(),
|| EqPolynomial::new(r_y.to_vec()).evals(),
);
(0..M_vec.len())
.collect::<Vec<usize>>()
.par_iter()
.map(|&i| evaluate_with_table(M_vec[i], &T_x, &T_y))
.collect()
};
let evals = multi_evaluate(&[&vk.S.A, &vk.S.B, &vk.S.C], &r_x, &r_y);
let claim_inner_final_expected = (eval_A + r * eval_B + r * r * eval_C) * eval_Z;
let claim_inner_final_expected = (evals[0] + r * evals[1] + r * r * evals[2]) * eval_Z;
if claim_inner_final != claim_inner_final_expected {
return Err(NovaError::InvalidSumcheckProof);
}
// add additional claims about W and E polynomials to the list from CC
u_vec.push(PolyEvalInstance {
c: U.comm_W,
x: r_y[1..].to_vec(),
e: self.eval_W,
});
u_vec.push(PolyEvalInstance {
c: U.comm_E,
x: r_x,
e: self.eval_E,
});
// add claims about W and E polynomials
let u_vec: Vec<PolyEvalInstance<G>> = vec![
PolyEvalInstance {
c: U.comm_W,
x: r_y[1..].to_vec(),
e: self.eval_W,
},
PolyEvalInstance {
c: U.comm_E,
x: r_x,
e: self.eval_E,
},
];
let u_vec_padded = PolyEvalInstance::pad(&u_vec); // pad the evaluation points

+ 1
- 9
src/spartan/polynomial.rs

@ -107,14 +107,6 @@ impl MultilinearPolynomial {
.map(|(a, b)| a * b)
.reduce(Scalar::zero, |x, y| x + y)
}
pub fn split(&self, idx: usize) -> (Self, Self) {
assert!(idx < self.len());
(
Self::new(self.Z[..idx].to_vec()),
Self::new(self.Z[idx..2 * idx].to_vec()),
)
}
}
impl<Scalar: PrimeField> Index<usize> for MultilinearPolynomial<Scalar> {
@ -149,7 +141,7 @@ impl SparsePolynomial {
chi_i
}
// Takes O(n log n). TODO: do this in O(n) where n is the number of entries in Z
// Takes O(n log n)
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
assert_eq!(self.num_vars, r.len());

+ 1317
- 0
src/spartan/pp/mod.rs
File diff suppressed because it is too large
View File


+ 629
- 0
src/spartan/pp/product.rs

@ -0,0 +1,629 @@
use crate::{
errors::NovaError,
spartan::{
math::Math,
polynomial::{EqPolynomial, MultilinearPolynomial},
sumcheck::{CompressedUniPoly, SumcheckProof, UniPoly},
PolyEvalInstance, PolyEvalWitness,
},
traits::{commitment::CommitmentEngineTrait, Group, TranscriptEngineTrait},
Commitment, CommitmentKey,
};
use core::marker::PhantomData;
use ff::{Field, PrimeField};
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
/// The multilinear extension of the "identity" function over the boolean hypercube:
/// evaluated at the bit-string of an index i (most-significant bit first), it returns
/// the scalar `i`. Used to address positions inside committed vectors.
pub(crate) struct IdentityPolynomial<Scalar: PrimeField> {
  // number of variables, i.e., log2 of the domain size
  ell: usize,
  // binds the type to a concrete scalar field without storing any data
  _p: PhantomData<Scalar>,
}
impl<Scalar: PrimeField> IdentityPolynomial<Scalar> {
  /// Creates an identity polynomial over `ell` variables.
  pub fn new(ell: usize) -> Self {
    IdentityPolynomial {
      ell,
      _p: Default::default(),
    }
  }

  /// Evaluates the polynomial at the point `r`, i.e., computes
  /// sum_i 2^{ell-1-i} * r[i], which for a boolean `r` recovers the index it encodes.
  ///
  /// # Panics
  /// Panics if `r.len() != ell`, or if `ell > 64` (the power of two would not fit in a `u64`).
  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
    assert_eq!(self.ell, r.len());
    (0..self.ell)
      .map(|i| {
        // Compute 2^{ell-1-i} directly in u64: the original `2_usize.pow(..) as u64`
        // silently depended on the target's usize width (overflow for ell >= 32 on
        // 32-bit targets); a u64 shift is portable for all ell <= 64.
        Scalar::from(1u64 << (self.ell - i - 1)) * r[i]
      })
      .fold(Scalar::zero(), |acc, item| acc + item)
  }
}
impl<G: Group> SumcheckProof<G> {
  /// Runs a batched sum-check for several cubic claims of the shape
  /// sum_x A(x) * comb(B_j(x), C_j(x), D_j(x)), where a single polynomial `A`
  /// (here an eq polynomial) is shared across all claims and the claims are
  /// combined with the random weights in `coeffs`.
  ///
  /// Returns the round polynomials (compressed), the verifier's challenges `r`,
  /// and the final evaluations of A and of each B_j, C_j, D_j at `r`.
  ///
  /// NOTE(review): the transcript absorb/squeeze order below is part of the
  /// Fiat-Shamir protocol and must match the verifier exactly — do not reorder.
  pub fn prove_cubic_with_additive_term_batched<F>(
    claim: &G::Scalar,
    num_rounds: usize,
    poly_vec: (
      &mut MultilinearPolynomial<G::Scalar>,
      &mut Vec<MultilinearPolynomial<G::Scalar>>,
      &mut Vec<MultilinearPolynomial<G::Scalar>>,
      &mut Vec<MultilinearPolynomial<G::Scalar>>,
    ),
    coeffs: &[G::Scalar],
    comb_func: F,
    transcript: &mut G::TE,
  ) -> Result<
    (
      Self,
      Vec<G::Scalar>,
      (G::Scalar, Vec<G::Scalar>, Vec<G::Scalar>, Vec<G::Scalar>),
    ),
    NovaError,
  >
  where
    F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar,
  {
    let (poly_A, poly_B_vec, poly_C_vec, poly_D_vec) = poly_vec;

    // `e` tracks the running claim; each round it becomes the round polynomial
    // evaluated at the fresh challenge.
    let mut e = *claim;
    let mut r: Vec<G::Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly<G>> = Vec::new();

    for _j in 0..num_rounds {
      // per-claim evaluations of the round polynomial at the points 0, 2, and 3
      // (the evaluation at 1 is derived from `e` below, so it is not computed here)
      let mut evals: Vec<(G::Scalar, G::Scalar, G::Scalar)> = Vec::new();
      for ((poly_B, poly_C), poly_D) in poly_B_vec
        .iter()
        .zip(poly_C_vec.iter())
        .zip(poly_D_vec.iter())
      {
        let mut eval_point_0 = G::Scalar::zero();
        let mut eval_point_2 = G::Scalar::zero();
        let mut eval_point_3 = G::Scalar::zero();

        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
          eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);

          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
          let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
          let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
          let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
          eval_point_2 += comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
            &poly_D_bound_point,
          );

          // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
          let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
          let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
          let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
          let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
          eval_point_3 += comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
            &poly_D_bound_point,
          );
        }

        evals.push((eval_point_0, eval_point_2, eval_point_3));
      }

      // combine the per-claim evaluations with the batching coefficients
      let evals_combined_0 = (0..evals.len())
        .map(|i| evals[i].0 * coeffs[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);
      let evals_combined_2 = (0..evals.len())
        .map(|i| evals[i].1 * coeffs[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);
      let evals_combined_3 = (0..evals.len())
        .map(|i| evals[i].2 * coeffs[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);

      // evaluation at 1 is e - evaluation at 0, since the round polynomial
      // must satisfy poly(0) + poly(1) = e
      let evals = vec![
        evals_combined_0,
        e - evals_combined_0,
        evals_combined_2,
        evals_combined_3,
      ];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      transcript.absorb(b"p", &poly);

      // derive the verifier's challenge for the next round
      let r_i = transcript.squeeze(b"c")?;
      r.push(r_i);

      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_i);
      for ((poly_B, poly_C), poly_D) in poly_B_vec
        .iter_mut()
        .zip(poly_C_vec.iter_mut())
        .zip(poly_D_vec.iter_mut())
      {
        poly_B.bound_poly_var_top(&r_i);
        poly_C.bound_poly_var_top(&r_i);
        poly_D.bound_poly_var_top(&r_i);
      }

      e = poly.evaluate(&r_i);
      cubic_polys.push(poly.compress());
    }

    // after all rounds, each table has been folded down to a single entry:
    // these are the claimed evaluations at the challenge point r
    let poly_B_final = (0..poly_B_vec.len()).map(|i| poly_B_vec[i][0]).collect();
    let poly_C_final = (0..poly_C_vec.len()).map(|i| poly_C_vec[i][0]).collect();
    let poly_D_final = (0..poly_D_vec.len()).map(|i| poly_D_vec[i][0]).collect();
    let claims_prod = (poly_A[0], poly_B_final, poly_C_final, poly_D_final);

    Ok((SumcheckProof::new(cubic_polys), r, claims_prod))
  }
}
/// Provides a product argument using the algorithm described by Setty-Lee, 2020
#[derive(Serialize, Deserialize)]
#[serde(bound = "")]
pub struct ProductArgument<G: Group> {
  // commitments to the per-claim output vectors (all intermediate layer products)
  comm_output_vec: Vec<Commitment<G>>,
  // batched sum-check proof over all product claims
  sc_proof: SumcheckProof<G>,
  // claimed evaluations of left/right/output at the sum-check challenge `rand`
  eval_left_vec: Vec<G::Scalar>,
  eval_right_vec: Vec<G::Scalar>,
  eval_output_vec: Vec<G::Scalar>,
  // claimed evaluations of input/output at the extended point rand_ext[1..]
  eval_input_vec: Vec<G::Scalar>,
  eval_output2_vec: Vec<G::Scalar>,
}
impl<G: Group> ProductArgument<G> {
  /// Proves, for each vector in `input_vec`, that a claimed scalar is the product
  /// of the vector's entries, using a layered binary product circuit and one
  /// batched sum-check over all claims.
  ///
  /// Returns the argument, the claimed products, the evaluation point
  /// `rand_ext[1..]`, the claimed input evaluations at that point, and
  /// polynomial-evaluation (witness, instance) pairs for the caller to batch
  /// into the final evaluation argument.
  ///
  /// NOTE(review): the transcript absorb/squeeze sequence must mirror `verify`
  /// exactly — do not reorder any of the labelled operations.
  pub fn prove(
    ck: &CommitmentKey<G>,
    input_vec: &[Vec<G::Scalar>], // the input vectors whose entries are to be multiplied together
    transcript: &mut G::TE,
  ) -> Result<
    (
      Self,
      Vec<G::Scalar>,
      Vec<G::Scalar>,
      Vec<G::Scalar>,
      Vec<(PolyEvalWitness<G>, PolyEvalInstance<G>)>,
    ),
    NovaError,
  > {
    let num_claims = input_vec.len();

    // one layer of the product tree: pair up adjacent entries and multiply
    let compute_layer = |input: &[G::Scalar]| -> (Vec<G::Scalar>, Vec<G::Scalar>, Vec<G::Scalar>) {
      let left = (0..input.len() / 2)
        .map(|i| input[2 * i])
        .collect::<Vec<G::Scalar>>();
      let right = (0..input.len() / 2)
        .map(|i| input[2 * i + 1])
        .collect::<Vec<G::Scalar>>();
      assert_eq!(left.len(), right.len());

      let output = (0..left.len())
        .map(|i| left[i] * right[i])
        .collect::<Vec<G::Scalar>>();
      (left, right, output)
    };

    // a closure that returns left, right, output, product
    let prepare_inputs =
      |input: &[G::Scalar]| -> (Vec<G::Scalar>, Vec<G::Scalar>, Vec<G::Scalar>, G::Scalar) {
        let mut left: Vec<G::Scalar> = Vec::new();
        let mut right: Vec<G::Scalar> = Vec::new();
        let mut output: Vec<G::Scalar> = Vec::new();

        // run the full tree: log2(n) layers, concatenating the wires of every layer
        let mut out = input.to_vec();
        for _i in 0..input.len().log_2() {
          let (l, r, o) = compute_layer(&out);
          out = o.clone();

          left.extend(l);
          right.extend(r);
          output.extend(o);
        }

        // add a dummy product operation to make the left.len() == right.len() == output.len() == input.len()
        left.push(output[output.len() - 1]);
        right.push(G::Scalar::zero());
        output.push(G::Scalar::zero());

        // output is stored at the last but one position
        let product = output[output.len() - 2];

        assert_eq!(left.len(), right.len());
        assert_eq!(left.len(), output.len());
        (left, right, output, product)
      };

    let mut left_vec = Vec::new();
    let mut right_vec = Vec::new();
    let mut output_vec = Vec::new();
    let mut prod_vec = Vec::new();
    for input in input_vec {
      let (l, r, o, p) = prepare_inputs(input);
      left_vec.push(l);
      right_vec.push(r);
      output_vec.push(o);
      prod_vec.push(p);
    }

    // commit to the outputs
    let comm_output_vec = (0..output_vec.len())
      .into_par_iter()
      .map(|i| G::CE::commit(ck, &output_vec[i]))
      .collect::<Vec<_>>();

    // absorb the output commitment and the claimed product
    transcript.absorb(b"o", &comm_output_vec.as_slice());
    transcript.absorb(b"r", &prod_vec.as_slice());

    // this assumes all vectors passed have the same length
    let num_rounds = output_vec[0].len().log_2();

    // produce a fresh set of coeffs and a joint claim
    // (powers s, s^2, ..., s^num_claims of a single challenge s)
    let coeff_vec = {
      let s = transcript.squeeze(b"r")?;
      let mut s_vec = vec![s];
      for i in 1..num_claims {
        s_vec.push(s_vec[i - 1] * s);
      }
      s_vec
    };

    // generate randomness for the eq polynomial
    let rand_eq = (0..num_rounds)
      .map(|_i| transcript.squeeze(b"e"))
      .collect::<Result<Vec<G::Scalar>, NovaError>>()?;

    let mut poly_A = MultilinearPolynomial::new(EqPolynomial::new(rand_eq).evals());
    let mut poly_B_vec = left_vec
      .clone()
      .into_par_iter()
      .map(MultilinearPolynomial::new)
      .collect::<Vec<_>>();
    let mut poly_C_vec = right_vec
      .clone()
      .into_par_iter()
      .map(MultilinearPolynomial::new)
      .collect::<Vec<_>>();
    let mut poly_D_vec = output_vec
      .clone()
      .into_par_iter()
      .map(MultilinearPolynomial::new)
      .collect::<Vec<_>>();

    // the sum-check claim eq * (left * right - output) must be zero everywhere
    // iff every product gate is satisfied
    let comb_func =
      |poly_A_comp: &G::Scalar,
       poly_B_comp: &G::Scalar,
       poly_C_comp: &G::Scalar,
       poly_D_comp: &G::Scalar|
       -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) };
    let (sc_proof, rand, _claims) = SumcheckProof::prove_cubic_with_additive_term_batched(
      &G::Scalar::zero(),
      num_rounds,
      (
        &mut poly_A,
        &mut poly_B_vec,
        &mut poly_C_vec,
        &mut poly_D_vec,
      ),
      &coeff_vec,
      comb_func,
      transcript,
    )?;

    // claims[0] is about the Eq polynomial, which the verifier computes directly
    // claims[1] =? weighted sum of left(rand)
    // claims[2] =? weighted sum of right(rand)
    // claims[3] =? weighted sum of output(rand), which is easy to verify by querying output
    // we also need to prove that output(output.len()-2) = claimed_product
    let eval_left_vec = (0..left_vec.len())
      .into_par_iter()
      .map(|i| MultilinearPolynomial::evaluate_with(&left_vec[i], &rand))
      .collect::<Vec<G::Scalar>>();
    let eval_right_vec = (0..right_vec.len())
      .into_par_iter()
      .map(|i| MultilinearPolynomial::evaluate_with(&right_vec[i], &rand))
      .collect::<Vec<G::Scalar>>();
    let eval_output_vec = (0..output_vec.len())
      .into_par_iter()
      .map(|i| MultilinearPolynomial::evaluate_with(&output_vec[i], &rand))
      .collect::<Vec<G::Scalar>>();

    // we now combine eval_left = left(rand) and eval_right = right(rand)
    // into claims about input and output
    transcript.absorb(b"l", &eval_left_vec.as_slice());
    transcript.absorb(b"r", &eval_right_vec.as_slice());
    transcript.absorb(b"o", &eval_output_vec.as_slice());
    let c = transcript.squeeze(b"c")?;

    // eval = (G::Scalar::one() - c) * eval_left + c * eval_right
    // eval is claimed evaluation of input||output(r, c), which can be proven by proving input(r[1..], c) and output(r[1..], c)
    let rand_ext = {
      let mut r = rand.clone();
      r.extend(&[c]);
      r
    };
    let eval_input_vec = (0..input_vec.len())
      .into_par_iter()
      .map(|i| MultilinearPolynomial::evaluate_with(&input_vec[i], &rand_ext[1..]))
      .collect::<Vec<G::Scalar>>();

    let eval_output2_vec = (0..output_vec.len())
      .into_par_iter()
      .map(|i| MultilinearPolynomial::evaluate_with(&output_vec[i], &rand_ext[1..]))
      .collect::<Vec<G::Scalar>>();

    // add claimed evaluations to the transcript
    transcript.absorb(b"i", &eval_input_vec.as_slice());
    transcript.absorb(b"o", &eval_output2_vec.as_slice());

    // squeeze a challenge to combine multiple claims into one
    let powers_of_rho = {
      let s = transcript.squeeze(b"r")?;
      let mut s_vec = vec![s];
      for i in 1..num_claims {
        s_vec.push(s_vec[i - 1] * s);
      }
      s_vec
    };

    // take weighted sum of input, output, and their commitments
    // (random linear combination collapses the per-claim checks into one)
    let product = prod_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(e, p)| *e * p)
      .fold(G::Scalar::zero(), |acc, item| acc + item);

    let eval_output = eval_output_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(e, p)| *e * p)
      .fold(G::Scalar::zero(), |acc, item| acc + item);

    let comm_output = comm_output_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(c, r_i)| *c * *r_i)
      .fold(Commitment::<G>::default(), |acc, item| acc + item);

    let weighted_sum = |W: &[Vec<G::Scalar>], s: &[G::Scalar]| -> Vec<G::Scalar> {
      assert_eq!(W.len(), s.len());
      let mut p = vec![G::Scalar::zero(); W[0].len()];
      for i in 0..W.len() {
        for (j, item) in W[i].iter().enumerate().take(W[i].len()) {
          p[j] += *item * s[i]
        }
      }
      p
    };

    let poly_output = weighted_sum(&output_vec, &powers_of_rho);

    let eval_output2 = eval_output2_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(e, p)| *e * p)
      .fold(G::Scalar::zero(), |acc, item| acc + item);

    // three evaluation claims about the combined output polynomial,
    // to be discharged by the caller's batched evaluation argument
    let mut w_u_vec = Vec::new();

    // eval_output = output(rand)
    w_u_vec.push((
      PolyEvalWitness {
        p: poly_output.clone(),
      },
      PolyEvalInstance {
        c: comm_output,
        x: rand.clone(),
        e: eval_output,
      },
    ));

    // claimed_product = output(1, ..., 1, 0)
    let x = {
      let mut x = vec![G::Scalar::one(); rand.len()];
      x[rand.len() - 1] = G::Scalar::zero();
      x
    };
    w_u_vec.push((
      PolyEvalWitness {
        p: poly_output.clone(),
      },
      PolyEvalInstance {
        c: comm_output,
        x,
        e: product,
      },
    ));

    // eval_output2 = output(rand_ext[1..])
    w_u_vec.push((
      PolyEvalWitness { p: poly_output },
      PolyEvalInstance {
        c: comm_output,
        x: rand_ext[1..].to_vec(),
        e: eval_output2,
      },
    ));

    let prod_arg = Self {
      comm_output_vec,
      sc_proof,

      // claimed evaluations at rand
      eval_left_vec,
      eval_right_vec,
      eval_output_vec,

      // claimed evaluations at rand_ext[1..]
      eval_input_vec: eval_input_vec.clone(),
      eval_output2_vec,
    };

    Ok((
      prod_arg,
      prod_vec,
      rand_ext[1..].to_vec(),
      eval_input_vec,
      w_u_vec,
    ))
  }

  /// Verifies the product argument against `prod_vec` (the claimed products) for
  /// input vectors of length `len`.
  ///
  /// Returns the claimed input evaluations, the evaluation point `rand_ext[1..]`,
  /// and evaluation instances about the combined output polynomial; the
  /// input-related claims and the returned instances are checked by the caller.
  pub fn verify(
    &self,
    prod_vec: &[G::Scalar], // claimed products
    len: usize,
    transcript: &mut G::TE,
  ) -> Result<(Vec<G::Scalar>, Vec<G::Scalar>, Vec<PolyEvalInstance<G>>), NovaError> {
    // absorb the provided commitment and claimed output
    transcript.absorb(b"o", &self.comm_output_vec.as_slice());
    transcript.absorb(b"r", &prod_vec.to_vec().as_slice());

    let num_rounds = len.log_2();
    let num_claims = prod_vec.len();

    // produce a fresh set of coeffs and a joint claim
    // (must match the prover's derivation exactly)
    let coeff_vec = {
      let s = transcript.squeeze(b"r")?;
      let mut s_vec = vec![s];
      for i in 1..num_claims {
        s_vec.push(s_vec[i - 1] * s);
      }
      s_vec
    };

    // generate randomness for the eq polynomial
    let rand_eq = (0..num_rounds)
      .map(|_i| transcript.squeeze(b"e"))
      .collect::<Result<Vec<G::Scalar>, NovaError>>()?;

    let (final_claim, rand) = self.sc_proof.verify(
      G::Scalar::zero(), // claim
      num_rounds,
      3, // degree bound
      transcript,
    )?;

    // verify the final claim along with output[output.len() - 2 ] = claim
    // the verifier evaluates the eq polynomial itself and recombines the
    // prover's claimed left/right/output evaluations
    let eq = EqPolynomial::new(rand_eq).evaluate(&rand);
    let final_claim_expected = (0..num_claims)
      .map(|i| {
        coeff_vec[i]
          * eq
          * (self.eval_left_vec[i] * self.eval_right_vec[i] - self.eval_output_vec[i])
      })
      .fold(G::Scalar::zero(), |acc, item| acc + item);
    if final_claim != final_claim_expected {
      return Err(NovaError::InvalidSumcheckProof);
    }

    transcript.absorb(b"l", &self.eval_left_vec.as_slice());
    transcript.absorb(b"r", &self.eval_right_vec.as_slice());
    transcript.absorb(b"o", &self.eval_output_vec.as_slice());
    let c = transcript.squeeze(b"c")?;

    // merge the left/right claims with the challenge c
    let eval_vec = self
      .eval_left_vec
      .iter()
      .zip(self.eval_right_vec.iter())
      .map(|(l, r)| (G::Scalar::one() - c) * l + c * r)
      .collect::<Vec<G::Scalar>>();

    // eval is claimed evaluation of input||output(r, c), which can be proven by proving input(r[1..], c) and output(r[1..], c)
    let rand_ext = {
      let mut r = rand.clone();
      r.extend(&[c]);
      r
    };

    // check that each merged claim is consistent with the claimed input/output
    // evaluations at rand_ext[1..]
    for (i, eval) in eval_vec.iter().enumerate() {
      if *eval
        != (G::Scalar::one() - rand_ext[0]) * self.eval_input_vec[i]
          + rand_ext[0] * self.eval_output2_vec[i]
      {
        return Err(NovaError::InvalidSumcheckProof);
      }
    }

    transcript.absorb(b"i", &self.eval_input_vec.as_slice());
    transcript.absorb(b"o", &self.eval_output2_vec.as_slice());

    // squeeze a challenge to combine multiple claims into one
    let powers_of_rho = {
      let s = transcript.squeeze(b"r")?;
      let mut s_vec = vec![s];
      for i in 1..num_claims {
        s_vec.push(s_vec[i - 1] * s);
      }
      s_vec
    };

    // take weighted sum of input, output, and their commitments
    let product = prod_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(e, p)| *e * p)
      .fold(G::Scalar::zero(), |acc, item| acc + item);

    let eval_output = self
      .eval_output_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(e, p)| *e * p)
      .fold(G::Scalar::zero(), |acc, item| acc + item);

    let comm_output = self
      .comm_output_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(c, r_i)| *c * *r_i)
      .fold(Commitment::<G>::default(), |acc, item| acc + item);

    let eval_output2 = self
      .eval_output2_vec
      .iter()
      .zip(powers_of_rho.iter())
      .map(|(e, p)| *e * p)
      .fold(G::Scalar::zero(), |acc, item| acc + item);

    // evaluation instances for the caller's batched evaluation argument
    let mut u_vec = Vec::new();

    // eval_output = output(rand)
    u_vec.push(PolyEvalInstance {
      c: comm_output,
      x: rand.clone(),
      e: eval_output,
    });

    // claimed_product = output(1, ..., 1, 0)
    let x = {
      let mut x = vec![G::Scalar::one(); rand.len()];
      x[rand.len() - 1] = G::Scalar::zero();
      x
    };
    u_vec.push(PolyEvalInstance {
      c: comm_output,
      x,
      e: product,
    });

    // eval_output2 = output(rand_ext[1..])
    u_vec.push(PolyEvalInstance {
      c: comm_output,
      x: rand_ext[1..].to_vec(),
      e: eval_output2,
    });

    // input-related claims are checked by the caller
    Ok((self.eval_input_vec.clone(), rand_ext[1..].to_vec(), u_vec))
  }
}

+ 0
- 245
src/spartan/spark/mod.rs

@ -1,245 +0,0 @@
//! This module implements `CompCommitmentEngineTrait` using Spartan's SPARK compiler
//! We also provide a trivial implementation that has the verifier evaluate the sparse polynomials
use crate::{
errors::NovaError,
r1cs::R1CSShape,
spartan::{math::Math, CompCommitmentEngineTrait, PolyEvalInstance, PolyEvalWitness},
traits::{evaluation::EvaluationEngineTrait, Group, TranscriptReprTrait},
CommitmentKey,
};
use core::marker::PhantomData;
use serde::{Deserialize, Serialize};
/// A trivial implementation of `ComputationCommitmentEngineTrait`
pub struct TrivialCompComputationEngine<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> {
_p: PhantomData<G>,
_p2: PhantomData<EE>,
}
/// Provides an implementation of a trivial commitment
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct TrivialCommitment<G: Group> {
S: R1CSShape<G>,
}
/// Provides an implementation of a trivial decommitment
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct TrivialDecommitment<G: Group> {
_p: PhantomData<G>,
}
/// Provides an implementation of a trivial evaluation argument
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct TrivialEvaluationArgument<G: Group> {
_p: PhantomData<G>,
}
impl<G: Group> TranscriptReprTrait<G> for TrivialCommitment<G> {
fn to_transcript_bytes(&self) -> Vec<u8> {
self.S.to_transcript_bytes()
}
}
impl<G: Group, EE: EvaluationEngineTrait<G, CE = G::CE>> CompCommitmentEngineTrait<G>
  for TrivialCompComputationEngine<G, EE>
{
  type Decommitment = TrivialDecommitment<G>;
  type Commitment = TrivialCommitment<G>;
  type EvaluationArgument = TrivialEvaluationArgument<G>;

  /// commits to R1CS matrices
  fn commit(
    _ck: &CommitmentKey<G>,
    S: &R1CSShape<G>,
  ) -> Result<(Self::Commitment, Self::Decommitment), NovaError> {
    // the "commitment" is a verbatim copy of the shape, so the commitment key is unused
    Ok((
      TrivialCommitment { S: S.clone() },
      TrivialDecommitment {
        _p: Default::default(),
      },
    ))
  }

  /// proves an evaluation of R1CS matrices viewed as polynomials
  fn prove(
    _ck: &CommitmentKey<G>,
    _S: &R1CSShape<G>,
    _decomm: &Self::Decommitment,
    _comm: &Self::Commitment,
    _r: &(&[G::Scalar], &[G::Scalar]),
    _transcript: &mut G::TE,
  ) -> Result<
    (
      Self::EvaluationArgument,
      Vec<(PolyEvalWitness<G>, PolyEvalInstance<G>)>,
    ),
    NovaError,
  > {
    // nothing to prove: the verifier evaluates the matrices itself in `verify`,
    // so there are no outstanding evaluation claims
    Ok((
      TrivialEvaluationArgument {
        _p: Default::default(),
      },
      Vec::new(),
    ))
  }

  /// verifies an evaluation of R1CS matrices viewed as polynomials
  fn verify(
    comm: &Self::Commitment,
    r: &(&[G::Scalar], &[G::Scalar]),
    _arg: &Self::EvaluationArgument,
    _transcript: &mut G::TE,
  ) -> Result<(G::Scalar, G::Scalar, G::Scalar, Vec<PolyEvalInstance<G>>), NovaError> {
    // the verifier evaluates A, B, and C directly at (r_x, r_y); no claims remain
    let (r_x, r_y) = r;
    let evals = SparsePolynomial::<G>::multi_evaluate(&[&comm.S.A, &comm.S.B, &comm.S.C], r_x, r_y);
    Ok((evals[0], evals[1], evals[2], Vec::new()))
  }
}
mod product;
mod sparse;
use sparse::{SparseEvaluationArgument, SparsePolynomial, SparsePolynomialCommitment};
/// A non-trivial implementation of `CompCommitmentEngineTrait` using Spartan's SPARK compiler
pub struct SparkEngine<G: Group> {
  _p: PhantomData<G>, // zero-sized; ties the engine to a group
}
/// An implementation of Spark decommitment: one sparse multilinear polynomial
/// per R1CS matrix
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparkDecommitment<G: Group> {
  A: SparsePolynomial<G>, // sparse polynomial for matrix A
  B: SparsePolynomial<G>, // sparse polynomial for matrix B
  C: SparsePolynomial<G>, // sparse polynomial for matrix C
}
impl<G: Group> SparkDecommitment<G> {
  /// Builds sparse-polynomial representations of the three R1CS matrices.
  fn new(S: &R1CSShape<G>) -> Self {
    // each matrix is viewed as a multilinear polynomial over
    // (log(num_cons), log(num_vars) + 1) Boolean variables
    let num_vars = (S.num_cons.log_2(), S.num_vars.log_2() + 1);
    Self {
      A: SparsePolynomial::new(num_vars, &S.A),
      B: SparsePolynomial::new(num_vars, &S.B),
      C: SparsePolynomial::new(num_vars, &S.C),
    }
  }

  /// Commits to each of the three sparse polynomials, in A, B, C order.
  fn commit(&self, ck: &CommitmentKey<G>) -> SparkCommitment<G> {
    SparkCommitment {
      comm_A: self.A.commit(ck),
      comm_B: self.B.commit(ck),
      comm_C: self.C.commit(ck),
    }
  }
}
/// An implementation of Spark commitment: one sparse-polynomial commitment per
/// R1CS matrix
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparkCommitment<G: Group> {
  comm_A: SparsePolynomialCommitment<G>, // commitment to matrix A
  comm_B: SparsePolynomialCommitment<G>, // commitment to matrix B
  comm_C: SparsePolynomialCommitment<G>, // commitment to matrix C
}
impl<G: Group> TranscriptReprTrait<G> for SparkCommitment<G> {
  /// Serializes the three matrix commitments by concatenating their transcript
  /// representations, in A, B, C order.
  fn to_transcript_bytes(&self) -> Vec<u8> {
    [&self.comm_A, &self.comm_B, &self.comm_C]
      .iter()
      .flat_map(|comm| comm.to_transcript_bytes())
      .collect()
  }
}
/// Provides an implementation of the Spark evaluation argument:
/// one sparse-polynomial evaluation argument per R1CS matrix
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparkEvaluationArgument<G: Group> {
  arg_A: SparseEvaluationArgument<G>, // evaluation argument for matrix A
  arg_B: SparseEvaluationArgument<G>, // evaluation argument for matrix B
  arg_C: SparseEvaluationArgument<G>, // evaluation argument for matrix C
}
impl<G: Group> CompCommitmentEngineTrait<G> for SparkEngine<G> {
  type Decommitment = SparkDecommitment<G>;
  type Commitment = SparkCommitment<G>;
  type EvaluationArgument = SparkEvaluationArgument<G>;

  /// commits to R1CS matrices
  fn commit(
    ck: &CommitmentKey<G>,
    S: &R1CSShape<G>,
  ) -> Result<(Self::Commitment, Self::Decommitment), NovaError> {
    // build the dense (Spark) representation of A, B, C, then commit to it
    let sparse = SparkDecommitment::new(S);
    let comm = sparse.commit(ck);
    Ok((comm, sparse))
  }

  /// proves an evaluation of R1CS matrices viewed as polynomials
  fn prove(
    ck: &CommitmentKey<G>,
    S: &R1CSShape<G>,
    decomm: &Self::Decommitment,
    comm: &Self::Commitment,
    r: &(&[G::Scalar], &[G::Scalar]),
    transcript: &mut G::TE,
  ) -> Result<
    (
      Self::EvaluationArgument,
      Vec<(PolyEvalWitness<G>, PolyEvalInstance<G>)>,
    ),
    NovaError,
  > {
    // one sparse evaluation argument per matrix, proved in A, B, C order
    // (the verifier below consumes the transcript in the same order)
    let (arg_A, u_w_vec_A) =
      SparseEvaluationArgument::prove(ck, &decomm.A, &S.A, &comm.comm_A, r, transcript)?;
    let (arg_B, u_w_vec_B) =
      SparseEvaluationArgument::prove(ck, &decomm.B, &S.B, &comm.comm_B, r, transcript)?;
    let (arg_C, u_w_vec_C) =
      SparseEvaluationArgument::prove(ck, &decomm.C, &S.C, &comm.comm_C, r, transcript)?;

    // collect the outstanding polynomial evaluation claims from all three arguments
    let u_w_vec = {
      let mut u_w_vec = u_w_vec_A;
      u_w_vec.extend(u_w_vec_B);
      u_w_vec.extend(u_w_vec_C);
      u_w_vec
    };

    Ok((
      SparkEvaluationArgument {
        arg_A,
        arg_B,
        arg_C,
      },
      u_w_vec,
    ))
  }

  /// verifies an evaluation of R1CS matrices viewed as polynomials
  fn verify(
    comm: &Self::Commitment,
    r: &(&[G::Scalar], &[G::Scalar]),
    arg: &Self::EvaluationArgument,
    transcript: &mut G::TE,
  ) -> Result<(G::Scalar, G::Scalar, G::Scalar, Vec<PolyEvalInstance<G>>), NovaError> {
    // verify the three arguments in the same order they were proved
    let (eval_A, u_vec_A) = arg.arg_A.verify(&comm.comm_A, r, transcript)?;
    let (eval_B, u_vec_B) = arg.arg_B.verify(&comm.comm_B, r, transcript)?;
    let (eval_C, u_vec_C) = arg.arg_C.verify(&comm.comm_C, r, transcript)?;

    // collect the outstanding polynomial evaluation claims
    let u_vec = {
      let mut u_vec = u_vec_A;
      u_vec.extend(u_vec_B);
      u_vec.extend(u_vec_C);
      u_vec
    };

    Ok((eval_A, eval_B, eval_C, u_vec))
  }
}

+ 0
- 477
src/spartan/spark/product.rs

@ -1,477 +0,0 @@
use crate::{
errors::NovaError,
spartan::{
math::Math,
polynomial::{EqPolynomial, MultilinearPolynomial},
sumcheck::{CompressedUniPoly, SumcheckProof, UniPoly},
},
traits::{Group, TranscriptEngineTrait},
};
use core::marker::PhantomData;
use ff::{Field, PrimeField};
use serde::{Deserialize, Serialize};
/// The multilinear extension of the identity function over `ell`-bit addresses:
/// evaluating it at the bit-decomposition of `i` yields `i` as a field element.
pub(crate) struct IdentityPolynomial<Scalar: PrimeField> {
  ell: usize,              // number of variables
  _p: PhantomData<Scalar>, // zero-sized; ties the polynomial to a field
}
impl<Scalar: PrimeField> IdentityPolynomial<Scalar> {
pub fn new(ell: usize) -> Self {
IdentityPolynomial {
ell,
_p: Default::default(),
}
}
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
assert_eq!(self.ell, r.len());
(0..self.ell)
.map(|i| Scalar::from(2_usize.pow((self.ell - i - 1) as u32) as u64) * r[i])
.fold(Scalar::zero(), |acc, item| acc + item)
}
}
impl<G: Group> SumcheckProof<G> {
  /// Runs the sum-check protocol for a claim equal to the sum, over the Boolean
  /// hypercube, of `comb_func(A, B, C)` of three multilinear polynomials.
  /// The polynomials are bound in place to the verifier's challenges; returns
  /// the proof, the challenge vector, and the final evaluations
  /// `[A(r), B(r), C(r)]`.
  pub fn prove_cubic<F>(
    claim: &G::Scalar,
    num_rounds: usize,
    poly_A: &mut MultilinearPolynomial<G::Scalar>,
    poly_B: &mut MultilinearPolynomial<G::Scalar>,
    poly_C: &mut MultilinearPolynomial<G::Scalar>,
    comb_func: F,
    transcript: &mut G::TE,
  ) -> Result<(Self, Vec<G::Scalar>, Vec<G::Scalar>), NovaError>
  where
    F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar,
  {
    let mut e = *claim;
    let mut r: Vec<G::Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly<G>> = Vec::new();
    for _j in 0..num_rounds {
      // evaluate this round's cubic polynomial at 0, 2, and 3;
      // the evaluation at 1 is derived from the running claim `e`
      let mut eval_point_0 = G::Scalar::zero();
      let mut eval_point_2 = G::Scalar::zero();
      let mut eval_point_3 = G::Scalar::zero();

      let len = poly_A.len() / 2;
      for i in 0..len {
        // eval 0: bound_func is A(low)
        eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);

        // eval 2: bound_func is -A(low) + 2*A(high)
        let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
        let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
        let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
        eval_point_2 += comb_func(
          &poly_A_bound_point,
          &poly_B_bound_point,
          &poly_C_bound_point,
        );

        // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
        let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
        let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
        let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
        eval_point_3 += comb_func(
          &poly_A_bound_point,
          &poly_B_bound_point,
          &poly_C_bound_point,
        );
      }

      let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      transcript.absorb(b"p", &poly);

      // derive the verifier's challenge for the next round
      let r_i = transcript.squeeze(b"c")?;
      r.push(r_i);

      // bound all tables to the verifier's challenge
      poly_A.bound_poly_var_top(&r_i);
      poly_B.bound_poly_var_top(&r_i);
      poly_C.bound_poly_var_top(&r_i);

      e = poly.evaluate(&r_i);
      cubic_polys.push(poly.compress());
    }
    Ok((
      Self::new(cubic_polys),
      r,
      vec![poly_A[0], poly_B[0], poly_C[0]],
    ))
  }

  /// Batched variant of `prove_cubic`: proves a random linear combination
  /// (weights in `coeffs`) of cubic sum-check claims over pairs
  /// `(A_i, B_i)` that all share a single `C` polynomial. Returns the proof,
  /// the challenges, and the final evaluations `(all A_i(r), all B_i(r), C(r))`.
  pub fn prove_cubic_batched<F>(
    claim: &G::Scalar,
    num_rounds: usize,
    poly_vec: (
      &mut Vec<&mut MultilinearPolynomial<G::Scalar>>,
      &mut Vec<&mut MultilinearPolynomial<G::Scalar>>,
      &mut MultilinearPolynomial<G::Scalar>,
    ),
    coeffs: &[G::Scalar],
    comb_func: F,
    transcript: &mut G::TE,
  ) -> Result<
    (
      Self,
      Vec<G::Scalar>,
      (Vec<G::Scalar>, Vec<G::Scalar>, G::Scalar),
    ),
    NovaError,
  >
  where
    F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar,
  {
    let (poly_A_vec, poly_B_vec, poly_C) = poly_vec;

    let mut e = *claim;
    let mut r: Vec<G::Scalar> = Vec::new();
    let mut cubic_polys: Vec<CompressedUniPoly<G>> = Vec::new();

    for _j in 0..num_rounds {
      // per-instance evaluations of the round polynomial at 0, 2, and 3
      let mut evals: Vec<(G::Scalar, G::Scalar, G::Scalar)> = Vec::new();
      for (poly_A, poly_B) in poly_A_vec.iter().zip(poly_B_vec.iter()) {
        let mut eval_point_0 = G::Scalar::zero();
        let mut eval_point_2 = G::Scalar::zero();
        let mut eval_point_3 = G::Scalar::zero();

        let len = poly_A.len() / 2;
        for i in 0..len {
          // eval 0: bound_func is A(low)
          eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);

          // eval 2: bound_func is -A(low) + 2*A(high)
          let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
          let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
          let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
          eval_point_2 += comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
          );

          // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
          let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
          let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
          let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
          eval_point_3 += comb_func(
            &poly_A_bound_point,
            &poly_B_bound_point,
            &poly_C_bound_point,
          );
        }
        evals.push((eval_point_0, eval_point_2, eval_point_3));
      }

      // combine per-instance evaluations with the batching coefficients
      let evals_combined_0 = (0..evals.len())
        .map(|i| evals[i].0 * coeffs[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);
      let evals_combined_2 = (0..evals.len())
        .map(|i| evals[i].1 * coeffs[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);
      let evals_combined_3 = (0..evals.len())
        .map(|i| evals[i].2 * coeffs[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);

      let evals = vec![
        evals_combined_0,
        e - evals_combined_0,
        evals_combined_2,
        evals_combined_3,
      ];
      let poly = UniPoly::from_evals(&evals);

      // append the prover's message to the transcript
      transcript.absorb(b"p", &poly);

      // derive the verifier's challenge for the next round
      let r_i = transcript.squeeze(b"c")?;
      r.push(r_i);

      // bound all tables to the verifier's challenge
      for (poly_A, poly_B) in poly_A_vec.iter_mut().zip(poly_B_vec.iter_mut()) {
        poly_A.bound_poly_var_top(&r_i);
        poly_B.bound_poly_var_top(&r_i);
      }
      poly_C.bound_poly_var_top(&r_i);

      e = poly.evaluate(&r_i);
      cubic_polys.push(poly.compress());
    }

    // after all rounds, each polynomial is bound down to a single entry
    let poly_A_final = (0..poly_A_vec.len()).map(|i| poly_A_vec[i][0]).collect();
    let poly_B_final = (0..poly_B_vec.len()).map(|i| poly_B_vec[i][0]).collect();
    let claims_prod = (poly_A_final, poly_B_final, poly_C[0]);

    Ok((SumcheckProof::new(cubic_polys), r, claims_prod))
  }
}
/// All layers of a binary product circuit: entry 0 of each vector is the widest
/// (input) layer, and successive entries halve in size.
#[derive(Debug)]
pub struct ProductArgumentInputs<G: Group> {
  left_vec: Vec<MultilinearPolynomial<G::Scalar>>,  // left input wires per layer
  right_vec: Vec<MultilinearPolynomial<G::Scalar>>, // right input wires per layer
}
impl<G: Group> ProductArgumentInputs<G> {
  /// Multiplies corresponding left/right wires of one layer and splits the
  /// products into the next (half-sized) layer's left and right halves.
  fn compute_layer(
    inp_left: &MultilinearPolynomial<G::Scalar>,
    inp_right: &MultilinearPolynomial<G::Scalar>,
  ) -> (
    MultilinearPolynomial<G::Scalar>,
    MultilinearPolynomial<G::Scalar>,
  ) {
    // total number of wires in the current layer; the next layer has len/2
    let len = inp_left.len() + inp_right.len();
    let outp_left = (0..len / 4)
      .map(|i| inp_left[i] * inp_right[i])
      .collect::<Vec<G::Scalar>>();
    let outp_right = (len / 4..len / 2)
      .map(|i| inp_left[i] * inp_right[i])
      .collect::<Vec<G::Scalar>>();

    (
      MultilinearPolynomial::new(outp_left),
      MultilinearPolynomial::new(outp_right),
    )
  }

  /// Builds all layers of a binary product circuit whose inputs are the entries
  /// of `poly`; layer 0 is `poly` split into two halves.
  pub fn new(poly: &MultilinearPolynomial<G::Scalar>) -> Self {
    let mut left_vec: Vec<MultilinearPolynomial<G::Scalar>> = Vec::new();
    let mut right_vec: Vec<MultilinearPolynomial<G::Scalar>> = Vec::new();
    let num_layers = poly.len().log_2();
    let (outp_left, outp_right) = poly.split(poly.len() / 2);

    left_vec.push(outp_left);
    right_vec.push(outp_right);

    for i in 0..num_layers - 1 {
      let (outp_left, outp_right) =
        ProductArgumentInputs::<G>::compute_layer(&left_vec[i], &right_vec[i]);
      left_vec.push(outp_left);
      right_vec.push(outp_right);
    }

    Self {
      left_vec,
      right_vec,
    }
  }

  /// Returns the product of all inputs, read off the final (single-wire) layer.
  pub fn evaluate(&self) -> G::Scalar {
    let len = self.left_vec.len();
    assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
    assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
    self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
  }
}
/// A batched sum-check proof for one layer of the product circuits, together
/// with the claimed left/right products for each circuit in the batch.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct LayerProofBatched<G: Group> {
  proof: SumcheckProof<G>,           // the layer's batched sum-check proof
  claims_prod_left: Vec<G::Scalar>,  // claimed left-wire evaluations, per circuit
  claims_prod_right: Vec<G::Scalar>, // claimed right-wire evaluations, per circuit
}
impl<G: Group> LayerProofBatched<G> {
  /// Verifies this layer's sum-check proof against `claim`, returning the final
  /// claim and the challenge vector produced by the sum-check.
  pub fn verify(
    &self,
    claim: G::Scalar,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut G::TE,
  ) -> Result<(G::Scalar, Vec<G::Scalar>), NovaError> {
    // delegate directly to the inner sum-check verifier
    let inner = &self.proof;
    inner.verify(claim, num_rounds, degree_bound, transcript)
  }
}
/// A layered (GKR-style) argument for the products of several multilinear
/// polynomials' entries: one `LayerProofBatched` per circuit layer, ordered
/// from the output layer down to the input layer.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub(crate) struct ProductArgumentBatched<G: Group> {
  proof: Vec<LayerProofBatched<G>>,
}
impl<G: Group> ProductArgumentBatched<G> {
  /// Proves, for each polynomial in `poly_vec`, that the product of its entries
  /// equals the returned evaluation. Works layer by layer from the output of
  /// the product circuit down to its inputs. Returns the proof, the claimed
  /// products, and the final challenge point at which callers must evaluate
  /// the input polynomials.
  pub fn prove(
    poly_vec: &[&MultilinearPolynomial<G::Scalar>],
    transcript: &mut G::TE,
  ) -> Result<(Self, Vec<G::Scalar>, Vec<G::Scalar>), NovaError> {
    let mut prod_circuit_vec: Vec<_> = (0..poly_vec.len())
      .map(|i| ProductArgumentInputs::<G>::new(poly_vec[i]))
      .collect();

    let mut proof_layers: Vec<LayerProofBatched<G>> = Vec::new();
    let num_layers = prod_circuit_vec[0].left_vec.len();
    let evals = (0..prod_circuit_vec.len())
      .map(|i| prod_circuit_vec[i].evaluate())
      .collect::<Vec<G::Scalar>>();

    let mut claims_to_verify = evals.clone();
    let mut rand = Vec::new();
    // walk the layers from the narrow output layer to the wide input layer
    for layer_id in (0..num_layers).rev() {
      let len = prod_circuit_vec[0].left_vec[layer_id].len()
        + prod_circuit_vec[0].right_vec[layer_id].len();

      // eq polynomial fixing the point accumulated from previous layers
      let mut poly_C = MultilinearPolynomial::new(EqPolynomial::new(rand.clone()).evals());
      assert_eq!(poly_C.len(), len / 2);

      let num_rounds_prod = poly_C.len().log_2();
      let comb_func_prod = |poly_A_comp: &G::Scalar,
                            poly_B_comp: &G::Scalar,
                            poly_C_comp: &G::Scalar|
       -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp };

      let mut poly_A_batched: Vec<&mut MultilinearPolynomial<G::Scalar>> = Vec::new();
      let mut poly_B_batched: Vec<&mut MultilinearPolynomial<G::Scalar>> = Vec::new();
      for prod_circuit in prod_circuit_vec.iter_mut() {
        poly_A_batched.push(&mut prod_circuit.left_vec[layer_id]);
        poly_B_batched.push(&mut prod_circuit.right_vec[layer_id])
      }
      let poly_vec = (&mut poly_A_batched, &mut poly_B_batched, &mut poly_C);

      // produce a fresh set of coeffs (powers of a challenge) and a joint claim
      let coeff_vec = {
        let s = transcript.squeeze(b"r")?;
        let mut s_vec = vec![s];
        for i in 1..claims_to_verify.len() {
          s_vec.push(s_vec[i - 1] * s);
        }
        s_vec
      };

      let claim = (0..claims_to_verify.len())
        .map(|i| claims_to_verify[i] * coeff_vec[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);

      let (proof, rand_prod, claims_prod) = SumcheckProof::prove_cubic_batched(
        &claim,
        num_rounds_prod,
        poly_vec,
        &coeff_vec,
        comb_func_prod,
        transcript,
      )?;

      let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;

      // absorb the left/right claims before sampling the condensing challenge
      let v = {
        let mut v = claims_prod_left.clone();
        v.extend(&claims_prod_right);
        v
      };
      transcript.absorb(b"p", &v.as_slice());

      // produce a random challenge to condense two claims into a single claim
      let r_layer = transcript.squeeze(b"c")?;

      claims_to_verify = (0..prod_circuit_vec.len())
        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
        .collect::<Vec<G::Scalar>>();

      // prepend r_layer to the accumulated evaluation point
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;

      proof_layers.push(LayerProofBatched {
        proof,
        claims_prod_left,
        claims_prod_right,
      });
    }

    Ok((
      ProductArgumentBatched {
        proof: proof_layers,
      },
      evals,
      rand,
    ))
  }

  /// Verifies the layered product proof against the claimed products
  /// `claims_prod_vec` of input polynomials of length `len`. Returns the
  /// reduced claims and the challenge point for the input layer.
  pub fn verify(
    &self,
    claims_prod_vec: &[G::Scalar],
    len: usize,
    transcript: &mut G::TE,
  ) -> Result<(Vec<G::Scalar>, Vec<G::Scalar>), NovaError> {
    let num_layers = len.log_2();
    let mut rand: Vec<G::Scalar> = Vec::new();
    if self.proof.len() != num_layers {
      return Err(NovaError::InvalidProductProof);
    }

    let mut claims_to_verify = claims_prod_vec.to_owned();
    // layer i runs i sum-check rounds; enumerate over 0..num_layers yields
    // equal pairs, so `num_rounds == i` at every iteration
    for (num_rounds, i) in (0..num_layers).enumerate() {
      // produce random coefficients, one for each instance
      let coeff_vec = {
        let s = transcript.squeeze(b"r")?;
        let mut s_vec = vec![s];
        for i in 1..claims_to_verify.len() {
          s_vec.push(s_vec[i - 1] * s);
        }
        s_vec
      };

      // produce a joint claim
      let claim = (0..claims_to_verify.len())
        .map(|i| claims_to_verify[i] * coeff_vec[i])
        .fold(G::Scalar::zero(), |acc, item| acc + item);

      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript)?;

      let claims_prod_left = &self.proof[i].claims_prod_left;
      let claims_prod_right = &self.proof[i].claims_prod_right;
      if claims_prod_left.len() != claims_prod_vec.len()
        || claims_prod_right.len() != claims_prod_vec.len()
      {
        return Err(NovaError::InvalidProductProof);
      }

      // absorb the left/right claims, mirroring the prover
      let v = {
        let mut v = claims_prod_left.clone();
        v.extend(claims_prod_right);
        v
      };
      transcript.absorb(b"p", &v.as_slice());

      if rand.len() != rand_prod.len() {
        return Err(NovaError::InvalidProductProof);
      }

      // check the final sum-check claim: sum_i coeff_i * left_i * right_i * eq(rand, rand_prod)
      let eq: G::Scalar = (0..rand.len())
        .map(|i| {
          rand[i] * rand_prod[i] + (G::Scalar::one() - rand[i]) * (G::Scalar::one() - rand_prod[i])
        })
        .fold(G::Scalar::one(), |acc, item| acc * item);
      let claim_expected: G::Scalar = (0..claims_prod_vec.len())
        .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
        .fold(G::Scalar::zero(), |acc, item| acc + item);

      if claim_expected != claim_last {
        return Err(NovaError::InvalidProductProof);
      }

      // produce a random challenge to condense the left/right claims
      let r_layer = transcript.squeeze(b"c")?;

      claims_to_verify = (0..claims_prod_left.len())
        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
        .collect::<Vec<G::Scalar>>();

      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
    }
    Ok((claims_to_verify, rand))
  }
}

+ 0
- 724
src/spartan/spark/sparse.rs

@ -1,724 +0,0 @@
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::needless_range_loop)]
use crate::{
errors::NovaError,
spartan::{
math::Math,
polynomial::{EqPolynomial, MultilinearPolynomial},
spark::product::{IdentityPolynomial, ProductArgumentBatched},
PolyEvalInstance, PolyEvalWitness, SumcheckProof,
},
traits::{commitment::CommitmentEngineTrait, Group, TranscriptEngineTrait, TranscriptReprTrait},
Commitment, CommitmentKey,
};
use ff::Field;
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
/// A type that holds a sparse polynomial in dense representation
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparsePolynomial<G: Group> {
  ell: (usize, usize), // number of variables in each dimension
  // dense representation of the nonzero entries, padded to a power of two
  row: Vec<G::Scalar>, // row index of each entry, as a field element
  col: Vec<G::Scalar>, // column index of each entry, as a field element
  val: Vec<G::Scalar>, // value of each entry
  // timestamp polynomials (inputs to the memory-checking argument)
  row_read_ts: Vec<G::Scalar>,  // read timestamp per access to the row table
  row_audit_ts: Vec<G::Scalar>, // final timestamp per cell of the row table
  col_read_ts: Vec<G::Scalar>,  // read timestamp per access to the column table
  col_audit_ts: Vec<G::Scalar>, // final timestamp per cell of the column table
}
/// A type that holds a commitment to a sparse polynomial:
/// one commitment per dense vector of `SparsePolynomial`
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparsePolynomialCommitment<G: Group> {
  ell: (usize, usize), // number of variables
  size: usize,         // size of the dense representation
  // commitments to the dense representation
  comm_row: Commitment<G>,
  comm_col: Commitment<G>,
  comm_val: Commitment<G>,
  // commitments to the timestamp polynomials
  comm_row_read_ts: Commitment<G>,
  comm_row_audit_ts: Commitment<G>,
  comm_col_read_ts: Commitment<G>,
  comm_col_audit_ts: Commitment<G>,
}
impl<G: Group> TranscriptReprTrait<G> for SparsePolynomialCommitment<G> {
  /// Serializes all seven commitments as a single slice, in the same order the
  /// fields are declared.
  fn to_transcript_bytes(&self) -> Vec<u8> {
    let comms = [
      self.comm_row,
      self.comm_col,
      self.comm_val,
      self.comm_row_read_ts,
      self.comm_row_audit_ts,
      self.comm_col_read_ts,
      self.comm_col_audit_ts,
    ];
    comms.as_slice().to_transcript_bytes()
  }
}
impl<G: Group> SparsePolynomial<G> {
  /// Converts the sparse matrix `M` (a list of `(row, col, val)` triples) into
  /// dense address/value vectors padded to a power of two, and computes the
  /// read/audit timestamp polynomials used by the memory-checking argument.
  pub fn new(ell: (usize, usize), M: &[(usize, usize, G::Scalar)]) -> Self {
    let mut row = M.iter().map(|(r, _, _)| *r).collect::<Vec<usize>>();
    let mut col = M.iter().map(|(_, c, _)| *c).collect::<Vec<usize>>();
    let mut val = M.iter().map(|(_, _, v)| *v).collect::<Vec<G::Scalar>>();

    let num_ops = M.len().next_power_of_two();
    let num_cells_row = ell.0.pow2();
    let num_cells_col = ell.1.pow2();

    // pad with accesses to address 0 and value 0 so lengths are powers of two
    row.resize(num_ops, 0usize);
    col.resize(num_ops, 0usize);
    val.resize(num_ops, G::Scalar::zero());

    // timestamp calculation routine: for each access, record the cell's current
    // counter as the read timestamp, then increment the cell's counter; the
    // final counters form the audit timestamps
    let timestamp_calc =
      |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> (Vec<usize>, Vec<usize>) {
        let mut read_ts = vec![0usize; num_ops];
        let mut audit_ts = vec![0usize; num_cells];

        assert!(num_ops >= addr_trace.len());
        for i in 0..addr_trace.len() {
          let addr = addr_trace[i];
          assert!(addr < num_cells);
          let r_ts = audit_ts[addr];
          read_ts[i] = r_ts;

          let w_ts = r_ts + 1;
          audit_ts[addr] = w_ts;
        }
        (read_ts, audit_ts)
      };

    // timestamp polynomials for row and column address traces
    let (row_read_ts, row_audit_ts) = timestamp_calc(num_ops, num_cells_row, &row);
    let (col_read_ts, col_audit_ts) = timestamp_calc(num_ops, num_cells_col, &col);

    // a routine to turn a vector of usize into a vector of scalars
    let to_vec_scalar = |v: &[usize]| -> Vec<G::Scalar> {
      (0..v.len())
        .map(|i| G::Scalar::from(v[i] as u64))
        .collect::<Vec<G::Scalar>>()
    };

    Self {
      ell,
      // dense representation
      row: to_vec_scalar(&row),
      col: to_vec_scalar(&col),
      val,
      // timestamp polynomials
      row_read_ts: to_vec_scalar(&row_read_ts),
      row_audit_ts: to_vec_scalar(&row_audit_ts),
      col_read_ts: to_vec_scalar(&col_read_ts),
      col_audit_ts: to_vec_scalar(&col_audit_ts),
    }
  }

  /// Commits to each of the seven dense vectors (in parallel).
  pub fn commit(&self, ck: &CommitmentKey<G>) -> SparsePolynomialCommitment<G> {
    let comm_vec: Vec<Commitment<G>> = [
      &self.row,
      &self.col,
      &self.val,
      &self.row_read_ts,
      &self.row_audit_ts,
      &self.col_read_ts,
      &self.col_audit_ts,
    ]
    .par_iter()
    .map(|v| G::CE::commit(ck, v))
    .collect();

    SparsePolynomialCommitment {
      ell: self.ell,
      size: self.row.len(),
      comm_row: comm_vec[0],
      comm_col: comm_vec[1],
      comm_val: comm_vec[2],
      comm_row_read_ts: comm_vec[3],
      comm_row_audit_ts: comm_vec[4],
      comm_col_read_ts: comm_vec[5],
      comm_col_audit_ts: comm_vec[6],
    }
  }

  /// Evaluates each sparse matrix in `M_vec` at the point `(r_x, r_y)` using
  /// precomputed eq tables: eval = sum over (row, col, val) of
  /// T_x[row] * T_y[col] * val.
  pub fn multi_evaluate(
    M_vec: &[&[(usize, usize, G::Scalar)]],
    r_x: &[G::Scalar],
    r_y: &[G::Scalar],
  ) -> Vec<G::Scalar> {
    let evaluate_with_table =
      |M: &[(usize, usize, G::Scalar)], T_x: &[G::Scalar], T_y: &[G::Scalar]| -> G::Scalar {
        (0..M.len())
          .collect::<Vec<usize>>()
          .par_iter()
          .map(|&i| {
            let (row, col, val) = M[i];
            T_x[row] * T_y[col] * val
          })
          .reduce(G::Scalar::zero, |acc, x| acc + x)
      };

    // compute the two eq tables in parallel
    let (T_x, T_y) = rayon::join(
      || EqPolynomial::new(r_x.to_vec()).evals(),
      || EqPolynomial::new(r_y.to_vec()).evals(),
    );

    (0..M_vec.len())
      .collect::<Vec<usize>>()
      .par_iter()
      .map(|&i| evaluate_with_table(M_vec[i], &T_x, &T_y))
      .collect()
  }

  /// Produces the oracles E_row[i] = T_x[row_i] and E_col[i] = T_y[col_i]
  /// (padded to a power of two), along with the full eq tables T_x and T_y.
  fn evaluation_oracles(
    M: &[(usize, usize, G::Scalar)],
    r_x: &[G::Scalar],
    r_y: &[G::Scalar],
  ) -> (
    Vec<G::Scalar>,
    Vec<G::Scalar>,
    Vec<G::Scalar>,
    Vec<G::Scalar>,
  ) {
    let evaluation_oracles_with_table = |M: &[(usize, usize, G::Scalar)],
                                         T_x: &[G::Scalar],
                                         T_y: &[G::Scalar]|
     -> (Vec<G::Scalar>, Vec<G::Scalar>) {
      (0..M.len())
        .collect::<Vec<usize>>()
        .par_iter()
        .map(|&i| {
          let (row, col, _val) = M[i];
          (T_x[row], T_y[col])
        })
        .collect::<Vec<(G::Scalar, G::Scalar)>>()
        .into_par_iter()
        .unzip()
    };

    let (T_x, T_y) = rayon::join(
      || EqPolynomial::new(r_x.to_vec()).evals(),
      || EqPolynomial::new(r_y.to_vec()).evals(),
    );

    let (mut E_row, mut E_col) = evaluation_oracles_with_table(M, &T_x, &T_y);

    // resize the returned vectors
    // we place T_x[0] (resp. T_y[0]) since the resized row/col traces are appended with 0s
    E_row.resize(M.len().next_power_of_two(), T_x[0]);
    E_col.resize(M.len().next_power_of_two(), T_y[0]);

    (E_row, E_col, T_x, T_y)
  }
}
/// An argument proving the evaluation of a sparse polynomial at a point, built
/// from two oracles (E_row, E_col) whose well-formedness is established via
/// memory-checking product arguments.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct SparseEvaluationArgument<G: Group> {
  // claimed evaluation
  eval: G::Scalar,
  // oracles
  comm_E_row: Commitment<G>,
  comm_E_col: Commitment<G>,
  // proof of correct evaluation wrt oracles
  sc_proof_eval: SumcheckProof<G>,
  eval_E_row: G::Scalar,
  eval_E_col: G::Scalar,
  eval_val: G::Scalar,
  // proof that E_row and E_col are well-formed:
  // multiset fingerprint products (init/read/write/audit) for rows and columns
  eval_init_row: G::Scalar,
  eval_read_row: G::Scalar,
  eval_write_row: G::Scalar,
  eval_audit_row: G::Scalar,
  eval_init_col: G::Scalar,
  eval_read_col: G::Scalar,
  eval_write_col: G::Scalar,
  eval_audit_col: G::Scalar,
  sc_prod_init_audit_row: ProductArgumentBatched<G>,
  sc_prod_read_write_row_col: ProductArgumentBatched<G>,
  sc_prod_init_audit_col: ProductArgumentBatched<G>,
  // evaluations of the address/timestamp polynomials and oracles at the
  // product-argument challenge points
  eval_row: G::Scalar,
  eval_row_read_ts: G::Scalar,
  eval_E_row2: G::Scalar,
  eval_row_audit_ts: G::Scalar,
  eval_col: G::Scalar,
  eval_col_read_ts: G::Scalar,
  eval_E_col2: G::Scalar,
  eval_col_audit_ts: G::Scalar,
}
impl<G: Group> SparseEvaluationArgument<G> {
/// Proves the claimed evaluation of the sparse polynomial `poly` (with nonzero
/// entries `sparse`, committed via `comm`) at the point `r = (r_x, r_y)`.
/// Returns the argument together with outstanding (witness, instance)
/// polynomial-evaluation claims that the caller must discharge later.
pub fn prove(
  ck: &CommitmentKey<G>,
  poly: &SparsePolynomial<G>,
  sparse: &[(usize, usize, G::Scalar)],
  comm: &SparsePolynomialCommitment<G>,
  r: &(&[G::Scalar], &[G::Scalar]),
  transcript: &mut G::TE,
) -> Result<(Self, Vec<(PolyEvalWitness<G>, PolyEvalInstance<G>)>), NovaError> {
  let (r_x, r_y) = r;
  let eval = SparsePolynomial::<G>::multi_evaluate(&[sparse], r_x, r_y)[0];

  // keep track of evaluation claims
  let mut w_u_vec: Vec<(PolyEvalWitness<G>, PolyEvalInstance<G>)> = Vec::new();

  // compute oracles to prove the correctness of `eval`
  let (E_row, E_col, T_x, T_y) = SparsePolynomial::<G>::evaluation_oracles(sparse, r_x, r_y);
  let val = poly.val.clone();

  // commit to the two oracles
  let comm_E_row = G::CE::commit(ck, &E_row);
  let comm_E_col = G::CE::commit(ck, &E_col);

  // absorb the commitments and the claimed evaluation
  transcript.absorb(b"E", &vec![comm_E_row, comm_E_col].as_slice());
  transcript.absorb(b"e", &eval);

  // prove via a cubic sum-check: eval = sum_i E_row[i] * E_col[i] * val[i]
  let comb_func_eval = |poly_A_comp: &G::Scalar,
                        poly_B_comp: &G::Scalar,
                        poly_C_comp: &G::Scalar|
   -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp };
  let (sc_proof_eval, r_eval, claims_eval) = SumcheckProof::<G>::prove_cubic(
    &eval,
    E_row.len().log_2(), // number of rounds
    &mut MultilinearPolynomial::new(E_row.clone()),
    &mut MultilinearPolynomial::new(E_col.clone()),
    &mut MultilinearPolynomial::new(val.clone()),
    comb_func_eval,
    transcript,
  )?;

  // prove evaluations of E_row, E_col and val at r_eval, batched with powers of rho
  let rho = transcript.squeeze(b"r")?;
  let comm_joint = comm_E_row + comm_E_col * rho + comm.comm_val * rho * rho;
  let eval_joint = claims_eval[0] + rho * claims_eval[1] + rho * rho * claims_eval[2];
  let poly_eval = E_row
    .iter()
    .zip(E_col.iter())
    .zip(val.iter())
    .map(|((a, b), c)| *a + rho * *b + rho * rho * *c)
    .collect::<Vec<G::Scalar>>();

  // add the claim to prove for later
  w_u_vec.push((
    PolyEvalWitness { p: poly_eval },
    PolyEvalInstance {
      c: comm_joint,
      x: r_eval,
      e: eval_joint,
    },
  ));

  // we now need to prove that E_row and E_col are well-formed
  // we use memory checking: H(INIT) * H(WS) =? H(RS) * H(FINAL)
  let gamma_1 = transcript.squeeze(b"g1")?;
  let gamma_2 = transcript.squeeze(b"g2")?;

  let gamma_1_sqr = gamma_1 * gamma_1;
  // fingerprints an (addr, val, ts) tuple with the challenges gamma_1, gamma_2
  let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar {
    (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2
  };

  // multiset fingerprints for the row table: initial state, reads, writes, final state
  let init_row = (0..T_x.len())
    .map(|i| hash_func(&G::Scalar::from(i as u64), &T_x[i], &G::Scalar::zero()))
    .collect::<Vec<G::Scalar>>();
  let read_row = (0..E_row.len())
    .map(|i| hash_func(&poly.row[i], &E_row[i], &poly.row_read_ts[i]))
    .collect::<Vec<G::Scalar>>();
  let write_row = (0..E_row.len())
    .map(|i| {
      hash_func(
        &poly.row[i],
        &E_row[i],
        &(poly.row_read_ts[i] + G::Scalar::one()),
      )
    })
    .collect::<Vec<G::Scalar>>();
  let audit_row = (0..T_x.len())
    .map(|i| hash_func(&G::Scalar::from(i as u64), &T_x[i], &poly.row_audit_ts[i]))
    .collect::<Vec<G::Scalar>>();
  // multiset fingerprints for the column table
  let init_col = (0..T_y.len())
    .map(|i| hash_func(&G::Scalar::from(i as u64), &T_y[i], &G::Scalar::zero()))
    .collect::<Vec<G::Scalar>>();
  let read_col = (0..E_col.len())
    .map(|i| hash_func(&poly.col[i], &E_col[i], &poly.col_read_ts[i]))
    .collect::<Vec<G::Scalar>>();
  let write_col = (0..E_col.len())
    .map(|i| {
      hash_func(
        &poly.col[i],
        &E_col[i],
        &(poly.col_read_ts[i] + G::Scalar::one()),
      )
    })
    .collect::<Vec<G::Scalar>>();
  let audit_col = (0..T_y.len())
    .map(|i| hash_func(&G::Scalar::from(i as u64), &T_y[i], &poly.col_audit_ts[i]))
    .collect::<Vec<G::Scalar>>();

  // prove the products of the init/audit fingerprints (rows)
  let (sc_prod_init_audit_row, eval_init_audit_row, r_init_audit_row) =
    ProductArgumentBatched::prove(
      &[
        &MultilinearPolynomial::new(init_row),
        &MultilinearPolynomial::new(audit_row),
      ],
      transcript,
    )?;

  // prove the products of the init/audit fingerprints (columns)
  assert_eq!(init_col.len(), audit_col.len());
  let (sc_prod_init_audit_col, eval_init_audit_col, r_init_audit_col) =
    ProductArgumentBatched::prove(
      &[
        &MultilinearPolynomial::new(init_col),
        &MultilinearPolynomial::new(audit_col),
      ],
      transcript,
    )?;

  // prove the products of the read/write fingerprints (rows and columns batched)
  assert_eq!(read_row.len(), write_row.len());
  assert_eq!(read_row.len(), read_col.len());
  assert_eq!(read_row.len(), write_col.len());

  let (sc_prod_read_write_row_col, eval_read_write_row_col, r_read_write_row_col) =
    ProductArgumentBatched::prove(
      &[
        &MultilinearPolynomial::new(read_row),
        &MultilinearPolynomial::new(write_row),
        &MultilinearPolynomial::new(read_col),
        &MultilinearPolynomial::new(write_col),
      ],
      transcript,
    )?;

  // row-related claims of polynomial evaluations to aid the final check of the sum-check
  let eval_row = MultilinearPolynomial::evaluate_with(&poly.row, &r_read_write_row_col);
  let eval_row_read_ts =
    MultilinearPolynomial::evaluate_with(&poly.row_read_ts, &r_read_write_row_col);
  let eval_E_row2 = MultilinearPolynomial::evaluate_with(&E_row, &r_read_write_row_col);
  let eval_row_audit_ts =
    MultilinearPolynomial::evaluate_with(&poly.row_audit_ts, &r_init_audit_row);

  // col-related claims of polynomial evaluations to aid the final check of the sum-check
  let eval_col = MultilinearPolynomial::evaluate_with(&poly.col, &r_read_write_row_col);
  let eval_col_read_ts =
    MultilinearPolynomial::evaluate_with(&poly.col_read_ts, &r_read_write_row_col);
  let eval_E_col2 = MultilinearPolynomial::evaluate_with(&E_col, &r_read_write_row_col);
  let eval_col_audit_ts =
    MultilinearPolynomial::evaluate_with(&poly.col_audit_ts, &r_init_audit_col);

  // we can batch prove the six claims at r_read_write_row_col with powers of a challenge c
  transcript.absorb(
    b"e",
    &[
      eval_row,
      eval_row_read_ts,
      eval_E_row2,
      eval_col,
      eval_col_read_ts,
      eval_E_col2,
    ]
    .as_slice(),
  );
  let c = transcript.squeeze(b"c")?;
  let eval_joint = eval_row
    + c * eval_row_read_ts
    + c * c * eval_E_row2
    + c * c * c * eval_col
    + c * c * c * c * eval_col_read_ts
    + c * c * c * c * c * eval_E_col2;
  let comm_joint = comm.comm_row
    + comm.comm_row_read_ts * c
    + comm_E_row * c * c
    + comm.comm_col * c * c * c
    + comm.comm_col_read_ts * c * c * c * c
    + comm_E_col * c * c * c * c * c;
  let poly_joint = poly
    .row
    .iter()
    .zip(poly.row_read_ts.iter())
    .zip(E_row.into_iter())
    .zip(poly.col.iter())
    .zip(poly.col_read_ts.iter())
    .zip(E_col.into_iter())
    .map(|(((((x, y), z), m), n), q)| {
      *x + c * y + c * c * z + c * c * c * m + c * c * c * c * n + c * c * c * c * c * q
    })
    .collect::<Vec<_>>();

  // add the claim to prove for later
  w_u_vec.push((
    PolyEvalWitness { p: poly_joint },
    PolyEvalInstance {
      c: comm_joint,
      x: r_read_write_row_col,
      e: eval_joint,
    },
  ));

  transcript.absorb(b"a", &eval_row_audit_ts); // add evaluation to transcript, commitment is already in
  w_u_vec.push((
    PolyEvalWitness {
      p: poly.row_audit_ts.clone(),
    },
    PolyEvalInstance {
      c: comm.comm_row_audit_ts,
      x: r_init_audit_row,
      e: eval_row_audit_ts,
    },
  ));

  transcript.absorb(b"a", &eval_col_audit_ts); // add evaluation to transcript, commitment is already in
  w_u_vec.push((
    PolyEvalWitness {
      p: poly.col_audit_ts.clone(),
    },
    PolyEvalInstance {
      c: comm.comm_col_audit_ts,
      x: r_init_audit_col,
      e: eval_col_audit_ts,
    },
  ));

  let eval_arg = Self {
    // claimed evaluation
    eval,
    // oracles
    comm_E_row,
    comm_E_col,
    // proof of correct evaluation wrt oracles
    sc_proof_eval,
    eval_E_row: claims_eval[0],
    eval_E_col: claims_eval[1],
    eval_val: claims_eval[2],
    // proof that E_row and E_col are well-formed
    eval_init_row: eval_init_audit_row[0],
    eval_read_row: eval_read_write_row_col[0],
    eval_write_row: eval_read_write_row_col[1],
    eval_audit_row: eval_init_audit_row[1],
    eval_init_col: eval_init_audit_col[0],
    eval_read_col: eval_read_write_row_col[2],
    eval_write_col: eval_read_write_row_col[3],
    eval_audit_col: eval_init_audit_col[1],
    sc_prod_init_audit_row,
    sc_prod_read_write_row_col,
    sc_prod_init_audit_col,
    eval_row,
    eval_row_read_ts,
    eval_E_row2,
    eval_row_audit_ts,
    eval_col,
    eval_col_read_ts,
    eval_E_col2,
    eval_col_audit_ts,
  };

  Ok((eval_arg, w_u_vec))
}
/// Verifies the evaluation argument for a sparse polynomial committed in `comm`,
/// evaluated at the point `r = (r_x, r_y)`.
///
/// On success, returns the claimed evaluation together with a list of polynomial
/// evaluation claims (`PolyEvalInstance`s) that the caller must still discharge
/// with a (batched) polynomial evaluation engine.
pub fn verify(
  &self,
  comm: &SparsePolynomialCommitment<G>,
  r: &(&[G::Scalar], &[G::Scalar]),
  transcript: &mut G::TE,
) -> Result<(G::Scalar, Vec<PolyEvalInstance<G>>), NovaError> {
  let (r_x, r_y) = r;

  // keep track of evaluation claims to be checked later by the caller
  let mut u_vec: Vec<PolyEvalInstance<G>> = Vec::new();

  // absorb the prover's oracles and the claimed evaluation into the transcript
  transcript.absorb(b"E", &vec![self.comm_E_row, self.comm_E_col].as_slice());
  transcript.absorb(b"e", &self.eval);

  // (1) verify the correct evaluation of sparse polynomial
  let (claim_eval_final, r_eval) = self.sc_proof_eval.verify(
    self.eval,
    comm.size.next_power_of_two().log_2(),
    3, // degree bound: the sum-check runs over a product of three multilinears
    transcript,
  )?;

  // verify the last step of the sum-check
  if claim_eval_final != self.eval_E_row * self.eval_E_col * self.eval_val {
    return Err(NovaError::InvalidSumcheckProof);
  }

  // batch the evaluation claims of E_row, E_col, and val at r_eval
  // into a single claim using powers of a random scalar rho
  let rho = transcript.squeeze(b"r")?;
  let comm_joint = self.comm_E_row + self.comm_E_col * rho + comm.comm_val * rho * rho;
  let eval_joint = self.eval_E_row + rho * self.eval_E_col + rho * rho * self.eval_val;

  // add the claim to prove for later
  u_vec.push(PolyEvalInstance {
    c: comm_joint,
    x: r_eval,
    e: eval_joint,
  });

  // (2) verify if E_row and E_col are well formed
  let gamma_1 = transcript.squeeze(b"g1")?;
  let gamma_2 = transcript.squeeze(b"g2")?;

  // hash function folding an (addr, val, ts) tuple into one field element;
  // used by the offline-memory-checking multiset checks below
  let gamma_1_sqr = gamma_1 * gamma_1;
  let hash_func = |addr: &G::Scalar, val: &G::Scalar, ts: &G::Scalar| -> G::Scalar {
    (*ts * gamma_1_sqr + *val * gamma_1 + *addr) - gamma_2
  };

  // check the required multiset relationship
  // row: init * write == read * audit
  if self.eval_init_row * self.eval_write_row != self.eval_read_row * self.eval_audit_row {
    return Err(NovaError::InvalidMultisetProof);
  }
  // col: init * write == read * audit
  if self.eval_init_col * self.eval_write_col != self.eval_read_col * self.eval_audit_col {
    return Err(NovaError::InvalidMultisetProof);
  }

  // verify the product proofs backing the multiset evaluations above
  let (claim_init_audit_row, r_init_audit_row) = self.sc_prod_init_audit_row.verify(
    &[self.eval_init_row, self.eval_audit_row],
    comm.ell.0.pow2(),
    transcript,
  )?;
  let (claim_init_audit_col, r_init_audit_col) = self.sc_prod_init_audit_col.verify(
    &[self.eval_init_col, self.eval_audit_col],
    comm.ell.1.pow2(),
    transcript,
  )?;
  let (claim_read_write_row_col, r_read_write_row_col) = self.sc_prod_read_write_row_col.verify(
    &[
      self.eval_read_row,
      self.eval_write_row,
      self.eval_read_col,
      self.eval_write_col,
    ],
    comm.size,
    transcript,
  )?;

  // finish the final step of the three sum-checks:
  // recompute the expected leaf claims from the prover-supplied evaluations
  let (claim_init_expected_row, claim_audit_expected_row) = {
    let addr = IdentityPolynomial::new(r_init_audit_row.len()).evaluate(&r_init_audit_row);
    let val = EqPolynomial::new(r_x.to_vec()).evaluate(&r_init_audit_row);
    (
      hash_func(&addr, &val, &G::Scalar::zero()), // init entries carry timestamp 0
      hash_func(&addr, &val, &self.eval_row_audit_ts),
    )
  };

  let (claim_read_expected_row, claim_write_expected_row) = {
    (
      hash_func(&self.eval_row, &self.eval_E_row2, &self.eval_row_read_ts),
      hash_func(
        &self.eval_row,
        &self.eval_E_row2,
        &(self.eval_row_read_ts + G::Scalar::one()), // write bumps the timestamp by one
      ),
    )
  };

  // multiset check for the row
  if claim_init_expected_row != claim_init_audit_row[0]
    || claim_audit_expected_row != claim_init_audit_row[1]
    || claim_read_expected_row != claim_read_write_row_col[0]
    || claim_write_expected_row != claim_read_write_row_col[1]
  {
    return Err(NovaError::InvalidSumcheckProof);
  }

  let (claim_init_expected_col, claim_audit_expected_col) = {
    let addr = IdentityPolynomial::new(r_init_audit_col.len()).evaluate(&r_init_audit_col);
    let val = EqPolynomial::new(r_y.to_vec()).evaluate(&r_init_audit_col);
    (
      hash_func(&addr, &val, &G::Scalar::zero()), // init entries carry timestamp 0
      hash_func(&addr, &val, &self.eval_col_audit_ts),
    )
  };

  let (claim_read_expected_col, claim_write_expected_col) = {
    (
      hash_func(&self.eval_col, &self.eval_E_col2, &self.eval_col_read_ts),
      hash_func(
        &self.eval_col,
        &self.eval_E_col2,
        &(self.eval_col_read_ts + G::Scalar::one()), // write bumps the timestamp by one
      ),
    )
  };

  // multiset check for the col
  if claim_init_expected_col != claim_init_audit_col[0]
    || claim_audit_expected_col != claim_init_audit_col[1]
    || claim_read_expected_col != claim_read_write_row_col[2]
    || claim_write_expected_col != claim_read_write_row_col[3]
  {
    return Err(NovaError::InvalidSumcheckProof);
  }

  // absorb the six prover-supplied evaluations before deriving the batching challenge
  transcript.absorb(
    b"e",
    &[
      self.eval_row,
      self.eval_row_read_ts,
      self.eval_E_row2,
      self.eval_col,
      self.eval_col_read_ts,
      self.eval_E_col2,
    ]
    .as_slice(),
  );

  // batch the six claims at r_read_write_row_col into one with powers of c
  let c = transcript.squeeze(b"c")?;
  let eval_joint = self.eval_row
    + c * self.eval_row_read_ts
    + c * c * self.eval_E_row2
    + c * c * c * self.eval_col
    + c * c * c * c * self.eval_col_read_ts
    + c * c * c * c * c * self.eval_E_col2;
  let comm_joint = comm.comm_row
    + comm.comm_row_read_ts * c
    + self.comm_E_row * c * c
    + comm.comm_col * c * c * c
    + comm.comm_col_read_ts * c * c * c * c
    + self.comm_E_col * c * c * c * c * c;
  u_vec.push(PolyEvalInstance {
    c: comm_joint,
    x: r_read_write_row_col,
    e: eval_joint,
  });

  transcript.absorb(b"a", &self.eval_row_audit_ts); // add evaluation to transcript, commitment is already in
  u_vec.push(PolyEvalInstance {
    c: comm.comm_row_audit_ts,
    x: r_init_audit_row,
    e: self.eval_row_audit_ts,
  });

  transcript.absorb(b"a", &self.eval_col_audit_ts); // add evaluation to transcript, commitment is already in
  u_vec.push(PolyEvalInstance {
    c: comm.comm_col_audit_ts,
    x: r_init_audit_col,
    e: self.eval_col_audit_ts,
  });

  Ok((self.eval, u_vec))
}
}

+ 88
- 0
src/spartan/sumcheck.rs

@ -197,6 +197,94 @@ impl SumcheckProof {
Ok((SumcheckProof::new(quad_polys), r, claims_prod))
}
/// Runs the sum-check prover over a cubic combination of three multilinear
/// polynomials, emitting one compressed degree-3 univariate polynomial per round.
///
/// Returns the proof, the per-round verifier challenges, and the evaluations of
/// the three (fully bound) polynomials at the challenge point.
pub fn prove_cubic<F>(
  claim: &G::Scalar,
  num_rounds: usize,
  poly_A: &mut MultilinearPolynomial<G::Scalar>,
  poly_B: &mut MultilinearPolynomial<G::Scalar>,
  poly_C: &mut MultilinearPolynomial<G::Scalar>,
  comb_func: F,
  transcript: &mut G::TE,
) -> Result<(Self, Vec<G::Scalar>, Vec<G::Scalar>), NovaError>
where
  F: Fn(&G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync,
{
  let mut challenges: Vec<G::Scalar> = Vec::new();
  let mut round_polys: Vec<CompressedUniPoly<G>> = Vec::new();
  let mut running_claim = *claim;

  for _ in 0..num_rounds {
    let half = poly_A.len() / 2;

    // Accumulate, over the lower half of the tables, the contributions of the
    // combined cubic polynomial at the points 0, 2, and 3.
    // (The evaluation at 1 is recovered from the running claim below.)
    let (eval_at_0, eval_at_2, eval_at_3) = (0..half)
      .into_par_iter()
      .map(|i| {
        // point 0: the low entry of each table
        let at_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);

        // point 2: extrapolate linearly as 2*high - low
        let a2 = poly_A[half + i] + poly_A[half + i] - poly_A[i];
        let b2 = poly_B[half + i] + poly_B[half + i] - poly_B[i];
        let c2 = poly_C[half + i] + poly_C[half + i] - poly_C[i];
        let at_2 = comb_func(&a2, &b2, &c2);

        // point 3: one more step of the same increment (high - low),
        // computed incrementally from the point-2 values
        let a3 = a2 + poly_A[half + i] - poly_A[i];
        let b3 = b2 + poly_B[half + i] - poly_B[i];
        let c3 = c2 + poly_C[half + i] - poly_C[i];
        let at_3 = comb_func(&a3, &b3, &c3);

        (at_0, at_2, at_3)
      })
      .reduce(
        || (G::Scalar::zero(), G::Scalar::zero(), G::Scalar::zero()),
        |x, y| (x.0 + y.0, x.1 + y.1, x.2 + y.2),
      );

    // evaluations at 0, 1, 2, 3; the point-1 value follows from
    // g(0) + g(1) == running_claim
    let evals = vec![
      eval_at_0,
      running_claim - eval_at_0,
      eval_at_2,
      eval_at_3,
    ];
    let round_poly = UniPoly::from_evals(&evals);

    // append the prover's message to the transcript
    transcript.absorb(b"p", &round_poly);

    // derive the verifier's challenge for this round
    let challenge = transcript.squeeze(b"c")?;
    challenges.push(challenge);
    round_polys.push(round_poly.compress());

    // the claim for the next round is the round polynomial at the challenge
    running_claim = round_poly.evaluate(&challenge);

    // fold the verifier's challenge into every table, halving their size
    poly_A.bound_poly_var_top(&challenge);
    poly_B.bound_poly_var_top(&challenge);
    poly_C.bound_poly_var_top(&challenge);
  }

  Ok((
    SumcheckProof {
      compressed_polys: round_polys,
    },
    challenges,
    vec![poly_A[0], poly_B[0], poly_C[0]],
  ))
}
pub fn prove_cubic_with_additive_term<F>(
claim: &G::Scalar,
num_rounds: usize,

Loading…
Cancel
Save